metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jonathankamau/songs-api",
"score": 3
} |
#### File: songs-api/api/routes.py
```python
from api.endpoints.songs import (AverageDifficultyResource,
RatingMetricsResource, SongRatingResource,
SongSearchResource, SongsListResource)
def resource_routes(api):
"""Define the routes for the API."""
api.add_resource(
SongsListResource,
"/api/v1/songs",
endpoint="song"
)
api.add_resource(
AverageDifficultyResource,
"/api/v1/songs/difficulty",
endpoint="difficulty"
)
api.add_resource(
SongSearchResource,
"/api/v1/songs/search",
endpoint="search"
)
api.add_resource(
SongRatingResource,
"/api/v1/songs/rating",
endpoint="rating"
)
api.add_resource(
RatingMetricsResource,
"/api/v1/songs/rating/metrics",
endpoint="metric"
)
```
#### File: api/tests/test_average_difficulty.py
```python
class TestSongAverageDifficulty:
def test_average_difficulty_of_all_songs(self, client):
response = client.get("/api/v1/songs/difficulty")
response_data = response.json
assert response.status_code == 200
assert response.content_type == "application/json"
assert response_data["average_difficulty"] == 10.32
def test_average_difficulty_of_selected_songs(self, client):
response = client.get("/api/v1/songs/difficulty?level=9")
response_data = response.json
assert response.status_code == 200
assert response.content_type == "application/json"
assert response_data["average_difficulty"] == 9.69
def test_average_difficulty_invalid_parameter_value(self, client):
response = client.get("/api/v1/songs/difficulty?level=fsdfdsfsdf")
response_data = response.json
assert response.status_code == 400
assert response_data["message"] == "Input payload validation failed"
def test_average_difficulty_no_songs_found_for_level(self, client):
response = client.get("/api/v1/songs/difficulty?level=100")
response_data = response.json
assert response.status_code == 404
assert "No songs found for level 100" in response_data["message"]
```
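The tests above rely on a `client` fixture that is not part of this listing; a minimal `conftest.py` sketch, assuming the `create_app` factory shown in `app.py` below, might look like this:
```python
# conftest.py -- hypothetical sketch; the project's real fixture is not shown here.
import pytest

from app import create_app


@pytest.fixture
def client():
    """Yield a Flask test client wired to the app factory."""
    app = create_app()
    app.config["TESTING"] = True
    with app.test_client() as test_client:
        yield test_client
```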
#### File: jonathankamau/songs-api/app.py
```python
import os
from flask import Flask, jsonify
from flask_mongoengine import MongoEngine
from flask_restx import Api
from api.models import Songs
from api.routes import resource_routes
from api.utils.chunks import chunks, generate_song_data
def create_app():
"""Factory Method that creates an instance of the app with the given config.
Args:
environment (str): Specify the configuration to initilize app with.
Returns:
app (Flask): it returns an instance of Flask.
"""
app = Flask(__name__)
db = MongoEngine()
app.config["MONGODB_SETTINGS"] = {
"db": os.environ["MONGODB_DBNAME"],
"host": os.environ["MONGODB_URI"],
}
db.init_app(app)
if Songs.objects.count() == 0:
        with open("songs.json") as songs_file:
            song_instances = []
            for songs in chunks(generate_song_data(songs_file)):
                song_instances += [Songs(**song) for song in list(songs)]
        Songs.objects.insert(song_instances, load_bulk=True)
api = Api(
app=app,
default="Api",
default_label="Song Endpoints",
title="Songs API",
version="1.0.0",
description="""Song API Documentation 📚""",
)
# Resources
resource_routes(api)
# handle default 404 exceptions
@app.errorhandler(404)
def resource_not_found(error):
response = jsonify(
dict(
error="Not found",
message="The requested URL was not found on the server.",
)
)
response.status_code = 404
return response
# handle default 500 exceptions
@app.errorhandler(500)
def internal_server_error(error):
response = jsonify(
dict(
error="Internal server error",
message="The server encountered an internal error.",
)
)
response.status_code = 500
return response
return app
``` |
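`chunks` and `generate_song_data` are imported from `api.utils.chunks` but not included in this listing; a rough sketch of what such helpers could look like is below. The JSON-lines layout of `songs.json` and the default chunk size are assumptions, not details from the original project.
```python
# api/utils/chunks.py -- illustrative sketch only; the real helpers are not shown
# above, and the songs.json layout (assumed to be JSON lines) may differ.
import json
from itertools import islice


def generate_song_data(songs_file):
    """Yield one song dict per non-empty line of a JSON-lines file object."""
    for line in songs_file:
        line = line.strip()
        if line:
            yield json.loads(line)


def chunks(iterable, size=100):
    """Yield successive lists of at most `size` items from any iterable."""
    iterator = iter(iterable)
    while True:
        batch = list(islice(iterator, size))
        if not batch:
            return
        yield batch
```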
{
"source": "jonathankamau/udacity-spark-streaming-project",
"score": 2
} |
#### File: jonathankamau/udacity-spark-streaming-project/data_stream.py
```python
import logging
import json
from pathlib import Path
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import col, from_json
schema = StructType([
StructField("crime_id", StringType(), True),
StructField("original_crime_type_name", StringType(), True),
StructField("report_date", TimestampType(), True),
StructField("call_date", TimestampType(), True),
StructField("offense_date", TimestampType(), True),
StructField("call_time", StringType(), True),
StructField("call_date_time", TimestampType(), True),
StructField("disposition", StringType(), True),
StructField("address", StringType(), True),
StructField("city", StringType(), True),
StructField("state", StringType(), True),
StructField("agency_id", StringType(), True),
StructField("address_type", StringType(), True),
StructField("common_location", StringType(), True)
])
radio_schema = StructType([
StructField("disposition_code", StringType(), True),
StructField("description", StringType(), True)
])
def run_spark_job(spark):
spark.sparkContext.setLogLevel("WARN")
df = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "localhost:9092") \
.option("subscribe", "org.spark.streaming") \
.option("startingOffsets", "earliest") \
.option("maxOffsetsPerTrigger", 200) \
.option("maxRatePerPartition", 10) \
.option("stopGracefullyOnShutdown", "true") \
.load()
df.printSchema()
kafka_df = df.selectExpr("CAST(value AS STRING)")
service_table = kafka_df.select(
from_json("value", schema).alias('data')).select('data.*')
distinct_table = service_table \
.select("original_crime_type_name", "disposition", "call_date_time") \
.withWatermark("call_date_time", '20 minutes')
agg_df = distinct_table.groupBy(
"original_crime_type_name").count().sort("count", ascending=False)
query = agg_df \
.writeStream \
.trigger(processingTime="10 seconds") \
.outputMode("Complete") \
.format("console") \
.option("truncate", "false") \
.start()
    # NOTE: awaitTermination() blocks here, so the radio-code join defined below
    # only starts after this query terminates (see the concurrent sketch after this block).
    query.awaitTermination()
radio_code_json_filepath = f"{Path(__file__).parents[0]}/radio_code.json"
radio_code_df = spark.read.json(
radio_code_json_filepath, schema=radio_schema)
radio_code_df = radio_code_df.withColumnRenamed(
"disposition_code", "disposition")
join_df = distinct_table.join(radio_code_df, "disposition", "left")
join_query = join_df \
.writeStream \
.trigger(processingTime="10 seconds") \
.outputMode("Update") \
.format("console") \
.option("truncate", "false") \
.start()
join_query.awaitTermination()
if __name__ == "__main__":
logger = logging.getLogger(__name__)
spark = SparkSession \
.builder \
.master("local[*]") \
.appName("KafkaSparkStructuredStreaming") \
.config('spark.ui.port', '3000') \
.getOrCreate()
logger.info("Spark started")
run_spark_job(spark)
spark.stop()
``` |
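Because `query.awaitTermination()` blocks until the first streaming query ends, the radio-code join defined after it never starts while the aggregation is running. One possible rearrangement (a sketch, not the original submission) is to start both sinks first and then block on the session's stream manager:
```python
# Sketch: start both sinks before blocking, so the aggregation and the
# radio-code join run concurrently. Assumes agg_df and join_df are built
# exactly as in run_spark_job above.
agg_query = agg_df.writeStream \
    .trigger(processingTime="10 seconds") \
    .outputMode("complete") \
    .format("console") \
    .option("truncate", "false") \
    .start()

join_query = join_df.writeStream \
    .trigger(processingTime="10 seconds") \
    .outputMode("update") \
    .format("console") \
    .option("truncate", "false") \
    .start()

# Block until any active streaming query terminates.
spark.streams.awaitAnyTermination()
```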
{
"source": "jonathankamau/udend-data-pipeline-project",
"score": 2
} |
#### File: plugins/operators/load_dimension.py
```python
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class LoadDimensionOperator(BaseOperator):
"""
Operator that loads data from staging table to the dimension table.
"""
ui_color = '#80BD9E'
@apply_defaults
def __init__(self,
conn_id,
drop_table,
target_table,
create_query,
insert_query,
append,
*args, **kwargs):
super(LoadDimensionOperator, self).__init__(*args, **kwargs)
self.conn_id = conn_id
self.drop_table = drop_table
self.target_table = target_table
self.create_query = create_query
self.insert_query = insert_query
self.append = append
def execute(self, context):
self.hook = PostgresHook(postgres_conn_id=self.conn_id)
if self.drop_table:
self.log.info('Dropping {} table if it exists...'.format(
self.target_table))
self.hook.run("DROP TABLE IF EXISTS {}".format(self.target_table))
self.log.info(
"Table {} has been successfully dropped".format(
self.target_table))
self.log.info(
'Creating {} table if it does not exist...'.format(
self.target_table))
self.hook.run(self.create_query)
if not self.append:
self.log.info("Removing data from {}".format(self.target_table))
self.hook.run("DELETE FROM {}".format(self.target_table))
self.log.info('Inserting data from staging table...')
self.hook.run(self.insert_query)
self.log.info("Insert execution complete...")
```
#### File: plugins/operators/stage_redshift.py
```python
from airflow.hooks.postgres_hook import PostgresHook
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class StageToRedshiftOperator(BaseOperator):
"""
Operator that loads any JSON formatted files from S3 to Amazon Redshift.
"""
ui_color = '#358140'
template_fields = ("s3_key",)
copy_query = """
COPY {}
FROM '{}'
ACCESS_KEY_ID '{}'
SECRET_ACCESS_KEY '{}'
{}
"""
@apply_defaults
def __init__(self,
table,
drop_table,
aws_connection_id,
redshift_connection_id,
create_query,
s3_bucket,
s3_key,
copy_options,
*args, **kwargs):
super(StageToRedshiftOperator, self).__init__(*args, **kwargs)
self.table = table
self.drop_table = drop_table
self.aws_connection_id = aws_connection_id
self.redshift_connection_id = redshift_connection_id
self.create_query = create_query
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.copy_options = copy_options
def execute(self, context):
self.hook = PostgresHook(postgres_conn_id=self.redshift_connection_id)
self.aws_instance = AwsHook(aws_conn_id=self.aws_connection_id)
credentials = self.aws_instance.get_credentials()
rendered_key = self.s3_key.format(**context)
s3_path = "s3://{}/{}".format(self.s3_bucket, rendered_key)
formatted_query = StageToRedshiftOperator.copy_query.format(
self.table,
s3_path,
credentials.access_key,
credentials.secret_key,
self.copy_options
)
if self.drop_table:
self.log.info('Dropping {} table if it exists...'.format(
self.table))
self.hook.run("DROP TABLE IF EXISTS {}".format(self.table))
self.log.info(
"Table {} has been successfully dropped".format(
self.table))
self.log.info(
'Creating {} table if it does not exist...'.format(self.table))
self.hook.run(self.create_query)
self.log.info("Removing data from {}".format(self.table))
self.hook.run("DELETE FROM {}".format(self.table))
self.log.info('Executing copy query...')
self.hook.run(formatted_query)
self.log.info("copy query execution complete...")
``` |
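A DAG file is not included here; a minimal usage sketch for the two operators, in which the connection IDs, bucket, keys, and SQL are placeholders rather than the project's real configuration, might look like:
```python
# dags/example_etl.py -- illustrative only; connection ids, bucket names, and the
# SQL statements below are placeholders, not the project's actual values.
from datetime import datetime

from airflow import DAG
from operators.stage_redshift import StageToRedshiftOperator
from operators.load_dimension import LoadDimensionOperator

with DAG("example_etl", start_date=datetime(2021, 1, 1), schedule_interval="@hourly") as dag:
    stage_events = StageToRedshiftOperator(
        task_id="stage_events",
        table="staging_events",
        drop_table=True,
        aws_connection_id="aws_credentials",
        redshift_connection_id="redshift",
        create_query="CREATE TABLE IF NOT EXISTS staging_events (user_id INT, first_name VARCHAR(256));",
        s3_bucket="example-bucket",
        s3_key="log_data/{ds}",
        copy_options="FORMAT AS JSON 'auto'",
    )

    load_users = LoadDimensionOperator(
        task_id="load_users_dim",
        conn_id="redshift",
        drop_table=False,
        target_table="users",
        create_query="CREATE TABLE IF NOT EXISTS users (user_id INT, first_name VARCHAR(256));",
        insert_query="INSERT INTO users SELECT DISTINCT user_id, first_name FROM staging_events;",
        append=False,
    )

    stage_events >> load_users
```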
{
"source": "jonathankamau/works-single-view",
"score": 3
} |
#### File: works-single-view/scripts/extract.py
```python
import unicodedata
from scripts.spark_session import CreateSparkSession
from pyspark.sql.functions import collect_set, concat_ws, udf, col
import os
class ExtractWorks(CreateSparkSession):
"""Contains Methods to extract data from the csv file."""
def extract_data(self):
"""Method to extract data from the csv file."""
works_data = self.data_path + '*'
works_data_df = self.spark.read.load(
works_data,
format="csv",
header="true"
)
unicode_conversion = udf(
lambda value: unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode())
works_data_df = works_data_df.withColumn(
'converted_title', unicode_conversion(col('title')))
works_data_df = works_data_df.withColumn(
'converted_contributors', unicode_conversion(col('contributors')))
reconciled_data = works_data_df.select('*') \
.groupBy('iswc') \
.agg(concat_ws(', ', collect_set('converted_title')) \
.alias('title'),
concat_ws('|', collect_set('converted_contributors')) \
.alias('contributors'),
concat_ws(', ', collect_set('source')) \
.alias('sources')) \
.dropDuplicates() \
.na.drop()
return reconciled_data
``` |
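`ExtractWorks` inherits from `CreateSparkSession`, which is imported from `scripts.spark_session` but not reproduced in this listing; a plausible minimal base class, assuming it only needs to expose the `spark` session and the `data_path` attribute used above, could be:
```python
# scripts/spark_session.py -- hypothetical sketch; only the `spark` and `data_path`
# attributes used by ExtractWorks are inferred from the code above.
import os

from pyspark.sql import SparkSession


class CreateSparkSession:
    """Create a shared SparkSession and record where the source CSV files live."""

    def __init__(self, data_path=None):
        self.spark = (SparkSession.builder
                      .appName("works-single-view")
                      .getOrCreate())
        # Directory holding the input CSVs; ExtractWorks.extract_data appends '*'.
        self.data_path = data_path or os.environ.get("WORKS_DATA_PATH", "data/")
```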
{
"source": "jonathankeuser/OpenSlides",
"score": 2
} |
#### File: openslides/utils/autoupdate.py
```python
from collections import defaultdict
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from django.db.models import Model
from mypy_extensions import TypedDict
from .auth import UserDoesNotExist
from .autoupdate_bundle import AutoupdateElement, autoupdate_bundle
from .cache import ChangeIdTooLowError, element_cache
from .utils import is_iterable, split_element_id
AutoupdateFormat = TypedDict(
"AutoupdateFormat",
{
"changed": Dict[str, List[Dict[str, Any]]],
"deleted": Dict[str, List[int]],
"from_change_id": int,
"to_change_id": int,
"all_data": bool,
},
)
def disable_history() -> None:
""""""
with autoupdate_bundle() as bundle:
bundle.disable_history()
def inform_changed_data(
instances: Union[Iterable[Model], Model],
information: List[str] = None,
user_id: Optional[int] = None,
disable_history: bool = False,
no_delete_on_restriction: bool = False,
) -> None:
"""
Informs the autoupdate system and the caching system about the creation or
update of an element.
The argument instances can be one instance or an iterable over instances.
History creation is enabled.
"""
if information is None:
information = []
if not is_iterable(instances):
instances = (instances,)
root_instances = set(instance.get_root_rest_element() for instance in instances)
elements = []
for root_instance in root_instances:
element = AutoupdateElement(
id=root_instance.get_rest_pk(),
collection_string=root_instance.get_collection_string(),
disable_history=disable_history,
information=information,
user_id=user_id,
no_delete_on_restriction=no_delete_on_restriction,
)
elements.append(element)
inform_elements(elements)
def inform_deleted_data(
deleted_elements: Iterable[Tuple[str, int]],
information: List[str] = None,
user_id: Optional[int] = None,
) -> None:
"""
Informs the autoupdate system and the caching system about the deletion of
elements.
History creation is enabled.
"""
if information is None:
information = []
elements = [
AutoupdateElement(
id=deleted_element[1],
collection_string=deleted_element[0],
full_data=None,
information=information,
user_id=user_id,
)
for deleted_element in deleted_elements
]
inform_elements(elements)
def inform_elements(elements: Iterable[AutoupdateElement]) -> None:
"""
Informs the autoupdate system about some elements. This is used just to send
some data to all users.
If you want to save history information, user id or disable history you
have to put information or flag inside the elements.
"""
with autoupdate_bundle() as bundle:
bundle.add(elements)
async def get_autoupdate_data(
from_change_id: int, user_id: int
) -> Tuple[int, Optional[AutoupdateFormat]]:
try:
return await _get_autoupdate_data(from_change_id, user_id)
except UserDoesNotExist:
return 0, None
async def _get_autoupdate_data(
from_change_id: int, user_id: int
) -> Tuple[int, Optional[AutoupdateFormat]]:
"""
Returns the max_change_id and the autoupdate from from_change_id to max_change_id
"""
try:
(
max_change_id,
changed_elements,
deleted_element_ids,
) = await element_cache.get_data_since(user_id, from_change_id)
except ChangeIdTooLowError:
        # The change_id is lower than the lowest change_id in redis. Return all data.
(
max_change_id,
changed_elements,
) = await element_cache.get_all_data_list_with_max_change_id(user_id)
deleted_elements: Dict[str, List[int]] = {}
all_data = True
else:
all_data = False
deleted_elements = defaultdict(list)
for element_id in deleted_element_ids:
collection_string, id = split_element_id(element_id)
deleted_elements[collection_string].append(id)
# Check, if the autoupdate has any data.
if not changed_elements and not deleted_element_ids:
# Skip empty updates
return max_change_id, None
else:
# Normal autoupdate with data
return (
max_change_id,
AutoupdateFormat(
changed=changed_elements,
deleted=deleted_elements,
from_change_id=from_change_id,
to_change_id=max_change_id,
all_data=all_data,
),
)
``` |
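For context, a short usage sketch of the informer helpers defined above; the model instance and collection string are placeholders rather than OpenSlides' actual call sites:
```python
# Illustrative call sites for the helpers above. `instance` stands in for any
# Django model that implements get_root_rest_element()/get_rest_pk(), and the
# collection string passed in is an example value, not necessarily a real one.
def on_instance_saved(instance, user_id):
    inform_changed_data(instance, information=["Element updated"], user_id=user_id)


def on_instance_deleted(collection_string, element_id, user_id):
    # Deletions are reported as (collection_string, id) pairs.
    inform_deleted_data([(collection_string, element_id)], user_id=user_id)
```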
{
"source": "jonathankingfc/quay",
"score": 3
} |
#### File: data/cache/redis_cache.py
```python
from redis import StrictRedis, RedisError
from rediscluster import RedisCluster
class ReadEndpointSupportedRedis(object):
""" Wrapper class for Redis to split read/write requests between separate endpoints."""
def __init__(self, primary=None, replica=None):
if not primary or primary.get("host") is None:
raise Exception("Missing primary host for Redis model cache configuration")
self.write_client = StrictRedis(
**primary,
socket_connect_timeout=1,
socket_timeout=2,
health_check_interval=2,
)
if not replica:
self.read_client = self.write_client
else:
self.read_client = StrictRedis(
**replica,
socket_connect_timeout=1,
socket_timeout=2,
health_check_interval=2,
)
    def get(self, key, *args, **kwargs):
        # Reads go to the replica client (or the primary when no replica is configured).
        return self.read_client.get(key, *args, **kwargs)
    def set(self, key, val, *args, **kwargs):
        # Writes always go to the primary client.
        return self.write_client.set(key, val, *args, **kwargs)
def __getattr__(self, name):
return getattr(self.write_client, name, None)
REDIS_DRIVERS = {
"redis": ReadEndpointSupportedRedis,
"rediscluster": RedisCluster,
}
def redis_cache_from_config(cache_config):
"""Return the Redis class to use based on the cache config.
redis:
DATA_MODEL_CACHE_CONFIG
engine: redis,
redis_config:
primary:
host: localhost
pass: password
replica:
host: localhost
pass: password
rediscluster:
DATA_MODEL_CACHE_CONFIG:
engine: rediscluster
redis_config:
startup_nodes:
- host: "test"
port: 6379
readonly_mode: true
rediscluster uses the same client as redis internally for commands.
    Anything that can be set in StrictRedis() can also be set under the redis_config structure.
NOTE: Known issue - To allow read from replicas in redis cluster mode, set read_from_replicas instead
of readonly_mode.
Ref: https://github.com/Grokzen/redis-py-cluster/issues/339
"""
driver = cache_config.get("engine", None)
if driver is None or driver.lower() not in REDIS_DRIVERS.keys():
raise ValueError("Invalid Redis driver for cache model")
driver_cls = REDIS_DRIVERS[driver]
redis_config = cache_config.get("redis_config", None)
if not redis_config:
raise ValueError("Invalid Redis config for %s" % driver)
return driver_cls(**redis_config)
``` |
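The docstring above sketches the configuration in YAML; the equivalent Python structures passed to `redis_cache_from_config` would look roughly like the following. Hosts and passwords are placeholders, and `password` is used as the key because the keyword arguments are forwarded directly to `StrictRedis`.
```python
# Example cache configurations matching the docstring above (values are placeholders).
single_node_config = {
    "engine": "redis",
    "redis_config": {
        "primary": {"host": "localhost", "password": "secret"},
        "replica": {"host": "replica-host", "password": "secret"},
    },
}

cluster_config = {
    "engine": "rediscluster",
    "redis_config": {
        "startup_nodes": [{"host": "test", "port": 6379}],
        "readonly_mode": True,
    },
}

cache = redis_cache_from_config(single_node_config)
cache.set("namespace:key", "value")
print(cache.get("namespace:key"))
```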
{
"source": "jonathanking/sidechainnet",
"score": 3
} |
#### File: sidechainnet/structure/StructureBuilder.py
```python
from io import UnsupportedOperation
import numpy as np
import torch
from sidechainnet.utils.sequence import ONE_TO_THREE_LETTER_MAP, VOCAB
from sidechainnet.structure.build_info import SC_BUILD_INFO, BB_BUILD_INFO, NUM_COORDS_PER_RES, SC_ANGLES_START_POS, NUM_ANGLES
from sidechainnet.structure.structure import nerf
from sidechainnet.structure.HydrogenBuilder import HydrogenBuilder, NUM_COORDS_PER_RES_W_HYDROGENS
class StructureBuilder(object):
"""Reconstruct a protein's structure given its sequence and angles or coordinates.
The hydroxyl-oxygen of terminal residues is not placed because this would
mean that the number of coordinates per residue would not be constant, or
cause other complications (i.e. what if the last atom of a structure is not
    really a terminal atom because its tail is masked out?).
"""
def __init__(self,
seq,
ang=None,
crd=None,
device=torch.device("cpu"),
nerf_method="standard"):
"""Initialize a StructureBuilder for a single protein. Does not build coordinates.
To generate coordinates after initialization, see build().
To create PDB/GLTF files or to generate a py3Dmol visualization, see
to_{pdb,gltf,3Dmol}.
Args:
seq: An integer tensor or a string of length L that represents the protein's
amino acid sequence.
ang: A float tensor (L X NUM_PREDICTED_ANGLES) that contains all of the
protein's interior angles.
crd: A float tensor ((L X NUM_COORDS_PER_RES) X 3) that contains all of the
protein's atomic coordinates. Each residue must contain the same number
of coordinates, with empty coordinate entries padded with 0-vectors.
device: An optional torch device on which to build the structure.
nerf_method (str, optional): Which NeRF implementation to use. "standard" uses
the standard NeRF formulation described in many papers. "sn_nerf" uses an
                optimized version with fewer vector normalizations. Defaults to
"standard".
"""
# TODO support one-hot sequences
# Perhaps the user mistakenly passed coordinates for the angle arguments
if ang is not None and crd is None and ang.shape[-1] == 3:
self.coords = ang
self.ang = None
elif crd is not None and ang is None:
self.ang = None
self.coords = crd
if len(self.coords.shape) == 3:
raise ValueError("Batches of structures are not supported by "
"StructureBuilder. See BatchedStructureBuilder instead.")
elif crd is None and ang is not None:
self.coords = None
self.ang = ang
if len(self.ang.shape) == 3:
raise ValueError("Batches of structures are not supported by "
"StructureBuilder. See BatchedStructureBuilder instead.")
elif (ang is None and crd is None) or (ang is not None and crd is not None):
raise ValueError("You must provide exactly one of either coordinates (crd) "
"or angles (ang).")
self.seq_as_str = seq if type(seq) == str else _convert_seq_to_str(seq)
self.seq_as_ints = np.asarray([VOCAB._char2int[s] for s in self.seq_as_str])
self.device = device
# Validate input data
if self.coords is not None:
self.data_type = "torch" if isinstance(self.coords, torch.Tensor) else "numpy"
else:
self.data_type = "torch" if isinstance(self.ang, torch.Tensor) else "numpy"
if self.ang is not None and self.ang.shape[-1] != NUM_ANGLES:
raise ValueError(f"Angle matrix dimensions must match (L x {NUM_ANGLES}). "
f"You have provided {tuple(self.ang.shape)}.")
if (self.coords is not None and self.coords.shape[-1] != 3):
raise ValueError(f"Coordinate matrix dimensions must match (L x 3). "
f"You have provided {tuple(self.coords.shape)}.")
if (self.coords is not None and
(self.coords.shape[0] // NUM_COORDS_PER_RES) != len(self.seq_as_str)):
raise ValueError(
f"The length of the coordinate matrix must match the sequence length "
f"times {NUM_COORDS_PER_RES}. You have provided {self.coords.shape[0]} //"
f" {NUM_COORDS_PER_RES} = {self.coords.shape[0] // NUM_COORDS_PER_RES}.")
if self.ang is not None and (self.ang == 0).all(axis=1).any():
missing_loc = np.where((self.ang == 0).all(axis=1))
raise ValueError(f"Building atomic coordinates from angles is not supported "
f"for structures with missing residues. Missing residues = "
f"{list(missing_loc[0])}. Protein structures with missing "
"residues are only supported if built directly from "
"coordinates (also supported by StructureBuilder).")
self.prev_ang = None
self.prev_bb = None
self.next_bb = None
self.pdb_creator = None
self.nerf_method = nerf_method
self.has_hydrogens = False
self.atoms_per_res = NUM_COORDS_PER_RES
self.terminal_atoms = None
def __len__(self):
"""Return length of the protein sequence.
Returns:
int: Integer sequence length.
"""
return len(self.seq_as_str)
def _iter_resname_angs(self, start=0):
for resname, angles in zip(self.seq_as_ints[start:], self.ang[start:]):
yield resname, angles
def _build_first_two_residues(self):
"""Construct the first two residues of the protein."""
resname_ang_iter = self._iter_resname_angs()
first_resname, first_ang = next(resname_ang_iter)
second_resname, second_ang = next(resname_ang_iter)
first_res = ResidueBuilder(first_resname, first_ang, prev_res=None, next_res=None)
second_res = ResidueBuilder(second_resname,
second_ang,
prev_res=first_res,
next_res=None)
# After building both backbones use the second residue's N to build the first's CB
first_res.build_bb()
second_res.build()
first_res.next_res = second_res
first_res.build_sc()
return first_res, second_res
def build(self):
"""Construct all of the atoms for a residue.
Special care must be taken for the first residue in the sequence in
order to place its CB, if present.
Returns:
(numpy.ndarray, torch.Tensor): An array or tensor of the generated coordinates
with shape ((L X NUM_COORDS_PER_RES) X 3).
"""
# If a StructureBuilder does not have angles, build returns its coordinates
if self.ang is None:
return self.coords
# Build the first and second residues, a special case
first, second = self._build_first_two_residues()
# Combine the coordinates and build the rest of the protein
self.coords = first._stack_coords() + second._stack_coords()
# Build the rest of the structure
prev_res = second
for i, (resname, ang) in enumerate(self._iter_resname_angs(start=2)):
res = ResidueBuilder(resname,
ang,
prev_res=prev_res,
next_res=None,
is_last_res=i + 2 == len(self.seq_as_str) - 1)
self.coords += res.build()
prev_res = res
if self.data_type == 'torch':
self.coords = torch.stack(self.coords)
else:
self.coords = np.stack(self.coords)
return self.coords
def _initialize_coordinates_and_PdbCreator(self):
if self.coords is None or len(self.coords) == 0:
self.build()
if not self.pdb_creator:
from sidechainnet.structure.PdbBuilder import PdbBuilder
if self.data_type == 'numpy':
self.pdb_creator = PdbBuilder(self.seq_as_str, self.coords,
self.atoms_per_res,
terminal_atoms=self.terminal_atoms)
else:
self.pdb_creator = PdbBuilder(self.seq_as_str,
self.coords.detach().numpy(),
self.atoms_per_res,
terminal_atoms=self.terminal_atoms)
def add_hydrogens(self):
"""Add Hydrogen atom coordinates to coordinate representation (re-apply PADs)."""
if self.coords is None or not len(self.coords):
raise ValueError("Cannot add hydrogens to a structure whose heavy atoms have"
" not yet been built.")
self.hb = HydrogenBuilder(self.seq_as_str, self.coords)
self.coords = self.hb.build_hydrogens()
self.has_hydrogens = True
self.atoms_per_res = NUM_COORDS_PER_RES_W_HYDROGENS
self.terminal_atoms = self.hb.terminal_atoms
def to_pdb(self, path, title="pred"):
"""Save protein structure as a PDB file to given path.
Args:
path (str): Path to save PDB file.
title (str, optional): Title of structure for PDB file. Defaults to "pred".
"""
self._initialize_coordinates_and_PdbCreator()
self.pdb_creator.save_pdb(path, title)
def to_pdbstr(self, title="pred"):
"""Return protein structure as a PDB string.
Args:
title (str, optional): Title of structure for PDB file. Defaults to "pred".
"""
self._initialize_coordinates_and_PdbCreator()
return self.pdb_creator.get_pdb_string(title)
def to_gltf(self, path, title="pred"):
"""Save protein structure as a GLTF (3D-object) file to given path.
Args:
path (str): Path to save GLTF file.
title (str, optional): Title of structure for GLTF file. Defaults to "pred".
"""
self._initialize_coordinates_and_PdbCreator()
self.pdb_creator.save_gltf(path, title)
def to_3Dmol(self, style=None, **kwargs):
"""Generate protein structure & return interactive py3Dmol.view for visualization.
Args:
style (str, optional): Style string to be passed to py3Dmol for
visualization. Defaults to None.
Returns:
py3Dmol.view object: A view object that is interactive in iPython notebook
settings.
"""
import py3Dmol
if not style:
style = {'cartoon': {'color': 'spectrum'}, 'stick': {'radius': .15}}
view = py3Dmol.view(**kwargs)
view.addModel(self.to_pdbstr(), 'pdb', {'keepH': True})
if style:
view.setStyle(style)
view.zoomTo()
return view
class ResidueBuilder(object):
"""Builds the atomic coordinates from angles for a specified amino acid residue."""
def __init__(self,
name,
angles,
prev_res,
next_res,
is_last_res=False,
device=torch.device("cpu"),
nerf_method="standard"):
"""Initialize a residue builder for building a residue's coordinates from angles.
If prev_{bb, ang} are None, then this is the first residue.
Args:
name: The integer amino acid code for this residue.
angles: A float tensor containing necessary angles to define this residue.
            prev_res: The ResidueBuilder for the previous residue, upon which this
                residue is extending (None if this is the first residue).
            next_res: The ResidueBuilder for the next residue, used when placing the
                first sidechain atom (the beta-carbon) of this residue.
nerf_method (str, optional): Which NeRF implementation to use. "standard" uses
the standard NeRF formulation described in many papers. "sn_nerf" uses an
                optimized version with fewer vector normalizations. Defaults to
"standard".
"""
if (not isinstance(name, np.int64) and not isinstance(name, np.int32) and
not isinstance(name, int) and not isinstance(name, torch.Tensor)):
raise ValueError("Expected integer AA code." + str(name.shape) +
str(type(name)))
if isinstance(angles, np.ndarray):
angles = torch.tensor(angles, dtype=torch.float32)
self.name = name
self.ang = angles.squeeze()
self.prev_res = prev_res
self.next_res = next_res
self.device = device
self.is_last_res = is_last_res
self.nerf_method = nerf_method
self.bb = []
self.sc = []
self.coords = []
self.coordinate_padding = torch.zeros(3, requires_grad=True)
@property
def AA(self):
"""Return the one-letter amino acid code (str) for this residue."""
return VOCAB.int2char(int(self.name))
def build(self):
"""Construct and return atomic coordinates for this protein."""
self.build_bb()
self.build_sc()
return self._stack_coords()
def build_bb(self):
"""Build backbone for residue."""
if self.prev_res is None:
self.bb = self._init_bb()
else:
pts = [self.prev_res.bb[0], self.prev_res.bb[1], self.prev_res.bb[2]]
for j in range(4):
if j == 0:
# Placing N
t = self.prev_res.ang[4] # thetas["ca-c-n"]
b = BB_BUILD_INFO["BONDLENS"]["c-n"]
pb = BB_BUILD_INFO["BONDLENS"]["ca-c"] # pb is previous bond len
dihedral = self.prev_res.ang[1] # psi of previous residue
elif j == 1:
# Placing Ca
t = self.prev_res.ang[5] # thetas["c-n-ca"]
b = BB_BUILD_INFO["BONDLENS"]["n-ca"]
pb = BB_BUILD_INFO["BONDLENS"]["c-n"]
dihedral = self.prev_res.ang[2] # omega of previous residue
elif j == 2:
# Placing C
t = self.ang[3] # thetas["n-ca-c"]
b = BB_BUILD_INFO["BONDLENS"]["ca-c"]
pb = BB_BUILD_INFO["BONDLENS"]["n-ca"]
dihedral = self.ang[0] # phi of current residue
else:
# Placing O (carbonyl)
t = torch.tensor(BB_BUILD_INFO["BONDANGS"]["ca-c-o"])
b = BB_BUILD_INFO["BONDLENS"]["c-o"]
pb = BB_BUILD_INFO["BONDLENS"]["ca-c"]
if self.is_last_res:
# we explicitly measure this angle during dataset creation,
# no need to invert it here.
dihedral = self.ang[1]
else:
# the angle for placing oxygen is opposite to psi of current res
dihedral = self.ang[1] - np.pi
next_pt = nerf(pts[-3],
pts[-2],
pts[-1],
b,
t,
dihedral,
l_bc=pb,
nerf_method=self.nerf_method)
pts.append(next_pt)
self.bb = pts[3:]
return self.bb
def _init_bb(self):
"""Initialize the first 3 points of the protein's backbone.
Placed in an arbitrary plane (z = .001).
"""
n = torch.tensor([0, 0, 0.001], device=self.device, requires_grad=True)
ca = n + torch.tensor([BB_BUILD_INFO["BONDLENS"]["n-ca"], 0, 0],
device=self.device, requires_grad=True)
cx = torch.cos(np.pi - self.ang[3]) * BB_BUILD_INFO["BONDLENS"]["ca-c"]
cy = torch.sin(np.pi - self.ang[3]) * BB_BUILD_INFO["BONDLENS"]['ca-c']
c = ca + torch.tensor([cx, cy, 0], device=self.device, dtype=torch.float32, requires_grad=True)
o = nerf(
n,
ca,
c,
torch.tensor(BB_BUILD_INFO["BONDLENS"]["c-o"]),
torch.tensor(BB_BUILD_INFO["BONDANGS"]["ca-c-o"]),
self.ang[1] - np.pi, # opposite to current residue's psi
l_bc=torch.tensor(BB_BUILD_INFO["BONDLENS"]["ca-c"]), # Previous bond length
nerf_method=self.nerf_method)
return [n, ca, c, o]
def build_sc(self):
"""Build the sidechain atoms for this residue.
Care is taken when placing the first sc atom (the beta-Carbon). This is
because the dihedral angle that places this atom must be defined using a
neighboring (previous or next) residue.
"""
assert len(self.bb) > 0, "Backbone must be built first."
self.atom_names = ["N", "CA", "C", "O"]
self.pts = {"N": self.bb[0], "CA": self.bb[1], "C": self.bb[2]}
if self.next_res:
self.pts["N+"] = self.next_res.bb[0]
else:
self.pts["C-"] = self.prev_res.bb[2]
last_torsion = None
for i, (pbond_len, bond_len, angle, torsion, atom_names) in enumerate(
_get_residue_build_iter(self.name, SC_BUILD_INFO)):
# Select appropriate 3 points to build from
if self.next_res and i == 0:
a, b, c = self.pts["N+"], self.pts["C"], self.pts["CA"]
elif i == 0:
a, b, c = self.pts["C-"], self.pts["N"], self.pts["CA"]
else:
a, b, c = (self.pts[an] for an in atom_names[:-1])
# Select appropriate torsion angle, or infer if part of a planar configuration
if type(torsion) is str and torsion == "p":
torsion = self.ang[SC_ANGLES_START_POS + i]
elif type(torsion) is str and torsion == "i" and last_torsion is not None:
torsion = last_torsion - np.pi
new_pt = nerf(a,
b,
c,
bond_len,
angle,
torsion,
l_bc=pbond_len,
nerf_method=self.nerf_method)
self.pts[atom_names[-1]] = new_pt
self.sc.append(new_pt)
last_torsion = torsion
self.atom_names.append(atom_names[-1])
return self.sc
def _stack_coords(self):
self.coords = self.bb + self.sc + (NUM_COORDS_PER_RES - len(self.bb) -
len(self.sc)) * [self.coordinate_padding]
return self.coords
def to_prody(self, res):
import prody as pr
ag = pr.AtomGroup()
ag.setCoords(torch.stack(self.bb + self.sc).detach().numpy())
ag.setNames(self.atom_names)
ag.setResnames([ONE_TO_THREE_LETTER_MAP[VOCAB._int2char[self.name]]] *
len(self.atom_names))
ag.setResnums([res.getResnum()] * len(self.atom_names))
return pr.Residue(ag, [0] * len(self.atom_names), None)
def __repr__(self):
"""Return a string describing the name of the residue used for this object."""
return f"ResidueBuilder({self.AA})"
def _get_residue_build_iter(res, build_dictionary):
"""Return an iterator over (bond-lens, angles, torsions, atom names) for a residue.
This function makes it easy to iterate over the huge amount of data contained in
the dictionary sidechainnet.structure.build_info.SC_BUILD_INFO. This dictionary
contains all of the various standard bond and angle values that are used during atomic
reconstruction of a residue from its angles.
Args:
        res (int): An integer representing the code for a particular amino acid,
e.g. 'Ala' == 'A' == 0 in sequence.py.
build_dictionary (dict): A dictionary mapping 3-letter amino acid codes to
dictionaries of information relevant to the construction of this amino acid
from angles (i.e. angle names, atom types, bond lengths, bond types, torsion
types, etc.). See sidechainnet.structure.build_info.SC_BUILD_INFO.
Returns:
iterator: An iterator that yields 4-tuples of (bond-value, angle-value,
            torsion-value, atom-name). These values can be used to generate atomic
coordinates for a residue via the NeRF algorithm
(sidechainnet.structure.structure.nerf).
"""
r = build_dictionary[VOCAB.int2chars(int(res))]
bond_vals = [torch.tensor(b, dtype=torch.float32) for b in r["bonds-vals"]]
pbond_vals = [torch.tensor(BB_BUILD_INFO['BONDLENS']['n-ca'], dtype=torch.float32)
] + [torch.tensor(b, dtype=torch.float32) for b in r["bonds-vals"]][:-1]
angle_vals = [torch.tensor(a, dtype=torch.float32) for a in r["angles-vals"]]
torsion_vals = [
torch.tensor(t, dtype=torch.float32) if t not in ["p", "i"] else t
for t in r["torsion-vals"]
]
return iter(
zip(pbond_vals, bond_vals, angle_vals, torsion_vals,
[t.split("-") for t in r["torsion-names"]]))
def _convert_seq_to_str(seq):
"""Assuming seq is an int list or int tensor, returns its str representation."""
seq_as_str = ""
if isinstance(seq, torch.Tensor):
seq = seq.numpy()
seq = seq.flatten()
if len(seq.shape) == 1:
# The seq is represented as an integer sequence
if len(seq.shape) == 1:
seq_as_str = VOCAB.ints2str(seq)
elif len(seq.shape) == 2:
if seq.shape[0] != 1:
raise ValueError(f"Seq shape {seq.shape} is not supported.")
else:
seq_as_str = VOCAB.ints2str(seq[0])
else:
raise UnsupportedOperation(f"Seq shape {seq.shape} is not supported.")
return seq_as_str
if __name__ == '__main__':
import pickle
def _load_data(path):
with open(path, "rb") as f:
data = pickle.load(f)
return data
d = _load_data(
"/home/jok120/dev_sidechainnet/data/sidechainnet/sidechainnet_casp12_30.pkl")
idx = 15
sb = StructureBuilder(d['train']['seq'][idx], d['train']['ang'][idx])
sb.to_pdb("test00.pdb")
``` |
{
"source": "jonathankoren/langdet",
"score": 2
} |
#### File: jonathankoren/langdet/langdet.py
```python
import math
import operator
import sys
def topNValues(x, topN, thresh, minPercent):
    '''Given a dictionary containing keys and counts, returns the subset
    of keys that:
    - are among the topN most frequent keys
    - have a frequency in excess of `thresh`
    - account for more than `minPercent` of all values'''
totalNum = float(sumValues(x))
sorted_x = sorted(x.items(), key=operator.itemgetter(1), reverse=True)
if len(sorted_x) > topN:
sorted_x = sorted_x[:topN]
z = {}
for kvp in sorted_x:
if kvp[1] > thresh and float(kvp[1]) / totalNum > minPercent:
z[kvp[0]] = kvp[1]
return z
def normalize(d):
'''Takes a dictionary of keys to numeric values, and then rescales all the
    values by the L2 norm (i.e. Euclidean norm) of the dictionary.'''
norm = 0.0
for kvp in d.items():
norm += kvp[1] * kvp[1]
norm = math.sqrt(norm)
n = {}
for k in d:
n[k] = d[k] / norm
return n
def sumValues(x):
'''Given a dictionary of keys to numbers, returns the sum of all the
values in the dictionary.'''
s = 0
for (k,v) in x.items():
s += v
return s
def processStream(inStream, ngramSize):
ngramCounts = {}
for line in inStream:
if line == '':
continue
length = len(line)
for i in range(0, length):
c = line[i]
if ngramSize > 0:
if i + ngramSize < length:
ngram = line[i:i+ngramSize]
if ngram not in ngramCounts:
ngramCounts[ngram] = 1
else:
ngramCounts[ngram] = ngramCounts[ngram] + 1
return ngramCounts
def train(inStream, config):
'''Processes characters from inStream and returns a dictionary that
    represents the language detection model. The model is defined by the `config`
argument. All keys in the config are optional, but at least one key needs
to be defined in order to build a model. Config parameters are:
- ngrams
- ngramSize -- size in characters of the ngram
- maxValues -- maximum number of ngrams to consider
- freqThresh -- consider only ngrams that occur more than this number of times
- percentThresh -- consider only ngrams that account for more than this percentage of all ngrams'''
# setup ngram features
ngramSize = config.get('ngramSize', 0)
maxNgrams = config.get('maxValues', sys.maxsize)
threshNgrams = config.get('freqThresh', 0)
percentNgrams = config.get('percentThresh', 0.0)
ngramCounts = processStream(inStream, ngramSize)
model = {}
model['ngramSize'] = ngramSize
model['ngrams'] = normalize(topNValues(ngramCounts, maxNgrams, threshNgrams, percentNgrams))
return model
def classify(inStream, modelMap):
'''Takes an input stream and a map of language codes to models, and returns
the language code that best describes the input stream.
IMPORTANT: All models must have the same ngramSize.'''
ngramSize = list(modelMap.items())[0][1].get('ngramSize', 0)
ngramCounts = processStream(inStream, ngramSize)
item_ngrams = normalize(ngramCounts)
bestLang = None
bestSim = 0.0
for (lang, model) in modelMap.items():
sim = 0.0
for (ngram, score) in model['ngrams'].items():
try:
sim += item_ngrams[ngram] * score
except KeyError:
pass
if sim > bestSim:
bestSim = sim
bestLang = lang
return (bestLang, bestSim)
``` |
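A short end-to-end sketch of how `train` and `classify` fit together; the corpus file names and configuration values below are placeholders:
```python
# Illustrative workflow for the functions above; corpus paths are placeholders.
config = {'ngramSize': 3, 'maxValues': 500, 'freqThresh': 2, 'percentThresh': 0.0001}

models = {}
for lang, path in [('en', 'corpus_en.txt'), ('de', 'corpus_de.txt')]:
    with open(path, encoding='utf-8') as corpus:
        models[lang] = train(corpus, config)

with open('unknown.txt', encoding='utf-8') as text:
    best_lang, similarity = classify(text, models)
print(best_lang, similarity)
```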
{
"source": "JonathanKuelz/election_programmes",
"score": 3
} |
#### File: JonathanKuelz/election_programmes/wahlprogramme.py
```python
import argparse
from collections import Counter
from functools import reduce
import operator
from pathlib import Path
import math
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from FightingWords import Corpus, FightingWords
from typing import Dict, List
import re
from wordcloud import WordCloud
TITLES = {
'afd': 'AfD',
'cdu_csu': 'CDU/CSU',
'fdp': 'FDP',
'gruene': 'BÜNDNIS 90/DIE GRÜNEN',
'linke': 'DIE LINKE',
'spd': 'SPD'
}
parser = argparse.ArgumentParser()
parser.add_argument('--save_as', required=True, help='Folder name for the wordcloud images to be saved.')
parser.add_argument('-n', '--ngrams', required=False, help='Specify ngram lengths', nargs='+', default=[1], type=int)
def preprocess(txt: str) -> List[str]:
remove_words = ['bundestagswahlprogramm', 'f', 'ff', 'kapitel', 'seite']
german_stop_words = stopwords.words('german') + remove_words
alpha = re.sub('[\\W_0-9]+', ' ', txt).lower()
return [tok for tok in alpha.split(' ') if tok not in german_stop_words]
def ngrams(tokens: List[str], n):
if n == 1:
return tokens
grams = []
for i in range(len(tokens) - n + 1):
grams.append(' '.join(tokens[i:i+n]))
return grams
def fighting_words_against_all_others(corpora: Dict[str, Corpus], key: str):
self_corpus = corpora[key]
other_corpus = reduce(operator.add, (corpora[k] for k in corpora if k != key))
return FightingWords(self_corpus, other_corpus, prior=0.1)
def draw_fighting_words(fw: Dict[str, FightingWords], save: Path) -> None:
for name, fw_object in fw.items():
wc = WordCloud(background_color="white", width=1920, height=1080, min_font_size=8)
cloud_data = dict(zip(*fw_object.get_top_k_ngrams(100, 'second')))
wc.generate_from_frequencies({w: math.sqrt(weight) for w, weight in cloud_data.items()})
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.title(TITLES[name], loc='center')
plt.savefig(save.joinpath(name), dpi=500)
print('Min z-score for {}: {}'.format(name, min(cloud_data.values())))
def main():
args = parser.parse_args()
base_path = Path('data/')
img_path = base_path.joinpath(args.save_as)
img_path.mkdir(exist_ok=True)
files = list(base_path.iterdir())
programs = {p.name.split('.')[0]: preprocess(p.open('r').read()) for p in files if p.is_file()}
for party, content in programs.items():
print("Das Programm der {} enthält {} Wörter.".format(party, len(content)))
words_and_n_grams = {party: ngrams(content, n) for n in args.ngrams for party, content in programs.items()}
corpora = dict()
for party, grams in words_and_n_grams.items():
cnt = Counter(grams)
terms, weights = list(zip(*cnt.items()))
corpora[party] = Corpus(terms, weights)
fighting_words = {party: fighting_words_against_all_others(corpora, party) for party in corpora}
draw_fighting_words(fighting_words, img_path)
if __name__ == '__main__':
main()
``` |
{
"source": "JonathanLayman/Ballot_Manifest",
"score": 3
} |
#### File: JonathanLayman/Ballot_Manifest/Ballot_Manifest.py
```python
import pandas as pd
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog
from tkinter.scrolledtext import ScrolledText
class BallotManifestGui:
def __init__(self, master):
# Establish base GUI settings.
# Determines the master tkinter window, the title of the window, the resizable status of the window, and
# the size and render location of the window.
self.master = master
self.master.title('Ballot Manifest')
self.master.resizable(False, False)
self.master.geometry('525x600+50+25')
# Establish frame header settings
# This sets the display for the top part of the window. It adds the Arapahoe County Logo and the Title Text
self.frame_header = ttk.Frame(master)
self.frame_header.pack()
self.logo = PhotoImage(file='Data/ArapahoeLogo.png').subsample(20, 20)
self.arapahoe_logo = ttk.Label(self.frame_header, image=self.logo).grid(row=0, column=0, rowspan=2,
pady=(5, 25))
self.title_text = ttk.Label(self.frame_header, font=('Arial', 18, 'bold'), wraplength=400,
text='Ballot Manifest Data Entry').grid(row=1, column=1, padx=25)
# Establish Load File Frame Settings
# This is the landing page of the program that prompts for a filename stored under the self.filename variable.
# The self.content_textbox variable is the text input where a filename can be loaded.
self.frame_load = ttk.Frame(master)
self.filename = None
self.content_text = ttk.Label(self.frame_load, text='Select File to Begin').grid(row=0, column=0, columnspan=2,
sticky='w')
self.content_textbox = ttk.Entry(self.frame_load, width=60)
self.select_button = ttk.Button(self.frame_load, text='Browse Computer', command=lambda: self.locate_file())
self.load_button = ttk.Button(self.frame_load, text='Load File',
command=lambda: self.load_to_form())
# Establish Form Settings
# This page is the form that the user will enter the information on the container label.
# The self.labels and self.batch_entries will be determined after a label is scanned.
self.frame_form = ttk.Frame(master)
self.scan_text = ttk.Label(self.frame_form, text='Scan Label: ')
self.scan_label = ttk.Entry(self.frame_form, width=25, show='*')
self.scan_submit = ttk.Button(self.frame_form, text='Submit', command=lambda: self.load_label())
self.scanner = ttk.Label(self.frame_form, text='Scanner: ')
self.container = ttk.Label(self.frame_form, text='Container: ')
self.seal_text_1 = ttk.Label(self.frame_form, text='Seal 1: ')
self.seal_text_2 = ttk.Label(self.frame_form, text='Seal 2: ')
self.seal_entry_1 = ttk.Entry(self.frame_form, width=25)
self.seal_entry_2 = ttk.Entry(self.frame_form, width=25)
self.labels = []
self.batch_entries = []
self.form_save = ttk.Button(self.frame_form, text='Save', command=lambda: self.submit_form())
self.form_clear = ttk.Button(self.frame_form, text='Clear', command=lambda: self.form_view())
# Establish Menu Bar Settings
# These are the options available under the "File" and "Help" drop down menus.
self.master.option_add('*tearOff', False)
self.menu_bar = Menu(master)
self.master.config(menu=self.menu_bar)
self.file = Menu(self.menu_bar)
self.help_ = Menu(self.menu_bar)
self.menu_bar.add_cascade(menu=self.file, label='File')
self.menu_bar.add_cascade(menu=self.help_, label='Help')
self.file.add_command(label='Create New From Template', command=lambda: self.load_from_template_view())
self.file.add_command(label='Load Manifest', command=lambda: self.load_view())
self.file.add_command(label='Edit or Remove Previous Entry', command=lambda: self.edit_or_remove_view())
self.file.entryconfig('Edit or Remove Previous Entry', state='disabled')
self.file.add_command(label='Save and Quit', command=lambda: self.save_and_quit())
self.file.entryconfig('Save and Quit', state='disabled')
self.help_.add_command(label='How to use this software', command=lambda: self.how_to_use_view())
self.help_.add_separator()
self.help_.add_command(label='About', command=lambda: self.about_view())
# Establish Edit or Remove Settings
# This is the for the edit or remove page. The self.scanner_entry and self.starting_batch_entry variables
# are the text input locations that will be used for looking up an entry.
self.frame_edit = ttk.Frame(master)
        self.instruction_text = ttk.Label(self.frame_edit, text='To Edit or Remove an entry: enter the scanner and '
'starting batch number, then select "Edit" or "Remove"',
wraplength=250)
self.scanner_prompt = ttk.Label(self.frame_edit, text='Scanner: ')
self.starting_batch_prompt = ttk.Label(self.frame_edit, text='First Batch in Container: ')
self.scanner_entry = ttk.Entry(self.frame_edit, width=25)
self.starting_batch_entry = ttk.Entry(self.frame_edit, width=25)
self.edit_button = ttk.Button(self.frame_edit, text='Edit', command=lambda: self.edit_entry())
self.remove_button = ttk.Button(self.frame_edit, text='Remove', command=lambda: self.remove_entry())
# Establish Load From New Template Settings
# These are the settings for the load from a new template screen.
self.frame_load_template = ttk.Frame(master)
self.load_template_text = ttk.Label(self.frame_load_template, text='To create a new ballot manifest from the '
'manifest template: click the "Create From '
'Template" button below', wraplength=350)
self.load_template_button = ttk.Button(self.frame_load_template, text='Create From Template',
command=lambda: self.create_from_template())
# Establish Pandas DataFrame Variables
# These variables can be changed to change the county using the software and the column names of the ballot
# manifest.
self.county = 'Arapahoe'
self.df = None
self.column_names = ['County', 'Scanner', 'ICC Batch', 'Ballot Count', 'Container Number', 'Seal 1', 'Seal 2']
self.printed_label = None
# Render load View
self.load_view()
# Methods to render the Various views.
# This method will clear the view on the app, except for the header view which is static.
def clear_view(self):
self.frame_load.pack_forget()
self.frame_form.pack_forget()
self.frame_edit.pack_forget()
self.frame_load_template.pack_forget()
# This view is the screen prompting to load a ballot manifest into the software.
# The self.select_button will call the self.locate_file() method, and the self.load_button will call the
# self.load_to_form() method.
def load_view(self):
self.clear_view()
self.df = None
self.filename = None
self.update_menu_view()
self.frame_load.pack()
self.content_textbox.grid(row=1, column=0, columnspan=4, sticky='w', pady=5)
self.content_textbox.config(state='normal')
self.content_textbox.delete(0, END)
self.content_textbox.config(state='disabled')
self.select_button.grid(row=2, column=0, sticky='w', pady=10)
self.load_button.grid(row=2, column=1)
# This view loads the form to input data from completed labels. It calls the self.generate_batch_widgets() method
# to load the correct ICC Station, Batch Numbers, and Container Number according to the label. The self.form_save
# button will call the self.submit_form() method. The self.form_clear button will call the self.form_view() method,
# effectively looping back to the base form screen without saving the information.
def form_view(self):
self.update_menu_view()
self.frame_form.pack(fill=BOTH, expand=1)
self.scan_text.grid(row=1, column=0, sticky='w', pady=15, padx=10)
self.scan_label.grid(row=1, column=1, sticky='w', padx=10)
self.scan_label.config(state='normal')
self.scan_label.delete(0, END)
self.scan_submit.grid(row=1, column=2)
self.scan_submit.config(state='normal')
self.scanner.grid(row=2, column=0, sticky='w', padx=10, columnspan=2)
self.scanner.config(text='Scanner: ')
self.container.grid(row=3, column=0, sticky='w', padx=10, columnspan=2)
self.container.config(text='Container: ')
self.seal_text_1.grid(row=4, column=2)
self.seal_entry_1.grid(row=4, column=3)
self.seal_entry_1.delete(0, END)
self.seal_entry_1.config(state='disabled')
self.seal_text_2.grid(row=5, column=2)
self.seal_entry_2.grid(row=5, column=3)
self.seal_entry_2.delete(0, END)
self.seal_entry_2.config(state='disabled')
self.form_save.grid(row=7, column=3)
self.form_save.config(state='disabled')
self.form_clear.grid(row=15, column=3)
self.form_clear.config(state='disabled')
self.generate_batch_widgets()
# This view is for the edit or remove screen selected from the file drop down menu. It is only available once a
# ballot manifest has been loaded. The self.edit_button will call the self.edit_entry() method and the
# self.remove_button will call the self.remove_entry() method.
def edit_or_remove_view(self):
self.clear_view()
self.frame_edit.pack(fill=BOTH, expand=1)
self.instruction_text.grid(row=0, column=1, columnspan=2, pady=20)
self.scanner_prompt.grid(row=1, column=1, sticky='e')
self.scanner_entry.grid(row=1, column=2)
self.starting_batch_prompt.grid(row=2, column=1, sticky='e')
self.starting_batch_entry.grid(row=2, column=2)
self.edit_button.grid(row=3, column=1)
self.remove_button.grid(row=3, column=2)
    # This view is for creating a new ballot manifest from a template. The self.load_template_button will call the
# self.create_from_template() method.
def load_from_template_view(self):
self.clear_view()
self.frame_load_template.pack()
self.load_template_text.grid(row=1, column=0, pady=25)
self.load_template_button.grid(row=2, column=0)
    # This is used to dynamically verify which options in the file drop down menu are active. It will only activate them
# if there is an active data frame.
def update_menu_view(self):
if self.df is not None:
self.file.entryconfig('Edit or Remove Previous Entry', state='normal')
self.file.entryconfig('Save and Quit', state='normal')
else:
self.file.entryconfig('Edit or Remove Previous Entry', state='disabled')
self.file.entryconfig('Save and Quit', state='disabled')
# This view is an informational pop-up with basic information about the software.
def about_view(self):
messagebox.showinfo('About', 'Arapahoe County Ballot Manifest Data Entry Software.\nDeveloped by Jonathan '
'Layman')
# This view is a pop-up containing the data from a text file describing how to use the software.
def how_to_use_view(self):
with open('Data/How_to_use.txt', 'r') as txt_file:
use_doc = txt_file.read()
use_window = Toplevel(self.master)
use_window.geometry('500x400+75+40')
use_window.title('How to Use')
use_text = ScrolledText(use_window, width=135, height=50, wrap='word')
use_text.insert(1.0, use_doc)
use_text.pack()
use_text.config(state='disabled')
# The following methods are used to read and manipulate the data entered into this software. Many pandas
# data frame methods are called and used to sort and store data.
# This method is called from the self.form_view() and the self.generate_form() methods. It
# is used to dynamically label the batch numbers in the form according to their starting batch number. It starts by
# removing any existing data from the relevant variables. It then creates a list of the 15 batches from the start
# point it is given. The self.form_view() method does not provide a starting point, so a default starting point of 1
# is used. The self.generate_form() method does provide a starting point which is determined from the label.
def generate_batch_widgets(self, start=1):
for label in self.labels:
label.destroy()
del self.labels[:]
del self.batch_entries[:]
for i in range(start, start + 15):
self.labels.append(ttk.Label(self.frame_form, text='Batch ' + str(i) + ':'))
self.batch_entries.append(ttk.Entry(self.frame_form, width=25))
for index, label in enumerate(self.labels, start=4):
label.grid(row=index, column=0, sticky='w', padx=10, pady=2)
for index, entry in enumerate(self.batch_entries, start=4):
entry.grid(row=index, column=1, sticky='w')
entry.config(state='disabled')
# This method is called by the self.scan_submit() method and the edit view screen. It takes data from a scanned
# barcode (3 of 9 format) and splits the data. The raw data looks like: 01/L1/LICC 01-1
# The split point is "/L". The first part of the data is the Scanner Number, The second part is the first batch in
# the container, and the third part is the name of the container.
#
    # After splitting the data, this method makes sure the length of the split data is 3 because
    # each barcode has 3 parts. This will reduce the risk of scanning a wrong barcode. This method then checks to see if
    # there is a matching entry, and if there is, it prompts the user to confirm that they want to overwrite the
    # existing data. If the user answers "Yes" or if there is no matching data, it passes back to the
# self.generate_form() or self.form_view() methods as appropriate.
def load_label(self):
        self.printed_label = self.scan_label.get().split('/L')
        # Validate the barcode structure before converting to int, so a malformed
        # scan is rejected here instead of raising a ValueError or IndexError.
        if len(self.printed_label) != 3:
            self.form_view()
        else:
            self.printed_label[0] = int(self.printed_label[0])
            self.printed_label[1] = int(self.printed_label[1])
if (self.county, self.printed_label[0], self.printed_label[1]) in self.df.index:
result = messagebox.askyesno('Entry Found in Manifest', 'This entry already exists in the manifest, '
'would you like to overwrite it?')
if result:
for i in range(15):
self.df.drop((self.county, self.printed_label[0], self.printed_label[1] + i), inplace=True)
self.generate_form()
else:
self.form_view()
else:
self.generate_form()
# This method is called from the self.load_label() method and is used to set the appropriate labels according to the
# batch that is provided from the scanned label.
def generate_form(self):
self.scanner.config(text='Scanner: ICC STATION ' + str(self.printed_label[0]))
self.container.config(text='Container: ' + self.printed_label[2])
self.generate_batch_widgets(start=self.printed_label[1])
self.scan_label.delete(0, END)
self.scan_label.config(state='disabled')
self.scan_submit.config(state='disabled')
self.seal_entry_1.config(state='normal')
self.seal_entry_2.config(state='normal')
self.form_clear.config(state='normal')
self.form_save.config(state='normal')
for label in self.labels:
label.config(state='normal')
for entry in self.batch_entries:
entry.config(state='normal')
# This method is called from the self.select_button. It opens up a dialog to locate a file on the computer, and then
# checks to see if that file is a .csv. If the File is a .csv it will save the filename under the self.filename
# variable and load the contents of that file as the main data frame under the self.df variable.
def locate_file(self):
self.filename = filedialog.askopenfile()
if self.filename is not None and self.filename.name[-3:] == 'csv':
self.filename = self.filename.name
self.df = pd.read_csv(self.filename, index_col=['County', 'Scanner', 'ICC Batch'])
self.df.sort_index(inplace=True)
self.content_textbox.config(state='normal')
self.content_textbox.insert(0, self.filename)
self.content_textbox.config(state='disabled')
else:
messagebox.showerror(title='Ballot Manifest - Error',
message='Please select a CSV file')
# This method is called by the self.create_from_template() method and the self.load_button. It checks that there is
# content in the self.filename variable, and if there is, it clears the screen and displays the form.
def load_to_form(self):
if self.filename is not None:
self.clear_view()
self.form_view()
# This method is called by the self.form_save button. It creates a new data frame in a local variable called
# new_data. It uses list comprehension to populate the form based off of the information gathered from the form.
# The method then appends the data to the main data frame, sorts the data, and then saves it back to the .csv. If
    # the .csv is open, then this method will be denied access to the file and not be able to save. However, the data
# frame will remain intact.
def submit_form(self):
new_data = pd.DataFrame(columns=self.column_names)
new_data['County'] = [self.county for i in range(15)]
new_data['Scanner'] = [self.printed_label[0] for i in range(15)]
new_data['ICC Batch'] = [self.printed_label[1] + i for i in range(15)]
new_data['Ballot Count'] = [entry.get() for entry in self.batch_entries]
new_data['Container Number'] = [self.printed_label[2] for i in range(15)]
new_data['Seal 1'] = [self.seal_entry_1.get() for i in range(15)]
new_data['Seal 2'] = [self.seal_entry_2.get() for i in range(15)]
new_data.set_index(keys=['County', 'Scanner', 'ICC Batch'], inplace=True)
self.df = pd.concat([self.df, new_data])
self.df.sort_index(inplace=True)
self.df.to_csv(self.filename)
messagebox.showinfo('Ballot Manifest', 'Form successfully submitted')
self.form_view()
# This method is called from the file menu. It saves a copy of the main data frame to the .csv file. A copy is saved
# every time the submit form button is pressed, so saving here is redundant. This method then kills the master
# window.
def save_and_quit(self):
if self.df is not None:
self.df.to_csv(self.filename)
self.master.destroy()
quit()
# This method is called from the self.edit_button. It prompts the user to enter the scanner number and the starting
# batch number. If the data entered does not match a possible entry, the software will show an error explaining how
# to load a proper batch. If the data is a possible match, it generates a synthetic barcode string that is fed into
# the self.load_label() method.
def edit_entry(self):
list_of_scanners = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
if self.scanner_entry.get() in list_of_scanners and int(self.starting_batch_entry.get()) % 15 == 1:
scanner = self.scanner_entry.get()
batch = self.starting_batch_entry.get()
container = 'ICC ' + scanner + '-' + str(int((int(batch) + 14) / 15))
self.clear_view()
self.form_view()
self.scan_label.insert(0, scanner + '/L' + batch + '/L' + container)
self.scanner_entry.delete(0, END)
self.starting_batch_entry.delete(0, END)
self.load_label()
else:
messagebox.showerror('Data Mismatch', 'The data entered does not match a valid entry. Make sure that the '
'Scanner is entered with two numbers (ie. 05 or 10) and the batch is '
'a valid starting point (is the start of a batch of 15 ie. 31)')
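# Worked example of the synthetic barcode built above (values are illustrative only): for
# scanner '05' and starting batch '31', int((31 + 14) / 15) == 3, so the container label is
# 'ICC 05-3' and the string passed to self.load_label() is '05/L31/LICC 05-3'.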
# This method is called by the self.remove_button. It matches the data similarly to the self.edit_entry() method.
# If a match is found it will prompt the user to confirm that they want to remove the entry; otherwise it will
# inform the user that there is no such entry (even if the search criteria are valid).
def remove_entry(self):
list_of_scanners = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']
print(self.df)
print('Check 1')
if self.scanner_entry.get() in list_of_scanners and int(self.starting_batch_entry.get()) % 15 == 1:
if (self.county, int(self.scanner_entry.get()), int(self.starting_batch_entry.get())) in self.df.index:
result = messagebox.askyesno('Entry Found in Manifest', 'Are you sure you want to remove this entry?')
if result:
scanner = int(self.scanner_entry.get())
batch = int(self.starting_batch_entry.get())
for i in range(15):
self.df.drop((self.county, scanner, batch + i), inplace=True)
print(self.df)
print(self.filename)
self.df.to_csv(self.filename)
self.scanner_entry.delete(0, END)
self.starting_batch_entry.delete(0, END)
self.clear_view()
self.form_view()
else:
messagebox.showerror('No Entry Found',
'Your submission does not match any existing entry in the manifest')
# This method is called from the "create new from template" option in the file menu. It prompts the user to select
# a location and provides a default name of "Ballot Manifest.csv". Once a file location and name are selected, it
# creates a new data frame from the column list outlined in self.column_names and exports it to a csv. Finally,
# this method loads the newly created template into the entry form.
def create_from_template(self):
directory = filedialog.asksaveasfilename(title='Select a Location and Name for the Ballot Manifest',
filetypes=[('csv files', '*.csv')],
defaultextension='.csv',
initialfile='Ballot Manifest')
new_from_template = pd.DataFrame(columns=self.column_names)
new_from_template.to_csv(directory, index=False)
self.df = new_from_template.set_index(keys=['County', 'Scanner', 'ICC Batch'])
self.filename = directory
self.load_to_form()
# This function launches the program by creating a tkinter root, and then calling the BallotManifestGui class.
def main():
root = Tk()
manifest_gui = BallotManifestGui(root)
root.mainloop()
if __name__ == '__main__': main()
``` |
{
"source": "JonathanLayman/Music_For_Mom",
"score": 3
} |
#### File: JonathanLayman/Music_For_Mom/MusicPlayer.py
```python
import PySimpleGUI as sg
import vlc
from gmusicapi import Mobileclient
import requests
from datetime import timedelta
class MusicPlayer:
def __init__(self, device_id, token):
# Gmusic API initialization
print("initializing client")
self.api = Mobileclient()
print("logging in")
self.api.oauth_login(device_id, token)
print("loading all songs")
self.all_songs = self.api.get_all_songs()
print("loading playlists")
self.all_playlists = self.api.get_all_user_playlist_contents()
self.all_playlist_names = {}
for playlist in self.all_playlists:
self.all_playlist_names[playlist["name"]] = playlist["id"]
# VLC initialization
self.track_file = vlc.MediaPlayer()
self.track_list = []
self.titles = []
self.track_number = 0
self.playlists = []
self.current_time = -1
self.max_time = -1
# Get playlists, songs from the first playlist, and load the first song
self.get_playlists()
self.get_songs_from_playlist(self.playlists[0])
self.song = self.track_list[self.track_number]["trackId"]
self.load_track()
# GUI initialization
print("creating window")
self.song = ""
self.player_layout = [
[sg.Text("I love you Mom!", size=(15, 1), font=("Helvetica", 25))],
[sg.Listbox(values=self.playlists, size=(30, 20), bind_return_key=True, key="_playlists_"),
# sg.Image(),
sg.Listbox(values=self.titles, size=(30, 20), bind_return_key=True, key="_Tracks_")],
[sg.Text("Click Play or select song", key="_SongName_", enable_events=True)],
[sg.Text("Volume:"), sg.Slider(range=(0, 100), orientation="h", size=(20, 15),
default_value=self.track_file.audio_get_volume(), key="_volume_"),
sg.Button("Play"), sg.Button("Pause"), sg.Button("Next")]
]
self.title = "Music Player"
self.window = sg.Window(self.title).Layout(self.player_layout)
def get_playlists(self):
data = self.api.get_all_playlists()
self.playlists = []
for playlist in data:
if not playlist['deleted']:
self.playlists.append(playlist['name'])
print(self.playlists)
def change_playlists(self, name):
for pos, title in enumerate(self.playlists):
if title == name:
self.get_songs_from_playlist(self.playlists[pos])
def get_songs_from_playlist(self, name):
print("Obtaining track list")
tracks = []
if name in self.all_playlist_names:
for playlist in self.all_playlists:
if playlist["name"] == name:
for track in playlist["tracks"]:
tracks.append(track)
break
self.track_list = tracks
self.get_playlist_song_titles()
def get_playlist_song_titles(self):
print("Getting playlist song titles")
titles = []
for song in self.track_list:
if song["source"] == "2":
titles.append(song["track"]["title"])
else:
for track in self.all_songs:
if track["id"] == song["trackId"]:
print("match found")
titles.append(track["title"])
else:
print("No match found")
print(titles)
self.titles = titles
def get_song_position_from_title(self, title):
for pos, name in enumerate(self.titles):
if name == title:
return pos
else:
print("Couldn't find song in tracks")
def download_song(self):
print("downloading song")
url = self.api.get_stream_url(self.song)
doc = requests.get(url)
with open("song.mp3", "wb") as f:
f.write(doc.content)
def load_track(self):
self.track_file = vlc.MediaPlayer("song.mp3")
print("Time:", self.track_file.get_length())
def play(self):
self.track_file.play()
self.window.FindElement("_SongName_").Update(value=self.titles[self.track_number])
def stop(self):
self.track_file.stop()
def pause(self):
self.track_file.pause()
def next(self):
self.track_number += 1
self.song = self.track_list[self.track_number]["trackId"]
self.window.FindElement("_Tracks_").SetValue(self.titles[self.track_number])
self.download_song()
self.track_file.stop()
self.load_track()
self.track_file.play()
self.max_time = self.track_file.get_time()
def run(self):
print("launching program")
while True:
self.current_time = self.track_file.get_time()
if self.max_time == -1:
self.max_time = self.track_file.get_length()
elif self.max_time == 0:
self.max_time = -1
else:
current = timedelta(milliseconds=self.current_time)
max = timedelta(milliseconds=self.max_time)
# print("Current", current, "Max", max)
# print(int((self.current_time / self.max_time) * 100))
if (self.current_time + 500) > self.max_time:
self.next()
event, values = self.window.Read(timeout=100)
if event is not None:
if event == "Play":
self.play()
elif event == "Stop":
self.stop()
elif event == "Pause":
self.pause()
elif event == "Next":
self.next()
elif event == "_Tracks_":
self.track_number = self.get_song_position_from_title(values[event][0])
self.song = self.track_list[self.track_number]["trackId"]
self.download_song()
self.track_file.stop()
self.load_track()
self.play()
elif event == "_playlists_":
print(values[event][0])
self.change_playlists(values[event][0])
self.window.FindElement("_Tracks_").Update(self.titles)
elif event == "_volume_":
print("Volume", event, values)
else:
self.track_file.audio_set_volume(values["_volume_"])
if event == "Quit" or values is None:
break
if __name__ == "__main__":
try:
with open("oauth/device_id.txt", "r") as f:
device_id = f.read()
mp = MusicPlayer(device_id, "oauth/oauth_code.txt")
mp.run()
except FileNotFoundError:
print("Authorization Token Missing. Run login.py")
answer = input("Would you like to run now? y/n: ")
if answer == "y":
import login
with open("oauth/device_id.txt", "r") as f:
device_id = f.read()
mp = MusicPlayer(device_id, "oauth/oauth_code.txt")
mp.run()
``` |
{
"source": "JonathanLayman/RCV",
"score": 4
} |
#### File: JonathanLayman/RCV/RCVsoftware.py
```python
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog
from tkinter.scrolledtext import ScrolledText
import csv
import time
import pickle
# Create a Class Object to keep track of Candidate Data
class Candidate:
def __init__(self, name, election):
self.election = election
self.name = name
self.votes = 0
self.status = election.candidate_statuses[0]
# Candidate Class Method used to calculate how many votes a candidate received in a round
# looks at the voter preference calculated in the Voter class and adds to self.votes
# if the preference is equal to the name of the candidate
def calculateRound(self):
for vote in self.election.voter_list:
if vote.preference == self.name:
self.votes += vote.vote_weight
# Candidate Class Method used to reset each candidate's vote count between rounds
def reset(self):
self.votes = 0
# Create a Class object to keep track of every voter and their preferences.
class Voter:
def __init__(self, vote, election):
self.election = election
self.voter_number = vote[0]
self.shared_status = vote[1]
if self.shared_status == 'Shared':
self.vote_weight = 0.5
elif self.shared_status == 'Not Shared':
self.vote_weight = 1
self.votes = vote[2:]
self.preference = None
self.status = 'Active'
# Voter class method to determine which choice will be used in the next round of voting
def highestChoice(self):
for vote in self.votes:
for candidate in self.election.candidates:
if vote == candidate.name and candidate.status != self.election.candidate_statuses[1]:
self.preference = candidate.name
break
else:
continue
break
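# Illustrative sketch of the preference resolution above (candidate names are made up): if a
# voter's ranked choices are ['Alice', 'Bob', 'Carol'] and Alice has been eliminated, the nested
# loops skip her and set self.preference = 'Bob', the highest-ranked candidate still in the running.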
# Class object that keeps track of all data for an election. This object creates and manages the Voter and
# Candidate classes
class RCVElection:
def __init__(self, result_file):
self.candidate_statuses = ['In the Running', 'Eliminated', 'Winner', 'Tied Winner']
self.candidates = []
self.voter_list = []
self.data = result_file
self.round = 0
self.winner = False
self.tie = False
self.total_votes = 0
self.ballots_counted = 0
self.dual_ballots = 0
self.single_ballots = 0
self.save = ['Summary', [], 'Time', 'Name']
self.buildElection()
# RCV class method to establish initial election information after __init__ method has been run.
# Calculates generic information about the election, like candidate names, voter turn out, etc.
def buildElection(self):
candidate_names = []
number_of_choices = max(len(votes) for votes in self.data)
for choice in range(2, number_of_choices):
for vote in self.data:
if vote[choice] != 'OVER' and vote[choice] != 'UNDER':
if not vote[choice] in candidate_names:
candidate_names.append(vote[choice])
candidate_names.sort()
for name in candidate_names:
self.candidates.append(Candidate(name, self))
for vote in self.data:
self.voter_list.append(Voter(vote, self))
for vote in self.voter_list:
self.total_votes += vote.vote_weight
self.ballots_counted += 1
if vote.shared_status == 'Shared':
self.dual_ballots += 1
elif vote.shared_status == 'Not Shared':
self.single_ballots += 1
# RCV class method to generate the Summary of votes for reconciliation purposes. Calls Voter
# and Candidate class methods and variables.
def summaryOfVotes(self):
choice_number = 0
temp_list = [{'Vote Count': self.total_votes, 'Single Owner Ballots': self.single_ballots,
'Dual Owner Ballots': self.dual_ballots}]
for vote in self.voter_list[0].votes:
temp_dict = {}
under_votes = 0
over_votes = 0
for voter in self.voter_list:
voter.preference = voter.votes[choice_number]
if voter.votes[choice_number] == 'UNDER':
under_votes += voter.vote_weight
elif voter.votes[choice_number] == 'OVER':
over_votes += voter.vote_weight
for candidate in self.candidates:
candidate.calculateRound()
choice_number += 1
temp_dict['Choice'] = choice_number
for candidate in self.candidates:
temp_dict[candidate.name] = candidate.votes
candidate.reset()
temp_dict['Over Votes'] = over_votes
temp_dict['Under Votes'] = under_votes
temp_list.append(temp_dict)
self.save[0] = temp_list
# RCV class method to calculate the results of each round of voting.
def roundTabulation(self):
self.round += 1
for voter in self.voter_list:
voter.highestChoice()
for candidate in self.candidates:
candidate.reset()
candidate.calculateRound()
self.declareWinner()
self.eliminateLowest()
self.saveData()
# RCV class method to calculate if a winner has been determined.
def declareWinner(self):
self.checkForTie()
if not self.tie:
for candidate in self.candidates:
if candidate.votes > self.total_votes / 2:
candidate.status = self.candidate_statuses[2]
self.winner = True
# RCV class method to determine if there is a winning tie.
def checkForTie(self):
votes_list = []
for candidate in self.candidates:
if candidate.status != self.candidate_statuses[0]:
votes_list.append(candidate.votes)
if all(votes_list) and len(votes_list) > 0:
self.tie = True
self.winner = True
for candidate in self.candidates:
if candidate.status != self.candidate_statuses[1]:
candidate.status = self.candidate_statuses[3]
else:
self.tie = False
# RCV class method to determine the lowest vote getter and eliminate them.
def eliminateLowest(self):
if not self.winner:
votes_list = []
for candidate in self.candidates:
if candidate.status != self.candidate_statuses[1]:
votes_list.append(candidate.votes)
lowest = min(votes_list)
for candidate in self.candidates:
if candidate.votes == lowest:
candidate.status = self.candidate_statuses[1]
# RCV class method to create a list that can be saved and read by the GUI.
def saveData(self):
temp_dict = {'Round': self.round}
for candidate in self.candidates:
temp_dict[candidate.name] = [candidate.votes, candidate.status]
self.save[1].append(temp_dict)
self.save[2] = time.strftime('%m/%d/%Y %H:%M')
# RCV class method to run the election until a winner is declared
def runElection(self):
while not self.winner:
self.roundTabulation()
# RCV class method for developer to pull all candidate data
def debugCandidateData(self):
for candidate in self.candidates:
print('Name: ', candidate.name, 'Votes: ', candidate.votes,
'Status', candidate.status)
# RCV class method for developer to pull all voter data, or a single voter's data.
def debugVoterData(self, voter_id='All'):
if voter_id == 'All':
for voter in self.voter_list:
print('Number: ', voter.voter_number, 'Shared Status',
voter.shared_status, 'Votes: ', voter.votes,
'Preference: ', voter.preference, 'Status: ', voter.status)
else:
print('Number: ', self.voter_list[voter_id].voter_number, 'Shared Status',
self.voter_list[voter_id].shared_status, 'Votes: ', self.voter_list[voter_id].votes,
'Preference: ', self.voter_list[voter_id].preference,
'Status: ', self.voter_list[voter_id].status)
# Class object that creates and manages the GUI.
class RCVGUI:
def __init__(self, master):
self.election = None
self.summary = None
self.results = None
self.save_name = None
self.election_loaded = False
self.open_filename = None
self.file_load = None
self.data = None
self.summary_export = BooleanVar()
self.results_export = BooleanVar()
self.master = master
self.master.title('Arapahoe County RCV Software')
self.master.geometry('480x500+50+100')
self.master.option_add('*tearOff', False)
self.menubar = Menu(master)
self.master.config(menu=self.menubar)
self.file = Menu(self.menubar)
self.help_ = Menu(self.menubar)
self.menubar.add_cascade(menu=self.file, label='File')
self.menubar.add_cascade(menu=self.help_, label='Help')
self.file.add_command(label='New', command=lambda: self.newElection())
self.file.add_command(label='Open...', command=lambda: self.openFile())
self.file.add_command(label='Save', command=lambda: self.saveFile(), state='disabled')
self.file.add_command(label='Export', command=lambda: self.exportView(), state='disabled')
self.help_.add_command(label='Arapahoe County RCV Procedures', command=lambda: self.procedureWindow())
self.help_.add_command(label='How to use this software', command=lambda: self.howToUse())
self.help_.add_separator()
self.help_.add_command(label='About', command=lambda: self.aboutWindow())
self.logo = PhotoImage(file='Data/ArapahoeLogo.png').subsample(20, 20)
self.frame_header = ttk.Frame(master)
self.arapahoe_logo = ttk.Label(self.frame_header, image=self.logo)
self.title_text = ttk.Label(self.frame_header, font=('Arial', 18, 'bold'), wraplength=250,
text='Arapahoe County RCV Software')
self.frame_content = ttk.Frame(master)
self.welcome_label = ttk.Label(self.frame_content,
text='Thank you for using the RCV Tabulation Software.'
' To load a new election, click the "New" button. To'
' load a previous election, click the "Open" button',
wraplength=350)
self.notebook = ttk.Notebook(self.frame_content)
self.notebook_frame1 = ttk.Frame(self.notebook)
self.notebook_frame2 = ttk.Frame(self.notebook)
self.notebook_textbox_frame1 = ScrolledText(self.notebook_frame1, height=18, width=50,
wrap='word')
self.notebook_textbox_frame2 = ScrolledText(self.notebook_frame2, height=18, width=50,
wrap='word')
self.file_error = messagebox
self.save_message = ttk.Label(self.frame_content, text='Please enter a Name for this election.')
self.save_textbox = ttk.Entry(self.frame_content, width=36)
self.save_button = ttk.Button(self.frame_content, text='Save', command=lambda: self.picklesave())
self.open_message = ttk.Label(self.frame_content, text='Select a rcv load file to load')
self.open_textbox = ttk.Entry(self.frame_content, width=40)
self.select_button = ttk.Button(self.frame_content, text='Select File', command=lambda: self.selectSave())
self.load_button = ttk.Button(self.frame_content, text='Load Election', command=lambda: self.pickleLoad())
self.cancel_button = ttk.Button(self.frame_content, text='Cancel', command=lambda: self.body())
self.export_button = ttk.Button(self.frame_content, text='Export', command=lambda: self.saveExport())
self.summary_checkbox = ttk.Checkbutton(self.frame_content, text='Summary of Votes',
variable=self.summary_export, onvalue=True, offvalue=False)
self.results_checkbox = ttk.Checkbutton(self.frame_content, text='Election Results',
variable=self.results_export, onvalue=True, offvalue=False)
self.export_text = ttk.Label(self.frame_content,
text='Please select the options you would like to export '
'and provide a name for this export', wraplength=350)
self.export_name = ttk.Label(self.frame_content, text='Name: ')
self.frame_content_list = [self.welcome_label, self.notebook, self.save_message, self.save_textbox,
self.save_button, self.open_message, self.open_textbox, self.select_button,
self.load_button, self.cancel_button, self.summary_checkbox,
self.results_checkbox, self.export_text, self.export_button,
self.export_name]
self.header()
self.body()
# GUI class method to display the header on the screen.
def header(self):
self.frame_header.pack(fill=BOTH)
self.arapahoe_logo.grid(row=0, column=0)
self.title_text.grid(row=0, column=1, padx=25)
# GUI class method to display the main body on the screen.
def body(self):
self.clearPage()
self.frame_content.pack(fill=BOTH, padx=25, pady=25)
self.welcome_label.grid(row=0, column=0)
self.file.entryconfig('Save', state='disabled')
self.file.entryconfig('Export', state='disabled')
# GUI class method to view a loaded election.
def electionView(self):
self.clearPage()
self.election_loaded = True
self.notebookView()
self.displaySummary()
self.displayResults()
if self.election_loaded:
self.file.entryconfig('Save', state='normal')
self.file.entryconfig('Export', state='normal')
# GUI class method to manage exporting results from a loaded election.
def exportView(self):
self.clearPage()
self.export_text.grid(row=0, column=0, columnspan=2, pady=15)
self.summary_checkbox.grid(row=1, column=0, sticky='w')
self.results_checkbox.grid(row=2, column=0, sticky='w')
self.export_name.grid(row=3, column=0)
self.open_textbox.grid(row=3, column=1)
self.open_textbox.delete(0, END)
self.export_button.grid(row=4, column=0, pady=15)
self.cancel_button.grid(row=4, column=1, pady=15)
# GUI class method that responds to a button press to export a loaded election.
def saveExport(self):
filename = 'Exports/' + self.open_textbox.get() + '.txt'
with open(filename, 'w') as export_file:
if self.summary_export.get() is True:
export_file.write(self.notebook_textbox_frame1.get(1.0, END))
if self.results_export.get() is True:
export_file.write(self.notebook_textbox_frame2.get(1.0, END))
self.file_error.showinfo('Save Successful', 'Your file has been saved to the export folder')
self.open_textbox.delete(0, END)
self.body()
# GUI class method to load a new election.
def newElection(self):
filename = filedialog.askopenfile(initialdir='/')
try:
with open(filename.name, 'r') as csv_file:
reader = csv.reader(csv_file)
list_of_votes = list(reader)
self.election = RCVElection(list_of_votes)
self.election.summaryOfVotes()
self.election.runElection()
self.data = self.election.save
self.electionView()
except:
self.file_error.showerror('Error', 'Please select an election result file in the proper format')
self.body()
# GUI class method to view a notebook of the Summary of Votes and Election Results.
def notebookView(self):
self.clearPage()
self.notebook.grid(row=0, column=0)
self.notebook.add(self.notebook_frame1, text='Summary of Votes')
self.notebook.add(self.notebook_frame2, text='RCV Election Results')
# GUI class method to display the Summary of Votes to the Notebook.
def displaySummary(self):
self.summary = self.data[0]
self.notebook_textbox_frame1.pack()
self.notebook_textbox_frame1.insert(1.0, '----- Summary of Votes -----\n\n----- Basic Info -----\n')
for line in self.summary:
for key, value in line.items():
if key == 'Choice':
self.notebook_textbox_frame1.insert(END, '----- ' + key)
self.notebook_textbox_frame1.insert(END, ' ' + str(value) + ' -----\n')
else:
self.notebook_textbox_frame1.insert(END, str(value).rjust(4) + ' -- ')
self.notebook_textbox_frame1.insert(END, key + '\n')
self.notebook_textbox_frame1.insert(END, '\n')
self.notebook_textbox_frame1.config(state='disabled')
# GUI class method to display the Election Results to the Notebook.
def displayResults(self):
self.results = self.data[1]
self.notebook_textbox_frame2.pack()
self.notebook_textbox_frame2.insert(1.0, '----- RCV Election Results -----\n\n')
for line in self.results:
for key, value in line.items():
if key == 'Round':
self.notebook_textbox_frame2.insert(END, '----- ' + key)
self.notebook_textbox_frame2.insert(END, ' ' + str(value) + ' -----\n')
else:
self.notebook_textbox_frame2.insert(END, str(value[1]).rjust(14) + ' -- ' +
str(value[0]).rjust(4) + ' -- ' + key + '\n')
self.notebook_textbox_frame2.insert(END, '\n')
self.notebook_textbox_frame2.config(state='disabled')
# GUI Class method to remove all widgets from the content frame
def clearPage(self):
for widget in self.frame_content_list:
widget.grid_remove()
# GUI class method to save a loaded election as a .rcv file.
def saveFile(self):
self.clearPage()
self.save_message.grid(row=0, column=0)
self.save_textbox.grid(row=0, column=1)
self.save_button.grid(row=1, column=1, sticky='w')
self.cancel_button.grid(row=1, column=1, sticky='e')
# GUI class method to save the binary data as .rcv using the pickle module.
def picklesave(self):
self.clearPage()
self.save_name = self.save_textbox.get()
self.save_name = 'Saves/' + self.save_name + '.rcv'
pickle.dump(self.data, open(self.save_name, 'wb'))
self.save_textbox.delete(0, END)
self.body()
# GUI class method to open a saved election as a .rcv file.
def openFile(self):
self.clearPage()
self.open_message.grid(row=0, column=0)
self.open_textbox.grid(row=0, column=1)
self.open_textbox.delete(0, END)
self.select_button.grid(row=1, column=1, sticky='w')
self.load_button.grid(row=1, column=1)
self.cancel_button.grid(row=1, column=1, sticky='e')
# GUI class method to select a file to load from.
def selectSave(self):
self.open_filename = filedialog.askopenfile(initialdir='Saves')
self.open_textbox.delete(0, END)
try:
self.open_textbox.insert(0, self.open_filename.name)
except:
self.openFile()
# GUI class method to load the .rcv file and interpret it.
def pickleLoad(self):
self.clearPage()
self.data = pickle.load(open(self.open_filename.name, 'rb'))
self.electionView()
# GUI class method to pop up a window with the Arapahoe County RCV Procedures.
def procedureWindow(self):
with open('Data/Procedures.txt', 'r') as txt_file:
help_doc = txt_file.read()
help_window = Toplevel(self.master)
help_window.geometry('800x600+75+40')
help_window.title('RCV Procedures')
help_text = ScrolledText(help_window, width=135, height=50, wrap='word')
help_text.insert(1.0, help_doc)
help_text.pack()
help_text.config(state='disabled')
# GUI class method that displays the about information
def aboutWindow(self):
self.file_error.showinfo('About', 'Arapahoe County RCV Voting Tabulation Software.\nDeveloped by Jonathan'
' Layman.')
# GUI class method that displays the how to use documentation
def howToUse(self):
with open('Data/How_to_use.txt', 'r') as txt_file:
use_doc = txt_file.read()
use_window = Toplevel(self.master)
use_window.geometry('800x600+75+40')
use_window.title('How to Use')
use_text = ScrolledText(use_window, width=135, height=50, wrap='word')
use_text.insert(1.0, use_doc)
use_text.pack()
use_text.config(state='disabled')
# Main program loop
def main():
root = Tk()
rcv_gui = RCVGUI(root)
root.mainloop()
if __name__ == "__main__": main()
``` |
{
"source": "JonathanLehner/cassini_2021_nature_discoverer",
"score": 4
} |
#### File: cassini_2021_nature_discoverer/sentinel2_processing/geotiff2png_fullcolor.py
```python
import gc
import os
import pprint as pp
import time
from datetime import datetime
from os import listdir
from os.path import join, isfile
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import rasterio
import rasterio.mask
import wordninja
from cleantext import clean
from natsort import natsorted
from tqdm import tqdm
tif_dir_path = str(input("Enter path to folder with geotiff files -->"))
# -----------------------------------------------------------------
output_folder_name = "full_color_PNGs_geotiff2png"
if not os.path.isdir(join(tif_dir_path, output_folder_name)):
os.mkdir(join(tif_dir_path, output_folder_name))
# make a place to store outputs if one does not exist
output_path_full = os.path.join(tif_dir_path, output_folder_name)
print("outputs will be in: \n", output_path_full)
# ----------------------------------------------------------------------------
def cleantxt_wrap(ugly_text):
# a wrapper for clean text with options different than default
# https://pypi.org/project/clean-text/
cleaned_text = clean(ugly_text,
fix_unicode=True, # fix various unicode errors
to_ascii=True, # transliterate to closest ASCII representation
lower=True, # lowercase text
no_line_breaks=True, # fully strip line breaks as opposed to only normalizing them
no_urls=True, # replace all URLs with a special token
no_emails=True, # replace all email addresses with a special token
no_phone_numbers=True, # replace all phone numbers with a special token
no_numbers=False, # replace all numbers with a special token
no_digits=False, # replace all digits with a special token
no_currency_symbols=True, # replace all currency symbols with a special token
no_punct=True, # remove punctuations
replace_with_punct="", # instead of removing punctuations you may replace them
replace_with_url="<URL>",
replace_with_email="<EMAIL>",
replace_with_phone_number="<PHONE>",
replace_with_number="<NUM>",
replace_with_digit="0",
replace_with_currency_symbol="<CUR>",
lang="en" # set to 'de' for German special handling
)
return cleaned_text
def beautify_filename(filename, num_words=20, start_reverse=False,
word_separator="_"):
# takes a filename stored as text, removes the extension, separates it into X words ...
# and returns a nice filename with the words separated by the given word_separator
# useful for when you are reading files, doing things to them, and making new files
filename = str(filename)
index_file_Ext = filename.rfind('.')
current_name = str(filename)[:index_file_Ext] # get rid of extension
clean_name = cleantxt_wrap(current_name) # wrapper with custom defs
file_words = wordninja.split(clean_name)
# splits concatenated text into a list of words based on common word freq
if len(file_words) <= num_words:
num_words = len(file_words)
if start_reverse:
t_file_words = file_words[-num_words:]
else:
t_file_words = file_words[:num_words]
pretty_name = word_separator.join(t_file_words) # see function argument
# NOTE IT DOES NOT RETURN THE EXTENSION
return pretty_name[: (len(pretty_name) - 1)] # there is a space always at the end, so -1
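# Hypothetical usage (the exact output depends on wordninja's word-frequency model, so this is
# only indicative): beautify_filename('Sentinel2_LakeZurich_2021.tif') would return a cleaned,
# underscore-separated name along the lines of 'sentinel_2_lake_zurich_2021', extension dropped.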
# ----------------------------------------------------------------------------
def load_landsat_image_single(imgpath):
# ONLY USEFUL IF BANDS ARE IN DIFFERENT FILE
image = {}
datboi = rasterio.open(imgpath)
bands = datboi.indexes
for band in bands:
# considering the landsat images end with *_SR_B#.TIF, we will use it to locate the correct file
this_label = "B" + str(band)
image.update({this_label: datboi.read(band)})
return image
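# Illustrative note: for a 4-band GeoTIFF this returns a dict keyed by band label, e.g.
# {'B1': <2-D array>, 'B2': <2-D array>, 'B3': <2-D array>, 'B4': <2-D array>}, which is why
# convert_tif_to_png_v2() below can stack img['B4'], img['B3'], img['B2'] into an RGB cube.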
def convert_tif_to_png_v2(full_input_path, output_folder, download_pic=False):
img = load_landsat_image_single(full_input_path)
basename = beautify_filename(os.path.basename(full_input_path))
# stack the layers to create a cube
rgb = np.stack([img['B4'], img['B3'], img['B2']], axis=-1)
# normalize the values
rgb = rgb / rgb.max() * 2
# display the image with a slightly increased figure size
plt.figure(figsize=(10, 10), tight_layout=True, clear=True)
plt.imshow(rgb, norm=matplotlib.colors.Normalize(), interpolation="lanczos")
plt.title(os.path.basename(full_input_path))
outname = "[conv to pretty image]" + basename + ".png"
plt.savefig(join(output_folder, outname), dpi=300, facecolor='w', edgecolor='w',
transparent=True, bbox_inches="tight")
plt.close()
# ----------------------------------------------------------------------------
# load files
files_to_munch = natsorted([f for f in listdir(tif_dir_path) if isfile(os.path.join(tif_dir_path, f))])
total_files_1 = len(files_to_munch)
removed_count_1 = 0
approved_files = []
# remove non-tif_image files
for prefile in files_to_munch:
if prefile.endswith(".tif"):
approved_files.append(prefile)
else:
# removing items from a list while iterating over it skips elements, so just count instead
removed_count_1 += 1
print("out of {0:3d} file(s) originally in the folder, ".format(total_files_1),
"{0:3d} non-tif_image files were removed".format(removed_count_1))
print('\n {0:3d} tif_image file(s) in folder will be transcribed.'.format(len(approved_files)))
pp.pprint(approved_files[:10])
print("...\n")
# loop
st = time.time()
verbose = False
for tif_file in tqdm(approved_files, total=len(approved_files),
desc="Resizing tif_images"):
index_pos = approved_files.index(tif_file)
try:
convert_tif_to_png_v2(join(tif_dir_path, tif_file), output_path_full)
except:
print("was unable to process the file {}. moving to next one".format(tif_file))
if index_pos % 5 == 0:
gc.collect()
rt = round((time.time() - st) / 60, 2)
print("\n\nfinished converting all tif_images - ", datetime.now())
print("Converted {} tif_images in {} minutes".format(len(approved_files), rt))
print("they are located in: \n", output_path_full)
print("they are located in: \n", output_path_full)
```
#### File: cassini_2021_nature_discoverer/sentinel2_processing/geotiff_to_jpg_v4.py
```python
import os
import pprint as pp
import time
from datetime import datetime
from os import listdir
from os.path import join, isfile
import wordninja
from cleantext import clean
from natsort import natsorted
from osgeo import gdal
from tqdm import tqdm
tif_dir_path = str(input("Enter path to folder with geotiff files -->"))
# -----------------------------------------------------------------
output_folder_name = "converted_to_image_gdal_v4"
if not os.path.isdir(join(tif_dir_path, output_folder_name)):
os.mkdir(join(tif_dir_path, output_folder_name))
# make a place to store outputs if one does not exist
output_path_full = os.path.join(tif_dir_path, output_folder_name)
print("outputs will be in: \n", output_path_full)
# ----------------------------------------------------------------------------
def cleantxt_wrap(ugly_text):
# a wrapper for clean text with options different than default
# https://pypi.org/project/clean-text/
cleaned_text = clean(ugly_text,
fix_unicode=True, # fix various unicode errors
to_ascii=True, # transliterate to closest ASCII representation
lower=True, # lowercase text
no_line_breaks=True, # fully strip line breaks as opposed to only normalizing them
no_urls=True, # replace all URLs with a special token
no_emails=True, # replace all email addresses with a special token
no_phone_numbers=True, # replace all phone numbers with a special token
no_numbers=False, # replace all numbers with a special token
no_digits=False, # replace all digits with a special token
no_currency_symbols=True, # replace all currency symbols with a special token
no_punct=True, # remove punctuations
replace_with_punct="", # instead of removing punctuations you may replace them
replace_with_url="<URL>",
replace_with_email="<EMAIL>",
replace_with_phone_number="<PHONE>",
replace_with_number="<NUM>",
replace_with_digit="0",
replace_with_currency_symbol="<CUR>",
lang="en" # set to 'de' for German special handling
)
return cleaned_text
def beautify_filename(filename, num_words=20, start_reverse=False,
word_separator="_"):
# takes a filename stored as text, removes the extension, separates it into X words ...
# and returns a nice filename with the words separated by the given word_separator
# useful for when you are reading files, doing things to them, and making new files
filename = str(filename)
index_file_Ext = filename.rfind('.')
current_name = str(filename)[:index_file_Ext] # get rid of extension
clean_name = cleantxt_wrap(current_name) # wrapper with custom defs
file_words = wordninja.split(clean_name)
# splits concatenated text into a list of words based on common word freq
if len(file_words) <= num_words:
num_words = len(file_words)
if start_reverse:
t_file_words = file_words[-num_words:]
else:
t_file_words = file_words[:num_words]
pretty_name = word_separator.join(t_file_words) # see function argument
# NOTE IT DOES NOT RETURN THE EXTENSION
return pretty_name[: (len(pretty_name) - 1)] # there is a space always at the end, so -1
# ----------------------------------------------------------------------------
# load files
files_to_munch = natsorted([f for f in listdir(tif_dir_path) if isfile(os.path.join(tif_dir_path, f))])
total_files_1 = len(files_to_munch)
removed_count_1 = 0
approved_files = []
# remove non-tif_image files
for prefile in files_to_munch:
if prefile.endswith(".tif"):
approved_files.append(prefile)
else:
# removing items from a list while iterating over it skips elements, so just count instead
removed_count_1 += 1
print("out of {0:3d} file(s) originally in the folder, ".format(total_files_1),
"{0:3d} non-tif_image files were removed".format(removed_count_1))
print('\n {0:3d} tif_image file(s) in folder will be transcribed.'.format(len(approved_files)))
pp.pprint(approved_files)
# ----------------------------------------------------------------------------
options_list = [
'-of PNG',
'-ot Byte',
# '-b 1 -b 2 -b 3 -b 4',
'-b 2 -b 3 -b 4',
'-scale',
'-r lanczos'
]
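# Note on the gdal_translate flags above: '-of PNG' selects the PNG output driver, '-ot Byte'
# converts pixels to 8-bit, '-b 2 -b 3 -b 4' picks three bands for an RGB composite, '-scale'
# stretches values to the output range, and '-r lanczos' sets the resampling method. Joined
# below they form a single option string, e.g. '-of PNG -ot Byte -b 2 -b 3 -b 4 -scale -r lanczos'.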
options_string = " ".join(options_list)
# loop
st = time.time()
verbose = False
for tif_file in tqdm(approved_files, total=len(approved_files),
desc="Resizing tif_images"):
index_pos = approved_files.index(tif_file)
out_name = beautify_filename(tif_file) + "converted_nr_{}_".format(index_pos) + ".png"
infile = join(tif_dir_path, tif_file)
outfile = join(output_path_full, out_name)
if verbose:
# For no. of bands and resolution
gd_img = gdal.Open(infile, gdal.GA_ReadOnly)
print("\n data on rasters from gdal:")
print(gd_img.RasterCount, gd_img.RasterXSize, gd_img.RasterYSize)
gdal.Translate(
join(output_path_full, out_name),
join(tif_dir_path, tif_file),
options=options_string
)
rt = round((time.time() - st) / 60, 2)
print("\n\nfinished converting all tif_images - ", datetime.now())
print("Converted {} tif_images in {} minutes".format(len(approved_files), rt))
print("they are located in: \n", output_path_full)
print("they are located in: \n", output_path_full)
``` |
{
"source": "JonathanLehner/korali",
"score": 3
} |
#### File: checkpoint.resume/_model/model.py
```python
import os
import sys
import numpy as np
# This is the negative square -(x^2)
def model(s):
x = s["Parameters"][0]
r = -0.5 * x * x
s["F(x)"] = r
# Function and Gradient function evaluation
def model_with_gradient(p):
X = p["Parameters"]
x = X[0]
gradient = []
evaluation = -0.5 * x * x
gradient.append(-x)
p["F(x)"] = evaluation
p["Gradient"] = gradient
def calculateLogLikelihood(s):
x = s["Parameters"][0]
r = -0.5 * x * x
s["logLikelihood"] = r
def prepareFile(fileName):
if os.path.isdir('_executor_output') == False:
os.mkdir('_executor_output')
os.system('touch {0}'.format(fileName))
def put_normal_rnds(theta, Ns, fileName):
mu = theta["Parameters"][0]
var = theta["Parameters"][1]
y = np.random.normal(mu, var, Ns)
if os.path.isfile(fileName):
f = open(fileName, 'a+')
np.savetxt(f, np.transpose(y))
f.close()
else:
sys.exit(
'put_normal_rnds: file \'{0}\' does not exist! exit..'.format(fileName))
```
#### File: running.mpi.python/_model/model.py
```python
import numpy
import korali
from mpi4py import MPI
# This toy model has no purpose other than showing how to use mpi4py in Korali
# It evaluates different x (one per worker) and returns its average as team evaluation.
def model(p):
comm = korali.getWorkerMPIComm()
rank = comm.Get_rank()
size = comm.Get_size()
x = p["Parameters"][0]
y = -0.5 * x * x
# Testing reduce operation
recvdata = numpy.zeros(size, dtype=numpy.float64)
senddata = numpy.arange(size, dtype=numpy.float64)
senddata[0] = y
comm.Allreduce(senddata, recvdata, op=MPI.SUM)
y = recvdata[0]
if (rank == 0): print(f"MPI Rank: {rank}/{size} - Evaluation: f({x}) = {y}")
p["F(x)"] = y
```
#### File: swarm/_model/plotter.py
```python
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d import Axes3D
def plotSwarm( sim, t ):
fig = plt.figure()
ax = fig.gca(projection='3d')
locations = []
directions = []
for fish in sim.fishes:
locations.append(fish.location)
directions.append(fish.curDirection)
#print(np.linalg.norm(fish.curDirection))
locations = np.array(locations)
directions = np.array(directions)
cmap = cm.jet
norm = Normalize(vmin=0, vmax=sim.N)
ax.quiver(locations[:,0],locations[:,1],locations[:,2],
directions[:,0], directions[:,1], directions[:,2],
color=cmap(norm(np.arange(sim.N))))
ax.set_xlim([-1,1])
ax.set_ylim([-1,1])
ax.set_zlim([-1,1])
plt.savefig("_figures/swarm_t={:04d}.png".format(t))
plt.close()
def plotSwarmCentered( sim, t ):
fig = plt.figure()
ax = fig.gca(projection='3d')
locations = []
directions = []
for fish in sim.fishes:
locations.append(fish.location)
directions.append(fish.curDirection)
#print(np.linalg.norm(fish.curDirection))
locations = np.array(locations)
directions = np.array(directions)
center = sim.computeCenter()
cmap = cm.jet
norm = Normalize(vmin=0, vmax=sim.N)
ax.quiver(locations[:,0],locations[:,1],locations[:,2],
directions[:,0], directions[:,1], directions[:,2],
color=cmap(norm(np.arange(sim.N))))
ax.set_xlim([center[0]-3,center[0]+3])
ax.set_ylim([center[1]-3,center[1]+3])
ax.set_zlim([center[2]-3,center[2]+3])
plt.savefig("_figures/swarm_t={:04d}.png".format(t))
plt.close()
def plotSwarmSphere( sim, t, i ):
fig = plt.figure()
ax = fig.gca(projection='3d')
locations = []
directions = []
for fish in sim.swarm:
locations.append(fish.location)
directions.append(fish.curDirection)
#print(np.linalg.norm(fish.curDirection))
locations = np.array(locations)
directions = np.array(directions)
ax.quiver(locations[:,0],locations[:,1],locations[:,2],
directions[:,0], directions[:,1], directions[:,2])
# Create a sphere
r = 1
pi = np.pi
cos = np.cos
sin = np.sin
phi, theta = np.mgrid[0.0:pi:100j, 0.0:2.0*pi:100j]
x = r*sin(phi)*cos(theta)
y = r*sin(phi)*sin(theta)
z = r*cos(phi)
ax.plot_surface(x, y, z, rstride=1, cstride=1, color='c', alpha=0.3, linewidth=0)
ax.set_aspect('equal', 'box')
#ax.set_xlim([-2,2])
#ax.set_ylim([-2,2])
#ax.set_zlim([-2,2])
plt.savefig("_figures/swarm_t={}_sphere_i={}.png".format(t,i))
plt.close()
def plotFishs( fishs, i, t, type ):
if fishs.size == 0:
print("no fish of type {}".format(type))
return
fig = plt.figure()
ax = fig.gca(projection='3d')
locations = []
directions = []
for fish in fishs:
locations.append(fish.location)
directions.append(fish.curDirection)
#print(np.linalg.norm(fish.curDirection))
locations = np.array(locations)
directions = np.array(directions)
ax.quiver(locations[:,0],locations[:,1],locations[:,2],
directions[:,0], directions[:,1], directions[:,2])
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
ax.set_zlim([-2,2])
plt.savefig("_figures/{}_t={}_i={}.png".format(type, t, i))
plt.close()
def plotFish( fish, i, t ):
fig = plt.figure()
ax = fig.gca(projection='3d')
loc = fish.location
vec = fish.curDirection
ax.quiver(loc[0], loc[1], loc[2], vec[0], vec[1], vec[2])
ax.set_xlim([-2,2])
ax.set_ylim([-2,2])
ax.set_zlim([-2,2])
plt.savefig("_figures/fish_t={}_i={}.png".format(t, i))
plt.close()
def plotRot( vec1, vec2, rotvec, angle ):
fig = plt.figure()
ax = fig.gca(projection='3d')
locations = [vec1,vec2,rotvec]
vecs = np.array([vec1,vec2,rotvec])
loc = np.zeros(3)
ax.quiver(loc, loc, loc, vecs[:,0], vecs[:,1], vecs[:,2], color=['green','red','black'])
ax.set_title("rotation by {} degree".format(angle))
ax.set_xlim([-1,1])
ax.set_ylim([-1,1])
ax.set_zlim([-1,1])
plt.show()
```
#### File: upswing/_model/single_env.py
```python
from single_pendulum import *
######## Defining Environment Storage
upswing = SinglePendulum()
maxSteps = 500
def env(s):
# Initializing environment
upswing.reset()
s["State"] = upswing.getState().tolist()
step = 0
done = False
maxs = np.ones(len(upswing.getState().tolist()))
while not done and step < maxSteps:
# Getting new action
s.update()
# Performing the action
done = upswing.advance(s["Action"])
# Getting Reward
s["Reward"] = upswing.getReward()
# Storing New State
state = upswing.getState().tolist()
s["State"] = state
maxs = np.maximum(maxs, state)
# Advancing step counter
step = step + 1
# Setting finalization status
if (done):
s["Termination"] = "Terminal"
else:
s["Termination"] = "Truncated"
```
#### File: surrogates/optimization/train-surrogate.py
```python
import korali
import numpy as np
def create_train_data(n=20, L=2):
""" create synthetic data to train on """
x = np.linspace(-L / 2, L / 2, n)
y = x**2
return x, y
k = korali.Engine()
e = korali.Experiment()
xtrain, ytrain = create_train_data()
e['Random Seed'] = 0xC0FFEE
e['Problem']['Type'] = 'Supervised Learning'
trainingInput = [ [ [ x ] ] for x in xtrain ]
solutionInput = [ [ y ] for y in ytrain ]
e["Problem"]["Training Batch Size"] = 1
e["Problem"]["Inference Batch Size"] = 1
e["Problem"]["Input"]["Data"] = trainingInput
e["Problem"]["Input"]["Size"] = 1
e["Problem"]["Solution"]["Data"] = solutionInput
e["Problem"]["Solution"]["Size"] = 1
e['Solver']['Type'] = 'Learner/Gaussian Process'
e['Solver']['Covariance Function'] = 'CovSEiso'
e['Solver']['Optimizer']['Type'] = 'Optimizer/Rprop'
e['Solver']['Optimizer']['Termination Criteria']['Max Generations'] = 1000
e['Solver']['Optimizer']['Termination Criteria']['Parameter Relative Tolerance'] = 1e-8
e['Console Output']['Verbosity'] = 'Normal'
e['Console Output']['Frequency'] = 10
e['File Output']['Frequency'] = 100
e["File Output"]["Path"] = "_korali_result_surrogate"
k.run(e)
show_figure = False
if show_figure:
xtest = np.linspace(-1, 1, 100)
xtest = xtest.reshape((len(xtest), 1))
ytest = np.array( [ e.getEvaluation(v) for v in xtest.tolist() ] )
import matplotlib.pyplot as plt
fig = plt.figure(0)
ax = fig.subplots()
ax.plot(xtrain, ytrain, 'ob')
ax.plot(xtest.flatten(), ytest[:, 0], '-r')
plt.show()
```
#### File: discrete/_model/model.py
```python
def model(s):
npar = 10
res = 0.0
v = s["Parameters"]
for i in range(npar):
if (i == 0 or i == 1 or i == 3 or i == 6):
res += pow(10, 6.0 * i / npar) * round(v[i]) * round(v[i])
else:
res += pow(10, 6.0 * i / npar) * v[i] * v[i]
s["F(x)"] = -res
def modelGrid(p):
x = p["Parameters"][0]
y = p["Parameters"][1]
p["F(x)"] = -0.5 * (x * x + y * y)
```
#### File: propagation/_model/plots.py
```python
import json
import numpy as np
import matplotlib.pyplot as plt
def plot_credible_intervals( file, data):
with open(file) as f: d = json.load(f)
var ='Evaluations'
percentages = [0.8, 0.9, 0.99]
# draw ns samples from the generative model. If the plots are not smooth
# increase ns
ns = 100
N = len(d['Samples'])
Ny = len( d['Samples'][0][var] )
x = d['Samples'][0]['X']
y = np.zeros((N,Ny))
s = np.zeros((N,Ny))
for k in range(N):
y[k,:] = d['Samples'][k][var]
s[k,:] = d['Samples'][k]['sigma']
samples = np.zeros((N*ns,Ny))
# draw the samples from the generative model (likelihood)
for k in range(Ny):
m = y[:,k]
r = s[:,k]
yy = [ np.random.normal(m,r) for _ in range(ns) ]
samples[:,k] = np.asarray(yy).flatten()
# compute and plot statistics
mean = np.zeros((Ny,1))
median = np.zeros((Ny,1))
for k in range(Ny):
median[k] = np.quantile( samples[:,k],0.5)
mean[k] = np.mean( samples[:,k] )
fig, ax = plt.subplots(1, 1)
for p in np.sort(percentages)[::-1]:
q1 = np.zeros((Ny,));
q2 = np.zeros((Ny,));
for k in range(Ny):
q1[k] = np.quantile( samples[:,k],0.5-p/2)
q2[k] = np.quantile( samples[:,k],0.5+p/2)
ax.fill_between( x, q1 , q2, alpha=0.5, label=f' {100*p:.1f}% credible interval' )
ax.plot( x, mean, '-', lw=2, label='Mean', color='black')
ax.plot( x, median, '--', lw=2, label='Median', color='black')
ax.plot( data['X'], data['Y'], '.', color='red', markersize=16)
ax.legend(loc='upper left')
ax.grid()
ax.set_xlim(left=x[1])
plt.show()
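# Hypothetical usage (the result path and data are placeholders; the actual file depends on the
# Korali propagation experiment that produced the samples):
#   data = {'X': [0.0, 0.5, 1.0], 'Y': [0.1, 0.3, 0.9]}
#   plot_credible_intervals('_korali_result/latest', data)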
```
#### File: bubblePipe/_model/model.py
```python
import os
import sys
import subprocess
import shutil
def model(x, resultFolder, objective):
SourceFolderName = "_config"
DestinationFolderName = resultFolder + '/gen' + str(x["Current Generation"]).zfill(3) + '/sample' + str(x["Sample Id"]).zfill(6)
# Copy the 'model' folder into a temporary directory
if os.path.exists( DestinationFolderName ):
shutil.rmtree( DestinationFolderName)
shutil.copytree( SourceFolderName, DestinationFolderName )
CurrentDirectory = os.getcwd()
# Move inside the temporary directory
try:
os.chdir( DestinationFolderName )
except OSError as e:
print("I/O error(" + str(e.errno) + "): " + e.strerror )
print("The folder " + DestinationFolderName + " is missing")
sys.exit(1)
# Storing base parameter file
configFile='par.py'
with open(configFile, 'a') as f:
f.write('arc_width = [ 1, %.10f, %.10f, %.10f, %.10f, %.10f, 1 ]\n' % ( x["Parameters"][0], x["Parameters"][1], x["Parameters"][2], x["Parameters"][3], x["Parameters"][4] ))
f.write('arc_offset = [ 0, %.10f, %.10f, %.10f, %.10f, %.10f, 0 ]\n' % ( x["Parameters"][5], x["Parameters"][6], x["Parameters"][7], x["Parameters"][8], x["Parameters"][9] ))
# Run Aphros for this sample
sampleOutFile = open('sample.out', 'w')
sampleErrFile = open('sample.err', 'w')
subprocess.call(['bash', 'run.sh'], stdout=sampleOutFile, stderr=sampleErrFile)
# Loading results from file
resultFile = 'objectives'
try:
with open(resultFile) as f:
resultContent = f.read()
except IOError:
print("[Korali] Error: Could not load result file: " + resultFile)
exit(1)
# Printing resultContent
print(resultContent)
# Parsing output into a python dict
resultDict = eval(resultContent)
# Declaring objective value as -inf, for the case of an invalid evaluation
objectiveValue = float('-inf')
# If sample is valid, evaluating result based on objective
if (resultDict['valid'] == True):
if (objective == 'minNumCoal'):
objectiveValue = -float(resultDict['max_volume'])
if (objective == 'maxNumCoal'):
objectiveValue = float(resultDict['max_volume'])
if (objective == 'maxMeanVel'):
objectiveValue = -float(resultDict['max_time'])
# Assigning objective function value
x["F(x)"] = objectiveValue
# Move back to the base directory
os.chdir( CurrentDirectory )
```
#### File: CUP2D/optimal-transport/plotSample.py
```python
import numpy as np
import matplotlib.pyplot as plt
def logDivision(start, end, nvertices):
vertices = np.zeros(nvertices)
for idx in range(nvertices):
vertices[idx] = np.exp(idx / (nvertices - 1.0) * np.log(end-start+1.0)) - 1.0 + start
return vertices
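# Quick illustration of the log-spaced control points produced above: logDivision(0.2, 0.8, 3)
# returns approximately [0.2, 0.465, 0.8] - the endpoints are exact and the interior vertices
# are spaced logarithmically between them.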
sample1 = [0.0029774214492784834, 0.0012545252423366115, 0.0022471337578267294, 0.0004487711837245371]
sample2 = [0.004315617345056653, 0.002357595158866241, 0.00045192343087713835, 0.003751587911891705, 0.00029270425844050757, 0.001791944628598642, 0.0006044968601873586, 0.0005902863745657632]
#sample1 = [0.009899837203413772, 0.006042697786444769, 0.007737424752668662, 0.004035855841072438]
#sample2 = [0.009963723478185877, 0.009494772171610993, 0.007958669389234783, 0.004936448245926574, 0.005157520884244215, 0.007214823809055573, 0.004063898057291126, 0.0060274437185140455]
sample1 = [0.0] + sample1
sample2 = [0.0] + sample2
vertices1 = logDivision(0.2, 0.8, len(sample1))
vertices2 = logDivision(0.2, 0.8, len(sample2))
print(vertices1)
plt.title('Energy 0.001')
#plt.title('Energy 0.004')
plt.step(vertices1, sample1, label='D = 4 (T = 6.09)')
plt.step(vertices2, sample2, label='D = 8 (T = 6.00)')
#plt.step(vertices1, sample1, label='D = 4 (T = 3.07)')
#plt.step(vertices2, sample2, label='D = 8 (T = 3.11)')
plt.legend()
plt.xlabel('Location')
plt.ylabel('Force')
plt.ylim(0.0, 0.012)
plt.savefig('params.png')
```
#### File: CUP2D/windmills/process-results.py
```python
import numpy as np
# ---------- Accessing files ----------
path_to_test = '_results_windmill_testing/'
folders_test = ['both', 'energy_zero', 'flow_zero_4', 'flow_zero_3', 'uncontrolled']
output = '_results_test/'
def genResults(foldername):
# force and velocity values go from 0 to num_windmills - 1
num_windmills = 2
values_name = 'values_'
vels_name = 'targetvelocity_'
rew_name = 'rewards_'
# contains link to all the sample folders in the test results
num_trials = 64
sample = 'sample000000'
folders = [path_to_test + foldername + "/" + sample + "{:02d}".format(trial) + '/' for trial in range(num_trials)]
# shape num_trials x num_windmills
# file names
value_files = [ [folder_ + values_name + str(num) + '.dat' for num in range(num_windmills)] for folder_ in folders]
vel_files = [ [folder_ + vels_name + str(num) + '.dat' for num in range(num_windmills)] for folder_ in folders]
rew_files = [ [folder_ + rew_name + str(num) + '.dat' for num in range(num_windmills)] for folder_ in folders]
dat = np.genfromtxt(value_files[0][0], delimiter=' ')
num_steps = dat.shape[0]
print(num_steps)
num_el = dat.shape[1]
# arrays
values = np.zeros((num_trials, num_windmills, num_steps, num_el))
vels = np.zeros((num_trials, num_steps, 2))
rewards = np.zeros((num_trials, num_windmills, num_steps, 3))
# factor to non dimensionalize the torque
u = 0.15
a = 0.0405
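# Illustrative note (interpretation assumed from context): with u = 0.15 and a = 0.0405, times
# are rescaled by u/a (about 3.7), torques by 1/(u^2 * a^2) and angular velocities by a/u, so the
# saved quantities are dimensionless with respect to the flow speed u and characteristic length a.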
for trial in range(num_trials):
for mill in range(num_windmills):
data = np.genfromtxt(value_files[trial][mill], delimiter=' ')
# nondimensionalize the time
data[:, 0] *= u / a
# nondimensionalize the torque
data[:, 1] /= (u**2 * a**2)
# nondimensionalize the angular velocity
data[:, 3] *= a / u
print(data.shape)
values[trial, mill, :, : ] = data
reward = np.genfromtxt(rew_files[trial][mill], delimiter=' ')
# nondimensionalize the time
reward[:, 0] *= u / a
rewards[trial, mill, :, :] = reward
velo = np.genfromtxt(vel_files[trial][0], delimiter=' ')
# nondimensionalize the time
velo[:, 0] *= u / a
vels[trial, :, :] = velo
# compute the interesting values
# time, action, state (std and mean) for both windmills
# fan 1
tau_mean_0 = np.mean(values[:,0,:, 1], axis=0)
tau_std_0 = np.std(values[:,0,:, 1], axis=0)
ang_mean_0 = np.mean(values[:,0, :, 2], axis=0)
ang_std_0 = np.std(values[:,0, :, 2], axis=0)
ang_vel_mean_0 = np.mean(values[:,0, :, 3], axis=0)
ang_vel_std_0 = np.std(values[:,0, :, 3], axis=0)
# fan 2
tau_mean_1 = np.mean(values[:,1,:, 1], axis=0)
tau_std_1 = np.std(values[:,1,:, 1], axis=0)
ang_mean_1 = np.mean(values[:,1, :, 2], axis=0)
ang_std_1 = np.std(values[:,1, :, 2], axis=0)
ang_vel_mean_1 = np.mean(values[:,1, :, 3], axis=0)
ang_vel_std_1 = np.std(values[:,1, :, 3], axis=0)
# first element is time
out = np.stack( (values[0, 0, :, 0], tau_mean_0, tau_std_0, tau_mean_1, tau_std_1,
ang_mean_0, ang_std_0, ang_mean_1, ang_std_1,
ang_vel_mean_0, ang_vel_std_0, ang_vel_mean_1, ang_vel_std_1), axis=1)
np.save(output + foldername + "_values.npy", out)
# velocity at target point vs time (mean and std)
vels_mean = np.mean(vels, axis=0)
vels_std = np.std(vels, axis=0)
out2 = np.stack( (vels_mean[:, 0], vels_mean[:, 1], vels_std[:, 1]), axis=1)
np.save(output + foldername + "_vels.npy", out2)
# rewards vs time for the two fans
en_mean_0 = np.mean(rewards[:,0,:, 1], axis=0)
en_std_0 = np.std(rewards[:,0,:, 1], axis=0)
flow_mean_0 = np.mean(rewards[:,0, :, 2], axis=0)
flow_std_0 = np.std(rewards[:,0, :, 2], axis=0)
en_mean_1 = np.mean(rewards[:,1,:, 1], axis=0)
en_std_1 = np.std(rewards[:,1,:, 1], axis=0)
flow_mean_1 = np.mean(rewards[:,1, :, 2], axis=0)
flow_std_1 = np.std(rewards[:,1, :, 2], axis=0)
out3 = np.stack( (rewards[0, 0, :, 0], en_mean_0, en_std_0, en_mean_1, en_std_1,
flow_mean_0, flow_std_0, flow_mean_1, flow_std_1), axis=1)
np.save(output + foldername + "_rews.npy", out3)
for ind, folder in enumerate(folders_test):
genResults(folder)
```
#### File: RBCRelax/model/simulation.py
```python
import os
constants = {
# physical units
"kBoltz" : 1.38064852e-23, # Boltzmann constant, J/K
"rho" : 1000., # water density, kg/m^3
"Ndumps" : 250, # dumber of dumps
# numerical approximations
"Cdt" : 0.25, # time step constraint coefficient: sonic and acceleration terms
"Cdt_visc" : 0.125, # time step constraint coefficient: viscous term
"Gka" : 4.0e-3, # numerical rbc global area constraint, J / m^2
"Gkv" : 4.0e4, # numerical rbc global volume constraint, J / m^3
"a3" : -2.0, # higher order stretch coefficients, LWM strain law
"a4" : 8.0, # higher order stretch coefficients, LWM strain law
"b1" : 0.7, # higher order shear coefficients, LWM strain law
"b2" : 0.75, # higher order shear coefficients, LWM strain law
# simulation units
"rc" : 1.0, # cutoff radius (DPD units, L)
"mass" : 1, # mass of a DPD particle, (DPD units, M)
"nd" : 10, # number density (DPD units, 1/L**3)
"AIJ" : 50, # dimensionless DPD compressibility: aij * rc / kBT
"kpow" : 0.125, # DPD weight function exponent, dimensionless
"R0" : 4.0, # effective cell radius in DPD length units
"rho0" : 10, # density used in viscosity surrogate (DPD units, M/L^3)
"correctfreq" : 1000, # number of timesteps for bounce correction
"statsfreq" : 10000, # number of timesteps for bounce correction
"safety" : 1.5, # safety factor for domain size
"dtmax" : 5.e-3 # maximum dt allowed
}
def simulation(u, plasma_par, hemogl_par, rbc_par, mesh_par, sim_par, ply_dir):
import mirheo as mir
"""
u : Mirheo object
plasma_par : Parameters for outer solvent
hemogl_par : Parameters for inner solvent
rbc_par : Parameters for rbc mechanical properties
mesh_par : Parameters for rbc mesh
sim_par : Parameters related to simulation setup
"""
logfile = open(ply_dir + "config.txt", "a")
if u.isComputeTask()==True:
logfile.write("\n~~~ Simulation parameters:")
logfile.write("\n" + str(sim_par))
logfile.write("\n~~~ RBC mechanical properties:")
logfile.write("\n" + str(rbc_par))
logfile.write("\n~~~ Plasma parameters:")
logfile.write("\n" + str(plasma_par))
logfile.write("\n~~~ Hemoglobin parameters:")
logfile.write("\n" + str(hemogl_par))
# ~~~ Outer solvent: plasma
plasma_pv = mir.ParticleVectors.ParticleVector('outer', mass = plasma_par['mass'])
ic_plasma = mir.InitialConditions.Uniform(number_density = plasma_par['nd'])
u.registerParticleVector(pv=plasma_pv, ic=ic_plasma)
# ~~~ RBC mesh
mesh_ini = mesh_par['ini']
mesh_ref = mesh_par['ref']
mesh_rbc = mir.ParticleVectors.MembraneMesh(mesh_ini.vertices.tolist(), mesh_ref.vertices.tolist(), mesh_ini.faces.tolist())
rbc_pv = mir.ParticleVectors.MembraneVector('rbc', mass=mesh_par['mass'], mesh=mesh_rbc)
ic_rbc = mir.InitialConditions.Membrane([[sim_par['domain'][0]*0.5, sim_par['domain'][1]*0.5, sim_par['domain'][2]*0.5, 1.0, 0.0, 0.0, 0.0]])
u.registerParticleVector(pv=rbc_pv, ic=ic_rbc)
# ~~~ Inner solvent
checker = mir.BelongingCheckers.Mesh('checker')
u.registerObjectBelongingChecker(checker, rbc_pv)
hemogl_pv = u.applyObjectBelongingChecker(checker=checker, pv=plasma_pv, inside='inner', correct_every=sim_par['correctfreq'])
# ~~~ Bouncer
bouncer = mir.Bouncers.Mesh("bounce_rbc", "bounce_maxwell", kBT=0.0)
u.registerBouncer(bouncer)
# ~~~ Interactions
dpd_int = mir.Interactions.Pairwise('dpd', rc=plasma_par['rc'], kind='DPD', a=plasma_par['alpha'], gamma=plasma_par['gamma'], kBT=plasma_par['kbt'], power=plasma_par['kpow'])
rbc_int = mir.Interactions.MembraneForces("int_rbc", **rbc_par, stress_free=True)
u.registerInteraction(dpd_int)
u.registerInteraction(rbc_int)
vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
subvv = mir.Integrators.SubStep('subvv', sim_par['substeps'], [rbc_int])
u.registerIntegrator(subvv)
if u.isComputeTask():
dpd_int.setSpecificPair(rbc_pv, plasma_pv, a=0, gamma=sim_par['gfsi_o'])
dpd_int.setSpecificPair(rbc_pv, hemogl_pv, a=0, gamma=sim_par['gfsi_i'])
dpd_int.setSpecificPair(hemogl_pv, plasma_pv, gamma=0, kBT=0)
dpd_int.setSpecificPair(hemogl_pv, hemogl_pv, gamma=hemogl_par['gamma'])
u.setInteraction(dpd_int, plasma_pv, plasma_pv)
u.setInteraction(dpd_int, hemogl_pv, plasma_pv)
u.setInteraction(dpd_int, rbc_pv, plasma_pv)
u.setInteraction(dpd_int, hemogl_pv, hemogl_pv)
u.setInteraction(dpd_int, rbc_pv, hemogl_pv)
# ~~~ Integration
u.setIntegrator(vv, hemogl_pv)
u.setIntegrator(vv, plasma_pv)
u.setIntegrator(subvv, rbc_pv)
# ~~~ Membrane bounce
u.setBouncer(bouncer, rbc_pv, hemogl_pv)
u.setBouncer(bouncer, rbc_pv, plasma_pv)
# ~~~ Dumps
logfile.write('Saving results to: ' + ply_dir)
logfile.write('Current Path: ' + os.getcwd())
u.registerPlugins(mir.Plugins.createDumpMesh('rbcs', rbc_pv, dump_every=sim_par['dumpfreq'], path = ply_dir))
logfile.close()
# ~~~ Run
u.run(sim_par['iend'])
def run_korali( comm_address, gammaC, NDATA, TEND, ini_mesh_fname, ref_mesh_fname, ply_dir, verbose=False, dryrun=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Constants
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
lm = 10.
factor = 0.005
kT_s = 0.02
T_p = factor * (23.+273.)
etai_p = factor * 10.e-3
etao_p = etai_p / lm
mu_p = factor * 2.5e-6
Ka_p = factor * 5.e-6
kb_p = factor * 2.e-19
area_p = 140.e-12
volume_p = 100.e-18
run( comm_address, gammaC, NDATA, TEND, kT_s, T_p, etao_p, etai_p, mu_p, Ka_p, kb_p, area_p, volume_p, verbose, dryrun, ini_mesh_fname, ref_mesh_fname, ply_dir )
def run( comm_address, gammaC, NDATA, TEND, kT_s, T_p, etao_p, etai_p, mu_p, Ka_p, kb_p, area_p, volume_p, verbose, dryrun, ini_mesh_fname, ref_mesh_fname, ply_dir='ply/'):
import trimesh
import numpy as np
from pint import UnitRegistry
import mirheo as mir
import dpdParams as D
import rbcParams as R
logfile = open(ply_dir + "config.txt", "a")
ureg = UnitRegistry()
def effective_radius_from_area(A):
return np.sqrt(A / (4.0 * np.pi))
@ureg.wraps(None, ureg.dimensionless)
def to_sim(a):
return a
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Physical values (_p), in SI units
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A_p = area_p * ureg.m**2
V_p = volume_p * ureg.m**3
R_p = effective_radius_from_area(A_p)
mu_p = mu_p * ureg.N / ureg.m
Ka_p = Ka_p * ureg.N / ureg.m
kb_p = kb_p * ureg.J
kB_p = constants.get('kBoltz') * ureg.J / ureg.K
T_p = T_p * ureg.K
kT_p = kB_p * T_p
Gka_p = constants.get('Gka') * ureg.J / ureg.m**2
Gkv_p = constants.get('Gkv') * ureg.J / ureg.m**3
a3 = constants.get('a3')
a4 = constants.get('a4')
b1 = constants.get('b1')
b2 = constants.get('b2')
rho_p = constants.get('rho') * ureg.kg / ureg.m**3
etai_p = etai_p * ureg.Pa * ureg.s
etao_p = etao_p * ureg.Pa * ureg.s
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Non-dimensional numbers:
#
# FvK_p : Foeppl-von-Karman number in healthy cells
# fKa_p : Ka / mu
# lm_p : ratio between inner and outer viscosity
# FGKa_p : dimensionless area constraint coefficient
# FGKv_p : dimensionless volume constraint coefficient
# Ftherm : ratio between bending modulus and thermal energy
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
FvK_p = mu_p * R_p*R_p / kb_p
fKa_p = Ka_p / mu_p
FGKa_p = Gka_p * R_p**2 / kb_p
FGKv_p = Gkv_p * R_p**3 / kb_p
Ftherm = kb_p / kT_p
lm_p = etai_p / etao_p
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Simulation (_s)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Simulation length of rbc
suggested_radius = constants.get('R0')
# Scale initial mesh to suggested radius
ini = trimesh.load(ini_mesh_fname, process=False)
ref = trimesh.load(ref_mesh_fname, process=False)
original_radius = effective_radius_from_area(ini.area)
rbc_scaling = suggested_radius / original_radius
ini.vertices *= rbc_scaling
ref.vertices *= rbc_scaling
# set length scale (R_s)
Nv = len(ini.vertices)
Nv_ref = len(ref.vertices)
A_s = ini.area
V_s = ini.volume
R_s = suggested_radius
# set mass scale (mass_s)
nd = constants.get('nd')
mass_s = constants.get('mass')
rho_s = mass_s * nd
# rbc mass, assume membrane is 2D surface
M_s = rho_s * A_s
mmass_s = M_s / Nv
# set time scale (based on kBT)
kT_s = kT_s
# unit scalings
L_UNIT = R_p / R_s
M_UNIT = rho_p / rho_s * (L_UNIT**3)
T_UNIT = np.sqrt( kT_s/kT_p * L_UNIT**2 * M_UNIT )
F_UNIT = M_UNIT * L_UNIT / T_UNIT**2
E_UNIT = F_UNIT * L_UNIT
VISC_UNIT = F_UNIT / L_UNIT**2 * T_UNIT
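# with these scalings a simulation quantity q_s maps to its physical counterpart
# via q_p = q_s * Q_UNIT (e.g. R_p = R_s * L_UNIT, kT_p = kT_s * E_UNIT);
# the "Checks" written to the logfile below verify that these ratios equal 1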
# Numerical parameters
AIJ = constants.get('AIJ')
rc = constants.get('rc')
rho0_s = constants.get('rho0')
aij = AIJ * kT_s / rc
cs_s = D.get_Cs_(aij, nd, mass_s, kT_s)
kpow = constants.get('kpow')
kb_s = to_sim(Ftherm * kT_s)
mu_s = to_sim(FvK_p * kb_s / (R_s**2))
Ka_s = to_sim(fKa_p * mu_s)
kade_s = 0. # use Minimum rbc model
DA0D_s = 0.
C0_s = 0.
Gka_s = to_sim(FGKa_p * kb_s / (R_s**2))
Gkv_s = to_sim(FGKv_p * kb_s / (R_s**3))
kT_rbc = kT_s
etao_s = to_sim(etao_p / VISC_UNIT)
etai_s = to_sim(lm_p * etao_s)
nuo_s = etao_s / rho_s
nui_s = etai_s / rho_s
gij = D.get_gij(aij, nuo_s*rho0_s)
gin = D.get_gij(aij, nui_s*rho0_s)
gfsi_o = R.get_gammafsiDPD(nuo_s*rho0_s, kpow, A_s, Nv, nd, rc)
gfsi_i = R.get_gammafsiDPD(nui_s*rho0_s, kpow, A_s, Nv, nd, rc)
gT = 0.
gC = gammaC
etam_s = np.sqrt(3)/4. * gC # approximation, Fedosov2010
FvK_s = mu_s * R_s**2 / kb_s
Ftherm_s = kb_s / kT_s
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Timestep estimation
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~ Solvent timestep:
# computed based on the timesteps
# from sonic, viscous and acceleration
# constraints as defined in Morris1997
LX_min = min(ini.vertices[:,0]); LX_max = max(ini.vertices[:,0]); DX = LX_max - LX_min
LX_min = min(ref.vertices[:,0]); LX_max = max(ref.vertices[:,0]); DX0 = LX_max - LX_min
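# characteristic cell velocity: half the difference in x-extent between the
# initial and reference meshes, relaxed over 0.2 s (converted to simulation time)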
vcell_s = 0.5*(DX-DX0) / to_sim(0.2*ureg.s / T_UNIT)
h = D.interparticle_distance(nd)
Cdt = constants.get('Cdt')
Cdt_visc = constants.get('Cdt_visc')
dtmax = constants.get('dtmax')
dt_sonic = D.get_dt_sonic(h, cs_s, C=Cdt)
dt_visc = D.get_dt_viscous(h, max([etao_s, etai_s]), rho_s, C=Cdt_visc)
Fdpd = D.get_total_dpd_force(nd, rc, aij, max([gfsi_o, gfsi_i, gij, gin]), vcell_s, kT_s, min([dt_sonic, dt_visc]))
dt_acc = D.get_dt_accel_(h, Fdpd, mass_s, C=Cdt)
dt_fluid = min([dtmax, dt_sonic, dt_visc, dt_acc])
dt = dt_fluid
# ~ Membrane substeps:
# Computed based on the timesteps
# from viscous and acceleration
# constraints as defined in Morris1997
hm = R.intervertex_distance(A_s, Nv)
Fdpd = D.get_total_dpd_force(nd, rc, aij, max([gfsi_o, gfsi_i]), vcell_s, kT_s, dt_fluid)
Fintern = (mu_s+Ka_s)*hm + (kb_s+kade_s)/hm + Gka_s*hm + Gkv_s*hm**2
dt_m_acc = D.get_dt_accel_(hm, Fdpd+Fintern, mmass_s, C=Cdt)
dt_m_visc = D.get_dt_viscous(hm, max([etao_s, etai_s, etam_s/hm]), rho_s, C=Cdt_visc)
dt_rbc = min([dt_m_visc, dt_m_acc])
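# number of membrane sub-steps per fluid time step (rounded to nearest integer, capped at 100)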
substeps = min(int(dt_fluid/dt_rbc + 0.5), 100)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Checking/Dumping frequencies
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tend_p = TEND * ureg.s
Ndumps = NDATA
tdump_s = to_sim(tend_p/(Ndumps-1)/T_UNIT)
dumpfreq = int(tdump_s/dt+0.5)
tdump_s = dumpfreq * dt
iend = (Ndumps-1)*dumpfreq + 1
correctfreq = constants.get('correctfreq')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Domain, ranks
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
safety = constants.get('safety')
LX_min = min(ini.vertices[:,0])
LX_max = max(ini.vertices[:,0])
LX = int(safety*(LX_max - LX_min)+0.5)
LY_min = min(ref.vertices[:,1])
LY_max = max(ref.vertices[:,1])
LY = int(safety*(LY_max - LY_min)+0.5)
LZ_min = min(ini.vertices[:,2])
LZ_max = max(ini.vertices[:,2])
LZ = int(safety*(LZ_max - LZ_min)+0.5)
LX = LX + np.mod(LX,2)
LY = LY + np.mod(LY,2)
LZ = LZ + np.mod(LZ,2)
domain = (LX, LY, LZ)
ranks = (1, 1, 1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Group parameters
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sim = {
"domain" : domain,
"dumpfreq" : dumpfreq,
"correctfreq" : correctfreq,
"substeps" : substeps,
"iend" : iend,
"gfsi_o" : gfsi_o,
"gfsi_i" : gfsi_i,
}
plasma = D.set_dpd_params(aij, gij, kT_s, kpow, rc, nd, mass_s)
hemogl = D.set_dpd_params(aij, gin, kT_s, kpow, rc, nd, mass_s)
rbc = R.set_rbc_params_ade_lwm(Gka_s, Gkv_s, A_s, V_s, gT, gC, kT_rbc, Ka_s, a3, a4, mu_s, b1, b2, kb_s, C0_s, kade_s, DA0D_s)
mesh = R.set_mesh_params(ini, ref, Nv, mmass_s)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Start simulation
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# check debug_level (DW)
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename=ply_dir + 'log', comm_ptr=comm_address, no_splash=True)
if u.isComputeTask() and verbose:
logfile.write("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
logfile.write("\n~~~ Physical Units ~~~")
logfile.write("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
logfile.write("\n>> Non-dimensional quantities:")
logfile.write("\nFvK :" + str(FvK_p))
logfile.write("\nfKa :" + str(fKa_p))
logfile.write("\nFGKa_p :" + str(FGKa_p))
logfile.write("\nFGKv_p :" + str(FGKv_p))
logfile.write("\nFtherm :" + str(Ftherm))
logfile.write("\n>> Dimensional quantities:")
logfile.write("\nmu_p:" + str(mu_p))
logfile.write("\nKa_p:" + str(Ka_p))
logfile.write("\nA_p:" + str(A_p))
logfile.write("\netao_p:" + str(etao_p))
logfile.write("\netai_p:" + str(etai_p))
logfile.write("\nRe_p:" + str(vcell_s*L_UNIT/T_UNIT * R_p * rho_p / etao_p))
logfile.write("\nMa_p:" + str(vcell_s*L_UNIT/T_UNIT / (1500. * ureg.m / ureg.s)))
logfile.write("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
logfile.write("\n~~~ Simulation Units ~~~")
logfile.write("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
logfile.write("\n>> Non-dimensional quantities:")
logfile.write("\nFvK :" + str(FvK_s))
logfile.write("\nFtherm:" + str(Ftherm_s))
logfile.write("\n>> Dimensional quantities:")
logfile.write("\nR_s:" + str(R_s))
logfile.write("\nA_s:" + str(A_s))
logfile.write("\nV_s:" + str(V_s))
logfile.write("\nkT_s:" + str(kT_s))
logfile.write("\nmu_s:" + str(mu_s))
logfile.write("\nKa_s:" + str(Ka_s))
logfile.write("\nkb_s:" + str(kb_s))
logfile.write("\nGkv_s:" + str(Gkv_s))
logfile.write("\nGka_s:" + str(Gka_s))
logfile.write("\naij:" + str(aij))
logfile.write("\ngij:" + str(gij))
logfile.write("\ngin:" + str(gin))
logfile.write("\ngC:" + str(gC))
logfile.write("\ngT:" + str(gT))
logfile.write("\netao_s:" + str(etao_s))
logfile.write("\netai_s:" + str(etai_s))
logfile.write("\netam_s:" + str(etam_s))
logfile.write("\nmmass_s:" + str(mmass_s))
logfile.write("\nvcell_s:" + str(vcell_s))
logfile.write("\nNdumps:" + str(Ndumps))
logfile.write("\ntend:" + str(TEND))
logfile.write("\ndt:" + str(dt))
logfile.write("\ndt_visc:" + str(dt_visc))
logfile.write("\ndt_acc:" + str(dt_acc))
logfile.write("\ndt_m_visc:" + str(dt_m_visc))
logfile.write("\ndt_m_acc:" + str(dt_m_acc))
logfile.write("\nsubsteps:" + str(substeps))
logfile.write("\ndumpfreq:" + str(dumpfreq))
logfile.write("\ndumptime:" + str(tdump_s))
logfile.write("\niend:" + str(iend))
logfile.write("\ndomain:" + str(domain))
logfile.write("\nRe_s:" + str(vcell_s * R_s * rho_s / etao_s))
logfile.write("\nMa_s:" + str(vcell_s / cs_s))
logfile.write("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
logfile.write("\n~~~ Unit Transforms ~~~")
logfile.write("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
logfile.write("\nL_UNIT:" + str(L_UNIT.to_base_units() ))
logfile.write("\nT_UNIT:" + str(T_UNIT.to_base_units() ))
logfile.write("\nM_UNIT:" + str(M_UNIT.to_base_units() ))
logfile.write("\nChecks:")
logfile.write("\nR_s/R_p: " + str((R_s*L_UNIT/R_p)) )
logfile.write("\nkT_s/kT_p: " + str((kT_s*E_UNIT/kT_p)) )
logfile.write("\nkb_s/kb_p: " + str((kb_s*E_UNIT/kb_p)) )
logfile.write("\netao_s/etao_p: " + str((etao_s*VISC_UNIT/etao_p)) )
logfile.close()
if not dryrun:
simulation(u, plasma, hemogl, rbc, mesh, sim, ply_dir)
del u
del plasma
del hemogl
del rbc
del mesh
del sim
```
#### File: RBCStretch/model/model.py
```python
import numpy as np
import sys, os
import korali
this_dir, this_filename = os.path.split(__file__)
data_dir = os.path.join(this_dir, "data")
mesh_fname = os.path.join(data_dir, "1986.scaled.off")
data_fname = os.path.join(data_dir, "Mills.2004.txt")
# Stretch Cell for given Parameters (x0, ks) and Force
def run_one_force(comm_address, force, x0, ks):
import mirheo as mir
import trimesh
toolsPath = os.path.join(this_dir, "tools")
#print(toolsPath)
sys.path.append(toolsPath)
import stretchForce as stretch
import diameters
from rbc_params import set_rbc_params
from rbc_params import print_rbc_params
def original_diameter(mesh):
pos = np.array(mesh.vertices, copy=True)
(D0, D1) = diameters.computeDiameters(pos)
return 0.5 * (D0 + D1)
tend = 50.0
dt = 0.001
mesh = trimesh.load_mesh(mesh_fname)
safety = 3
box_lo = np.amin(mesh.vertices, axis=0)
box_hi = np.amax(mesh.vertices, axis=0)
domain = (box_hi - box_lo) * safety + np.ones(3)
domain = tuple(np.array(domain, dtype=int))
ranks = (1, 1, 1)
u = mir.Mirheo(ranks, domain, dt, debug_level=0, log_filename='stderr', comm_ptr=comm_address, no_splash=True)
mesh_rbc = mir.ParticleVectors.MembraneMesh(mesh.vertices.tolist(), mesh.faces.tolist())
pv_rbc = mir.ParticleVectors.MembraneVector("rbc", mass=1.0, mesh=mesh_rbc)
ic_rbc = mir.InitialConditions.Membrane([[domain[0] * 0.5,
domain[1] * 0.5,
domain[2] * 0.5,
1.0, 0.0, 0.0, 0.0]])
u.registerParticleVector(pv_rbc, ic_rbc)
# mu_ph = 2.5 # [micro N / m]
force_scale = 0.025 # [pN]
force = force / force_scale
D0 = original_diameter(mesh)
D0_ph = 7.739 # [micro m]
prms_rbc = set_rbc_params(mesh, x0, ks)
prms_rbc["gammaT"] = 1.0 # for killing oscillations
int_rbc = mir.Interactions.MembraneForces("int_rbc", **prms_rbc, stress_free=True)
vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
u.setIntegrator(vv, pv_rbc)
u.registerInteraction(int_rbc)
u.setInteraction(int_rbc, pv_rbc, pv_rbc)
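# pull on ~10% of the vertices (5% at each cell end) and convert the total
# stretching force into a per-vertex force (0.5*n vertices carry it on each side)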
fraction = 0.05 * 2
n = int(len(mesh.vertices) * fraction)
force = force / (0.5 * n)
forces = stretch.computeForces(mesh.vertices, fraction, force).tolist()
u.registerPlugins(mir.Plugins.createMembraneExtraForce("stretchForce", pv_rbc, forces))
u.registerPlugins(mir.Plugins.createStats('stats', "stats.txt", 1000))
u.run(int(tend/dt))
Dshort = 0
Dlong = 0
if u.isMasterTask():
rbc_pos = pv_rbc.getCoordinates()
(Dlong, Dshort) = diameters.computeDiameters(rbc_pos)
# make it dimensional again
Dlong = (Dlong / D0) * D0_ph
Dshort = (Dshort / D0) * D0_ph
del u
del vv
del int_rbc
del prms_rbc
del mesh_rbc
del pv_rbc
del ic_rbc
return Dshort, Dlong
# Evaluate Stretching Experiments
def evaluate( korali_obj, forces, comm_address, verbose = False ):
x0 = korali_obj["Parameters"][0]
ks = korali_obj["Parameters"][1]
diam_lo_sim = np.zeros(len(forces))
diam_hi_sim = np.zeros(len(forces))
for i, f in enumerate(forces):
if verbose:
print("in: f {0}, x0 {1}, ks {2}".format(f, x0, ks))
sys.stdout.flush()
(dlo, dhi) = run_one_force(comm_address, f, x0, ks)
if verbose:
print("out: f {0}, dlo {1}, dhi {2}".format(f, dlo, dhi))
sys.stdout.flush()
diam_lo_sim[i] = dlo
diam_hi_sim[i] = dhi
result = []
for i in range(len(diam_lo_sim)):
result.append(diam_lo_sim[i])
for i in range(len(diam_hi_sim)):
result.append(diam_hi_sim[i])
korali_obj["Reference Evaluations"] = result
```
#### File: model/tools/rbc_params.py
```python
import numpy as np
# ------ HELPER FUNCTIONS ------ #
def sq(x):
return np.sqrt(x)
def calc_l0(A,Nv):
return sq(A*4./(2*Nv-4)/sq(3))
def calc_kp(l0,lm,ks,m):
return (6*ks*pow(l0,(m+1))*pow(lm,2) - 9*ks*pow(l0,(m+2))*lm + 4*ks*pow(l0,(m+3))) / (4*pow(lm,3)-8*l0*pow(lm,2)+4*pow(l0,2)*lm)
def calc_mu0(x0,l0,ks,kp,m):
return sq(3)*ks/(4.*l0) * (x0/(2.*pow((1-x0),3)) - 1./(4.*pow((1-x0),2)) + 1./4) + sq(3)*kp*(m+1)/(4.*pow(l0,(m+1)))
# -------- COMPUTE QoIs -------- #
def compute_mu(m, x0, ks, A, Nv):
l0 = calc_l0(A,Nv)
lm = l0/x0
kp = calc_kp(l0, lm, ks, m)
return calc_mu0(x0, l0, ks, kp, m)
def compute_mu_over_ks(m, x0, A, Nv):
return compute_mu(m, x0, 1.0, A, Nv)
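# Note: mu depends linearly on ks (both calc_kp and calc_mu0 scale with ks),
# so the ratio mu/ks is fixed by (m, x0) and the mesh geometry alone.
# Illustrative (hypothetical) usage:
#   ratio = compute_mu_over_ks(2.0, 0.48, mesh.area, len(mesh.vertices))
#   mu = ks * ratio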
# from optimal UQ results
def set_rbc_params(mesh, x0, ks):
m = 2.0
Nv = len(mesh.vertices)
A = mesh.area
V = mesh.volume
# max likelihood estimate from Athena UQ, stretching
# x0 = 0.484972142576
# ks = 22.6814565515
kb = 1.0
prms = {
"tot_area" : A,
"tot_volume" : V,
"ka_tot" : 4900.0,
"kv_tot" : 7500.0,
"kBT" : 0.0,
"gammaC" : 52.0,
"gammaT" : 0.0,
"shear_desc": "wlc",
"ka" : 5000,
"x0" : x0,
"mpow" : m,
"ks" : ks,
"bending_desc" : "Kantor",
"theta" : 0.0,
"kb" : kb
}
mu = compute_mu(m, x0, ks, A, Nv)
return prms
# from Fedosov params
def set_rbc_params0(prms, prms_bending, mesh):
m = 2.0
Nv = len(mesh.vertices)
A = mesh.area
V = mesh.volume
# max likelihood estimate from Athena UQ, stretching
x0 = 1.0/2.2
ks = 35.429323407939094
kb = 27.105156961709344
prms = {
"tot_area" : A,
"tot_volume" : V,
"ka_tot" : 4900.0,
"kv_tot" : 7500.0,
"kBT" : 0.0,
"gammaC" : 52.0,
"gammaT" : 0.0,
"shear_desc": "wlc",
"ka" : 5000,
"x0" : x0,
"mpow" : m,
"ks" : ks,
"bending_desc" : "Kantor",
"theta" : 0.0,
"kb" : kb
}
mu = compute_mu(m, x0, ks, A, Nv)
return prms
def print_rbc_params(p):
print("A = {}".format(p.totArea))
print("V = {}".format(p.totVolume))
print("x0 = {}".format(p.x0))
print("kb = {}".format(pb.kb))
print("ks = {}".format(p.ks))
print("m = {}".format(p.mpow))
```
#### File: korali/rlview/__main__.py
```python
import os
import sys
import signal
import json
import argparse
import time
import matplotlib
import importlib
import math
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
from korali.plot.helpers import hlsColors, drawMulticoloredLine
from scipy.signal import savgol_filter
# Check if name has correct suffix
def validateOutput(output):
if not (output.endswith(".png") or output.endswith(".eps") or output.endswith(".svg")):
print("[Korali] Error: Outputfile '{0}' must end with '.eps', '.png' or '.svg' suffix.".format(output))
sys.exit(-1)
##################### Plotting Reward History
def plotRewardHistory(ax, dirs, results, minReward, maxReward, averageDepth, maxObservations, showCI, aggregate):
## Setting initial x-axis (episode) and y-axis (reward) limits
maxPlotObservations = -math.inf
maxPlotReward = -math.inf
minPlotReward = +math.inf
## Creating colormap
cmap = matplotlib.cm.get_cmap('brg')
colCurrIndex = 0.0
## Reading the individual results
unpackedResults = []
for resId, r in enumerate(results):
environmentCount = r["Problem"]["Environment Count"]
nAgents = r["Problem"]["Agents Per Environment"]
## Split results for multi envs
envIds = r["Solver"]["Training"]["Environment Id History"]
resultsFolder = dirs[resId]
del dirs[resId]
for envId in range(environmentCount):
dirs.insert(resId+envId, resultsFolder+" env {}".format(envId))
res = {}
res["Solver"] = { "Training" : { "Experience History" : [ exp for (env, exp) in zip (envIds, r["Solver"]["Training"]["Experience History"]) if env == envId ] } }
res["Solver"]["Training"]["Reward History"] = [ rew for (env, rew) in zip(envIds, r["Solver"]["Training"]["Reward History"]) if env == envId ]
cumulativeObsCountHistory = np.cumsum(np.array(res["Solver"]["Training"]["Experience History"])) / nAgents
rewardHistory = np.array(res["Solver"]["Training"]["Reward History"])
# Merge Results
if aggregate == True and len(unpackedResults) > 0:
coH, rH = unpackedResults[0]
aggCumObs = np.append(coH, cumulativeObsCountHistory)
aggRewards = np.append(rH, rewardHistory)
sortedAggRewards = np.array([r for _, r in sorted(zip(aggCumObs, aggRewards), key=lambda pair: pair[0])])
sortedAggCumObs = np.sort(aggCumObs)
unpackedResults[0] = (sortedAggCumObs, sortedAggRewards)
# Append Results
else:
unpackedResults.append( (cumulativeObsCountHistory, rewardHistory) )
## Plotting the individual experiment results
for resId, r in enumerate(unpackedResults):
cumulativeObsArr, rewardHistory = r
currObsCount = cumulativeObsArr[-1]
# Updating common plot limits
if (currObsCount > maxPlotObservations): maxPlotObservations = currObsCount
if (maxObservations): maxPlotObservations = int(maxObservations)
if (min(rewardHistory) < minPlotReward):
if (min(rewardHistory) > -math.inf):
minPlotReward = min(rewardHistory)
if (max(rewardHistory) > maxPlotReward):
if (max(rewardHistory) < math.inf):
maxPlotReward = max(rewardHistory)
# Getting average cumulative reward statistics
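# (running mean via cumulative sums: expanding mean over the first `averageDepth`
# episodes, then a trailing window of length `averageDepth`)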
cumRewards = np.cumsum(rewardHistory)
meanHistoryStart = cumRewards[:averageDepth]/np.arange(1,averageDepth+1)
meanHistoryEnd = (cumRewards[averageDepth:]-cumRewards[:-averageDepth])/float(averageDepth)
meanHistory = np.append(meanHistoryStart, meanHistoryEnd)
confIntervalLowerHistory = None
confIntervalUpperHistory = None
# Calculating confidence intervals
if showCI > 0.0:
confIntervalLowerHistory= [ rewardHistory[0] ]
confIntervalUpperHistory= [ rewardHistory[0] ]
for i in range(1, len(rewardHistory)):
startPos = max(i - averageDepth, 0)
endPos = i
data = rewardHistory[startPos:endPos]
ciLow = np.percentile(data, 50-50*showCI)
ciUp = np.percentile(data, 50+50*showCI)
confIntervalLowerHistory.append(ciLow)
confIntervalUpperHistory.append(ciUp)
confIntervalLowerHistory = np.array(confIntervalLowerHistory)
confIntervalUpperHistory = np.array(confIntervalUpperHistory)
# Plotting common plot
ax.plot(cumulativeObsArr, rewardHistory, 'x', markersize=1.3, color=cmap(colCurrIndex), alpha=0.15, zorder=0)
ax.plot(cumulativeObsArr, meanHistory, '-', color=cmap(colCurrIndex), linewidth=3.0, zorder=1, label=dirs[resId])
# Plotting confidence intervals
if showCI > 0.:
ax.fill_between(cumulativeObsArr, confIntervalLowerHistory, confIntervalUpperHistory, color=cmap(colCurrIndex), alpha=0.2)
# Updating color index
if (len(results) > 1):
colCurrIndex = colCurrIndex + (1.0 / float(len(unpackedResults)-1)) - 0.0001
## Configuring common plotting features
if (minReward): minPlotReward = float(minReward)
if (maxReward): maxPlotReward = float(maxReward)
ax.set_ylabel('Cumulative Reward')
ax.set_xlabel('# Observations')
ax.set_title('Korali RL History Viewer')
ax.yaxis.grid()
ax.set_xlim([0, maxPlotObservations-1])
ax.set_ylim([minPlotReward - 0.1*abs(minPlotReward), maxPlotReward + 0.1*abs(maxPlotReward)])
##################### Results parser
def parseResults(dir):
results = [ ]
for p in dir:
configFile = p + '/latest'
if (not os.path.isfile(configFile)):
print("[Korali] Error: Did not find any results in the {0} folder...".format(p))
exit(-1)
with open(configFile) as f:
data = json.load(f)
results.append(data)
return results
##################### Main Routine: Parsing arguments and result files
if __name__ == '__main__':
# Setting termination signal handler
signal.signal(signal.SIGINT, lambda x, y: exit(0))
# Parsing arguments
parser = argparse.ArgumentParser(
prog='korali.rlview',
description='Plot the results of a Korali Reinforcement Learning execution.')
parser.add_argument(
'--dir',
help='Path(s) to result files, separated by space',
default=['_korali_result'],
required=False,
nargs='+')
parser.add_argument(
'--maxObservations',
help='Maximum observations (x-axis) to display',
type=int,
default=None,
required=False)
parser.add_argument(
'--maxReward',
help='Maximum reward to display',
default=None,
required=False)
parser.add_argument(
'--updateFrequency',
help='Specifies the time (seconds) between live updates to the plot',
default=0.0,
required=False)
parser.add_argument(
'--maxPlottingTime',
help='Specifies the maximum time (seconds) to update the plot for (for testing purposes)',
default=0.0,
required=False)
parser.add_argument(
'--minReward',
help='Minimum reward to display',
default=None,
required=False)
parser.add_argument(
'--averageDepth',
help='Specifies the depth for plotting average',
type=int,
default=100,
required=False)
parser.add_argument(
'--showCI',
help='Option to plot the reward confidence interval.',
type = float,
default=0.0,
required=False)
parser.add_argument(
'--aggregate',
help='Aggregate multiple runs and plot result summary.',
action='store_true')
parser.add_argument(
'--test',
help='Run without graphics (for testing purpose)',
action='store_true',
required=False)
parser.add_argument(
'--output',
help='Indicates the output file path. If not specified, it prints to screen.',
required=False)
args = parser.parse_args()
### Validating input
if args.showCI < 0.0 or args.showCI > 1.0:
print("[Korali] Argument of confidence interval must be in [0,1].")
exit(-1)
if args.output:
validateOutput(args.output)
### Setup without graphics, if needed
if (args.test or args.output):
matplotlib.use('Agg')
### Reading values from result files
results = parseResults(args.dir)
if (len(results) == 0):
print('Error: No result folders have been provided for plotting.')
exit(-1)
### Creating figure(s)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
### Creating plots
plotRewardHistory(ax1, args.dir, results, args.minReward, args.maxReward, args.averageDepth, args.maxObservations, args.showCI, args.aggregate)
plt.legend()
plt.draw()
### Printing live results if update frequency > 0
fq = float(args.updateFrequency)
maxTime = float(args.maxPlottingTime)
if (fq > 0.0):
initialTime = time.time()
while(True):
results = parseResults(args.dir)
plt.pause(fq)
ax1.clear()
plotRewardHistory(ax1, args.dir, results, args.minReward, args.maxReward, args.averageDepth, args.maxObservations, args.showCI, args.aggregate)
plt.draw()
# Check if maximum time exceeded
if (maxTime> 0.0):
currentTime = time.time()
elapsedTime = currentTime - initialTime
if (elapsedTime > maxTime):
print('[Korali] Maximum plotting time reached. Exiting...')
exit(0)
plt.show()
exit(0)
if (args.output is None):
plt.show()
else:
if args.output.endswith('.eps'):
plt.savefig(args.output, format='eps')
elif args.output.endswith('.svg'):
plt.savefig(args.output, format='svg')
else:
plt.savefig(args.output, format='png')
```
#### File: integrator/model/model.py
```python
import os
import sys
import numpy as np
def model_integration(s):
x = s["Parameters"][0]
s["Evaluation"] = x**2
```
#### File: correctness/model/model.py
```python
import math
# Minimum expected at about x = -0.5
def evalmodel(s):
x = s["Parameters"][0]
r = x * x + math.sin(x)
s["F(x)"] = -r
def constraint1(k):
k["F(x)"] = 100.0
def g1(k):
v = k["Parameters"]
k["F(x)"] = -127.0 + 2 * v[0] * v[0] + 3.0 * pow(
v[1], 4) + v[2] + 4.0 * v[3] * v[3] + 5.0 * v[4]
def g2(k):
v = k["Parameters"]
k["F(x)"] = -282.0 + 7.0 * v[0] + 3.0 * v[1] + 10.0 * v[2] * v[2] + v[3] - v[4]
def g3(k):
v = k["Parameters"]
k["F(x)"] = -196.0 + 23.0 * v[0] + v[1] * v[1] + 6.0 * v[5] * v[5] - 8.0 * v[6]
def g4(k):
v = k["Parameters"]
k["F(x)"] = 4.0 * v[0] * v[0] + v[1] * v[1] - 3.0 * v[0] * v[1] + 2.0 * v[
2] * v[2] + 5.0 * v[5] - 11.0 * v[6]
```
#### File: cmaes/model/model.py
```python
import numpy as np
# Helper
def assertclose(expected, value, atol):
assert np.isclose(expected, value, atol = atol), "Value {0} "\
"deviates from expected value {1}".format(value, expected)
# Function to minimize
def minmodel1(p):
x = p["Parameters"][0]
p["F(x)"] = -((x - 2.0) * (x - 2.0) + 10) # fopt = 10.0
# Function to minimize
def minmodel2(p):
x = p["Parameters"][0]
p["F(x)"] = -((x - 2.0) * (x - 2.0) - 1e9) # fopt = -1e9
# Function to maximize
def maxmodel1(p):
x = p["Parameters"][0]
p["F(x)"] = -(x - 2.0) * (x - 2.0) - 10 # fopt = -10
# Function to maximize
def maxmodel2(p):
x = p["Parameters"][0]
p["F(x)"] = -(x - 2.0) * (x - 2.0) + 1e9 # fopt = 1e9
```
#### File: optimizers/termination/dea_termination.py
```python
import os
import sys
import json
import korali
import argparse
sys.path.append('./helpers')
from helpers import *
#################################################
# DEA run method
#################################################
def run_dea_with_termination_criterion(criterion, value):
print("[Korali] Prepare DEA run with Termination Criteria "\
"'{0}'".format(criterion))
e = korali.Experiment()
e["Problem"]["Type"] = "Optimization"
e["Problem"]["Objective Function"] = evaluateModel
e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Lower Bound"] = +1.0
e["Variables"][0]["Upper Bound"] = +10.0
e["Solver"]["Type"] = "Optimizer/DEA"
e["Solver"]["Population Size"] = 10
e["Solver"]["Termination Criteria"][criterion] = value
e["File Output"]["Enabled"] = False
e["Random Seed"] = 1337
k = korali.Engine()
k.run(e)
if (criterion == "Max Generations"):
assert_value(e["Current Generation"], value)
elif (criterion == "Max Infeasible Resamplings"):
assert_greatereq(e["Solver"]["Infeasible Sample Count"], value)
elif (criterion == "Max Value"):
assert_greatereq(e["Solver"]["Best Ever Value"], value)
elif (criterion == "Min Value Difference Threshold"):
previous = e["Solver"]["Previous Best Ever Value"]
current = e["Solver"]["Best Ever Value"]
assert_smallereq(previous - current, value)
else:
print("Termination Criterion not recognized!")
exit(-1)
#################################################
# Main (called from run_test.sh with args)
#################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='cmaes_termination', description='Check Termination Criterion.')
parser.add_argument(
'--criterion',
help='Name of Termination Criterion',
action='store',
required=True)
parser.add_argument(
'--value',
help='Value of Termination Criterion',
action='store',
type=float,
required=True)
args = parser.parse_args()
run_dea_with_termination_criterion(args.criterion, args.value)
```
#### File: correctness/helpers/helpers.py
```python
import json
import numpy as np
def open_json(dirname, filename):
path = '{0}/{1}'.format(dirname, filename)
data = None
with open(path) as f:
data = json.load(f)
return data
def checkMean(k, expectedMean, tol):
samples = k["Solver"]["Sample Database"]
mean = np.mean(samples)
assert np.isclose(
expectedMean, mean, atol=tol
), "Sample mean ({0}) deviates from expected ({1}) by more than {2}".format(
mean, expectedMean, tol)
def checkStd(k, expectedStd, tol):
samples = k["Solver"]["Sample Database"]
std = np.std(samples)
assert np.isclose(
expectedStd, std, atol=tol
), "Sample standard deviation ({0}) deviates from expected ({1}) by more than {2}".format(
std, expectedStd, tol)
```
#### File: mcmc/helpers/helpers.py
```python
import numpy as np
def compareMean(k):
samples = k["Solver"]["Sample Database"]
mean = np.mean(samples)
chainmean = k["Solver"]["Chain Mean"]
assert np.isclose(mean, chainmean), "Chain Mean deviates from Mean of "\
"Samples ({0} vs {1})".format(mean, chainmean)
def compareStd(k):
samples = k["Solver"]["Sample Database"]
mean = np.mean(samples)
std = np.sqrt(sum((samples - mean)**2) / (len(samples) - 1))
chainstd = k["Solver"]["Cholesky Decomposition Chain Covariance"]
assert np.isclose(std, chainstd), "Cholesky Decomposition of Chain" \
"Covariance deviates from Standard Deviation of Samples ({0} vs {1})".format(std, chainstd)
def compareMeanHMC(k):
warmupSamples = k["Solver"]["Sample Database"]
mean = np.mean(warmupSamples)
positionMean = k["Solver"]["Position Mean"]
assert np.isclose(mean, positionMean), "Position Mean deviates from Mean of "\
"Samples ({0} vs {1})".format(mean, positionMean)
```
#### File: tools/build/get_header_directory.py
```python
import pathlib
import sys
import os
def main():
argv = sys.argv
if len(argv) != 3:
raise RuntimeError(
f"Usage: {argv[0]} <header install directory> <absolute path of current header directory>"
)
anchor = 'source' # should this name ever change, this code will be broken
# all source code with headers must be below `anchor`
install_base = os.path.join(
*[x for x in pathlib.PurePath(argv[1].strip()).parts])
path = pathlib.PurePath(argv[2].strip())
subpath = []
append = False
for part in path.parts:
if part == anchor:
append = True
continue
if append:
subpath.append(part)
print(os.path.join(install_base, *subpath)) # stripped directory name
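# Example with hypothetical paths: install dir "/usr/include/korali" and header
# dir "/home/user/korali/source/modules/solver" print "/usr/include/korali/modules/solver",
# i.e. everything below the "source" anchor re-rooted onto the install base.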
if __name__ == "__main__":
main()
``` |
{
"source": "JonathanLehner/nnabla-examples",
"score": 2
} |
#### File: zooming-slow-mo/authors_scripts/data_utils.py
```python
import math
import numpy as np
def cubic(x):
"""
Cubic kernel
"""
absx = np.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5 * absx3 - 2.5 * absx2 + 1) * (
(absx <= 1).astype(type(absx))) + (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * ((
(absx > 1) * (absx <= 2)).astype(type(absx)))
def calculate_weights_indices(in_length, out_length, scale, kernel_width, antialiasing):
"""
Get weights and indices
"""
if (scale < 1) and (antialiasing):
# Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
kernel_width = kernel_width / scale
# Output-space coordinates
x = np.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = np.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = np.repeat(left.reshape(out_length, 1), P).reshape(out_length, P) + \
np.broadcast_to(np.linspace(
0, P - 1, P).reshape(1, P), (out_length, P))
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = np.repeat(
u.reshape(out_length, 1), P).reshape(out_length, P) - indices
# apply cubic kernel
if (scale < 1) and (antialiasing):
weights = scale * cubic(distance_to_center * scale)
else:
weights = cubic(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = np.sum(weights, 1).reshape(out_length, 1)
weights = weights / np.repeat(weights_sum, P).reshape(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = np.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices[:, 1:P-1]
weights = weights[:, 1:P-1]
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices[:, 0:P-1]
weights = weights[:, 0:P-1]
weights = np.ascontiguousarray(weights)
indices = np.ascontiguousarray(indices)
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
def imresize_np(img, scale, antialiasing=True):
"""
The scale should be the same for H and W
Input: img: Numpy, HWC BGR [0,1]
Output: HWC BGR [0,1] w/o round
"""
in_h, in_w, in_channels = img.shape
_, out_h, out_w = in_channels, math.ceil(
in_h * scale), math.ceil(in_w * scale)
kernel_width = 4
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(
in_h, out_h, scale, kernel_width, antialiasing)
weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(
in_w, out_w, scale, kernel_width, antialiasing)
# process h dimension
# symmetric copying
img_aug = np.ndarray((in_h + sym_len_hs + sym_len_he,
in_w, in_channels), dtype='float32')
img_aug[sym_len_hs:sym_len_hs+in_h, :, :] = img
sym_patch = img[:sym_len_hs, :, :]
inv_idx = np.arange(sym_patch.shape[0] - 1, -1, -1).astype('int64')
sym_patch_inv = np.take(sym_patch, inv_idx, 0)
img_aug[0:sym_len_hs:, :, :] = sym_patch_inv
sym_patch = img[-sym_len_he:, :, :]
inv_idx = np.arange(sym_patch.shape[0] - 1, -1, -1).astype('int64')
sym_patch_inv = np.take(sym_patch, inv_idx, 0)
img_aug[sym_len_hs + in_h:sym_len_hs +
in_h + sym_len_he, :, :] = sym_patch_inv
out_1 = np.ndarray((out_h, in_w, in_channels), dtype='float32')
kernel_width = weights_h.shape[1]
for i in range(out_h):
idx = int(indices_h[i][0])
out_1[i, :, 0] = np.dot(
img_aug[idx:idx + kernel_width, :, 0].transpose(1, 0), weights_h[i])
out_1[i, :, 1] = np.dot(
img_aug[idx:idx + kernel_width, :, 1].transpose(1, 0), weights_h[i])
out_1[i, :, 2] = np.dot(
img_aug[idx:idx + kernel_width, :, 2].transpose(1, 0), weights_h[i])
# process w dimension
# symmetric copying
out_1_aug = np.ndarray(
(out_h, in_w + sym_len_ws + sym_len_we, in_channels), dtype='float32')
out_1_aug[:, sym_len_ws:sym_len_ws + in_w] = out_1
sym_patch = out_1[:, :sym_len_ws, :]
inv_idx = np.arange(sym_patch.shape[1] - 1, -1, -1).astype('int64')
sym_patch_inv = np.take(sym_patch, inv_idx, 1)
out_1_aug[:, 0:0+sym_len_ws] = sym_patch_inv
sym_patch = out_1[:, -sym_len_we:, :]
inv_idx = np.arange(sym_patch.shape[1] - 1, -1, -1).astype('int64')
sym_patch_inv = np.take(sym_patch, inv_idx, 1)
out_1_aug[:, sym_len_ws + in_w:sym_len_ws +
in_w + sym_len_we] = sym_patch_inv
out_2 = np.ndarray((out_h, out_w, in_channels), dtype='float32')
kernel_width = weights_w.shape[1]
for i in range(out_w):
idx = int(indices_w[i][0])
out_2[:, i, 0] = np.dot(
out_1_aug[:, idx:idx + kernel_width, 0], weights_w[i])
out_2[:, i, 1] = np.dot(
out_1_aug[:, idx:idx + kernel_width, 1], weights_w[i])
out_2[:, i, 2] = np.dot(
out_1_aug[:, idx:idx + kernel_width, 2], weights_w[i])
return out_2
```
#### File: frame-interpolation/zooming-slow-mo/inference.py
```python
import os.path as osp
import argparse
import glob
import cv2
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nnabla.ext_utils import get_extension_context
from models import zooming_slo_mo_network
import utils.utils as util
parser = argparse.ArgumentParser(
description='Zooming-SloMo or only Slo-Mo Inference')
parser.add_argument('--input-dir', type=str, default='test_example/',
help='input data directory, expected to have input frames')
parser.add_argument('--model', type=str, default='ZoomingSloMo_NNabla.h5',
help='model path')
parser.add_argument('--context', type=str, default='cudnn',
help="Extension modules. ex) 'cpu', 'cudnn'.")
parser.add_argument('--metrics', action='store_true', default=False,
help='calculate metrics i.e. SSIM and PSNR')
parser.add_argument('--only-slomo', action='store_true', default=False,
help='If True, Slo-Mo only Inference (No Zooming)')
args = parser.parse_args()
def test():
"""
Test(Zooming SloMo) - inference on set of input data or Vid4 data
"""
# set context and load the model
ctx = get_extension_context(args.context)
nn.set_default_context(ctx)
nn.load_parameters(args.model)
input_dir = args.input_dir
n_ot = 7
# list all input sequence folders containing input frames
inp_dir_list = sorted(glob.glob(input_dir + '/*'))
inp_dir_name_list = []
avg_psnr_l = []
avg_psnr_y_l = []
avg_ssim_y_l = []
sub_folder_name_l = []
save_folder = 'results'
# for each sub-folder
for inp_dir in inp_dir_list:
gt_tested_list = []
inp_dir_name = inp_dir.split('/')[-1]
sub_folder_name_l.append(inp_dir_name)
inp_dir_name_list.append(inp_dir_name)
save_inp_folder = osp.join(save_folder, inp_dir_name)
img_low_res_list = sorted(glob.glob(inp_dir + '/*'))
util.mkdirs(save_inp_folder)
imgs = util.read_seq_imgs_(inp_dir)
img_gt_l = []
if args.metrics:
replace_str = 'LR'
for img_gt_path in sorted(glob.glob(osp.join(inp_dir.replace(replace_str, 'HR'), '*'))):
img_gt_l.append(util.read_image(img_gt_path))
avg_psnr, avg_psnr_sum, cal_n = 0, 0, 0
avg_psnr_y, avg_psnr_sum_y = 0, 0
avg_ssim_y, avg_ssim_sum_y = 0, 0
skip = args.metrics
select_idx_list = util.test_index_generation(
skip, n_ot, len(img_low_res_list))
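# each entry pairs the indices of the input (low-res, low-frame-rate) frames with
# the indices of the frames the network is expected to output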
# process each image
for select_idxs in select_idx_list:
# get input images
select_idx = [select_idxs[0]]
gt_idx = select_idxs[1]
imgs_in = F.gather_nd(
imgs, indices=nn.Variable.from_numpy_array(select_idx))
imgs_in = F.reshape(x=imgs_in, shape=(1,) + imgs_in.shape)
output = zooming_slo_mo_network(imgs_in, args.only_slomo)
outputs = output[0]
outputs.forward(clear_buffer=True)
for idx, name_idx in enumerate(gt_idx):
if name_idx in gt_tested_list:
continue
gt_tested_list.append(name_idx)
output_f = outputs.d[idx, :, :, :]
output = util.tensor2img(output_f)
cv2.imwrite(osp.join(save_inp_folder,
'{:08d}.png'.format(name_idx + 1)), output)
print("Saving :", osp.join(save_inp_folder,
'{:08d}.png'.format(name_idx + 1)))
if args.metrics:
# calculate PSNR
output = output / 255.
ground_truth = np.copy(img_gt_l[name_idx])
cropped_output = output
cropped_gt = ground_truth
crt_psnr = util.calculate_psnr(
cropped_output * 255, cropped_gt * 255)
cropped_gt_y = util.bgr2ycbcr(cropped_gt, only_y=True)
cropped_output_y = util.bgr2ycbcr(
cropped_output, only_y=True)
crt_psnr_y = util.calculate_psnr(
cropped_output_y * 255, cropped_gt_y * 255)
crt_ssim_y = util.calculate_ssim(
cropped_output_y * 255, cropped_gt_y * 255)
avg_psnr_sum += crt_psnr
avg_psnr_sum_y += crt_psnr_y
avg_ssim_sum_y += crt_ssim_y
cal_n += 1
if args.metrics:
avg_psnr = avg_psnr_sum / cal_n
avg_psnr_y = avg_psnr_sum_y / cal_n
avg_ssim_y = avg_ssim_sum_y / cal_n
avg_psnr_l.append(avg_psnr)
avg_psnr_y_l.append(avg_psnr_y)
avg_ssim_y_l.append(avg_ssim_y)
if args.metrics:
print('################ Tidy Outputs ################')
for name, ssim, psnr_y in zip(sub_folder_name_l, avg_ssim_y_l, avg_psnr_y_l):
print(
'Folder {} - Average SSIM: {:.6f} PSNR-Y: {:.6f} dB. '.format(name, ssim, psnr_y))
print('################ Final Results ################')
print('Total Average SSIM: {:.6f} PSNR-Y: {:.6f} dB for {} clips. '.format(
sum(avg_ssim_y_l) / len(avg_ssim_y_l), sum(avg_psnr_y_l) /
len(avg_psnr_y_l),
len(inp_dir_list)))
if __name__ == '__main__':
test()
```
#### File: GANs/first-order-model/generator.py
```python
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.initializer as I
from modules import resblock, sameblock, upblock, downblock
from dense_motion import predict_dense_motion
def deform_input(inp, deformation):
_, h_old, w_old, _ = deformation.shape
_, _, h, w = inp.shape
if h_old != h or w_old != w:
deformation = F.transpose(deformation, (0, 3, 1, 2))
deformation = F.interpolate(deformation, output_size=(
h, w), mode="linear", align_corners=False, half_pixel=True)
deformation = F.transpose(deformation, (0, 2, 3, 1))
return F.warp_by_grid(inp, deformation, align_corners=True)
def occlusion_aware_generator(source_image, kp_driving, kp_source,
num_channels, num_kp, block_expansion, max_features,
num_down_blocks, num_bottleneck_blocks,
estimate_occlusion_map=False, dense_motion_params=None,
estimate_jacobian=False, test=False, comm=None):
# pre-downsampling
out = sameblock(source_image, out_features=block_expansion,
kernel_size=7, padding=3, test=test, comm=comm)
# downsampling
for i in range(num_down_blocks):
with nn.parameter_scope(f"downblock_{i}"):
out_features = min(max_features, block_expansion * (2 ** (i + 1)))
out = downblock(out, out_features=out_features,
kernel_size=3, padding=1, test=test, comm=comm)
output_dict = {}
if dense_motion_params is not None:
with nn.parameter_scope("dense_motion_prediction"):
dense_motion = predict_dense_motion(source_image=source_image,
kp_driving=kp_driving, kp_source=kp_source,
num_kp=num_kp, num_channels=num_channels,
estimate_occlusion_map=estimate_occlusion_map,
test=test, comm=comm, **dense_motion_params)
# dense_motion is a dictionary containing:
# 'sparse_deformed': <Variable((8, 11, 3, 256, 256)),
# 'mask': <Variable((8, 11, 256, 256)),
# 'deformation': <Variable((8, 256, 256, 2)),
# 'occlusion_map': <Variable((8, 1, 256, 256))}
output_dict['mask'] = dense_motion['mask']
output_dict['sparse_deformed'] = dense_motion['sparse_deformed']
# Transform feature representation by deformation (+ occlusion)
if 'occlusion_map' in dense_motion:
occlusion_map = dense_motion['occlusion_map']
output_dict['occlusion_map'] = occlusion_map
else:
occlusion_map = None
deformation = dense_motion['deformation']
out = deform_input(out, deformation)
if occlusion_map is not None:
if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:
resized_occlusion_map = F.interpolate(occlusion_map,
output_size=out.shape[2:], mode="linear",
align_corners=False, half_pixel=True)
else:
resized_occlusion_map = F.identity(occlusion_map)
out = out * resized_occlusion_map
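# occluded feature locations are zeroed out so that the decoder has to inpaint
# them instead of relying on warped source features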
if test:
output_dict["deformed"] = deform_input(source_image, deformation)
# intermediate residual blocks
in_features = min(max_features, block_expansion * (2 ** num_down_blocks))
for i in range(num_bottleneck_blocks):
with nn.parameter_scope(f"residual_block_{i}"):
out = resblock(out, in_features=in_features,
kernel_size=3, padding=1, test=test, comm=comm)
# upsampling
for i in range(num_down_blocks):
with nn.parameter_scope(f"upblock_{i}"):
out_features = min(max_features, block_expansion *
(2 ** (num_down_blocks - i - 1)))
out = upblock(out, out_features=out_features,
kernel_size=3, padding=1, test=test, comm=comm)
with nn.parameter_scope("final_conv"):
inmaps, outmaps = out.shape[1], num_channels
k_w = I.calc_normal_std_he_forward(
inmaps, outmaps, kernel=(7, 7)) / np.sqrt(2.)
k_b = I.calc_normal_std_he_forward(inmaps, outmaps) / np.sqrt(2.)
w_init = I.UniformInitializer((-k_w, k_w))
b_init = I.UniformInitializer((-k_b, k_b))
out = PF.convolution(out, outmaps=num_channels, kernel=(7, 7),
pad=(3, 3), w_init=w_init, b_init=b_init)
out = F.sigmoid(out)
output_dict["prediction"] = out
return output_dict
```
#### File: GANs/jsigan/ops.py
```python
from collections import namedtuple
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.initializer as I
import numpy as np
from utils import depth_to_space
def box_filter(x, szf):
"""
Box filter
"""
y = F.identity(x)
szy = list(y.shape)
b_filt = nn.Variable((szf, szf, 1, 1))
b_filt.data.fill(1.)
b_filt = b_filt / (szf ** 2)
# 5,5,1,1
b_filt = F.tile(b_filt, [1, 1, szy[3], 1])
b_filt = F.transpose(b_filt, (3, 2, 0, 1))
b_filt = F.reshape(b_filt, (6, 5, 5))
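# NOTE: the reshape to (6, 5, 5) hard-codes szf = 5 and a 6-channel input, which
# matches how guided_filter() calls this (img concatenated with img*img, r = 5)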
pp = int((szf - 1) / 2)
y = F.pad(y, (0, 0, pp, pp, pp, pp, 0, 0), mode='reflect')
y_chw = F.transpose(y, (0, 3, 1, 2))
y_chw = F.depthwise_convolution(y_chw, b_filt, multiplier=1, stride=(1, 1))
y_hwc = F.transpose(y_chw, (0, 2, 3, 1))
return y_hwc
def guided_filter(img, r, eps):
"""
Edge preserving filter
"""
img2 = F.concatenate(img, img * img, axis=3)
img2 = box_filter(img2, r)
mean = F.split(img2, axis=3)
mean_i = F.stack(mean[0], mean[1], mean[2], axis=3)
mean_ii = F.stack(mean[3], mean[4], mean[5], axis=3)
var_i = mean_ii - mean_i * mean_i
a = var_i / (var_i + eps)
b = mean_i - a * mean_i
ab = F.concatenate(a, b, axis=3)
ab = box_filter(ab, r)
mean_ab = F.split(ab, axis=3)
mean_a = F.stack(mean_ab[0], mean_ab[1], mean_ab[2], axis=3)
mean_b = F.stack(mean_ab[3], mean_ab[4], mean_ab[5], axis=3)
q = mean_a * img + mean_b
return q
def conv_2d(x, o_ch, kernel, name=None):
"""
Convolution for JSInet
"""
b = I.ConstantInitializer(0.)
h = PF.convolution(x, o_ch, kernel=kernel, stride=(1, 1), pad=(1, 1), channel_last=True,
b_init=b, name=name)
return h
def res_block(x, out_ch, name):
"""
Create residual block
"""
with nn.parameter_scope(name):
h = conv_2d(F.relu(x), out_ch, kernel=(3, 3), name='conv/0')
h = conv_2d(F.relu(h), out_ch, kernel=(3, 3), name='conv/1')
h = x + h
return h
def dyn_2d_filter(x, lf_2d, k_sz):
"""
Dynamic 2d filtering
"""
with nn.parameter_scope('Dynamic_2D_Filtering'):
f_localexpand = nn.Variable.from_numpy_array(
np.eye(k_sz[0] * k_sz[1], k_sz[0] * k_sz[1]))
f_localexpand = F.reshape(f_localexpand,
(k_sz[0], k_sz[1], 1, k_sz[0] * k_sz[1])) # (9,9,1,81))
f_localexpand = F.transpose(f_localexpand, (3, 0, 1, 2)) # (81,9,9,1))
x_sz = x.shape
x = F.reshape(x, (x_sz[0], x_sz[1], x_sz[2], 1)) # (1,100,170,1)
x_localexpand = F.convolution(x, f_localexpand, stride=(1, 1), pad=(4, 4),
channel_last=True) # (1,100,170,81)
x_le_sz = x_localexpand.shape
x_localexpand = F.reshape(x_localexpand,
(x_le_sz[0], x_le_sz[1], x_le_sz[2], 1, x_le_sz[3]))
y = F.batch_matmul(x_localexpand, lf_2d)
y_sz = y.shape
y = F.reshape(y, (y_sz[0], y_sz[1], y_sz[2], y_sz[4]))
return y
def dyn_2d_up_operation(x, lf_2d, k_sz, sf=2):
"""
Dynamic 2d upsampling
"""
with nn.parameter_scope("Dynamic_2D_Upsampling"):
y = []
sz = lf_2d.shape
lf_2d_new = F.reshape(
lf_2d, (sz[0], sz[1], sz[2], k_sz[0] * k_sz[0], sf ** 2))
lf_2d_new = F.softmax(lf_2d_new, axis=3)
for ch in range(3): # loop over YUV channels
# apply dynamic filtering operation
temp = dyn_2d_filter(x[:, :, :, ch], lf_2d_new, k_sz)
temp = depth_to_space(temp, sf)
y += [temp]
y = F.concatenate(*y, axis=3)
return y
def dyn_sep_up_operation(x, dr_k_v, dr_k_h, k_sz, sf):
"""
Dynamic separable upsampling operation with 1D separable local kernels.
x: [B, H, W, C], dr_k_v: [B, H, W, 41*sf*sf], dr_k_h: [B, H, W, 41*sf*sf]
out: [B, H*sf, W*sf, C]
"""
sz = x.shape
pad = k_sz // 2 # local filter pad size
# [B, H, W, C*sf*sf]
out_v = nn.Variable((sz[0], sz[1], sz[2], sz[3] * sf ** 2))
out_v.data.zero()
# [B, H, W, C*sf*sf]
out_h = nn.Variable((sz[0], sz[1], sz[2], sz[3] * sf ** 2))
out_h.data.zero()
img_pad = F.pad(x, (0, 0, pad, pad, 0, 0, 0, 0))
img_pad_y = F.reshape(img_pad[:, :, :, 0],
(img_pad.shape[0], img_pad.shape[1], img_pad.shape[2], 1))
img_pad_y = F.tile(img_pad_y, [1, 1, 1, sf ** 2])
img_pad_u = F.reshape(img_pad[:, :, :, 1],
(img_pad.shape[0], img_pad.shape[1], img_pad.shape[2], 1))
img_pad_u = F.tile(img_pad_u, [1, 1, 1, sf ** 2])
img_pad_v = F.reshape(img_pad[:, :, :, 2],
(img_pad.shape[0], img_pad.shape[1], img_pad.shape[2], 1))
img_pad_v = F.tile(img_pad_v, [1, 1, 1, sf ** 2])
img_pad = F.concatenate(img_pad_y, img_pad_u, img_pad_v, axis=3)
# vertical 1D filter
for i in range(k_sz):
out_v = out_v + img_pad[:, i:i + sz[1], :, :] * F.tile(
dr_k_v[:, :, :, i:k_sz * sf ** 2:k_sz], [1, 1, 1, 3])
img_pad = F.pad(out_v, (0, 0, 0, 0, pad, pad, 0, 0))
# horizontal 1D filter
for i in range(k_sz):
out_h = out_h + img_pad[:, :, i:i + sz[2], :] * F.tile(
dr_k_h[:, :, :, i:k_sz * sf ** 2:k_sz], [1, 1, 1, 3])
# depth to space upsampling (YUV)
out = depth_to_space(out_h[:, :, :, 0:sf ** 2], sf)
out = F.concatenate(out, depth_to_space(
out_h[:, :, :, sf ** 2:2 * sf ** 2], sf), axis=3)
out = F.concatenate(out, depth_to_space(
out_h[:, :, :, 2 * sf ** 2:3 * sf ** 2], sf), axis=3)
return out
def res_block_concat(x, out_ch, name):
"""
Basic residual block -> [conv-relu | conv-relu] + input
"""
with nn.parameter_scope(name):
h = conv_2d(F.relu(x), out_ch, kernel=(3, 3), name='conv/0')
h = conv_2d(F.relu(h), out_ch, kernel=(3, 3), name='conv/1')
h = x[:, :, :, :out_ch] + h
return h
def model(img, sf):
"""
Define JSInet model
"""
with nn.parameter_scope('Network'):
with nn.parameter_scope('local_contrast_enhancement'):
## ================= Local Contrast Enhancement Subnet ============================ ##
ch = 64
b = guided_filter(img, 5, 0.01)
n1 = conv_2d(b, ch, kernel=(3, 3), name='conv/0')
for i in range(4):
n1 = res_block(n1, ch, 'res_block/%d' % i)
n1 = F.relu(n1, inplace=True)
local_filter_2d = conv_2d(n1, (9 ** 2) * (sf ** 2), kernel=(3, 3),
name='conv_k') # [B, H, W, (9x9)*(sfxsf)]
# dynamic 2D upsampling with 2D local filters
pred_C = dyn_2d_up_operation(b, local_filter_2d, (9, 9), sf)
# local contrast mask
pred_C = 2 * F.sigmoid(pred_C)
## ================= Detail Restoration Subnet ============================ ##
ch = 64
d = F.div2(img, b + 1e-15)
with nn.parameter_scope('detail_restoration'):
n3 = conv_2d(d, ch, kernel=(3, 3), name='conv/0')
for i in range(4):
n3 = res_block(n3, ch, 'res_block/%d' % i)
if i == 0:
d_feature = n3
n3 = F.relu(n3, inplace=True)
# separable 1D filters
dr_k_h = conv_2d(n3, 41 * sf ** 2, kernel=(3, 3), name='conv_k_h')
dr_k_v = conv_2d(n3, 41 * sf ** 2, kernel=(3, 3), name='conv_k_v')
# dynamic separable upsampling with separable 1D local filters
pred_D = dyn_sep_up_operation(d, dr_k_v, dr_k_h, 41, sf)
## ================= Image Reconstruction Subnet ============================ ##
with nn.parameter_scope('image_reconstruction'):
n4 = conv_2d(img, ch, kernel=(3, 3), name='conv/0')
for i in range(4):
if i == 1:
n4 = F.concatenate(n4, d_feature, axis=3)
n4 = res_block_concat(n4, ch, 'res_block/%d' % i)
else:
n4 = res_block(n4, ch, 'res_block/%d' % i)
n4 = F.relu(n4, inplace=True)
n4 = F.relu(conv_2d(n4, ch * sf * sf, kernel=(3, 3),
name='conv/1'), inplace=True)
# (1,100,170,1024) -> (1,100,170,4,4,64) -> (1,100,4,170,4,64)
# pixel shuffle
n4 = depth_to_space(n4, sf)
pred_I = conv_2d(n4, 3, kernel=(3, 3), name='conv/2')
pred = F.add2(pred_I, pred_D, inplace=True) * pred_C
jsinet = namedtuple('jsinet', ['pred'])
return jsinet(pred)
def truncated_normal(w_shape, mean, std):
"""
Numpy truncated normal
"""
init = I.NormalInitializer()
tmp = init(w_shape + (4,))
valid = np.logical_and((np.less(tmp, 2)), (np.greater(tmp, -2)))
ind = np.argmax(valid, axis=-1)
ind1 = (np.expand_dims(ind, -1))
trunc_norm = np.take_along_axis(tmp, ind1, axis=4).squeeze(-1)
trunc_norm = trunc_norm * std + mean
return trunc_norm
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, scope='conv_0'):
"""
Convolution for discriminator
"""
w_n_shape = (channels, kernel, kernel, x.shape[-1])
w_init = truncated_normal(w_n_shape, mean=0.0, std=0.02)
b_init = I.ConstantInitializer(0.)
with nn.parameter_scope(scope):
if pad > 0:
h = x.shape[1]
if h % stride == 0:
pad = pad * 2
else:
pad = max(kernel - (h % stride), 0)
pad_top = pad // 2
pad_bottom = pad - pad_top
pad_left = pad // 2
pad_right = pad - pad_left
if pad_type == 'zero':
x = F.pad(x, (0, 0, pad_top, pad_bottom,
pad_left, pad_right, 0, 0))
if pad_type == 'reflect':
x = F.pad(x, (0, 0, pad_top, pad_bottom, pad_left,
pad_right, 0, 0), mode='reflect')
def apply_w(w):
return PF.spectral_norm(w, dim=0)
x = PF.convolution(x, channels, kernel=(kernel, kernel), stride=(
stride, stride), apply_w=apply_w, w_init=w_init, b_init=b_init, with_bias=use_bias,
channel_last=True)
return x
def dis_block(n, c, i, train=True):
"""
Discriminator conv_bn_relu block
"""
out = conv(n, channels=c, kernel=4, stride=2, pad=1, use_bias=False,
scope='d_conv/' + str(2 * i + 2))
out_fm = F.leaky_relu(
PF.batch_normalization(
out, axes=[3], batch_stat=train, name='d_bn/' + str(2 * i + 1)),
alpha=0.2)
out = conv(out_fm, channels=c * 2, kernel=3, stride=1, pad=1, use_bias=False,
scope='d_conv/' + str(2 * i + 3))
out = F.leaky_relu(
PF.batch_normalization(
out, axes=[3], batch_stat=train, name='d_bn/' + str(2 * i + 2)),
alpha=0.2)
return out, out_fm
def discriminator_fm(x, sf, scope="Discriminator_FM"):
"""
Feature matching discriminator
"""
with nn.parameter_scope(scope):
fm_list = []
ch = 32
n = F.leaky_relu(conv(x, ch, 3, 1, 1, scope='d_conv/1'), alpha=0.2)
for i in range(4):
n, out_fm = dis_block(n, ch, i, train=True)
ch = ch * 2
fm_list.append(out_fm)
n = F.leaky_relu(PF.batch_normalization(
conv(n, channels=ch, kernel=4, stride=2,
pad=1, use_bias=False, scope='d_conv/10'),
axes=[3], batch_stat=True, name='d_bn/9'), alpha=0.2,
inplace=True)
if sf == 1:
n = F.leaky_relu(PF.batch_normalization(
conv(n, channels=ch, kernel=5, stride=1,
pad=1, use_bias=False, scope='d_conv/11'),
axes=[3], batch_stat=True, name='d_bn/10'), alpha=0.2, inplace=True)
else:
n = F.leaky_relu(PF.batch_normalization(
conv(n, channels=ch, kernel=5, stride=1,
use_bias=False, scope='d_conv/11'),
axes=[3], batch_stat=True, name='d_bn/10'), alpha=0.2, inplace=True)
n = PF.batch_normalization(
conv(n, channels=1, kernel=1, stride=1,
use_bias=False, scope='d_conv/12'),
axes=[3], batch_stat=True, name='d_bn/11')
out_logit = n
out = F.sigmoid(out_logit) # [B,1]
return out, out_logit, fm_list
def discriminator_loss(real, fake):
"""
Calculate discriminator loss
"""
real_loss = F.mean(
F.relu(1.0 - (real - F.reshape(F.mean(fake), (1, 1, 1, 1)))))
fake_loss = F.mean(
F.relu(1.0 + (fake - F.reshape(F.mean(real), (1, 1, 1, 1)))))
l_d = real_loss + fake_loss
return l_d
def generator_loss(real, fake):
"""
Calculate generator loss
"""
real_loss = F.mean(
F.relu(1.0 + (real - F.reshape(F.mean(fake), (1, 1, 1, 1)))))
fake_loss = F.mean(
F.relu(1.0 - (fake - F.reshape(F.mean(real), (1, 1, 1, 1)))))
l_g = real_loss + fake_loss
return l_g
def feature_matching_loss(x, y, num=4):
"""
Calculate feature matching loss
"""
fm_loss = 0.0
for i in range(num):
fm_loss += F.mean(F.squared_error(x[i], y[i]))
return fm_loss
def gan_model(label_ph, pred, conf):
"""
Define GAN model with adversarial and discriminator losses and their orchestration
"""
# Define Discriminator
_, d_real_logits, d_real_fm_list = discriminator_fm(
label_ph, conf.scaling_factor, scope="Discriminator_FM")
# output of D for fake images
_, d_fake_logits, d_fake_fm_list = discriminator_fm(
pred, conf.scaling_factor, scope="Discriminator_FM")
# Define Detail Discriminator
    # compute the detail layers for the discriminator (reuse)
base_gt = guided_filter(label_ph, 5, 0.01)
detail_gt = F.div2(label_ph, base_gt + 1e-15)
base_pred = guided_filter(pred, 5, 0.01)
detail_pred = F.div2(pred, base_pred + 1e-15)
# detail layer output of D for real images
_, d_detail_real_logits, d_detail_real_fm_list = \
discriminator_fm(detail_gt, conf.scaling_factor,
scope="Discriminator_Detail")
# detail layer output of D for fake images
_, d_detail_fake_logits, d_detail_fake_fm_list = \
discriminator_fm(detail_pred, conf.scaling_factor,
scope="Discriminator_Detail")
# Loss
# original GAN (hinge GAN)
d_adv_loss = discriminator_loss(d_real_logits, d_fake_logits)
d_adv_loss.persistent = True
g_adv_loss = generator_loss(d_real_logits, d_fake_logits)
g_adv_loss.persistent = True
# detail GAN (hinge GAN)
d_detail_adv_loss = conf.detail_lambda * \
discriminator_loss(d_detail_real_logits, d_detail_fake_logits)
d_detail_adv_loss.persistent = True
g_detail_adv_loss = conf.detail_lambda * \
generator_loss(d_detail_real_logits, d_detail_fake_logits)
g_detail_adv_loss.persistent = True
# feature matching (FM) loss
fm_loss = feature_matching_loss(d_real_fm_list, d_fake_fm_list, 4)
fm_loss.persistent = True
fm_detail_loss = conf.detail_lambda * feature_matching_loss(d_detail_real_fm_list,
d_detail_fake_fm_list, 4)
fm_detail_loss.persistent = True
jsigan = namedtuple('jsigan',
['d_adv_loss', 'd_detail_adv_loss', 'g_adv_loss', 'g_detail_adv_loss',
'fm_loss', 'fm_detail_loss'])
return jsigan(d_adv_loss, d_detail_adv_loss, g_adv_loss, g_detail_adv_loss, fm_loss,
fm_detail_loss)
```
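The hinge-style relativistic losses above depend only on the two logit tensors, so they can be exercised in isolation. A minimal sketch (not from the original repository), with `discriminator_loss` and `generator_loss` from the module above in scope and purely illustrative constant logits:
```python
import numpy as np
import nnabla as nn
import nnabla.functions as F

# Dummy discriminator outputs: real logits around +0.8, fake logits around -0.3.
real = nn.Variable.from_numpy_array(np.full((4, 1, 1, 1), 0.8, dtype=np.float32))
fake = nn.Variable.from_numpy_array(np.full((4, 1, 1, 1), -0.3, dtype=np.float32))

l_d = discriminator_loss(real, fake)  # hinge loss for the discriminator
l_g = generator_loss(real, fake)      # hinge loss for the generator
F.sink(l_d, l_g).forward()
print(l_d.d, l_g.d)
```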
#### File: data_cleansing/influence_functions/infl.py
```python
import numpy as np
import nnabla as nn
import nnabla.solvers as S
import nnabla.functions as F
import os
import functools
from tqdm import tqdm
from sgd_influence.model import setup_model
from sgd_influence.dataset import get_batch_data, init_dataset, get_data, get_image_size, get_batch_indices
from sgd_influence.utils import ensure_dir, get_indices, save_to_csv
def adjust_batch_size(model, batch_size, loss_fn=None):
has_loss = loss_fn is not None
if has_loss:
loss_d, loss_g = loss_fn.d, loss_fn.g
pred, loss_fn, input_image = model(batch_size=batch_size)
if has_loss:
loss_fn.d = loss_d
loss_fn.g = loss_g
return pred, loss_fn, input_image
def save_infl_for_analysis(infl_list, use_all_params, save_dir, infl_filename, epoch, header, data_type):
dn = os.path.join(save_dir, 'epoch%02d' % (epoch))
if use_all_params:
dn = os.path.join(dn, 'infl_original')
else:
dn = os.path.join(dn, 'infl_arranged')
ensure_dir(dn)
save_to_csv(filename=os.path.join(dn, os.path.basename(infl_filename)),
header=header, list_to_save=infl_list, data_type=data_type)
def compute_gradient(grad_model, solver, dataset, batch_size, idx_list_to_data, resize_size):
n = len(idx_list_to_data)
grad_idx = get_batch_indices(n, batch_size, seed=None)
u = {}
loss_fn = None
for i in tqdm(grad_idx, desc='calc gradient (2/3 steps)'):
X, y = get_batch_data(dataset, idx_list_to_data,
i, resize_size, test=True)
_, loss_fn, input_image = adjust_batch_size(
grad_model, len(X), loss_fn)
input_image["image"].d = X
input_image["label"].d = y
loss_fn.forward()
solver.zero_grad()
loss_fn.backward(clear_buffer=True)
for key, param in nn.get_parameters(grad_only=False).items():
uu = u.get(key, None)
if uu is None:
u[key] = nn.Variable(param.shape)
u[key].data.zero()
u[key].d += param.g / n
return u
def infl_icml(model_info_dict, file_dir_dict, use_all_params, need_evaluate, alpha):
num_epochs = 2
# params
lr = 0.005
seed = model_info_dict['seed']
net_func = model_info_dict['net_func']
batch_size = model_info_dict['batch_size']
test_batch_size = 1000
target_epoch = model_info_dict['num_epochs']
# files and dirs
save_dir = file_dir_dict['save_dir']
infl_filename = file_dir_dict['infl_filename']
final_model_name = file_dir_dict['model_filename']
final_model_path = os.path.join(save_dir, 'epoch%02d' % (
target_epoch - 1), 'weights', final_model_name)
input_dir_name = os.path.dirname(file_dir_dict['train_csv'])
# setup
trainset, valset, image_shape, n_classes, ntr, nval = init_dataset(
file_dir_dict['train_csv'], file_dir_dict['val_csv'], seed)
n_channels, _h, _w = image_shape
resize_size = get_image_size((_h, _w))
idx_train = get_indices(ntr, seed)
idx_val = get_indices(nval, seed)
nn.load_parameters(final_model_path)
trained_params = nn.get_parameters(grad_only=False)
test = True
grad_model = functools.partial(
setup_model, net_func=net_func, n_classes=n_classes, n_channels=n_channels, resize_size=resize_size, test=test, reduction='mean')
solver = S.Momentum(lr=lr, momentum=0.9)
solver.set_parameters(trained_params)
# gradient
u = compute_gradient(grad_model, solver, valset,
test_batch_size, idx_val, resize_size)
# Hinv * u with SGD
seed_train = 0
v = dict()
for key, param in nn.get_parameters(grad_only=False).items():
v[key] = nn.Variable(param.d.shape, need_grad=True)
v[key].d = 0
v[key].g = 0
solver.set_parameters(v)
loss_train = []
loss_fn = None
for epoch in range(num_epochs):
# training
seed_train = 0
np.random.seed(epoch)
idx = get_batch_indices(ntr, batch_size, seed=epoch)
for j, i in enumerate(idx):
seeds = list(range(seed_train, seed_train + i.size))
seed_train += i.size
X, y = get_batch_data(trainset, idx_train, i,
resize_size, test=False, seeds=seeds)
_, loss_fn, input_image = adjust_batch_size(
grad_model, len(X), loss_fn)
input_image["image"].d = X
input_image["label"].d = y
loss_fn.forward()
grad_params = nn.grad(
loss_fn, [param for param in nn.get_parameters(grad_only=False).values()])
vg = 0
for vv, g in zip(v.values(), grad_params):
vg += F.sum(vv*g)
for parameters in trained_params.values():
parameters.grad.zero()
vgrad_params = nn.grad(
vg, [param for param in nn.get_parameters(grad_only=False).values()])
loss_i = 0
for vgp, vv, uu in zip(vgrad_params, v.values(), u.values()):
loss_i += 0.5 * F.sum(vgp * vv + alpha *
vv * vv) - F.sum(uu * vv)
loss_i.forward()
solver.zero_grad()
loss_i.backward(clear_buffer=True)
solver.update()
loss_train.append(loss_i.d.copy())
# influence
infl_dict = dict()
infl = np.zeros(ntr)
for i in tqdm(range(ntr), desc='calc influence (3/3 steps)'):
csv_idx = idx_train[i]
file_name = trainset.get_filepath_to_data(csv_idx)
file_name = os.path.join(input_dir_name, file_name)
file_name = os.path.normpath(file_name)
X, y = get_data(trainset, idx_train[i], resize_size, True, seed=i)
_, loss_fn, input_image = adjust_batch_size(
grad_model, len(X), loss_fn)
input_image["image"].d = X
input_image["label"].d = y
loss_fn.forward()
for parameters in trained_params.values():
parameters.grad.zero()
loss_fn.backward(clear_buffer=True)
infl_i = 0
for j, param in enumerate(nn.get_parameters(grad_only=False).values()):
infl_i += (param.g.copy() * list(v.values())[j].d.copy()).sum()
infl[i] = -infl_i / ntr
infl_dict[csv_idx] = [file_name, y, infl[i]]
infl_list = [val + [key] for key, val in infl_dict.items()]
infl_list = sorted(infl_list, key=lambda x: (x[-2]))
# save
header = ['x:image', 'y:label', 'influence', 'datasource_index']
data_type = 'object,int,float,int'
if need_evaluate:
save_infl_for_analysis(infl_list, use_all_params,
save_dir, infl_filename, epoch, header, data_type)
save_to_csv(filename=infl_filename, header=header,
list_to_save=infl_list, data_type=data_type)
```
#### File: data_cleansing/sgd_influence/train.py
```python
import os
import numpy as np
from tqdm import tqdm
import nnabla as nn
import functools
import nnabla.solvers as S
from .model import setup_model, calc_acc, adjust_batch_size
from .dataset import get_batch_indices, get_batch_data, init_dataset, get_image_size
from .utils import ensure_dir, get_indices, save_to_csv
def save_all_params(params_dict, c, k, j, bundle_size, step_size, save_dir, epoch):
params_dict[c] = nn.get_parameters(grad_only=False).copy()
c += 1
if c == bundle_size or j == step_size - 1:
dn = os.path.join(save_dir, 'epoch%02d' % (epoch), 'weights')
ensure_dir(dn)
for cc, params in params_dict.items():
fn = '%s/model_step%04d.h5' % (dn, k + cc)
nn.save_parameters(fn, params=params, extension=".h5")
k += c
c = 0
params_dict = {}
return params_dict, c, k
def eval_model(val_model, solver, dataset, idx_list_to_data, batch_size, resize_size):
loss = 0
acc = 0
n = len(idx_list_to_data)
idx = np.array_split(np.arange(n), batch_size)
loss_fn = None
for _, i in enumerate(idx):
X, y = get_batch_data(dataset, idx_list_to_data,
i, resize_size, test=True)
pred, loss_fn, input_image = adjust_batch_size(
val_model, solver, len(X), loss_fn)
input_image["image"].d = X
input_image["label"].d = y
loss_fn.forward()
loss += loss_fn.d * len(X)
acc += calc_acc(pred.d, y, method='sum')
loss /= n
acc /= n
return loss, acc
def train(model_info_dict, file_dir_dict, use_all_params, need_evaluate, bundle_size=200):
# params
lr = model_info_dict['lr']
seed = model_info_dict['seed']
net_func = model_info_dict['net_func']
batch_size = model_info_dict['batch_size']
num_epochs = model_info_dict['num_epochs']
infl_end_epoch = model_info_dict['end_epoch']
# files and dirs
save_dir = file_dir_dict['save_dir']
info_filename = file_dir_dict['info_filename']
model_filename = file_dir_dict['model_filename']
score_filename = file_dir_dict['score_filename']
# setup
trainset, valset, image_shape, n_classes, ntr, nval = init_dataset(
file_dir_dict['train_csv'], file_dir_dict['val_csv'], seed)
n_channels, _h, _w = image_shape
resize_size = get_image_size((_h, _w))
# Create training graphs
test = False
train_model = functools.partial(
setup_model, net_func=net_func, n_classes=n_classes, n_channels=n_channels, resize_size=resize_size, test=test)
# Create validation graphs
test = True
val_model = functools.partial(
setup_model, net_func=net_func, n_classes=n_classes, n_channels=n_channels, resize_size=resize_size, test=test)
# setup optimizer (SGD)
solver = S.Sgd(lr=lr)
solver.set_parameters(nn.get_parameters(grad_only=False))
# get shuffled index using designated seed
idx_train = get_indices(ntr, seed)
idx_val = get_indices(nval, seed)
# training
seed_train = 0
info = []
score = []
loss_train = None
for epoch in tqdm(range(num_epochs), desc='training (1/3 steps)'):
idx = get_batch_indices(ntr, batch_size, seed=epoch)
epoch_info = []
c = 0
k = 0
params_dict = {}
for j, i in enumerate(idx):
seeds = list(range(seed_train, seed_train + i.size))
seed_train += i.size
epoch_info.append({'epoch': epoch, 'step': j,
'idx': i, 'lr': lr, 'seeds': seeds})
if (use_all_params) & (epoch >= infl_end_epoch):
params_dict, c, k = save_all_params(
params_dict, c, k, j, bundle_size, len(idx), save_dir, epoch)
X, y = get_batch_data(trainset, idx_train, i,
resize_size, test=False, seeds=seeds)
_, loss_train, input_image_train = adjust_batch_size(
train_model, solver, len(X), loss_train)
input_image_train["image"].d = X
input_image_train["label"].d = y
loss_train.forward()
solver.zero_grad()
loss_train.backward(clear_buffer=True)
solver.update()
info.append(epoch_info)
# save if params are necessary for calculating influence
if epoch >= infl_end_epoch-1:
dn = os.path.join(save_dir, 'epoch%02d' % (epoch), 'weights')
ensure_dir(dn)
nn.save_parameters(os.path.join(dn, model_filename), params=nn.get_parameters(
grad_only=False), extension=".h5")
# evaluation
if need_evaluate:
loss_tr, acc_tr = eval_model(
val_model, solver, trainset, idx_train, batch_size, resize_size)
loss_val, acc_val = eval_model(
val_model, solver, valset, idx_val, batch_size, resize_size)
score.append((loss_tr, loss_val, acc_tr, acc_val))
# save epoch and step info
np.save(os.path.join(save_dir, info_filename), arr=info)
# save score
if need_evaluate:
save_to_csv(filename=score_filename, header=[
'train_loss', 'val_loss', 'train_accuracy', 'val_accuracy'], list_to_save=score, data_type='float,float,float,float')
```
#### File: data_cleansing/sgd_influence/utils.py
```python
import os
import csv
import numpy as np
from nnabla.ext_utils import get_extension_context
from nnabla.utils.load import load
from shutil import rmtree
def delete_file(file_name):
if os.path.isfile(file_name):
os.remove(file_name)
def delete_dir(dir_name, keyword='sgd_infl_results'):
if os.path.isdir(dir_name):
if keyword in dir_name:
rmtree(dir_name)
def ensure_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def get_context(device_id):
# for cli app use
try:
context = 'cudnn'
ctx = get_extension_context(context, device_id=device_id)
except ModuleNotFoundError:
context = 'cpu'
ctx = get_extension_context(context, device_id=device_id)
# for nnc use
config_filename = 'net.nntxt'
if os.path.isfile(config_filename):
config_info = load([config_filename])
ctx = config_info.global_config.default_context
return ctx
def get_indices(n, seed):
np.random.seed(seed)
idx = np.random.permutation(n)
return idx
def save_to_csv(filename, header, list_to_save, data_type):
with open(filename, 'w') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(header)
writer.writerows(np.array([tuple(row)
for row in list_to_save], dtype=data_type))
def read_csv(filename):
with open(filename) as f:
reader = csv.reader(f)
ret = [s for s in reader]
return ret
```
#### File: responsible_ai/gradcam/gradcam.py
```python
import cv2
import numpy as np
def gradcam(middle_layer):
"""
Calculate GradCAM.
Parameters
----------
middle_layer: nn.Variable
The layer of interest to apply GradCAM
Returns
----------
heatmap: ndarray
2D array of same size as width and height of middle_layer
"""
conv_layer_output = middle_layer.d
conv_layer_grad = middle_layer.g
pooled_grad = conv_layer_grad.mean(axis=(0, 2, 3), keepdims=True)
heatmap = pooled_grad * conv_layer_output
heatmap = np.maximum(heatmap, 0) # ReLU
heatmap = heatmap.mean(axis=(0, 1))
max_v, min_v = np.max(heatmap), np.min(heatmap)
if max_v != min_v:
heatmap = (heatmap - min_v) / (max_v - min_v)
return heatmap
def overlay_images(base_img, overlay_img, overlay_coef=1.0):
"""
Overlay two images.
Parameters
----------
    base_img: ndarray
        (h, w, 3) color image
    overlay_img: ndarray
        2D array (e.g. a GradCAM heatmap); it is resized to match base_img
overlay_coef: float
mix rate of overlay_img to base_img (overlay_img: overlay_coef, base_img: 1).
Returns
----------
ret_img: ndarray
2D array of overlaid image
"""
# resize
_overlay_img = cv2.resize(
overlay_img, (base_img.shape[1], base_img.shape[0]))
# normalize
_overlay_img = 255 * _overlay_img / _overlay_img.max()
_overlay_img = _overlay_img.astype('uint8')
# color adjust
_overlay_img = cv2.applyColorMap(_overlay_img, cv2.COLORMAP_JET)
base_img = cv2.cvtColor(base_img, cv2.COLOR_BGR2RGB)
# overlay
ret_img = _overlay_img * overlay_coef + base_img
ret_img = 255 * ret_img / ret_img.max()
ret_img = ret_img.astype('uint8')
ret_img = cv2.cvtColor(ret_img, cv2.COLOR_BGR2RGB)
return ret_img
```
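A minimal usage sketch (not part of the original file): `gradcam` only reads the `.d` and `.g` arrays of the layer variable, so a simple stand-in object with random data is enough to see the shapes; all names and sizes here are illustrative.
```python
import numpy as np
from types import SimpleNamespace
# from gradcam import gradcam, overlay_images  # when used as a module

# Stand-in for an nnabla Variable holding a (N, C, H, W) feature map and its gradient.
layer = SimpleNamespace(d=np.random.rand(1, 8, 7, 7).astype(np.float32),
                        g=np.random.rand(1, 8, 7, 7).astype(np.float32))
heatmap = gradcam(layer)             # (7, 7) map normalized to [0, 1]
base = np.zeros((224, 224, 3), dtype=np.uint8)
vis = overlay_images(base, heatmap)  # heatmap resized, colorized and blended over base
print(heatmap.shape, vis.shape)
```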
#### File: utils/neu/lms.py
```python
from contextlib import contextmanager
import nnabla.logger as logger
from nnabla.lms import SwapInOutScheduler
@contextmanager
def sechdule_scope(scheduler):
scheduler.update_pre_hook()
yield scheduler
scheduler.update_post_hook()
def lms_scheduler(ctx, use_lms, gpu_memory_size=8 << 30, window_length=12 << 30):
_check_list = [x.split(":")[0] for x in ctx.backend]
if "cudnn" not in _check_list and "cuda" not in _check_list:
logger.warn(
"ctx passed to scheduler doesn't have cuda/cudnn backend. lms scheduler will not be used.")
use_lms = False
if use_lms:
logger.info("[LMS] gpu_memory_limit: {}GB, prefetch_window_length: {}GB".format(float(gpu_memory_size) / (1 << 30),
float(window_length) / (1 << 30)))
# Change array preference so that lms works well.
# import nnabla_ext.cuda.init as cuda_init
# cuda_init.prefer_cpu_pinned_array()
# cuda_init.prefer_cuda_virtual_array()
#
from nnabla.ext_utils import get_extension_context
# from nnabla import set_default_context
be, tc = ctx.backend[0].split(":")
# ctx = get_extension_context(be, device_id=ctx.device_id, type_config=tc)
# set_default_context(ctx)
cpu_ctx = get_extension_context("cpu", device_id="", type_config=tc)
return SwapInOutScheduler(cpu_ctx, ctx, gpu_memory_size, window_length)
else:
class DummyScheduler(object):
function_pre_hook = None
function_post_hook = None
update_pre_hook = None
update_post_hook = None
def start_scheduling(self):
return None
def end_scheduling(self):
return None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
return DummyScheduler()
```
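A usage sketch (illustrative, not from the repository): on a context without a CUDA/cuDNN backend the function logs a warning and hands back the no-op `DummyScheduler`, so the same calling code works with or without large-model support.
```python
from nnabla.ext_utils import get_extension_context

ctx = get_extension_context("cpu")            # no cuda backend -> DummyScheduler
scheduler = lms_scheduler(ctx, use_lms=True)  # logs a warning and disables LMS
with scheduler:                               # DummyScheduler is a no-op context manager
    pass                                      # build / run the training graph here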
#### File: VAEs/vq-vae/main.py
```python
import os
import sys
from argparse import ArgumentParser
import time
common_utils_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'utils'))
sys.path.append(common_utils_path)
from neu.yaml_wrapper import read_yaml
from neu.comm import CommunicatorWrapper
import nnabla as nn
import nnabla.solvers as S
from nnabla.monitor import Monitor, MonitorSeries, MonitorImageTile
from nnabla.ext_utils import get_extension_context
from models import VQVAE, GatedPixelCNN
from trainers import VQVAEtrainer, TrainerPrior
from data import mnist_iterator, imagenet_iterator, cifar10_iterator
def make_parser():
parser = ArgumentParser(description='VQVAE Implementation in NNabla')
parser.add_argument('--data', '-d', type=str, default='cifar10', required=True,
choices=['mnist', 'cifar10', 'imagenet'])
parser.add_argument('--load-checkpoint', action='store_true', default=False,
help='Pass this argument to load saved parameters. Path of the saved parameters needs to be defined in config file.')
parser.add_argument('--pixelcnn-prior', action='store_true', default=False,
help='Pass this argument to train a PixelCNN on the trained discretized latent space')
parser.add_argument('--sample-from-pixelcnn', type=int,
help='To generate images by randomly sampling using a trained pixelcnn prior. Enter number of images to generate')
parser.add_argument('--sample-save-path', type=str, default='',
help='Path to save samples generated via pixelcnn prior')
return parser
def train(data_iterator, monitor, config, comm, args):
monitor_train_loss, monitor_train_recon = None, None
monitor_val_loss, monitor_val_recon = None, None
if comm.rank == 0:
monitor_train_loss = MonitorSeries(
config['monitor']['train_loss'], monitor, interval=config['train']['logger_step_interval'])
monitor_train_recon = MonitorImageTile(config['monitor']['train_recon'], monitor, interval=config['train']['logger_step_interval'],
num_images=config['train']['batch_size'])
monitor_val_loss = MonitorSeries(
config['monitor']['val_loss'], monitor, interval=config['train']['logger_step_interval'])
monitor_val_recon = MonitorImageTile(config['monitor']['val_recon'], monitor, interval=config['train']['logger_step_interval'],
num_images=config['train']['batch_size'])
model = VQVAE(config)
if not args.sample_from_pixelcnn:
if config['train']['solver'] == 'adam':
solver = S.Adam()
else:
            solver = S.Momentum()
solver.set_learning_rate(config['train']['learning_rate'])
train_loader = data_iterator(config, comm, train=True)
if config['dataset']['name'] != 'imagenet':
val_loader = data_iterator(config, comm, train=False)
else:
val_loader = None
else:
solver, train_loader, val_loader = None, None, None
if not args.pixelcnn_prior:
trainer = VQVAEtrainer(model, solver, train_loader, val_loader, monitor_train_loss,
monitor_train_recon, monitor_val_loss, monitor_val_recon, config, comm)
num_epochs = config['train']['num_epochs']
else:
pixelcnn_model = GatedPixelCNN(config['prior'])
trainer = TrainerPrior(model, pixelcnn_model, solver, train_loader, val_loader, monitor_train_loss,
monitor_train_recon, monitor_val_loss, monitor_val_recon, config, comm, eval=args.sample_from_pixelcnn)
num_epochs = config['prior']['train']['num_epochs']
if os.path.exists(config['model']['checkpoint']) and (args.load_checkpoint or args.sample_from_pixelcnn):
checkpoint_path = config['model']['checkpoint'] if not args.pixelcnn_prior else config['prior']['checkpoint']
trainer.load_checkpoint(checkpoint_path, msg='Parameters loaded from {}'.format(
checkpoint_path), pixelcnn=args.pixelcnn_prior, load_solver=not args.sample_from_pixelcnn)
if args.sample_from_pixelcnn:
trainer.random_generate(
args.sample_from_pixelcnn, args.sample_save_path)
return
for epoch in range(num_epochs):
trainer.train(epoch)
if epoch % config['val']['interval'] == 0 and val_loader != None:
trainer.validate(epoch)
if comm.rank == 0:
if epoch % config['train']['save_param_step_interval'] == 0 or epoch == config['train']['num_epochs']-1:
trainer.save_checkpoint(
config['model']['saved_models_dir'], epoch, pixelcnn=args.pixelcnn_prior)
if __name__ == '__main__':
parser = make_parser()
args = parser.parse_args()
config = read_yaml(os.path.join('configs', '{}.yaml'.format(args.data)))
ctx = get_extension_context(
config['extension_module'], device_id=config['device_id'])
nn.set_auto_forward(True)
if args.data == 'mnist':
data_iterator = mnist_iterator
elif args.data == 'imagenet':
data_iterator = imagenet_iterator
elif args.data == 'cifar10':
data_iterator = cifar10_iterator
else:
print('Dataset not recognized')
exit(1)
comm = CommunicatorWrapper(ctx)
nn.set_default_context(ctx)
monitor = None
if comm.rank == 0:
monitor = Monitor(config['monitor']['path'])
start_time = time.time()
acc = train(data_iterator, monitor, config, comm, args)
if comm.rank == 0:
end_time = time.time()
training_time = (end_time-start_time)/3600
print('Finished Training!')
print('Total Training time: {} hours'.format(training_time))
```
#### File: vq-vae/models/gated_pixel_cnn.py
```python
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import numpy as np
class GatedPixelCNN(object):
def __init__(self, config):
self.out_channels = config['out_channels']
self.num_layers = config['num_layers']
self.num_features = config['num_features']
self.num_classes = config['num_classes']
self.conditional = config['conditional']
self.input_shape = config['latent_shape']
    def mask_type_A(self, W):
        # Type-A mask: zero out the centre pixel and everything after it in raster
        # order, so the first layer cannot see the pixel it is predicting.
        c_x, c_y = W.shape[2]//2, W.shape[3]//2
        mask = np.ones(W.shape)
        mask[:, :, c_x, c_y+1:] = 0
        mask[:, :, c_x+1:, :] = 0
        mask[:, :, c_x, c_y] = 0
        mask = nn.Variable.from_numpy_array(mask)
        W = mask*W
        return W
    def mask_type_B(self, W):
        # Type-B mask: like type A, but the centre pixel itself stays visible,
        # which is allowed in all layers after the first.
        c_x, c_y = W.shape[2]//2, W.shape[3]//2
        mask = np.ones(W.shape)
        mask[:, :, c_x, c_y+1:] = 0
        mask[:, :, c_x+1:, :] = 0
        mask = nn.Variable.from_numpy_array(mask)
        W = mask*W
        return W
def gated_conv(self, x, kernel_shape, h=None, mask_type='', gated=True, payload=None, return_payload=False, scope_name='gated_conv'):
        # Convolution padding must be integral, hence floor division.
        pad_dim_0 = (kernel_shape[0]-1)//2
        pad_dim_1 = (kernel_shape[1]-1)//2
if mask_type == '':
mask_type = self.mask_type_B
with nn.parameter_scope(scope_name):
if gated:
out_f = PF.convolution(x, self.num_features, kernel_shape,
pad=(pad_dim_0, pad_dim_1), apply_w=mask_type, name='conv_f')
out_g = PF.convolution(x, self.num_features, kernel_shape,
pad=(pad_dim_0, pad_dim_1), apply_w=mask_type, name='conv_g')
if isinstance(payload, nn.Variable):
out_f += payload[:, :self.num_features, :, :]
out_g += payload[:, self.num_features:, :, :]
if self.conditional:
h_out_f = PF.affine(
h, self.num_features, name='h_out_f')
h_out_f = h_out_f.reshape(
(h_out_f.shape[0], h_out_f.shape[1], 1, 1))
h_out_g = PF.affine(
h, self.num_features, name='h_out_g')
h_out_g = h_out_g.reshape(
(h_out_g.shape[0], h_out_g.shape[1], 1, 1))
out = F.tanh(out_f+h_out_f) * F.sigmoid(out_g+h_out_g)
else:
out = F.tanh(out_f) * F.sigmoid(out_g)
if return_payload:
payload = PF.convolution(F.concatenate(
out_f, out_g, axis=1), 2*self.num_features, (1, 1), name='conv_1x1')
payload = F.relu(payload)
return out, payload
else:
out = PF.convolution(x, self.num_features, kernel_shape, stride=(1, 1),
pad=(pad_dim_0, pad_dim_1), apply_w=mask_type)
out = F.relu(out)
return out
def __call__(self, conv_in, h=None):
v_stack_in = conv_in
h_stack_in = conv_in
features = []
with nn.parameter_scope('ConditionalPixelCNN'):
for i in range(self.num_layers):
if i == 0:
kernel_shape = (7, 7)
mask_type = self.mask_type_A
residual = False
else:
kernel_shape = (3, 3)
mask_type = self.mask_type_B
residual = True
v_stack_gated, v_stack_conv = self.gated_conv(v_stack_in, kernel_shape, h, mask_type=mask_type, return_payload=True,
scope_name='vertical_stack_gated_'+str(i))
h_stack_gated = self.gated_conv(h_stack_in, (1, kernel_shape[0]), h, mask_type=mask_type,
payload=v_stack_conv, scope_name='horizontal_stack_gated_'+str(i))
h_stack_conv = self.gated_conv(h_stack_gated, (1, 1), h, mask_type=mask_type, gated=False,
scope_name='horizontal_stack_conv_'+str(i))
if residual:
h_stack_conv += h_stack_in
v_stack_in = v_stack_gated
h_stack_in = h_stack_conv
fc_1 = self.gated_conv(
h_stack_in, (1, 1), gated=False, scope_name='fc_1')
fc_2 = PF.convolution(fc_1, self.out_channels,
(1, 1), apply_w=self.mask_type_B, name='fc_2')
fc_2 = F.transpose(fc_2, (0, 2, 3, 1))
fc_2 = F.reshape(fc_2, (-1, fc_2.shape[-1]), inplace=True)
return fc_2
```
#### File: vq-vae/trainers/vq_vae_train.py
```python
import nnabla as nn
import nnabla.functions as F
import numpy as np
import os
from tqdm import trange
from .base import BaseTrainer
class VQVAEtrainer(BaseTrainer):
def __init__(self, model, solver, data_loader, val_data_loader, monitor_train_loss, monitor_train_recon,
monitor_val_loss, monitor_val_recon, config, comm):
super(VQVAEtrainer, self).__init__(solver, data_loader, val_data_loader, monitor_train_loss, monitor_train_recon,
monitor_val_loss, monitor_val_recon, config, comm)
self.model = model
self.train_recon_path = os.path.join(
config['monitor']['path'], config['monitor']['train_recon'])
self.val_recon_path = os.path.join(
config['monitor']['path'], config['monitor']['val_recon'])
os.makedirs(self.train_recon_path, exist_ok=True)
os.makedirs(self.val_recon_path, exist_ok=True)
if self.dataset_name != 'imagenet':
self.data_variance = np.var(
self.data_loader._data_source._images/255.0)
else:
self.data_variance = 1
def forward_pass(self, img_var, test=False):
vq_loss, img_recon, perplexity = self.model(img_var, test=test)
recon_loss = F.mean(F.squared_error(
img_recon, img_var))/self.data_variance
loss = recon_loss + vq_loss
return loss, recon_loss, perplexity, img_recon
def train(self, epoch):
pbar = trange(self.iterations_per_epoch//self.comm.n_procs,
desc='Train at epoch '+str(epoch), disable=self.comm.rank > 0)
epoch_loss = 0
if epoch in self.learning_rate_decay_epochs:
self.solver.set_learning_rate(
self.solver.learning_rate()*self.learning_rate_decay_factor)
for i in pbar:
data = self.data_loader.next()
if self.dataset_name == 'imagenet':
img_var = nn.Variable(data[0].shape)
img_var.data = data[0]
else:
img_var = self.convert_to_var(data[0])
loss, recon_loss, perplexity, img_recon = self.forward_pass(
img_var)
pbar.set_description('Batch Loss: {}'.format(loss.d))
epoch_loss += loss.d
self.solver.set_parameters(
nn.get_parameters(), reset=False, retain_state=True)
self.solver.zero_grad()
loss.backward(clear_buffer=True)
params = [x.grad for x in nn.get_parameters().values()]
self.comm.all_reduce(params, division=False, inplace=True)
self.solver.weight_decay(self.weight_decay)
self.solver.update()
avg_epoch_loss = epoch_loss/self.iterations_per_epoch
self.log_loss(epoch, avg_epoch_loss, train=True)
self.save_image(img_var, os.path.join(
self.train_recon_path, 'original_epoch_{}.png'.format(epoch)))
self.save_image(img_recon, os.path.join(
self.train_recon_path, 'recon_epoch_{}.png'.format(epoch)))
def validate(self, epoch):
pbar = trange(self.val_iterations_per_epoch,
desc='Validate at epoch '+str(epoch), disable=self.comm.rank > 0)
epoch_loss = 0
for i in pbar:
data = self.val_data_loader.next()
if self.dataset_name == 'imagenet':
img_var = nn.Variable(data[0].shape)
img_var.data = data[0]
else:
img_var = self.convert_to_var(data[0])
loss, _, _, img_recon = self.forward_pass(img_var, test=True)
pbar.set_description('Batch Loss: {}'.format(loss.d))
epoch_loss += loss.d
        avg_epoch_loss = epoch_loss/self.val_iterations_per_epoch
self.log_loss(epoch, avg_epoch_loss, train=False)
self.save_image(img_var, os.path.join(
self.val_recon_path, 'original_epoch_{}.png'.format(epoch)))
self.save_image(img_recon, os.path.join(
self.val_recon_path, 'recon_epoch_{}.png'.format(epoch)))
``` |
{
"source": "JonathanLehner/pyrender",
"score": 3
} |
#### File: pyrender/pyrender/texture.py
```python
import numpy as np
from OpenGL.GL import *
from .utils import format_texture_source
from .sampler import Sampler
class Texture(object):
"""A texture and its sampler.
Parameters
----------
name : str, optional
The user-defined name of this object.
sampler : :class:`Sampler`
The sampler used by this texture.
source : (h,w,c) uint8 or (h,w,c) float or :class:`PIL.Image.Image`
The image used by this texture. If None, the texture is created
empty and width and height must be specified.
source_channels : str
Either `D`, `R`, `RG`, `GB`, `RGB`, or `RGBA`. Indicates the
channels to extract from `source`. Any missing channels will be filled
with `1.0`.
width : int, optional
For empty textures, the width of the texture buffer.
height : int, optional
For empty textures, the height of the texture buffer.
tex_type : int
Either GL_TEXTURE_2D or GL_TEXTURE_CUBE.
data_format : int
For now, just GL_FLOAT.
"""
def __init__(self,
name=None,
sampler=None,
source=None,
source_channels=None,
width=None,
height=None,
tex_type=GL_TEXTURE_2D,
data_format=GL_FLOAT):
self.source_channels = source_channels
self.name = name
self.sampler = sampler
self.source = source
self.width = width
self.height = height
self.tex_type = tex_type
self.data_format = data_format
self._texid = None
self._is_transparent = False
@property
def name(self):
"""str : The user-defined name of this object.
"""
return self._name
@name.setter
def name(self, value):
if value is not None:
value = str(value)
self._name = value
@property
def sampler(self):
""":class:`Sampler` : The sampler used by this texture.
"""
return self._sampler
@sampler.setter
def sampler(self, value):
if value is None:
value = Sampler()
self._sampler = value
@property
def source(self):
"""(h,w,c) uint8 or float or :class:`PIL.Image.Image` : The image
used in this texture.
"""
return self._source
@source.setter
def source(self, value):
if value is None:
self._source = None
else:
self._source = format_texture_source(value, self.source_channels)
self._is_transparent = False
@property
def source_channels(self):
"""str : The channels that were extracted from the original source.
"""
return self._source_channels
@source_channels.setter
def source_channels(self, value):
self._source_channels = value
@property
def width(self):
"""int : The width of the texture buffer.
"""
return self._width
@width.setter
def width(self, value):
self._width = value
@property
def height(self):
"""int : The height of the texture buffer.
"""
return self._height
@height.setter
def height(self, value):
self._height = value
@property
def tex_type(self):
"""int : The type of the texture.
"""
return self._tex_type
@tex_type.setter
def tex_type(self, value):
self._tex_type = value
@property
def data_format(self):
"""int : The format of the texture data.
"""
return self._data_format
@data_format.setter
def data_format(self, value):
self._data_format = value
def is_transparent(self, cutoff=1.0):
"""bool : If True, the texture is partially transparent.
"""
if self._is_transparent is None:
self._is_transparent = False
if self.source_channels == 'RGBA' and self.source is not None:
if np.any(self.source[:,:,3] < cutoff):
self._is_transparent = True
return self._is_transparent
def delete(self):
"""Remove this texture from the OpenGL context.
"""
self._unbind()
self._remove_from_context()
##################
# OpenGL code
##################
def _add_to_context(self):
if self._texid is not None:
raise ValueError('Texture already loaded into OpenGL context')
fmt = GL_DEPTH_COMPONENT
if self.source_channels == 'R':
fmt = GL_RED
elif self.source_channels == 'RG' or self.source_channels == 'GB':
fmt = GL_RG
elif self.source_channels == 'RGB':
fmt = GL_RGB
elif self.source_channels == 'RGBA':
fmt = GL_RGBA
# Generate the OpenGL texture
self._texid = glGenTextures(1)
glBindTexture(self.tex_type, self._texid)
# Flip data for OpenGL buffer
data = None
width = self.width
height = self.height
if self.source is not None:
data = np.ascontiguousarray(np.flip(self.source, axis=0).flatten())
width = self.source.shape[1]
height = self.source.shape[0]
# Bind texture and generate mipmaps
glTexImage2D(
self.tex_type, 0, fmt, width, height, 0, fmt,
self.data_format, data
)
if self.source is not None:
glGenerateMipmap(self.tex_type)
if self.sampler.magFilter is not None:
glTexParameteri(
self.tex_type, GL_TEXTURE_MAG_FILTER, self.sampler.magFilter
)
else:
glTexParameteri(self.tex_type, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
if self.sampler.minFilter is not None:
glTexParameteri(
self.tex_type, GL_TEXTURE_MIN_FILTER, self.sampler.minFilter
)
else:
glTexParameteri(self.tex_type, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(self.tex_type, GL_TEXTURE_WRAP_S, self.sampler.wrapS)
glTexParameteri(self.tex_type, GL_TEXTURE_WRAP_T, self.sampler.wrapT)
glTexParameterfv(
self.tex_type, GL_TEXTURE_BORDER_COLOR,
np.ones(4).astype(np.float32)
)
# Unbind texture
glBindTexture(self.tex_type, 0)
def _remove_from_context(self):
if self._texid is not None:
# TODO OPENGL BUG?
# glDeleteTextures(1, [self._texid])
glDeleteTextures([self._texid])
self._texid = None
def _in_context(self):
return self._texid is not None
def _bind(self):
# TODO HANDLE INDEXING INTO OTHER UV's
glBindTexture(self.tex_type, self._texid)
def _unbind(self):
glBindTexture(self.tex_type, 0)
def _bind_as_depth_attachment(self):
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
self.tex_type, self._texid, 0)
def _bind_as_color_attachment(self):
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
self.tex_type, self._texid, 0)
``` |
{
"source": "jonathanleinola/radiohead-master",
"score": 3
} |
#### File: src/hex2dec/hex2dec.py
```python
import pandas as pd
import csv
import math
def hexToDec(hexNum):
    # Strip spaces and drop the first four hex characters, then convert the
    # remaining hex string into its decimal representation (returned as a string).
    hex = hexNum.replace(" ", "")
    hex = hex[4:]
    i = int(hex, 16)
    dec = str(i)
    return dec
def read_csv(filename):
df = pd.read_csv(filename, sep=',', header=0)
return df
#Take each row in the 'Datablock' column and convert to decimal value
def convertToDec(df):
L = []
total_rows = len(df['Datablock'])
for i in range(total_rows):
value = (df.loc[i, ['Datablock']])
hex=hexToDec(value[0])
L.append(hex)
return L
def writetocsv(filename, read_data):
read_data.to_csv(filename, sep=',', encoding='utf-8')
read_data = read_csv('../BRMSample/build/release/data/example.csv')
computed_decimal = convertToDec(read_data)
read_data['Decimal value'] = computed_decimal
writetocsv('../BRMSample/build/release/data/result.csv', read_data)
```
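For reference, a small check of `hexToDec` on a made-up datablock string (the value is illustrative; as noted above, the function strips spaces and ignores the first four hex characters before converting):
```python
# "00 00 01 F4" -> "000001F4" -> "01F4" -> 500
print(hexToDec("00 00 01 F4"))  # "500"
```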
#### File: src/rest_api/mir_coords_to_csv.py
```python
import time
import sys
import urllib3
from time import sleep
import json
import csv
import datetime
import requests
from datetime import datetime
import subprocess # just for changing file ownership at the end of script
http = urllib3.PoolManager()
###############################################################################
DURATION = 2000 # How many timestamps you want? it 100 takes 10s
TIMES = 10 # How many times per sec you want the timestamp
###############################################################################
### define filename to save timestamps (coords3.csv)
with open('coords.csv', mode='w') as csvfile: # open the csv file
writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
writer.writerow(["X", "Y", "orientation","timestamp"])
print ("Coord queries running, wait"),
print(DURATION/TIMES),
print ("s")
def main():
#######################################################################
### change the url localhost to match actual addrest for REST API calls
#######################################################################
url = 'http://192.168.12.20/api/v2.0.0/status' # url where to call the rest api
error=0
response = http.request('GET', url) # response values from REST API
### get the values from response jason object x,y,orientation ###
try:
x = json.loads(response.data)['position']['x']
y = json.loads(response.data)['position']['y']
orientation = json.loads(response.data)['position']['orientation']
    except KeyError:
        # Note: "except ... as error" would delete the name after this block,
        # breaking the "if error != 1" check below.
        error = 1
### get the timestamp %f')[:-3] gives second with 3 digits ###
timestamp = datetime.now().strftime('%Y/%m/%d %H:%M:%S.%f')[:-3]
### write the REST API values into csv file
if error != 1:
writer.writerow([x,y,orientation,timestamp])
else:
error=0
if __name__ == '__main__':
time_start = time.time()
i = 1
while True:
time_current = time.time()
if time_current > time_start + i / float(TIMES):
# print('{}: {}'.format(i, time_current))
main() # execute main function after every 100ms
i += 1
if i > DURATION: # break the prog when duration reached
break
print ("Coord queries done, have a nice day!")
################################################################################
### If issues with ownership of the file u can use subprocess.call function to
### execute shell commands such as:
### subprocess.call(['chown', '[user]:root','/home/user/Documents/coords3.csv'])
### change [user] to match username and the file path to correct folder
################################################################################
``` |
{
"source": "JonathanLeisner/EconomicDynamics",
"score": 4
} |
#### File: EconomicDynamics/pyfunctions/polyclass.py
```python
class Polynomial:
def __init__(self, coef):
"""Creates an instance p of the Polynomial class,
where p(x) = coef[0] x^0 + ... + coef[N] x^N."""
self.coef = coef
def evaluate(self, x):
y = sum(a*x**i for i, a in enumerate(self.coef))
return y
def differentiate(self):
new_coef = [i*a for i, a in enumerate(self.coef)]
# Remove the first element, which is zero
del new_coef[0]
# And reset coefficients data to new values
self.coef = new_coef
``` |
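A short usage example (not part of the original file):
```python
# p(x) = 1 + 2x + 3x^2
p = Polynomial([1, 2, 3])
print(p.evaluate(2))    # 1 + 4 + 12 = 17
p.differentiate()       # coefficients become [2, 6], i.e. p'(x) = 2 + 6x
print(p.evaluate(2))    # 2 + 12 = 14
```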
{
"source": "jonathan-lemos/easy-encrypt",
"score": 3
} |
#### File: jonathan-lemos/easy-encrypt/bufferedreader.py
```python
from typing import Iterable, Union
def combine(*arr: Union[bytes, Iterable[bytes]]) -> bytes:
def args():
for b in arr:
            # bytes/bytearray chunks pass through; anything else is an iterable of chunks.
            if isinstance(b, (bytes, bytearray)):
                yield b
            else:
                for ba in b:
                    yield ba
return b"".join(args())
class BufferedReader:
    """Byte reader over a file path, a bytes object, or an iterable of byte chunks."""
    def __init__(self, arg: Union[bytes, Iterable[bytes], str]):
if isinstance(arg, str):
self.file = open(arg, "rb")
self.bytestream, self.buf = None, None
elif isinstance(arg, bytes):
self.file = None
self.bytestream, self.buf = iter([arg]), bytearray()
else:
self.file = None
self.bytestream, self.buf = iter(arg), bytearray()
self.index = 0
def __enter__(self):
return self
def read(self, length: int = -1) -> bytes:
if self.file:
ret = self.file.read(length)
self.index += len(ret)
return ret
elif length < 0:
return combine(self.buf, self.bytestream)
else:
try:
while len(self.buf) < length and self.bytestream:
self.buf.extend(next(self.bytestream))
except StopIteration:
pass
ret = self.buf[:length]
self.buf = self.buf[length:]
return bytes(ret)
def chunks(self, size: int = 65536) -> Iterable[bytes]:
while len(buf := self.read(size)) != 0:
yield buf
def __exit__(self, exc_type, exc_val, exc_tb):
if self.file:
self.file.close()
```
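A quick usage sketch (illustrative only): the reader accepts raw bytes, an iterable of byte chunks, or a file path, and `chunks()` re-slices whatever it was given into fixed-size pieces.
```python
with BufferedReader(b"hello world") as r:
    print(list(r.chunks(4)))   # [b'hell', b'o wo', b'rld']

r2 = BufferedReader([b"ab", b"cde", b"f"])
print(r2.read(4), r2.read())   # b'abcd' b'ef'
```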
#### File: easy-encrypt/ciphers/cipherext.py
```python
import re
from ciphers.aes256gcmcipher import Aes256GcmCipher
from ciphers.chacha20poly1305cipher import ChaCha20Poly1305Cipher
from ciphers.cipher import Cipher
from securerandom import rand_unique_bytes
from typing import Dict, Iterable, List, Union, Optional, Tuple
import log
import sys
import b64
def __parse_aes256gcm(name: str, params: List[Tuple[str, Optional[str]]]) -> Aes256GcmCipher:
ret = Aes256GcmCipher()
if name not in {"aes256", "aes256gcm", "aes-256", "aes-256-gcm", "aes256-gcm"}:
raise ValueError(f"Given name '{name}' is not aes-256-gcm")
for key, value in params:
if key in {"iv", "nonce"}:
if value is None:
value = ''
ret.nonce = b64.decode(value)
if key in {"iv-len", "nonce-len"}:
if value is None:
log.warning(f"No value given for key '{key}'.")
iint = int(value)
ret.nonce = rand_unique_bytes(iint)
else:
log.warning(f"Unrecognized key '{key}' in params string.")
return ret
def __parse_chacha20(name: str, params: List[Tuple[str, Optional[str]]]) -> ChaCha20Poly1305Cipher:
ret = ChaCha20Poly1305Cipher()
if name not in {"chacha20poly1305", "chacha20-poly1305"}:
raise ValueError(f"Given name '{name}' is not aes-256-gcm")
for key, value in params:
if key in {"iv", "nonce"}:
if value is None:
value = ''
byt = b64.decode(value)
if len(byt) != 12:
log.error("Nonce must be 12 bytes")
sys.exit(0)
ret.nonce = byt
else:
log.warning(f"Unrecognized key '{key}' in params string.")
return ret
__cipher_switcher = {
"aes256": (Aes256GcmCipher.deserialize, __parse_aes256gcm),
"aes256gcm": (Aes256GcmCipher.deserialize, __parse_aes256gcm),
"aes-256": (Aes256GcmCipher.deserialize, __parse_aes256gcm),
"aes-256-gcm": (Aes256GcmCipher.deserialize, __parse_aes256gcm),
"aes256-gcm": (Aes256GcmCipher.deserialize, __parse_aes256gcm),
"chacha20-poly1305": (ChaCha20Poly1305Cipher.deserialize, __parse_chacha20),
"chacha20poly1305": (ChaCha20Poly1305Cipher.deserialize, __parse_chacha20),
}
def supported_ciphers() -> Iterable[str]:
return __cipher_switcher.keys()
def default_cipher() -> Cipher:
return Aes256GcmCipher()
def deserialize(props: Dict[str, Union[str, int, bool, None, Dict, List]]):
if "algorithm" not in props:
raise ValueError("Cipher dictionary must include 'algorithm' field.")
if props["algorithm"] not in supported_ciphers():
raise ValueError(f"The given cipher '{props['algorithm']}' is not supported.")
return __cipher_switcher[props["algorithm"]][0](props)
def from_option_string(s: str) -> Cipher:
s = s.strip().lower()
name = re.sub(r":.*$", "", s)
params = re.sub(r"^.*?:", "", s) if ":" in s else ""
    # Split "key=value,key2,..." into (key, value) pairs; a bare key becomes (key, None).
    par = [tuple(z) if len(z) == 2 else (z[0], None)
           for z in (x.strip().split("=", 1) if "=" in x else [x.strip()]
                     for x in params.split(",") if x.strip())]
    if name not in __cipher_switcher:
        raise ValueError(f"The given cipher algorithm '{name}' is not supported.")
return __cipher_switcher[name][1](name, par)
```
#### File: easy-encrypt/kdfs/argon2kdf.py
```python
import argon2
import b64
from typing import Optional, Dict, Union, List
from securerandom import rand_bytes
from kdfs.kdf import Kdf
class Argon2Kdf(Kdf):
@staticmethod
def sensitive():
return Argon2Kdf(12, 2 * 1024 * 1024, 8, argon2.Type.ID, rand_bytes(32))
@staticmethod
def fast():
return Argon2Kdf(2, 256 * 1024, 2, argon2.Type.ID, rand_bytes(16))
@staticmethod
def type_to_str(type: argon2.Type):
return {
argon2.Type.I: "argon2i",
argon2.Type.D: "argon2d",
argon2.Type.ID: "argon2id"
}[type]
@staticmethod
def str_to_type(type: str):
try:
return {
"argon2i": argon2.Type.I,
"argon2d": argon2.Type.D,
"argon2id": argon2.Type.ID
}[type]
except KeyError:
raise ValueError(f"Type must be one of ['argon2i', 'argon2d', 'argon2id'] (was '{type}')")
@staticmethod
def __type_to_int(type: argon2.Type):
return {
argon2.Type.I: 0,
argon2.Type.D: 1,
argon2.Type.ID: 2
}[type]
@staticmethod
def __int_to_type(type: int):
try:
return {
0: argon2.Type.I,
1: argon2.Type.D,
2: argon2.Type.ID
}[type]
except KeyError:
raise ValueError(f"The Argon2 type must be 0x0, 0x1, or 0x2 (was {hex(type)})")
@staticmethod
def deserialize(props: Dict[str, Union[str, int, bool, None, Dict, List]]) -> "Argon2Kdf":
ret = Argon2Kdf.sensitive()
base_keys = set(ret.serialize().keys())
if not base_keys.issubset(props.keys()):
raise ValueError(f"The properties dict is missing required keys {base_keys - props.keys()}")
ret.type = Argon2Kdf.str_to_type(props["algorithm"])
ret.version = props["version"]
ret.time_cost = props["time_cost"]
ret.memory_cost = props["memory_cost"]
ret.parallelism = props["parallelism"]
ret.salt = b64.decode(props["salt"])
return ret
def serialize(self) -> Dict[str, Union[str, int, bool, None, Dict, List]]:
return {
"algorithm": self.type_to_str(self.type),
"version": self.version,
"time_cost": self.time_cost,
"memory_cost": self.memory_cost,
"parallelism": self.parallelism,
"salt": b64.encode(self.salt),
}
def derive(self, password: str, out_len: int) -> bytes:
return argon2.low_level.hash_secret_raw(bytes(password, "utf-8"), self.salt, self.time_cost, self.memory_cost,
self.parallelism, out_len, self.type, self.version)
def __init__(self, time_cost: int, memory_cost: int, parallelism: int, type: argon2.Type,
salt: Optional[bytes] = None, version: int = argon2.low_level.ARGON2_VERSION):
self.time_cost = time_cost
self.memory_cost = memory_cost
self.parallelism = parallelism
self.type = type
self.salt = salt if salt is not None else rand_bytes(32)
self.version = version
```
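A short usage sketch (not from the original package), assuming `argon2-cffi` is installed; the password and output length are placeholders:
```python
kdf = Argon2Kdf.fast()                 # low-cost preset with a fresh random salt
key = kdf.derive("correct horse", 32)  # 32-byte derived key
print(len(key), kdf.serialize()["algorithm"])

# Round-tripping through the serialized parameters reproduces the same key.
kdf2 = Argon2Kdf.deserialize(kdf.serialize())
assert kdf2.derive("correct horse", 32) == key
```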
#### File: easy-encrypt/tests/chacha20poly1305ciphertest.py
```python
from ciphers.chacha20poly1305cipher import ChaCha20Poly1305Cipher
import unittest
from tests.symmetric import symmetric_test_env
class TestChaCha20Poly1305Cipher(unittest.TestCase):
def test_encrypt_decrypt(self):
key = bytes(range(32))
iv = bytes(range(12))
cipher = ChaCha20Poly1305Cipher(iv)
enc, dec = symmetric_test_env(lambda x: cipher.encrypt(key, x), lambda x: cipher.decrypt(key, x))
self.assertEqual(enc, dec)
```
#### File: easy-encrypt/tests/symmetric.py
```python
from typing import Callable, Iterable
def symmetric_test_env(encrypt: Callable[[Iterable[bytes]], Iterable[bytes]], decrypt: Callable[[Iterable[bytes]], Iterable[bytes]]):
data_chunks = [bytes(range(i, 71 + i)) for i in range(6)]
data_flat = b''.join(data_chunks)
enc = list(encrypt(data_chunks))
enc_flat = b''.join(enc)
enc_processed = [enc_flat[i: i + 67] for i in range(0, len(enc_flat), 67)]
dec = list(decrypt(filter(lambda x: x != b'', enc_processed)))
return data_flat, b''.join(dec)
``` |
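The helper deliberately re-chunks the ciphertext into 67-byte pieces before decrypting, so it checks that a cipher's streaming interface does not depend on chunk boundaries. A trivial smoke test with an identity "cipher" (illustrative only):
```python
plain, roundtrip = symmetric_test_env(lambda chunks: chunks, lambda chunks: chunks)
assert plain == roundtrip
```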
{
"source": "jonathanli2012/bloom_filter",
"score": 3
} |
#### File: jonathanli2012/bloom_filter/run_example.py
```python
import os
import subprocess
import time
#add_count < 1000
def write_test_file_with_cout(file_name, add_count, b_size):
open('test.cpp', 'w').close()
file1 = open('test/names.txt', 'r')
names = file1.readlines()
with open(file_name, 'a') as f:
print("#include \"src/bloom.h\"", file=f)
print("#include \"src/cityhash/city.h\"", file=f)
print("#include <iostream>", file=f)
print("\nusing namespace std;", file=f)
print("\nint main() {", file=f)
print(" BloomFilter *new_b = new BloomFilter(" + str(b_size) + ");", file=f)
for i in range(0, add_count):
print(" new_b->add_string(\"" + names[i][:-1] + "\");", file=f)
print(" std::cout << \"filter print: \" << new_b->expose_filter() << \"\\n\";", file=f)
#print(" std::cout << \"filter print: \\n\";", file=f)
for i in range(0, 1000):
print(" std::cout << \"filter lookup: \" << \"" +
names[i][:-1] + " \" << new_b->lookup(\"" + names[i][:-1] + "\") << \"\\n\";", file=f)
print(" return 0;", file=f)
print("}", file=f)
def write_test_file_without_cout(file_name, add_count, b_size):
open('test.cpp', 'w').close()
file1 = open('test/names.txt', 'r')
names = file1.readlines()
with open(file_name, 'a') as f:
print("#include \"src/bloom.h\"", file=f)
print("#include \"src/cityhash/city.h\"", file=f)
print("#include <iostream>", file=f)
print("\nusing namespace std;", file=f)
print("\nint main() {", file=f)
print(" BloomFilter *new_b = new BloomFilter(" + str(b_size) + ");", file=f)
for i in range(0, add_count):
print(" new_b->add_string(\"" + names[i][:-1] + "\");", file=f)
for i in range(0, 1000):
print(" new_b->lookup(\"" + names[i][:-1] + "\");", file=f)
print(" return 0;", file=f)
print("}", file=f)
def run_trace(add_count, b_size):
write_test_file_without_cout("test_fast.cpp", add_count, b_size)
write_test_file_with_cout("test.cpp", add_count, b_size)
start_compile = time.time()
os.system("g++ -Ofast src/cityhash/city.cc src/bloom.cpp test_fast.cpp")
start_run = time.time()
os.system("./a.out")
end_run = time.time()
os.system("g++ src/cityhash/city.cc src/bloom.cpp test.cpp")
os.system("./a.out > export.txt")
os.remove("test_fast.cpp")
os.remove("test.cpp")
os.remove("./a.out")
file1 = open('export.txt', 'r')
names = file1.readlines()
print("") # new line
print("compile time: " + str(start_run-start_compile))
print("run time: " + str(end_run-start_run))
count = 0
for i in names:
if(i.startswith("filter lookup:")):
if(i.endswith("1\n")):
count += 1
correct = True
for i in names[1:add_count + 1]:
if(i.endswith("0\n")):
print("filter correctness test failed")
correct = False
if(correct):
print("filter correctness test passed")
print("filter lookup miss rate: " + str((count - add_count)/(1000-add_count)))
print("filter lookup miss count: " + str(count - add_count))
# comment this out to remove export view
os.remove("export.txt")
#add_count = int(input("input value 1-999 for strings inserted: "))
#b_size = int(input("input value 1-6710886 for bloom filter size: "))
#run_trace(add_count, b_size)
for i in range(1,900,200):
print("add_count: " + str(i))
run_trace(i, 12)
``` |
{
"source": "Jonathan-Liesch/Black-Litterman",
"score": 2
} |
#### File: Jonathan-Liesch/Black-Litterman/Black-Litterman.py
```python
import numpy as np
def get_mkt_weights(stock_price_vec, shares_outstanding_vec):
MCs = stock_price_vec * shares_outstanding_vec
total_MC = np.sum(MCs)
return MCs/total_MC
def get_Pi(lam, sigma, mkt_weights):
return lam * np.matmul(sigma, mkt_weights)
def get_BL_Vec(stock_price_vec, shares_outstanding_vec, Sigma, lam, tau, P, Q, Omega):
    """Black-Litterman posterior:
    Mu_bar    = [(tau*Sigma)^-1 + P' Omega^-1 P]^-1 [(tau*Sigma)^-1 Pi + P' Omega^-1 Q]
    Sigma_bar = Sigma + [(tau*Sigma)^-1 + P' Omega^-1 P]^-1
    where Pi = lam * Sigma * w_mkt are the equilibrium excess returns.
    """
    assert tau > 0, "tau <= 0"
    w_mkt = get_mkt_weights(stock_price_vec, shares_outstanding_vec)
    Pi = get_Pi(lam, Sigma, w_mkt)
    inv_tSigma = np.linalg.inv(tau*Sigma)
    if Omega.ndim == 1:
        # Single view: P is a length-n vector, Omega a length-1 array.
        inv_Omega = 1/Omega
        # P' Omega^-1 P is the rank-one outer product, not a scalar inner product.
        M = inv_tSigma + np.outer(P, inv_Omega * P)
        inv_M = np.linalg.inv(M)
        right = np.matmul(inv_tSigma, Pi) + P*inv_Omega*Q
        Mu_bar = np.matmul(inv_M, right)
        Sigma_bar = Sigma + inv_M
        return Sigma_bar, Mu_bar
    else:
        # Multiple views: P is (k, n), Omega is (k, k).
        inv_Omega = np.linalg.inv(Omega)
        M = inv_tSigma + np.matmul(P.T, np.matmul(inv_Omega, P))
        inv_M = np.linalg.inv(M)
        right = np.matmul(inv_tSigma, Pi) + np.matmul(P.T, np.matmul(inv_Omega, Q))
        Mu_bar = np.matmul(inv_M, right)
        Sigma_bar = Sigma + inv_M
        return Sigma_bar, Mu_bar
def get_Markowitz_Portfolio(return_vec, Sigma, desired_return):
"""
min wSw st wR = r, w1 = 1
"""
y = np.array([1, desired_return])
one = np.array([1] * len(return_vec))
inv_Sigma = np.linalg.inv(Sigma)
a = np.matmul(one, np.matmul(inv_Sigma, one))
b = np.matmul(return_vec, np.matmul(inv_Sigma, one))
c = np.matmul(one, np.matmul(inv_Sigma, return_vec))
d = np.matmul(return_vec, np.matmul(inv_Sigma, return_vec))
A = np.array([[a,b],[c,d]])
inv_A = np.linalg.inv(A)
lagrangian_multipliers = np.matmul(inv_A, y)
weights = lagrangian_multipliers[0] * np.matmul(inv_Sigma, one) + \
lagrangian_multipliers[1] * np.matmul(inv_Sigma, return_vec)
return weights
def get_Markowitz_Portfolio_Vec(return_vec, Sigma, desired_return):
weights = get_Markowitz_Portfolio(return_vec, Sigma, desired_return)
mean = np.matmul(return_vec, weights)
std = np.matmul(weights, np.matmul(Sigma, weights))**0.5
return std, mean
###############
# Main
###############
sp_vec = np.array([2, 2])
so_vec = np.array([10000, 10000])
Sigma = np.array([[0.025, 0],[0, 0.025]]) # covariance of excess returns
lam = 5
P = np.array([1, -1])
Q = np.array([0.02])
Omega = np.array([0.25])
tau = 1
Sigma_bar, Mu_bar = get_BL_Vec(sp_vec, so_vec,Sigma,lam,tau,P,Q,Omega)
from matplotlib import pyplot as plt
plt.style.use('seaborn')
desired_returns = np.linspace(min(Mu_bar), max(Mu_bar), 100)
for desired_return in desired_returns:
std, mean = get_Markowitz_Portfolio_Vec(Mu_bar, Sigma_bar, desired_return)
plt.scatter(std, mean, c='teal')
plt.show()
``` |
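For reference, the quantities implemented above written out in standard notation (a textbook summary, not taken from the repository). The Black-Litterman posterior is
```latex
\Pi = \lambda \Sigma w_{\mathrm{mkt}},\qquad
M = (\tau\Sigma)^{-1} + P^{\top}\Omega^{-1}P,\qquad
\bar{\mu} = M^{-1}\left[(\tau\Sigma)^{-1}\Pi + P^{\top}\Omega^{-1}Q\right],\qquad
\bar{\Sigma} = \Sigma + M^{-1}
```
and the minimum-variance weights for a target return r are
```latex
w^{*} = \lambda_{1}\,\Sigma^{-1}\mathbf{1} + \lambda_{2}\,\Sigma^{-1}\mu,
\qquad
\begin{pmatrix}\lambda_{1}\\ \lambda_{2}\end{pmatrix}
=
\begin{pmatrix}
\mathbf{1}^{\top}\Sigma^{-1}\mathbf{1} & \mu^{\top}\Sigma^{-1}\mathbf{1}\\
\mathbf{1}^{\top}\Sigma^{-1}\mu & \mu^{\top}\Sigma^{-1}\mu
\end{pmatrix}^{-1}
\begin{pmatrix}1\\ r\end{pmatrix}
```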
{
"source": "Jonathan-Liesch/Mean-Variance-Optimizer",
"score": 3
} |
#### File: Jonathan-Liesch/Mean-Variance-Optimizer/stockDataClean.py
```python
import pandas as pd
import os
import re
import math
from scipy import stats
def csv_files(stock_folder):
csvs = []
for file_name in os.listdir("./" + stock_folder):
if os.path.splitext(file_name)[1] == '.csv':
csvs.append(file_name)
assert len(csvs) > 0, "Add stocks data to folder"
return csvs
def get_raw_df(stock_folder):
df = pd.DataFrame()
stock_file_paths = [stock_folder+"/"+stock for stock in csv_files(stock_folder)]
for stock_file in stock_file_paths:
stock_csv = re.split("/", stock_file)[-1]
ticker = re.split("\.", stock_csv)[0]
file = open(stock_file, 'r')
stock_df = pd.read_csv(file)
file.close()
stock_df = stock_df[['Date', 'Adj Close']]
stock_df[ticker] = stock_df['Adj Close'].pct_change()
stock_df.drop(columns = ['Adj Close'], inplace=True)
if df.empty:
df = stock_df
continue
df = pd.merge(df, stock_df, how='outer', on='Date')
return df
def get_start_date(raw_df):
return raw_df['Date'][1]
def get_end_date(raw_df):
return raw_df['Date'].iloc[-1]
def get_trading_days(raw_df):
return len(raw_df)
def cleaned_df(raw_df):
raw_df.drop(0, inplace=True)
assert not raw_df.isnull().values.any(), "Inconsistent Dates or NaN values"
return raw_df.drop("Date", axis = 1)
def get_df(stock_folder):
return cleaned_df(get_raw_df(stock_folder))
def stock_mean_daily(df):
df = df.values + 1
adj_mean = stats.gmean(df, axis=0)
return adj_mean - 1
def stock_mean_annualized(df):
mean_daily = stock_mean_daily(df)
return (((mean_daily + 1)**253) - 1)
def covariance_matrix_daily(df):
return df.cov().values
def covariance_matrix_annualized(df):
return covariance_matrix_daily(df)*253
def stock_std_daily(df):
return df.std().values
def stock_std_annualized(df):
return stock_std_daily(df)*math.sqrt(253)
def stocks(df):
return df.columns.values
################################
# Main
################################
stock_folder = "Sample Stock Data"
df = get_df(stock_folder)
stocks = stocks(df)
mu = stock_mean_annualized(df)
std = stock_std_annualized(df)
sigma = covariance_matrix_annualized(df)
``` |
{
"source": "JonathanLima25/epidemiological_data",
"score": 3
} |
#### File: app/controllers/default.py
```python
from flask import render_template, request
from app import app, db
from app.models.tables import Epidemiologico, Doenca
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/cadastro_doenca', methods=['GET', 'POST'])
def cadastro_doenca():
if request.method == 'POST':
doenca = Doenca(request.form['nome'], request.form['sintomas'])
db.session.add(doenca)
db.session.commit()
return render_template('cadastro_doenca.html')
@app.route('/cadastro_epidemiologico', methods=['GET' , 'POST'])
def cadastro_epidemiologico():
if request.method == 'POST':
epidemio = Epidemiologico(request.form['data_coleta'], request.form['doenca_associada'])
db.session.add(epidemio)
db.session.commit()
#return redirect(url_for('index'))
return render_template('cadastro_epidemiologico.html')
@app.route('/visualizacao_doencas', methods=['GET'])
def visualizacao_doencas():
doenca = Doenca.query.all()
return render_template('visualizacao_doencas.html', doenca=doenca)
@app.route('/visualizacao_epidemiologica', methods=['GET'])
def visualizacao_epidemiologica():
Epidemio = Epidemiologico.query.all()
return render_template('visualizacao_epidemiologica.html', Epidemio=Epidemio)
``` |
{
"source": "Jonathan-Lindbloom/bayesian-projects",
"score": 3
} |
#### File: Portfolio Allocation/stoch_vol_student_t/forecasting.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pymc3 as pm
import os
import sys
from model import build_model, load_trace
def compute_forecast(trace, fdays=100):
nsamps = trace["nu"].shape[0] # number of MCMC samples
# samples of nu
nus = trace["nu"]
# samples of mu
mus = trace["mu"]
# samples of the chol fact of the cov matrix
chols = trace["chol2"]
# Now, because of shape issues we need some new code to generate multiple samples across the different MCMC samples
# # Generate samples from the standard multivariate normal distribution.
dim = len(mus[0])
samps_per_param = fdays # this can be seen as number of days
zero_means = np.zeros(mus.shape)
u = np.random.multivariate_normal(np.zeros(dim), np.eye(dim),
size=(len(zero_means), samps_per_param,))
# u has shape (len(means), nsamples, dim)
# # Transform u.
v = np.einsum('ijk,ikl->ijl', u, chols)
m = np.expand_dims(zero_means, 1)
t = v + m
# Now t is distributed N(0, Cov) but is 3-dim, which is what we want
# Now we need the normalization constants, which are sqrt(U/nu) where U is chi^2_nu distributed
prefac = np.random.chisquare(nus)
prefac /= nus
prefac = np.sqrt(prefac)
# Now broadcast to the N(0, Cov) samples
offset = t/prefac[:,None,None]
# Now add the true mean
samps = mus[:,None,:] + offset
raw_returns = samps
# Calculate cumulative gains for each algo
cum_returns = (1.0 + samps[:,:,:]).cumprod(axis=1) - 1.0
# Slice out the cumulative gain at the final time
ending_returns = cum_returns[:,-1,:]
return raw_returns, cum_returns, ending_returns
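# Sketch of the scale-mixture identity used above: if z ~ N(0, Sigma) and u ~ chi^2_nu,
# then mu + z / sqrt(u / nu) is multivariate Student-t with nu degrees of freedom,
# whose covariance (for nu > 2) is Sigma * nu / (nu - 2). Names below are illustrative only.
def _scale_mixture_demo(nu=5.0, ndraws=200000):
    sigma = np.array([[1.0, 0.3], [0.3, 2.0]])
    chol = np.linalg.cholesky(sigma)
    z = np.random.standard_normal((ndraws, 2)) @ chol.T   # z ~ N(0, sigma)
    u = np.random.chisquare(nu, size=(ndraws, 1))         # u ~ chi^2_nu
    t_samps = z / np.sqrt(u / nu)
    # The empirical covariance should be close to sigma * nu / (nu - 2)
    return np.cov(t_samps, rowvar=False), sigma * nu / (nu - 2.0)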
def plot_ending_ret_hist(ending_returns, fdays=None):
nalgos = ending_returns.shape[1]
fig, axs = plt.subplots(nalgos,figsize=(13,3*nalgos))
    if fdays is None:
fig.suptitle("Ending Cumulative Return", fontsize=14)
else:
fig.suptitle("Ending Cumulative Return, {} days in the future".format(fdays), fontsize=14)
for j in range(nalgos):
num_entries = len(ending_returns[:,j])
lower = 100*np.quantile(ending_returns[:,j], 0.05)
upper = 100*np.quantile(ending_returns[:,j], 0.95)
num_nonneg = np.sum(ending_returns[:,j] >= 0)
prob_gain = 100*(num_nonneg/num_entries)
axs[j].hist(ending_returns[:,j], bins=100)
axs[j].set_xlabel("Algo {}. 95% CI is [{:.1f}%,{:.1f}%]. Prob(gain) = {:.1f}%".format(j, lower, upper, prob_gain))
axs[j].set_ylabel("Freq")
plt.tight_layout()
os.chdir(sys.path[0])
    if fdays is None:
plt.savefig("ending_ret_distributions.png", dpi=250)
else:
plt.savefig("ending_ret_distributions_fdays_{}.png".format(fdays), dpi=250)
def plot_cum_returns(cum_returns, num=100):
nalgos = cum_returns.shape[2]
nsamps = cum_returns.shape[0]
fig, axs = plt.subplots(nalgos,figsize=(13,3*nalgos))
fig.suptitle("Cumulative Return", fontsize=14)
for j in range(nalgos):
for i in range(num):
axs[j].plot(cum_returns[-i,:,j], color="blue", alpha=0.1)
axs[j].set_xlabel("Algo {}".format(j))
axs[j].set_ylabel("Return")
plt.tight_layout()
os.chdir(sys.path[0])
plt.savefig("cum_returns.png", dpi=250)
if __name__ == "__main__":
# Load data
os.chdir(sys.path[0])
log_rets = pd.read_csv("log_returns.csv", index_col="Date", parse_dates=True)
data = log_rets.values
# Build model
model = build_model(data)
# Get trace
trace = load_trace(model)
# Calculate forecast
fdays=100
raw_returns, cum_returns, ending_returns = compute_forecast(trace, fdays=fdays)
# Generate plots
plot_ending_ret_hist(ending_returns, fdays=fdays)
plot_cum_returns(cum_returns, num=1000)
``` |
{
"source": "Jonathan-Lindbloom/sampi",
"score": 4
} |
#### File: sampi/optimization/basic.py
```python
import numpy as np
def bisection(func, a=0, b=1, tol=1e-6, maxits=1000, which="min"):
"""Implements a basic bisection search on the function f over the interval [a,b].
Args:
f (func): the function to optimize.
a (float): left end-point to optimize over.
b (float): the right end-point to optimize over.
tol (float): relative tolerance for when to stop iterating.
maxits (int): maximum number of iterations.
"""
if which == "max":
func = lambda x: func(-x)
if func(a)*func(b) >= 0:
raise ValueError("Bisection failed... f(a) and f(b) must be of oposite sign!")
if func(a) == 0:
return a
if func(b) == 0:
return b
for i in range(maxits):
midpoint = (b+a)/2
f_mid = func(midpoint)
f_a = func(a)
f_b = func(b)
if f_a*f_mid < 0:
b = midpoint
elif f_b*f_mid < 0:
a = midpoint
elif f_mid == 0:
return midpoint
        if np.abs((b+a)/2 - midpoint) < tol:
            return (b+a)/2
def newton(f, df, x0, epsilon, maxiter):
x = x0
for n in range(0, maxiter):
f_x = f(x)
if abs(f_x) < epsilon:
return x
        df_x = df(x)
        if df_x == 0:
            # Stop rather than divide by zero; Newton's method cannot proceed from here
            print("Error: derivative is zero")
            return None
        x -= f_x/df_x
return x
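# Usage sketch: f(x) = x**2 - 2 has a root at sqrt(2) on [0, 2]; both solvers
# should converge to roughly 1.41421356.
if __name__ == "__main__":
    f = lambda x: x**2 - 2
    df = lambda x: 2*x
    print(bisection(f, a=0, b=2, tol=1e-8))
    print(newton(f, df, x0=1.0, epsilon=1e-8, maxiter=100))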
```
#### File: sampi/plotting/basic.py
```python
import numpy as np
import matplotlib.pyplot as plt
def bands_plot(domain, bands, ax=None, which=[(1.0, 99.0), (12.5, 87.5)]):
    created_fig = ax is None
    if created_fig:
        fig, ax = plt.subplots(figsize=(10,10))
for j, pair in enumerate(which):
alpha = (100 - (pair[1]-pair[0]))/100
alpha *= 0.5
alpha += 0.3
ax.fill_between(domain, bands[pair[0]], bands[pair[1]], alpha=alpha, color="blue", label="{:.0f}% CI".format((pair[1]-pair[0])))
ax.plot(domain, bands[50.0], label="Median", color="blue", alpha=1.0)
    if created_fig:
        return fig, ax
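# Usage sketch: credible-interval bands for simulated random walks. The percentile
# keys mirror the defaults that bands_plot expects (1.0/99.0, 12.5/87.5 and 50.0).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    samples = rng.normal(size=(500, 100)).cumsum(axis=1)   # (nsamples, ntime)
    domain = np.arange(samples.shape[1])
    bands = {p: np.percentile(samples, p, axis=0) for p in [1.0, 12.5, 50.0, 87.5, 99.0]}
    fig, ax = bands_plot(domain, bands)
    plt.show()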
```
#### File: sampi/plotting/util.py
```python
import numpy as np
def compute_bands(vals, levels=[1.0, 5.5, 12.5, 25.0, 50.0, 75.0, 87.5, 94.5, 99.0]):
'''
Given an array with shape (x, y, z) where x is the dimension for samples, y is the
    dimension for time, and z is the dimension for variables, computes the corresponding percentile bands for credible intervals.
'''
def scoreatpercentile(vals, p):
return np.percentile(vals, p, axis=0)
    perc = {p: scoreatpercentile(vals, p) for p in levels}
    return perc
```
#### File: sampi/sdes/base.py
```python
from sampi.sdes.util import draw_brownian
from sampi.sdes.solvers import euler_maruyama
import numpy as np
class StochDiffEq():
def __init__(self, drift=lambda x, t : 1, diffusion=lambda x, t : 1, true_sol=None, eqn=""):
self.drift = drift
self.diffusion = diffusion
self.true_sol = true_sol
self.eqn = eqn
def draw_true(self, x0, t, nsamps=1):
wt = draw_brownian(t, nsamps=nsamps)
t_copied = np.repeat(t[:, np.newaxis], nsamps, axis=1)
return self.true_sol(x0, t_copied, wt)
def solve(self, x0, t_end, method="euler-maruyama", nsamps=1, **kwargs):
"""General solver to call other methods from.
Args:
            x0 (array): the initial value X_0.
            t_end (float): the time to solve the SDE up until.
            method (str): the numerical scheme to use (currently only "euler-maruyama").
            nsamps (int): the number of sample paths to simulate.
        Returns:
            (x_sol, t): the simulated sample paths and the corresponding time grid.
"""
if method == "euler-maruyama":
return euler_maruyama(self.drift, self.diffusion, x0, t_end, nsamps=nsamps, **kwargs)
```
#### File: sampi/sdes/catalog.py
```python
from sampi.sdes.base import StochDiffEq
import numpy as np
def basic_linear(a=1, b=1):
"""Returns a function implementing the explicit solution to the SDE
dX_t = a X_t dt + b X_t dW_t.
"""
drift = lambda x, t : a*x
diffusion = lambda x, t : b*x
true_sol = lambda x0, t, wt : x0*np.exp( (a - 0.5*b*b)*t + b*wt)
return StochDiffEq(drift=drift, diffusion=diffusion, true_sol=true_sol, eqn="dX_t = a X_t dt + b X_t dW_t")
def kloeden_4_15(a=1):
"""Returns a function implementing the explicit solution to the SDE
dX_t = 0.5*a*(a-1)*(X_t)^(1 - 2/a) dt + a*(X_t)^(1 - 1/a) dW_t.
Taken from (Kloden & Platen, 1992), page 120.
"""
drift = lambda x, t : 0.5*a*(a-1)*(x**(1 - 2/a))
diffusion = lambda x, t : a*(x**(1 - 1/a))
true_sol = lambda x0, t, wt : (wt + x0**(1/a))**a
return StochDiffEq(drift=drift, diffusion=diffusion, true_sol=true_sol, eqn="dX_t = 0.5*a*(a-1)*(X_t)^(1 - 2/a) dt + a*(X_t)^(1 - 1/a) dW_t")
def kloeden_4_16(a=1):
"""Returns a function implementing the explicit solution to the SDE
dX_t = 0.5*a^2 X_t dt + a X_t dW_t.
Taken from (Kloden & Platen, 1992), page 120.
"""
drift = lambda x, t : 0.5*a*a*x
diffusion = lambda x, t : a*x
true_sol = lambda x0, t, wt : x0*np.exp(a*wt)
return StochDiffEq(drift=drift, diffusion=diffusion, true_sol=true_sol, eqn="dX_t = 0.5*a^2 X_t dt + a X_t dW_t")
def kloeden_4_20(a=1):
"""Returns a function implementing the explicit solution to the SDE
dX_t = -0.5*a^2 X_t dt - a*sqrt(1 - (X_t)^2) dW_t
Taken from (Kloden & Platen, 1992), page 121.
"""
drift = lambda x, t : -0.5*a*a*x
diffusion = lambda x, t : -a*np.sqrt(1 - x**2)
true_sol = lambda x0, t, wt : np.cos(a*wt + np.arccos(x0))
return StochDiffEq(drift=drift, diffusion=diffusion, true_sol=true_sol, eqn="dX_t = -0.5*a^2 X_t dt - a*sqrt(1 - (X_t)^2) dW_t")
def double_well():
"""Returns a SDE object implementing the drift and diffusion functions for SDE
dX_t = 4*(X_t - (X_t)^3) dt + dW_t
Taken from (Saarkk et. al, 2019), page 269.
"""
drift = lambda x, t : 4*(x - x**3)
diffusion = lambda x, t : 1
true_sol = None
return StochDiffEq(drift=drift, diffusion=diffusion, true_sol=true_sol, eqn="dX_t = 4*(x - x^3) dt + dW_t")
def garcia_m3():
"""Returns a SDE object implementing the drift and diffusion functions for the SDE
dX_t = -(X_t)**3 dt + (0.2 + x**2) dW_t
Taken from (Garcia et. al, 2017).
"""
drift = lambda x, t : -x**3
diffusion = lambda x, t : (0.2 + x**2)
true_sol = None
return StochDiffEq(drift=drift, diffusion=diffusion, true_sol=true_sol, eqn="dX_t = -(X_t)**3 dt + (0.2 + x**2) dW_t")
```
#### File: sampi/sdes/solvers.py
```python
from sampi.sdes.util import draw_brownian
import numpy as np
def euler_maruyama(drift, diffusion, x0, t_end, dt=0.01, nsamps=1):
"""Basic Euler-Maruyama method to solve an SDE.
"""
t = np.arange(0, t_end+dt, dt)
wt = draw_brownian(t, nsamps=nsamps)
x_sol = np.zeros((len(t), nsamps))
x_sol[0,:] = x0
wt_diff = np.diff(wt, axis=0)
for i, tval in enumerate(t):
if i != 0:
x_sol[i,:] = x_sol[i-1,:] + drift(x_sol[i-1,:], t[i-1])*dt + diffusion(x_sol[i-1,:], t[i-1])*wt_diff[i-1,:]
return x_sol, t
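# Usage sketch (assumes draw_brownian behaves as imported above): simulate a few
# geometric-Brownian-motion-style paths and inspect the output shapes.
if __name__ == "__main__":
    drift = lambda x, t: 0.05 * x
    diffusion = lambda x, t: 0.2 * x
    paths, t = euler_maruyama(drift, diffusion, x0=1.0, t_end=1.0, dt=0.01, nsamps=10)
    print(paths.shape, t.shape)   # roughly (len(t), nsamps) and (len(t),)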
``` |
{
"source": "jonathanlintott/spectree",
"score": 2
} |
#### File: spectree/plugins/flask_plugin.py
```python
from pydantic import ValidationError
from .base import BasePlugin, Context
from .page import PAGES
class FlaskPlugin(BasePlugin):
blueprint_state = None
FORM_MIMETYPE = ("application/x-www-form-urlencoded", "multipart/form-data")
def find_routes(self):
from flask import current_app
if self.blueprint_state:
excludes = [
f"{self.blueprint_state.blueprint.name}.{ep}"
for ep in ["static", "openapi"] + [f"doc_page_{ui}" for ui in PAGES]
]
for rule in current_app.url_map.iter_rules():
if (
self.blueprint_state.url_prefix
and not str(rule).startswith(self.blueprint_state.url_prefix)
or str(rule).startswith("/static")
):
continue
if rule.endpoint in excludes:
continue
yield rule
else:
for rule in current_app.url_map.iter_rules():
if any(
str(rule).startswith(path)
for path in (f"/{self.config.PATH}", "/static")
):
continue
yield rule
def bypass(self, func, method):
if method in ["HEAD", "OPTIONS"]:
return True
return False
def parse_func(self, route):
from flask import current_app
if self.blueprint_state:
func = self.blueprint_state.app.view_functions[route.endpoint]
else:
func = current_app.view_functions[route.endpoint]
# view class: https://flask.palletsprojects.com/en/1.1.x/views/
if getattr(func, "view_class", None):
cls = getattr(func, "view_class")
for method in route.methods:
view = getattr(cls, method.lower(), None)
if view:
yield method, view
else:
for method in route.methods:
yield method, func
def parse_path(self, route):
from werkzeug.routing import parse_converter_args, parse_rule
subs = []
parameters = []
for converter, arguments, variable in parse_rule(str(route)):
if converter is None:
subs.append(variable)
continue
subs.append(f"{{{variable}}}")
args, kwargs = [], {}
if arguments:
args, kwargs = parse_converter_args(arguments)
schema = None
if converter == "any":
schema = {
"type": "array",
"items": {
"type": "string",
"enum": args,
},
}
elif converter == "int":
schema = {
"type": "integer",
"format": "int32",
}
if "max" in kwargs:
schema["maximum"] = kwargs["max"]
if "min" in kwargs:
schema["minimum"] = kwargs["min"]
elif converter == "float":
schema = {
"type": "number",
"format": "float",
}
elif converter == "uuid":
schema = {
"type": "string",
"format": "uuid",
}
elif converter == "path":
schema = {
"type": "string",
"format": "path",
}
elif converter == "string":
schema = {
"type": "string",
}
for prop in ["length", "maxLength", "minLength"]:
if prop in kwargs:
schema[prop] = kwargs[prop]
elif converter == "default":
schema = {"type": "string"}
parameters.append(
{
"name": variable,
"in": "path",
"required": True,
"schema": schema,
}
)
return "".join(subs), parameters
def request_validation(self, request, query, json, headers, cookies):
req_query = request.args or {}
if request.mimetype in self.FORM_MIMETYPE:
req_json = request.form or {}
if request.files:
req_json = dict(
list(request.form.items()) + list(request.files.items())
)
else:
req_json = request.get_json(silent=True) or {}
req_headers = request.headers or {}
req_cookies = request.cookies or {}
request.context = Context(
query.parse_obj(req_query.items()) if query else None,
json.parse_obj(req_json.items()) if json else None,
headers.parse_obj(req_headers.items()) if headers else None,
cookies.parse_obj(req_cookies.items()) if cookies else None,
)
def validate(
self, func, query, json, headers, cookies, resp, before, after, *args, **kwargs
):
from flask import abort, jsonify, make_response, request
response, req_validation_error, resp_validation_error = None, None, None
try:
self.request_validation(request, query, json, headers, cookies)
if self.config.ANNOTATIONS:
for name in ("query", "json", "headers", "cookies"):
if func.__annotations__.get(name):
kwargs[name] = getattr(request.context, name)
except ValidationError as err:
req_validation_error = err
response = make_response(jsonify(err.errors()), 422)
before(request, response, req_validation_error, None)
if req_validation_error:
after(request, response, req_validation_error, None)
abort(response)
response = make_response(func(*args, **kwargs))
if resp and resp.has_model():
model = resp.find_model(response.status_code)
if model:
try:
model.parse_obj(response.get_json())
except ValidationError as err:
resp_validation_error = err
response = make_response(
jsonify({"message": "response validation error"}), 500
)
after(request, response, resp_validation_error, None)
return response
def register_route(self, app):
from flask import Blueprint, jsonify
app.add_url_rule(
self.config.spec_url,
"openapi",
lambda: jsonify(self.spectree.spec),
)
if isinstance(app, Blueprint):
def gen_doc_page(ui):
spec_url = self.config.spec_url
if self.blueprint_state.url_prefix is not None:
spec_url = "/".join(
(
self.blueprint_state.url_prefix.rstrip("/"),
self.config.spec_url.lstrip("/"),
)
)
return PAGES[ui].format(spec_url)
for ui in PAGES:
app.add_url_rule(
f"/{self.config.PATH}/{ui}",
f"doc_page_{ui}",
lambda ui=ui: gen_doc_page(ui),
)
app.record(lambda state: setattr(self, "blueprint_state", state))
else:
for ui in PAGES:
app.add_url_rule(
f"/{self.config.PATH}/{ui}",
f"doc_page_{ui}",
lambda ui=ui: PAGES[ui].format(self.config.spec_url),
)
```
#### File: spectree/tests/test_plugin_starlette.py
```python
from random import randint
import pytest
from starlette.applications import Starlette
from starlette.endpoints import HTTPEndpoint
from starlette.responses import JSONResponse
from starlette.routing import Mount, Route
from starlette.staticfiles import StaticFiles
from starlette.testclient import TestClient
from spectree import Response, SpecTree
from .common import JSON, Cookies, Headers, Query, Resp, StrDict
def before_handler(req, resp, err, instance):
if err:
resp.headers["X-Error"] = "Validation Error"
def after_handler(req, resp, err, instance):
resp.headers["X-Validation"] = "Pass"
def method_handler(req, resp, err, instance):
resp.headers["X-Name"] = instance.name
api = SpecTree(
"starlette", before=before_handler, after=after_handler, annotations=True
)
class Ping(HTTPEndpoint):
name = "Ping"
@api.validate(
headers=Headers,
resp=Response(HTTP_200=StrDict),
tags=["test", "health"],
after=method_handler,
)
def get(self, request):
"""summary
description"""
return JSONResponse({"msg": "pong"})
@api.validate(
query=Query,
json=JSON,
cookies=Cookies,
resp=Response(HTTP_200=Resp, HTTP_401=None),
tags=["api", "test"],
)
async def user_score(request):
score = [randint(0, request.context.json.limit) for _ in range(5)]
score.sort(reverse=request.context.query.order)
assert request.context.cookies.pub == "abcdefg"
assert request.cookies["pub"] == "abcdefg"
return JSONResponse({"name": request.context.json.name, "score": score})
@api.validate(
resp=Response(HTTP_200=Resp, HTTP_401=None),
tags=["api", "test"],
)
async def user_score_annotated(request, query: Query, json: JSON, cookies: Cookies):
score = [randint(0, json.limit) for _ in range(5)]
score.sort(reverse=query.order)
assert cookies.pub == "abcdefg"
assert request.cookies["pub"] == "abcdefg"
return JSONResponse({"name": json.name, "score": score})
app = Starlette(
routes=[
Route("/ping", Ping),
Mount(
"/api",
routes=[
Mount(
"/user",
routes=[
Route("/{name}", user_score, methods=["POST"]),
],
),
Mount(
"/user_annotated",
routes=[
Route("/{name}", user_score_annotated, methods=["POST"]),
],
),
],
),
Mount("/static", app=StaticFiles(directory="docs"), name="static"),
]
)
api.register(app)
@pytest.fixture
def client():
with TestClient(app) as client:
yield client
def test_starlette_validate(client):
resp = client.get("/ping")
assert resp.status_code == 422
assert resp.headers.get("X-Error") == "Validation Error", resp.headers
resp = client.get("/ping", headers={"lang": "en-US"})
assert resp.json() == {"msg": "pong"}
assert resp.headers.get("X-Error") is None
assert resp.headers.get("X-Name") == "Ping"
assert resp.headers.get("X-Validation") is None
for fragment in ("user", "user_annotated"):
resp = client.post(f"/api/{fragment}/starlette")
assert resp.status_code == 422
assert resp.headers.get("X-Error") == "Validation Error"
resp = client.post(
f"/api/{fragment}/starlette?order=1",
json=dict(name="starlette", limit=10),
cookies=dict(pub="abcdefg"),
)
resp_body = resp.json()
assert resp_body["name"] == "starlette"
assert resp_body["score"] == sorted(resp_body["score"], reverse=True)
assert resp.headers.get("X-Validation") == "Pass"
resp = client.post(
f"/api/{fragment}/starlette?order=0",
json=dict(name="starlette", limit=10),
cookies=dict(pub="abcdefg"),
)
resp_body = resp.json()
assert resp_body["name"] == "starlette"
assert resp_body["score"] == sorted(resp_body["score"], reverse=False)
assert resp.headers.get("X-Validation") == "Pass"
def test_starlette_doc(client):
resp = client.get("/apidoc/openapi.json")
assert resp.json() == api.spec
resp = client.get("/apidoc/redoc")
assert resp.status_code == 200
resp = client.get("/apidoc/swagger")
assert resp.status_code == 200
``` |
{
"source": "Jonathan-Livingston-Seagull/cerebro-dl",
"score": 3
} |
#### File: cerebro/baseline/logistic_regression.py
```python
import theano
import theano.tensor as T
import numpy
from sklearn.base import BaseEstimator, ClassifierMixin
from ..optimizer.gradient_descent import MiniBatchGradientDescent
from cerebro.models.logistic_regression_model import LogisticRegressionModel
class LogisticRegression(BaseEstimator, ClassifierMixin):
"""Linear logistic regression model
Parameters
----------
learning_rate : float
The learning rate. Defaults to 6
The learning rate is normalized with respect to the batch_size as learning_rate / batch_size
batch_size : integer
Number of samples to use for one iteration. Defaults to 600.
n_iter : integer
The number of gradient updates (aka epochs). Defaults to 1000.
"""
def __init__(self, learning_rate=6, batch_size=600, n_iter=1000):
self.batch_size = batch_size
self.learning_rate = learning_rate
self.n_iter = n_iter
self.model_ = None
self.var_x = T.matrix('x')
self.var_y = T.ivector('y')
def fit(self, x, y):
"""Fit model with batch stochastic gradient descent
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples]
Class labels of training samples
Returns
-------
self : an instance of self
"""
n_features = x.shape[1]
n_classes = len(numpy.unique(y))
train_set_x = theano.shared(numpy.asarray(x, dtype=theano.config.floatX), borrow=True)
train_set_y = theano.shared(numpy.asarray(y, dtype=numpy.int32), borrow=True)
self.model_ = LogisticRegressionModel(self.var_x, self.var_y, n_features, n_classes)
optimizer = MiniBatchGradientDescent(self.model_, self.n_iter, self.learning_rate, self.batch_size, 0)
optimizer.fit(train_set_x, train_set_y)
return self
def predict_proba(self, x):
"""Probability estimates for each class.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Samples of which class should be predicted
Returns
-------
p : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model.
"""
n_features = self.model_.shape[0]
if x.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (x.shape[1], n_features))
test_set_x = theano.shared(numpy.asarray(x, dtype=theano.config.floatX), borrow=True)
pred_prob = self.model_.p_y_given_x
classify = theano.function(inputs=[], outputs=pred_prob, givens={self.var_x: test_set_x})
result = classify()
return result
def predict(self, x):
"""Predict class labels of samples in x
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Samples of which class should be predicted
Returns
-------
c : array-like, shape = [n_samples]
Predicted class labels per sample
"""
n_features = self.model_.shape[0]
if x.shape[1] != n_features:
raise ValueError("X has %d features per sample; expecting %d"
% (x.shape[1], n_features))
test_set_x = theano.shared(numpy.asarray(x, dtype=theano.config.floatX), borrow=True)
y_pred = self.model_.predict_function()
classify = theano.function(inputs=[], outputs=y_pred, givens={self.var_x: test_set_x})
result = classify()
return numpy.asarray(result)
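# Usage sketch (assumes Theano and the cerebro package are importable as above):
# fit the wrapper on a small synthetic two-class problem; the numbers are illustrative.
if __name__ == "__main__":
    rng = numpy.random.RandomState(0)
    x = rng.randn(600, 20)
    y = (x[:, 0] > 0).astype(numpy.int32)
    clf = LogisticRegression(learning_rate=1.0, batch_size=100, n_iter=50)
    clf.fit(x, y)
    print(clf.predict(x[:10]))
    print(clf.predict_proba(x[:10]).shape)   # expected (10, 2)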
```
#### File: cerebro/convnets/lenet.py
```python
import theano
import theano.tensor as T
import numpy
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from ..optimizer.gradient_descent import MiniBatchGradientDescent
from cerebro.models import lenet_model
class LeNet(BaseEstimator, ClassifierMixin):
"""Lenet classifier
Parameters
----------
    learning_rate : float
        The learning rate. Defaults to 0.1.
    batch_size : integer
        Number of samples to use for one iteration. Defaults to 500.
    n_epochs : integer
        The number of passes over the training data (aka epochs). Defaults to 100.
"""
def __init__(self, learning_rate=0.1, batch_size=500, n_epochs=100, nkerns=[20, 50], filter_sizes=[5,5], pool_sizes=[2,2], fully_connected_n_output=500, random_state=None):
self.batch_size = batch_size
self.learning_rate = learning_rate
self.n_epochs = n_epochs
self.nkerns = nkerns
self.filter_sizes = filter_sizes
self.pool_sizes = pool_sizes
self.fully_connected_n_output = fully_connected_n_output
self.model_ = None
self.var_x = T.matrix('x')
self.var_y = T.ivector('y')
def fit(self, x, y, validation_x=None, validation_y=None):
"""Fit model with batch stochastic gradient descent
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples]
Class labels of training samples
validation_x : array-like
Validation data set
validation_y : array_like
Corresponding class labels of validation set
Returns
-------
self : an instance of self
"""
self.n_features = numpy.int32(x.shape[1])
self.n_classes = len(numpy.unique(y))
self.model_ = lenet_model.LeNetModel(self.var_x, self.var_y, self.nkerns, self.batch_size, self.n_features, self.n_classes, self.filter_sizes, self.pool_sizes, self.fully_connected_n_output)
        # No early-stopping criterion is configured here; validation data, if supplied,
        # is simply passed through to the optimizer below.
        stopping_criteria = None
train_set_x = theano.shared(numpy.asarray(x, dtype=theano.config.floatX), borrow=True)
train_set_y = theano.shared(numpy.asarray(y, dtype=numpy.int32), borrow=True)
optimizer = MiniBatchGradientDescent(self.model_, self.n_epochs, self.learning_rate, self.batch_size, 0,
stopping_criteria=stopping_criteria)
optimizer.fit(train_set_x, train_set_y, validation_x=validation_x, validation_y=validation_y)
return self
def predict_proba(self, x):
"""Probability estimates for each class.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Samples of which class should be predicted
Returns
-------
p : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model.
"""
n_samples = x.shape[0]
n_train_batches = n_samples // self.batch_size
test_set_x = theano.shared(numpy.asarray(x, dtype=theano.config.floatX), borrow=True)
var_index = T.lscalar()
givens = {self.var_x: test_set_x[var_index * self.batch_size:(var_index + 1) * self.batch_size]}
pred_prob = self.model_.predict_prob()
classify = theano.function([var_index], outputs=pred_prob, givens=givens, name="predict_probability")
result = []
for batch_index in range(n_train_batches):
result.append(classify(batch_index))
return numpy.reshape(result, newshape=(n_samples, self.n_classes))
def predict(self, x):
"""Predict class labels of samples in x
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Samples of which class should be predicted
Returns
-------
c : array-like, shape = [n_samples]
Predicted class labels per sample
"""
n_samples = x.shape[0]
n_train_batches = n_samples // self.batch_size
test_set_x = theano.shared(numpy.asarray(x, dtype=theano.config.floatX), borrow=True)
var_index = T.lscalar()
givens = {self.var_x: test_set_x[var_index * self.batch_size:(var_index + 1) * self.batch_size]}
pred_prob = self.model_.predict_function()
classify = theano.function([var_index], outputs=pred_prob, givens=givens, name="predict_probability")
result = []
for batch_index in range(n_train_batches):
result.append(classify(batch_index))
return numpy.reshape(result, newshape=(n_samples, ))
``` |
{
"source": "Jonathan-Livingston-Seagull/DDPG",
"score": 3
} |
#### File: DDPG/ddpg-pendulam/pendulam_train_script.py
```python
import gym, roboschool
import random
import numpy as np
from collections import deque
import tensorflow as tf
import matplotlib.pyplot as plt
from ddpg_agent_tf import Agent
ENV_NAME = 'RoboschoolInvertedPendulum-v1'
env = gym.make(ENV_NAME)
env.seed(2)
agent = Agent(state_size=5, action_size=1, random_seed=2)
def ddpg(n_episodes=10000, max_t=3000, print_every=100):
scores_deque = deque(maxlen=print_every)
scores = []
for i_episode in range(1, n_episodes + 1):
state = env.reset()
score = 0
for t in range(max_t):
action = agent.act([state])[0]
next_state, reward, done, _ = env.step(action)
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_deque.append(score)
scores.append(score)
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), end="")
        if i_episode % print_every == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
return scores
#scores = ddpg()
with tf.device("/gpu:0"):
scores = ddpg()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores) + 1), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
``` |
{
"source": "Jonathan-Llovet/google_foobar",
"score": 4
} |
#### File: Level_1/01_Prison_Labor_Dodgers/solution.py
```python
def solution(x, y):
"""Returns ID that is only present in one of the two lists passed as args
Args:
x: list of prisoner IDs
y: list of prisoner IDs
Returns:
int value of the additional prisoner ID
"""
try:
a = set(x)
b = set(y)
except TypeError:
raise TypeError("Args must be lists of IDs")
c = a.symmetric_difference(b)
# c is a set containing the ID that is only present in one of the lists
if len(c) == 0:
raise ValueError("Args have same set of IDs. " +
"One additional ID expected.")
if len(c) > 1:
raise ValueError("More than one additional ID " +
"found: %s One expected." % list(c))
return c.pop()
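# Example: the second roster has one extra prisoner ID, which solution() recovers.
print solution([5, 9, 8, 7], [5, 9, 8, 7, 6])  # prints 6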
```
#### File: Level_2/power-hungry/solution.py
```python
def solution(xs):
"""Returns integer representing maximum power output of solar panel array
Args:
xs: List of integers representing power output of
each of the solar panels in a given array
"""
negatives = []
smallest_negative = None
positives = []
contains_panel_with_zero_power = False
if isinstance(xs, list) and not xs:
raise IndexError("xs must be a non-empty list. Empty list received.")
for x in xs:
if x > 0:
positives.append(x)
if x < 0:
if not smallest_negative or abs(smallest_negative) > abs(x):
smallest_negative = x
negatives.append(x)
if x == 0:
contains_panel_with_zero_power = True
if not positives and len(negatives) == 1:
# Best-case scenario is zero power output for panel array. Looking bad.
if contains_panel_with_zero_power:
max_power = 0
else:
# Panel array is draining power. Ouch.
max_power = negatives.pop()
return str(max_power)
# Ensures panels with negative outputs are in pairs to take
# advantage of the panels' wave stabilizer, which makes paired
# negative-output panels have a positive output together
if positives and len(negatives) % 2 != 0:
negatives.remove(smallest_negative)
max_power = 1 # initialize for multiplication
panel_outputs = positives
panel_outputs.extend(negatives)
for output in panel_outputs:
max_power *= output
return str(max_power)
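# Examples: negative outputs pair up to give a positive contribution, and with an
# odd count the weakest negative panel is dropped whenever positives exist.
print solution([2, -3, 1, 0, -5])  # "30" (2 * 1 * -3 * -5)
print solution([2, -3])            # "2"  (the lone negative panel is dropped)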
```
#### File: Level_3/find-the-access-codes/solution2.py
```python
import random
import timeit
def solution(l):
"""Returns the number of access codes that are present in the list l
The access codes contained in the list l are 'lucky triples'.
'Lucky triples' are defined as a tuple (x, y, z) where x divides y and y
divides z, and where the indices (xi, yj, zk) satisfy i < j < k, such as
(1, 2, 4).
Args:
l: List of integers potentially containing access codes
Returns:
An integer indicating how many access codes are present in list l
If no access codes are found, 0 is returned.
"""
access_codes = 0
if len(l) < 3:
return access_codes
l1 = l
l2 = sorted(list(set(l1)), reverse=True)
candidates = []
if 1 in l and 1 in l[l.index(1)+1:]:
for k in l2:
if len(l) == 3:
if k == 1:
continue
if is_in_order(l, 1, 1, k):
if (1, 1, k) not in candidates:
candidates.append((1, 1, k))
if 1 in l2 and len(l2) < 3:
if len(l2) == 1:
candidates.append((1,1,1))
if len(l2) == 2:
if 1 in l[l.index(1)+1:]:
if is_in_order(l, 1, 1, l2[0]):
if (1, 1, l2[0]) not in candidates:
candidates.append((1, 1, l2[0]))
elif l2[0] in l[l.index(l2[0])+1:]:
if is_in_order(l, 1, l2[0], l2[0]):
if (1, l2[0], l2[0]) not in candidates:
candidates.append((1, l2[0], l2[0]))
for k in l2:
for j in l2:
if k % j == 0:
for i in l2:
if j % i == 0:
i, j, k = sorted([i, j, k])
if is_in_order(l, i, j, k):
if (i, j, k) not in candidates:
candidates.append((i, j, k))
access_codes = len(candidates)
return access_codes
def is_in_order(l, x, y, z):
try:
x_index = l.index(x)
if x_index == 0:
y_index = l[x_index + 1:].index(y) + 1
else:
y_index = x_index + l[x_index + 1:].index(y) + 1
z_index = y_index + l[y_index + 1:].index(z) + 1
x_before_y = x_index < y_index
y_before_z = y_index < z_index
return x_before_y and y_before_z
except ValueError:
return False
```
#### File: Level_3/find-the-access-codes/solution.py
```python
def solution(l):
"""Returns the number of access codes that are present in the list l
The access codes contained in the list l are 'lucky triples'.
'Lucky triples' are defined as a tuple (x, y, z) where x divides y and y
divides z, and where the indices (xi, yj, zk) satisfy i < j < k, such as
(1, 2, 4).
Args:
l: List of integers potentially containing access codes
Returns:
An integer indicating how many access codes are present in list l
If no access codes are found, 0 is returned.
"""
access_codes = 0
if len(l) < 3:
return access_codes
calculated = dict({})
for i in range(len(l)):
if str(l[i]) not in calculated.keys():
calculated.update({str(l[i]): [i]})
else:
indices = calculated[str(l[i])]
indices.append(i)
calculated.update({str(l[i]): indices})
int_keys = [int(key) for key in calculated.keys()]
unique_values = sorted(int_keys, reverse=True)
secret_key_candidates = []
for k in unique_values:
for j in unique_values:
if k % j == 0:
if j == 1:
i = k
else:
i = k / j
if i in unique_values:
candidate = sorted([i, j, k])
if candidate not in secret_key_candidates:
secret_key_candidates.append(candidate)
# iterate over candidates and check indices against calculated
key_candidates = verify_secret_key_candidate_positions(secret_key_candidates, calculated, (0,1))
secret_keys = verify_secret_key_candidate_positions(key_candidates, calculated, (1,2))
access_codes = len(secret_keys)
return access_codes
def verify_secret_key_candidate_positions(secret_key_candidates, calculated, indices=(0,1)):
for candidate in secret_key_candidates:
i_value = candidate[indices[0]]
j_value = candidate[indices[1]]
li = calculated[str(i_value)] # list of indices from l
lj = calculated[str(j_value)]
keep_candidate = None
for i_index in li:
for j_index in lj:
if i_index < j_index:
keep_candidate = True
break
if keep_candidate:
break
else:
secret_key_candidates.remove(candidate)
return secret_key_candidates
def test(l, expected):
print "solution({})".format(str(l))
print "************************************************"
result = solution(l)
print "************************************************"
print "expected: {}".format(expected)
if result == expected:
print "solution({})".format(str(l))
print "PASSED"
else:
print "solution({})".format(str(l))
print "FAILED"
print "\n"
test([1, 1], 0)
test([1, 1, 2], 1)
test([1, 1, 1, 1], 1)
test([1, 1, 2, 1, 1], 2)
test([1, 1, 2, 1, 1, 2], 3)
test([1, 1, 2, 4, 1, 2], 5)
test([1, 1, 2, 1], 2)
test([1, 2, 3, 4, 5, 6], 3)
test([1, 1, 1], 1)
test([2, 2, 2], 1)
test([2, 4, 2], 0)
test([2, 4, 12], 1)
test([2, 4, 8], 1)
test([2, 4, 8, 12, 16], 5)
test([6,5,3,2,1], 0)
test([], 0)
test([3,2,5,2,1,6,8],2)
test([31, 31, 1, 4, 2, 62], 2)
```
#### File: Level_3/the-grandest-staircase-of-them-all/scratch3.py
```python
def solution(n):
bricks = n
max_sum_elements_of_branches = dict({})
def get_viable_children(parent):
first_child = parent - 1
smallest_child = 1
for child in range(first_child, 0, -1):
if str(child) in max_sum_elements_of_branches.keys():
continue
max_sum_branch_children = child * (child + 1) / 2
if max_sum_branch_children <= n:
smallest_child = child + 1
break
max_sum_elements_of_branches.update({str(child): max_sum_branch_children})
midway = (first_child + smallest_child) / 2
branch_children = {
"first_child": first_child,
"smallest_child": smallest_child,
"midway": midway
}
return branch_children
tree = get_viable_children(bricks)
# midway = (len(max_sum_elements_of_branches.keys()) + 1) / 2
# midway = (first_child + smallest_child) / 2
print tree
print "max_sum_elements_of_branches: {}".format(max_sum_elements_of_branches)
# print "children: {}".format(sorted([int(key) for key in max_sum_elements_of_branches.keys()]))
print "first_child: {}".format(tree["first_child"])
# print "first_child: {}".format(first_child)
print "smallest_child: {}".format(tree["smallest_child"])
# print "smallest_child: {}".format(smallest_child)
print "midway: {}".format(tree["midway"])
# print "midway: {}".format(midway)
# for n in range(201):
for n in range(1,21):
print "solution({})".format(n)
solution(n)
print "******************************************"
```
#### File: Level_3/the-grandest-staircase-of-them-all/scratch4.py
```python
def solution(n):
bricks = n
viable = get_viable_configurations(bricks)
options = Graph(viable["midway"])
print viable
print options
# viable_configurations = get_viable_configurations(bricks)
# midway = (len(max_sums_of_branch_elements.keys()) + 1) / 2
# midway = (largest_child + smallest_child) / 2
max_sums_of_branch_elements = dict({})
# def solution(n):
# graph = Graph(n)
# graph.traverse_children(graph.root)
# return graph.leaves
class Graph:
def __init__(self, bricks):
self.size = 0
self.bricks = bricks
self.root = Node(bricks)
# self.root = Node(value=root, parent=None, remaining_from_parent=root)
# self.leaves = 0
def add_node(self, value):
if self.root == None:
self.root = Node(value)
else:
self.root.add_child(value, self.root)
self.size += 1
# def traverse_children(self, node):
# if not node.children and node.remaining == 0:
# self.leaves += 1
# for child in node.children:
# self.traverse_children(child)
class Node:
def __init__(self, value, parent=None):
self.value = value
if parent != None:
self.parent = parent.value
else:
self.parent = parent
self.left = None
self.right = None
def add_child(self, value, parent):
options = get_viable_configurations(value)
if value > parent.value:
# Go to the right
if parent.right == None:
parent.right = Node(value, parent)
return
return add_child(value, parent.right)
else:
# Go to the left
if parent.value > options["largest_child"]:
return
if parent.left == None:
parent.left = Node(value, parent)
return
return add_child(value, parent.left)
def get_viable_configurations(self, parent):
if parent == None:
branch_children = {
"largest_child": None,
"smallest_child": None,
"midway": None
}
# Possibly throw an error
return branch_children
largest_child = parent.value - 1
smallest_child = 1
for child in range(largest_child, 0, -1):
max_sum_branch_children = child * (child + 1) / 2
if max_sum_branch_children <= parent.value:
smallest_child = child + 1
break
if str(child) in max_sums_of_branch_elements.keys():
continue
max_sums_of_branch_elements.update({str(child): max_sum_branch_children})
midway = (largest_child + smallest_child) / 2
branch_children = {
"largest_child": largest_child,
"smallest_child": smallest_child,
"midway": midway
}
return branch_children
# class Node:
# def __init__(self, value, parent, is_left):
# self.value = value
# get_viable_configurations(value)
# self.parent = parent
# if not parent:
# self.remaining = value
# else:
# self.remaining = remaining_from_parent - self.value
# self.children = []
# self.set_children()
# def __str__(self):
# return str(self.__dict__)
# # return "Node: value: {} parent: {} children: {}".format(self.value, self.parent, self.children)
"""
def get_viable_configurations(parent):
largest_child = parent - 1
smallest_child = 1
for child in range(largest_child, 0, -1):
max_sum_branch_children = child * (child + 1) / 2
if max_sum_branch_children <= parent:
smallest_child = child + 1
break
if str(child) in max_sums_of_branch_elements.keys():
continue
max_sums_of_branch_elements.update({str(child): max_sum_branch_children})
midway = (largest_child + smallest_child) / 2
branch_children = {
"largest_child": largest_child,
"smallest_child": smallest_child,
"midway": midway
}
return branch_children
def set_children(self):
if not self.parent:
range_bound = (self.value//2) + 1
else:
range_bound = self.remaining
for i in range(0, range_bound):
if self.value == 1:
continue
child_value = self.remaining - i
parent_value = self.value
remaining = self.remaining
# if remaining - child_value > child_value:
# continue
if child_value < parent_value:
if child_value <= self.remaining:
child = Node(child_value, parent_value, remaining)
self.children.append(child)
"""
# print tree
"""
print "max_sums_of_branch_elements: {}".format(max_sums_of_branch_elements)
# print "children: {}".format(sorted([int(key) for key in max_sums_of_branch_elements.keys()]))
print "largest_child: {}".format(tree["largest_child"])
# print "largest_child: {}".format(largest_child)
print "smallest_child: {}".format(tree["smallest_child"])
# print "smallest_child: {}".format(smallest_child)
print "midway: {}".format(tree["midway"])
# print "midway: {}".format(midway)
"""
# for n in range(201):
for n in range(1,21):
print "solution({})".format(n)
solution(n)
print "******************************************"
```
#### File: Level_3/the-grandest-staircase-of-them-all/scratch5.py
```python
from collections import deque as deque
# for a given int n, what is the maximum sum of all ints in range(1, n+1)?
# for a given remaining_bricks, how many ways are there to combine decreasing integers to equal it?
def solution(n):
bricks = n
options = 0
if bricks < 3:
print "options: {}".format(options)
return options
# calculated = dict({})
branch_capacities = dict({})
for num in range(bricks, 1, -1):
max_sum_branch_children = num * (num + 1) / 2
# if max_sum_branch_children <= bricks:
# break
branch_capacities.update({str(num): max_sum_branch_children})
minimum_size_child_branches = dict({})
# for num in range(bricks, 1, -1):
# for capacity in sorted(map(int, branch_capacities.keys())):
# (num * (num + 1) / 2)
# smallest size branch root nodes that can support remaining bricks
child_deque = deque()
children = get_children(bricks)
children_values = children["children_values"]
options += children["options"]
child_deque.extend(children_values)
# print child_deque
# print "len(child_deque): {}".format(len(child_deque))
while child_deque:
# print "running through next layer"
grandchildren_deque = deque()
for child in child_deque:
# print "child: {}".format(child)
node = child[0]
remaining_bricks = child[1]
# print child
children = get_children(node,
remaining_bricks,
branch_capacities=branch_capacities,
minimum_size_child_branches=minimum_size_child_branches,
options=options)
children_values = children["children_values"]
options = children["options"]
# print children
grandchildren_deque.extend(children_values)
child_deque = grandchildren_deque
# child_deque.extend(get_children(node, remaining_bricks, options=options))
# print "bricks: {} child: {} remaining_bricks: {} grandchildren: {}".format(bricks, child, bricks-child, get_children(child, bricks-child))
# print "number of grandchildren: {}".format(len(get_children(child, bricks-child)))
# print child_deque
print "len(child_deque): {}".format(len(child_deque))
print "options: {}".format(options)
return options
def get_children(node, remaining_bricks=None, branch_capacities=dict({}), minimum_size_child_branches=dict({}), options=0):
# grandchildren_exist = False
if remaining_bricks == None:
remaining_bricks = node
children_values = []
for child in range(1, remaining_bricks+1):
# print "handling child: {} with remaining_bricks: {}".format(child, remaining_bricks)
if child == remaining_bricks:
options += 1
if str(child) in branch_capacities.keys():
# print "retrieving child: {} from branch_capacities: {}".format(child, branch_capacities)
child_branch_capacity = branch_capacities[str(child)]
# print "subbranch capacity of {}: {}".format(child, child_branch_capacity)
# print "subbranch capacity (calculated) of {}: {}".format(child, child * (child + 1) / 2)
else:
child_branch_capacity = (child * (child + 1) / 2)
if child_branch_capacity < remaining_bricks:
continue
# print "incrementing options: {}".format(options)
if child <= 3:
continue
# print "examining child: {} with remaining_bricks: {}".format(child, remaining_bricks)
children_values.append((child, remaining_bricks - child))
return dict({"children_values": children_values, "options": options})
# for n in range(1,100):
print "solution({})".format(200)
solution(200)
print "************************************"
# for n in range(1,100):
# print "solution({})".format(n)
# solution(n)
# print "************************************"
```
#### File: Level_3/the-grandest-staircase-of-them-all/solution.py
```python
def solution(n):
graph = Graph(n)
graph.traverse_children(graph.root)
return graph.leaves
class Graph:
def __init__(self, root):
self.root = Node(value=root, parent=None, remaining_from_parent=root)
self.leaves = 0
def traverse_children(self, node):
if not node.children and node.remaining == 0:
self.leaves += 1
for child in node.children:
self.traverse_children(child)
class Node:
def __init__(self, value, parent, remaining_from_parent):
self.value = value
self.parent = parent
if not parent:
self.remaining = value
else:
self.remaining = remaining_from_parent - self.value
self.children = []
self.set_children()
def __str__(self):
return str(self.__dict__)
# return "Node: value: {} parent: {} children: {}".format(self.value, self.parent, self.children)
def set_children(self):
if not self.parent:
range_bound = (self.value//2) + 1
else:
range_bound = self.remaining
for i in range(0, range_bound):
if self.value == 1:
continue
child_value = self.remaining - i
parent_value = self.value
remaining = self.remaining
if child_value < parent_value:
if child_value <= self.remaining:
child = Node(child_value, parent_value, remaining)
self.children.append(child)
for n in range(1,201):
print solution(n)
``` |
{
"source": "jonathanlloyd/posthog",
"score": 2
} |
#### File: posthog/api/decide.py
```python
import json
import secrets
from typing import Any, Dict, List, Optional, Tuple, Union
from urllib.parse import urlparse
from django.conf import settings
from django.http import HttpRequest, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from posthog.auth import PersonalAPIKeyAuthentication
from posthog.models import FeatureFlag, Team
from posthog.utils import base64_to_json, cors_response, load_data_from_request
def _get_token(data, request):
if request.POST.get("api_key"):
return request.POST["api_key"]
if request.POST.get("token"):
return request.POST["token"]
if "token" in data:
return data["token"] # JS reloadFeatures call
if "api_key" in data:
return data["api_key"] # server-side libraries like posthog-python and posthog-ruby
return None
def on_permitted_domain(team: Team, request: HttpRequest) -> bool:
permitted_domains = ["127.0.0.1", "localhost"]
for url in team.app_urls:
hostname = parse_domain(url)
if hostname:
permitted_domains.append(hostname)
return (parse_domain(request.headers.get("Origin")) in permitted_domains) or (
parse_domain(request.headers.get("Referer")) in permitted_domains
)
def decide_editor_params(request: HttpRequest) -> Tuple[Dict[str, Any], bool]:
if on_permitted_domain(request.user.team, request):
response: Dict[str, Any] = {"isAuthenticated": True}
editor_params = {}
if request.user.toolbar_mode == "toolbar":
editor_params["toolbarVersion"] = "toolbar"
if settings.JS_URL:
editor_params["jsURL"] = settings.JS_URL
response["editorParams"] = editor_params
return response, not request.user.temporary_token
else:
return {}, False
# May raise exception if request body is malformed
def get_team_from_token(request: HttpRequest, data_from_request: Dict[str, Any]) -> Union[Team, None]:
data = data_from_request["data"]
if not data:
return None
token = _get_token(data, request)
is_personal_api_key = False
if not token:
token = PersonalAPIKeyAuthentication.find_key(
request, data_from_request["body"], data if isinstance(data, dict) else None
)
is_personal_api_key = True
if token:
return Team.objects.get_team_from_token(token, is_personal_api_key)
return None
def feature_flags(request: HttpRequest, team: Team, data: Dict[str, Any]) -> List[str]:
flags_enabled = []
feature_flags = FeatureFlag.objects.filter(team=team, active=True, deleted=False)
for feature_flag in feature_flags:
# distinct_id will always be a string, but data can have non-string values ("Any")
if feature_flag.distinct_id_matches(data["distinct_id"]):
flags_enabled.append(feature_flag.key)
return flags_enabled
def parse_domain(url: Any) -> Optional[str]:
return urlparse(url).hostname
@csrf_exempt
def get_decide(request: HttpRequest):
response = {
"config": {"enable_collect_everything": True},
"editorParams": {},
"isAuthenticated": False,
"supportedCompression": ["gzip", "lz64"],
}
if request.COOKIES.get(settings.TOOLBAR_COOKIE_NAME):
response["isAuthenticated"] = True
if settings.JS_URL:
response["editorParams"] = {"jsURL": settings.JS_URL, "toolbarVersion": "toolbar"}
if request.user.is_authenticated:
r, update_user_token = decide_editor_params(request)
response.update(r)
if update_user_token:
request.user.temporary_token = secrets.token_urlsafe(32)
request.user.save()
response["featureFlags"] = []
response["sessionRecording"] = False
if request.method == "POST":
try:
data_from_request = load_data_from_request(request)
except (json.decoder.JSONDecodeError, TypeError):
return cors_response(
request,
JsonResponse(
{"code": "validation", "message": "Malformed request data. Make sure you're sending valid JSON.",},
status=400,
),
)
team = get_team_from_token(request, data_from_request)
if team:
response["featureFlags"] = feature_flags(request, team, data_from_request["data"])
response["sessionRecording"] = team.session_recording_opt_in and on_permitted_domain(team, request)
return cors_response(request, JsonResponse(response))
``` |
{
"source": "jonathanlloyd/scratchstack-httpserver",
"score": 4
} |
#### File: scratchstack-httpserver/src/scratchsocket.py
```python
import socket
class InboundSocket:
"""Provides a simple callback-based API for inbound TCP sockets
Attributes:
port (int): The port currently being listened on
"""
def __init__(self):
self.port = None
self._client_socket = None
self._server_socket = None
self._running = False
def listen(self, port, read_callback):
"""Start listening on the given port
Args:
port (int): Port to listen on
read_callback (callable): Called every time a byte is read from
the socket
Raises:
RuntimeError: If socket is already listening
"""
if self._running:
raise RuntimeError('Socket already listening')
self.port = port
self._running = True
self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._server_socket.bind(('0.0.0.0', self.port))
self._server_socket.listen(1)
(self._client_socket, _) = self._server_socket.accept()
while self._running:
byte = self._client_socket.recv(1)
if byte == b'':
# The connection has closed, wait for another one
(self._client_socket, _) = self._server_socket.accept()
continue
read_callback(byte)
def write(self, bytes_to_write):
"""Write bytes to the currently connected client connection (if any)
Note:
            A RuntimeError will be raised if no client is currently connected
Args:
bytes_to_write (bytes): Bytes that will be sent to the connected
client
Raises:
RuntimeError: If no clients are currently connected
RuntimeError: If the current connection is closed by the client
"""
if self._client_socket is None:
raise RuntimeError('Cannot write to socket: Not listening on any port')
bytes_sent = 0
while bytes_sent < len(bytes_to_write):
sent = self._client_socket.send(bytes_to_write[bytes_sent:])
if sent == 0:
raise RuntimeError('Cannot write to socket: Connection closed')
bytes_sent += sent
def close_client_conn(self):
"""Gracefully terminate any inbound client connections"""
if self._client_socket is not None:
self._client_socket.shutdown(1)
def stop(self):
"""Stop listening on the current port"""
if self._client_socket is not None:
self._client_socket.shutdown(1)
self._client_socket.close()
if self._server_socket is not None:
self._server_socket.close()
self.port = None
self._client_socket = None
self._server_socket = None
self._running = False
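# Usage sketch: echo every received byte straight back to the client. listen()
# blocks, so in practice it would typically run on its own thread.
if __name__ == '__main__':
    inbound = InboundSocket()
    def echo(byte):
        inbound.write(byte)
    inbound.listen(8080, echo)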
```
#### File: scratchstack-httpserver/src/serializer.py
```python
def serialize_response(response):
if response.status_code < 0:
raise ValueError('Status code cannot be negative')
if response.status_code > 999:
raise ValueError('Status code must be < 1000')
status_code_string = f"{response.status_code:03d}"
status_line = f"HTTP/1.1 {status_code_string} {response.reason_phrase}"
headers_string = ''
for field_name, field_value in response.headers.items():
headers_string += f"{field_name}: {field_value}\r\n"
response_string = f"{status_line}\r\n{headers_string}\r\n{response.body}"
response_bytes = response_string.encode('utf-8')
return response_bytes
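# Usage sketch with a stand-in response object; the real Response type lives elsewhere
# in the package, but any object exposing these four attributes serializes the same way.
if __name__ == '__main__':
    from collections import namedtuple
    Response = namedtuple('Response', ['status_code', 'reason_phrase', 'headers', 'body'])
    resp = Response(200, 'OK', {'Content-Type': 'text/plain', 'Content-Length': '5'}, 'hello')
    print(serialize_response(resp))
    # b'HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 5\r\n\r\nhello'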
``` |
{
"source": "jonathanlo411/malapiclient",
"score": 3
} |
#### File: malapiclient/malapiclient/errors.py
```python
class Error(Exception):
"""Base class for other exceptions"""
pass
class NoClientSecretError(Error):
def __str__(self):
return "No Client Secret. You can add your Client Secret to your malclient by using malclient.add_secret()"
pass
class NeedAuthentificationError(Error):
def __str__(self):
return "No Token added. You can create a new one following the Oauth2 process or by using malclient.add_token()"
pass
``` |
{
"source": "jonathanlofgren/more-kedro",
"score": 2
} |
#### File: more-kedro/more_kedro/hooks.py
```python
from kedro.framework.hooks import hook_impl
from kedro.io import DataCatalog
from kedro.utils import load_obj
from typing import Dict
import logging
logger = logging.getLogger(__name__)
class TypedParameters:
def __init__(self, type_indicator: str = "type", inline: bool = False):
self._type_indicator = type_indicator
self._type_suffix = f"__{type_indicator}"
self._inline = inline
@hook_impl
def after_catalog_created(self, catalog: DataCatalog) -> None:
if self._inline:
param_types = self._get_param_types_inline(catalog)
else:
param_types = self._get_param_types(catalog)
for param, type_string in param_types.items():
type_obj = load_obj(type_string)
catalog._data_sets[param]._data = type_obj(
**catalog._data_sets[param]._data
)
def _get_param_types(self, catalog: DataCatalog) -> Dict[str, str]:
param_types = {}
for name, dataset in catalog._data_sets.items():
if name.startswith("params:") and name.endswith(self._type_suffix):
param_name = name[: -len(self._type_suffix)]
if param_name in catalog._data_sets:
param_types[param_name] = dataset._data
return param_types
def _get_param_types_inline(self, catalog: DataCatalog) -> Dict[str, str]:
param_types = {}
for name, dataset in catalog._data_sets.items():
if name.startswith("params:") and self._type_indicator in dataset._data:
param_types[name] = dataset._data.pop(self._type_indicator)
return param_types
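# Usage sketch (parameter names below are illustrative). With inline=True the hook
# pops a "type" key from a parameter entry and instantiates that class with the rest:
#
#   # conf/base/parameters.yml
#   model_options:
#     type: sklearn.linear_model.LogisticRegression
#     C: 1.0
#
# With inline=False (the default) the type comes from a sibling "<name>__type" entry:
#
#   model_options:
#     C: 1.0
#   model_options__type: sklearn.linear_model.LogisticRegression
#
# The hook is registered like any other Kedro hook, e.g. HOOKS = (TypedParameters(inline=True),)
# in the project's settings.py.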
``` |
{
"source": "jonathan-longe/RSBC-DataHub-API",
"score": 3
} |
#### File: python/common/actions.py
```python
import logging
from datetime import datetime, timedelta
from python.common.message import encode_message, add_error_to_message
from python.common.config import Config
import iso8601
logging.basicConfig(level=Config.LOG_LEVEL, format=Config.LOG_FORMAT)
def is_not_on_hold(**args) -> tuple:
"""
Returns true if the message is not on hold -- either
there is no 'hold_until' attribute OR there is a
hold_unit attribute, but it's ISO datetime if
greater than the current datetime.
"""
message = args.get('message')
if 'hold_until' not in message:
return True, args
now = datetime.now()
hold_until = iso8601.parse_date(message['hold_until'], None)
return now >= hold_until, args
def add_hold_before_trying_vips_again(**args) -> tuple:
"""
Adds a do not process until attribute to the message
"""
message = args.get('message')
config = args.get('config')
hold_hours = int(config.HOURS_TO_HOLD_BEFORE_TRYING_VIPS)
message['hold_until'] = (datetime.today() + timedelta(hours=hold_hours)).isoformat()
return True, args
def add_24_hour_hold_until(**args) -> tuple:
"""
Adds a 24 hour hold until attribute to the message
"""
message = args.get('message')
config = args.get('config')
hold_hours = 24
message['hold_until'] = (datetime.today() + timedelta(hours=hold_hours)).isoformat()
return True, args
def add_hold_before_sending_disclosure(**args) -> tuple:
"""
Adds a hold_until attribute to the message appropriate for disclosure
"""
message = args.get('message')
config = args.get('config')
hold_hours = int(config.HOURS_TO_HOLD_BEFORE_DISCLOSURE)
message['hold_until'] = (datetime.today() + timedelta(hours=hold_hours)).isoformat()
return True, args
def add_hold_to_verify_schedule(**args) -> tuple:
"""
Adds a hold_until attribute to the message appropriate for verify_schedule
"""
message = args.get('message')
config = args.get('config')
hold_hours = config.HOURS_APPLICANT_HAS_TO_SCHEDULE
message['hold_until'] = (datetime.today() + timedelta(hours=hold_hours)).isoformat()
return True, args
def add_to_failed_queue(**args) -> tuple:
config = args.get('config')
message = args.get('message')
writer = args.get('writer')
logging.warning('writing to failed write queue')
if not writer.publish(config.FAIL_QUEUE, encode_message(message, config.ENCRYPT_KEY)):
logging.critical('unable to write to RabbitMQ {} queue'.format(config.FAIL_QUEUE))
return False, args
return True, args
def add_to_hold_queue(**args) -> tuple:
config = args.get('config')
message = args.get('message')
writer = args.get('writer')
logging.warning('writing to hold queue')
if not writer.publish(config.HOLD_QUEUE, encode_message(message, config.ENCRYPT_KEY)):
logging.critical('unable to write to RabbitMQ {} queue'.format(config.HOLD_QUEUE))
return False, args
return True, args
def add_unknown_event_error_to_message(**args) -> tuple:
message = args.get('message')
event_type = '[ no event type attribute ]'
if 'event_type' in message:
event_type = message['event_type']
error = dict({
"error": "unknown event type: {}".format(event_type)
})
message = add_error_to_message(message, error)
return True, args
def add_to_rabbitmq_queue(**args) -> tuple:
encoded_message = args.get('encoded_message')
queue = args.get('queue')
writer = args.get('writer')
logging.warning('writing to {} queue'.format(queue))
if not writer.publish(queue, encoded_message):
logging.critical('unable to write to RabbitMQ {} queue'.format(queue))
return False, args
return True, args
```
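The hold pattern above (stamp a `hold_until` timestamp, re-check it later) can be followed end to end with a small standalone sketch. `FakeConfig` and its value are assumptions for illustration only; the real value comes from `python.common.config.Config`.
```python
from datetime import datetime, timedelta

import iso8601


class FakeConfig:
    # Assumption for illustration; the real setting lives in python.common.config.Config
    HOURS_TO_HOLD_BEFORE_TRYING_VIPS = "6"


message = {"event_type": "prohibition_review"}

# Mirrors add_hold_before_trying_vips_again(): stamp an ISO 'hold_until' in the future
hold_hours = int(FakeConfig.HOURS_TO_HOLD_BEFORE_TRYING_VIPS)
message['hold_until'] = (datetime.today() + timedelta(hours=hold_hours)).isoformat()

# Mirrors is_not_on_hold(): the message stays on hold until 'hold_until' has passed
now = datetime.now()
hold_until = iso8601.parse_date(message['hold_until'], None)
print(now >= hold_until)  # False while the hold window is still in effect
```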
#### File: python/common/helper.py
```python
import json
import csv
import pytz
import logging
import datetime
from python.common.config import Config
logging.basicConfig(level=Config.LOG_LEVEL, format=Config.LOG_FORMAT)
def load_json_into_dict(file_name) -> dict:
with open(file_name, 'r') as f:
data = f.read()
return json.loads(data)
def get_csv_test_data(file_and_path):
test_data = list()
with open(file_and_path, newline='') as csvfile:
data = csv.reader(csvfile, delimiter=',')
for row_number, row in enumerate(data):
# exclude the header row
if row_number != 0:
test_data.append(row)
return test_data
def load_xml_to_string(file_name) -> str:
with open(file_name, 'r') as f:
return f.read()
def validate_form_number(number: str) -> bool:
"""
Validate check digit used in IRP, VI and UL forms
:param number:
:return:
"""
number_list = list(number)
check_digit = int(number_list.pop())
n = list()
for element in number_list:
# cast each element to an int
n.append(int(element))
# ignore the first two digits, sum the number
# using a special formula
number_sum = (
n[2] +
_times_2(n[3]) +
n[4] +
_times_2(n[5]) +
n[6] +
_times_2(n[7]))
# compare modulus of the sum with check digit
return number_sum % 10 == check_digit
def _times_2(number: int) -> int:
"""
    If number * 2 is greater than 9, return 1;
    otherwise return the number * 2.
:param number:
:return:
"""
return int(list(str(number * 2))[0])
def middle_logic(functions: list, **args):
"""
Recursive function that calls each node in the list.
    Each node has a "try" function that is executed first. If the try
    function returns True, the next node in the list is called. If the
    try function returns False, the node's "fail" list is executed in the
    same way.
    example = dict({
        "rules": [
            {
                "try": success1,
                "fail": [
                    {
                        "try": failure1,
                        "fail": []
                    }
                ],
            },
        ]
    })
The middleware is called like this: middle_logic(example['rules'])
"""
if functions:
try_fail_node = functions.pop(0)
logging.debug('calling try function: ' + try_fail_node['try'].__name__)
flag, args = try_fail_node['try'](**args)
logging.info("result from {} is {}".format(try_fail_node['try'].__name__, flag))
if flag:
logging.debug('calling middleware logic recursively')
args = middle_logic(functions, **args)
else:
logging.debug('calling failure functions recursively')
args = middle_logic(try_fail_node['fail'], **args)
return args
def get_listeners(listeners: dict, key: str) -> list:
"""
Get the list of nested list of functions to invoke
for a particular form type
"""
if key in listeners:
return listeners[key]
else:
return listeners['unknown_event']
def localize_timezone(date_time: datetime.datetime) -> datetime.datetime:
tz = pytz.timezone('America/Vancouver')
localized = tz.localize(date_time)
logging.debug("localized datetime: {}".format(localized))
return localized
def check_credentials(username, password, username_submitted, password_submitted) -> bool:
logging.debug('credentials: {}:{}'.format(username, password))
if username_submitted == username and password_submitted == password:
return True
return False
```
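To make the recursive try/fail contract of `middle_logic` concrete, here is a small runnable sketch. The toy handlers are invented for illustration; only `middle_logic` comes from the module above, and importing it assumes the package's `Config`/environment is available.
```python
from python.common.helper import middle_logic  # assumes the package's Config/env is set up


# Toy handlers following the (flag, args) convention used by the pipeline functions
def check_number_is_even(**args):
    return args['number'] % 2 == 0, args


def report_odd(**args):
    print("{} is odd".format(args['number']))
    return True, args


def report_even(**args):
    print("{} is even".format(args['number']))
    return True, args


rules = [
    {"try": check_number_is_even, "fail": [
        {"try": report_odd, "fail": []},
    ]},
    {"try": report_even, "fail": []},
]

# middle_logic pops nodes off the lists it is given, so rebuild the rules per message
middle_logic(rules, number=3)  # prints "3 is odd"; report_even is skipped once the failure branch runs
```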
#### File: python/common/rsi_email.py
```python
import python.common.helper as helper
from python.common.config import Config
import python.common.common_email_services as common_email_services
from datetime import datetime
import json
import logging
from jinja2 import Environment, select_autoescape, FileSystemLoader
logging.basicConfig(level=Config.LOG_LEVEL, format=Config.LOG_FORMAT)
def application_accepted(**args):
config = args.get('config')
prohibition_number = args.get('prohibition_number')
vips_data = args.get('vips_data')
t = "{}_application_accepted.html".format(vips_data['noticeTypeCd'])
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
deadline_date_string=args.get('deadline_date_string'),
link_to_paybc=config.LINK_TO_PAYBC,
full_name=args.get('applicant_full_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_review_type_change(**args):
config = args.get('config')
prohibition_number = args.get('prohibition_number')
t = "review_type_change.html"
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
full_name=args.get('applicant_full_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def send_form_xml_to_admin(**args):
xml = args.get('xml_base64', None)
if xml:
config = args.get('config')
subject = 'DEBUG - Form XML attached'
template = get_jinja2_env().get_template('admin_notice.html')
return common_email_services.send_email(
[config.ADMIN_EMAIL_ADDRESS],
subject,
config,
template.render(
body='XML attached',
message='message xml attached',
subject=subject),
[{
"content": args.get('xml_base64'),
"contentType": "string",
"encoding": "base64",
"filename": "submitted_form.xml"
}]), args
logging.info('No XML to send')
def insufficient_reviews_available(**args) -> tuple:
config = args.get('config')
prohibition_number = args.get('prohibition_number')
t = "insufficient_reviews_available.html"
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_to_business(
content["subject"],
config,
template.render(
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_did_not_schedule(**args) -> tuple:
config = args.get('config')
prohibition_number = args.get('prohibition_number')
t = "applicant_did_not_schedule.html"
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_to_business(
content["subject"],
config,
template.render(
full_name=args.get('applicant_name'),
receipt_number=args.get('receipt_number'),
receipt_amount=args.get('receipt_amount'),
receipt_date=args.get('receipt_date'),
order_number=args.get('order_number'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_applied_at_icbc(**args) -> tuple:
config = args.get('config')
prohibition_number = args.get('prohibition_number')
t = "applicant_applied_at_icbc.html"
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
link_to_paybc=config.LINK_TO_PAYBC,
full_name="Applicant",
prohibition_number=prohibition_number,
subject=content["subject"])), args
def send_email_to_admin(**args):
subject = args.get('subject')
config = args.get('config')
message = args.get('message')
body = args.get('body')
template = get_jinja2_env().get_template('admin_notice.html')
return common_email_services.send_email(
[config.ADMIN_EMAIL_ADDRESS],
subject,
config,
template.render(subject=subject, body=body, message=json.dumps(message))), args
def applicant_prohibition_served_more_than_7_days_ago(**args):
config = args.get('config')
prohibition_number = args.get('prohibition_number')
t = "not_received_in_time.html"
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
full_name=args.get('applicant_full_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_licence_not_seized(**args):
config = args.get('config')
prohibition_number = args.get('prohibition_number')
t = "licence_not_seized.html"
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
link_to_icbc=config.LINK_TO_ICBC,
link_to_service_bc=config.LINK_TO_SERVICE_BC,
full_name=args.get('applicant_full_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_prohibition_not_found(**args):
config = args.get('config')
prohibition_number = args.get('prohibition_number')
notice_type = args.get('user_entered_notice_type')
t = "{}_prohibition_not_found.html".format(notice_type)
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
link_to_icbc=config.LINK_TO_ICBC,
link_to_service_bc=config.LINK_TO_SERVICE_BC,
full_name=args.get('applicant_full_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_to_schedule_review(**args):
"""
This message is sent immediately after an applicant pays
the application fee. Since we don't have the driver's
first name handy, this email is addressed to the applicant.
"""
config = args.get('config')
payload = args.get('payload')
vips_application = args.get('vips_application')
vips_data = args.get('vips_data')
t = "{}_select_review_date.html".format(vips_data['noticeTypeCd'])
args['email_template'] = t
email_address = vips_application['email']
full_name = "{} {}".format(vips_application['firstGivenNm'], vips_application['surnameNm'])
prohibition_number = args.get('prohibition_number')
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[email_address],
content["subject"],
config,
template.render(
link_to_schedule_form=config.LINK_TO_SCHEDULE_FORM,
order_number=payload.get('transaction_id'),
full_name=full_name,
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_schedule_confirmation(**args):
"""
This message is sent to the applicant after the requested review date
is successfully saved to VIPS.
"""
config = args.get('config')
vips_application = args.get('vips_application')
email_address = vips_application['email']
presentation_type = vips_application['presentationTypeCd']
t = 'review_date_confirmed_{}.html'.format(presentation_type)
args['email_template'] = t
phone = vips_application['phoneNo']
prohibition_number = args.get('prohibition_number')
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[email_address],
content["subject"],
config,
template.render(
full_name=args.get('applicant_name'),
prohibition_number=prohibition_number,
subject=content["subject"],
phone=phone,
friendly_review_time_slot=args.get('friendly_review_time_slot'))), args
def applicant_last_name_mismatch(**args):
"""
This email is sent to the applicant if the last name entered by the applicant
does not match the last name of the driver as entered in VIPS
"""
config = args.get('config')
prohibition_number = args.get('prohibition_number')
vips_data = args.get('vips_data')
t = "{}_last_name_mismatch.html".format(vips_data['noticeTypeCd'])
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
link_to_application_form=config.LINK_TO_APPLICATION_FORM,
link_to_icbc=config.LINK_TO_ICBC,
link_to_service_bc=config.LINK_TO_SERVICE_BC,
full_name=args.get('applicant_full_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_prohibition_not_found_yet(**args):
config = args.get('config')
prohibition_number = args.get('prohibition_number')
date_served_string = args.get('date_of_service')
date_served = helper.localize_timezone(datetime.strptime(date_served_string, '%Y-%m-%d'))
human_friendly_date_served = date_served.strftime("%B %d, %Y")
notice_type = args.get('user_entered_notice_type')
t = "{}_prohibition_not_found_yet.html".format(notice_type)
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
# Note: we rely on the date_served as submitted by the user -- not the date in VIPS
# Check to see if enough time has elapsed to enter the prohibition into VIPS
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
link_to_icbc=config.LINK_TO_ICBC,
link_to_service_bc=config.LINK_TO_SERVICE_BC,
date_of_service=human_friendly_date_served,
full_name=args.get('applicant_full_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_prohibition_still_not_found(**args) -> tuple:
config = args.get('config')
prohibition_number = args.get('prohibition_number')
date_served_string = args.get('date_of_service')
date_served = helper.localize_timezone(datetime.strptime(date_served_string, '%Y-%m-%d'))
human_friendly_date_served = date_served.strftime("%B %d, %Y")
notice_type = args.get('user_entered_notice_type')
t = "{}_prohibition_still_not_found.html".format(notice_type)
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
# Note: we rely on the date_served as submitted by the user -- not the date in VIPS
# Check to see if enough time has elapsed to enter the prohibition into VIPS
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
link_to_icbc=config.LINK_TO_ICBC,
link_to_service_bc=config.LINK_TO_SERVICE_BC,
date_of_service=human_friendly_date_served,
full_name=args.get('applicant_full_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def already_applied(**args):
config = args.get('config')
prohibition_number = args.get('prohibition_number')
vips_data = args.get('vips_data')
t = "{}_already_applied.html".format(vips_data['noticeTypeCd'])
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
full_name=args.get('applicant_full_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_disclosure(**args) -> tuple:
config = args.get('config')
prohibition_number = args.get('prohibition_number')
vips_data = args.get('vips_data')
t = '{}_send_disclosure.html'.format(vips_data['noticeTypeCd'])
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[args.get('applicant_email_address')],
content["subject"],
config,
template.render(
link_to_get_driving_record=config.LINK_TO_GET_DRIVING_RECORD,
full_name=args.get('applicant_name'),
prohibition_number=prohibition_number,
subject=content["subject"]),
args.get('disclosure_for_applicant')), args
def applicant_evidence_instructions(**args) -> tuple:
config = args.get('config')
prohibition_number = args.get('prohibition_number')
vips_application = args.get('vips_application')
email_address = vips_application['email']
t = 'send_evidence_instructions.html'
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[email_address],
content["subject"],
config,
template.render(
link_to_evidence_form=config.LINK_TO_EVIDENCE_FORM,
full_name=args.get('applicant_name'),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def applicant_evidence_received(**args) -> tuple:
config = args.get('config')
prohibition_number = args.get('prohibition_number')
email_address = args.get('email_address')
vips_application = args.get('vips_application')
full_name = "{} {}".format(vips_application['firstGivenNm'], vips_application['surnameNm'])
t = 'evidence_received.html'
args['email_template'] = t
content = get_email_content(t, prohibition_number)
template = get_jinja2_env().get_template(t)
return common_email_services.send_email(
[email_address],
content["subject"],
config,
template.render(
link_to_evidence_form=config.LINK_TO_EVIDENCE_FORM,
full_name=full_name,
today_date=args.get('today_date').strftime("%B %d, %Y %H:%M"),
prohibition_number=prohibition_number,
subject=content["subject"])), args
def admin_unable_to_save_to_vips(**args) -> tuple:
logging.critical('inside unable_to_save_to_vips_api()')
config = args.get('config')
message = args.get('message')
subject = 'Critical Error: Unable to save to VIPS'
body_text = 'While attempting to save an application to VIPS, an error was returned. ' + \
'We will save the record to a failed write queue in RabbitMQ.'
logging.critical('unable to save to VIPS: {}'.format(json.dumps(message)))
return send_email_to_admin(config=config, subject=subject, body=body_text), args
def admin_unknown_event_type(**args) -> tuple:
message = args.get('message')
config = args.get('config')
    subject = 'Critical Error: Unknown Event Type'
    event_type = message.get('event_type', '[ no event type attribute ]')
    body_text = "An unknown event has been received: " + event_type
    logging.critical('unknown event type: {}'.format(event_type))
    return send_email_to_admin(config=config, subject=subject, body=body_text), args
def get_jinja2_env():
template_loader = FileSystemLoader(searchpath="./python/common/templates")
return Environment(
loader=template_loader,
autoescape=select_autoescape(['html', 'xml'])
)
def get_email_content(template_name: str, prohibition_number: str):
content = content_data()
if template_name in content:
email_content = content[template_name]
email_content['subject'] = email_content['raw_subject'].format(_hyphenate(prohibition_number))
logging.info(email_content)
return email_content
return dict({
"raw_subject": "Unknown template requested {}",
"subject": "Unknown template",
"callout": "",
"title": "Unknown Template",
"timeline": ""
})
def _hyphenate(prohibition_number: str) -> str:
return "{}-{}".format(prohibition_number[0:2], prohibition_number[2:8])
def content_data() -> dict:
return dict({
"IRP_last_name_mismatch.html": {
"raw_subject": "Prohibition Number or Name Don't Match - Driving Prohibition {} Review",
"title": "IRP Prohibition Number or Name Don't Match",
},
"ADP_last_name_mismatch.html": {
"raw_subject": "Prohibition Number or Name Don't Match - Driving Prohibition {} Review",
"title": "ADP Prohibition Number or Name Don't Match",
},
"UL_last_name_mismatch.html": {
"raw_subject": "Prohibition Number or Name Don't Match - Driving Prohibition {} Review",
"title": "UL Prohibition Number or Name Don't Match",
},
"IRP_prohibition_not_found_yet.html": {
"raw_subject": "Prohibition Not Yet Found - Driving Prohibition {} Review",
"title": "IRP Prohibition Not Yet Found",
},
"ADP_prohibition_not_found_yet.html": {
"raw_subject": "Prohibition Not Yet Found - Driving Prohibition {} Review",
"title": "ADP Prohibition Not Yet Found",
},
"UL_prohibition_not_found_yet.html": {
"raw_subject": "Prohibition Not Yet Found - Driving Prohibition {} Review",
"title": "UL Prohibition Not Yet Found",
},
"IRP_prohibition_still_not_found.html": {
"raw_subject": "Prohibition Still Not Found - Driving Prohibition {} Review",
"title": "IRP Prohibition Still Not Found",
},
"ADP_prohibition_still_not_found.html": {
"raw_subject": "Prohibition Still Not Found - Driving Prohibition {} Review",
"title": "ADP Prohibition Still Not Found",
},
"UL_prohibition_still_not_found.html": {
"raw_subject": "Prohibition Still Not Found - Driving Prohibition {} Review",
"title": "UL Prohibition Still Not Found",
},
"IRP_already_applied.html": {
"raw_subject": "Already Applied – Driving Prohibition {} Review",
"title": "IRP Already Applied",
},
"ADP_already_applied.html": {
"raw_subject": "Already Applied – Driving Prohibition {} Review",
"title": "ADP Already Applied",
},
"UL_already_applied.html": {
"raw_subject": "Previous Review on File – Driving Prohibition {} Review",
"title": "UL Already Applied",
},
"review_date_confirmed_ORAL.html": {
"raw_subject": "Review Date Confirmed - Driving Prohibition {} Review",
"title": "Review Date Confirmed Oral",
},
"review_date_confirmed_WRIT.html": {
"raw_subject": "Review Date Confirmed - Driving Prohibition {} Review",
"title": "Review Date Confirmed Written",
},
"IRP_select_review_date.html": {
"raw_subject": "Select Review Date - Driving Prohibition {} Review",
"title": "IRP Select Review Date",
},
"ADP_select_review_date.html": {
"raw_subject": "Select Review Date - Driving Prohibition {} Review",
"title": "ADP Select Review Date",
},
"UL_select_review_date.html": {
"raw_subject": "Select Review Date - Driving Prohibition {} Review",
"title": "UL Select Review Date",
},
"IRP_prohibition_not_found.html": {
"raw_subject": "Prohibition Not Found and 7-day Application Window Missed - Driving Prohibition {} Review",
"title": "IRP Prohibition Not Found"
},
"ADP_prohibition_not_found.html": {
"raw_subject": "Prohibition Not Found and 7-day Application Window Missed - Driving Prohibition {} Review",
"title": "ADP Prohibition Not Found"
},
"UL_prohibition_not_found.html": {
"raw_subject": "Prohibition Not Found – Driving Prohibition {} Review",
"title": "UL Prohibition Not Found"
},
"licence_not_seized.html": {
"raw_subject": "Licence Not Surrendered - Driving Prohibition {} Review",
"title": "Licence Not Surrendered",
},
"not_received_in_time.html": {
"raw_subject": "7-day Application Window Missed - Driving Prohibition {} Review",
"title": "7-day Application Window Missed",
},
"IRP_application_accepted.html": {
"raw_subject": "Application Accepted - Driving Prohibition {} Review",
"title": "IRP Application Accepted",
},
"ADP_application_accepted.html": {
"raw_subject": "Application Accepted - Driving Prohibition {} Review",
"title": "ADP Application Accepted",
},
"UL_application_accepted.html": {
"raw_subject": "Application Accepted - Driving Prohibition {} Review",
"title": "UL Application Accepted",
},
"IRP_send_disclosure.html": {
"raw_subject": "Disclosure Documents Attached - Driving Prohibition {} Review",
"title": "Send Disclosure",
},
"ADP_send_disclosure.html": {
"raw_subject": "Disclosure Documents Attached - Driving Prohibition {} Review",
"title": "Send Disclosure",
},
"UL_send_disclosure.html": {
"raw_subject": "Disclosure Documents Attached - Driving Prohibition {} Review",
"title": "Send Disclosure",
},
"send_evidence_instructions.html": {
"raw_subject": "Submit Evidence - Driving Prohibition {} Review",
"title": "Submit Evidence",
},
"evidence_received.html": {
"raw_subject": "Evidence Received - Driving Prohibition {} Review",
"title": "Evidence Received",
},
"review_type_change.html": {
"raw_subject": "Review Type Change - Driving Prohibition {} Review",
"title": "Review Type Change",
},
"insufficient_reviews_available.html": {
"raw_subject": "Insufficient Review Dates Available - Driving Prohibition {} Review",
"title": "Insufficient Review Dates Available",
},
"applicant_did_not_schedule.html": {
"raw_subject": "Did Not Schedule - Driving Prohibition {} Review",
"title": "Applicant Did Not Schedule",
},
"applicant_applied_at_icbc.html": {
"raw_subject": "Applied at ICBC - Driving Prohibition {} Review",
"title": "Applied at ICBC",
}
})
```
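The subject lines above are assembled by `get_email_content`, which hyphenates the prohibition number and substitutes it into the template's `raw_subject`. A small sketch of that lookup; the prohibition number is made up, and importing the module assumes its `Config`/template environment is available.
```python
from python.common.rsi_email import get_email_content  # assumes Config/env is available

content = get_email_content("IRP_application_accepted.html", "21900123")
print(content["subject"])
# -> "Application Accepted - Driving Prohibition 21-900123 Review"
```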
#### File: python/form_handler/business.py
```python
import python.common.middleware as middleware
import python.common.actions as actions
import python.common.rsi_email as rsi_email
def process_incoming_form() -> dict:
"""
This function lists the business rules required when processing
each form. The Orbeon form name is used as the key. For example,
the "send_disclosure" attributes below are used when processing the
"send_disclosure" Orbeon form.
"""
return {
"unknown_event": [
{"try": actions.add_unknown_event_error_to_message, "fail": []},
{"try": actions.add_to_failed_queue, "fail": []},
{"try": rsi_email.admin_unknown_event_type, "fail": []}
],
"send_disclosure": [
{"try": actions.is_not_on_hold, "fail": [
{"try": actions.add_to_hold_queue, "fail": []}
]},
{"try": middleware.get_data_from_disclosure_event, "fail": []},
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.get_vips_status, "fail": []},
{"try": middleware.prohibition_exists_in_vips, "fail": []},
{"try": middleware.is_review_in_the_future, "fail": [
# No further disclosure will be sent. The review has concluded.
]},
{"try": middleware.is_any_unsent_disclosure, "fail": [
# No new disclosure to send at present, try again later
{"try": actions.add_hold_before_sending_disclosure, "fail": []},
{"try": actions.add_to_hold_queue, "fail": []}
]},
{"try": middleware.retrieve_unsent_disclosure, "fail": []},
{"try": middleware.if_required_add_adp_disclosure, "fail": []},
{"try": rsi_email.applicant_disclosure, "fail": [
# if send is not successful, add back to hold queue
]},
{"try": middleware.mark_disclosure_as_sent, "fail": []},
{"try": actions.add_hold_before_sending_disclosure, "fail": []},
{"try": actions.add_to_hold_queue, "fail": []}
],
"verify_schedule": [
{"try": actions.is_not_on_hold, "fail": [
{"try": actions.add_to_hold_queue, "fail": []}
]},
{"try": middleware.get_data_from_verify_schedule_event, "fail": []},
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.get_vips_status, "fail": []},
{"try": middleware.prohibition_exists_in_vips, "fail": []},
{"try": middleware.review_has_been_scheduled, "fail": [
# if review has not been scheduled, notify Appeals Registry
{"try": rsi_email.applicant_did_not_schedule, "fail": []},
]}
# If review has been scheduled, do nothing
],
"review_schedule_picker": [
# aka: review scheduler
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.get_data_from_schedule_form, "fail": []},
{"try": middleware.clean_prohibition_number, "fail": []},
{"try": middleware.get_vips_status, "fail": []},
{"try": middleware.prohibition_exists_in_vips, "fail": []},
{"try": middleware.user_submitted_last_name_matches_vips, "fail": []},
{"try": middleware.application_has_been_saved_to_vips, "fail": []},
{"try": middleware.get_payment_status, "fail": []},
{"try": middleware.received_valid_payment_status, "fail": []},
{"try": middleware.paid_not_more_than_24hrs_ago, "fail": []},
{"try": middleware.application_has_been_paid, "fail": []},
{"try": middleware.get_application_details, "fail": []},
{"try": middleware.valid_application_received_from_vips, "fail": []},
{"try": middleware.get_invoice_details, "fail": []},
{"try": middleware.calculate_schedule_window, "fail": []},
{"try": middleware.decode_selected_timeslot, "fail": []},
{"try": middleware.get_human_friendly_time_slot_string, "fail": []},
{"try": middleware.save_schedule_to_vips, "fail": [
# Consider sending a message to the applicant in the unlikely
# event that the schedule save operation is unsuccessful
]},
{"try": rsi_email.applicant_schedule_confirmation, "fail": []},
{"try": rsi_email.applicant_evidence_instructions, "fail": []},
{"try": middleware.create_disclosure_event, "fail": []},
{"try": actions.add_hold_before_sending_disclosure, "fail": []},
{"try": actions.add_to_hold_queue, "fail": []}
],
"prohibition_review": [
# aka: submit prohibition application
{"try": actions.is_not_on_hold, "fail": [
{"try": actions.add_to_hold_queue, "fail": []}
]},
{"try": middleware.get_data_from_application_form, "fail": []},
{"try": middleware.get_user_entered_notice_type_from_message, "fail": []},
{"try": middleware.clean_prohibition_number, "fail": []},
{"try": middleware.populate_driver_name_fields_if_null, "fail": []},
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.get_vips_status, "fail": [
{"try": actions.add_to_hold_queue, "fail": []}
]},
{"try": middleware.prohibition_exists_in_vips, "fail": [
{"try": middleware.prohibition_served_within_past_week, "fail": [
{"try": rsi_email.applicant_prohibition_not_found, "fail": []}
]},
{"try": middleware.applicant_has_more_than_one_day_to_apply, "fail": [
{"try": rsi_email.applicant_prohibition_still_not_found, "fail": []},
{"try": actions.add_24_hour_hold_until, "fail": []},
{"try": actions.add_to_hold_queue, "fail": []}
]},
{"try": rsi_email.applicant_prohibition_not_found_yet, "fail": []},
{"try": actions.add_hold_before_trying_vips_again, "fail": []},
{"try": actions.add_to_hold_queue, "fail": []}
]},
{"try": middleware.application_not_previously_saved_to_vips, "fail": [
{"try": rsi_email.already_applied, "fail": []},
]},
{"try": middleware.review_has_not_been_scheduled, "fail": [
{"try": rsi_email.applicant_applied_at_icbc, "fail": []},
]},
{"try": middleware.user_submitted_last_name_matches_vips, "fail": [
{"try": rsi_email.applicant_last_name_mismatch, "fail": []}
]},
{"try": middleware.is_applicant_within_window_to_apply, "fail": [
{"try": rsi_email.applicant_prohibition_served_more_than_7_days_ago, "fail": []}
]},
{"try": middleware.has_drivers_licence_been_seized, "fail": [
{"try": rsi_email.applicant_licence_not_seized, "fail": []}
]},
{"try": middleware.transform_hearing_request_type, "fail": []},
{"try": middleware.force_presentation_type_to_written_if_ineligible_for_oral, "fail": []},
{"try": middleware.transform_applicant_role_type, "fail": []},
{"try": middleware.compress_form_data_xml, "fail": []},
{"try": middleware.save_application_to_vips, "fail": [
{"try": actions.add_to_failed_queue, "fail": []},
{"try": rsi_email.admin_unable_to_save_to_vips, "fail": []}
]},
{"try": rsi_email.application_accepted, "fail": []},
{"try": middleware.is_applicant_ineligible_for_oral_review_but_requested_oral, "fail": [
# end of successful application process
]},
{"try": rsi_email.applicant_review_type_change, "fail": []}
],
"Document_submission": [
# aka: evidence submission form
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.get_data_from_document_submission_form, "fail": []},
{"try": middleware.clean_prohibition_number, "fail": []},
{"try": middleware.get_vips_status, "fail": []},
{"try": middleware.prohibition_exists_in_vips, "fail": []},
{"try": middleware.user_submitted_last_name_matches_vips, "fail": []},
{"try": middleware.application_has_been_saved_to_vips, "fail": []},
{"try": middleware.get_application_details, "fail": []},
{"try": middleware.valid_application_received_from_vips, "fail": []},
{"try": rsi_email.applicant_evidence_received, "fail": []},
]
}
```
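These rule lists are data, not behaviour: something still has to pick the list for a message's event type and feed it through the middleware helper. A hedged sketch of that wiring follows; the actual form-handler worker is not shown in this file, so `handle` and its parameters are illustrative assumptions.
```python
from python.common.helper import get_listeners, middle_logic
from python.form_handler.business import process_incoming_form


def handle(message: dict, config, writer):
    # Unknown event types fall back to the 'unknown_event' rule list via get_listeners()
    rules = get_listeners(process_incoming_form(), message.get('event_type', 'unknown_event'))
    return middle_logic(rules, message=message, config=config, writer=writer)
```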
#### File: python/tests/test_vips.py
```python
import python.common.vips_api as vips
import logging
import json
from unittest.mock import MagicMock
from python.common.helper import load_json_into_dict, localize_timezone
import pytest
import pytz
from iso8601 import parse_date
from datetime import datetime, timezone
class TestConfig:
VIPS_API_ROOT_URL = 'https://someserver.gov.bc.ca/endpoint'
VIPS_API_USERNAME = 'username'
VIPS_API_PASSWORD = 'password'
class TestVips:
CORRELATION_ID = 'ABC'
@staticmethod
def test_build_endpoint_method():
prohibition_number = "1234"
endpoint = vips.build_endpoint(TestConfig.VIPS_API_ROOT_URL, prohibition_number, 'status')
assert isinstance(endpoint, str)
assert TestConfig.VIPS_API_ROOT_URL + '/' + prohibition_number + '/status' == endpoint
@staticmethod
def test_health_check_method():
response = load_json_into_dict('python/tests/sample_data/vips/vips_health_check_200.json')
vips.get = MagicMock(return_value=(True, response))
endpoint = TestConfig.VIPS_API_ROOT_URL + '/api/utility/ping'
vips.get(endpoint, TestConfig.VIPS_API_USERNAME, TestConfig.VIPS_API_PASSWORD)
is_success, actual = vips.health_get(TestConfig)
vips.get.assert_called_with(endpoint, TestConfig.VIPS_API_USERNAME, TestConfig.VIPS_API_PASSWORD)
print(json.dumps(actual))
assert is_success is True
assert "responseMessage" in actual
@staticmethod
def test_query_get_method_success():
response_from_api = load_json_into_dict('python/tests/sample_data/vips/vips_query_200.json')
vips.get = MagicMock(return_value=(True, response_from_api))
endpoint = TestConfig.VIPS_API_ROOT_URL + '/12345/status/' + TestVips.CORRELATION_ID
vips.get(endpoint, TestConfig.VIPS_API_USERNAME, TestConfig.VIPS_API_PASSWORD)
is_success, actual = vips.status_get("12345", TestConfig, TestVips.CORRELATION_ID)
vips.get.assert_called_with(
endpoint,
TestConfig.VIPS_API_USERNAME,
TestConfig.VIPS_API_PASSWORD,
TestVips.CORRELATION_ID)
print(json.dumps(actual))
assert is_success is True
assert "driverLicenceSeizedYn" in actual['data']['status']
assert "surnameNm" in actual['data']['status']
assert "disclosure" in actual['data']['status']
@staticmethod
def test_query_get_method_failure():
response_from_api = load_json_into_dict('python/tests/sample_data/vips/vips_query_404.json')
vips.get = MagicMock(return_value=(True, response_from_api))
endpoint = TestConfig.VIPS_API_ROOT_URL + '/12345/status/' + TestVips.CORRELATION_ID
vips.get(endpoint, TestConfig.VIPS_API_USERNAME, TestConfig.VIPS_API_PASSWORD)
is_success, actual = vips.status_get("12345", TestConfig, TestVips.CORRELATION_ID)
vips.get.assert_called_with(
endpoint,
TestConfig.VIPS_API_USERNAME,
TestConfig.VIPS_API_PASSWORD,
TestVips.CORRELATION_ID)
print(json.dumps(actual))
assert is_success is True
assert "fail" in actual['resp']
@staticmethod
def test_query_get_method_bad_response():
response_from_api = dict({"offline": True})
vips.get = MagicMock(return_value=(False, response_from_api))
endpoint = TestConfig.VIPS_API_ROOT_URL + '/12345/status/' + TestVips.CORRELATION_ID
vips.get(endpoint, TestConfig.VIPS_API_USERNAME, TestConfig.VIPS_API_PASSWORD)
is_success, actual = vips.status_get("12345", TestConfig, TestVips.CORRELATION_ID)
vips.get.assert_called_with(
endpoint,
TestConfig.VIPS_API_USERNAME,
TestConfig.VIPS_API_PASSWORD,
TestVips.CORRELATION_ID)
print(json.dumps(actual))
assert is_success is False
@staticmethod
def test_disclosure_get_method():
response_from_api = load_json_into_dict('python/tests/sample_data/vips/vips_disclosure_200.json')
vips.get = MagicMock(return_value=(True, response_from_api))
endpoint = TestConfig.VIPS_API_ROOT_URL + '/1234/disclosure/' + TestVips.CORRELATION_ID
vips.get(endpoint, TestConfig.VIPS_API_USERNAME, TestConfig.VIPS_API_PASSWORD)
is_success, actual = vips.disclosure_get("1234", TestConfig, TestVips.CORRELATION_ID)
vips.get.assert_called_with(
endpoint,
TestConfig.VIPS_API_USERNAME,
TestConfig.VIPS_API_PASSWORD,
TestVips.CORRELATION_ID)
assert is_success is True
assert "document" in actual['data']
@staticmethod
def test_payment_get_method():
response_from_api = load_json_into_dict('python/tests/sample_data/vips/vips_payment_200.json')
vips.get = MagicMock(return_value=(True, response_from_api))
endpoint = TestConfig.VIPS_API_ROOT_URL + '/1234/payment/status/' + TestVips.CORRELATION_ID
vips.get(endpoint, TestConfig.VIPS_API_USERNAME, TestConfig.VIPS_API_PASSWORD)
is_success, actual = vips.payment_get("1234", TestConfig, TestVips.CORRELATION_ID)
vips.get.assert_called_with(
endpoint,
TestConfig.VIPS_API_USERNAME,
TestConfig.VIPS_API_PASSWORD,
TestVips.CORRELATION_ID)
assert is_success is True
assert "transactionInfo" in actual['data']
vips_date_strings = [
("2019-01-02 17:30:00 -08:00", "2019-01-02 17:30:00-0800"),
("2019-01-02 17:30:00 -07:00", "2019-01-02 17:30:00-0700"),
]
@pytest.mark.parametrize("vips_datetime, expected", vips_date_strings)
def test_vips_datetime_conversion(self, vips_datetime, expected):
actual = vips.vips_str_to_datetime(vips_datetime)
assert actual == parse_date(expected)
@staticmethod
def test_datetime_to_vips_string():
tz = pytz.timezone('America/Vancouver')
date_under_test = localize_timezone(datetime.strptime("2020-11-22", "%Y-%m-%d"))
vips_date_string = vips.vips_datetime(date_under_test)
components = vips_date_string.split(":")
print(date_under_test.strftime("%z"))
print(vips_date_string)
assert len(components) == 4
assert vips_date_string[0:22] == '2020-11-22 00:00:00 -0'
@staticmethod
def test_transform_schedule_to_local_friendly_times():
vips_response = load_json_into_dict('python/tests/sample_data/vips/vips_schedule_200.json')
time_slots = vips_response['data']['timeSlots']
print(json.dumps(time_slots[0]))
print(str(type(time_slots[0])))
friendly_times_list = vips.time_slots_to_friendly_times(time_slots, "ORAL")
expected = list(["Fri, Sep 4, 2020 - 9:00AM to 9:30AM", "Fri, Sep 4, 2020 - 10:00AM to 10:30AM",
"Fri, Sep 4, 2020 - 11:00AM to 11:30AM", "Fri, Sep 4, 2020 - 12:00PM to 12:30PM",
"Fri, Sep 4, 2020 - 1:00PM to 1:30PM"])
for index, item in enumerate(expected):
assert friendly_times_list[index]['label'] == item
@staticmethod
def test_list_of_weekday_dates_between_method():
start_date = datetime.strptime("2020-09-01", "%Y-%m-%d")
end_date = datetime.strptime("2020-09-07", "%Y-%m-%d")
expected = list(["2020-09-01", "2020-09-02", "2020-09-03", "2020-09-04", "2020-09-07"])
list_of_date_times = vips.list_of_weekdays_dates_between(start_date, end_date)
assert list(map(iso_date_string, list_of_date_times)) == expected
@staticmethod
def test_last_name_match():
response_from_api = load_json_into_dict('python/tests/sample_data/vips/vips_query_200.json')
is_success = vips.is_last_name_match(response_from_api['data']['status'], "Norris")
assert is_success is True
dates_to_test = [
("2020-11-02", "2020-11-03"),
("2020-11-03", "2020-11-04"),
("2020-11-06", "2020-11-09"),
("2020-11-07", "2020-11-09"),
("2020-11-08", "2020-11-09"),
("2020-11-09", "2020-11-10"),
("2020-11-10", "2020-11-12"), # Nov-11 is a stat holiday in BC
]
@staticmethod
@pytest.mark.parametrize("date_under_test, next_business_date", dates_to_test)
def test_next_business_date(date_under_test, next_business_date):
iso = "%Y-%m-%d"
date_time = datetime.strptime(date_under_test, iso)
expected = datetime.strptime(next_business_date, iso)
assert vips.next_business_date(date_time) == expected
get_schedule_data = [
("IRP", "ORAL", "2020-11-02", "2020-11-03", 1, 2),
("IRP", "ORAL", "2020-11-02", "2020-11-07", 1, 5),
("IRP", "ORAL", "2020-11-02", "2020-11-10", 2, 7),
("IRP", "ORAL", "2020-11-02", "2020-11-08", 0, 0)
]
@staticmethod
@pytest.mark.parametrize(
"prohibition_type, review_type, first_date, last_date, get_time_slots, count_days", get_schedule_data)
def test_schedule_get_method(
prohibition_type, review_type, first_date, last_date, get_time_slots, count_days, monkeypatch):
correlation_id = 'abcdef'
iso = "%Y-%m-%d"
def mock_vips_get(*args):
endpoint_list = args[0].split("/")
query_date = endpoint_list[6]
print(query_date)
assert args[0] == vips.build_endpoint(
TestConfig.VIPS_API_ROOT_URL,
prohibition_type,
review_type,
query_date,
"review",
"availableTimeSlot",
correlation_id
)
endpoint_list = args[0].split("/")
print(endpoint_list)
return mock_schedule_get(get_time_slots, query_date)
first_datetime = datetime.strptime(first_date, iso)
last_datetime = datetime.strptime(last_date, iso)
monkeypatch.setattr(vips, "get", mock_vips_get)
is_successful, data = vips.schedule_get(
prohibition_type,
review_type,
first_datetime,
last_datetime,
TestConfig,
correlation_id)
assert is_successful
logging.warning("Schedule data: " + json.dumps(data))
assert data['number_review_days_offered'] == count_days
def iso_date_string(date_time: datetime) -> str:
return date_time.strftime("%Y-%m-%d")
def mock_schedule_get(time_slots: int, query_date: str) -> tuple:
if time_slots == 0:
return False, dict({
"resp": "fail",
"error": {
"message": "Requested data not found",
"httpStatus": 404
}
})
elif 0 < time_slots < 7:
items = list()
for item in range(time_slots):
hour = str(time_slots + 9)
items.append({
"reviewStartDtm": query_date + ' ' + hour.zfill(2) + ":00:00 -08:00",
"reviewEndDtm": query_date + ' ' + hour.zfill(2) + ":30:00 -08:00",
})
logging.info("items: {}".format(json.dumps(items)))
return True, dict({
"resp": "success",
"data": {
"timeSlots": items
}
})
else:
logging.warning('too many time slots requested')
return True, dict()
``` |
{
"source": "JonathanLoscalzo/asset-license-dev-demo",
"score": 3
} |
#### File: data/exceptions/base.py
```python
class AssetManagerException(Exception):
message: str
def __init__(self):
super().__init__(self.message)
class ItemNotFound(AssetManagerException):
def __init__(self, id):
self.id = id
self.message = f"Item {id} not found"
super().__init__()
class MethodShouldNotBeImplemented(AssetManagerException):
def __init__(self, method: str):
self.method = method
self.message = f"Method {method} should not be implemented"
super().__init__()
class AssetHasAssignedException(AssetManagerException):
def __init__(self, asset_id):
        self.message = f"Asset {asset_id} already has a user assigned!"
super().__init__()
class UserJustHaveLicenseException(AssetManagerException):
def __init__(self, license_id, dev_id):
        self.message = (
            f"License({license_id}) has already been assigned to User({dev_id})"
        )
super().__init__()
class UserJustHaveAssetException(AssetManagerException):
def __init__(self, dev_id, asset_id):
        self.message = (
            f"Developer {dev_id} already has this asset ({asset_id})!"
        )
super().__init__()
class UserAndAssetNotRelatedException(AssetManagerException):
def __init__(self, dev_id, asset_id):
        self.message = (
            f"Developer {dev_id} is not related to this asset ({asset_id})!"
        )
super().__init__()
class UserAndLicenseNotRelatedException(AssetManagerException):
    def __init__(self, dev_id, license_id):
        self.message = f"Developer {dev_id} is not related to this license ({license_id})!"
super().__init__()
class DeveloperInactiveException(AssetManagerException):
def __init__(self, dev_id):
self.message = f"Developer {dev_id} is not activated!"
super().__init__()
```
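Every concrete error above builds its message in `__init__` and hands it to the shared base, so callers can catch the whole family through `AssetManagerException`. A quick illustration; the ObjectId string is made up.
```python
from asset_manager.data.exceptions.base import AssetManagerException, ItemNotFound

try:
    raise ItemNotFound("60c7b2f1e13823a1f0a1b2c3")
except AssetManagerException as exc:
    print(exc)  # -> "Item 60c7b2f1e13823a1f0a1b2c3 not found"
```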
#### File: data/repos/license.py
```python
from pymongo.database import Database
from asset_manager.data.repos.base import MongoRepository
from asset_manager.data.schemas.license import LicenseMongo
class LicenseRepository(MongoRepository[LicenseMongo]):
def __init__(self, db: Database):
super().__init__(db, "licenses", LicenseMongo)
```
#### File: data/schemas/base.py
```python
from typing import Optional
from bson.objectid import ObjectId as BsonObjectId
from pydantic import BaseModel, Field
class PydanticObjectId(BsonObjectId):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
if not isinstance(v, BsonObjectId):
raise TypeError("ObjectId required")
return str(v)
class BaseMongoModel(BaseModel):
_id: PydanticObjectId
id: Optional[PydanticObjectId] = Field(..., alias="_id")
class CreateBaseMongoModel(BaseModel):
# _id: Optional[PydanticObjectId]
# id: Optional[PydanticObjectId] = Field(..., alias="_id")
pass
```
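`PydanticObjectId` lets models accept the raw `ObjectId` that pymongo returns and expose it as a string, while the `_id` alias on `BaseMongoModel.id` maps Mongo's field name onto a normal attribute. A minimal sketch with a hypothetical `DeveloperOut` model (pydantic v1 style, matching the `parse_obj` calls used elsewhere in this repo):
```python
from bson import ObjectId

from asset_manager.data.schemas.base import BaseMongoModel


class DeveloperOut(BaseMongoModel):  # hypothetical model, for illustration only
    name: str


doc = {"_id": ObjectId(), "name": "Ada"}  # shaped like a pymongo result document
dev = DeveloperOut.parse_obj(doc)
print(dev.id, dev.name)  # dev.id is the hex string form of the ObjectId
```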
#### File: data/services/developer.py
```python
from typing import List
from asset_manager.data.exceptions.base import (
AssetHasAssignedException,
DeveloperInactiveException,
ItemNotFound,
UserAndAssetNotRelatedException,
UserAndLicenseNotRelatedException,
UserJustHaveAssetException,
UserJustHaveLicenseException,
)
from asset_manager.data.repos.assets import AssetRepository
from asset_manager.data.repos.developers import DeveloperRepository
from asset_manager.data.repos.license import LicenseRepository
from asset_manager.data.schemas.developer import (
CreateDeveloperMongo,
DeveloperMongo,
)
from bson import ObjectId
from asset_manager.models.models import (
Asset,
CreateDev,
Developer,
FullDeveloper,
License,
)
from asset_manager.models.responses import ApiStatus, OutputResponse
class DeveloperService:
__repository: DeveloperRepository
def __init__(
self,
repository: DeveloperRepository,
asset_repo: AssetRepository,
license_repo: LicenseRepository,
):
self.__repository = repository
self._asset_repo = asset_repo
self._license_repo = license_repo
def get_all(self) -> List[FullDeveloper]:
return list(
map(
FullDeveloper.create_model_from_devmongo,
self.__repository.get_all(),
)
)
def add(self, create_dev: CreateDev) -> Developer:
devMongo = CreateDeveloperMongo(**create_dev.dict())
_id = self.__repository.add(devMongo)
return Developer(id=str(_id), **create_dev.dict())
def _change_active_user(self, uid, active: bool):
res = self.__repository.update(uid, {"$set": {"active": active}})
if res is None:
raise ItemNotFound(uid)
return active
def activate(self, uid) -> OutputResponse:
self._change_active_user(uid, True)
return OutputResponse[dict](
status=ApiStatus.ok,
data={"id": uid},
message=f"Developer {uid} activated",
)
def deactivate(self, uid) -> OutputResponse:
_ = self.__repository.find_and_update(
uid, {"active": True}, {"$set": {"assets": [], "licenses": []}}
)
self._asset_repo.find_all_and_update(
{"user": ObjectId(uid)}, {"$set": {"user": None}}
)
self._change_active_user(uid, False)
return OutputResponse[dict](
status=ApiStatus.ok,
data={"id": uid},
message=f"Developer {uid} deactivated",
)
def add_asset(self, developer_id, asset_id) -> OutputResponse[dict]:
"""Add asset relationship to current user
        Constraints:
- asset has just one dev
- dev may have several assets
Args:
developer_id (uid): developer id (ObjectId)
asset_id (uid): asset id (ObjectId)
"""
asset = self._asset_repo.get(asset_id)
if asset.user is not None:
raise AssetHasAssignedException(asset.id)
relationship = self.__repository.get_by_filter(
{"_id": ObjectId(developer_id), "assets": {"$in": [asset.id]}}
)
if relationship:
            raise UserJustHaveAssetException(developer_id, asset.id)
dev = self.__repository.find_and_update(
developer_id,
{"active": True},
{"$push": {"assets": asset.id}},
)
if dev is None:
raise DeveloperInactiveException(developer_id)
self._asset_repo.update(
asset.id, {"$set": {"user": ObjectId(developer_id)}}
)
return OutputResponse[dict](
status=ApiStatus.ok,
data={
"developer": FullDeveloper.create_model_from_devmongo(
DeveloperMongo.parse_obj(dev)
)
},
message="Updated relationship",
)
    def add_license(self, developer_id, license_id) -> OutputResponse[dict]:
        """Add a license to a user
Args:
developer_id (ObjectId): str from an ObjectId
license_id (uid): in this case is the code
Raises:
            UserJustHaveLicenseException: raised if the user already has the license.
Returns:
OutputResponse: a plain new Developer
"""
license = self._license_repo.get(license_id)
relationship = self.__repository.get_by_filter(
{"_id": ObjectId(developer_id), "licenses": {"$in": [license.id]}}
)
if relationship:
raise UserJustHaveLicenseException(license_id, dev_id=developer_id)
dev = self.__repository.find_and_update(
developer_id,
{"active": True},
{"$push": {"licenses": license.id}},
)
if dev is None:
raise DeveloperInactiveException(developer_id)
return OutputResponse[dict](
status=ApiStatus.ok,
data={
"developer": FullDeveloper.create_model_from_devmongo(
DeveloperMongo.parse_obj(dev)
)
},
message="Updated relationship",
)
def remove_license(self, developer_id, license_id) -> OutputResponse[dict]:
relationship = self.__repository.get_by_filter(
{"_id": ObjectId(developer_id), "licenses": {"$in": [license_id]}}
)
if not relationship:
raise UserAndLicenseNotRelatedException(developer_id, license_id)
self.__repository.update(
developer_id, {"$pull": {"licenses": {"$in": [license_id]}}}
)
return OutputResponse[dict](
status=ApiStatus.ok,
data={"dev": developer_id, "license": license_id},
message="Removed relationship "
+ f"between Dev({developer_id}) and License({license_id})",
)
def remove_asset(self, developer_id, asset_id) -> OutputResponse[dict]:
relationship = self.__repository.get_by_filter(
{"_id": ObjectId(developer_id), "assets": {"$in": [asset_id]}}
)
if not relationship:
raise UserAndAssetNotRelatedException(developer_id, asset_id)
self.__repository.update(
developer_id, {"$pull": {"assets": {"$in": [asset_id]}}}
)
self._asset_repo.update(asset_id, {"$set": {"user": None}})
return OutputResponse[dict](
status=ApiStatus.ok,
data={"dev": developer_id, "asset": asset_id},
message=f"Removed relationship "
+ f"between Dev({developer_id}) and Asset({asset_id})",
)
def get_licenses(self, developer_id) -> OutputResponse[List[License]]:
dev = self.__repository.get(developer_id)
licenses = list(
self._license_repo.get_all_by_filter(
{
"$or": [
{"id": {"$in": dev.licenses}},
{"_id": {"$in": dev.licenses}},
]
}
)
)
return OutputResponse(
data=list(map(License.parse_obj, licenses)),
message=f"Returned licenses assigned to Developer {developer_id}",
)
def get_assets(self, developer_id) -> OutputResponse[List[Asset]]:
dev = self.__repository.get(developer_id)
assets = list(
self._asset_repo.get_all_by_filter(
{
"$or": [
{"id": {"$in": dev.assets}},
{"_id": {"$in": dev.assets}},
]
}
)
)
return OutputResponse(
data=list(map(Asset.create_from_asset_mongo, assets)),
message=f"Returned assets assigned to Developer {developer_id}",
)
```
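The guard clauses in `add_asset` can be exercised without a running MongoDB by mocking the repositories. The snippet below is purely illustrative; the ids and the use of `MagicMock` are assumptions, not project test code.
```python
from unittest.mock import MagicMock

from asset_manager.data.exceptions.base import AssetHasAssignedException
from asset_manager.data.services.developer import DeveloperService

asset_repo = MagicMock()
asset_repo.get.return_value = MagicMock(id="A1", user="some-other-dev")  # asset already taken

service = DeveloperService(MagicMock(), asset_repo, MagicMock())
try:
    service.add_asset("60c7b2f1e13823a1f0a1b2c3", "A1")
except AssetHasAssignedException as exc:
    print(exc)  # -> "Asset A1 already has a user assigned!"
```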
#### File: asset_manager/routers/auth.py
```python
from fastapi import APIRouter, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from pymongo.database import Database
from asset_manager.models.auth import LoginUser, Token
from asset_manager.utils.security import (
authenticate_user,
create_access_token,
)
from asset_manager.deps import get_current_user, get_db
router = APIRouter(
tags=["auth"],
dependencies=[],
responses={404: {"description": "Not found"}},
)
@router.post("/token", response_model=Token)
async def login_for_access_token(
db: Database = Depends(get_db),
form_data: OAuth2PasswordRequestForm = Depends(),
):
user = authenticate_user(None, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token = create_access_token(data={"sub": user.username})
return Token(access_token=access_token, token_type="bearer")
@router.get("/me")
async def read_users_me(current_user: LoginUser = Depends(get_current_user)):
return current_user
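# Illustrative client-side flow against this router (assumptions: a local dev server
# at http://localhost:8000 and made-up credentials):
#
#   import requests
#   token = requests.post("http://localhost:8000/token",
#                         data={"username": "ada", "password": "secret"}).json()["access_token"]
#   requests.get("http://localhost:8000/me",
#                headers={"Authorization": "Bearer " + token}).json()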
``` |
{
"source": "JonathanLoscalzo/eci2019-ventas_estacionales",
"score": 3
} |
#### File: src/features/build_features.py
```python
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
def impute_values(df):
"""
    This function imputes the missing values of the dataframe it receives (all columns treated the same way).
"""
    # imputation strategy: fill with the column mean
    imp = SimpleImputer(strategy='mean')
    # impute
df_impute = pd.DataFrame(imp.fit_transform(df))
df_impute.columns = df.columns
return df_impute
def assign_tipo_pos(df_pos, df_ventas_pos_freq):
df_pos.loc[lambda df:
(df.ventas >= df_ventas_pos_freq.loc["POCAS","ini"])
& (df.ventas <= df_ventas_pos_freq.loc["POCAS","fin"]), "tipo_pos_ventas"] = "POCAS"
df_pos.loc[lambda df:
(df.ventas >= df_ventas_pos_freq.loc["MEDIO","ini"])
& (df.ventas <= df_ventas_pos_freq.loc["MEDIO","fin"]), "tipo_pos_ventas"] = "MEDIO"
df_pos.loc[lambda df:
(df.ventas >= df_ventas_pos_freq.loc["MUCHAS","ini"])
& (df.ventas <= df_ventas_pos_freq.loc["MUCHAS","fin"]), "tipo_pos_ventas"] = "MUCHAS"
df_pos.loc[lambda df:
(df.ventas >= df_ventas_pos_freq.loc["SIN VENTAS","ini"])
& (df.ventas <= df_ventas_pos_freq.loc["SIN VENTAS","fin"]), "tipo_pos_ventas"] = "SIN VENTAS"
def add_date_features(df, col_name="fecha"):
"""
    This function transforms the column received as a parameter
    into several derived date columns.
"""
df['date'] = df[col_name]
# df['dayofweek'] = df['date'].dt.dayofweek
df['year'] = df['date'].dt.year
df['month'] = df['date'].dt.month
# df['dayofyear'] = df['date'].dt.dayofyear
# df['dayofmonth'] = df['date'].dt.day
# df['weekofyear'] = df['date'].dt.weekofyear
# df['is_weekend'] = np.where(df['date'].dt.dayofweek.isin([5,6]), 1, 0)
df = df.drop([col_name,'date'], axis=1, errors='ignore')
return df
def group_canal(df_ventas):
    df_ventas.loc[df_ventas.canal != "ALMACEN", "canal"] = "OTROS"
def assign_unidades_anteriores(ventas_totales, year, month):
return (
ventas_totales
.set_index('id_pos')
[lambda df: (df.year == year) & (df.month==month)]
.unidades
.reindex(ventas_totales.id_pos.unique())
.fillna(0) # o -1?
)
```
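A short, hedged sketch of how the helpers above chain together on a toy frame; the values are made up, `fecha` and `unidades` mirror column names used elsewhere in this module, and the import assumes the `src` package is importable.
```python
import numpy as np
import pandas as pd

from src.features.build_features import add_date_features, impute_values

df = pd.DataFrame({
    "fecha": pd.to_datetime(["2019-01-15", "2019-02-20"]),
    "unidades": [10.0, np.nan],
})

df = add_date_features(df, col_name="fecha")          # adds year/month, drops the date column
df[["unidades"]] = impute_values(df[["unidades"]])    # mean-imputes the missing sales figure
print(df)
```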
#### File: src/visualization/visualize.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def print_correlated_columns(df, columns, print_correlated_hard = False, print_correlated_slight=False):
"""
    This function returns:
    - the correlation matrix
    - the correlation pairs of slightly linear attributes (0.8 > |x| >= 0.5)
    - the correlation pairs of strongly linear attributes (|x| >= 0.8)"""
corr_matrix = df[columns].corr()
    upper_triangle = np.triu(np.ones(corr_matrix.shape), k=1).astype(bool)
    upper = corr_matrix.where(upper_triangle)
    slight_lineal_cols = [
        column for column in upper.columns
        if any((np.abs(upper[column]) < 0.8) & (np.abs(upper[column]) >= 0.5))
    ]
    lineal_cols = [column for column in upper.columns if any(np.abs(upper[column]) >= 0.8)]
# print("levemente lineales ", slight_lineal_cols)
# print("fuertemente lineales ", lineal_cols)
if print_correlated_hard:
return (
upper
.loc[:, lineal_cols]
.sort_values(by=lineal_cols, ascending=False)
.stack()
.dropna()
.reset_index()
            .loc[lambda df: np.abs(df[0]) >= .8]
)
elif print_correlated_slight:
return (
upper
.loc[:, slight_lineal_cols]
.sort_values(by=slight_lineal_cols, ascending=False)
.stack()
.dropna()
.reset_index()
.loc[lambda df: (0.8 > np.abs(df[0])) & (np.abs(df[0]) >= 0.5)]
)
else:
return df[columns].corr()
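# Illustrative usage (not executed here): given a numeric dataframe df,
#   print_correlated_columns(df, df.columns, print_correlated_hard=True)
# returns the column pairs whose absolute correlation is >= 0.8, while
#   print_correlated_columns(df, df.columns, print_correlated_slight=True)
# returns the pairs with absolute correlation in [0.5, 0.8).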
def plot_importance_reg(reg, columns, title):
# https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_regression.html#sphx-glr-auto-examples-ensemble-plot-gradient-boosting-regression-py
top_n = 10
feat_imp = pd.DataFrame({'importance':reg.feature_importances_})
feat_imp['feature']=columns
feat_imp.sort_values(by='importance', ascending=False, inplace=True)
feat_imp = feat_imp.iloc[:top_n]
feat_imp.sort_values(by='importance', inplace=True)
feat_imp = feat_imp.set_index('feature', drop=True)
ax = feat_imp.plot.barh(title=title, figsize=(12,6))
plt.xlabel('Feature Importance Score')
plt.savefig("../reports/figures/05-01-{}-feature_importance.svg".format(title))
plt.show()
return ax
``` |
{
"source": "jonathanlunt/disdat",
"score": 2
} |
#### File: context.template/bin/entrypoint.py
```python
import argparse
import disdat.apply
import disdat.common
import disdat.fs
import disdat.api
import logging
import os
import sys
import boto3
from botocore.exceptions import ClientError
_PIPELINE_CLASS_ENVVAR = 'PIPELINE_CLASS'
_HELP = """ Run a Disdat pipeline. This script wraps up several of the
steps required to run a pipeline, including: creating a working context,
running a pipeline class to generate an output bundle, and pushing an
output bundle to a Disdat remote.
"""
_logger = logging.getLogger(__name__)
def _context_and_remote(context_name, remote=None):
"""Create a new Disdat context and bind remote if not None.
Check environment for 'LOCAL_EXECUTION', which should exist and be True if we are running
a container in an existing .disdat environment (e.g., on someone's laptop).
If so, do not take any actions that would change the state of the users CLI. That is, do not
switch contexts.
Args:
context_name (str): A fully-qualified context name. remote-context/local-context
remote (str): S3 remote name.
"""
if len(context_name.split('/')) <= 1:
_logger.error("Partial context name: Expected <remote-context>/<local-context>, got '{}'".format(context_name))
return False
retval = disdat.api.context(context_name)
if retval == 1: # branch exists
_logger.warn("Entrypoint found existing local context {} ".format(context_name))
_logger.warn("Entrypoint not switching and ignoring directive to change to remote context {}".format(remote))
elif retval == 0: # just made a new branch
if remote is not None:
_logger.info("Entrypoint made a new context {}, attaching remote {}".format(context_name, remote))
_remote(context_name, remote)
else:
_logger.error("Entrypoint got non standard retval {} from api.context({}) command.".format(retval, context_name))
return False
if disdat.common.LOCAL_EXECUTION not in os.environ:
disdat.api.switch(context_name)
else:
_logger.info("Container running locally (not in a cloud provider, aka AWS). Not switching contexts")
return True
def _remote(context_arg, remote_url):
""" Add remote to our context.
Args:
context_arg: <remote context>/<local context> or <local context> to use in this container
remote_url: The remote to add to this local context
Returns:
None
"""
_logger.debug("Adding remote at URL {} for branch '{}'".format(remote_url, context_arg))
contexts = context_arg.split('/')
if len(contexts) > 1:
remote_context = contexts[0]
local_context = contexts[1]
else:
local_context = contexts[0]
remote_context = local_context
if remote_url is None:
_logger.error("Got an invalid URL {}".format(remote_url))
return False
try:
disdat.api.remote(local_context, remote_context, remote_url, force=True)
except Exception:
return False
return True
def retrieve_secret(secret_name):
""" Placeholder for ability to retrieve secrets needed by image
Returns:
"""
raise NotImplementedError
# Modify these to get them from the current environment
endpoint_url = "https://secretsmanager.us-west-2.amazonaws.com"
region_name = "us-west-2"
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name,
endpoint_url=endpoint_url
)
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
print("The requested secret " + secret_name + " was not found")
elif e.response['Error']['Code'] == 'InvalidRequestException':
print("The request was invalid due to:", e)
elif e.response['Error']['Code'] == 'InvalidParameterException':
print("The request had invalid params:", e)
else:
# Decrypted secret using the associated KMS CMK
# Depending on whether the secret was a string or binary, one of these fields will be populated
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
binary_secret_data = get_secret_value_response['SecretBinary']
print ("Found the secret string as ")
print secret
def add_argument_help_string(help_string, default=None):
if default is None:
return '{}'.format(help_string)
else:
return "{} (default '{}')".format(help_string, default)
def _commit_and_push(b):
""" commit and push bundle b if not transient """
if disdat.common.BUNDLE_TAG_TRANSIENT not in b.tags:
b.commit()
b.push()
def run_disdat_container(args):
""" Execute Disdat inside of container
Args:
args: input arguments
Returns:
None
"""
print "Entrypoint running with args: {}".format(args)
    # By default containerized execution ALWAYS localizes bundles on demand
incremental_pull = True
print ("Entrypoint running with incremental_pull=={}".format(incremental_pull))
client = boto3.client('sts')
response = client.get_caller_identity()
_logger.info("boto3 caller identity {}".format(response))
# Check to make sure that we have initialized the Disdat environment
if not os.path.exists(os.path.join(os.environ['HOME'], '.config', 'disdat')):
_logger.warning("Disdat environment possibly uninitialized?")
# Create context, add remote, and switch to it
if not _context_and_remote(args.branch, args.remote):
_logger.error("Failed to branch to \'{}\' and optionally bind to \'{}\'".format(args.branch,
args.remote))
sys.exit(os.EX_IOERR)
# Pull the remote branch into the local branch or download individual items
try:
if not args.no_pull:
disdat.api.pull(args.branch, localize=not incremental_pull)
else:
fetch_list = []
if args.fetch is not None:
fetch_list = ['{}'.format(kv[0]) for kv in args.fetch]
if len(fetch_list) > 0:
for b in fetch_list:
disdat.api.pull(args.branch, bundle_name=b)
except Exception as e:
_logger.error("Failed to pull and localize all bundles from context {} due to {}".format(args.branch, e))
sys.exit(os.EX_IOERR)
# If specified, decode the ordinary 'key:value' strings into a dictionary of tags.
input_tags = {}
if args.input_tag is not None:
input_tags = disdat.common.parse_args_tags(args.input_tag)
output_tags = {}
if args.output_tag is not None:
output_tags = disdat.common.parse_args_tags(args.output_tag)
# Convert string of pipeline args into dictionary for api.apply
pipeline_args = disdat.common.parse_params(args.pipeline_args)
# If the user wants final and intermediate, then inc push.
if not args.no_push and not args.no_push_intermediates:
incremental_push = True
else:
incremental_push = False
try:
result = disdat.api.apply(args.branch,
args.output_bundle,
args.pipeline,
input_tags=input_tags,
output_tags=output_tags,
params=pipeline_args,
output_bundle_uuid=args.output_bundle_uuid,
force=args.force,
workers=args.workers,
incremental_push=incremental_push,
incremental_pull=incremental_pull)
if not incremental_push:
if not args.no_push:
if not args.no_push_intermediates:
to_push = disdat.api.search(args.branch, is_committed=False, find_intermediates=True)
for b in to_push:
_commit_and_push(b)
if result['did_work']:
_logger.info("Pipeline ran. Committing and pushing output bundle UUID {}.".format(args.output_bundle_uuid))
b = disdat.api.get(None, uuid=args.output_bundle_uuid)
assert(b is not None)
_commit_and_push(b)
else:
_logger.info("Pipeline ran but did no useful work (output bundle exists).")
else:
_logger.info("Pipeline ran but user specified not to push any bundles to remote context.")
else:
_logger.info("Pipeline ran using incremental pushing.")
except RuntimeError as re:
_logger.error('Failed to run pipeline: RuntimeError {}'.format(re))
sys.exit(os.EX_IOERR)
except disdat.common.ApplyException as ae:
_logger.error('Failed to run pipeline: ApplyException {}'.format(ae))
sys.exit(os.EX_IOERR)
if args.dump_output:
print(disdat.api.cat(args.branch, args.output_bundle))
sys.exit(os.EX_OK)
def main(input_args):
# To simplify configuring and building pipeline images, we keep all
# of the various defaults parameter values in the Docker image makefile,
# and pass them on as Docker ENV variables.
_pipeline_class_default = os.environ[_PIPELINE_CLASS_ENVVAR] if _PIPELINE_CLASS_ENVVAR in os.environ else None
parser = argparse.ArgumentParser(
description=_HELP,
)
parser.add_argument(
'--dump-output',
help='Dump the output to standard output',
action='store_true',
)
parser.add_argument(
'--debug-level',
default=logging.INFO,
help='The debug logging level (default {})'.format(logging.getLevelName(logging.WARNING))
)
disdat_parser = parser.add_argument_group('remote repository arguments')
disdat_parser.add_argument(
'--remote',
type=str,
required=True,
help='The s3 bucket from/to which to pull/push data',
)
disdat_parser.add_argument(
'--no-pull',
action='store_true',
help='Do not pull (synchronize) remote repository with local repo - may cause entire pipeline to re-run.',
)
disdat_parser.add_argument(
'--no-push',
action='store_true',
help='Do not push output bundles (including intermediates) to the remote repository (default is to push)',
)
disdat_parser.add_argument(
'--no-push-intermediates',
action='store_true',
help='Do not push the intermediate bundles to the remote repository (default is to push)',
)
pipeline_parser = parser.add_argument_group('pipe arguments')
pipeline_parser.add_argument(
'--pipeline',
default=_pipeline_class_default,
type=str,
required=(_pipeline_class_default is None),
help=add_argument_help_string('Name of the pipeline class to run', _pipeline_class_default),
)
pipeline_parser.add_argument(
'--branch',
type=str,
required=True,
help='The fully-qualified Disdat branch to use when running',
)
pipeline_parser.add_argument(
'--workers',
type=int,
default=2,
help="The number of Luigi workers to spawn. Default is 2."
)
pipeline_parser.add_argument(
'-it', '--input-tag',
nargs=1, type=str, action='append',
help="Input bundle tags: '-it authoritative:True -it version:0.7.1'")
pipeline_parser.add_argument(
'-ot', '--output-tag',
nargs=1, type=str, action='append',
help="Output bundle tags: '-ot authoritative:True -ot version:0.7.1'")
pipeline_parser.add_argument(
'-f', '--fetch',
nargs=1, type=str, action='append',
help="Fetch a bundle before execution: '-f some.input.bundle'")
pipeline_parser.add_argument(
'--output-bundle-uuid',
default=None,
type=str,
help='UUID for the output bundle (default is for apply to generate a UUID)',
)
pipeline_parser.add_argument(
'--force',
action='store_true',
help='Force recomputation of all pipe dependencies (default is to recompute dependencies with changed inputs or code)',
)
pipeline_parser.add_argument(
'output_bundle',
type=str,
help='Name for the output bundle',
)
pipeline_parser.add_argument(
"pipeline_args",
nargs=argparse.REMAINDER,
type=str,
help="Optional set of parameters for this pipe '--parameter value'"
)
args = parser.parse_args(input_args)
logging.basicConfig(level=args.debug_level)
_logger.setLevel(args.debug_level)
run_disdat_container(args)
if __name__ == '__main__':
main(sys.argv[1:])
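# Example invocation (sketch; the bucket, context, pipeline, and bundle names are hypothetical):
#   python entrypoint.py --remote s3://my-bucket/disdat \
#       --branch team-remote-ctx/my-local-ctx \
#       --pipeline mymodule.MyPipeline \
#       my.output.bundle --my_param some_value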
```
#### File: kickstart/bin/find_packages_from_setup.py
```python
import argparse
import imp
import mock
import os
import setuptools
def find_packages(setup_py):
packages = []
# All this horrid hackery to recover the install_requires parameter from
# a setup() call in setup.py.
#
# https://stackoverflow.com/questions/24236266/how-to-extract-dependencies-information-from-a-setup-py
try:
# Patch setuptools to intercept the setup() call
with mock.patch.object(setuptools, 'setup') as setup_mock:
# Get an open file handle and a description of the
# setup file.
setup_file, setup_filename, setup_description = imp.find_module('setup', [os.path.dirname(setup_py)])
# Load setup.py as the module setup. We have to
# intercept calls to find_packages as well since
# find_packages will run a 'find'-like operation from
# the current working directory - which is Bad if the
# CWD is the root directory...
with mock.patch.object(setuptools, 'find_packages'):
imp.load_module('setup', setup_file, setup_filename, setup_description)
# Grab the call args to setup
_, setup_kwargs = setup_mock.call_args
# ...and recover the install_requires parameter. Fun, eh?
# Don't forget to remove trailing version specifiers that
# lack version numbers.
packages = ['{}'.format(p.rstrip('<>=')) for p in setup_kwargs['install_requires']]
finally:
# As warned in the docs, we have to close the setup file
# ourselves.
if setup_file is not None:
setup_file.close()
return packages
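# Illustrative behaviour (package names are made up): for a setup.py containing
#   install_requires=['boto3>=', 'six==1.11.0']
# find_packages() returns ['boto3', 'six==1.11.0'] -- only a trailing specifier
# without a version number is stripped.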
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Select Python packages from setup.py'
)
parser.add_argument(
'setup_py',
type=str,
help='The setup.py file',
)
args = parser.parse_args()
if not os.path.exists(args.setup_py):
raise RuntimeError('Failed to find file {}'.format(args.setup_py))
print '\n'.join(find_packages(args.setup_py))
```
#### File: disdat/utility/which.py
```python
import os
def which(cmd_name):
'''Get the full path to an external command executable.
:param cmd_name: The command name
:return: The full path to the command executable, or `None` if the
executable is not on the O/S path
:rtype: str
'''
paths = os.environ['PATH'].split(os.pathsep)
for p in paths:
cmd_fq_name = os.path.join(p, cmd_name)
if os.path.exists(cmd_fq_name) and os.access(cmd_fq_name, os.X_OK):
return cmd_fq_name
return None
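# Usage sketch: which('git') might return '/usr/bin/git' on a typical Linux
# install, or None when the command is not on the PATH.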
```
#### File: tests/functional/test_context.py
```python
from disdat.pipe import PipeTask
import disdat.api as api
class ContextTest(PipeTask):
def pipe_requires(self, pipeline_input=None):
self.set_bundle_name('context_test')
def pipe_run(self, pipeline_input=None):
return 2
def test_create_context():
context_name = '__test__'
assert context_name not in api.ls_contexts(), 'Context exists'
api.context(context_name)
assert context_name in api.ls_contexts(), 'Test context does exists'
api.delete_context(context_name=context_name)
assert context_name not in api.ls_contexts(), 'Test context exists'
def test_independent_context():
context_1_name = '__test_context_1__'
context_2_name = '__test_context_2__'
api.context(context_1_name)
api.context(context_2_name)
api.apply(context_1_name, '-', 'ContextTest')
assert len(api.search(context_1_name)) == 1, 'Only one bundle should be in context one'
assert len(api.search(context_2_name)) == 0, 'Context two should be empty'
api.delete_context(context_name=context_1_name)
api.delete_context(context_name=context_2_name)
assert context_1_name not in api.ls_contexts(), 'Contexts should be removed'
assert context_2_name not in api.ls_contexts(), 'Contexts should be removed'
```
#### File: tests/functional/test_output_types.py
```python
import numpy as np
import pandas as pd
import pytest
from disdat.pipe import PipeTask
import disdat.api as api
TEST_CONTEXT = '___test_context___'
@pytest.fixture(autouse=True)
def run_test():
# Remove test context before running test
setup()
yield
def setup():
if TEST_CONTEXT in api.ls_contexts():
api.delete_context(context_name=TEST_CONTEXT)
api.context(context_name=TEST_CONTEXT)
# Test Return Types
class IntTask(PipeTask):
def pipe_requires(self, pipeline_input=None):
self.set_bundle_name('int_task')
def pipe_run(self, pipeline_input=None):
return 1
def test_int_task():
assert len(api.search(TEST_CONTEXT)) == 0, 'Context should be empty'
api.apply(TEST_CONTEXT, '-', 'IntTask')
data = api.get(TEST_CONTEXT, 'int_task').data
assert data == 1, 'Data did not match output'
assert type(data) == int, 'Data is not int'
assert len(api.search(TEST_CONTEXT)) == 1, 'Only one bundle should be present'
class StringTask(PipeTask):
def pipe_requires(self, pipeline_input=None):
self.set_bundle_name('string_task')
def pipe_run(self, pipeline_input=None):
return 'output'
def test_string_task():
assert len(api.search(TEST_CONTEXT)) == 0, 'Context should be empty'
api.apply(TEST_CONTEXT, '-', 'StringTask')
data = api.get(TEST_CONTEXT, 'string_task').data
assert data == 'output', 'Data did not match output'
assert type(data) == unicode, 'Data is not string'
assert len(api.search(TEST_CONTEXT)) == 1, 'Only one bundle should be present'
class FloatTask(PipeTask):
def pipe_requires(self, pipeline_input=None):
self.set_bundle_name('float_task')
def pipe_run(self, pipeline_input=None):
return 2.5
def test_float_task():
assert len(api.search(TEST_CONTEXT)) == 0, 'Context should be empty'
api.apply(TEST_CONTEXT, '-', 'FloatTask')
data = api.get(TEST_CONTEXT, 'float_task').data
assert data == 2.5, 'Data did not match output'
assert type(data) == float, 'Data is not float'
assert len(api.search(TEST_CONTEXT)) == 1, 'Only one bundle should be present'
class ListTask(PipeTask):
def pipe_requires(self, pipeline_input=None):
self.set_bundle_name('list_task')
def pipe_run(self, pipeline_input=None):
return [1, 2, 3]
def test_list_task():
assert len(api.search(TEST_CONTEXT)) == 0, 'Context should be empty'
api.apply(TEST_CONTEXT, '-', 'ListTask')
data = api.get(TEST_CONTEXT, 'list_task').data
assert np.array_equal(data, [1, 2, 3]), 'Data did not match output'
assert type(data) == np.ndarray, 'Data is not list'
assert len(api.search(TEST_CONTEXT)) == 1, 'Only one bundle should be present'
class DataFrameTask(PipeTask):
def pipe_requires(self, pipeline_input=None):
self.set_bundle_name('df_task')
def pipe_run(self, pipeline_input=None):
df = pd.DataFrame()
df['a'] = [1, 2, 3]
return df
def test_df_task():
assert len(api.search(TEST_CONTEXT)) == 0, 'Context should be empty'
api.apply(TEST_CONTEXT, '-', 'DataFrameTask')
data = api.get(TEST_CONTEXT, 'df_task').data
df = pd.DataFrame()
df['a'] = [1, 2, 3]
assert df.equals(data), 'Data did not match output'
assert type(data) == pd.DataFrame, 'Data is not df'
assert len(api.search(TEST_CONTEXT)) == 1, 'Only one bundle should be present'
class FileTask(PipeTask):
def pipe_requires(self, pipeline_input=None):
self.set_bundle_name('file_task')
def pipe_run(self, pipeline_input=None):
target = self.create_output_file('test.txt')
with target.open('w') as of:
of.write('5')
return target
def test_file_task():
assert len(api.search(TEST_CONTEXT)) == 0, 'Context should be empty'
api.apply(TEST_CONTEXT, '-', 'FileTask')
output_path = api.get(TEST_CONTEXT, 'file_task').data
with open(output_path) as f:
output = f.read()
assert output == '5', 'Data did not match output'
assert type(output_path )== str, 'Data is not path'
assert len(api.search(TEST_CONTEXT)) == 1, 'Only one bundle should be present'
# Does not support dict return type
# class DictTask(PipeTask):
# def pipe_requires(self, pipeline_input=None):
# self.set_bundle_name('dict_task')
#
# def pipe_run(self, pipeline_input=None):
# return {
# 'hello': 'world'
# }
#
#
# def test_dict_task():
# setup()
# assert len(api.search(TEST_CONTEXT)) == 0, 'Context should be empty'
#
# api.apply(TEST_CONTEXT, '-', '-', 'DictTask')
# data = api.get(TEST_CONTEXT, 'dict_task').data
#
# assert data == {
# 'hello': 'world'
# }, 'Data did not match output'
# assert type(data) == dict, 'Data is not dict'
# assert len(api.search(TEST_CONTEXT)) == 1, 'Only one bundle should be present'
if __name__ == '__main__':
setup()
test_df_task()
``` |
{
"source": "jonathanlurie/BLANK_PY2WX",
"score": 3
} |
#### File: BLANK_PY2WX/src/Model.py
```python
import time
from wx.lib.pubsub import pub
class Model:
def __init__(self):
        pass  # nothing to initialise
# This function might be called by the Controller.
# Since the Model is supposed to work even without the Controller and the View,
# it sends a message that might be caught by "someone" (aka. the Controller)
def processSomething(self):
#Simulating a process
#time.sleep(1)
#pub.sendMessage("MESSAGE SPREAD", message="A bottle in the sea.", arg2 = 99, arg3 = 99.1)
for i in range(0, 5):
pub.sendMessage("MESSAGE SPREAD", message="A bottle in the sea." + str(i), arg2 = 99, arg3 = 99.1)
time.sleep(1)
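# Sketch of how a Controller could listen for the message sent above (the listener
# name is hypothetical; the keyword arguments mirror the sendMessage call):
# def on_message(message, arg2, arg3):
#     print(message, arg2, arg3)
# pub.subscribe(on_message, "MESSAGE SPREAD")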
``` |
{
"source": "jonathanlurie/ExifAddressFinder",
"score": 2
} |
#### File: ExifAddressFinder/exif_address_finder/ExifAddressFinderManager.py
```python
import GeoToolbox
import exifread
import piexif
from IFD_KEYS_REFERENCE import *
import exifWriter
import os
class ExifAddressFinderManager:
_geotoolbox = None
def __init__(self):
self._geotoolbox = GeoToolbox.GeoToolbox()
    # returns a dictionary {"lat": yy.yyy, "lon": xx.xxx}
# or None if not found
def _getGpsCoordinates(self, fileAddress):
f = open(fileAddress, 'rb')
# Return Exif tags
tags = exifread.process_file(f)
# add positionning
if('EXIF GPS GPSLatitude' in tags.keys() and 'EXIF GPS GPSLongitude' in tags.keys()):
# dealing with latitutes
latValues = tags["EXIF GPS GPSLatitude"].values
latRef = tags["EXIF GPS GPSLatitudeRef"]
latInt = float(latValues[0].num)
latDec = float(latValues[1].num) / float(latValues[1].den) / 60. + float(latValues[2].num) / float(latValues[2].den) / 3600.
lat = latInt + latDec
if(latRef.values != 'N'):
lat = lat * (-1)
# dealing with longitudes
lonValues = tags["EXIF GPS GPSLongitude"].values
lonRef = tags["EXIF GPS GPSLongitudeRef"]
lonInt = float(lonValues[0].num)
lonDec = float(lonValues[1].num) / float(lonValues[1].den) / 60. + float(lonValues[2].num) / float(lonValues[2].den) / 3600.
lon = lonInt + lonDec
if(lonRef.values != 'E'):
lon = lon * (-1)
return {"lat": lat, "lon": lon}
else:
return None
    # return the address if found
    # returns None if it could not be retrieved
def _retrieveAddress(self, latitude, longitude):
address = self._geotoolbox.getAddress(latitude=latitude, longitude=longitude)
        # if the address was successfully retrieved
if(address["status"]):
return address["address"]
else:
return None
# update the EXIF Decription field with the real postal address
def _updateDescription(self, fileAddress, locationAddress, addToFormer=False):
# reading exif
exifDict = piexif.load(fileAddress)
newDict = exifWriter.writeField(exifDict, DESCRIPTION_FIELD, locationAddress, addToFormer)
exifWriter.writeExifToFile(newDict, fileAddress)
def addAddressToImage(self, fileAddress, prefix="", suffix="", addToFormer=False):
coordinates = self._getGpsCoordinates(fileAddress)
if(not coordinates):
print("\tERROR: "+ os.path.basename(fileAddress) +" is not geo tagged")
return None
postalAddress = self._retrieveAddress(coordinates["lat"], coordinates["lon"])
if(not postalAddress):
print("\tERROR: The address was impossible to retrieve")
return None
self._updateDescription(fileAddress, prefix + postalAddress + suffix, addToFormer)
return 1
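# Usage sketch (the photo path is hypothetical):
# manager = ExifAddressFinderManager()
# manager.addAddressToImage('/photos/IMG_0001.jpg', prefix='Taken at: ', addToFormer=True)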
``` |
{
"source": "jonathanlurie/Tubular",
"score": 3
} |
#### File: gooey/examples/widget_demo.py
```python
import sys
import hashlib
from time import time as _time
from time import sleep as _sleep
from gooey import Gooey
from gooey import GooeyParser
@Gooey
def arbitrary_function():
desc = "Example application to show Gooey's various widgets"
file_help_msg = "Name of the file you want to process"
my_cool_parser = GooeyParser(description=desc)
# my_cool_parser.add_argument("FileChooser", help=file_help_msg, widget="FileChooser") # positional
# my_cool_parser.add_argument("DirectoryChooser", help=file_help_msg, widget="DirChooser") # positional
# my_cool_parser.add_argument("FileSaver", help=file_help_msg, widget="FileSaver") # positional
# my_cool_parser.add_argument("MultiFileSaver", help=file_help_msg, widget="MultiFileChooser") # positional
# my_cool_parser.add_argument("directory", help="Directory to store output") # positional
my_cool_parser.add_argument('-c', '--countdown', default=2, type=int, help='sets the time to count down from you see its quite simple!')
my_cool_parser.add_argument('-j', '--cron-schedule', type=int, help='Set the datetime when the cron should begin', widget='DateChooser')
my_cool_parser.add_argument("-s", "--showtime", action="store_true", help="display the countdown timer")
my_cool_parser.add_argument("-d", "--delay", action="store_true", help="Delay execution for a bit")
my_cool_parser.add_argument('-v', '--verbose', action='count')
my_cool_parser.add_argument("-o", "--obfuscate", action="store_true", help="obfuscate the countdown timer!")
my_cool_parser.add_argument('-r', '--recursive', choices=['yes', 'no'], help='Recurse into subfolders')
my_cool_parser.add_argument("-w", "--writelog", default="No, NOT whatevs", help="write log to some file or something")
my_cool_parser.add_argument("-e", "--expandAll", action="store_true", help="expand all processes")
verbosity = my_cool_parser.add_mutually_exclusive_group()
verbosity.add_argument('-t', '--verbozze', dest='verbose', action="store_true", help="Show more details")
verbosity.add_argument('-q', '--quiet', dest='quiet', action="store_true", help="Only output on error")
print my_cool_parser._actions
print 'inside of main(), my_cool_parser =', my_cool_parser
args = my_cool_parser.parse_args()
main(args)
def main(args):
print sys.argv
print args.countdown
print args.showtime
start_time = _time()
print 'Counting down from %s' % args.countdown
while _time() - start_time < args.countdown:
if args.showtime:
print 'printing message at: %s' % _time()
else:
print 'printing message at: %s' % hashlib.md5(str(_time())).hexdigest()
_sleep(.5)
print 'Finished running the program. Byeeeeesss!'
def here_is_smore():
pass
if __name__ == '__main__':
arbitrary_function()
```
#### File: gooey/gui/application.py
```python
import wx
import os
import sys
import json
import argparse
from functools import partial
from gooey.gui.lang import i18n
from gooey.gui.windows.base_window import BaseWindow
from gooey.gui.windows.advanced_config import AdvancedConfigPanel
# C:\Users\Chris\Dropbox\pretty_gui\Gooey\gooey\gui\application.py
from gooey.python_bindings import config_generator, source_parser
def main():
parser = argparse.ArgumentParser(
description='Gooey turns your command line programs into beautiful, user friendly GUIs')
parser.add_argument(
'-b', '--create-build-script',
dest='build_script',
help='Parse the supplied Python File and generate a runnable Gooey build script'
)
parser.add_argument(
'-r', '--run',
dest='run',
nargs='?',
const='',
help='Run Gooey with build_config in local dir OR via the supplied config path'
)
args = parser.parse_args()
if args.build_script:
do_build_script(args.build_script)
elif args.run is not None:
do_run(args)
def do_build_script(module_path):
with open(module_path, 'r') as f:
if not source_parser.has_argparse(f.read()):
raise AssertionError('Argparse not found in module. Unable to continue')
gooey_config = config_generator.create_from_parser(module_path, show_config=True)
outfile = os.path.join(os.getcwd(), 'gooey_config.json')
print 'Writing config file to: {}'.format(outfile)
with open(outfile, 'w') as f:
f.write(json.dumps(gooey_config, indent=2))
def do_run(args):
gooey_config = args.run or read_local_dir()
if not os.path.exists(gooey_config):
raise IOError('Gooey Config not found')
with open(gooey_config, 'r') as f:
build_spec = json.load(f)
print json.dumps(build_spec)
run(build_spec)
def run(build_spec):
app = wx.App(False)
i18n.load(build_spec['language'])
BodyPanel = partial(AdvancedConfigPanel, build_spec=build_spec)
frame = BaseWindow(BodyPanel, build_spec)
frame.Show(True)
app.MainLoop()
def read_local_dir():
local_files = os.listdir(os.getcwd())
if 'gooey_config.json' not in local_files:
print "Bugger! gooey_config.json not found!"
sys.exit(1)
return os.path.join(os.getcwd(), 'gooey_config.json')
if __name__ == '__main__':
main()
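# Sketch of the two entry modes wired up above (the module name is hypothetical):
#   python application.py --create-build-script my_script.py   # writes gooey_config.json
#   python application.py --run                                # loads gooey_config.json from the cwd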
```
#### File: gooey/gui/client_app.py
```python
import sys
from gooey.gui.action_sorter import ActionSorter
class ClientApp(object):
def __init__(self, parser, payload):
self._parser = parser
self.description = parser.description
self.action_groups = ActionSorter(self._parser._actions)
self.payload = payload
def HasPositionals(self):
if self.action_groups._positionals:
return True
return False
def IsValidArgString(self, arg_string):
if isinstance(self._Parse(arg_string), str):
return False
return True
def _Parse(self, arg_string):
try:
self._parser.parse_args(arg_string.split())
return True
except Exception as e:
return str(e)
def GetErrorMsg(self, arg_string):
return self._FormatMsg(self._Parse(arg_string))
def _FormatMsg(self, msg):
output = list(msg)
if ':' in output:
output[output.index(':')] = ':\n '
return ''.join(output)
def AddToArgv(self, arg_string):
sys.argv.extend(arg_string.split())
class EmptyClientApp(object):
def __init__(self, payload):
'''
initializes a BlankModel object
As you can see. This class does nothing..
'''
self.description = ''
self.payload = payload
if __name__ == '__main__':
pass
```
#### File: gooey/gui/component_builder.py
```python
import itertools
from gooey.gui.widgets import components
class ComponentBuilder(object):
def __init__(self, build_spec):
self.build_spec = build_spec
_required_specs = self.build_spec.get('required', None)
_optional_specs = self.build_spec.get('optional', None)
self.required_args = self.build_widget(_required_specs) if _required_specs else []
optionals = self.build_widget(_optional_specs) if _optional_specs else None
if _optional_specs:
self.flags = [widget for widget in optionals if isinstance(widget, components.CheckBox)]
self.general_options = [widget for widget in optionals if not isinstance(widget, components.CheckBox)]
else:
self.flags = []
self.general_options = []
def build_widget(self, build_spec):
assembled_widgets = []
for spec in build_spec:
widget_type = spec['type']
properties = spec['data']
Component = getattr(components, widget_type)
assembled_widgets.append(Component(data=properties))
return assembled_widgets
def __iter__(self):
'''
return an iterator for all of the contained gui
'''
return itertools.chain(self.required_args or [],
self.flags or [],
self.general_options or [])
```
#### File: gooey/gui/message_router.py
```python
import threading
__author__ = 'Chris'
class MessageRouter(threading.Thread):
def __init__(self, textbox, process_to_route):
threading.Thread.__init__(self)
self.textbox = textbox
self.process = process_to_route
def run(self):
while True:
line = self.process.stdout.readline()
if not line:
break
```
#### File: gooey/gui/option_reader.py
```python
from abc import ABCMeta, abstractmethod
class OptionReader(object):
'''
Mixin for forcing subclasses to
honor GetOptions method
'''
__metaclass__ = ABCMeta
@abstractmethod
def GetOptions(self):
'''
Implemented by subclasses.
Defines how the config panel Views retrieve their options
'''
pass
```
#### File: gui/windows/basic_config_panel.py
```python
import wx
from gooey.gui.lang import i18n
from gooey.gui.option_reader import OptionReader
class BasicConfigPanel(wx.Panel, OptionReader):
def __init__(self, parent, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self.header_msg = None
self.cmd_textbox = None
self._init_properties()
self._init_components()
self._do_layout()
def _init_components(self):
self.header_msg = self._bold_static_text(i18n.translate('simple_config'))
self.cmd_textbox = wx.TextCtrl(self, -1, "")
def _init_properties(self):
self.SetBackgroundColour('#F0F0F0')
def _do_layout(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddSpacer(50)
sizer.Add(self.header_msg, 0, wx.LEFT, 20)
sizer.AddSpacer(10)
h_sizer = wx.BoxSizer(wx.HORIZONTAL)
h_sizer.Add(self.cmd_textbox, 1, wx.ALL | wx.EXPAND)
sizer.Add(h_sizer, 0, wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, 20)
self.SetSizer(sizer)
def _bold_static_text(self, text_label):
text = wx.StaticText(self, label=text_label)
font_size = text.GetFont().GetPointSize()
bold = wx.Font(font_size, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)
text.SetFont(bold)
return text
def GetOptions(self):
return self.cmd_textbox.GetValue()
def RegisterController(self, controller):
pass
```
#### File: gui/windows/header.py
```python
import wx
from gooey.gui import imageutil, image_repository
from gooey.gui import styling
from gooey.gui.lang import i18n
PAD_SIZE = 10
class FrameHeader(wx.Panel):
def __init__(self, heading, subheading, **kwargs):
wx.Panel.__init__(self, **kwargs)
self.SetDoubleBuffered(True)
self._controller = None
self._header = None
self._subheader = None
self._settings_img = None
self._running_img = None
self._check_mark = None
self._init_properties()
self._init_components(heading, subheading)
self._init_pages()
self._do_layout()
def _init_properties(self):
self.SetBackgroundColour('#ffffff')
self.SetSize((30, 90))
self.SetMinSize((120, 80))
def _init_components(self, heading, subheading):
self._header = styling.H1(self, heading)
self._subheader = wx.StaticText(self, label=subheading)
self._settings_img = self._load_image(image_repository.settings2, height=79)
self._running_img = self._load_image(image_repository.computer3, 79)
self._check_mark = self._load_image(image_repository.alessandro_rei_checkmark, height=75)
def _do_layout(self):
vsizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
headings_sizer = self.build_heading_sizer()
sizer.Add(headings_sizer, 1, wx.ALIGN_LEFT | wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.LEFT, PAD_SIZE)
sizer.Add(self._settings_img, 0, wx.ALIGN_RIGHT | wx.EXPAND | wx.RIGHT, PAD_SIZE)
sizer.Add(self._running_img, 0, wx.ALIGN_RIGHT | wx.EXPAND | wx.RIGHT, PAD_SIZE)
sizer.Add(self._check_mark, 0, wx.ALIGN_RIGHT | wx.EXPAND | wx.RIGHT, PAD_SIZE)
self._running_img.Hide()
self._check_mark.Hide()
vsizer.Add(sizer, 1, wx.EXPAND)
self.SetSizer(vsizer)
def _load_image(self, img_path, height=70):
return imageutil.resize_bitmap(self, imageutil._load_image(img_path), height)
def build_heading_sizer(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddStretchSpacer(1)
sizer.Add(self._header, 0)
sizer.Add(self._subheader, 0)
sizer.AddStretchSpacer(1)
return sizer
def RegisterController(self, controller):
if self._controller is None:
self._controller = controller
def _init_pages(self):
def PageOne():
self._header.SetLabel(i18n.translate("running_title"))
self._subheader.SetLabel(i18n.translate('running_msg'))
self._check_mark.Hide()
self._settings_img.Hide()
self._running_img.Show()
self.Layout()
def PageTwo():
self._header.SetLabel(i18n.translate('finished_title'))
self._subheader.SetLabel(i18n.translate('finished_msg'))
self._running_img.Hide()
self._check_mark.Show()
self.Layout()
self._pages = iter([PageOne, PageTwo])
def NextPage(self):
try:
next(self._pages)()
except:
self._init_pages()
next(self._pages)()
```
#### File: gooey/python_bindings/modules.py
```python
import os
import time
import hashlib
from itertools import dropwhile
import sys
import tempfile
sys.path.append(os.path.dirname(__file__))
def load(module_source):
descriptor, tmpfilepath = tempfile.mkstemp(suffix='.py')
tmpfiledir = os.path.dirname(tmpfilepath)
tmpfilename = os.path.splitext(os.path.split(tmpfilepath)[-1])[0]
sys.path.append(tmpfiledir)
try:
with open(tmpfilepath, 'w') as f:
f.write(module_source)
return __import__(tmpfilename)
finally:
os.close(descriptor)
os.remove(tmpfilepath)
if __name__ == '__main__':
pass
```
#### File: gooey/tests/i18n_unittest.py
```python
# '''
# Created on Jan 25, 2014
#
# @author: Chris
# '''
#
# import unittest
#
# import i18n
#
#
# class Test(unittest.TestCase):
#
# def test_i18n_loads_module_by_name(self):
# self.assertTrue(i18n._DICTIONARY is None)
#
# i18n.load('english')
# self.assertTrue(i18n._DICTIONARY is not None)
# self.assertEqual('Cancel', i18n.translate('cancel'))
#
# i18n.load('french')
# self.assertEqual('Annuler', i18n.translate('cancel'))
#
#
# def test_i18n_throws_exception_on_no_lang_file_found(self):
# self.assertRaises(IOError, i18n.load, 'chionenglish')
#
#
#
#
#
# if __name__ == "__main__":
# pass
# #import sys;sys.argv = ['', 'Test.testName']
# unittest.main()
```
#### File: gooey/tests/modules_unittest.py
```python
from gooey.python_bindings import modules
module_source = \
'''
some_var = 1234
def fooey():
return 10
'''
def test_load_creates_and_imports_module_from_string_source():
module = modules.load(module_source)
assert 10 == module.fooey()
```
#### File: gooey/_tmp/module_with_no_argparse.py
```python
import time
from gooey import Gooey
def main():
end = time.time() + 10
while end > time.time():
print 'Jello!', time.time()
time.sleep(.8)
if __name__ == '__main__':
main()
``` |
{
"source": "jonathanlxy/JobSearchBoost",
"score": 3
} |
#### File: JobSearchBoost/Model Training/Doc2Vec.py
```python
import pandas as pd
import numpy as np
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
# Best performance
import multiprocessing
# Logging
import time
import gensim
def bind_desc_skill( job ):
if job[1] is np.nan:
return job[0]
else:
return ' '.join( job )
def make_sentence( doc, stem = True ):
low_doc = doc.lower()
# Punctuation removal
tokenizer = RegexpTokenizer( r'\w+' )
tokens_P = tokenizer.tokenize( low_doc )
# Stopwords removal, html special tags included
custom_stopwords = stopwords.words( 'english' ) + [ 'nbsp', 'amp' ]
tokens_PS = [ token for token in tokens_P if token not in custom_stopwords ]
# Stemming
if stem == True:
stemmer = PorterStemmer()
sentence = [ stemmer.stem( token ) for token in tokens_PS ]
else:
sentence = tokens_PS
return sentence
def tag_sentence( sentences, tags ):
for i, line in enumerate( sentences ):
yield gensim.models.doc2vec.TaggedDocument( line, [ tags[i] ] )
# Jobs
jobs = pd.read_csv( 'unique_jobs.csv', encoding = 'utf-8' )
jobs['desc_skill'] = jobs[ [ 'job_description', 'skills' ] ].apply( bind_desc_skill, axis = 1 )
unique_desc = jobs[ 'desc_skill' ].unique()
# Resume
import codecs
with codecs.open( 'sample_resume.txt', 'r', encoding = 'utf-8' ) as f:
resume_sentence = make_sentence( f.read(), stem = False )
# Corpus
sentences = map( lambda doc: make_sentence( doc, stem = False ), jobs[ 'desc_skill' ].tolist() )
train = list( tag_sentence( sentences = sentences + [ resume_sentence ],
tags = jobs.job_url.tolist() + [ 'resume' ]
)
)
# Doc2Vec training. This might take a long time to run
d2v_model = gensim.models.doc2vec.Doc2Vec( size = 50, min_count = 2, iter = 10,
alpha = 0.025, min_alpha = 0.025, workers = multiprocessing.cpu_count() )
d2v_model.build_vocab( train )
for epoch in range( 10 ):
print time.ctime()
print 'Training started. Current epoch: %i' %epoch
d2v_model.train( train )
print 'One training round finished. Epoch finished: %i' %epoch
d2v_model.alpha -= 0.002 # decrease the learning rate
d2v_model.min_alpha = d2v_model.alpha # fix the learning rate, no decay
d2v_model.save( 'my_model.doc2vec' )
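# A possible follow-up step (sketch, assuming the older gensim Doc2Vec API used above):
# rank job postings by similarity to the document that was tagged 'resume'.
# similar_jobs = d2v_model.docvecs.most_similar('resume', topn=10)
# for url, score in similar_jobs:
#     print url, score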
``` |
{
"source": "Jonathanm10/drifter",
"score": 2
} |
#### File: drifter/tests/conftest.py
```python
from contextlib import contextmanager
import logging
import os
import shutil
import string
import subprocess
import time
import pytest
import yaml
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
TESTS_DIR = os.path.abspath(os.path.dirname(__file__))
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
@contextmanager
def log_time(description):
start_time = time.time()
yield
end_time = time.time()
logger.debug("%s took %s seconds", description, end_time - start_time)
class Box:
def __init__(self, base_dir):
self.base_dir = base_dir
logger.debug("Creating box environment in %s", self.base_dir)
self._create_environment()
self._base_lxc_id = None
def _run(self, cmd, read=True):
env = os.environ.copy()
env['VAGRANT_CWD'] = self.base_dir
env['VIRTUALIZATION_PARAMETERS_FILE'] = os.path.join(self.base_dir, 'parameters.yml')
with log_time(cmd):
logger.debug("Executing %s", cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
proc.wait()
if read:
return proc.stdout.read().decode('utf-8')
def get_lxc_id(self):
with open(os.path.join(self.base_dir, '.vagrant/machines/default/lxc/id')) as f:
id = f.read()
return id
def set_lxc_id(self, id):
with open(os.path.join(self.base_dir, '.vagrant/machines/default/lxc/id'), 'w') as f:
f.write(id)
def get_ip(self):
return self._run(['sudo', 'lxc-info', '-iH', '-n', self.get_lxc_id()], read=True)
def _template(self, src, dest, mapping):
with open(src) as f:
raw_file = f.read()
raw_file_template = string.Template(raw_file)
templated_file = raw_file_template.substitute(**mapping)
with open(dest, 'w') as f:
f.write(templated_file)
def _create_environment(self):
self._template(
os.path.join(TESTS_DIR, 'data/ansible.cfg'),
os.path.join(self.base_dir, 'ansible.cfg'),
{'roles_path': os.path.join(BASE_DIR, 'provisioning', 'roles')}
)
shutil.copy(
os.path.join(BASE_DIR, 'parameters.yml.dist'),
os.path.join(self.base_dir, 'parameters.yml')
)
shutil.copy(
os.path.join(BASE_DIR, 'provisioning', 'playbook.yml.dist'),
os.path.join(self.base_dir, 'playbook.yml')
)
shutil.copytree(
os.path.join(BASE_DIR, 'provisioning', 'roles'),
os.path.join(self.base_dir, 'roles')
)
with open(os.path.join(BASE_DIR, 'Vagrantfile.dist')) as f:
lines = f.readlines()
lines[-1] = "load '{}'".format(os.path.join(BASE_DIR, 'Vagrantfile'))
with open(os.path.join(self.base_dir, 'Vagrantfile'), 'w') as f:
f.write(''.join(lines))
def up(self, provision=False):
if provision:
self._run(['vagrant', 'up', '--provision'], read=False)
else:
# When not provisioning it's better to just use `lxc-start` which is much faster than vagrant
self._run(['sudo', 'lxc-start', '-n', self.get_lxc_id()], read=False)
def halt(self):
self._run(['sudo', 'lxc-stop', '-k', '-n', self.get_lxc_id()], read=False)
def destroy(self):
self._run(['vagrant', 'destroy', '-f'])
def execute(self, cmd):
return self._run(['sudo', 'lxc-attach', '-n', self.get_lxc_id(), '--'] + cmd.split(), read=True)
def provision(self, roles, parameters=None):
self._run(['sudo', 'lxc-copy', '-s', '-B', 'overlayfs', '-n', self.get_lxc_id(), '-N', self.get_lxc_id() + '_copy'])
self._base_lxc_id = self.get_lxc_id()
self.set_lxc_id(self.get_lxc_id() + '_copy')
with open(os.path.join(BASE_DIR, 'provisioning', 'playbook.yml.dist')) as f:
playbook = yaml.load(f.read())
for role in roles:
playbook[0]['roles'].append({'role': role})
with open(os.path.join(self.base_dir, 'playbook.yml'), 'w') as f:
f.write(yaml.dump(playbook))
with open(os.path.join(self.base_dir, 'parameters.yml')) as f:
parameters_yml = yaml.load(f.read())
parameters_yml['playbook'] = 'playbook.yml'
parameters_yml['forwarded_ports'] = []
for param, value in parameters.items():
parameters_yml[param] = value
with open(os.path.join(self.base_dir, 'parameters.yml'), 'w') as f:
f.write(yaml.dump(parameters_yml))
self.up()
self._run(['vagrant', 'provision'])
def delete_copy(self):
self._run(['sudo', 'lxc-stop', '-k', '-n', self.get_lxc_id()])
self._run(['sudo', 'lxc-destroy', '-n', self.get_lxc_id()])
self.set_lxc_id(self._base_lxc_id)
class BoxFactory:
def __init__(self, tmpdir_factory):
self.tmpdir_factory = tmpdir_factory
self.boxes = {}
def get_box(self, os):
os = os.replace('/', '-')
if os not in self.boxes:
self.boxes[os] = Box(str(self.tmpdir_factory.mkdir(os)))
self.boxes[os].up(provision=True)
# Base box must be stopped to be copied
self.boxes[os].halt()
return self.boxes[os]
def destroy(self):
for box in self.boxes.values():
box.destroy()
class LazyBox:
def __init__(self, box_factory):
self.box_factory = box_factory
self.box = None
def provision(self, roles, parameters, os='drifter/stretch64-base'):
self.box = self.box_factory.get_box(os)
self.box.provision(roles=roles, parameters=parameters)
return self.box
def up(self):
return self.box.up()
def destroy(self):
return self.box.destroy()
def execute(self, cmd):
return self.box.execute(cmd)
def delete_copy(self):
return self.box.delete_copy()
@pytest.fixture(scope='session')
def box_factory(tmpdir_factory):
box_factory = BoxFactory(tmpdir_factory.mktemp('drifter'))
yield box_factory
box_factory.destroy()
@pytest.fixture()
def box(box_factory):
vagrant_box = LazyBox(box_factory)
yield vagrant_box
vagrant_box.delete_copy()
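# Sketch of a test that could use the `box` fixture above (the role name and
# command are hypothetical; the roles actually shipped with drifter may differ):
#
# def test_git_role(box):
#     box.provision(roles=['git'], parameters={})
#     assert 'git version' in box.execute('git --version')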
``` |
{
"source": "JonathanMaes/Backupper",
"score": 3
} |
#### File: Backupper/source/programenv.py
```python
import ctypes
import psutil
import sys
import traceback
import urllib.request
import webbrowser
from packaging import version as pkg_version
## PROGRAM ENVIRONMENT VARIABLES ##
PROGRAMNAME = u"Jonathan's Backupper"
PROGRAMNAMEEXECUTABLE = u"jonathansbackupper"
COMPANYNAME = u"OrOrg Development inc."
REMOTE_FILENAME_CHANGELOG = 'https://raw.githubusercontent.com/JonathanMaes/Backupper/master/source/changelog.txt'
REMOTE_FILENAME_INSTALLER = 'https://github.com/JonathanMaes/Backupper/blob/master/installer/JonathansBackupper_installer.exe?raw=true'
CLIENT_FILENAME_CHANGELOG = 'changelog.txt'
with open(CLIENT_FILENAME_CHANGELOG, 'r') as f: # First line of changelog.txt is version
    VERSION = f.readline().strip()  # drop the trailing newline
## PROGRAM ENVIRONMENT FUNCTIONS ##
def reportError(fatal=False, notify=None, message=''):
if notify is None:
notify = fatal
exc = '\t' + traceback.format_exc().replace('\n', '\n\t')
message = '\n%s' % message
if fatal:
info = u"A fatal error occured: %s\n\n%s\n\nYou have to manually restart the program." % (message, exc)
else:
info = u"An non-fatal error occured: %s\n\n%s\n\nThe program has dealt with the error and continues to run correctly." % (message, exc)
if notify:
ctypes.windll.user32.MessageBoxW(0, info, PROGRAMNAME, 0)
else:
print(info)
if fatal:
sys.exit()
def checkForUpdates(versionFile=REMOTE_FILENAME_CHANGELOG):
'''
@param versionFile (string): The URL where the production changelog is located.
@return (bool): Whether a new update will be installed or not.
'''
# Read the file, and extract the remote version number, which is located at the top of versionFile
try:
with urllib.request.urlopen(versionFile) as changelog:
version_remote = changelog.readline().strip().decode("utf-8")
with open('changelog.txt', 'r') as f:
version_local = f.readline().strip()
shouldUpdate = pkg_version.parse(version_local) < pkg_version.parse(version_remote)
if shouldUpdate:
text = "An update for %s has been found.\nDo you wish to install it now?" % PROGRAMNAME
title = "%s Updater" % PROGRAMNAME
result = ctypes.windll.user32.MessageBoxW(0, text, title, 4) # 4: 'Yes'/'No'-messagebox
if result == 6: # 'Yes'
webbrowser.open(REMOTE_FILENAME_INSTALLER)
return True
elif result == 7: # 'No'
return False
else:
reportError(fatal=False, notify=True, message='Unknown return code in the updater messagebox.')
return False
except:
reportError(fatal=False, notify=False, message='%s: could not connect to update-server.' % versionFile)
return False
def checkIfRunning(shutOtherDown=True):
instances = []
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=['pid', 'name', 'create_time'])
if PROGRAMNAMEEXECUTABLE.lower() in pinfo['name'].lower():
instances.append(pinfo)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
sorted_instances = sorted(instances, key=lambda x:x['create_time'])
for elem in sorted_instances[:-1]: # All but the last one opened
if shutOtherDown:
psutil.Process(int(elem['pid'])).terminate() # Kill other processes
print('Program was already running, terminated previous instance.')
else:
print('WARNING: Multiple instances of the program are running.')
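# Typical startup sequence in the main script (sketch):
#   checkIfRunning(shutOtherDown=True)
#   checkForUpdates()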
``` |
{
"source": "jonathanmagal/BlackJack1",
"score": 4
} |
#### File: jonathanmagal/BlackJack1/blackjack2.py
```python
import enum
from typing import List, cast
import random
class Suit(enum.Enum):
HEARTS = enum.auto()
DIAMONDS = enum.auto()
SPADES = enum.auto()
CLUBS = enum.auto()
class Card(object):
CARD_VALUE_DICT = {11: "Jack", 12: "Queen", 13: "King", 14: "Ace"}
def __init__(self, suit: Suit, value: int):
self.suit = suit
self.value = value
def __repr__(self) -> str:
print_value = Card.CARD_VALUE_DICT.get(self.value, self.value)
return f"Card {print_value} of {self.suit.name}"
def __eq__(self, o: object) -> bool:
# Only cards can be checked for equality
if type(o) != type(self):
return False
o = cast(Card, o)
return self.value == o.value and self.suit == o.suit
def set_ace_to_one(self):
if self.value == 14:
self.value = 1
class Deck(object):
def __init__(self) -> None:
self.cards: List[Card] = []
for suit in [Suit.HEARTS, Suit.DIAMONDS, Suit.SPADES, Suit.CLUBS]:
for value in range(2, 15):
self.cards.append(Card(suit, value))
def shuffle(self) -> None:
random.shuffle(self.cards)
def take_card(self) -> Card:
return self.cards.pop()
class Player(object):
def __init__(self, name: str):
self.name = name
self.hand: List[Card] = []
def show_cards(self) -> None:
print("---------------------")
print(f"{self.name} cards:")
for card in self.hand:
print(f"{card}")
print("---------------------")
def get_card(self, card: Card) -> None:
self.hand.append(card)
def get_hand_value(self):
return sum(c.value for c in self.hand)
def ask_for_another_card(self):
print(f"** {self.name}s' turn **")
while True:
try:
useranswer = str(
input(f"{self.name} Do you want another card? [answer yes or no]"))
if useranswer.lower() == "yes" or useranswer.lower() == "no":
if useranswer.lower() == "yes":
return True
else:
return False
else:
print(f"{self.name} What??? yes or no")
except:
print(f"{self.name} What??? yes or no")
def checking21_singular(self):
if self.get_hand_value() > 21:
number_of_aces = 0
for card in self.hand:
if card.value == 14:
number_of_aces += 1
if self.get_hand_value() - (14*number_of_aces) < 21:
while self.get_hand_value() > 21:
for card2 in self.hand:
if card2.value == 14:
card2.set_ace_to_one()
print(
f"{self.name} change his Card Ace of {card2.value} to {card2}")
self.show_cards()
break
return True
else:
print(f"{self.name} is out of the game")
return False
else:
return True
class Game(object):
def __init__(self):
self.deck = Deck()
self.players: List[Player] = []
self.badplayers: List[Player] = []
def number_of_players(self):
PlayersValue = 0
while True:
try:
                PlayersValue = int(input("How many players?"))
if PlayersValue > 0:
return PlayersValue
else:
print("valid players value. try again...")
except:
print("valid players value. try again...")
def players_names(self, number):
for place in range(1, number + 1):
while True:
try:
playername = str(
input(f"hey player {place}, what is your name?"))
if playername.isalpha():
player = Player(playername)
self.players.append(player)
break
else:
print("valid name son. change your name!!!")
except:
print("valid name son. change your name@")
def show_cards(self):
for player in self.players:
player.show_cards()
def checking21(self):
        for player in list(self.players):  # iterate over a copy; players may be removed below
if player.get_hand_value() > 21:
number_of_aces = 0
for card in player.hand:
if card.value == 14:
number_of_aces += 1
if player.get_hand_value() - (14*number_of_aces) < 21:
for card2 in player.hand:
if card2.value == 14:
card2.set_ace_to_one()
print(
f"{player.name} change his Card Ace of {card2.value}, to {card2}")
if player.get_hand_value() < 21:
player.show_cards()
break
else:
x = self.players.remove(player)
print(f"{player.name} is out of the game")
# def ShowCard(self):
# print(self.suit, self.value)
# # change ace card from 11 to 1#
# def setAcetoOne(self):
# if self.value.lower() == "ace":
# self.value = "1"
# return self.value
# def valueofcard(self):
# if "ace" in self.value.lower():
# return 11
# elif "prince" in self.value.lower():
# return 12
# elif "queen" in self.value.lower():
# return 13
# elif "king" in self.value.lower():
# return 14
# else:
# return int(self.value)
# def __eq__(self, anotherCard):
# return self.value == anotherCard.value
# def __gt__(self, anotherCard):
# return self.value > anotherCard.value
# class Player:
# super().__init__()
# self.SetName(name)
# self.cards = cards.copy()
# def GetName(self):
# return self.name
# def SetName(self, Newname):
# if not Newname.isalpha():
# raise ValueError("Valid name")
# self.name = Newname[:]
# def GetCardsList(self):
# return self.cards
# def AddCard(self, anothercard):
# self.cards.append(anothercard)
# def SumCards(self):
# sum1 = 0
# for card in self.cards:
# sum1 = sum1 + card.valueofcard()
# return sum1
# ######################################################################################################
# # ~~~~~ all defunctions ~~~~~~~
# # returns the value of players who wanna play
# def HowMuchPlayers():
# while True:
# try:
# PlayersValue = int(input("How much players?"))
# if PlayersValue > 0:
# break
# else:
# print("valid players value. try again...")
# except:
# print("valid players value. try again...")
# return PlayersValue
# def CardCheck():
# card_value = [
# "1",
# "2",
# "3",
# "4",
# "5",
# "6",
# "7",
# "8",
# "9",
# "10",
# "Ace",
# "Prince",
# "Queen",
# "King",
# ]
# card_figure = ["HURT♥︎,", "CLUB♣︎,", "DIMOND♦︎,", "SPADE♠︎,"]
# value_random = card_value[random.randrange(0, 14)]
# figure_random = card_figure[random.randrange(0, 4)]
# return Card(figure_random, value_random)
# # creating list of Object PLAYERS. inserting each a name and list with two CARDS Objects
# def StartGame(value):
# playerslist1 = []
# for userplace in range(1, value + 1):
# while True:
# try:
# playername = str(input(f"hey player {userplace}, what is your name?"))
# if playername.isalpha():
# l = []
# l.append(CardCheck())
# l.append(CardCheck())
# player1 = Player(playername, l)
# playerslist1.append(player1)
# break
# else:
# print("valid name son. change your name!!!")
# except:
# print("valid name son. change your name!!!")
# return playerslist1
# # shows everyone's cards
# def showcardsofeveryone(playerlist1, playerwhosayno12):
# print("************************")
# print("************************")
# print("your's cards players 🤵🏻♂️")
# if playerlist1:
# for user in playerlist1:
# print("------------------------")
# print(user.GetName())
# for card in user.cards:
# print(card.GetCard())
# print("------------------------")
# if playerwhosayno12:
# for user1 in playerwhosayno12:
# print("------------------------")
# print(user1.GetName())
# for card in user1.cards:
# print(card.GetCard())
# print("------------------------")
# # asks the player if he watns another card. if the user wants to return True and if not return False.
# def wantsanothercard(user):
# print(f"** {user.GetName()}s' turn **")
# while True:
# try:
# useranswer = str(
# input(f"{user.GetName()} Do you want another card? [answer yes or no]")
# )
# if useranswer.lower() == "yes" or useranswer.lower() == "no":
# if useranswer.lower() == "yes":
# return True
# else:
# return False
# else:
# print(f"{user.GetName()} What??? yes or no")
# except:
# print(f"{user.GetName()} What??? yes or no")
# def checking21general(playerslist1):
# for user in playerslist1:
# sum = 0
# acelist = []
# for card in user.cards:
# if card.CardValue().lower() == "ace":
# acelist.append(card.valueofcard())
# sum = sum + card.valueofcard()
# if sum > 21:
# if acelist:
# if sum - (11 * len(acelist)) < 21:
# print(f"{user.GetName()} your Ace changed to 1. It was Inevitable.")
# for card2 in user.cards:
# if card2.CardValue().lower() == "ace":
# card2.setAcetoOne()
# break
# break
# else:
# print(f"{user.GetName()} removed from the game")
# playerslist1.remove(user)
# if not playerslist1:
# print(f"{user.GetName()} won the game! that was boring. 🤷🏽♂️")
# if len(playerslist1) == 1:
# print(f"{playerslist1[0].GetName()} won the game! that was boring. 🤷🏽♂️")
# playerslist1.pop()
# return playerslist1
# def playerpass21(user):
# sum = 0
# acelist = []
# for card in user.cards:
# if card.valueofcard() == 11:
# while True:
# try:
# useranswer = input(
# f"{user.GetName()} you got ACE at your card. Do you want to change his value to 1 ?"
# )
# if useranswer.lower() == "yes":
# card.setAcetoOne()
# print("Ace have changed to 1")
# break
# if useranswer.lower() == "no":
# print("ok Ace remain 11")
# acelist.append(card.valueofcard())
# break
# else:
# print(f"{user.GetName()} What??? yes or no")
# except:
# print(f"{user.GetName()} What??? yes or no")
# sum = sum + card.valueofcard()
# if sum > 21:
# if acelist:
# if sum - (11 * len(acelist)) < 21:
# print(f"{user.GetName()} your Ace changed to 1. It was Inevitable.")
# for card2 in user.cards:
# if card2.CardValue().lower() == "ace":
# card2.setAcetoOne()
# return user, True
# return user, False
# if sum <= 21:
# return user, True
# #####################################################################################################
# # ~~~~~ main program ~~~~~
# drawlist = []
# playerslistwhosaidno = []
# playersvalue = HowMuchPlayers()
# playerslist = StartGame(playersvalue)
# showcardsofeveryone(playerslist, playerslistwhosaidno)
# playerslist = checking21general(playerslist)
# while len(playerslist) > 0:
# for player in playerslist:
# playeranswer = wantsanothercard(player)
# if not playeranswer:
# playerslist.remove(player)
# playerslistwhosaidno.append(player)
# print(
# f"{player.GetName()} stops actions and waiting to all players to finish."
# )
# showcardsofeveryone(playerslist, playerslistwhosaidno)
# continue
# if playeranswer:
# player.cards.append(CardCheck())
# showcardsofeveryone(playerslist, playerslistwhosaidno)
# player1, playertag = playerpass21(player)
# if not playertag:
# playerslist.remove(player)
# print(f"{player.GetName()} has removed from game")
# showcardsofeveryone(playerslist, playerslistwhosaidno)
# if len(playerslist) == 1 and not playerslistwhosaidno:
# print(f"congrats, {playerslist[0].GetName()} won the game! 💰")
# playerslist.pop()
# continue
# else:
# player.cards = player1.cards[:]
# showcardsofeveryone(playerslist, playerslistwhosaidno)
# continue
# if playerslistwhosaidno:
# max = 0
# winnername = " "
# for playerno in playerslistwhosaidno:
# if playerno.SumCards() > max:
# max = playerno.SumCards()
# winnername = playerno.GetName()
# for checkdrew in playerslistwhosaidno:
# if checkdrew.SumCards() == max and checkdrew.GetName() != winnername:
# drawlist.append(checkdrew)
# if drawlist:
# for serchy in playerslistwhosaidno:
# if serchy.GetName() == winnername:
# drawlist.append(serchy)
# print("we have draw between:")
# showcardsofeveryone(drawlist, None)
# print(
# "ok ok ok listen. every one gets one card. \n the one closer to 21 WINS. Go"
# )
# for matchplayers in drawlist:
# matchplayers.cards.append(CardCheck())
# showcardsofeveryone(drawlist, None)
# gapmin = 0
# winnername2 = " "
# for matchplayers2 in drawlist:
# gapto21 = abs(21 - matchplayers2.SumCards())
# if gapmin < gapto21:
# gapmin = gapto21
# winnername2 = matchplayers2.GetName()
# print(f"{winnername2} is the winer after draw. his cards are the closet to 21")
# else:
# print(f"{winnername} is the winner!!!!")
# ##############################################################
# # @@@@@@@@@@@@@@@@@@@@@@@ asserting @ @@@@@@@@@@@@@@@@@@@@@@@@@@@
# ##############################################################
# cardaaa = Card("hurt", "9")
# cardbbb = Card("hurt", "10")
# cardccc = Card("Dimond", "king")
# cardddd = Card("Dimond", "3")
# cardeee = Card("club", "king")
# cardfff = Card("hurt", "3")
# cardhigh = Card("hurt", "prince")
# cardhigh2 = Card("hurt", "king")
# cardhigh3 = Card("hurt", "king")
# cardhigh4 = Card("dimond", "queen")
# card13 = Card("hurt", "ace")
# cardslist1 = [cardaaa, cardbbb]
# cardslist2 = [cardccc, cardddd]
# cardslist3 = [cardeee, cardfff]
# cardslisthigh = [cardhigh, cardhigh2]
# cardslisthigh2 = [cardhigh3, cardhigh4]
# assert cardaaa != cardbbb
# assert Card("hurt", "5") > Card("hurt", "3")
# playeraaa = Player("yonatan", cardslist1)
# playerbbb = Player("dror", cardslist2)
# playerccc = Player("YONI", cardslist3)
# playerhigh = Player("high", cardslisthigh)
# playerhigh2 = Player("hor", cardslisthigh2)
# playerhighlist44 = [playerhigh, playerhigh2]
# playerlist11 = [playeraaa, playerbbb]
# playerslistwhosaidno.append(playerbbb)
# playerslistwhosaidno.append(playerccc)
# cardofcardcheck = CardCheck()
# assert 0 < cardofcardcheck.valueofcard() < 15
# x, y = playerpass21(playerhigh)
# assert y == False
# z, a = playerpass21(playeraaa)
# assert a == True
# assert playeraaa.GetName().lower() == "yonatan"
# assert cardddd.valueofcard() == 3
# assert card13.valueofcard() == 11
# card13.setAcetoOne()
# assert card13.valueofcard() == 1
# card1 = Card("hurt", "ace")
# card2 = Card("hurt", "prince")
# card3 = Card("hurt", "3")
# card4 = Card("hurt", "4")
# card3 = Card("dimond", "1")
# card4 = Card("dimond", "1")
# liststam1 = [card3, card4]
# playerstam = Player("stam", liststam1)
# liststam2 = [playerstam]
# list1 = [card1, card2]
# list2 = [card3, card4]
# playera = Player("yoni", list1)
# playerb = Player("yuav", list2)
# playlist = [playera, playerb]
# x = checking21general(playlist)
# for y in x:
# for card in y.cards:
# assert card.CardValue().lower() != "ace"
# p = checking21general(playerhighlist44)
# assert len(p) < 2
# pp = checking21general(playerlist11)
# assert len(pp) == 2
``` |
{
"source": "JonathanMalott/KukaPython",
"score": 3
} |
#### File: JonathanMalott/KukaPython/kukapython.py
```python
class kukapython:
def __init__(self,name):
self.TOOL_IS_DEFINED = False
self.BASE_IS_DEFINED = False
#An Array that will contain all of the commands
self.code = []
#add some initial setup stuff
self.code.append("DEF "+str(name)+"()")
self.code.append("GLOBAL INTERRUPT DECL 3 WHEN $STOPMESS==TRUE DO IR_STOPM ( )")
"""
INTERRUPT
Description Executes one of the following actions:
- Activates an interrupt.
- Deactivates an interrupt.
- Disables an interrupt.
- Enables an interrupt.
Up to 16 interrupts may be active at any one time
"""
#self.code.append("INTERRUPT ON 3")
self.code.append("$APO.CDIS = 0.5000")
self.code.append("BAS (#INITMOV,0)")
self.code.append("BAS (#VEL_PTP,20)")
self.code.append("BAS (#ACC_PTP,20)")
self.code.append("")
"""
Advance run
The advance run is the maximum number of motion blocks that the robot controller calculates and plans in advance during program execution. The actual
number is dependent on the capacity of the computer.
The advance run refers to the current position of the block pointer. It is set via
the system variable $ADVANCE:
- Default value: 3
- Maximum value: 5
The advance run is required, for example, in order to be able to calculate approximate positioning motions. If $ADVANCE = 0 is set, approximate positioning is not possible.
Certain statements trigger an advance run stop. These include statements
that influence the periphery, e.g. OUT statements
"""
self.code.append("$advance=3")
#---------------------------------------------------------------------------
# The following methods are used to set the TOOL
#---------------------------------------------------------------------------
def setToolNumber(self, toolNumber):
if(self.TOOL_IS_DEFINED == False):
self.code.append("$TOOL=TOOL_DATA["+str(toolNumber)+"]")
self.TOOL_IS_DEFINED = True
return
raise Exception('You have already defined a tool. Either use setToolNumber or setToolCoordinates but not both.')
def setToolCoordinates(self,x,y,z,a,b,c):
#if(self.TOOL_IS_DEFINED == False):
if(True):
self.code.append("$TOOL={X "+str(x)+", Y "+str(y)+", Z "+str(z)+", A "+str(a)+", B "+str(b)+", C "+str(c)+"}")
self.TOOL_IS_DEFINED = True
return
raise Exception('You have already defined a tool. Either use setToolNumber or setToolCoordinates but not both.')
#---------------------------------------------------------------------------
# The following methods are used to set the BASE
#---------------------------------------------------------------------------
def setBaseNumber(self, number):
if(self.BASE_IS_DEFINED == False):
self.code.append("$BASE=BASE_DATA["+str(number)+"]")
self.BASE_IS_DEFINED = True
return
raise Exception('You have already defined a base. Either use setBaseNumber or setBaseCoordinates but not both.')
def setBaseCoordinates(self,x,y,z,a,b,c):
if(self.BASE_IS_DEFINED == False):
self.code.append("$BASE={X "+str(x)+", Y "+str(y)+", Z "+str(z)+", A "+str(a)+", B "+str(b)+", C "+str(c)+"}")
self.BASE_IS_DEFINED = True
return
raise Exception('You have already defined a base. Either use setBaseNumber or setBaseCoordinates but not both.')
def setVelocity(self,velocity):
self.code.append("$VEL.CP="+str(velocity))
def openFold(self,comment):
self.code.append(";FOLD "+str(comment))
def closeFold(self):
self.code.append(";ENDFOLD")
#---------------------------------------------------------------------------
# The following methods deal with inputs and outputs
#---------------------------------------------------------------------------
def setOutput(self, outputNumber, state):
self.code.append("$OUT["+str(outputNumber)+"] = "+str(state))
def WAIT(self,waitTime):
self.code.append("WAIT sec "+str(waitTime))
def PTP(self,a1,a2,a3,a4,a5,a6,e1,e2):
self.code.append("PTP {A1 "+str(a1)+", A2 "+str(a2)+", A3 "+str(a3)+", A4 "+str(a4)+", A5 "+str(a5)+", A6 "+str(a6)+", E1 "+str(e1)+", E2 "+str(e2)+", E3 0, E4 0, E5 0, E6 0}")
def LIN(self,x,y,z,a,b,c,e1,e2):
self.code.append("LIN {X "+str(x)+", Y "+str(y)+", Z "+str(z)+", A "+str(a)+", B "+str(b)+", C "+str(c)+", E1 "+str(e1)+", E2 "+str(e2)+"} C_DIS")
def CIRC(self,x1,y1,z1,a1,b1,c1,e11,e21,x2,y2,z2,a2,b2,c2,e12,e22):
self.code.append("CIRC {X "+str(x1)+", Y "+str(y1)+", Z "+str(z1)+", A "+str(a1)+", B "+str(b1)+", C "+str(c1)+", E1 "+str(e11)+", E2 "+str(e21)+"},{X "+str(x2)+", Y "+str(y2)+", Z "+str(z2)+", A "+str(a2)+", B "+str(b2)+", C "+str(c2)+", E1 "+str(e12)+", E2 "+str(e22)+"} C_DIS")
def LIN_REL(self,x=False,y=False,z=False,a=False,b=False,c=False,e1=False,e2=False):
st = []
if(x):
st.append("X "+str(x))
if(y):
st.append("Y "+str(y))
if(z):
st.append("Z "+str(z))
if(a):
st.append("A "+str(a))
if(b):
st.append("B "+str(b))
if(c):
st.append("C "+str(c))
if(e1):
st.append("E1 "+str(e1))
if(e2):
st.append("E2 "+str(e2))
assert len(st) > 0
app = ""
for s in range(len(st)-1):
app += st[s] + ", "
app += st[-1]
self.code.append("LIN_REL {"+app+"} C_DIS")
def PTP_REL(self,a1=False,a2=False,a3=False,a4=False,a5=False,a6=False,e1=False,e2=False):
st = []
if(a1):
st.append("A1 "+str(a1))
if(a2):
st.append("A2 "+str(a2))
if(a3):
st.append("A3 "+str(a3))
if(a4):
st.append("A4 "+str(a4))
if(a5):
st.append("A5 "+str(a5))
if(a6):
st.append("A6 "+str(a6))
if(e1):
st.append("E1 "+str(e1))
if(e2):
st.append("E2 "+str(e2))
assert len(st) > 0
app = ""
for s in range(len(st)-1):
app += st[s] + ", "
app += st[-1]
self.code.append("PTP_REL {"+app+"} C_DIS")
def COMMENT(self,text):
self.code.append(";"+str(text))
def BREAK(self):
self.code.append("")
#---------------------------------------------------------------------------
# Export the code as a file
#---------------------------------------------------------------------------
def saveFile(self, filename):
if(self.TOOL_IS_DEFINED == False):
raise Exception('You must define a tool')
if(self.BASE_IS_DEFINED == False):
raise Exception('You must define a base')
#Since we are done adding lines to the program, we will END it
self.code.append("END")
#Write each line of the KUKA src program to the specified file
with open(filename, "w") as fileOut:
for line in range(len(self.code)-1):
fileOut.write(self.code[line] + "\n")
fileOut.write(self.code[-1])
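# --- Hypothetical usage sketch (not part of the original module) ---
# The tool/base values, motion targets and file name below are illustrative only.
# prog = kukapython("demo")
# prog.setToolCoordinates(0, 0, 150, 0, 0, 0)
# prog.setBaseNumber(1)
# prog.setVelocity(0.25)
# prog.COMMENT("safe joint position, then a linear approach")
# prog.PTP(0, -90, 90, 0, 0, 0, 0, 0)
# prog.LIN(500, 0, 600, 0, 90, 0, 0, 0)
# prog.saveFile("demo.src")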
``` |
{
"source": "jonathan-marsan/addition_example",
"score": 3
} |
#### File: addition_example/additions/sum.py
```python
def get_sum(x, y):
return x+y
def get_sum_all(nbrs):
x = 0
for element in nbrs:
x += element
return x
``` |
{
"source": "jonathan-marsan/plotlyextreme",
"score": 3
} |
#### File: jonathan-marsan/plotlyextreme/plots.py
```python
from plotlyextreme import utilities
def create_big_number(title, large_number, render_func, pacing='--'):
"""
title = description of big number
large_number = whatever number you want to highlight
render_func = plotly rendering function like iplot
"""
pacing_font_color = utilities.warning_color_font(pacing)
background_color = utilities.warning_color_background(pacing)
fig = {
"data": [
{
"values": [100],
"labels": [
"Results"
],
"hoverinfo":'none',
'textinfo':'none',
"hole": 1.0,
"type": "pie"
}],
"layout": {
"title": title,
"titlefont": {
"size": 30
},
"showlegend": False,
"height": 300,
"width": 280,
"paper_bgcolor": background_color,
"annotations": [
{
"font":
{
"size": 40,
"family": 'Arial'
},
"showarrow": False,
"text": utilities.add_thousand_separator(large_number),
"x": 0.5,
"y": 0.5
},
{
"font":
{
"size": 18,
"color": pacing_font_color
},
"showarrow": False,
"text": utilities.perc_format(pacing) + ' of pacing',
"x": 0.5,
"y": 0.02
}
]
}
}
return render_func(fig)
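# A minimal, hypothetical call (values are placeholders; any plotly rendering
# function such as plotly.offline.iplot can be passed as render_func):
# from plotly.offline import iplot
# create_big_number("Bookings", 1234567, iplot, pacing=0.87)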
def plot_four_dimensions(df, x, y, title, segmentation_col, button_col,
buttons, default_visibility, trace_func, render_func,
x_layout='', y_layout='', labels=[]):
"""
Plot four dimensions using a plotly line chart and buttons
df = dataframe with four dimensions, typically time, metric, and two categorical variables
x = column for x-axis
y = column for y-axis
title = default title for graph
segmentation_col = third dimension in the graph, segmenting the data
button_col = fourth dimension in the graph showing a subset of x, y and segmentation_col based on this column
buttons = enables ordering the buttons
default_visibility = enables selection of which graph is shown on load
trace_func = plotly trace function like plotly.graph_objs.Scatter
render_func = plotly rendering function like iplot
x_layout, y_layout = optional axis layout objects (e.g. plotly.graph_objs.layout.XAxis)
labels = optional labels forwarded to the trace builder (trace colors come from an internal 20-color list)
"""
color_list = ['#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080']
# Clean up some strings
buttons = [element.lower() for element in buttons]
button_col = button_col.lower()
default_visibility = default_visibility.lower()
default_title = title + ' ' + default_visibility.capitalize()
segments = utilities.get_unique_values(df, segmentation_col)
traces_dict = utilities.produce_traces(df=df, button_col=button_col,
buttons=buttons,
segmentation_col=segmentation_col,
segments=segments, x=x, y=y,
default_visibility=default_visibility,
color_list=color_list,
labels=labels, trace_func=trace_func)
traces_list = utilities.df_list(buttons, traces_dict)
updatemenus = utilities.create_update_menus(buttons, traces_dict, title)
layout = dict(title=default_title,
showlegend=True,
updatemenus=updatemenus,
width=950,
height=440,
legend=dict(orientation='h')
)
# Enable usage of plotly function like plotly.graph_objs.layout.XAxis
if x_layout != '':
layout.update({'xaxis': x_layout})
if y_layout != '':
layout.update({'yaxis': y_layout})
fig = dict(data=traces_list, layout=layout)
return render_func(fig)
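# Hypothetical usage sketch for plot_four_dimensions (column names, button values
# and the dataframe itself are placeholders for whatever the caller provides):
# import plotly.graph_objs as go
# from plotly.offline import iplot
# plot_four_dimensions(df=df, x="date", y="sessions", title="Sessions for",
# segmentation_col="channel", button_col="country",
# buttons=["US", "CA"], default_visibility="US",
# trace_func=go.Scatter, render_func=iplot)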
``` |
{
"source": "JonathanMbt/SNA_Parking_Behavior",
"score": 3
} |
#### File: SNA_Parking_Behavior/src/botometerTopTen.py
```python
import matplotlib.pyplot as plt
import botometer
from commands import *
from functions import *
import statistics as st
def scrutinize(hashtags):
rapidapi_key = "<KEY>"
twitter_app_auth = {
'consumer_key': 'J2vBhcxzmgI3AkyMVBW14cG4K',
'consumer_secret': '<KEY>',
'access_token': '<KEY>',
'access_token_secret': '<KEY>',
}
bom = botometer.Botometer(wait_on_ratelimit=True, rapidapi_key=rapidapi_key, **twitter_app_auth)
users_botScore = {}
for hashtag in hashtags:
print(" *** Please wait ! API QUERY could be a bit long. ***")
users_id = list(getTweets(hashtag, data_filename).keys())[:25]
users_botScore[hashtag] = {}
query_answer = bom.check_accounts_in(users_id)
idx = 1
for screen_name, result in query_answer:
print("*** "+ str(idx) + "/"+ str(len(users_id)) + " ***")
idx += 1
if "error" in result.keys():
tmp = 0 # error is when the account is private so human
else:
tmp = round(st.mean((result["raw_scores"]['english']['overall'], result["raw_scores"]['universal']['overall'])), 2)
users_botScore[hashtag][str(screen_name)]= "Bot" if tmp >= 0.5 else "Human"
nbr_bot = list(users_botScore[hashtag].values()).count("Bot")
labels = ["Bot", "Human"]
values = [nbr_bot/(len(users_id)), (len(users_id)-nbr_bot)/len(users_id)]
fig, ax = plt.subplots(1, 2, figsize=(12,8))
fig.suptitle("Proportion of bots in the top 25 users for " + hashtag)
ax[0].hist(list(users_botScore[hashtag].values()))
ax[1].pie(values, explode=[0.1, 0], labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax[1].axis('equal')
plt.show()
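# Hypothetical usage sketch (the hashtags are illustrative; getTweets() and
# data_filename are expected to come from the starred imports above):
# scrutinize(["#parking", "#smartparking"])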
``` |
{
"source": "jonathanmcelroy/pyflo",
"score": 3
} |
#### File: pyflo/pyflo/component.py
```python
from functools import wraps
import scheduler
class Component:
"""Component for flows"""
in_ports = {}
out_ports = {}
def __init__(self):
"""Initializes the component"""
self._call = {}
for port_name, port in self.in_ports.items():
port.node = self
self._send_ports = set()
def _inport(self, port, data):
try:
yield self._call[port](data)
except KeyError:
raise KeyError("You must have @self.on_data(port_name) before a function")
while self._send_ports:
port, data = self._send_ports.pop()
yield scheduler.NewTask(send_data(self, port, data))
def on_data(self, port):
def decorator(f):
@wraps(f)
def wrapper(data):
return f(data)
try:
self._call[self.in_ports[port]] = wrapper
except KeyError:
raise Exception("You must specify an in_port name in the decorator")
return wrapper
return decorator
def send(self, port, data):
#print('send', port, data)
self._send_ports.add((port, data))
def init_data(port, data):
yield from port.send(data)
def send_data(node, port, data):
yield from node.out_ports[port].send(data)
```
#### File: pyflo/pyflo/network.py
```python
from port import Port
from component import Component
import scheduler
class Display(Component):
def __init__(self):
self.in_ports = {'in': Port('in', str)}
self.out_ports = {'out': Port('out', str)}
super().__init__()
@self.on_data('in')
def test(data):
print(data)
if self.out_ports['out'].is_attached():
self.send('out', data)
class Network:
""" The network """
def __init__(self, graph):
# In the graph we talk about nodes and edges.
# The corresponding names are processes and connections.
self.processes = {}
self.connections = set()
self.initializers = set()
self.graph = graph
self.scheduler = scheduler.Scheduler()
for node in self.graph.nodes.values():
self.add_node(node)
for ports in self.graph.edges.values():
for edge in ports.values():
self.add_edge(edge)
for port in self.graph.initializers.values():
for initializer in port.values():
self.add_initializer(initializer)
def add_node(self, node):
name = node['name']
component = node['component']
if name not in self.processes:
self.processes[name] = component()
def get_node(self, name):
return self.processes[name]
def add_edge(self, edge):
out_node_name = edge['from']['node']
out_node = self.get_node(out_node_name)
out_port_name = edge['from']['port']
out_port = out_node.out_ports[out_port_name]
in_node_name = edge['to']['node']
in_node = self.get_node(in_node_name)
in_port_name = edge['to']['port']
in_port = in_node.in_ports[in_port_name]
out_port.attach(in_port)
self.connections.add((out_port, in_port))
def add_initializer(self, initializer):
data = initializer['from']['data']
node_name = initializer['to']['node']
node = self.get_node(node_name)
port_name = initializer['to']['port']
port = node.in_ports[port_name]
self.initializers.add((port, data))
#def remove_node(self, node):
''''port = Port('out')
d1 = Display()
d2 = Display()
port.attach(d1.in_ports['in'])
d1.out_ports['out'].attach(d2.in_ports['in'])
self.scheduler = scheduler.Scheduler()
self.scheduler.new(init_data(port, 'hello'))'''
def run(self):
for port, data in self.initializers:
self.scheduler.new(init_data(port, data))
self.scheduler.mainloop()
def init_data(port, data):
yield from port.send(data)
def send_data(node, port, data):
yield from node.out_ports[port].send(data)
``` |
{
"source": "jonathanmeier5/django-command-overrides",
"score": 3
} |
#### File: command_overrides/tests/test_management.py
```python
from os import path
from unittest import mock
from django.test import TestCase
from django.core.management import call_command
import command_overrides
class StartAppTestCase(TestCase):
@mock.patch('django.core.management.commands.startapp.Command.handle')
def test_overridden_start_app(self, *mocks):
"""
Test we successfully inject our custom template.
"""
args = ['test_app']
opts = dict([('template', None)])
call_command('startapp', *args, **opts)
template_path = path.join(command_overrides.__path__[0], 'conf', 'app_template')
self.assertEqual(template_path, mocks[0].call_args[1].get('template'))
``` |
{
"source": "jonathanmeier5/issuebranch",
"score": 3
} |
#### File: src/issuebranch/utils.py
```python
import argparse
import sys
from issuebranch.backends.github import GithubSession
def label_milestone_issues():
"""
Labels all issues in a milestone with that milestone's respective label
"""
session = GithubSession()
labels = list(session.get_labels())
labels_by_name = dict([(x['name'], x) for x in labels])
milestones = list(session.get_milestones())
for milestone in milestones:
label_data = labels_by_name[f'epic:{milestone["title"].strip()}']
for issue in session.get_issues(milestone=milestone["number"], state='all'):
session.add_label(label_data, number=issue['number'])
def milestone_labels(argv=None):
"""
creates labels out of milestones
"""
argv = argv or sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument('color', help='color to make the labels')
args = parser.parse_args(argv)
session = GithubSession()
labels = session.get_labels()
labels_by_name = dict([(label['name'], label) for label in labels])
for milestone in session.get_milestones():
label_name = f'epic:{milestone["title"]}'
if label_name in labels_by_name:
continue
labels_by_name[label_name] = session.create_label(label_name, args.color)
return labels_by_name
```
#### File: webhook/tests/test_api.py
```python
from unittest import mock
from django.test import TestCase
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
from webhook.tests.utils import get_project_webhook_data
class ProjectTestCase(TestCase):
@mock.patch('webhook.views.handlers')
def test_project_call(self, *mocks):
"""
Ensure the project action is called
"""
data = get_project_webhook_data()
client = APIClient()
url = reverse('webhook-list')
response = client.post(url, data, format='json')
self.assertEquals(200, response.status_code)
handlers_mock = mocks[0]
handlers_mock.handler_types.get.assert_called_with('project')
handler = handlers_mock.handler_types.get.return_value
handler.assert_called_with(data)
handler.return_value.run.assert_called_with()
@mock.patch('webhook.views.WebhookViewSet.logger', new_callable=mock.PropertyMock)
@mock.patch('webhook.views.get_action_type')
def test_no_action_logged(self, *mocks):
get_action_type_mock = mocks[0]
get_action_type_mock.return_value = None
logger_mock = mocks[1]
data = {}
client = APIClient()
url = reverse('webhook-list')
response = client.post(url, data, format='json')
self.assertEquals(200, response.status_code)
# ensure a warning is logged for an unknown payload
logger_mock.return_value.warning.assert_called_with(mock.ANY)
``` |
{
"source": "jonathanmendez/nitsm-python",
"score": 2
} |
#### File: nitsm-python/systemtests/nidcpower_codemodules.py
```python
import nidcpower
import nitsm.codemoduleapi
from nitsm.codemoduleapi import SemiconductorModuleContext
OPTIONS = {"Simulate": True, "DriverSetup": {"Model": "4141", "BoardType": "PXIe"}}
@nitsm.codemoduleapi.code_module
def open_sessions_channel_expansion(tsm_context: SemiconductorModuleContext):
resource_strings = tsm_context.get_all_nidcpower_resource_strings()
for resource_string in resource_strings:
session = nidcpower.Session(resource_string, options=OPTIONS)
tsm_context.set_nidcpower_session(resource_string, session)
@nitsm.codemoduleapi.code_module
def open_sessions(tsm_context: SemiconductorModuleContext):
instrument_names, channel_strings = tsm_context.get_all_nidcpower_instrument_names()
for instrument_name, channel_string in zip(instrument_names, channel_strings):
session = nidcpower.Session(instrument_name, channel_string, options=OPTIONS)
tsm_context.set_nidcpower_session_with_channel_string(
instrument_name, channel_string, session
)
@nitsm.codemoduleapi.code_module
def measure(
tsm_context: SemiconductorModuleContext,
pins,
expected_instrument_names,
expected_channel_strings,
):
pin_query, sessions, channel_strings = tsm_context.pins_to_nidcpower_sessions(pins)
expected_instrument_channels = set(zip(expected_instrument_names, expected_channel_strings))
valid_channels = []
for session, channel_string in zip(sessions, channel_strings):
# call some methods on the session to ensure no errors
pin_session = session.channels[channel_string]
session.abort()
pin_session.output_function = nidcpower.OutputFunction.DC_CURRENT
pin_session.current_level = 10e-3
pin_session.output_enabled = True
pin_session.source_delay = 250e-6
session.initiate()
session.wait_for_event(nidcpower.Event.SOURCE_COMPLETE)
pin_session.measure(nidcpower.MeasurementTypes.VOLTAGE)
# check instrument channel we received is in the set of instrument channels we expected
actual_instrument_channel = (session.io_resource_descriptor, channel_string)
valid_channels.append(actual_instrument_channel in expected_instrument_channels)
expected_instrument_channels -= {actual_instrument_channel}
pin_query.publish(valid_channels)
num_missing_channels = [len(expected_instrument_channels)] * len(sessions)
pin_query.publish(num_missing_channels, "NumMissing")
@nitsm.codemoduleapi.code_module
def close_sessions(tsm_context: SemiconductorModuleContext):
sessions = tsm_context.get_all_nidcpower_sessions()
for session in sessions:
session.close()
```
#### File: nitsm-python/tests/test_nidaqmx.py
```python
import nidaqmx
import pytest
from nitsm.codemoduleapi import SemiconductorModuleContext
from nitsm.pinquerycontexts import PinQueryContext
@pytest.fixture
def simulated_nidaqmx_tasks(standalone_tsm_context):
task_names, channel_lists = standalone_tsm_context.get_all_nidaqmx_task_names("")
tasks = [nidaqmx.Task(tsk_name) for tsk_name in task_names]
for task_name, task in zip(task_names, tasks):
standalone_tsm_context.set_nidaqmx_task(task_name, task)
yield tasks
for task in tasks:
task.close()
@pytest.mark.pin_map("nidaqmx.pinmap")
class TestNIDAQmx:
pin_map_instruments = ["DAQmx1", "DAQmx2"]
pin_map_dut_pins = ["DUTPin1", "DUTPin2"]
pin_map_system_pins = ["SystemPin1"]
def test_get_all_nidaqmx_task_names(self, standalone_tsm_context: SemiconductorModuleContext):
task_names, channel_lists = standalone_tsm_context.get_all_nidaqmx_task_names("")
assert isinstance(task_names, tuple)
assert isinstance(channel_lists, tuple)
assert len(task_names) == len(channel_lists)
for task_name, channel_list in zip(task_names, channel_lists):
assert isinstance(task_name, str)
assert isinstance(channel_list, str)
assert task_name in self.pin_map_instruments
def test_set_nidaqmx_task(self, standalone_tsm_context: SemiconductorModuleContext):
task_names, channel_lists = standalone_tsm_context.get_all_nidaqmx_task_names("")
for task_name, channel_list in zip(task_names, channel_lists):
with nidaqmx.Task(task_name) as task:
standalone_tsm_context.set_nidaqmx_task(task_name, task)
assert SemiconductorModuleContext._sessions[id(task)] is task
def test_get_all_nidaqmx_tasks(self, standalone_tsm_context, simulated_nidaqmx_tasks):
queried_tasks = standalone_tsm_context.get_all_nidaqmx_tasks("")
assert isinstance(queried_tasks, tuple)
assert len(queried_tasks) == len(simulated_nidaqmx_tasks)
for queried_task in queried_tasks:
assert isinstance(queried_task, nidaqmx.Task)
assert queried_task in simulated_nidaqmx_tasks
def test_pins_to_nidaqmx_task_single_pin(self, standalone_tsm_context, simulated_nidaqmx_tasks):
(
pin_query_context,
queried_task,
queried_channel_list,
) = standalone_tsm_context.pins_to_nidaqmx_task("SystemPin1")
assert isinstance(pin_query_context, PinQueryContext)
assert isinstance(queried_task, nidaqmx.Task)
assert isinstance(queried_channel_list, str)
assert queried_task in simulated_nidaqmx_tasks
def test_pins_to_nidaqmx_task_multiple_pins(
self, standalone_tsm_context, simulated_nidaqmx_tasks
):
(
pin_query_context,
queried_task,
queried_channel_list,
) = standalone_tsm_context.pins_to_nidaqmx_task(self.pin_map_dut_pins)
assert isinstance(pin_query_context, PinQueryContext)
assert isinstance(queried_task, nidaqmx.Task)
assert isinstance(queried_channel_list, str)
assert queried_task in simulated_nidaqmx_tasks
def test_pins_to_nidaqmx_tasks_single_pin(
self, standalone_tsm_context, simulated_nidaqmx_tasks
):
(
pin_query_context,
queried_tasks,
queried_channel_lists,
) = standalone_tsm_context.pins_to_nidaqmx_tasks("PinGroup1")
assert isinstance(pin_query_context, PinQueryContext)
assert isinstance(queried_tasks, tuple)
assert isinstance(queried_channel_lists, tuple)
assert len(queried_tasks) == len(queried_channel_lists)
for queried_task, queried_channel_list in zip(queried_tasks, queried_channel_lists):
assert isinstance(queried_task, nidaqmx.Task)
assert isinstance(queried_channel_list, str)
assert queried_task in simulated_nidaqmx_tasks
def test_pins_to_nidaqmx_tasks_multiple_pins(
self, standalone_tsm_context, simulated_nidaqmx_tasks
):
all_pins = self.pin_map_dut_pins + self.pin_map_system_pins
(
pin_query_context,
queried_tasks,
queried_channel_lists,
) = standalone_tsm_context.pins_to_nidaqmx_tasks(all_pins)
assert isinstance(pin_query_context, PinQueryContext)
assert isinstance(queried_tasks, tuple)
assert isinstance(queried_channel_lists, tuple)
assert len(queried_tasks) == len(queried_channel_lists)
for queried_task, queried_channel_list in zip(queried_tasks, queried_channel_lists):
assert isinstance(queried_task, nidaqmx.Task)
assert isinstance(queried_channel_list, str)
assert queried_task in simulated_nidaqmx_tasks
```
#### File: nitsm-python/tests/test_nirelaydriver.py
```python
import niswitch
import pytest
from niswitch.enums import RelayAction, RelayPosition
from nitsm.codemoduleapi import SemiconductorModuleContext
@pytest.fixture
def simulated_niswitch_sessions(standalone_tsm_context):
instrument_names = standalone_tsm_context.get_relay_driver_module_names()
sessions = [
niswitch.Session("", topology="2567/Independent", simulate=True) for _ in instrument_names
] # resource name must be empty string to simulate an niswitch
for instrument_name, session in zip(instrument_names, sessions):
standalone_tsm_context.set_relay_driver_niswitch_session(instrument_name, session)
yield sessions
for session in sessions:
session.close()
def assert_relay_positions(standalone_tsm_context, pin_map_relays, relay_position):
(
niswitch_sessions,
niswitch_relay_names,
) = standalone_tsm_context.relays_to_relay_driver_niswitch_sessions(pin_map_relays)
for niswitch_session, relay_names in zip(niswitch_sessions, niswitch_relay_names):
for relay_name in relay_names.split(","):
assert niswitch_session.get_relay_position(relay_name) == relay_position
@pytest.mark.pin_map("nirelaydriver.pinmap")
class TestNIRelayDriver:
pin_map_instruments = ["RelayDriver1", "RelayDriver2"]
pin_map_site_relays = ["SiteRelay1", "SiteRelay2"]
pin_map_system_relays = ["SystemRelay1"]
def test_get_relay_driver_module_names(
self, standalone_tsm_context: SemiconductorModuleContext
):
instrument_names = standalone_tsm_context.get_relay_driver_module_names()
assert isinstance(instrument_names, tuple)
assert len(instrument_names) == len(self.pin_map_instruments)
for instrument_name in instrument_names:
assert isinstance(instrument_name, str)
assert instrument_name in self.pin_map_instruments
def test_get_relay_names(self, standalone_tsm_context: SemiconductorModuleContext):
site_relays, system_relays = standalone_tsm_context.get_relay_names()
assert isinstance(site_relays, tuple)
assert isinstance(system_relays, tuple)
for site_relay in site_relays:
assert isinstance(site_relay, str)
assert site_relay in self.pin_map_site_relays
for system_relay in system_relays:
assert isinstance(system_relay, str)
assert system_relay in self.pin_map_system_relays
def test_set_relay_driver_niswitch_session(
self, standalone_tsm_context: SemiconductorModuleContext
):
instrument_names = standalone_tsm_context.get_relay_driver_module_names()
for instrument_name in instrument_names:
with niswitch.Session("", topology="2567/Independent", simulate=True) as session:
standalone_tsm_context.set_relay_driver_niswitch_session(instrument_name, session)
assert SemiconductorModuleContext._sessions[id(session)] is session
def test_get_all_relay_driver_niswitch_sessions(
self, standalone_tsm_context: SemiconductorModuleContext, simulated_niswitch_sessions
):
queried_niswitch_sessions = standalone_tsm_context.get_all_relay_driver_niswitch_sessions()
assert isinstance(queried_niswitch_sessions, tuple)
assert len(queried_niswitch_sessions) == len(simulated_niswitch_sessions)
for queried_niswitch_session in queried_niswitch_sessions:
assert isinstance(queried_niswitch_session, niswitch.Session)
assert queried_niswitch_session in simulated_niswitch_sessions
def test_relays_to_relay_driver_niswitch_session_single_relay(
self, standalone_tsm_context: SemiconductorModuleContext, simulated_niswitch_sessions
):
(
queried_niswitch_session,
queried_niswitch_relay_names,
) = standalone_tsm_context.relays_to_relay_driver_niswitch_session("SystemRelay1")
assert isinstance(queried_niswitch_session, niswitch.Session)
assert isinstance(queried_niswitch_relay_names, str)
assert queried_niswitch_session in simulated_niswitch_sessions
def test_relays_to_relay_driver_niswitch_session_multiple_relays(
self, standalone_tsm_context: SemiconductorModuleContext, simulated_niswitch_sessions
):
(
queried_niswitch_session,
queried_niswitch_relay_names,
) = standalone_tsm_context.relays_to_relay_driver_niswitch_session(self.pin_map_site_relays)
assert isinstance(queried_niswitch_session, niswitch.Session)
assert isinstance(queried_niswitch_relay_names, str)
assert queried_niswitch_session in simulated_niswitch_sessions
def test_relays_to_relay_driver_niswitch_sessions_single_relay(
self, standalone_tsm_context: SemiconductorModuleContext, simulated_niswitch_sessions
):
(
queried_niswitch_sessions,
queried_niswitch_relay_names,
) = standalone_tsm_context.relays_to_relay_driver_niswitch_sessions("RelayGroup1")
assert isinstance(queried_niswitch_sessions, tuple)
assert isinstance(queried_niswitch_relay_names, tuple)
assert len(queried_niswitch_sessions) == len(queried_niswitch_relay_names)
for queried_niswitch_session, queried_relay_name in zip(
queried_niswitch_sessions, queried_niswitch_relay_names
):
assert isinstance(queried_niswitch_session, niswitch.Session)
assert isinstance(queried_relay_name, str)
assert queried_niswitch_session in simulated_niswitch_sessions
def test_relays_to_relay_driver_niswitch_sessions_multiple_relays(
self, standalone_tsm_context: SemiconductorModuleContext, simulated_niswitch_sessions
):
all_relays = self.pin_map_site_relays + self.pin_map_system_relays
(
queried_niswitch_sessions,
queried_niswitch_relay_names,
) = standalone_tsm_context.relays_to_relay_driver_niswitch_sessions(all_relays)
assert isinstance(queried_niswitch_sessions, tuple)
assert isinstance(queried_niswitch_relay_names, tuple)
assert len(queried_niswitch_sessions) == len(queried_niswitch_relay_names)
for queried_niswitch_session, queried_relay_name in zip(
queried_niswitch_sessions, queried_niswitch_relay_names
):
assert isinstance(queried_niswitch_session, niswitch.Session)
assert isinstance(queried_relay_name, str)
assert queried_niswitch_session in simulated_niswitch_sessions
@pytest.mark.usefixtures("simulated_niswitch_sessions")
def test_apply_relay_configuration(self, standalone_tsm_context: SemiconductorModuleContext):
standalone_tsm_context.apply_relay_configuration("RelayConfiguration1")
assert_relay_positions(
standalone_tsm_context, self.pin_map_site_relays, RelayPosition.CLOSED
)
assert_relay_positions(
standalone_tsm_context, self.pin_map_system_relays, RelayPosition.OPEN
)
@pytest.mark.usefixtures("simulated_niswitch_sessions")
def test_control_relays_single_action_open_system_relay(
self, standalone_tsm_context: SemiconductorModuleContext
):
(
niswitch_session,
niswitch_relay_name,
) = standalone_tsm_context.relays_to_relay_driver_niswitch_session("SystemRelay1")
standalone_tsm_context.control_relays("SystemRelay1", RelayAction.OPEN)
assert niswitch_session.get_relay_position(niswitch_relay_name) == RelayPosition.OPEN
@pytest.mark.usefixtures("simulated_niswitch_sessions")
def test_control_relays_single_action_close_system_relay(
self, standalone_tsm_context: SemiconductorModuleContext
):
(
niswitch_session,
niswitch_relay_name,
) = standalone_tsm_context.relays_to_relay_driver_niswitch_session("SystemRelay1")
standalone_tsm_context.control_relays("SystemRelay1", RelayAction.CLOSE)
assert niswitch_session.get_relay_position(niswitch_relay_name) == RelayPosition.CLOSED
@pytest.mark.usefixtures("simulated_niswitch_sessions")
def test_control_relays_single_action_open_all_site_relays(
self, standalone_tsm_context: SemiconductorModuleContext
):
standalone_tsm_context.control_relays(self.pin_map_site_relays, RelayAction.OPEN)
assert_relay_positions(standalone_tsm_context, self.pin_map_site_relays, RelayPosition.OPEN)
@pytest.mark.usefixtures("simulated_niswitch_sessions")
def test_control_relays_single_action_close_all_site_relays(
self, standalone_tsm_context: SemiconductorModuleContext
):
standalone_tsm_context.control_relays(self.pin_map_site_relays, RelayAction.CLOSE)
assert_relay_positions(
standalone_tsm_context, self.pin_map_site_relays, RelayPosition.CLOSED
)
@pytest.mark.usefixtures("simulated_niswitch_sessions")
def test_control_relays_multiple_action_open_all_site_relays(
self, standalone_tsm_context: SemiconductorModuleContext
):
standalone_tsm_context.control_relays(
self.pin_map_site_relays, [RelayAction.OPEN] * len(self.pin_map_site_relays)
)
assert_relay_positions(standalone_tsm_context, self.pin_map_site_relays, RelayPosition.OPEN)
@pytest.mark.usefixtures("simulated_niswitch_sessions")
def test_control_relays_multiple_action_close_all_site_relays(
self, standalone_tsm_context: SemiconductorModuleContext
):
standalone_tsm_context.control_relays(
self.pin_map_site_relays, [RelayAction.CLOSE] * len(self.pin_map_site_relays)
)
assert_relay_positions(
standalone_tsm_context, self.pin_map_site_relays, RelayPosition.CLOSED
)
@pytest.mark.usefixtures("simulated_niswitch_sessions")
def test_control_relays_multiple_action_mixed_site_relay_positions(
self, standalone_tsm_context: SemiconductorModuleContext
):
relay_actions = [
RelayAction.OPEN if i % 2 else RelayAction.CLOSE
for i in range(len(self.pin_map_site_relays))
]
standalone_tsm_context.control_relays(self.pin_map_site_relays, relay_actions)
for pin_map_site_relay, relay_action in zip(self.pin_map_site_relays, relay_actions):
relay_position = (
RelayPosition.OPEN if relay_action == RelayAction.OPEN else RelayPosition.CLOSED
)
assert_relay_positions(standalone_tsm_context, [pin_map_site_relay], relay_position)
``` |
{
"source": "jonathanmendoza-tx/DS-Unit-3-Sprint-1-Software-Engineering",
"score": 3
} |
#### File: DS-Unit-3-Sprint-1-Software-Engineering/challenge/acme.py
```python
from random import randint
class Product:
id = randint(1000000, 9999999)
def __init__(self, name, price = 10, weight = 20, flammability = 0.5, identifier = id):
self.name = name
self.price = price
self.weight = weight
self.flammability = flammability
self.identifier = identifier
def __repr__(self):
return f'{self.name}'
def stealability(self):
steal = self.price/self.weight
if steal < 0.5:
return f'Not so stealable...'
if (steal >= 0.5) & (steal < 1):
return f'Kinda stealable.'
else:
return f'Very stealable!'
def explode(self):
boom = self.flammability*self.weight
if boom < 10:
return f'...fizzle.'
if (boom >= 10) & (boom < 50):
return f'...boom!'
else:
return f'...BABOOM!!'
class BoxingGlove(Product):
def __init__(self, name, weight = 10):
super().__init__(name)
self.weight = weight
def explode(self):
return f"...it's a glove."
def punch(self):
if self.weight < 5:
return f'That tickles.'
if (self.weight >= 5) & (self.weight < 15):
return f'Hey that hurt!'
else:
return f'OUCH!'
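# --- Hypothetical usage sketch (values are illustrative) ---
# prod = Product('Anvil', price=100, weight=10, flammability=0.5)
# prod.stealability() # 'Very stealable!'
# prod.explode() # '...fizzle.'
# glove = BoxingGlove('Punchy the Third')
# glove.explode() # "...it's a glove."
# glove.punch() # 'Hey that hurt!'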
```
#### File: DS-Unit-3-Sprint-1-Software-Engineering/challenge/acme_report.py
```python
from random import randint, sample, uniform
from acme import Product
ADJECTIVES = ['Awesome', 'Shiny', 'Impressive',
'Portable', 'Improved']
NOUNS = ['Anvil', 'Catapult', 'Disguise',
'Mousetrap', '???']
def generate_products(num_products=30):
"""
Generates a list with a passed number of products (default = 30)
"""
products = []
for _ in range(num_products):
adj_rand = ADJECTIVES[randint(0, len(ADJECTIVES)-1)]
noun_rand = NOUNS[randint(0, len(NOUNS)-1)]
name = adj_rand+" "+noun_rand
weight = randint(5, 100)
price = randint(5, 100)
flammability = uniform(0,2.5)
products.append(Product(name = name, weight = weight, price = price,
flammability = flammability))
return products
def inventory_report(products):
""" Prints an inventory report using random values for prices,
weights and flammability.
"""
print('ACME CORPORATION OFFICIAL INVENTORY REPORT\n')
print(f'Unique product names: {len(set(product.name for product in products))}')
prices = []
weights = []
flammabilities = []
for i in range(len(products)):
prices += [products[i].price]
weights += [products[i].weight]
flammabilities += [products[i].flammability]
avg_price = sum(prices)/len(prices)
avg_weight = sum(weights)/len(weights)
avg_flamm = sum(flammabilities)/len(flammabilities)
print(f'Average price: {avg_price}')
print(f'Average weight: {avg_weight}')
print(f'Average flammability: {avg_flamm}')
if __name__ == '__main__':
inventory_report(generate_products())
```
#### File: DS-Unit-3-Sprint-1-Software-Engineering/challenge/acme_test.py
```python
import unittest
from acme import Product
from acme_report import generate_products, ADJECTIVES, NOUNS
class AcmeProductTests(unittest.TestCase):
"""
Making sure Acme products are the tops!
"""
def test_default_product_price(self):
"""
Test default product price being 10.
"""
prod = Product('Test Product')
self.assertEqual(prod.price, 10)
def test_default_product_weight(self):
"""
Test default product weight being 20.
"""
prod = Product('Test Product')
self.assertEqual(prod.weight, 20)
def test_stealability_explode(self):
"""
Test stealability and explode function with other than default
values
"""
prod = Product('Test Product')
prod.weight = 100
prod.price = 1000
self.assertEqual(Product.stealability(prod), 'Very stealable!')
self.assertEqual(Product.explode(prod),'...BABOOM!!')
class AcmeReportTests(unittest.TestCase):
def test_default_num_products(self):
"""
Test that products really does receive a list of default length 30
"""
products = generate_products()
self.assertEqual(len(products), 30)
def test_legal_names(self):
"""
checks that the generated names for a default batch of products
are all valid possible names to generate (adjective, space, noun,
from the lists of possible words)
"""
products = generate_products()
check_for_adj = []
check_for_nouns = []
for i in range(len(products)):
# Split each generated name and collect its first and second words.
# The first word should come from ADJECTIVES and the second from NOUNS.
name = str(products[i]).split()
check_for_adj += [name[0]]
check_for_nouns += [name[1]]
for adj in set(check_for_adj):
self.assertIn(adj, ADJECTIVES)
for noun in set(check_for_nouns):
self.assertIn(noun, NOUNS)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonathanmendoza-tx/udemy-titanic-section3",
"score": 3
} |
#### File: udemy-titanic-section3/model/preprocessing_functions.py
```python
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
import joblib
# Individual pre-processing and training functions
# ================================================
def load_data(df_path):
df = pd.read_csv(df_path)
return df
def divide_train_test(df, target):
# Function divides data set in train and test
X_train, X_test, y_train, y_test = train_test_split(
df.drop(target, axis=1), # predictors
df[target], # target
test_size=0.2, # percentage of obs in test set
random_state=0) # seed to ensure reproducibility
return X_train, X_test, y_train, y_test
def extract_cabin_letter(df, var):
df[var] = df[var].str[0] # captures the first letter
return df
def add_missing_indicator(df, var):
# function adds a binary missing value indicator
df[var + '_NA'] = np.where(df[var].isnull(), 1, 0)
return df
def impute_na(df, var, imputation_dict, method='categorical'):
# function replaces NA by value entered by user
# defaults to string "missing"
if method == "numerical":
for col in var:
df = add_missing_indicator(df, col)
# replace NaN by median
if imputation_dict['numerical'] == 'median':
median_val = df[col].median()
df[col] = df[col].fillna(median_val)
else:
for col in var:
df[col] = df[col].fillna(imputation_dict[method])
return df
def remove_rare_labels(df, vars_cat, frequent_ls):
# groups labels that are not in the frequent list into the umbrella
# group Rare
for var in vars_cat:
# replace rare categories by the string "Rare"
df[var] = np.where(df[var].isin(frequent_ls[var]), df[var], 'Rare')
return df
def encode_categorical(df, vars_cat):
# adds ohe variables and removes original categorical variable
for var in vars_cat:
# to create the binary variables, we use get_dummies from pandas
df = pd.concat([df, pd.get_dummies(df[var],
prefix=var, drop_first=True)], axis=1)
df = df.drop(labels=vars_cat, axis=1)
return df
def check_dummy_variables(df, dummy_list):
# check that all missing variables where added when encoding, otherwise
# add the ones that are missing
columns = set(df.columns)
for col in dummy_list:
if col not in columns:
df[col] = 0
return df
def train_scaler(df, output_path):
# train and save scaler
scaler = StandardScaler()
# fit the scaler to the train set
scaler.fit(df)
# save scaler
joblib.dump(scaler, output_path)
def scale_features(df, output_path):
# load scaler and transform data
scaler = joblib.load(output_path)
df = scaler.transform(df)
return df
def train_model(X_train, y_train, target, output_path):
# train and save model
model = LogisticRegression(C=0.0005, random_state=0)
model.fit(X_train, y_train)
joblib.dump(model, output_path)
return model
def predict(df, model):
# load model and get predictions
y_pred = model.predict(df)
pred_prob = model.predict_proba(df)[:, 1]
return y_pred, pred_prob
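# --- Hypothetical end-to-end sketch showing how these steps chain together ---
# (file paths, column lists and the imputation dict are placeholders; the real
# values live in the project's config, which is not shown here)
# df = load_data('titanic.csv')
# X_train, X_test, y_train, y_test = divide_train_test(df, target='survived')
# X_train = extract_cabin_letter(X_train, 'cabin')
# X_train = impute_na(X_train, ['age', 'fare'], {'numerical': 'median'}, method='numerical')
# X_train = impute_na(X_train, ['cabin', 'embarked'], {'categorical': 'Missing'})
# X_train = remove_rare_labels(X_train, ['sex', 'cabin', 'embarked'], frequent_ls={...})
# X_train = encode_categorical(X_train, ['sex', 'cabin', 'embarked'])
# train_scaler(X_train, 'scaler.joblib')
# X_train = scale_features(X_train, 'scaler.joblib')
# model = train_model(X_train, y_train, 'survived', 'model.joblib')
# predictions, probabilities = predict(X_train, model)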
``` |
{
"source": "jonathan-messina/JAGUARETE_KAA",
"score": 2
} |
#### File: JAGUARETE_KAA/miSitio/views.py
```python
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django import forms
from .models import Categoria, Producto, Carrito
from .forms import FormProductoCRUD
from cart.cart import Cart
# Create your views here.
def index(request):
productos = Producto.objects.all()
categoriax = Categoria.objects.all()
return render(request, "misitio/index.html",{
"productos": productos,
"categoriax": categoriax,
})
def carrito(request):
lista = Carrito.lista_productos
return render(request, "misitio/carrito.html",{
"lista" : lista,
})
# lista_productos = []
# @login_required
# def añadir_a_carrito(request,producto_id):
# producto = get_object_or_404(Producto, pk=producto_id)
# carrito,created = Carrito.objects.get_or_create(user=request.user, active=True)
# Carrito.añadir_a_carrito(producto_id)
# return render(request, 'misitio/carrito.html')
def acerca_de(request):
return render(request, "misitio/acerca_de.html")
def producto(request,producto_id):
producto = Producto.objects.get(id=producto_id)
titulo = producto.titulo
imagen = producto.imagen
descripcion = producto.descripcion
precio = producto.precio
categoria = producto.categoria
return render(request, 'misitio/producto.html',{
"producto" : producto,
"files":Producto.objects.all(),
"titulo": titulo,
"precio": precio,
"descripcion": descripcion,
"imagen": imagen,
"categoria":categoria,
"producto_id":producto_id
}
)
def resultado_busqueda(request):
productos = Producto.objects.all()
query = request.GET.get("q")
if query and query != '':
productos = Producto.objects.filter(Q(titulo__icontains=query) | Q(categoria__descripcion__icontains=query))
else:
productos = Producto.objects.all()
return render(request, "misitio/resultado_busqueda.html",{
"productos": productos,
})
@login_required
def añadir_producto(request):
if request.method=="POST":
form = FormProductoCRUD(request.POST, request.FILES)
if form.is_valid():
form.save()
else:
form = FormProductoCRUD()
return render(request, 'misitio/añadir_producto.html',{
"formset":form,
"files":Producto.objects.all(),
})
@login_required
def editar_producto(request,producto_id):
un_producto = get_object_or_404(Producto,id=producto_id)
if request.method=="POST":
form= FormProductoCRUD(request.POST, request.FILES,instance=un_producto)
if form.is_valid():
form.save()
return render(request,"misitio/editar_producto.html",{
"Productos":Producto.objects.all(),
"un_producto":un_producto,
})
else:
form = FormProductoCRUD(instance=un_producto)
return render(request,"misitio/editar_producto.html",{
"un_producto":un_producto,
"formset":form,
})
def borrar_producto(request,producto_id):
un_producto=get_object_or_404(Producto, id = producto_id)
if request.method == 'POST':
un_producto.delete()
return render(request, 'index.html', {'un_producto': un_producto})
@login_required
def cart_add(request, producto_id):
cart = Cart(request)
product = Producto.objects.get(id=producto_id)
cart.add(product=product)
return redirect("/miSitio")
@login_required(login_url="/users/login")
def item_clear(request, producto_id):
cart = Cart(request)
product = Producto.objects.get(id=producto_id)
cart.remove(product)
return redirect("/miSitio/carrito")
@login_required(login_url="/users/login")
def item_increment(request, id):
cart = Cart(request)
product = Producto.objects.get(id=id)
cart.add(product=product)
return redirect("cart_detail")
@login_required
def item_decrement(request, id):
cart = Cart(request)
product = Producto.objects.get(id=id)
cart.decrement(product=product)
return redirect("cart_detail")
@login_required(login_url="/users/login")
def cart_clear(request):
cart = Cart(request)
cart.clear()
return redirect("/miSitio/carrito")
@login_required(login_url="/users/login")
def cart_detail(request):
return render(request, 'cart/cart_detail.html')
``` |
{
"source": "jonathanmiller2/django-raster",
"score": 2
} |
#### File: django-raster/raster/admin.py
```python
from django import forms
from django.contrib import admin, messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from .models import (
Legend, LegendEntry, LegendSemantics, RasterProduct, RasterLayer, RasterLayerBandMetadata, RasterLayerMetadata,
RasterLayerParseStatus, RasterLayerReprojected, RasterTile
)
class FilenameActionForm(forms.Form):
"""
Form for changing the filename of a raster.
"""
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
path = forms.CharField(label='Filepath', required=False)
class RasterLayerMetadataInline(admin.TabularInline):
model = RasterLayerMetadata
extra = 0
readonly_fields = (
'srid', 'uperleftx', 'uperlefty', 'width', 'height',
'scalex', 'scaley', 'skewx', 'skewy', 'numbands',
'max_zoom', 'srs_wkt',
)
def has_add_permission(self, request, obj=None):
return False
class RasterLayerParseStatusInline(admin.TabularInline):
model = RasterLayerParseStatus
extra = 0
readonly_fields = ('status', 'tile_levels', 'log', )
def has_add_permission(self, request, obj=None):
return False
class RasterLayerBandMetadataInline(admin.TabularInline):
model = RasterLayerBandMetadata
extra = 0
readonly_fields = (
'band', 'nodata_value', 'max', 'min', 'std', 'mean',
'hist_values', 'hist_bins',
)
def has_add_permission(self, request, obj=None):
return False
class RasterLayerReprojectedInline(admin.TabularInline):
model = RasterLayerReprojected
readonly_fields = (
'rasterlayer', 'rasterfile',
)
extra = 0
def has_add_permission(self, request, obj=None):
return False
class RasterProductModelAdmin(admin.ModelAdmin):
list_display = ('name',)
class RasterLayerModelAdmin(admin.ModelAdmin):
"""
Admin action to update filepaths only. Files can be uploadded to the
filesystems through any channel and then files can be assigned to the
raster objects through this action. This might be useful for large raster
files.
"""
actions = ['reparse_rasters']
list_filter = ('datatype', 'parsestatus__status')
search_fields = ('name', 'rasterfile')
inlines = (
RasterLayerParseStatusInline,
RasterLayerMetadataInline,
RasterLayerBandMetadataInline,
RasterLayerReprojectedInline,
)
def reparse_rasters(self, request, queryset):
"""
Admin action to re-parse a set of rasterlayers.
"""
for rasterlayer in queryset:
rasterlayer.parsestatus.reset()
rasterlayer.refresh_from_db()
rasterlayer.save()
msg = 'Parsing Rasters, check parse logs for progress'
self.message_user(request, msg)
class RasterLayerMetadataModelAdmin(admin.ModelAdmin):
readonly_fields = (
'rasterlayer', 'uperleftx', 'uperlefty', 'width', 'height',
'scalex', 'scaley', 'skewx', 'skewy', 'numbands', 'srid', 'srs_wkt',
)
def has_add_permission(self, request, obj=None):
return False
class RasterTileModelAdmin(admin.ModelAdmin):
readonly_fields = (
'rast', 'rasterlayer', 'tilex', 'tiley', 'tilez',
)
def has_add_permission(self, request, obj=None):
return False
class LegendEntriesInLine(admin.TabularInline):
model = LegendEntry
extra = 0
class LegendAdmin(admin.ModelAdmin):
inlines = (
LegendEntriesInLine,
)
admin.site.register(LegendSemantics)
admin.site.register(RasterProduct, RasterProductModelAdmin)
admin.site.register(RasterLayer, RasterLayerModelAdmin)
admin.site.register(RasterTile, RasterTileModelAdmin)
admin.site.register(RasterLayerMetadata, RasterLayerMetadataModelAdmin)
admin.site.register(LegendEntry)
admin.site.register(Legend, LegendAdmin)
``` |
{
"source": "jonathanmishler/coinbase",
"score": 3
} |
#### File: coinbase/coinbase/candle_async.py
```python
from asyncio import Semaphore
import logging
from typing import Optional
from datetime import datetime, timedelta
import asyncio
import httpx
from .auth import CoinbaseAuth
import utils
class Coin:
def __init__(self, coin_id: str) -> None:
self.auth = CoinbaseAuth()
self.base_url = f"https://api.exchange.coinbase.com/products/{coin_id}"
def client(self, params: Optional[dict] = None) -> httpx.AsyncClient:
return httpx.AsyncClient(base_url=self.base_url, auth=self.auth, params=params)
async def history(
self,
resolution: int = 60,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
):
interval_per_request = timedelta(seconds=(300 * resolution))
self.limiter = Semaphore(10)
if end is None:
end = datetime.now()
if start is None:
start = end - interval_per_request
start = utils.datetime_floor(start)
end = utils.datetime_floor(end)
batches = utils.gen_interval(
start=start,
end=end,
interval=interval_per_request,
offset=timedelta(seconds=resolution),
backwards=True,
)
async with self.client(params={"granularity": resolution}) as client:
req_batches = [
client.build_request(
method="GET",
url="/candles",
params={
"start": batch_start.isoformat(),
"end": batch_end.isoformat(),
},
)
for batch_end, batch_start in batches
]
tasks = [self.make_request(client, req) for req in req_batches]
results = await asyncio.gather(*tasks)
return results
async def make_request(
self, client: httpx.AsyncClient, req: httpx.Request
) -> httpx.Response:
async with self.limiter:
logging.info("Making Request")
resp = await client.send(req)
resp.raise_for_status()
await asyncio.sleep(1)
return resp
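# Hypothetical usage sketch (the product id and resolution are illustrative;
# a configured CoinbaseAuth / .env is assumed, as elsewhere in the project):
# import asyncio
# coin = Coin("BTC-USD")
# responses = asyncio.run(coin.history(resolution=60))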
```
#### File: coinbase/coinbase/candle_sync.py
```python
import logging
from typing import Optional
from datetime import datetime, timedelta
import httpx
import psycopg
from .auth import CoinbaseAuth
import utils
from db.models import CoinHistory
from db.settings import Settings
class Coin:
def __init__(self, coin_id: str) -> None:
self.auth = CoinbaseAuth()
self.coin_id = coin_id
self.base_url = f"https://api.exchange.coinbase.com/products/{self.coin_id}"
self.db_settings = Settings()
# limiter = RateLimiter(15)
def client(self, params: Optional[dict] = None) -> httpx.Client:
return httpx.Client(base_url=self.base_url, auth=self.auth, params=params)
def history(
self,
resolution: int = 60,
start: Optional[datetime] = None,
end: Optional[datetime] = None,
):
interval_per_request = timedelta(seconds=(300 * resolution))
if end is None:
end = datetime.now()
if start is None:
start = end - interval_per_request
start = utils.datetime_floor(start)
end = utils.datetime_floor(end)
batches = utils.gen_interval(
start=start,
end=end,
interval=interval_per_request,
offset=timedelta(seconds=resolution),
backwards=True,
)
with self.client(params={"granularity": resolution}) as client, psycopg.connect(
self.db_settings.conn_str
) as conn:
req_batches = [
client.build_request(
method="GET",
url="/candles",
params={
"start": batch_start.isoformat(),
"end": batch_end.isoformat(),
},
)
for batch_end, batch_start in batches
]
results = [
self.write_to_table(self.make_request(client, req), conn)
for req in req_batches
]
return results
def make_request(
self, client: httpx.Client, req: httpx.Request
) -> httpx.Response:
# async with self.limiter:
resp = client.send(req)
resp.raise_for_status()
return resp
def write_to_table(self, resp, conn):
with conn.cursor() as cur:
coin_history = CoinHistory.from_coinbase(resp.text)
cur.executemany(
CoinHistory.insert_str(self.coin_id), [row.as_tuple() for row in coin_history]
)
conn.commit()
```
#### File: coinbase/db/settings.py
```python
from pydantic import BaseSettings, Field
class Settings(BaseSettings):
"""Gets the Coinbase authentication settings from the local env variables or .env file"""
username: str = Field(None, env="db_username")
password: str = Field(None, env="db_password")
host: str = Field("localhost", env="db_host")
port: str = Field("5432", env="db_port")
dbname: str = Field("", env="db_name")
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
secrets_dir = "./secrets"
@property
def conn_str(self):
if self.password is None:
user_str = self.username
else:
user_str = f"{self.username}:{self.password}"
return f"postgresql://{user_str}@{self.host}:{self.port}/{self.dbname}"
``` |
{
"source": "jonathanmishler/ftw_food",
"score": 3
} |
#### File: jonathanmishler/ftw_food/foursquare.py
```python
from typing import Optional
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import pandas as pd
class Foursquare:
""" Foursquare Places API class """
BASE_ENDPOINT = "https://api.foursquare.com/v2/venues"
def __init__(self, client_id: str, client_secret: str, version: str):
self.CLIENT_ID = client_id
self.CLIENT_SECRET = client_secret
self.VERSION = version
self.BASE_PARAMS = {
"client_id": self.CLIENT_ID,
"client_secret": self.CLIENT_SECRET,
"v": self.VERSION,
}
self.session = self.create_requests_session()
self.categories = Categories(self)
@staticmethod
def create_requests_session():
""" Creates a Requests Seesion with a retry strategy
Copied from:
Advanced usage of Python requests - timeouts, retries, hooks
By:
<NAME>
URL:
https://findwork.dev/blog/advanced-usage-python-requests-timeouts-retries-hooks/#retry-on-failure
"""
retry_strategy = Retry(
total=5,
status_forcelist=[429, 500, 502, 503, 504],
method_whitelist=["HEAD", "GET", "OPTIONS"],
)
adapter = HTTPAdapter(max_retries=retry_strategy)
http = requests.Session()
http.mount("https://", adapter)
http.mount("http://", adapter)
return http
def get_resource(self, resource_enpoint: "str", params: dict = {}):
""" Make the get request to Fourquare """
params.update(self.BASE_PARAMS)
r = self.session.get(
url=f"{self.BASE_ENDPOINT}/{resource_enpoint}", params=params
)
return r.json()
def explore(
self,
loc: str,
loc_type: str = "ll",
radius: float = 250.0,
categories_id: Optional[list] = None,
page: int = 1,
limit: int = 50,
):
""" Request the explore resource
Params:
loc (str): Search string for the users location depends on loc_type
loc_type (str): Either ll (lat and lon) or near (geocodable
location)
categories_id (list): Specify a list of category ids to limit
results
page (int): Begins at 1 and sets the offset for the API to get
more than 50 results
limit (int): max is 50 results per API call
"""
params = {
loc_type: loc,
"radius": radius,
"limit": limit,
"offset": (limit * (page - 1)),
}
if categories_id:
params["categoryId"] = categories_id
return self.get_resource("explore", params)
class Categories:
""" Class to hold the Foursquare categories """
def __init__(self, api_object: Foursquare):
self.api = api_object
self.get_categories()
def get_categories(self):
""" Creates a pandas.DataFrame of the Places Categoires """
response = self.api.get_resource("categories")
all_categories = list()
for category in response["response"]["categories"]:
all_categories.extend(self.flatten_categories(category))
self.df = pd.DataFrame(all_categories)
return self
def search(self, keywords: list) -> pd.DataFrame:
""" Provide a list of kewords to serach the category names """
df = self.df
search_mask = list()
for keyword in keywords:
search_mask.append(df["name"].str.contains(keyword, case=False))
search_mask = pd.concat(search_mask, axis=1).any(axis=1)
return df[search_mask]
def parents(self, parents_to_mask: list) -> pd.DataFrame:
""" The Categories are nested and have up to 5 parents. Provide a
list of parents to subset the list to look at the children categories
"""
df = self.df
masks = list()
for i, parent in enumerate(parents_to_mask):
masks.append(df[f"Parent_{i}"] == parent)
return df[pd.concat(masks, axis=1).all(axis=1)]
def select(self, names: list) -> pd.DataFrame:
""" Select names from the categories, must be a list """
return self.df[self.df["name"].isin(names)]
@staticmethod
def flatten_categories(child: dict, parents: list = list()) -> list:
""" Recursively flattens out the categories dict and adds the parent
names to each child
"""
cat_list = list()
cats = child.get("categories", None)
for cat in cats:
cat_list.extend(
Categories.flatten_categories(cat, [*parents, child["name"]])
)
# Drop the keys below to reduce the size
child.pop("categories", None)
child.pop("icon", None)
# Create the keys for each parent level name
for i, p in enumerate([*parents, "Top-Level"]):
child[f"Parent_{i}"] = p
return [child, *cat_list]
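# Illustrative usage (a sketch, not part of the original module; the
# credentials below are placeholders, and constructing the client makes a
# live call to fetch categories, so a valid Foursquare id/secret is needed):
#
#   api = Foursquare(client_id="<id>", client_secret="<secret>", version="20200101")
#   nearby = api.explore(loc="40.7,-74.0", loc_type="ll", radius=500)
#   coffee_categories = api.categories.search(["coffee"])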
``` |
{
"source": "jonathanmonreal/nltk-examples",
"score": 3
} |
#### File: nltk-examples/chapter 1/c1q28.py
```python
from __future__ import division
import nltk
def percentage(word, text):
    # count case-insensitive occurrences of `word` as a share of all tokens
    return 100 * ([w.lower() for w in text].count(word.lower()) / len(text))
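# Illustrative usage (a sketch; requires the NLTK book corpora to be downloaded):
#   from nltk.book import text1
#   percentage('whale', text1)  # share of tokens in Moby Dick equal to 'whale'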
```
#### File: nltk-examples/chapter 2/c2q17.py
```python
from __future__ import division
import nltk
from nltk.corpus import stopwords
def freq_nonstop(text):
    stopword_list = stopwords.words('english') # stores the stopwords
    # generate the frequency distribution for non-stopwords
    # (compare the lowercased token against the lowercase stopword list)
    fdist = nltk.FreqDist([word.lower() for word in text if word.isalpha() and
                           word.lower() not in stopword_list])
    # from the frequency distribution, get tuples with both the word and its frequency
    vocabulary_tuples = fdist.items()
    # sort the tuples from most to least frequent
    vocabulary_tuples = sorted(vocabulary_tuples, key=lambda vocab: vocab[1], reverse=True)
    vocabulary = [] # used to store the vocabulary
    # keep (up to) the 50 most frequent words
    for i in range(0, 50):
        try:
            vocabulary.append(vocabulary_tuples[i][0])
        except IndexError: # fewer than 50 distinct words
            break
    return vocabulary
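# Illustrative usage (a sketch; requires the NLTK gutenberg and stopwords corpora):
#   freq_nonstop(nltk.corpus.gutenberg.words('austen-emma.txt'))
#   # -> up to 50 of the most frequent alphabetic, non-stopword tokens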
```
#### File: nltk-examples/chapter 2/c2q18.py
```python
from __future__ import division
import nltk
from nltk.corpus import stopwords
def bigram_freq(text):
    stopword_list = stopwords.words('english') # stores the stopwords
    # generate the frequency distribution for non-stopword bigrams
    # (compare the lowercased token against the lowercase stopword list)
    fdist = nltk.FreqDist(nltk.bigrams([word.lower() for word in text if word.isalpha() and
                                        word.lower() not in stopword_list]))
    # from the frequency distribution, get tuples with both the bigram and its frequency
    bigram_tuples = fdist.items()
    # sort the tuples from most to least frequent
    bigram_tuples = sorted(bigram_tuples, key=lambda vocab: vocab[1], reverse=True)
    bigrams = [] # used to store the bigram list
    # keep (up to) the 50 most frequent bigrams
    for i in range(0, 50):
        try:
            bigrams.append(bigram_tuples[i][0])
        except IndexError: # fewer than 50 distinct bigrams
            break
    return bigrams
```
#### File: nltk-examples/chapter 3/c3q29.py
```python
from __future__ import division
from nltk.corpus import brown
def ari(words, sentences):
    """Automated Readability Index: a weighted sum of the average word length
    (in characters) and the average sentence length (in words)."""
    chars = 0
    for word in words:
        chars += len(word)
    return (4.71 * (chars / len(words)) + 0.5 * (len(words) / len(sentences))
            - 21.43)
for category in brown.categories():
    print('%*s %9f' % (max(len(c) for c in brown.categories()), category,
                       ari(brown.words(categories=category),
                           brown.sents(categories=category))))
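# Worked example of the formula on a hypothetical input: for a single
# four-word sentence ['This', 'is', 'a', 'test'] (11 characters in total),
# ari = 4.71 * (11 / 4) + 0.5 * (4 / 1) - 21.43 = -6.48 (approximately);
# very short samples can legitimately score below zero.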
``` |
{
"source": "JonathanMonzalve/pythonapp",
"score": 2
} |
#### File: app/parametros/models.py
```python
from django.db import models
# Create your models here.
# Models are classes that return objects, so we create a class named after
# the entity or collection and define its attributes:
class Etnia(models.Model):
    NombEtni = models.CharField(max_length=50)
    # once the class is created, return the NombEtni (ethnicity name) field:
    def __unicode__(self):
        return self.NombEtni
# the remaining classes of this module:
# model for the document type:
class TipoDocu(models.Model):
    NombTipo = models.CharField(max_length=50)
    def __unicode__(self):
        return self.NombTipo
# model for the marital status:
class EstaCivil(models.Model):
    NomEsCi = models.CharField(max_length=50)
    def __unicode__(self):
        return self.NomEsCi
# model for the classification of studies:
class TipoEstu(models.Model):
    NombTiEs = models.CharField(max_length=50)
    def __unicode__(self):
        return self.NombTiEs
# model for the achievement types:
class TipoLogr(models.Model):
    NombTiLo = models.CharField(max_length=50)
    def __unicode__(self):
        return self.NombTiLo
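# Illustrative usage (a sketch, not part of the original module; assumes the
# app is listed in INSTALLED_APPS and migrations have been run; the value is
# a made-up example):
#   etnia = Etnia(NombEtni="Wayuu")
#   etnia.save()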
``` |
{
"source": "jonathanmorley/beartype",
"score": 2
} |
#### File: _code/_pep/_pephint.py
```python
from beartype.roar import (
BeartypeDecorHintPepException,
BeartypeDecorHintPepUnsupportedException,
BeartypeDecorHintPep593Exception,
)
from beartype._cave._cavefast import NoneType
from beartype._decor._cache.cachetype import (
bear_typistry,
register_typistry_forwardref,
)
from beartype._decor._code.codesnip import (
ARG_NAME_GETRANDBITS,
ARG_NAME_TYPISTRY,
VAR_NAME_PREFIX_PITH,
VAR_NAME_PITH_ROOT,
)
from beartype._decor._code._pep._pepmagic import (
FUNC_WRAPPER_LOCAL_LABEL,
HINT_ROOT_LABEL,
HINT_META_INDEX_HINT,
HINT_META_INDEX_PLACEHOLDER,
HINT_META_INDEX_PITH_EXPR,
HINT_META_INDEX_INDENT,
)
from beartype._decor._code._pep._pepsnip import (
PEP_CODE_CHECK_HINT_GENERIC_CHILD,
PEP_CODE_CHECK_HINT_GENERIC_PREFIX,
PEP_CODE_CHECK_HINT_GENERIC_SUFFIX,
PEP_CODE_CHECK_HINT_NONPEP_TYPE,
PEP_CODE_CHECK_HINT_ROOT_SUFFIX,
PEP_CODE_CHECK_HINT_SEQUENCE_STANDARD,
PEP_CODE_CHECK_HINT_SEQUENCE_STANDARD_PITH_CHILD_EXPR,
PEP_CODE_CHECK_HINT_TUPLE_FIXED_EMPTY,
PEP_CODE_CHECK_HINT_TUPLE_FIXED_LEN,
PEP_CODE_CHECK_HINT_TUPLE_FIXED_NONEMPTY_CHILD,
PEP_CODE_CHECK_HINT_TUPLE_FIXED_NONEMPTY_PITH_CHILD_EXPR,
PEP_CODE_CHECK_HINT_TUPLE_FIXED_PREFIX,
PEP_CODE_CHECK_HINT_TUPLE_FIXED_SUFFIX,
PEP_CODE_CHECK_HINT_ROOT_PREFIX,
PEP_CODE_CHECK_HINT_ROOT_SUFFIX_RANDOM_INT,
PEP_CODE_HINT_CHILD_PLACEHOLDER_PREFIX,
PEP_CODE_HINT_CHILD_PLACEHOLDER_SUFFIX,
PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_PREFIX,
PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_SUFFIX,
PEP_CODE_PITH_ASSIGN_EXPR,
PEP484_CODE_CHECK_HINT_UNION_CHILD_PEP,
PEP484_CODE_CHECK_HINT_UNION_CHILD_NONPEP,
PEP484_CODE_CHECK_HINT_UNION_PREFIX,
PEP484_CODE_CHECK_HINT_UNION_SUFFIX,
PEP586_CODE_CHECK_HINT_LITERAL,
PEP586_CODE_CHECK_HINT_PREFIX,
PEP586_CODE_CHECK_HINT_SUFFIX,
PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_CHILD,
PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_PREFIX,
PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_SUFFIX,
)
from beartype._util.cache.utilcachecall import callable_cached
from beartype._util.cache.pool.utilcachepoollistfixed import (
SIZE_BIG,
acquire_fixed_list,
release_fixed_list,
)
from beartype._util.cache.pool.utilcachepoolobjecttyped import (
acquire_object_typed,
release_object_typed,
)
from beartype._util.data.utildatadict import update_mapping
from beartype._util.func.utilfuncscope import (
CallableScope,
add_func_scope_attr,
add_func_scope_type,
add_func_scope_types,
)
from beartype._util.data.hint.datahint import HINTS_IGNORABLE_SHALLOW
from beartype._util.data.hint.pep.datapep import (
HINT_SIGNS_SUPPORTED_DEEP,
HINT_SIGNS_SEQUENCE_STANDARD,
HINT_SIGNS_TUPLE,
HINT_SIGNS_TYPE_ORIGIN_STDLIB,
)
from beartype._util.data.hint.pep.proposal.datapep484 import (
HINT_PEP484_SIGNS_UNION)
from beartype._util.data.hint.pep.datapepattr import (
HINT_PEP586_ATTR_LITERAL,
HINT_PEP593_ATTR_ANNOTATED,
)
from beartype._util.data.hint.pep.sign.datapepsigns import (
HintSignForwardRef,
HintSignGeneric,
)
from beartype._util.hint.pep.proposal.utilhintpep484 import (
get_hint_pep484_generic_base_erased_from_unerased,
get_hint_pep484_newtype_class,
is_hint_pep484_newtype,
)
from beartype._util.hint.pep.proposal.utilhintpep544 import (
get_hint_pep544_io_protocol_from_generic,
is_hint_pep544_io_generic,
)
from beartype._util.hint.pep.proposal.utilhintpep585 import (
is_hint_pep585_builtin)
from beartype._util.hint.pep.proposal.utilhintpep586 import (
die_unless_hint_pep586)
from beartype._util.hint.pep.proposal.utilhintpep593 import (
get_hint_pep593_metadata,
get_hint_pep593_metahint,
is_hint_pep593,
is_hint_pep593_beartype,
)
from beartype._util.hint.pep.utilhintpepget import (
get_hint_pep_args,
get_hint_pep_generic_bases_unerased,
get_hint_pep_sign,
get_hint_pep_stdlib_type,
get_hint_pep_generic_type_or_none,
)
from beartype._util.hint.pep.utilhintpeptest import (
die_if_hint_pep_unsupported,
die_if_hint_pep_sign_unsupported,
is_hint_pep,
is_hint_pep_subscripted,
is_hint_pep_tuple_empty,
is_hint_pep_typing,
warn_if_hint_pep_sign_deprecated,
)
from beartype._util.hint.utilhintget import get_hint_forwardref_classname
from beartype._util.hint.utilhinttest import is_hint_ignorable
from beartype._util.py.utilpyversion import (
IS_PYTHON_AT_LEAST_3_8,
IS_PYTHON_AT_LEAST_3_7,
)
from beartype._util.text.utiltextmagic import (
CODE_INDENT_1,
CODE_INDENT_2,
LINE_RSTRIP_INDEX_AND,
LINE_RSTRIP_INDEX_OR,
)
from beartype._vale._valesub import _SubscriptedIs
from beartype._util.text.utiltextmunge import replace_str_substrs
from beartype._util.text.utiltextrepr import represent_object
from collections.abc import Callable
from random import getrandbits
from typing import Tuple
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ CODERS }....................
#FIXME: Attempt to JIT this function with Numba at some point. This will almost
#certainly either immediately blow up or improve nothing, but we're curious to
#see what happens. Make it so, Ensign Numba!
# from numba import jit
# @jit
@callable_cached
def pep_code_check_hint(
# ..................{ PARAMS ~ mandatory }..................
hint: object,
# ..................{ PARAMS ~ optional }..................
# Globals defined above, declared as optional parameters for efficient
# lookup as local attributes. Yes, this is an absurd microoptimization.
# *fight me, github developer community*
# "beartype._decor._code.codesnip" globals.
_ARG_NAME_GETRANDBITS=ARG_NAME_GETRANDBITS,
_CODE_INDENT_1=CODE_INDENT_1,
_CODE_INDENT_2=CODE_INDENT_2,
# "beartype._decor._code._pep._pepmagic" globals.
_FUNC_WRAPPER_LOCAL_LABEL=FUNC_WRAPPER_LOCAL_LABEL,
_HINT_ROOT_LABEL=HINT_ROOT_LABEL,
_HINT_META_INDEX_HINT=HINT_META_INDEX_HINT,
_HINT_META_INDEX_PLACEHOLDER=HINT_META_INDEX_PLACEHOLDER,
_HINT_META_INDEX_PITH_EXPR=HINT_META_INDEX_PITH_EXPR,
_HINT_META_INDEX_INDENT=HINT_META_INDEX_INDENT,
_LINE_RSTRIP_INDEX_AND=LINE_RSTRIP_INDEX_AND,
_LINE_RSTRIP_INDEX_OR=LINE_RSTRIP_INDEX_OR,
# "beartype._decor._code._pep._pepsnip" globals.
_PEP_CODE_CHECK_HINT_GENERIC_PREFIX=PEP_CODE_CHECK_HINT_GENERIC_PREFIX,
_PEP_CODE_CHECK_HINT_GENERIC_SUFFIX=PEP_CODE_CHECK_HINT_GENERIC_SUFFIX,
_PEP_CODE_CHECK_HINT_ROOT_PREFIX=PEP_CODE_CHECK_HINT_ROOT_PREFIX,
_PEP_CODE_CHECK_HINT_ROOT_SUFFIX_RANDOM_INT=(
PEP_CODE_CHECK_HINT_ROOT_SUFFIX_RANDOM_INT),
_PEP_CODE_CHECK_HINT_TUPLE_FIXED_PREFIX=(
PEP_CODE_CHECK_HINT_TUPLE_FIXED_PREFIX),
_PEP_CODE_CHECK_HINT_TUPLE_FIXED_SUFFIX=(
PEP_CODE_CHECK_HINT_TUPLE_FIXED_SUFFIX),
# "beartype._decor._code._pep._pepsnip" string globals required only for
# their bound "str.format" methods.
_PEP_CODE_CHECK_HINT_NONPEP_TYPE_format: Callable = (
PEP_CODE_CHECK_HINT_NONPEP_TYPE.format),
_PEP_CODE_CHECK_HINT_GENERIC_CHILD_format: Callable = (
PEP_CODE_CHECK_HINT_GENERIC_CHILD.format),
_PEP_CODE_CHECK_HINT_ROOT_SUFFIX_format: Callable = (
PEP_CODE_CHECK_HINT_ROOT_SUFFIX.format),
_PEP_CODE_CHECK_HINT_SEQUENCE_STANDARD_format: Callable = (
PEP_CODE_CHECK_HINT_SEQUENCE_STANDARD.format),
_PEP_CODE_CHECK_HINT_SEQUENCE_STANDARD_PITH_CHILD_EXPR_format: Callable = (
PEP_CODE_CHECK_HINT_SEQUENCE_STANDARD_PITH_CHILD_EXPR.format),
_PEP_CODE_CHECK_HINT_TUPLE_FIXED_EMPTY_format: Callable = (
PEP_CODE_CHECK_HINT_TUPLE_FIXED_EMPTY.format),
_PEP_CODE_CHECK_HINT_TUPLE_FIXED_LEN_format: Callable = (
PEP_CODE_CHECK_HINT_TUPLE_FIXED_LEN.format),
_PEP_CODE_CHECK_HINT_TUPLE_FIXED_NONEMPTY_CHILD_format: Callable = (
PEP_CODE_CHECK_HINT_TUPLE_FIXED_NONEMPTY_CHILD.format),
_PEP_CODE_CHECK_HINT_TUPLE_FIXED_NONEMPTY_PITH_CHILD_EXPR_format: Callable = (
PEP_CODE_CHECK_HINT_TUPLE_FIXED_NONEMPTY_PITH_CHILD_EXPR.format),
_PEP_CODE_PITH_ASSIGN_EXPR_format: Callable = (
PEP_CODE_PITH_ASSIGN_EXPR.format),
_PEP484_CODE_CHECK_HINT_UNION_CHILD_PEP_format: Callable = (
PEP484_CODE_CHECK_HINT_UNION_CHILD_PEP.format),
_PEP484_CODE_CHECK_HINT_UNION_CHILD_NONPEP_format: Callable = (
PEP484_CODE_CHECK_HINT_UNION_CHILD_NONPEP.format),
_PEP586_CODE_CHECK_HINT_LITERAL_format: Callable = (
PEP586_CODE_CHECK_HINT_LITERAL.format),
_PEP586_CODE_CHECK_HINT_PREFIX_format: Callable = (
PEP586_CODE_CHECK_HINT_PREFIX.format),
_PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_PREFIX_format: Callable = (
PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_PREFIX.format),
_PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_SUFFIX_format: Callable = (
PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_SUFFIX.format),
_PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_CHILD_format: Callable = (
PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_CHILD.format),
) -> Tuple[str, CallableScope, Tuple[str, ...]]:
'''
Python code snippet type-checking the previously localized parameter or
return value annotated by the passed PEP-compliant type hint against that
hint of the decorated callable.
This code generator is memoized for efficiency.
Caveats
----------
**This function intentionally accepts no** ``hint_label`` **parameter.**
Why? Since that parameter is typically specific to the caller, accepting
that parameter would effectively prevent this code generator from memoizing
the passed hint with the returned code, which would rather defeat the
point. Instead, this function only:
* Returns generic non-working code containing the placeholder
:attr:`beartype._decor._code._pep.pepcode.PITH_ROOT_NAME_PLACEHOLDER_STR`
substring that the caller is required to globally replace by the name of
the current parameter *or* ``return`` for return values (e.g., by calling
the builtin :meth:`str.replace` method) to generate the desired
non-generic working code type-checking that parameter or return value.
* Raises generic non-human-readable exceptions containing the placeholder
:attr:`beartype._util.cache.utilcacheerror.EXCEPTION_CACHED_PLACEHOLDER`
substring that the caller is required to explicitly catch and raise
non-generic human-readable exceptions from by calling the
:func:`beartype._util.cache.utilcacheerror.reraise_exception_cached`
function.
Parameters
----------
hint : object
PEP-compliant type hint to be type-checked.
Returns
----------
Tuple[str, CallableScope, Tuple[str, ...]]
3-tuple ``(func_wrapper_code, func_wrapper_locals,
hints_forwardref_class_basename)``, where:
* ``func_wrapper_code`` is a Python code snippet type-checking the
previously localized parameter or return value against this hint.
* ``func_wrapper_locals`` is the **local scope** (i.e., dictionary
mapping from the name to value of each attribute referenced in the
signature) of this wrapper function needed for this type-checking.
* ``hints_forwardref_class_basename`` is a tuple of the unqualified
classnames of `PEP 484`_-compliant relative forward references
visitable from this root hint (e.g., ``('MuhClass', 'YoClass')``
given the root hint ``Union['MuhClass', List['YoClass']]``).
Raises
----------
BeartypeDecorHintPepException
If this object is *not* a PEP-compliant type hint.
BeartypeDecorHintPepUnsupportedException
If this object is a PEP-compliant type hint currently unsupported by
the :func:`beartype.beartype` decorator.
BeartypeDecorHintPepDeprecatedWarning
If one or more PEP-compliant type hints visitable from this object are
deprecated.
BeartypeDecorHintPep484Exception
If one or more PEP-compliant type hints visitable from this object are
nested :attr:`typing.NoReturn` child hints, since
:attr:`typing.NoReturn` is valid *only* as a non-nested return hint.
BeartypeDecorHintPep593Exception
If one or more PEP-compliant type hints visitable from this object
subscript the `PEP 593`_-compliant :class:`typing.Annotated` class such
that:
* The second argument subscripting that class is an instance of the
:class:`beartype.vale.Is` class.
* One or more further arguments subscripting that class are *not*
instances of the :class:`beartype.vale.Is` class.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484
.. _PEP 593:
https://www.python.org/dev/peps/pep-0593
'''
# ..................{ HINT ~ root }..................
# Top-level hint relocalized for disambiguity.
hint_root = hint
# Delete the passed parameter whose name is ambiguous within the context of
# this function for similar disambiguity.
del hint
# Python code snippet evaluating to the current passed parameter or return
# value to be type-checked against the root hint.
pith_root_expr = VAR_NAME_PITH_ROOT
# ..................{ HINT ~ current }..................
# Currently visited hint.
hint_curr = None
# Current unsubscripted typing attribute associated with this hint (e.g.,
# "Union" if "hint_curr == Union[int, str]").
hint_curr_sign = None
# Python expression evaluating to an isinstance()-able class (e.g., origin
# type) associated with the currently visited type hint if any.
hint_curr_expr = None
#FIXME: Excise us up.
# Origin type (i.e., non-"typing" superclass suitable for shallowly
# type-checking the current pith against the currently visited hint by
# passing both to the isinstance() builtin) of this hint if this hint
# originates from such a superclass.
# hint_curr_type_origin = None
# Placeholder string to be globally replaced in the Python code snippet to
# be returned (i.e., "func_wrapper_code") by a Python code snippet
# type-checking the current pith expression (i.e.,
# "pith_curr_assigned_expr") against the currently visited hint (i.e.,
# "hint_curr").
hint_curr_placeholder = None
# Full Python expression evaluating to the value of the current pith (i.e.,
# possibly nested object of the passed parameter or return value to be
# type-checked against the currently visited hint).
#
# Note that this is *NOT* a Python >= 3.8-specific assignment expression
# but rather the original inefficient expression provided by the parent
# PEP-compliant type hint of the currently visited hint.
pith_curr_expr = None
# Python code snippet expanding to the current level of indentation
# appropriate for the currently visited hint.
indent_curr = _CODE_INDENT_2
# ..................{ HINT ~ child }..................
# Currently iterated PEP-compliant child hint subscripting the currently
# visited hint, initialized to the root hint to enable the subsequently
# called _enqueue_hint_child() function to enqueue the root hint.
hint_child = hint_root
#FIXME: Excise us up.
# Current unsubscripted typing attribute associated with this hint (e.g.,
# "Union" if "hint_child == Union[int, str]").
# hint_child_sign = None
# Integer identifying the currently iterated child PEP-compliant type
# hint of the currently visited parent PEP-compliant type hint.
#
# Note this ID is intentionally initialized to -1 rather than 0. Since
# the get_next_pep_hint_child_str() method increments *BEFORE*
# stringifying this ID, initializing this ID to -1 ensures that method
# returns a string containing only non-negative substrings starting at
# 0 rather than both negative and positive substrings starting at -1.
hint_child_placeholder_id = -1
#FIXME: Excise us up.
# Python expression evaluating to the value of the currently iterated child
# hint of the currently visited parent hint.
# hint_child_expr = None
#FIXME: Excise us up.
# Origin type (i.e., non-"typing" superclass suitable for shallowly
# type-checking the current pith against the currently visited hint by
# passing both to the isinstance() builtin) of the currently iterated child
# hint of the currently visited parent hint.
# hint_child_type_origin = None
#FIXME: Excise us up.
# Python code snippet evaluating to the current (possibly nested) object of
# the passed parameter or return value to be type-checked against the
# currently iterated child hint.
#pith_child_expr = None
# Python code snippet expanding to the current level of indentation
# appropriate for the currently iterated child hint, initialized to the
# root hint indentation to enable the subsequently called
# _enqueue_hint_child() function to enqueue the root hint.
indent_child = indent_curr
# ..................{ HINT ~ childs }..................
# Current tuple of all PEP-compliant child hints subscripting the currently
# visited hint (e.g., "(int, str)" if "hint_curr == Union[int, str]").
hint_childs: tuple = None # type: ignore[assignment]
# Number of PEP-compliant child hints subscripting the currently visited
# hint.
hint_childs_len: int = None # type: ignore[assignment]
# Set of all PEP-noncompliant child hints subscripting the currently
# visited hint.
hint_childs_nonpep: set = None # type: ignore[assignment]
# Set of all PEP-compliant child hints subscripting the currently visited
# hint.
hint_childs_pep: set = None # type: ignore[assignment]
# ..................{ HINT ~ pep 484 : forwardref }..................
# Set of the unqualified classnames referred to by all relative forward
# references visitable from this root hint if any *OR* "None" otherwise
# (i.e., if no such forward references are visitable).
hints_forwardref_class_basename: set = None # type: ignore[assignment]
# Possibly unqualified classname referred to by the currently visited
# forward reference type hint.
hint_curr_forwardref_classname: str = None # type: ignore[assignment]
# ..................{ HINT ~ pep 572 }..................
# The following local variables isolated to this subsection are only
# relevant when these conditions hold:
#
# * The active Python interpreter targets at least Python 3.8, the first
# major Python version to introduce support for "PEP 572 -- Assignment
# Expressions."
# * The currently visited hint is *NOT* the root hint (i.e., "hint_root").
# If the currently visited hint is the root hint, the current pith has
# already been localized to a local variable whose name is the value of
# the "VAR_NAME_PITH_ROOT" string global and thus need *NOT* be
# relocalized to another local variable using an assignment expression.
#
# This is a necessary and sufficient condition for deciding whether a
# Python >= 3.8-specific assignment expression localizing the current pith
# should be embedded in the code generated to type-check this pith against
# this hint. This is a non-trivial runtime optimization eliminating
# repeated computations to obtain this pith from PEP-compliant child hints.
# For example, if this hint constrains this pith to be a standard sequence,
# the child pith of this parent pith is a random item selected from this
# sequence; since obtaining this child pith is non-trivial, the computation
# required to do so is performed only once by assigning this child pith to
# a unique local variable during runtime type-checking and then repeatedly
# type-checking that variable rather than the computation required to
# continually reacquire this child pith: e.g.,
#
# # Type-checking conditional for "List[List[str]]" under Python < 3.8.
# if not (
# isinstance(__beartype_pith_0, list) and
# (
# isinstance(__beartype_pith_0[__beartype_random_int % len(__beartype_pith_0)], list) and
# isinstance(__beartype_pith_0[__beartype_random_int % len(__beartype_pith_0)][__beartype_random_int % len(__beartype_pith_0[__beartype_random_int % len(__beartype_pith_0)])], str) if __beartype_pith_0[__beartype_random_int % len(__beartype_pith_0)] else True
# ) if __beartype_pith_0 else True
# ):
#
    #     # The same conditional under Python >= 3.8.
# if not (
# isinstance(__beartype_pith_0, list) and
# (
# isinstance(__beartype_pith_1 := __beartype_pith_0[__beartype_random_int % len(__beartype_pith_0)], list) and
# isinstance(__beartype_pith_1[__beartype_random_int % len(__beartype_pith_1)], str) if __beartype_pith_1 else True
# ) if __beartype_pith_0 else True
# ):
#
# Note the localization of the random item selection from the root pith
# (i.e., "__beartype_pith_1 := __beartype_pith_0[__beartype_random_int %
# len(__beartype_pith_0)"), which only occurs once in the latter case
# rather than repeatedly as in the former case. In both cases, the same
# semantic type-checking is performed regardless of optimization.
#
# Note this optimization implicitly "bottoms out" when the currently
# visited hint is *NOT* subscripted by one or more non-ignorable
# PEP-compliant child hint arguments, as desired. If all child hints of the
# currently visited hint are either ignorable (e.g., "object", "Any") *OR*
# are non-ignorable non-"typing" types (e.g., "int", "str"), the currently
# visited hint has *NO* meaningful PEP-compliant child hints and is thus
# effectively a leaf node with respect to performing this optimization.
# is_pith_curr_assign_expr = None
# Integer suffixing the name of each local variable assigned the value of
# the current pith in a Python >= 3.8-specific assignment expression, thus
# uniquifying this variable in the body of the current wrapper function.
#
# Note that this integer is intentionally incremented as an efficient
# low-level scalar rather than an inefficient high-level
# "itertools.counter" object. Since both are equally thread-safe in the
# internal context of this function body, the former is preferable.
pith_curr_assign_expr_name_counter = 0
# Python >= 3.8-specific assignment expression assigning this full Python
# expression to the local variable assigned the value of this expression.
pith_curr_assign_expr: str = None # type: ignore[assignment]
# Name of the local variable uniquely assigned to by
# "pith_curr_assign_expr". Equivalently, this is the left-hand side (LHS)
# of that assignment expression.
pith_curr_assigned_expr: str = None # type: ignore[assignment]
# ..................{ HINT ~ label }..................
# Human-readable label prefixing the machine-readable representation of the
# currently visited type hint if this hint is nested (i.e., any hint
# *except* the root type hint) in exception and warning messages.
#
# Note that "hint_curr_label" should almost *ALWAYS* be used instead.
HINT_CHILD_LABEL = f'{_HINT_ROOT_LABEL} {repr(hint_root)} child'
# Human-readable label prefixing the machine-readable representation of the
# currently visited type hint in exception and warning messages.
hint_curr_label: str = None # type: ignore[assignment]
# ..................{ METADATA }..................
# Tuple of metadata describing the currently visited hint, appended by
# the previously visited parent hint to the "hints_meta" stack.
hint_curr_meta: tuple = None # type: ignore[assignment]
# Fixed list of all metadata describing all visitable hints currently
# discovered by the breadth-first search (BFS) below. This list acts as a
    # standard First In First Out (FIFO) queue, enabling this BFS to be
# implemented as an efficient imperative algorithm rather than an
# inefficient (and dangerous, due to both unavoidable stack exhaustion and
# avoidable infinite recursion) recursive algorithm.
#
# Note that this list is guaranteed by the previously called
# _die_if_hint_repr_exceeds_child_limit() function to be larger than the
# number of hints transitively visitable from this root hint. Ergo, *ALL*
# indexation into this list performed by this BFS is guaranteed to be safe.
# Ergo, avoid explicitly testing below that the "hints_meta_index_last"
# integer maintained by this BFS is strictly less than "SIZE_BIG", as this
# constraint is already guaranteed to be the case.
hints_meta = acquire_fixed_list(SIZE_BIG)
# 0-based index of metadata describing the currently visited hint in the
# "hints_meta" list.
hints_meta_index_curr = 0
# 0-based index of metadata describing the last visitable hint in the
# "hints_meta" list, initialized to "-1" to ensure that the initial
# incrementation of this index by the _enqueue_hint_child() directly called
# below initializes index 0 of the "hints_meta" fixed list.
hints_meta_index_last = -1
# ..................{ FUNC ~ code }..................
# Python code snippet type-checking the current pith against the currently
# visited hint (to be appended to the "func_wrapper_code" string).
func_curr_code: str = None # type: ignore[assignment]
# ..................{ FUNC ~ code : locals }..................
# Local scope (i.e., dictionary mapping from the name to value of each
# attribute referenced in the signature) of this wrapper function required
# by this Python code snippet.
func_wrapper_locals: CallableScope = {}
# True only if one or more PEP-compliant type hints visitable from this
# root hint require a pseudo-random integer. If true, the higher-level
# beartype._decor._code.codemain.generate_code() function prefixes the body
# of this wrapper function with code generating such an integer.
is_var_random_int_needed = False
# ..................{ CLOSURES }..................
# Closures centralizing frequently repeated logic and thus addressing any
# Don't Repeat Yourself (DRY) concerns during the breadth-first search
# (BFS) performed below.
def _enqueue_hint_child(pith_child_expr: str) -> str:
'''
**Enqueue** (i.e., append) a new tuple of metadata describing the
currently iterated child hint to the end of the ``hints_meta`` queue,
enabling this hint to be visited by the ongoing breadth-first search
(BFS) traversing over this queue.
Parameters
----------
pith_child_expr : str
Python code snippet evaluating to the child pith to be
type-checked against the currently iterated child hint.
This closure also implicitly expects the following local variables of
the outer scope to be set to relevant values:
hint_child : object
Currently iterated PEP-compliant child hint subscripting the
currently visited hint.
Returns
----------
str
Placeholder string to be subsequently replaced by code
type-checking this child pith against this child hint.
'''
# Allow these local variables of the outer scope to be modified below.
nonlocal hint_child_placeholder_id, hints_meta_index_last
# Increment the 0-based index of metadata describing the last visitable
# hint in the "hints_meta" list *BEFORE* overwriting the existing
# metadata at this index.
#
# Note this index is guaranteed to *NOT* exceed the fixed length of
# this list, by prior validation.
hints_meta_index_last += 1
# Increment the unique identifier of the currently iterated child hint.
hint_child_placeholder_id += 1
# Placeholder string to be globally replaced by code type-checking the
# child pith against this child hint, intentionally prefixed and
# suffixed by characters that:
#
# * Are intentionally invalid as Python code, guaranteeing that the
# top-level call to the exec() builtin performed by the @beartype
# decorator will raise a "SyntaxError" exception if the caller fails
# to replace all placeholder substrings generated by this method.
# * Protect the identifier embedded in this substring against ambiguous
# global replacements of larger identifiers containing this
# identifier. If this identifier were *NOT* protected in this manner,
# then the first substring "0" generated by this method would
# ambiguously overlap with the subsequent substring "10" generated by
# this method, which would then produce catastrophically erroneous
# and non-trivial to debug Python code.
hint_child_placeholder = (
f'{PEP_CODE_HINT_CHILD_PLACEHOLDER_PREFIX}'
f'{str(hint_child_placeholder_id)}'
f'{PEP_CODE_HINT_CHILD_PLACEHOLDER_SUFFIX}'
)
# Create and insert a new tuple of metadata describing this child hint
# at this index of this list.
#
# Note that this assignment is guaranteed to be safe, as "SIZE_BIG" is
# guaranteed to be substantially larger than "hints_meta_index_last".
hints_meta[hints_meta_index_last] = (
hint_child,
hint_child_placeholder,
pith_child_expr,
indent_child,
)
# Return this placeholder string.
return hint_child_placeholder
# ..................{ CLOSURES ~ locals }..................
# Local variables calling one or more closures declared above and thus
# deferred until after declaring those closures.
# Placeholder string to be globally replaced in the Python code snippet to
# be returned (i.e., "func_wrapper_code") by a Python code snippet
# type-checking the child pith expression (i.e., "pith_child_expr") against
# the currently iterated child hint (i.e., "hint_child"), initialized to a
# placeholder describing the root hint.
hint_child_placeholder = _enqueue_hint_child(pith_root_expr)
# Python code snippet type-checking the root pith against the root hint,
# localized separately from the "func_wrapper_code" snippet to enable this
# function to validate this code to be valid *BEFORE* returning this code.
func_root_code = (
f'{_PEP_CODE_CHECK_HINT_ROOT_PREFIX}{hint_child_placeholder}')
# Python code snippet to be returned, seeded with a placeholder to be
# replaced on the first iteration of the breadth-first search performed
# below with a snippet type-checking the root pith against the root hint.
func_wrapper_code = func_root_code
# ..................{ SEARCH }..................
# While the 0-based index of metadata describing the next visited hint in
# the "hints_meta" list does *NOT* exceed that describing the last
# visitable hint in this list, there remains at least one hint to be
# visited in the breadth-first search performed by this iteration.
while hints_meta_index_curr <= hints_meta_index_last:
# Metadata describing the currently visited hint.
hint_curr_meta = hints_meta[hints_meta_index_curr]
# Assert this metadata is a tuple as expected. This enables us to
# distinguish between proper access of used items and improper access
# of unused items of the parent fixed list containing this tuple, since
# an unused item of this list is initialized to "None" by default.
assert hint_curr_meta.__class__ is tuple, (
f'Current hint metadata {repr(hint_curr_meta)} at '
f'index {hints_meta_index_curr} not tuple.')
# Localize metadatum for both efficiency and f-string purposes.
hint_curr = hint_curr_meta[_HINT_META_INDEX_HINT]
hint_curr_placeholder = hint_curr_meta[_HINT_META_INDEX_PLACEHOLDER]
pith_curr_expr = hint_curr_meta[_HINT_META_INDEX_PITH_EXPR]
indent_curr = hint_curr_meta[_HINT_META_INDEX_INDENT]
#FIXME: This test can be trivially avoided by:
#* Initializing "hint_curr_label = HINT_ROOT_LABEL" above.
#* Unconditionally setting "hint_curr_label = HINT_CHILD_LABEL"
# below at the end of each iteration of this loop.
#
#Since we're going to be fundamentally refactoring this entire
#algorithm into a two-phase algorithm, let's hold off on that until the
#radioactive dust settles, shall we?
# Human-readable label prefixing the machine-readable representation of
# the currently visited type hint in exception and warning messages.
#
# Note that this label intentionally only describes the root and
# currently iterated child hints rather than the root hint, the
# currently iterated child hint, and all interim child hints leading
# from the former to the latter. The latter approach would be
# non-human-readable and insane.
hint_curr_label = (
HINT_ROOT_LABEL
if hints_meta_index_curr == 0 else
HINT_CHILD_LABEL
)
# ................{ REDUCTION }................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAVEATS: Synchronize changes here with the corresponding block of the
# beartype._decor._error._errorsleuth.CauseSleuth.__init__()
# method.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Reduce the currently visited hint to a lower-level hint-like object
# associated with this hint if this hint satisfies a condition.
#
# This decision is intentionally implemented as a linear series of
# tests ordered in descending likelihood for efficiency. While
# alternative implementations (that are more readily readable and
# maintainable) do exist, these alternatives all appear to be
# substantially less efficient.
#
# ................{ REDUCTION ~ pep 484 ~ none }................
# If this is the PEP 484-compliant "None" singleton, reduce this hint
# to the type of that singleton. While not explicitly defined by the
# "typing" module, PEP 484 explicitly supports this singleton:
# When used in a type hint, the expression None is considered
# equivalent to type(None).
# The "None" singleton is used to type callables lacking an explicit
# "return" statement and thus absurdly common. Ergo, detect this first.
if hint_curr is None:
hint_curr = NoneType
# ................{ REDUCTION ~ pep 593 }................
# If this is a PEP 593-compliant type metahint...
#
# Metahints form the core backbone of our beartype-specific data
# validation API and are thus also extremely common. Ergo, detect these
# next-to-first.
elif is_hint_pep593(hint_curr):
# If this metahint is beartype-specific (i.e., its second argument
# is an instance of the "beartype._vale._valesub._SubscriptedIs"
# class produced by subscripting the "Is" class), ignore all
# annotations on this hint by reducing this hint to its origin
# (e.g., "str" in "Annotated[str, 50, False]").
if not is_hint_pep593_beartype(hint_curr):
hint_curr = get_hint_pep593_metahint(hint_curr)
# Else, that argument is beartype-specific. In this case, preserve
# this hint as is for subsequent handling below.
# ................{ REDUCTION ~ pep 544 }................
# If this is a PEP 484-compliant IO generic base class *AND* the active
# Python interpreter targets at least Python >= 3.8 and thus supports
# PEP 544-compliant protocols, reduce this functionally useless hint to
# the corresponding functionally useful beartype-specific PEP
# 544-compliant protocol implementing this hint.
#
# Note that PEP 484-compliant IO generic base classes are technically
# usable under Python < 3.8 (e.g., by explicitly subclassing those
# classes from third-party classes). Ergo, we can neither safely emit
# warnings nor raise exceptions on visiting these classes under *ANY*
# Python version.
elif is_hint_pep544_io_generic(hint_curr):
hint_curr = get_hint_pep544_io_protocol_from_generic(hint_curr)
# ................{ REDUCTION ~ pep 484 ~ new type }................
# If this is a PEP 484-compliant new type hint, reduce this hint to the
# user-defined class aliased by this hint. Although this logic could
# also be performed below, doing so here simplifies matters.
#
# New type hints are functionally useless for any meaningful purpose
# and thus reasonably rare in the wild. Ergo, detect these last.
elif is_hint_pep484_newtype(hint_curr):
hint_curr = get_hint_pep484_newtype_class(hint_curr)
# ................{ REDUCTION ~ end }................
#FIXME: Comment this sanity check out after we're sufficiently
#convinced this algorithm behaves as expected. While useful, this check
#requires a linear search over the entire code and is thus costly.
# assert hint_curr_placeholder in func_wrapper_code, (
# '{} {!r} placeholder {} not found in wrapper body:\n{}'.format(
# hint_curr_label, hint, hint_curr_placeholder, func_wrapper_code))
# ................{ PEP }................
# If this hint is PEP-compliant...
if is_hint_pep(hint_curr):
#FIXME: Refactor to call warn_if_hint_pep_unsupported() instead.
#Actually...wait. This is probably still a valid test here. We'll
#need to instead augment the is_hint_ignorable() function to
#additionally test whether the passed hint is unsupported, in which
#case that function should return false as well as emit a non-fatal
#warning ala the new warn_if_hint_pep_unsupported() function --
#which should probably simply be removed now. *sigh*
#FIXME: Actually, in that case, we can simply reduce the following
#two calls to simply:
# die_if_hint_pep_ignorable(
# hint=hint_curr, hint_label=hint_curr_label)
#Of course, this implies we want to refactor the
#die_if_hint_pep_unsupported() function into
#die_if_hint_pep_ignorable()... probably.
# If this hint is currently unsupported, raise an exception.
#
# Note the human-readable label prefixing the representations of
# child PEP-compliant type hints is unconditionally passed. Since
# the root hint has already been validated to be supported by
# the above call to the same function, this call is guaranteed to
# *NEVER* raise an exception for that hint.
die_if_hint_pep_unsupported(
hint=hint_curr, hint_label=hint_curr_label)
# Else, this hint is supported.
# Assert that this hint is unignorable. Iteration below generating
# code for child hints of the current parent hint is *REQUIRED* to
# explicitly ignore ignorable child hints. Since the caller has
# explicitly ignored ignorable root hints, these two guarantees
# together ensure that all hints visited by this breadth-first
# search *SHOULD* be unignorable. Naturally, we validate that here.
assert not is_hint_ignorable(hint_curr), (
f'{hint_curr_label} {repr(hint_curr)} '
f'ignorable but not ignored.')
# Sign uniquely identifying this hint.
hint_curr_sign = get_hint_pep_sign(hint_curr)
# If this sign is currently unsupported, raise an exception.
#
# Note the human-readable label prefixing the representations of
# child PEP-compliant type hints is unconditionally passed. Since
# the root hint has already been validated to be supported by the
# above call to the die_if_hint_pep_unsupported() function, this
# call is guaranteed to *NEVER* raise exceptions for the root hint.
die_if_hint_pep_sign_unsupported(
hint_sign=hint_curr_sign, hint_label=hint_curr_label)
# Else, this attribute is supported.
# If this sign and thus this hint is deprecated, emit a non-fatal
# warning warning users of this deprecation.
# print(f'Testing {hint_curr_label} hint {repr(hint_curr)} for deprecation...')
warn_if_hint_pep_sign_deprecated(
hint=hint_curr,
hint_sign=hint_curr_sign,
hint_label=hint_curr_label,
)
# Tuple of all arguments subscripting this hint if any *OR* the
# empty tuple otherwise (e.g., if this hint is its own unsubscripted
# "typing" attribute).
#
# Note that the "__args__" dunder attribute is *NOT* guaranteed to
# exist for arbitrary PEP-compliant type hints. Ergo, we obtain
# this attribute via a higher-level utility getter instead.
hint_childs = get_hint_pep_args(hint_curr)
hint_childs_len = len(hint_childs)
# Python code snippet expanding to the current level of indentation
# appropriate for the currently iterated child hint.
#
# Note that this is almost always but technically *NOT* always
# required below by logic generating code type-checking the
# currently visited parent hint. Naturally, unconditionally setting
# this string here trivially optimizes the common case.
indent_child = f'{indent_curr}{_CODE_INDENT_1}'
#FIXME: Unit test that this is behaving as expected. Doing so will
#require further generalizations, including:
#* In the "beartype._decor.main" submodule:
# * Detect when running under tests.
# * When running under tests, define a new
# "func_wrapper.__beartype_wrapper_code" attribute added to
# decorated callables to be the "func_wrapper_code" string rather than
# True. Note that this obviously isn't the right way to do
# source code association. Ideally, we'd at least interface with
# the stdlib "linecache" module (e.g., by calling the
# linecache.lazycache() function intended to be used to cache
# the source code for non-file-based modules) and possibly even
# go so far as to define a PEP 302-compatible beartype module
# loader. Clearly, that's out of scope. For now, this suffices.
#* In the "beartype_test.a00_unit.data._data_hint_pep" submodule:
# * Add a new "_PepHintMetadata.code_str_match_regexes" field,
# defined as an iterable of regular expressions matching
# substrings of the "func_wrapper.__beartype_wrapper_code"
# attribute that are expected to exist.
# * For most "HINTS_PEP_META" entries, default this field to
# merely the empty tuple.
# * For deeply nested "HINTS_PEP_META" entries, define this
# field as follows:
# code_str_match_regexes=(r'\s+:=\s+',)
#* In the "beartype_test.a00_unit.pep.p484.test_p484" submodule:
# * Match the "pep_hinted.__beartype_wrapper_code" string against
# all regular expressions in the "code_str_match_regexes"
# iterable for the currently iterated "pep_hint_meta".
#
#This is fairly important, as we have no other reusable means of
#ascertaining whether this is actually being applied in general.
#FIXME: That's all great, except for the
#"func_wrapper.__beartype_wrapper_code" part. Don't do that,
#please. We really do just want to do this right the first time. As
#expected, the key to doing so is the linecache.lazycache()
#function, whose implementation under Python 3.7 reads:
#
# def lazycache(filename, module_globals):
# """Seed the cache for filename with module_globals.
#
# The module loader will be asked for the source only when getlines is
# called, not immediately.
#
# If there is an entry in the cache already, it is not altered.
#
# :return: True if a lazy load is registered in the cache,
# otherwise False. To register such a load a module loader with a
# get_source method must be found, the filename must be a cachable
# filename, and the filename must not be already cached.
# """
# if filename in cache:
# if len(cache[filename]) == 1:
# return True
# else:
# return False
# if not filename or (filename.startswith('<') and filename.endswith('>')):
# return False
# # Try for a __loader__, if available
# if module_globals and '__loader__' in module_globals:
# name = module_globals.get('__name__')
# loader = module_globals['__loader__']
# get_source = getattr(loader, 'get_source', None)
#
# if name and get_source:
# get_lines = functools.partial(get_source, name)
# cache[filename] = (get_lines,)
# return True
# return False
#
#Given that, what we need to do is:
#* Define a new "beartype._decor._pep302" submodule implementing a
# PEP 302-compatible loader for @beartype-generated wrapper
# functions, enabling external callers (including the stdlib
# "linecache" module) to obtain the source for these functions.
# For space efficiency, this submodule should internally store
# code in a compressed format -- which probably means "gzip" for
# maximal portability. This submodule should at least define these
# attributes:
# * "_FUNC_WRAPPER_MODULE_NAME_TO_CODE", a dictionary mapping from
# the unique fake module names assigned to @beartype-generated
# wrapper functions by the @beartype decorator to the compressed
# source strings for those fake modules.
# * get_source(), a function accepting one unique fake module name
# assigned to an arbitrary @beartype-generated wrapper function
# by the @beartype decorator and returning the uncompressed
# source string for that fake module. Clearly, this function
# should internally access the
# "_FUNC_WRAPPER_MODULE_NAME_TO_CODE" dictionary and either:
# * If the passed module name has *NOT* already been registered
# to that dictionary, raise an exception.
# * Else, uncompress the compressed source string previously
# registered under that module name with that dictionary and
# return that uncompressed string. Don't worry about caching
# uncompressed strings here; that's exactly what the stdlib
# "linecache" module already does on our behalf.
# Ergo, this function should have signature resembling:
# def get_source(func_wrapper_module_name: str) -> str:
# * set_source(), a function accepting one unique fake module name
# assigned to an arbitrary @beartype-generated wrapper function
# by the @beartype decorator as well as as the uncompressed
# source string for that fake module. Clearly, this function
# should internally
# "_FUNC_WRAPPER_MODULE_NAME_TO_CODE" dictionary and either:
# * If the passed module name has already been registered to
# that dictionary, raise an exception.
# * Else, compress the passed uncompressed source string and
# register that compressed string under that module name with
# that dictionary.
#* In the "beartype._decor.main" submodule:
# *
# If...
if (
# The active Python interpreter targets Python >= 3.8 *AND*...
IS_PYTHON_AT_LEAST_3_8 and
# The current pith is *NOT* the root pith...
#
# Note that we explicitly test against piths rather than
# seemingly equivalent metadata to account for edge cases.
# Notably, child hints of unions (and possibly other "typing"
# objects) do *NOT* narrow the current pith and are *NOT* the
# root hint. Ergo, a seemingly equivalent test like
# "hints_meta_index_curr != 0" would generate false positives
# and thus unnecessarily inefficient code.
pith_curr_expr != pith_root_expr
):
# Then all conditions needed to assign the current pith to a unique
# local variable via a Python >= 3.8-specific assignment expression
# are satisfied. In this case...
# Increment the integer suffixing the name of this variable
# *BEFORE* defining this local variable.
pith_curr_assign_expr_name_counter += 1
# Reduce the current pith expression to the name of this local
# variable.
pith_curr_assigned_expr = (
f'{VAR_NAME_PREFIX_PITH}'
f'{pith_curr_assign_expr_name_counter}'
)
# Python >= 3.8-specific assignment expression assigning this
# full expression to this variable.
pith_curr_assign_expr = _PEP_CODE_PITH_ASSIGN_EXPR_format(
pith_curr_assigned_expr=pith_curr_assigned_expr,
pith_curr_expr=pith_curr_expr,
)
# Else, one or more of these conditions have *NOT* been satisfied.
# In this case, preserve the Python code snippet evaluating to the
# current pith as is.
else:
pith_curr_assign_expr = pith_curr_assigned_expr = (
pith_curr_expr)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# NOTE: Whenever adding support for (i.e., when generating code
# type-checking) a new "typing" attribute below, similar support
# for that attribute *MUST* also be added to the parallel:
# * "beartype._util.hint.pep.errormain" submodule, which
# raises exceptions on the current pith failing this check.
# * "beartype._util.data.hint.pep.datapep.HINT_SIGNS_SUPPORTED_DEEP"
# frozen set of all supported unsubscripted "typing" attributes
# for which this function generates deeply type-checking code.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#FIXME: Python 3.10 provides proper syntactic support for "case"
#statements, which should allow us to dramatically optimize this
#"if" logic into equivalent "case" logic *AFTER* we drop support
#for Python 3.9. Of course, that will be basically never, so we'll
#have to preserve this for basically forever. What you gonna do?
# Switch on (as in, pretend Python provides a "case" statement)
# this attribute to decide which type of code to generate to
# type-check the current pith against the current hint.
#
# This decision is intentionally implemented as a linear series of
# tests ordered in descending likelihood for efficiency. While
# alternative implementations (that are more readily readable and
# maintainable) do exist, these alternatives all appear to be
# substantially less efficient.
#
# Consider the standard alternative of sequestering the body of
# each test implemented below into either:
#
# * A discrete private function called by this function. This
# approach requires maintaining a global private dictionary
# mapping from each support unsubscripted typing attribute to
# the function generating code for that attribute: e.g.,
# def pep_code_check_union(...): ...
# _HINT_TYPING_ATTR_ARGLESS_TO_CODER = {
# typing.Union: pep_code_check_union,
# }
# Each iteration of this loop then looks up the function
# generating code for the current attribute from this dictionary
# and calls that function to do so. Function calls come with
# substantial overhead in Python, impacting performance more
# than the comparable linear series of tests implemented below.
# Additionally, these functions *MUST* mutate local variables of
# this function by some arcane means -- either:
# * Passing these locals to each such function, returning these
# locals from each such function, and assigning these return
# values to these locals in this function after each such call.
# * Passing a single composite fixed list of these locals to each
# such function, which then mutates these locals in-place,
# which then necessitates this function permanently store these
# locals in such a list rather than as local variables.
# * A discrete closure of this function, which adequately resolves
# the aforementioned locality issue via the "nonlocal" keyword at
# a substantial up-front performance cost of redeclaring these
# closures on each invocation of this function.
# ..............{ UNION }..............
# If this hint is a union (e.g., "typing.Union[bool, str]",
# typing.Optional[float]")...
#
# Note that unions are non-physical abstractions of physical types
# and thus *NOT* themselves subject to type-checking; only the
# subscripted arguments of unions are type-checked. This differs
# from "typing" pseudo-containers like "List[int]", in which both
# the parent "List" and child "int" types represent physical types
# to be type-checked. Ergo, unions themselves impose no narrowing
# of the current pith expression and thus *CANNOT* by definition
# benefit from Python >= 3.8-specific assignment expressions. This
# differs from "typing" pseudo-containers, which narrow the current
# pith expression and thus do benefit from these expressions.
if hint_curr_sign in HINT_PEP484_SIGNS_UNION:
# Assert this union is subscripted by one or more child hints.
# Note this should *ALWAYS* be the case, as:
#
# * The unsubscripted "typing.Union" object is explicitly
# listed in the "HINTS_IGNORABLE_SHALLOW" set and should thus
# have already been ignored when present.
# * The "typing" module explicitly prohibits empty
# subscription: e.g.,
# >>> typing.Union[]
# SyntaxError: invalid syntax
# >>> typing.Union[()]
# TypeError: Cannot take a Union of no types.
assert hint_childs, (
f'{hint_curr_label} {repr(hint_curr)} unsubscripted.')
# Else, this union is subscripted by two or more arguments. Why
# two rather than one? Because the "typing" module reduces
# unions of one argument to that argument: e.g.,
# >>> import typing
# >>> typing.Union[int]
# int
# Acquire a pair of sets for use in prefiltering child hints
# into the subset of all PEP-noncompliant and -compliant child
# hints subscripting this union. For efficiency, reuse
# previously created sets if available.
#
# Since these child hints require fundamentally different forms
# of type-checking, prefiltering child hints into these sets
# *BEFORE* generating code type-checking these child hints
# improves both efficiency and maintainability below.
hint_childs_nonpep = acquire_object_typed(set)
hint_childs_pep = acquire_object_typed(set)
# Clear these sets prior to use below.
hint_childs_nonpep.clear()
hint_childs_pep.clear()
# For each subscripted argument of this union...
for hint_child in hint_childs:
# Assert that this child hint is *NOT* shallowly ignorable.
# Why? Because any union containing one or more shallowly
# ignorable child hints is deeply ignorable and should thus
# have already been ignored after a call to the
# is_hint_ignorable() tester passed this union on handling
# the parent hint of this union.
assert hint_child not in HINTS_IGNORABLE_SHALLOW, (
f'{hint_curr_label} {repr(hint_curr)} child '
f'{repr(hint_child)} ignorable but not ignored.')
# If this child hint is PEP-compliant...
if is_hint_pep(hint_child):
# Filter this child hint into the set of PEP-compliant
# child hints.
#
# Note that this PEP-compliant child hint *CANNOT* also
# be filtered into the set of PEP-noncompliant child
# hints, even if this child hint originates from a
# non-"typing" type (e.g., "List[int]" from "list").
# Why? Because that would then induce false positives
# when the current pith shallowly satisfies this
# non-"typing" type but does *NOT* deeply satisfy this
# child hint.
hint_childs_pep.add(hint_child)
# Else, this child hint is PEP-noncompliant. In this case,
# filter this child hint into the list of PEP-noncompliant
# arguments.
else:
hint_childs_nonpep.add(hint_child)
# Initialize the code type-checking the current pith against
# these arguments to the substring prefixing all such code.
func_curr_code = PEP484_CODE_CHECK_HINT_UNION_PREFIX
# If this union is subscripted by one or more PEP-noncompliant
# child hints, generate and append efficient code type-checking
# these child hints *BEFORE* less efficient code type-checking
# any PEP-compliant child hints subscripting this union.
if hint_childs_nonpep:
func_curr_code += (
_PEP484_CODE_CHECK_HINT_UNION_CHILD_NONPEP_format(
# Python expression yielding the value of the
# current pith. Specifically...
pith_curr_expr=(
# If this union is subscripted by one or more
# PEP-compliant child hints, prefer the
# expression assigning this value to a local
# variable efficiently reused by subsequent
# code generated for PEP-compliant child hints.
pith_curr_assign_expr if hint_childs_pep else
# Else, this union is *NOT* subscripted by one
# or more PEP-compliant child hints. Since this
# is the first and only test generated for this
# union, prefer the expression yielding the
# value of the current pith *WITHOUT* assigning
# this value to a local variable, which would
# otherwise pointlessly go unused.
pith_curr_expr
),
# Python expression evaluating to a tuple of these
# arguments.
#
# Note that we would ideally avoid coercing this
# set into a tuple when this set only contains one
# type by passing that type directly to the
# _add_func_wrapper_local_type() function. Sadly,
# the "set" class defines no convenient or
# efficient means of retrieving the only item of a
# 1-set. Indeed, the most efficient means of doing
# so is to iterate over that set and break:
# for first_item in muh_set: break
# While we *COULD* technically leverage that
# approach here, doing so would also mandate adding
# multiple intermediate tests, mitigating any
# performance gains. Ultimately, we avoid doing so
# by falling back to the usual approach. See also
# this relevant self-StackOverflow post:
# https://stackoverflow.com/a/40054478/2809027
hint_curr_expr=add_func_scope_types(
types=hint_childs_nonpep,
types_scope=func_wrapper_locals,
types_label=_FUNC_WRAPPER_LOCAL_LABEL,
),
))
# For each PEP-compliant child hint of this union, generate and
# append code type-checking this child hint.
for hint_child_index, hint_child in enumerate(hint_childs_pep):
func_curr_code += (
_PEP484_CODE_CHECK_HINT_UNION_CHILD_PEP_format(
# Python expression yielding the value of the
# current pith.
hint_child_placeholder=_enqueue_hint_child(
# If this union is subscripted by either...
#
# Then prefer the expression efficiently
# reusing the value previously assigned to a
# local variable by either the above
# conditional or prior iteration of the current
# conditional.
pith_curr_assigned_expr
if (
# One or more PEP-noncompliant child hints
# *OR*...
hint_childs_nonpep or
# This is any PEP-compliant child hint but
# the first...
hint_child_index > 0
) else
# Else, this union is both subscripted by no
# PEP-noncompliant child hints *AND* this is
# the first PEP-compliant child hint, prefer
# the expression assigning this value to a
# local variable efficiently reused by code
# generated by the following "else" condition
# under subsequent iteration.
#
# Note this child hint is guaranteed to be
# followed by at least one more child hint.
# Why? Because the "typing" module forces
# unions to be subscripted by two or more child
# hints. By deduction, this union must thus be
# subscripted by two or more PEP-compliant
# child hints. Ergo, we needn't explicitly
# validate that constraint here.
pith_curr_assign_expr
)))
# If this code is *NOT* its initial value, this union is
# subscripted by one or more unignorable child hints and the
# above logic generated code type-checking these child hints.
# In this case...
if func_curr_code is not PEP484_CODE_CHECK_HINT_UNION_PREFIX:
# Munge this code to...
func_curr_code = (
# Strip the erroneous " or" suffix appended by the
# last child hint from this code.
f'{func_curr_code[:_LINE_RSTRIP_INDEX_OR]}'
# Suffix this code by the substring suffixing all such
# code.
f'{PEP484_CODE_CHECK_HINT_UNION_SUFFIX}'
# Format the "indent_curr" prefix into this code deferred
# above for efficiency.
).format(indent_curr=indent_curr)
# Else, this snippet is its initial value and thus ignorable.
# Release this pair of sets back to their respective pools.
release_object_typed(hint_childs_nonpep)
release_object_typed(hint_childs_pep)
# Else, this hint is *NOT* a union.
# ..............{ SHALLOW }..............
# If this hint both...
elif (
# Originates from an origin type and may thus be shallowly
# type-checked against that type *AND* is either...
hint_curr_sign in HINT_SIGNS_TYPE_ORIGIN_STDLIB and (
#FIXME: Ideally, this line should just resemble:
# not is_hint_pep_subscripted(hint_curr)
#Unfortunately, unsubscripted type hints under Python 3.6
#like "typing.List" are technically subscripted due to
#subclassing subscripted superclasses, which is insane. Due
#to this insanity, we currently ignore type variables for
#purposes of detecting subscription. Since this is awful,
#drop this as soon as we drop Python 3.6 support.
# Unsubscripted *OR*...
not (
is_hint_pep_subscripted(hint_curr)
if IS_PYTHON_AT_LEAST_3_7 else
hint_childs_len
) or
#FIXME: Remove this branch *AFTER* deeply supporting all
#hints.
# Currently unsupported with deep type-checking...
hint_curr_sign not in HINT_SIGNS_SUPPORTED_DEEP
)
):
# Then generate trivial code shallowly type-checking the current
# pith as an instance of the origin type originating this sign
# (e.g., "list" for the hint "typing.List[int]").
# Code type-checking the current pith against this origin type.
func_curr_code = _PEP_CODE_CHECK_HINT_NONPEP_TYPE_format(
pith_curr_expr=pith_curr_expr,
# Python expression evaluating to this origin type.
hint_curr_expr=add_func_scope_type(
# Origin type of this hint if any *OR* raise an
# exception -- which should *NEVER* happen, as this
# hint was validated above to be supported.
cls=get_hint_pep_stdlib_type(hint_curr),
cls_scope=func_wrapper_locals,
cls_label=_FUNC_WRAPPER_LOCAL_LABEL,
),
)
# Else, this hint is either subscripted, not shallowly
# type-checkable, *OR* deeply type-checkable.
# ..............{ SEQUENCES ~ standard OR tuple vari. }..............
# If this hint is either...
elif (
# A standard sequence (e.g., "typing.List[int]") *OR*...
hint_curr_sign in HINT_SIGNS_SEQUENCE_STANDARD or (
# A tuple *AND*...
hint_curr_sign in HINT_SIGNS_TUPLE and
# This tuple is subscripted by exactly two child hints
# *AND*...
hint_childs_len == 2 and
# The second child hint is just an unquoted ellipsis...
hint_childs[1] is Ellipsis
)
# Then this hint is of the form "Tuple[{typename}, ...]",
# typing a tuple accepting a variadic number of items all
# satisfying the "{typename}" child hint. Since this case is
# semantically equivalent to that of standard sequences, we
# transparently handle both here for maintainability.
#
# See below for logic handling the fixed-length "Tuple" form.
# Then this hint is either a standard sequence *OR* a similar hint
# semantically resembling a standard sequence, subscripted by one
# or more child hints.
):
# Python expression evaluating to this origin type.
hint_curr_expr = add_func_scope_type(
# Origin type of this attribute if any *OR* raise an
# exception -- which should *NEVER* happen, as all standard
# sequences originate from an origin type.
cls=get_hint_pep_stdlib_type(hint_curr),
cls_scope=func_wrapper_locals,
cls_label=_FUNC_WRAPPER_LOCAL_LABEL,
)
# print(f'Sequence type hint {hint_curr} origin type scoped: {hint_curr_expr}')
# Assert this sequence is either subscripted by exactly one
# argument *OR* a non-standard sequence (e.g., "typing.Tuple").
# Note that the "typing" module should have already guaranteed
# this on our behalf. Still, we trust nothing and no one:
# >>> import typing as t
# >>> t.List[int, str]
# TypeError: Too many parameters for typing.List; actual 2, expected 1
assert hint_curr_sign in HINT_SIGNS_TYPE_ORIGIN_STDLIB
assert is_hint_pep_subscripted(hint_curr)
assert (
hint_childs_len == 1 or
hint_curr_sign in HINT_SIGNS_TUPLE
), (
f'{hint_curr_label} {repr(hint_curr)} sequence '
f'subscripted by {hint_childs_len} arguments.')
# Lone child hint of this parent hint.
hint_child = hint_childs[0]
# If this child hint is *NOT* ignorable, deeply type-check both
# the type of the current pith *AND* a randomly indexed item of
# this pith. Specifically...
if not is_hint_ignorable(hint_child):
# Record that a pseudo-random integer is now required.
is_var_random_int_needed = True
# Code type-checking the current pith against this type.
func_curr_code = (
_PEP_CODE_CHECK_HINT_SEQUENCE_STANDARD_format(
indent_curr=indent_curr,
pith_curr_assign_expr=pith_curr_assign_expr,
pith_curr_assigned_expr=pith_curr_assigned_expr,
hint_curr_expr=hint_curr_expr,
hint_child_placeholder=_enqueue_hint_child(
# Python expression yielding the value of a
# randomly indexed item of the current pith
# (i.e., standard sequence) to be type-checked
# against this child hint.
_PEP_CODE_CHECK_HINT_SEQUENCE_STANDARD_PITH_CHILD_EXPR_format(
pith_curr_assigned_expr=(
pith_curr_assigned_expr))),
))
# Else, this child hint is ignorable. In this case, fallback to
# generating trivial code shallowly type-checking the current
# pith as an instance of this origin type.
else:
func_curr_code = _PEP_CODE_CHECK_HINT_NONPEP_TYPE_format(
pith_curr_expr=pith_curr_expr,
hint_curr_expr=hint_curr_expr,
)
# Else, this hint is neither a standard sequence *NOR* variadic
# tuple.
# ..............{ SEQUENCES ~ tuple : fixed }..............
# If this hint is a tuple, this tuple is *NOT* of the variadic form
# and *MUST* thus be of the fixed-length form.
#
# Note that if this hint is a:
# * PEP 484-compliant "typing.Tuple"-based hint, this hint is
# guaranteed to contain one or more child hints. Moreover, if
# this hint contains exactly one child hint that is the empty
# tuple, this hint is the empty fixed-length form
# "typing.Tuple[()]".
# * PEP 585-compliant "tuple"-based hint, this hint is *NOT*
# guaranteed to contain one or more child hints. If this hint
# contains *NO* child hints, this hint is equivalent to the empty
# fixed-length PEP 484-compliant form "typing.Tuple[()]". Yes,
# PEP 585 even managed to violate PEP 484-compliance. UUUURGH!
#
# While tuples are sequences, the "typing.Tuple" singleton that
# types tuples violates the syntactic norms established for other
# standard sequences by concurrently supporting two different
# syntaxes with equally different semantics:
# * "typing.Tuple[{typename}, ...]", typing a tuple whose items all
# satisfy the "{typename}" child hint. Note that the "..."
# substring here is a literal ellipsis.
# * "typing.Tuple[{typename1}, {typename2}, ..., {typenameN}]",
# typing a tuple whose:
# * First item satisfies the "{typename1}" child hint.
# * Second item satisfies the "{typename2}" child hint.
# * Last item satisfies the "{typenameN}" child hint.
# Note that the "..." substring here is *NOT* a literal ellipsis.
#
# This is what happens when non-human-readable APIs are promoted.
elif hint_curr_sign in HINT_SIGNS_TUPLE:
# Assert this tuple was subscripted by at least one child hint
# if this tuple is PEP 484-compliant. Note that the "typing"
# module should have already guaranteed this on our behalf.
# Trust is for the weak. See above for further commentary.
assert hint_curr_sign is tuple or hint_childs, (
f'{hint_curr_label} {repr(hint_curr)} '
f'fixed-length tuple empty.')
# Assert this tuple is *NOT* of the syntactic form
# "typing.Tuple[{typename}, ...]" handled by prior logic.
assert (
hint_childs_len <= 1 or
hint_childs[1] is not Ellipsis
), (
f'{hint_curr_label} {repr(hint_curr)} '
f'variadic tuple unhandled.')
# Initialize the code type-checking the current pith against
# this tuple to the substring prefixing all such code.
func_curr_code = _PEP_CODE_CHECK_HINT_TUPLE_FIXED_PREFIX
# If this hint is the empty fixed-length tuple, generate and
# append code type-checking the current pith to be the empty
# tuple. Yes, this edge case constitutes a code smell.
if is_hint_pep_tuple_empty(hint_curr):
func_curr_code += (
_PEP_CODE_CHECK_HINT_TUPLE_FIXED_EMPTY_format(
pith_curr_assigned_expr=pith_curr_assigned_expr))
# Else, that ridiculous edge case does *NOT* apply. In this
# case...
else:
# Append code type-checking the length of this pith.
func_curr_code += (
_PEP_CODE_CHECK_HINT_TUPLE_FIXED_LEN_format(
pith_curr_assigned_expr=pith_curr_assigned_expr,
hint_childs_len=hint_childs_len,
))
# For each child hint of this tuple...
for hint_child_index, hint_child in enumerate(hint_childs):
# If this child hint is ignorable, skip to the next.
if is_hint_ignorable(hint_child):
continue
# Else, this child hint is unignorable.
# Append code type-checking this child pith.
func_curr_code += _PEP_CODE_CHECK_HINT_TUPLE_FIXED_NONEMPTY_CHILD_format(
hint_child_placeholder=_enqueue_hint_child(
# Python expression yielding the value of
# the currently indexed item of this tuple to
# be type-checked against this child hint.
_PEP_CODE_CHECK_HINT_TUPLE_FIXED_NONEMPTY_PITH_CHILD_EXPR_format(
pith_curr_assigned_expr=(
pith_curr_assigned_expr),
pith_child_index=hint_child_index,
)
),
)
# Munge this code to...
func_curr_code = (
# Strip the erroneous " and" suffix appended by the
# last child hint from this code.
f'{func_curr_code[:_LINE_RSTRIP_INDEX_AND]}'
# Suffix this code by the substring suffixing all such
# code.
f'{_PEP_CODE_CHECK_HINT_TUPLE_FIXED_SUFFIX}'
# Format...
).format(
# Indentation deferred above for efficiency.
indent_curr=indent_curr,
pith_curr_assign_expr=pith_curr_assign_expr,
)
# Else, this hint is *NOT* a tuple.
# ..............{ ANNOTATED }..............
# If this hint is a PEP 593-compliant type metahint, this metahint
# is guaranteed by the reduction performed above to be
# beartype-specific (i.e., metahint whose second argument is an
# instance of the "beartype._vale._valesub._SubscriptedIs" class
# produced by subscripting the "Is" class). In this case...
elif hint_curr_sign is HINT_PEP593_ATTR_ANNOTATED:
# PEP-compliant type hint annotated by this metahint, localized
# to the "hint_child" local variable to satisfy the public API
# of the _enqueue_hint_child() closure called below.
hint_child = get_hint_pep593_metahint(hint_curr)
# Initialize the code type-checking the current pith against
# this metahint to the substring prefixing all such code.
#
# Note that we intentionally do *NOT* defer formatting these
# variables into this string as we do for most other kinds of
# type hints. Why? Safety. Since caller-defined code could
# theoretically embed substrings accidentally matching these
# variable names, we safely (but inefficiently) format these
# variables into the exact strings known to embed them.
func_curr_code = (
_PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_PREFIX_format(
indent_curr=indent_curr,
hint_child_placeholder=_enqueue_hint_child(
# Python expression yielding the value of the
# current pith assigned to a local variable
# efficiently reused by code generated by the
# following iteration.
#
# Note this child hint is guaranteed to be followed
# by at least one more test expression referencing
# this local variable. Why? Because the "typing"
# module forces metahints to be subscripted by one
# child hint and one or more arbitrary objects. By
# deduction, this metahint must thus be subscripted
# by one or more arbitrary objects. Ergo, we
# needn't explicitly validate that constraint here.
pith_curr_assign_expr),
))
# For each beartype-specific argument subscripting this hint,
# excluding the first beartype-agnostic argument guaranteed to
# be a type...
for hint_child in get_hint_pep593_metadata(hint_curr):
# If this argument is *NOT* beartype-specific, raise an
# exception. Since the second argument was
# beartype-specific, all additional arguments are
# explicitly required to be beartype-specific as well for
# consistency and safety.
if not isinstance(hint_child, _SubscriptedIs):
raise BeartypeDecorHintPep593Exception(
f'{hint_curr_label} {repr(hint_curr)} subscripted '
f'by both @beartype-specific and -agnostic '
f'objects '
f'(i.e., {represent_object(hint_child)} not '
f'subscription of "beartype.vale.Is*" class).'
)
# Else, this argument is beartype-specific.
# Generate and append efficient code type-checking this
# validator by embedding this code as is.
func_curr_code += (
_PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_CHILD_format(
indent_curr=indent_curr,
# Python expression formatting the current pith
# into the "{obj}" variable already embedded by
# that class into this code.
hint_child_expr=hint_child._is_valid_code.format(
indent=indent_child,
obj=pith_curr_assigned_expr,
),
))
# Generate locals safely merging the locals required by
# both this validator code *AND* the current code
# type-checking this entire root hint.
update_mapping(
func_wrapper_locals, hint_child._is_valid_code_locals)
# Munge this code to...
func_curr_code = (
# Strip the erroneous " and" suffix appended by the last
# child hint from this code.
f'{func_curr_code[:_LINE_RSTRIP_INDEX_AND]}'
# Suffix this code by the substring suffixing all such
# code.
f'{_PEP593_CODE_CHECK_HINT_SUBSCRIPTEDIS_SUFFIX_format(indent_curr=indent_curr)}'
)
# Else, this hint is *NOT* a metahint.
# ..............{ FORWARDREF }..............
# If this hint is a forward reference...
elif hint_curr_sign is HintSignForwardRef:
# Possibly unqualified classname referred to by this hint.
hint_curr_forwardref_classname = get_hint_forwardref_classname(
hint_curr)
# If this classname contains one or more "." characters, this
# classname is fully-qualified. In this case...
if '.' in hint_curr_forwardref_classname:
# Pass the beartypistry singleton as a private
# "__beartypistry" parameter to this wrapper function.
func_wrapper_locals[ARG_NAME_TYPISTRY] = bear_typistry
# Python expression evaluating to this class when accessed
# via the private "__beartypistry" parameter.
hint_curr_expr = register_typistry_forwardref(
hint_curr_forwardref_classname)
# Else, this classname is unqualified. In this case...
else:
# If the set of unqualified classnames referred to by all
# relative forward references has yet to be instantiated,
# do so.
if hints_forwardref_class_basename is None:
hints_forwardref_class_basename = set()
# In any case, this set now exists.
# Add this unqualified classname to this set.
hints_forwardref_class_basename.add(
hint_curr_forwardref_classname)
# Placeholder substring to be replaced by the caller with a
# Python expression evaluating to this unqualified
# classname canonicalized relative to the module declaring
# the currently decorated callable when accessed via the
# private "__beartypistry" parameter.
hint_curr_expr = (
f'{PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_PREFIX}'
f'{hint_curr_forwardref_classname}'
f'{PEP_CODE_HINT_FORWARDREF_UNQUALIFIED_PLACEHOLDER_SUFFIX}'
)
# Code type-checking the current pith against this class.
func_curr_code = _PEP_CODE_CHECK_HINT_NONPEP_TYPE_format(
pith_curr_expr=pith_curr_expr,
hint_curr_expr=hint_curr_expr,
)
# Else, this hint is *NOT* a forward reference.
# ..............{ GENERIC or PROTOCOL }..............
# If this hint is either a:
# * PEP 484-compliant generic (i.e., user-defined class subclassing
# a combination of one or more of the "typing.Generic" superclass
# and other "typing" non-class pseudo-superclasses).
# * PEP 544-compliant protocol (i.e., class subclassing a
# combination of one or more of the "typing.Protocol" superclass
# and other "typing" non-class pseudo-superclasses).
# * PEP 585-compliant generic (i.e., user-defined class subclassing
# at least one non-class PEP 585-compliant pseudo-superclasses).
# Then this hint is a PEP-compliant generic. In this case...
elif hint_curr_sign is HintSignGeneric:
#FIXME: *THIS IS NON-IDEAL.* Ideally, we should propagate *ALL*
#child type hints subscripting a generic up to *ALL*
#pseudo-superclasses of that generic (e.g., the "int" child
#hint subscripting a parent hint "MuhGeneric[int]" of type
#"class MuhGeneric(list[T]): pass" up to its "list[T]"
#pseudo-superclass).
#
#For now, we just strip *ALL* child type hints subscripting a
#generic with the following call. This suffices, because we
#just need this to work. So it goes, uneasy code bedfellows.
# If this hint is *NOT* a class, this hint is *NOT* an
# unsubscripted generic but could still be a generic
# subscripted by one or more PEP-compliant child type hints.
#
# To decide, reduce this hint to the object originating this
# hint if any, enabling the subsequent assertion to assert
# whether this origin object is an unsubscripted generic, which
# would then imply this hint to be a subscripted generic. If
# this strikes you as insane, you're not alone.
hint_curr = get_hint_pep_generic_type_or_none(hint_curr)
# Assert this hint to be a class.
assert isinstance(hint_curr, type), (
f'{hint_curr_label} {repr(hint_curr)} generic not class.')
# Tuple of the one or more unerased pseudo-superclasses
# originally listed as superclasses prior to their type erasure
# subclassed by this generic.
hint_childs = get_hint_pep_generic_bases_unerased(hint_curr)
# Initialize the code type-checking the current pith against
# this generic to the substring prefixing all such code.
func_curr_code = _PEP_CODE_CHECK_HINT_GENERIC_PREFIX
# For each pseudo-superclass subclassed by this generic...
for hint_child in hint_childs:
# print(f'hint_child: {repr(hint_child)} {is_hint_pep_type_typing(hint_child)}')
# If this pseudo-superclass is an actual class, this class
# is effectively ignorable. Why? Because the
# "_PEP_CODE_CHECK_HINT_GENERIC_PREFIX" snippet leveraged
# above already type-checks this pith against the generic
# subclassing this superclass and thus this superclass as
# well with a trivial isinstance() call. In this case, skip
# to the next pseudo-superclass.
if isinstance(hint_child, type):
continue
# Else, this pseudo-superclass is *NOT* an actual class.
#
# If this pseudo-superclass is neither a PEP 585-compliant
# type hint *NOR* a PEP-compliant type hint defined by the
# "typing" module, this pseudo-superclass *MUST* be a PEP
# 585-noncompliant user-defined pseudo-superclass. In this
# case, reduce this pseudo-superclass to the corresponding
# actual superclass originating this pseudo-superclass.
#
# Note that:
# * This horrible, irrational, and unintuitive edge case
# arises *ONLY* for user-defined PEP 484-compliant
# generics and PEP 544-compliant protocols subclassing
# another user-defined generic or protocol superclass
# subscripted by one or more type variables: e.g.,
# >>> import typing as t
# >>> class UserProtocol(t.Protocol[t.AnyStr]): pass
# >>> class UserSubprotocol(UserProtocol[str], t.Protocol): pass
# >>> UserSubprotocol.__orig_bases__
# (UserProtocol[bytes], typing.Protocol)
# >>> UserProtocolUnerased = UserSubprotocol.__orig_bases__[0]
# >>> UserProtocolUnerased is UserProtocol
# False
# >>> isinstance(UserProtocolUnerased, type)
# False
# * PEP 585-compliant generics suffer no such issues:
# >>> from beartype._util.hint.pep.proposal.utilhintpep585 import is_hint_pep585_builtin
# >>> class UserGeneric(list[int]): pass
# >>> class UserSubgeneric(UserGeneric[int]): pass
# >>> UserSubgeneric.__orig_bases__
# (UserGeneric[int],)
# >>> UserGenericUnerased = UserSubgeneric.__orig_bases__[0]
# >>> isinstance(UserGenericUnerased, type)
# True
# >>> UserGenericUnerased.__mro__
# (UserGeneric, list, object)
# >>> is_hint_pep585_builtin(UserGenericUnerased)
# True
#
# Walking up the unerased inheritance hierarchy for this
# generic or protocol iteratively visits the user-defined
# generic or protocol pseudo-superclass subscripted by one
# or more type variables. Due to poorly defined obscurities
# in the "typing" implementation, this pseudo-superclass is
# *NOT* actually a class but rather an instance of a
# private "typing" class (e.g., "typing._SpecialForm").
#
# Ergo, this pseudo-superclass will be subsequently
# detected as neither a generic nor "typing" object and
# thus raise exceptions. Our only recourse is to silently
# reduce this hint into the erased superclass to which the
# "typing" module previously transformed this hint (e.g.,
# "UserProtocol" above). This is slightly non-ideal, as
# this erased superclass is an actual class that should
# ideally be ignored rather than redundantly tested against
# the current pith again. Nonetheless, there exists no
# other means of recursing into the possibly relevant
# superclasses of this erased superclass.
#
# Note that, in theory, we could deeply refactor this
# algorithm to support the notion of child hints that
# should be ignored for purposes of type-checking but
# nonetheless recursed into. In practice, the current
# approach only introduces mild runtime inefficiencies
# while preserving sanity throughout this algorithm.
#
# Specifically, perform this awful reduction *ONLY* if
# this child hint is a PEP 484- or 544-compliant
# user-defined pseudo-superclass that is neither...
elif not (
# A PEP 585-compliant pseudo-superclass *NOR*...
is_hint_pep585_builtin(hint_child) or
# A PEP 484- or 544-compliant pseudo-superclass defined
# by the "typing" module.
is_hint_pep_typing(hint_child)
):
hint_child = (
get_hint_pep484_generic_base_erased_from_unerased(
hint_child))
# Else, this pseudo-superclass is defined by the "typing"
# module.
# If this superclass is ignorable, do so.
if is_hint_ignorable(hint_child):
continue
# Else, this superclass is unignorable.
# Generate and append code type-checking this pith against
# this superclass.
func_curr_code += (
_PEP_CODE_CHECK_HINT_GENERIC_CHILD_format(
hint_child_placeholder=_enqueue_hint_child(
# Python expression efficiently reusing the
# value of this pith previously assigned to a
# local variable by the prior prefix.
pith_curr_assigned_expr),
))
# Munge this code to...
func_curr_code = (
# Strip the erroneous " and" suffix appended by the last
# child hint from this code.
f'{func_curr_code[:_LINE_RSTRIP_INDEX_AND]}'
# Suffix this code by the substring suffixing all such
# code.
f'{_PEP_CODE_CHECK_HINT_GENERIC_SUFFIX}'
# Format...
).format(
# Indentation deferred above for efficiency.
indent_curr=indent_curr,
pith_curr_assign_expr=pith_curr_assign_expr,
# Python expression evaluating to this generic type.
hint_curr_expr=add_func_scope_type(
cls=hint_curr,
cls_scope=func_wrapper_locals,
cls_label=_FUNC_WRAPPER_LOCAL_LABEL,
),
)
# print(f'{hint_curr_label} PEP generic {repr(hint)} handled.')
# Else, this hint is *NOT* a generic.
# ..............{ LITERAL }..............
# If this hint is a PEP 586-compliant type hint (i.e., the
# "typing.Literal" singleton subscripted by one or more literal
# objects), this hint is largely useless and thus intentionally
# detected last. Why? Because "typing.Literal" is subscriptable by
# objects that are instances of only *SIX* possible types, which is
# sufficiently limiting as to render this singleton patently absurd
# and a farce that we weep to even implement. In this case...
elif hint_curr_sign is HINT_PEP586_ATTR_LITERAL:
# If this hint does *NOT* comply with PEP 586 despite being a
# "typing.Literal" subscription, raise an exception. *sigh*
die_unless_hint_pep586(hint_curr)
# Else, this hint complies with PEP 586 and is thus subscripted
# by one or more compliant literal objects.
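# For reference, PEP 586 restricts these literal objects to instances of
# exactly six types: "bool", "bytes", "int", "str", "enum.Enum" subclasses,
# and "NoneType" (i.e., the "None" singleton).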
# Initialize the code type-checking the current pith against
# this hint to the substring prefixing all such code.
func_curr_code = _PEP586_CODE_CHECK_HINT_PREFIX_format(
pith_curr_assign_expr=pith_curr_assign_expr,
#FIXME: If "typing.Literal" is ever extended to support
#substantially more types (and thus actually become
#useful), optimize the construction of the "types" set
#below to instead leverage a similar
#"acquire_object_typed(set)" caching solution as that
#currently employed for unions. For now, we only shrug.
# Python expression evaluating to a tuple of the unique
# types of all literal objects subscripting this hint.
hint_child_types_expr=add_func_scope_types(
types=set(
type(hint_child) for hint_child in hint_childs),
types_scope=func_wrapper_locals,
types_label=_FUNC_WRAPPER_LOCAL_LABEL,
),
)
# For each literal object subscripting this hint...
for hint_child in hint_childs:
# Generate and append efficient code type-checking
# this data validator by embedding this code as is.
func_curr_code += _PEP586_CODE_CHECK_HINT_LITERAL_format(
pith_curr_assigned_expr=pith_curr_assigned_expr,
# Python expression evaluating to this literal
# object.
hint_child_expr=add_func_scope_attr(
attr=hint_child,
attr_scope=func_wrapper_locals,
attr_label=_FUNC_WRAPPER_LOCAL_LABEL,
)
)
# Munge this code to...
func_curr_code = (
# Strip the erroneous " or" suffix appended by the last
# child hint from this code.
f'{func_curr_code[:_LINE_RSTRIP_INDEX_OR]}'
# Suffix this code by the substring suffixing this code.
f'{PEP586_CODE_CHECK_HINT_SUFFIX}'
).format(indent_curr=indent_curr)
# Else, this hint is *NOT* a PEP 586-compliant type hint.
# ..............{ UNSUPPORTED }..............
# Else, this hint is neither shallowly nor deeply supported and is
# thus unsupported. Since an exception should have already been
# raised above in this case, this conditional branch should *NEVER* be
# triggered. Nonetheless, raise an exception for safety.
else:
raise BeartypeDecorHintPepUnsupportedException(
f'{hint_curr_label} {repr(hint_curr)} unsupported but '
f'erroneously detected as supported.'
)
# ................{ NON-PEP }................
# Else, this hint is *NOT* PEP-compliant.
#
# ................{ CLASSES }................
# If this hint is a non-"typing" class...
#
# Note that:
#
# * This test is intentionally performed *AFTER* that testing whether
# this hint is PEP-compliant, thus guaranteeing this hint to be a
# PEP-noncompliant non-"typing" class rather than a PEP-compliant
# type hint originating from such a class. Since many hints are both
# PEP-compliant *AND* originate from such a class (e.g., the "List"
# in "List[int]", PEP-compliant but originating from the
# PEP-noncompliant builtin class "list"), testing these hints first
# for PEP-compliance ensures we generate non-trivial code deeply
# type-checking these hints instead of trivial code only shallowly
# type-checking the non-"typing" classes from which they originate.
# * This class is guaranteed to be a subscripted argument of a
# PEP-compliant type hint (e.g., the "int" in "Union[Dict[str, str],
# int]") rather than the root type hint. Why? Because if this class
# were the root type hint, it would have already been passed into a
# faster submodule generating PEP-noncompliant code instead.
elif isinstance(hint_curr, type):
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAVEATS: Synchronize changes here with similar logic above.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Code type-checking the current pith against this type.
func_curr_code = _PEP_CODE_CHECK_HINT_NONPEP_TYPE_format(
pith_curr_expr=pith_curr_expr,
# Python expression evaluating to this type.
hint_curr_expr=add_func_scope_type(
cls=hint_curr,
cls_scope=func_wrapper_locals,
cls_label=_FUNC_WRAPPER_LOCAL_LABEL,
),
)
# Else, this hint is neither PEP-compliant *NOR* a class. In this
# case, raise an exception. Note that:
#
# * This should *NEVER* happen, as the "typing" module goes to great
# lengths to validate the integrity of PEP-compliant types at
# declaration time.
# * The higher-level die_unless_hint_nonpep() validator is
# intentionally *NOT* called here, as doing so would permit both:
# * PEP-noncompliant forward references, which could admittedly be
# disabled by passing "is_str_valid=False" to that call.
# * PEP-noncompliant tuple unions, which currently *CANNOT* be
# disabled by passing such an option to that call.
else:
raise BeartypeDecorHintPepException(
f'{hint_curr_label} {repr(hint_curr)} not PEP-compliant '
f'(e.g., neither "typing" object nor non-"typing" class).'
)
# ................{ CLEANUP }................
# Inject this code into the body of this wrapper.
func_wrapper_code = replace_str_substrs(
text=func_wrapper_code,
old=hint_curr_placeholder,
new=func_curr_code,
)
# Nullify the metadata describing the previously visited hint in this
# list for safety.
hints_meta[hints_meta_index_curr] = None
# Increment the 0-based index of metadata describing the next visited
# hint in the "hints_meta" list *BEFORE* visiting this hint but *AFTER*
# performing all other logic for the currently visited hint, implying
# this should be the last statement of this iteration.
hints_meta_index_curr += 1
# ..................{ CLEANUP }..................
# Release the fixed list of all such metadata.
release_fixed_list(hints_meta)
# If the Python code snippet to be returned remains unchanged from its
# initial value, the breadth-first search above failed to generate code. In
# this case, raise an exception.
#
# Note that this test is inexpensive, as the third character of the
# "func_root_code" code snippet is guaranteed to differ from that of
# "func_wrapper_code" code snippet if this function behaved as expected,
# which it should have... but may not have, which is why we're testing.
if func_wrapper_code == func_root_code:
raise BeartypeDecorHintPepException(
f'{HINT_ROOT_LABEL} {repr(hint_root)} not type-checked.')
# Else, the breadth-first search above successfully generated code.
# ..................{ CODE ~ locals }..................
# PEP-compliant code snippet passing the value of the random integer
# previously generated for the current call to the exception-handling
# function call embedded in the "_PEP_CODE_CHECK_HINT_ROOT_SUFFIX" snippet,
# defaulting to passing *NO* such integer.
func_wrapper_code_random_int_if_any = ''
# If type-checking the root pith requires a pseudo-random integer...
if is_var_random_int_needed:
# Pass this integer to the function raising exceptions.
func_wrapper_code_random_int_if_any = (
_PEP_CODE_CHECK_HINT_ROOT_SUFFIX_RANDOM_INT)
# Pass the random.getrandbits() function required to generate this
# integer to this wrapper function as an optional hidden parameter.
func_wrapper_locals[_ARG_NAME_GETRANDBITS] = getrandbits
# ..................{ CODE ~ suffix }..................
# Suffix this code by a Python code snippet raising a human-readable
# exception when the root pith violates the root type hint.
func_wrapper_code += _PEP_CODE_CHECK_HINT_ROOT_SUFFIX_format(
random_int_if_any=func_wrapper_code_random_int_if_any)
# Return all metadata required by higher-level callers.
return (
func_wrapper_code,
func_wrapper_locals,
# Tuple of the unqualified classnames referred to by all relative
# forward references visitable from this hint converted from that set
# to reduce space consumption after memoization by @callable_cached,
# defined as either...
(
# If *NO* relative forward references are visitable from this root
# hint, the empty tuple;
()
if hints_forwardref_class_basename is None else
# Else, that set converted into a tuple.
tuple(hints_forwardref_class_basename)
),
)
```
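The union and sequence branches above lean on two "typing" behaviors that the
comments reference but never demonstrate: single-argument unions collapse to
their argument, and variadic tuples expose a literal ellipsis as their second
argument. A minimal standalone sketch (not beartype code; it assumes Python
3.8+ for "typing.get_args") illustrating both:
```python
# Illustrative sketch only; not beartype code. Assumes Python 3.8+, which
# provides "typing.get_args()".
from typing import Tuple, Union, get_args

# "typing" reduces unions of one argument to that argument, which is why the
# union branch above may assume two or more child hints.
assert Union[int] is int

# Variadic tuples ("Tuple[X, ...]") expose a literal Ellipsis as their second
# argument, the exact condition the sequence branch above tests for.
assert get_args(Tuple[int, ...]) == (int, Ellipsis)

# Fixed-length tuples expose one child hint per item instead.
assert get_args(Tuple[int, str, bytes]) == (int, str, bytes)
```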
#### File: beartype/_decor/_data.py
```python
import inspect
from beartype._decor._code.codesnip import (
ARG_NAME_FUNC,
ARG_NAME_RAISE_EXCEPTION,
)
from beartype._decor._error.errormain import raise_pep_call_exception
from beartype._util.func.utilfuncscope import CallableScope
from collections.abc import Callable
from inspect import Signature
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ CLASSES }....................
class BeartypeData(object):
'''
**Beartype data** (i.e., object aggregating *all* metadata for the callable
currently being decorated by the :func:`beartype.beartype` decorator).
Design
----------
This is the *only* object instantiated by that decorator for that callable,
substantially reducing both space and time costs. That decorator then
passes this object to most lower-level functions, which then:
#. Access read-only instance variables of this object as input.
#. Modify writable instance variables of this object as output. In
particular, these lower-level functions typically accumulate pure-Python
code comprising the generated wrapper function type-checking the
decorated callable by setting various instance variables of this object.
Caveats
----------
**This object cannot be used to communicate state between low-level
memoized callables** (e.g.,
:func:`beartype._decor._code._pep._pephint.pep_code_check_hint`) **and
higher-level callables** (e.g.,
:func:`beartype._decor._code.codemain.generate_code`). Instead, memoized
callables *must* return that state as additional return values up the call
stack to those higher-level callables. By definition, memoized callables
are *not* recalled on subsequent calls passed the same parameters. Since
only the first call to those callables passed those parameters would set
the appropriate state on this object intended to be communicated to
higher-level callables, *all* subsequent calls would subtly fail with
difficult-to-diagnose issues. See also `issue #5`_, which exhibited
this very complaint.
.. _issue #5:
https://github.com/beartype/beartype/issues/5
Attributes
----------
func : Optional[Callable]
**Decorated callable** (i.e., callable currently being decorated by the
:func:`beartype.beartype` decorator) if the :meth:`reinit` method has
been called *or* ``None`` otherwise.
func_codeobj : Optional[CallableCodeObjectType]
**Code object** (i.e., instance of the :class:`CodeType` type)
underlying the decorated callable if the :meth:`reinit` method has been
called *or* ``None`` otherwise.
func_sig : Optional[inspect.Signature]
:class:`inspect.Signature` object describing this callable's signature if the
:meth:`reinit` method has been called *or* ``None`` otherwise.
func_wrapper_locals : CallableScope
**Local scope** (i.e., dictionary mapping from the name to value of
each attribute referenced in the signature) of this wrapper function
required by this code snippet.
func_wrapper_name : Optional[str]
Machine-readable name of the wrapper function to be generated and
returned by this decorator if the :meth:`reinit` method has been called
*or* ``None`` otherwise. To efficiently (albeit imperfectly) avoid
clashes with existing attributes of the module defining that function,
this name is obfuscated while still preserving human-readability.
.. _PEP 563:
https://www.python.org/dev/peps/pep-0563
'''
# ..................{ CLASS VARIABLES }..................
# Slot all instance variables defined on this object to minimize the time
# complexity of both reading and writing variables across frequently
# called @beartype decorations. Slotting has been shown to reduce read and
# write costs by approximately ~10%, which is non-trivial.
__slots__ = (
'func',
#FIXME: Uncomment if needed.
# 'func_codeobj',
'func_sig',
'func_wrapper_locals',
'func_wrapper_name',
)
# Coerce instances of this class to be unhashable, preventing spurious
# issues when accidentally passing these instances to memoized callables by
# implicitly raising a "TypeError" exception on the first call to such a
# callable. There exists no tangible benefit to permitting these instances
# to be hashed (and thus also cached), since these instances are:
# * Specific to the decorated callable and thus *NOT* safely cacheable
# across functions applying to different decorated callables.
# * Already cached via the acquire_object_typed() function called by the
# "beartype._decor.main" submodule.
#
# See also:
# https://docs.python.org/3/reference/datamodel.html#object.__hash__
__hash__ = None # type: ignore[assignment]
# ..................{ INITIALIZERS }..................
def __init__(self) -> None:
'''
Initialize this metadata by nullifying all instance variables.
Caveats
----------
**This class is not intended to be explicitly instantiated.** Instead,
callers are expected to (in order):
#. Acquire cached instances of this class via the
:mod:`beartype._util.cache.pool.utilcachepoolobjecttyped` submodule.
#. Call the :meth:`reinit` method on these instances to properly
initialize these instances.
'''
# Nullify all remaining instance variables.
self.func: Callable = None # type: ignore[assignment]
#FIXME: Uncomment if needed.
# self.func_codeobj: CallableCodeObjectType = None # type: ignore[assignment]
self.func_sig: Signature = None # type: ignore[assignment]
self.func_wrapper_locals: CallableScope = {}
self.func_wrapper_name: str = None # type: ignore[assignment]
def reinit(self, func: Callable) -> None:
'''
Reinitialize this metadata from the passed callable, typically after
acquisition of a previously cached instance of this class from the
:mod:`beartype._util.cache.pool.utilcachepoolobject` submodule.
If `PEP 563`_ is conditionally active for this callable, this function
additionally resolves all postponed annotations on this callable to
their referents (i.e., the intended annotations to which those
postponed annotations refer).
Parameters
----------
func : Callable
Callable currently being decorated by :func:`beartype.beartype`.
Raises
----------
BeartypeDecorHintPep563Exception
If evaluating a postponed annotation on this callable raises an
exception (e.g., due to that annotation referring to local state no
longer accessible from this deferred evaluation).
BeartypeDecorWrappeeException
If this callable is neither a pure-Python function *nor* method;
equivalently, if this callable is either C-based *or* a class or
object defining the ``__call__()`` dunder method.
.. _PEP 563:
https://www.python.org/dev/peps/pep-0563
'''
assert callable(func), f'{repr(func)} uncallable.'
# Avoid circular import dependencies.
from beartype._decor._pep563 import resolve_hints_pep563_if_active
# Callable currently being decorated.
self.func = func
#FIXME: Uncomment if needed.
# Code object underlying that callable unwrapped if that callable is
# pure-Python *OR* raise an exception otherwise.
# self.func_codeobj = get_func_unwrapped_codeobj(
# func=func, exception_cls=BeartypeDecorWrappeeException)
# Efficiently reduce this local scope back to the dictionary of all
# parameters unconditionally required by *ALL* wrapper functions.
self.func_wrapper_locals.clear()
self.func_wrapper_locals[ARG_NAME_FUNC] = func
self.func_wrapper_locals[ARG_NAME_RAISE_EXCEPTION] = (
raise_pep_call_exception)
# Machine-readable name of the wrapper function to be generated.
self.func_wrapper_name = func.__name__
# Nullify all remaining attributes for safety *BEFORE* passing this
# object to any functions (e.g., resolve_hints_pep563_if_active()).
self.func_sig = None # type: ignore[assignment]
# Resolve all postponed hints on this callable if any *BEFORE* parsing
# the actual hints these postponed hints refer to.
resolve_hints_pep563_if_active(self)
# "Signature" instance encapsulating this callable's signature,
# dynamically parsed by the stdlib "inspect" module from this callable.
self.func_sig = inspect.signature(func)
```
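A brief aside on the unhashability trick used by "BeartypeData" above:
assigning "__hash__ = None" renders instances unhashable, so accidentally
passing one to a memoized callable (e.g., anything wrapped by
"functools.lru_cache") fails immediately instead of being silently cached. A
minimal sketch of that behavior, with class and function names invented for
illustration rather than taken from beartype:
```python
# Illustrative sketch only; names are invented for the example.
from functools import lru_cache

class UnhashableData(object):
    # Slotted like "BeartypeData" above; the single slot is incidental here.
    __slots__ = ('func',)

    # Coerce instances to be unhashable, exactly as "BeartypeData" does.
    __hash__ = None  # type: ignore[assignment]

@lru_cache(maxsize=None)
def memoized(obj):
    return id(obj)

try:
    memoized(UnhashableData())
except TypeError as exception:
    # Prints something like: Refused to cache: unhashable type: 'UnhashableData'
    print(f'Refused to cache: {exception}')
```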
#### File: _decor/_error/_errorsleuth.py
```python
from beartype.roar._roarexc import _BeartypeCallHintPepRaiseException
from beartype._cave._cavefast import NoneType
from beartype._cave._cavemap import NoneTypeOr
from beartype._util.hint.pep.proposal.utilhintpep484 import (
get_hint_pep484_newtype_class,
is_hint_pep484_newtype,
)
from beartype._util.hint.pep.proposal.utilhintpep544 import (
get_hint_pep544_io_protocol_from_generic,
is_hint_pep544_io_generic,
)
from beartype._util.hint.pep.proposal.utilhintpep593 import (
get_hint_pep593_metahint,
is_hint_pep593,
is_hint_pep593_beartype,
)
from beartype._util.hint.pep.utilhintpepget import (
get_hint_pep_args,
get_hint_pep_generic_bases_unerased,
get_hint_pep_sign,
)
from beartype._util.hint.pep.utilhintpeptest import (
is_hint_pep,
is_hint_pep_generic,
is_hint_pep_tuple_empty,
is_hint_pep_typevar,
)
from beartype._util.hint.utilhinttest import (
is_hint_forwardref,
is_hint_ignorable,
)
from typing import Any, Callable, NoReturn, Optional, Tuple
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ CLASSES }....................
class CauseSleuth(object):
'''
**Type-checking error cause sleuth** (i.e., object recursively fabricating
the human-readable string describing the failure of the pith associated
with this object to satisfy this PEP-compliant type hint also associated
with this object).
Attributes
----------
cause_indent : str
**Indentation** (i.e., string of zero or more spaces) preceding each
line of the string returned by this getter if this string spans
multiple lines *or* ignored otherwise (i.e., if this string is instead
embedded in the current line).
exception_label : str
Human-readable label describing the parameter or return value from
which this object originates, typically embedded in exceptions raised
from this getter in the event of unexpected runtime failure.
func : Callable
Decorated callable generating this type-checking error.
hint_sign : Any
Unsubscripted :mod:`typing` attribute identifying this hint if this hint
is PEP-compliant *or* ``None`` otherwise.
hint_childs : Optional[Tuple]
Either:
* If this hint is PEP-compliant:
* If this hint is a generic, tuple of the one or more unerased
pseudo-superclasses (i.e., :mod:`typing` objects originally listed
as superclasses prior to their implicit type erasure by the
:mod:`typing` module) subclassed by this generic.
* Else, the possibly empty tuple of all arguments subscripting this
hint if this
* Else, ``None``.
pith : Any
Arbitrary object to be validated.
random_int: Optional[int]
**Pseudo-random integer** (i.e., unsigned 32-bit integer
pseudo-randomly generated by the parent :func:`beartype.beartype`
wrapper function in type-checking randomly indexed container items by
the current call to that function) if that function generated such an
integer *or* ``None`` otherwise (i.e., if that function generated *no*
such integer). See the same parameter accepted by the higher-level
:func:`beartype._decor._error.errormain.raise_pep_call_exception`
function for further details.
Attributes (Private)
----------
_hint : Any
Type hint to validate this object against.
'''
# ..................{ CLASS VARIABLES }..................
# Slot *ALL* instance variables defined on this object to both:
# * Prevent accidental declaration of erroneous instance variables.
# * Minimize space and time complexity.
__slots__ = (
'cause_indent',
'exception_label',
'func',
'hint_sign',
'hint_childs',
'pith',
'random_int',
'_hint',
)
_INIT_PARAM_NAMES = frozenset((
'cause_indent',
'exception_label',
'func',
'hint',
'pith',
'random_int',
))
'''
Frozen set of the names of all parameters accepted by the :meth:`__init__`
method, defined as a set to enable efficient membership testing.
'''
# ..................{ INITIALIZERS }..................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAUTION: Whenever adding, deleting, or renaming any parameter accepted by
# this method, make similar changes to the "_INIT_PARAM_NAMES" set above.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def __init__(
self,
func: Callable,
pith: Any,
hint: Any,
cause_indent: str,
exception_label: str,
random_int: int,
) -> None:
'''
Initialize this object.
'''
assert callable(func), f'{repr(func)} not callable.'
assert isinstance(cause_indent, str), (
f'{repr(cause_indent)} not string.')
assert isinstance(exception_label, str), (
f'{repr(exception_label)} not string.')
assert isinstance(random_int, NoneTypeOr[int]), (
f'{repr(random_int)} not integer or "None".')
# Classify all passed parameters.
self.func = func
self.pith = pith
self.cause_indent = cause_indent
self.exception_label = exception_label
self.random_int = random_int
# Nullify all remaining parameters for safety.
self.hint_sign: Any = None
self.hint_childs: Tuple = None # type: ignore[assignment]
# Classify this hint *AFTER* initializing all parameters above.
self.hint = hint
# ..................{ PROPERTIES }..................
@property
def hint(self) -> Any:
'''
Type hint to validate this object against.
'''
return self._hint
@hint.setter
def hint(self, hint: Any) -> None:
'''
Set the type hint to validate this object against.
'''
# ................{ REDUCTION }................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# CAVEATS: Synchronize changes here with the corresponding block of the
# beartype._decor._code._pep._pephint.pep_code_check_hint() function.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# This logic reduces the currently visited hint to an arbitrary object
# associated with this hint when this hint conditionally satisfies any
# of various conditions.
#
# ................{ REDUCTION ~ pep 484 ~ none }................
# If this is the PEP 484-compliant "None" singleton, reduce this hint
# to the type of that singleton. While not explicitly defined by the
# "typing" module, PEP 484 explicitly supports this singleton:
# When used in a type hint, the expression None is considered
# equivalent to type(None).
if hint is None:
hint = NoneType
# ................{ REDUCTION ~ pep 593 }................
# If this is a PEP 593-compliant type metahint, ignore all annotations
# on this hint (i.e., "hint_curr.__metadata__" tuple) by reducing this
# hint to its origin (e.g., "str" in "Annotated[str, 50, False]").
elif is_hint_pep593(hint):
# If the first argument subscripting this metahint is
# beartype-agnostic (e.g., *NOT* an instance of the
# "beartype.vale._SubscriptedIs" class produced by subscripting the
# "Is" class), ignore all annotations on this hint by reducing this
# hint to its origin (e.g., "str" in "Annotated[str, 50, False]").
if not is_hint_pep593_beartype(hint):
hint = get_hint_pep593_metahint(hint)
# Else, that argument is beartype-specific. In this case, preserve
# this hint as is for subsequent handling below.
# ................{ REDUCTION ~ pep 544 }................
# If this is a PEP 484-compliant IO generic base class *AND* the active
# Python interpreter targets at least Python >= 3.8 and thus supports
# PEP 544-compliant protocols, reduce this functionally useless hint to
# the corresponding functionally useful beartype-specific PEP
# 544-compliant protocol implementing this hint.
#
# Note that PEP 484-compliant IO generic base classes are technically
# usable under Python < 3.8 (e.g., by explicitly subclassing those
# classes from third-party classes). Ergo, we can neither safely emit
# warnings nor raise exceptions on visiting these classes under *ANY*
# Python version.
elif is_hint_pep544_io_generic(hint):
hint = get_hint_pep544_io_protocol_from_generic(hint)
# ................{ REDUCTION ~ pep 484 ~ new type }................
# If this is a PEP 484-compliant new type hint, reduce this hint to the
# user-defined class aliased by this hint. Although this logic could
# also be performed below, doing so here simplifies matters.
elif is_hint_pep484_newtype(hint):
hint = get_hint_pep484_newtype_class(hint)
# ................{ REDUCTION ~ end }................
# If this hint is PEP-compliant...
if is_hint_pep(hint):
# Arbitrary object uniquely identifying this hint.
self.hint_sign = get_hint_pep_sign(hint)
# Tuple of either...
self.hint_childs = (
# If this hint is a generic, the one or more unerased
# pseudo-superclasses originally subclassed by this hint.
get_hint_pep_generic_bases_unerased(hint)
if is_hint_pep_generic(hint) else
# Else, the zero or more arguments subscripting this hint.
get_hint_pep_args(hint)
)
# Classify this hint *AFTER* all other assignments above.
self._hint = hint
# ..................{ GETTERS }..................
def get_cause_or_none(self) -> Optional[str]:
'''
Human-readable string describing the failure of this pith to satisfy
this PEP-compliant type hint if this pith fails to satisfy this hint
*or* ``None`` otherwise (i.e., if this pith satisfies this hint).
Design
----------
This getter is intentionally generalized to support objects both
satisfying and *not* satisfying hints as equally valid use cases. While
the parent :func:`.errormain.raise_pep_call_exception` function
calling this getter is *always* passed an object *not* satisfying the
passed hint, this getter is under no such constraints. Why? Because
this getter is also called to find which of an arbitrary number of
objects transitively nested in the object passed to
:func:`.errormain.raise_pep_call_exception` fails to satisfy the
corresponding hint transitively nested in the hint passed to that
function.
For example, consider the PEP-compliant type hint ``List[Union[int,
str]]`` describing a list whose items are either integers or strings
and the list ``list(range(256)) + [False,]`` consisting of the integers
0 through 255 followed by boolean ``False``. Since this list is a
standard sequence, the
:func:`._peperrorsequence.get_cause_or_none_sequence_standard`
function must decide the cause of this list's failure to comply with
this hint by finding the list item that is neither an integer nor a
string, implemented by iteratively passing each list item to the
:func:`._peperrorunion.get_cause_or_none_union` function. Since
the first 256 items of this list are integers satisfying this hint,
:func:`._peperrorunion.get_cause_or_none_union` returns
``None`` to
:func:`._peperrorsequence.get_cause_or_none_sequence_standard`
before finally finding the non-compliant boolean item and returning the
human-readable cause.
Returns
----------
Optional[str]
Either:
* If this object fails to satisfy this hint, human-readable string
describing the failure of this object to do so.
* Else, ``None``.
Raises
----------
_BeartypeCallHintPepRaiseException
If this type hint is either:
* PEP-noncompliant (e.g., tuple union).
* PEP-compliant but no getter function has been implemented to
handle this category of PEP-compliant type hint yet.
'''
# Getter function returning the desired string.
get_cause_or_none = None
# If this hint is ignorable, all possible objects satisfy this hint,
# implying this hint *CANNOT* by definition be the cause of this
# failure. In this case, immediately report None.
if is_hint_ignorable(self.hint):
return None
# Else, this hint is unignorable.
#
# If *NO* sign uniquely identifies this hint, this hint is either
# PEP-noncompliant *OR* only contextually PEP-compliant in certain
# specific use cases. In either case...
elif self.hint_sign is None:
# If this hint is a tuple union...
if isinstance(self.hint, tuple):
# Avoid circular import dependencies.
from beartype._decor._error._errortype import (
get_cause_or_none_types)
# Defer to the getter function specific to tuple unions.
get_cause_or_none = get_cause_or_none_types
# Else, this hint is *NOT* a tuple union. In this case, assume this
# hint to be an isinstanceable class. If this is *NOT* the case,
# the getter deferred to below raises a human-readable exception.
else:
# Avoid circular import dependencies.
from beartype._decor._error._errortype import (
get_cause_or_none_type)
# Defer to the getter function specific to classes.
get_cause_or_none = get_cause_or_none_type
# Else, this hint is PEP-compliant.
#
# If this PEP-compliant hint is its own unsubscripted "typing"
# attribute (e.g., "typing.List" rather than "typing.List[str]") and is
# thus subscripted by *NO* child hints, we assume this hint to
# originate from an origin type. In this case...
elif self.hint is self.hint_sign:
# If this is the PEP 484-compliant "typing.NoReturn" type hint
# permitted *ONLY* as a return annotation...
if self.hint is NoReturn:
# Avoid circular import dependencies.
from beartype._decor._error._proposal._errorpep484noreturn import (
get_cause_or_none_noreturn)
# Defer to the getter function specific to this hint.
get_cause_or_none = get_cause_or_none_noreturn
# Else, this hint is *NOT* "typing.NoReturn". In this case, assume
# this is a standard PEP-compliant type hint supported by both
# parameters and return values originating from an origin type.
else:
# Avoid circular import dependencies.
from beartype._decor._error._errortype import (
get_cause_or_none_type_origin)
# Defer to the getter function supporting hints originating
# from origin types.
get_cause_or_none = get_cause_or_none_type_origin
# Else, this PEP-compliant hint is *NOT* its own unsubscripted "typing"
# attribute. In this case...
else:
# If this hint is neither...
if not (
# Subscripted by one or more child hints *NOR*...
self.hint_childs or
# An empty fixed-length tuple hint, whose PEP 585 (but *NOT*
# PEP 484)-compliant implementation is subscripted by no child
# hints *NOR*...
is_hint_pep_tuple_empty(self.hint) or
# A forward reference nor type variable, whose designs reside
# well outside the standard "typing" dunder variable API and
# are thus *NEVER* subscripted by child hints...
is_hint_forwardref(self.hint) or
is_hint_pep_typevar(self.hint)
):
# Then this hint should have been subscripted by one or more child
# hints but wasn't. In this case, raise an exception.
raise _BeartypeCallHintPepRaiseException(
f'{self.exception_label} PEP type hint '
f'{repr(self.hint)} unsubscripted.'
)
# Else, this hint is subscripted by one or more child hints (e.g.,
# "typing.List[str]" rather than "typing.List").
# Avoid circular import dependencies.
from beartype._decor._error.errormain import (
PEP_HINT_SIGN_TO_GET_CAUSE_FUNC)
# Getter function returning the desired string for this attribute
# if any *OR* "None" otherwise.
get_cause_or_none = PEP_HINT_SIGN_TO_GET_CAUSE_FUNC.get(
self.hint_sign, None)
# If no such function has been implemented to handle this attribute
# yet, raise an exception.
if get_cause_or_none is None:
raise _BeartypeCallHintPepRaiseException(
f'{self.exception_label} PEP type hint '
f'{repr(self.hint)} unsupported (i.e., no '
f'"get_cause_or_none_"-prefixed getter function defined '
f'for this category of hint).'
)
# Else, a getter function has been implemented to handle this
# attribute.
# Call this getter function with ourselves and return the string
# returned by this getter.
return get_cause_or_none(self)
# ..................{ PERMUTERS }..................
def permute(self, **kwargs) -> 'CauseSleuth':
'''
Shallow copy of this object such that each passed keyword argument
overwrites the instance variable of the same name in this copy.
Parameters
----------
Keyword arguments of the same name and type as instance variables of
this object (e.g., ``hint``, ``pith``).
Returns
----------
CauseSleuth
Shallow copy of this object such that each keyword argument
overwrites the instance variable of the same name in this copy.
Raises
----------
_BeartypeCallHintPepRaiseException
If the name of any passed keyword argument is *not* the name of an
existing instance variable of this object.
Examples
----------
>>> sleuth = CauseSleuth(
... pith=[42,],
... hint=typing.List[int],
... cause_indent='',
... exception_label='List of integers',
... )
>>> sleuth_copy = sleuth.permute(pith=[24,])
>>> sleuth_copy.pith
[24,]
>>> sleuth_copy.hint
typing.List[int]
'''
# For the name of each passed keyword argument...
for param_name in kwargs.keys():
# If this name is *NOT* that of a parameter accepted by the
# __init__() method, raise an exception.
if param_name not in self._INIT_PARAM_NAMES:
raise _BeartypeCallHintPepRaiseException(
f'{self.__class__}.__init__() parameter '
f'{param_name} unrecognized.'
)
# For the name of each parameter accepted by the __init__() method...
for param_name in self._INIT_PARAM_NAMES:
# If this parameter was *NOT* explicitly passed by the caller,
# default this parameter to its current value from this object.
if param_name not in kwargs:
kwargs[param_name] = getattr(self, param_name)
# Return a new instance of this class initialized with these arguments.
return CauseSleuth(**kwargs)
```
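The `permute()` method above follows a reusable idiom: shallow-copy an object while overriding selected instance variables by keyword. Below is a minimal, self-contained sketch of that idiom with hypothetical names (`Config`, `host`, `port`, `timeout`); it is illustrative only and not part of beartype.
```python
class Config(object):
    '''Toy object supporting permute()-style shallow copies (illustrative only).'''

    _INIT_PARAM_NAMES = frozenset(('host', 'port', 'timeout'))

    def __init__(self, host: str, port: int, timeout: float) -> None:
        self.host = host
        self.port = port
        self.timeout = timeout

    def permute(self, **kwargs) -> 'Config':
        # Reject keywords that do not name an __init__() parameter.
        for param_name in kwargs:
            if param_name not in self._INIT_PARAM_NAMES:
                raise TypeError(
                    f'Config.__init__() parameter {param_name} unrecognized.')
        # Default each unpassed parameter to its current value on this object.
        for param_name in self._INIT_PARAM_NAMES:
            kwargs.setdefault(param_name, getattr(self, param_name))
        # Return a new instance initialized with the merged arguments.
        return Config(**kwargs)

base = Config(host='localhost', port=8080, timeout=2.5)
copy = base.permute(port=9090)
assert (copy.host, copy.port, copy.timeout) == ('localhost', 9090, 2.5)
```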
#### File: hint/a90_core/test_a90_utilhinttest.py
```python
from pytest import raises
# ....................{ TESTS }....................
def test_die_unless_hint() -> None:
'''
Test the :func:`beartype._util.hint.utilhinttest.die_unless_hint`
validator.
'''
# Defer heavyweight imports.
from beartype.roar import (
BeartypeDecorHintNonPepException,
BeartypeDecorHintPepUnsupportedException,
)
from beartype._util.hint.utilhinttest import die_unless_hint
from beartype_test.a00_unit.data.hint.data_hint import (
NOT_HINTS, HINTS_NONPEP)
from beartype_test.a00_unit.data.hint.pep.data_hintpep import (
HINTS_PEP_META)
# Assert this function accepts PEP-noncompliant type hints.
for nonhint_pep in HINTS_NONPEP:
die_unless_hint(nonhint_pep)
# Assert this function...
for hint_pep_meta in HINTS_PEP_META:
# Accepts supported PEP-compliant type hints.
if hint_pep_meta.is_supported:
die_unless_hint(hint_pep_meta.hint)
# Rejects unsupported PEP-compliant type hints.
else:
with raises(BeartypeDecorHintPepUnsupportedException):
die_unless_hint(hint_pep_meta.hint)
# Assert this function rejects objects *NOT* supported as either
# PEP-noncompliant or -compliant type hints.
for non_hint in NOT_HINTS:
with raises(BeartypeDecorHintNonPepException):
die_unless_hint(non_hint)
def test_is_hint() -> None:
'''
Test the :func:`beartype._util.hint.utilhinttest.is_hint` tester.
'''
# Defer heavyweight imports.
from beartype._util.hint.utilhinttest import is_hint
from beartype_test.a00_unit.data.hint.data_hint import NOT_HINTS, HINTS_NONPEP
from beartype_test.a00_unit.data.hint.pep.data_hintpep import HINTS_PEP_META
# Assert this function accepts PEP-noncompliant type hints.
for nonhint_pep in HINTS_NONPEP:
assert is_hint(nonhint_pep) is True
# Assert this function:
# * Accepts supported PEP-compliant type hints.
# * Rejects unsupported PEP-compliant type hints.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint(hint_pep_meta.hint) is hint_pep_meta.is_supported
# Assert this function rejects objects *NOT* supported as either
# PEP-noncompliant or -compliant type hints.
for non_hint in NOT_HINTS:
assert is_hint(non_hint) is False
# Prevent pytest from capturing and displaying all expected non-fatal
# beartype-specific warnings emitted by the is_hint_ignorable() tester.
# @ignore_warnings(BeartypeDecorHintPepIgnorableDeepWarning)
def test_is_hint_ignorable() -> None:
'''
Test the :func:`beartype._util.hint.utilhinttest.is_hint_ignorable` tester.
'''
# Defer heavyweight imports.
from beartype._util.hint.utilhinttest import is_hint_ignorable
from beartype_test.a00_unit.data.hint.data_hint import (
HINTS_IGNORABLE,
HINTS_NONPEP_UNIGNORABLE,
)
from beartype_test.a00_unit.data.hint.pep.data_hintpep import HINTS_PEP_META
# Assert this function accepts ignorable type hints.
for hint_ignorable in HINTS_IGNORABLE:
assert is_hint_ignorable(hint_ignorable) is True
# Assert this function rejects unignorable PEP-noncompliant type hints.
for hint_unignorable in HINTS_NONPEP_UNIGNORABLE:
assert is_hint_ignorable(hint_unignorable) is False
# Assert this function:
# * Accepts unignorable PEP-compliant type hints.
# * Rejects ignorable PEP-compliant type hints.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint_ignorable(hint_pep_meta.hint) is (
hint_pep_meta.is_ignorable)
```
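The tests above are metadata-driven: each hint is described by a metadata object recording the result every tester is expected to return, and each test simply loops over that shared data. A compact sketch of the same pattern with hypothetical names (`HintMeta`, `HINTS_META`, `is_ignorable`), not beartype's actual data model:
```python
from dataclasses import dataclass
from typing import Any, List

@dataclass(frozen=True)
class HintMeta:
    '''Expected tester results for a single type hint (illustrative only).'''
    hint: Any
    is_supported: bool
    is_ignorable: bool

# Shared metadata consumed by several tests.
HINTS_META = (
    HintMeta(hint=List[int], is_supported=True, is_ignorable=False),
    HintMeta(hint=Any, is_supported=True, is_ignorable=True),
)

def is_ignorable(hint: Any) -> bool:
    # Stand-in tester; a real tester would inspect the hint's structure.
    return hint is Any

def test_is_ignorable() -> None:
    # Assert the tester returns the expected result for every hint.
    for hint_meta in HINTS_META:
        assert is_ignorable(hint_meta.hint) is hint_meta.is_ignorable
```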
#### File: a00_unit/a10_pep/test_pep585.py
```python
from pytest import raises
# ....................{ TESTS ~ kind : builtin }....................
def test_is_hint_pep585_builtin() -> None:
'''
Test the
:func:`beartype._util.hint.pep.proposal.utilhintpep585.is_hint_pep585_builtin`
function.
'''
# Defer heavyweight imports.
from beartype._util.hint.pep.proposal.utilhintpep585 import (
is_hint_pep585_builtin)
from beartype_test.a00_unit.data.hint.pep.data_hintpep import (
HINTS_PEP_META)
# Assert this tester accepts only PEP 585-compliant type hints.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint_pep585_builtin(hint_pep_meta.hint) is (
hint_pep_meta.is_pep585_builtin)
# ....................{ TESTS ~ kind : generic }....................
def test_is_hint_pep585_generic() -> None:
'''
Test the
:func:`beartype._util.hint.pep.proposal.utilhintpep585.is_hint_pep585_generic`
function.
'''
# Defer heavyweight imports.
from beartype._util.hint.pep.proposal.utilhintpep585 import (
is_hint_pep585_generic)
from beartype_test.a00_unit.data.hint.pep.data_hintpep import (
HINTS_PEP_META)
# Assert this tester accepts only PEP 585-compliant generics.
for hint_pep_meta in HINTS_PEP_META:
assert is_hint_pep585_generic(hint_pep_meta.hint) is (
hint_pep_meta.is_pep585_generic)
def test_get_hint_pep585_generic_typevars() -> None:
'''
Test the
:func:`beartype._util.hint.pep.proposal.utilhintpep585.get_hint_pep585_generic_typevars`
function.
'''
# Defer heavyweight imports.
from beartype.roar import BeartypeDecorHintPep585Exception
from beartype._util.hint.pep.proposal.utilhintpep585 import (
get_hint_pep585_generic_typevars)
from beartype_test.a00_unit.data.hint.pep.data_hintpep import (
HINTS_PEP_META)
# Assert this getter...
for hint_pep_meta in HINTS_PEP_META:
# If this hint is a PEP 585-compliant generic...
if hint_pep_meta.is_pep585_generic:
# Tuple of all type variables returned by this function.
hint_pep_typevars = get_hint_pep585_generic_typevars(
hint_pep_meta.hint)
# Returns one or more type variables for typevared PEP
# 585-compliant generics.
if hint_pep_meta.is_typevared:
assert isinstance(hint_pep_typevars, tuple)
assert hint_pep_typevars
# *NO* type variables for untypevared PEP 585-compliant generics.
else:
assert hint_pep_typevars == ()
# Raises an exception for objects *NOT* PEP 585-compliant generics.
else:
with raises(BeartypeDecorHintPep585Exception):
get_hint_pep585_generic_typevars(hint_pep_meta.hint)
```
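For context on what these testers distinguish: PEP 484 hints subscript `typing` aliases (e.g., `typing.List[str]`), while PEP 585 hints subscript the builtin container types directly (e.g., `list[str]`), which Python supports from 3.9 onward. A short illustration of that difference, assuming Python >= 3.9:
```python
import types
from typing import List, get_origin

# PEP 484-style hint: a "typing" alias subscripted by a child hint.
hint_pep484 = List[str]

# PEP 585-style hint: the builtin container subscripted directly
# (valid syntax under Python >= 3.9 only).
hint_pep585 = list[str]

# Both hints share the same origin type...
assert get_origin(hint_pep484) is list
assert get_origin(hint_pep585) is list

# ...but only the PEP 585-compliant hint is a "types.GenericAlias" instance,
# which is (roughly) the property such testers detect.
assert isinstance(hint_pep585, types.GenericAlias)
assert not isinstance(hint_pep484, types.GenericAlias)
```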
#### File: data/hint/data_hintref.py
```python
from beartype import beartype
from typing import Union
# ....................{ CALLABLES }....................
# Decorated callable annotated by a PEP-noncompliant fully-qualified forward
# reference referring to a type that has yet to be declared.
TheDarkestForwardRefOfTheYear = (
'beartype_test.a00_unit.data.hint.data_hintref.TheDarkestEveningOfTheYear')
@beartype
def the_woods_are_lovely(dark_and_deep: TheDarkestForwardRefOfTheYear) -> (
TheDarkestForwardRefOfTheYear):
return dark_and_deep
# Decorated callable annotated by a PEP-noncompliant tuple containing both
# standard types and a fully-qualified forward reference referring to a type
# that has yet to be declared.
TheDarkestTupleOfTheYear = (complex, TheDarkestForwardRefOfTheYear, bool)
@beartype
def of_easy_wind(and_downy_flake: TheDarkestTupleOfTheYear) -> (
TheDarkestTupleOfTheYear):
return and_downy_flake
# Decorated callable annotated by a PEP-compliant unnested unqualified forward
# reference referring to a type that has yet to be declared.
@beartype
def stopping_by_woods_on(a_snowy_evening: 'TheDarkestEveningOfTheYear') -> (
'TheDarkestEveningOfTheYear'):
return a_snowy_evening
# Decorated callable annotated by a PEP-compliant nested unqualified forward
# reference referring to a type that has yet to be declared.
TheDarkestUnionOfTheYear = Union[complex, 'TheDarkestEveningOfTheYear', bytes]
@beartype
def but_i_have_promises(to_keep: TheDarkestUnionOfTheYear) -> (
TheDarkestUnionOfTheYear):
return to_keep
# ....................{ CLASSES }....................
# User-defined class previously referred to by forward references above.
class TheDarkestEveningOfTheYear(str): pass
```
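Once `TheDarkestEveningOfTheYear` is finally declared, the forward-referencing callables above become callable. A hedged usage sketch, assuming the `beartype_test` package is importable and that beartype raises some call-time exception for violating arguments (the exact exception type is version-dependent, so it is caught broadly here):
```python
from beartype_test.a00_unit.data.hint import data_hintref

# Instantiate the class that the forward references above refer to.
evening = data_hintref.TheDarkestEveningOfTheYear('miles to go before I sleep')

# The decorated callable resolves its forward reference at call time and
# accepts (and returns) instances of the referenced class...
assert data_hintref.stopping_by_woods_on(evening) is evening

# ...while rejecting arguments of any other type at call time.
try:
    data_hintref.stopping_by_woods_on(0xBEEF)
except Exception as exception:
    print(f'Rejected as expected: {type(exception).__name__}')
```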
#### File: nonpep/proposal/_data_hintnonpepbeartype.py
```python
from beartype_test.a00_unit.data.hint.data_hintmeta import (
NonPepHintMetadata,
PepHintPithSatisfiedMetadata,
PepHintPithUnsatisfiedMetadata,
)
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
'''
Add beartype-specific PEP-noncompliant type hint test data to various
global containers declared by the passed module.
Parameters
----------
data_module : ModuleType
Module to be added to.
'''
# ..................{ TUPLES }..................
# Add beartype-specific PEP-noncompliant test type hints to this list
# global.
data_module.HINTS_NONPEP_META.extend((
# ................{ TUPLE UNION }................
# Tuple union of one standard class.
NonPepHintMetadata(
hint=(str,),
piths_satisfied_meta=(
# String constant.
PepHintPithSatisfiedMetadata('Pinioned coin tokens'),
),
piths_unsatisfied_meta=(
# Byte-string constant.
PepHintPithUnsatisfiedMetadata(
pith=b'Murkily',
# Match that the exception message raised for this pith
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bstr\b',
),
# Match that the exception message raised for this pith
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
),
),
# Tuple union of two or more standard classes.
NonPepHintMetadata(
hint=(int, str),
piths_satisfied_meta=(
# Integer constant.
PepHintPithSatisfiedMetadata(12),
# String constant.
PepHintPithSatisfiedMetadata('Smirk‐opined — openly'),
),
piths_unsatisfied_meta=(
# Byte-string constant.
PepHintPithUnsatisfiedMetadata(
pith=b'Betokening',
# Match that the exception message raised for this object
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bint\b',
r'\bstr\b',
),
# Match that the exception message raised for this object
# does *NOT* contain a newline or bullet delimiter.
exception_str_not_match_regexes=(
r'\n',
r'\*',
),
),
),
),
# ................{ TYPE }................
# Builtin type.
NonPepHintMetadata(
hint=str,
piths_satisfied_meta=(
# String constant.
PepHintPithSatisfiedMetadata('Glassily lassitudinal bȴood-'),
),
piths_unsatisfied_meta=(
# Byte-string constant.
PepHintPithUnsatisfiedMetadata(
pith=b'Stains, disdain-fully ("...up-stairs!"),',
# Match that the exception message raised for this pith
# declares the types *NOT* satisfied by this object.
exception_str_match_regexes=(
r'\bstr\b',
),
# Match that the exception message raised for this pith
# does *NOT* contain...
exception_str_not_match_regexes=(
# A newline.
r'\n',
# A bullet delimiter.
r'\*',
# Descriptive terms applied only to non-builtin types.
r'\bprotocol\b',
# The double-quoted name of this builtin type.
r'"str"',
),
),
),
),
))
```
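The tuple unions exercised above are beartype's PEP-noncompliant shorthand for "an instance of any of these classes". A hedged sketch of such a hint applied to a decorated callable; the exact exception type raised on violation varies by beartype version, so it is caught broadly here:
```python
from beartype import beartype

# Hypothetical callable annotated by a PEP-noncompliant tuple union,
# semantically equivalent to "an int or a str".
@beartype
def double(value: (int, str)) -> (int, str):
    return value * 2

# Objects satisfying either class in the union are accepted...
assert double(21) == 42
assert double('ha') == 'haha'

# ...while objects satisfying neither class are rejected at call time.
try:
    double(b'nope')
except Exception as exception:
    print(f'Rejected as expected: {type(exception).__name__}')
```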
#### File: pep/proposal/_data_hintpep544.py
```python
import pathlib
from abc import abstractmethod
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_8
from beartype_test.a00_unit.data.hint.data_hintmeta import (
PepHintMetadata,
PepHintPithSatisfiedMetadata,
PepHintPithUnsatisfiedMetadata,
)
# ....................{ CONSTANTS }....................
_DATA_HINTPEP544_FILENAME = __file__
'''
Absolute filename of this data submodule, to be subsequently opened for
cross-platform IO testing purposes by the :func:`add_data` function.
'''
# ....................{ ADDERS }....................
def add_data(data_module: 'ModuleType') -> None:
'''
Add `PEP 544`_-compliant type hint test data to various global containers
declared by the passed module.
Parameters
----------
data_module : ModuleType
Module to be added to.
.. _PEP 544:
https://www.python.org/dev/peps/pep-0544
'''
# If the active Python interpreter targets Python < 3.8, this interpreter
# fails to support PEP 544. In this case, reduce to a noop.
if not IS_PYTHON_AT_LEAST_3_8:
return
# Else, the active Python interpreter targets Python >= 3.8 and thus
# supports PEP 544.
# ..................{ IMPORTS }..................
# Defer Python >= 3.8-specific imports.
from beartype._util.data.hint.pep.sign.datapepsigns import HintSignGeneric
from typing import (
BinaryIO,
IO,
Protocol,
SupportsAbs,
SupportsBytes,
SupportsFloat,
SupportsIndex,
SupportsInt,
SupportsRound,
TextIO,
TypeVar,
runtime_checkable,
)
# Type variables.
S = TypeVar('S')
T = TypeVar('T')
# ..................{ PROTOCOLS }..................
# User-defined protocol parametrized by *NO* type variables declaring
# arbitrary concrete and abstract methods.
@runtime_checkable
class ProtocolCustomUntypevared(Protocol):
def alpha(self) -> str:
return 'Of a Spicily sated'
@abstractmethod
def omega(self) -> str: pass
# User-defined protocol parametrized by a type variable declaring arbitrary
# concrete and abstract methods.
@runtime_checkable
class ProtocolCustomTypevared(Protocol[T]):
def alpha(self) -> str:
return 'Gainfully ungiving criticisms, schismatizing Ŧheo‐'
@abstractmethod
def omega(self) -> str: pass
# User-defined class structurally (i.e., implicitly) satisfying *WITHOUT*
# explicitly subclassing this user-defined protocol.
class ProtocolCustomStructural(object):
def alpha(self) -> str:
return "Sufferance's humus excursion, humility’s endurance, an"
def omega(self) -> str:
return 'Surfeit need'
# Instance of this class.
protocol_custom_structural = ProtocolCustomStructural()
# User-defined protocol structurally (i.e., implicitly) satisfying
# *WITHOUT* explicitly subclassing the predefined "typing.SupportsInt"
# abstract base class (ABC).
#
# Note that the implementations of this and *ALL* other predefined "typing"
# protocols (e.g., "typing.SupportsFloat") bundled with older Python
# versions < 3.8 are *NOT* safely type-checkable at runtime. For safety,
# tests against *ALL* protocols including these previously predefined
# protocols *MUST* be isolated to this submodule.
class ProtocolSupportsInt(object):
def __int__(self) -> int:
return 42
# ..................{ SETS }..................
# Add PEP 544-specific deeply ignorable test type hints to that set global.
data_module.HINTS_PEP_IGNORABLE_DEEP.update((
# Parametrizations of the "typing.Protocol" abstract base class (ABC).
Protocol[S, T],
))
# ..................{ TUPLES }..................
# Add PEP 544-specific test type hints to this list global.
data_module.HINTS_PEP_META.extend((
# ................{ GENERICS ~ io }................
# Unsubscripted "IO" abstract base class (ABC).
PepHintMetadata(
hint=IO,
pep_sign=HintSignGeneric,
generic_type=IO,
is_typevared=True,
piths_satisfied_meta=(
# Open read-only file handle to this submodule.
PepHintPithSatisfiedMetadata(
pith=lambda: open(_DATA_HINTPEP544_FILENAME, 'r'),
is_pith_factory=True,
),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata(
'To piously magistrate, dis‐empower, and'),
),
),
# Unsubscripted "BinaryIO" abstract base class (ABC).
PepHintMetadata(
hint=BinaryIO,
pep_sign=HintSignGeneric,
generic_type=BinaryIO,
is_subscripted=False,
piths_satisfied_meta=(
# Open read-only binary file handle to this submodule.
PepHintPithSatisfiedMetadata(
pith=lambda: open(_DATA_HINTPEP544_FILENAME, 'rb'),
is_pith_factory=True,
),
),
piths_unsatisfied_meta=(
# Bytestring constant.
PepHintPithUnsatisfiedMetadata(
b"Of a thieved imagination's reveries"),
),
),
# Unsubscripted "TextIO" abstract base class (ABC).
PepHintMetadata(
hint=TextIO,
pep_sign=HintSignGeneric,
generic_type=TextIO,
is_subscripted=False,
piths_satisfied_meta=(
# Open read-only text file handle to this submodule.
PepHintPithSatisfiedMetadata(
pith=lambda: open(_DATA_HINTPEP544_FILENAME, 'r'),
is_pith_factory=True,
),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata(
'Statistician’s anthemed meme athame'),
),
),
# ................{ PROTOCOLS ~ supports }................
# Unsubscripted "SupportsAbs" abstract base class (ABC).
PepHintMetadata(
hint=SupportsAbs,
pep_sign=HintSignGeneric,
generic_type=SupportsAbs,
# Oddly, some but *NOT* all "typing.Supports*" ABCs are
# parametrized by type variables. *shrug*
is_typevared=True,
piths_satisfied_meta=(
# Integer constant.
PepHintPithSatisfiedMetadata(73),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata('Scour Our flowering'),
),
),
# Unsubscripted "SupportsBytes" abstract base class (ABC).
PepHintMetadata(
hint=SupportsBytes,
pep_sign=HintSignGeneric,
generic_type=SupportsBytes,
is_subscripted=False,
piths_satisfied_meta=(
# Platform-agnostic filesystem path object constant.
#
# Note that exceedingly few stdlib types actually define the
# __bytes__() dunder method. Among the few include classes
# defined by the "pathlib" module, which is why we instantiate
# such an atypical class here. See also:
# https://stackoverflow.com/questions/45522536/where-can-the-bytes-method-be-found
PepHintPithSatisfiedMetadata(
pith=lambda: pathlib.Path('/'),
is_context_manager=True,
is_pith_factory=True,
),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata(
'Fond suburb’s gibbet‐ribbed castrati'),
),
),
#FIXME: Uncomment after we determine whether or not any stdlib classes
#actually define the __complex__() dunder method. There don't appear to
#be any, suggesting that the only means of testing this would be to
#define a new custom "ProtocolSupportsComplex" class as we do above for
#the "ProtocolSupportsInt" class. *shrug*
# # Unsubscripted "SupportsComplex" abstract base class (ABC).
# SupportsComplex: PepHintMetadata(
# pep_sign=Generic,
# piths_satisfied_meta=(
# # Integer constant.
# 108,
# ),
# piths_unsatisfied_meta=(
# # String constant.
# PepHintPithUnsatisfiedMetadata('Fondled ΘuroƂorus-'),
# ),
# ),
# Unsubscripted "SupportsFloat" abstract base class (ABC).
PepHintMetadata(
hint=SupportsFloat,
pep_sign=HintSignGeneric,
generic_type=SupportsFloat,
is_subscripted=False,
piths_satisfied_meta=(
# Integer constant.
PepHintPithSatisfiedMetadata(92),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata('Be’yond a'),
),
),
# Unsubscripted "SupportsIndex" abstract base class (ABC) first
# introduced by Python 3.8.0.
PepHintMetadata(
hint=SupportsIndex,
pep_sign=HintSignGeneric,
generic_type=SupportsIndex,
is_subscripted=False,
piths_satisfied_meta=(
# Integer constant.
PepHintPithSatisfiedMetadata(29),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata('Self-ishly'),
),
),
# Unsubscripted "SupportsInt" abstract base class (ABC).
PepHintMetadata(
hint=SupportsInt,
pep_sign=HintSignGeneric,
generic_type=SupportsInt,
is_subscripted=False,
piths_satisfied_meta=(
# Floating-point number constant.
PepHintPithSatisfiedMetadata(25.78),
# Structurally subtyped instance.
PepHintPithSatisfiedMetadata(ProtocolSupportsInt()),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata(
'Ungentlemanly self‐righteously, and'),
),
),
# Unsubscripted "SupportsRound" abstract base class (ABC).
PepHintMetadata(
hint=SupportsRound,
pep_sign=HintSignGeneric,
generic_type=SupportsRound,
# Oddly, some but *NOT* all "typing.Supports*" ABCs are
# parametrized by type variables. *shrug*
is_typevared=True,
piths_satisfied_meta=(
# Floating-point number constant.
PepHintPithSatisfiedMetadata(87.52),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata(
'Our Fathers vowed, indulgently,'),
),
),
# ................{ PROTOCOLS ~ user }................
# Despite appearances, protocols implicitly subclass "typing.Generic"
# and thus do *NOT* transparently reduce to standard types.
#
# Note that the "data_hintpep484" submodule already exercises
# predefined "typing" protocols (e.g., "typing.SupportsInt"), which
# were technically introduced with PEP 484 and thus available since
# Python >= 3.4 or so.
# User-defined protocol parametrized by *NO* type variables.
PepHintMetadata(
hint=ProtocolCustomUntypevared,
pep_sign=HintSignGeneric,
generic_type=ProtocolCustomUntypevared,
is_subscripted=False,
is_type_typing=False,
piths_satisfied_meta=(
PepHintPithSatisfiedMetadata(protocol_custom_structural),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata('For durance needs.'),
),
),
# User-defined protocol parametrized by a type variable.
PepHintMetadata(
hint=ProtocolCustomTypevared,
pep_sign=HintSignGeneric,
generic_type=ProtocolCustomTypevared,
is_typevared=True,
is_type_typing=False,
piths_satisfied_meta=(
PepHintPithSatisfiedMetadata(protocol_custom_structural),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata('Machist-'),
),
),
# User-defined protocol parametrized by a type variable, explicitly
# subscripted by that same type variable.
PepHintMetadata(
hint=ProtocolCustomTypevared[T],
pep_sign=HintSignGeneric,
generic_type=ProtocolCustomTypevared,
is_typevared=True,
is_typing=False,
piths_satisfied_meta=(
PepHintPithSatisfiedMetadata(protocol_custom_structural),
),
piths_unsatisfied_meta=(
# String constant.
PepHintPithUnsatisfiedMetadata(
'Black and white‐bit, bilinear linaements'),
),
),
))
```
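The protocols above lean on PEP 544 structural typing: a class satisfies a `@runtime_checkable` protocol by defining the protocol's methods, without subclassing it. A minimal standalone illustration using only the standard library (assumes Python >= 3.8):
```python
from abc import abstractmethod
from typing import Protocol, runtime_checkable

@runtime_checkable
class SupportsOmega(Protocol):
    '''Protocol matching any object defining an omega() method.'''

    @abstractmethod
    def omega(self) -> str: ...

class Structural(object):
    '''Satisfies SupportsOmega structurally, without subclassing it.'''

    def omega(self) -> str:
        return 'Surfeit need'

class NonStructural(object):
    '''Defines no omega() method and thus fails to satisfy the protocol.'''

# isinstance() checks against runtime-checkable protocols test only for the
# presence of the protocol's methods, not for explicit subclassing.
assert isinstance(Structural(), SupportsOmega)
assert not isinstance(NonStructural(), SupportsOmega)
```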
#### File: util/mark/pytskip.py
```python
import platform, pytest, sys
from beartype_test.util.mark import pytmark
from collections.abc import Mapping, Sequence
from types import FunctionType
# Sadly, the following imports require private modules and packages.
from _pytest.runner import Skipped
# ....................{ GLOBALS }....................
_NoneType = type(None)
'''
Type of the ``None`` singleton, duplicated from the possibly unsafe
:mod:`beartype.cave` submodule to avoid raising exceptions from the early
testing-time decorators defined by this utility submodule.
'''
# ....................{ GLOBALS ~ constants }....................
_PYTHON_VERSION_TUPLE = sys.version_info[:3]
'''
Machine-readable version of the active Python interpreter as a tuple of
integers.
See Also
----------
:mod:`beartype.meta`
Similar logic performed at :mod:`beartype` importation time.
'''
_PYTHON_VERSION_STR = '.'.join(
str(version_part) for version_part in sys.version_info[:3])
'''
Human-readable version of the active Python interpreter as a dot-delimited
string.
See Also
----------
:mod:`beartype.meta`
Similar logic performed at :mod:`beartype` importation time.
'''
# ....................{ SKIP }....................
skip_if = pytest.mark.skipif
'''
Conditionally skip the decorated test or fixture with the passed human-readable
justification if the passed boolean is ``True``.
Parameters
----------
boolean : bool
Boolean to be tested.
reason : str
Human-readable message justifying the skipping of this test or fixture.
'''
def skip(reason: str):
'''
Unconditionally skip the decorated test with the passed human-readable
justification.
This decorator is intended to be called both directly as a function *and*
indirectly as a decorator, which differs from both:
* :func:`pytest.skip`, intended to be called only directly as a function.
Attempting to call that function indirectly as a decorator produces
extraneous ignorable messages on standard output resembling
``"SKIP [1] beartype_test/unit/test_import.py:66: could not import
'xdist'"``, for unknown (and probably uninteresting) reasons.
* :func:`pytest.mark.skip`, intended to be called only indirectly as a
decorator. Attempting to call that decorator directly as a function
reduces to a noop, for unknown (and probably uninteresting) reasons.
Parameters
----------
reason : str
Human-readable message justifying the skipping of this test.
Returns
----------
pytest.skipif
Decorator skipping the decorated test with this justification.
'''
assert isinstance(reason, str), '"{!r}" not string.'.format(reason)
return skip_if(True, reason=reason)
# ....................{ SKIP ~ py }....................
def skip_if_pypy():
'''
Skip the decorated test or fixture if the active Python interpreter is the
PyPy, a third-party implementation emphasizing Just In Time (JIT) bytecode
optimization.
Returns
----------
pytest.skipif
Decorator skipping this test or fixture if this interpreter is PyPy
*or* the identity decorator reducing to a noop otherwise.
'''
# Defer heavyweight imports.
from beartype._util.py.utilpyinterpreter import IS_PYPY
# Skip this test if the active Python interpreter is PyPy.
return skip_if(IS_PYPY, reason='Incompatible with PyPy.')
def skip_if_python_version_greater_than_or_equal_to(version: str):
'''
Skip the decorated test or fixture if the version of the active Python
interpreter is greater than or equal to the passed maximum
version.
Parameters
----------
version : str
Maximum version of the Python interpreter required by this test or
fixture as a dot-delimited string (e.g., ``3.5.0``).
Returns
----------
pytest.skipif
Decorator describing these requirements if unmet *or* the identity
decorator reducing to a noop otherwise.
See Also
----------
:mod:`beartype.meta`
Similar logic performed at :mod:`beartype` importation time.
'''
assert isinstance(version, str), f'{repr(version)} not string.'
# Defer heavyweight imports.
from beartype.meta import _convert_version_str_to_tuple
# Machine-readable required version of Python as a tuple of integers.
version_tuple = _convert_version_str_to_tuple(version)
# Skip this test if the current Python version exceeds this requirement.
return skip_if(
_PYTHON_VERSION_TUPLE >= version_tuple,
reason=f'Python {_PYTHON_VERSION_STR} >= {version}.')
def skip_if_python_version_less_than(version: str):
'''
Skip the decorated test or fixture if the version of the active Python
interpreter is strictly less than the passed minimum version.
Parameters
----------
version : str
Minimum version of the Python interpreter required by this test or
fixture as a dot-delimited string (e.g., ``3.5.0``).
Returns
----------
pytest.skipif
Decorator describing these requirements if unmet *or* the identity
decorator reducing to a noop otherwise.
See Also
----------
:mod:`beartype.meta`
Similar logic performed at :mod:`beartype` importation time.
'''
assert isinstance(version, str), f'{repr(version)} not string.'
# Defer heavyweight imports.
from beartype.meta import _convert_version_str_to_tuple
# Machine-readable required version of Python as a tuple of integers.
version_tuple = _convert_version_str_to_tuple(version)
# Skip this test if the current Python version is less than this
# requirement.
return skip_if(
_PYTHON_VERSION_TUPLE < version_tuple,
reason=f'Python {_PYTHON_VERSION_STR} < {version}.')
# ....................{ SKIP ~ py : module }....................
def skip_unless_package(
package_name: str, minimum_version: 'Optional[str]' = None):
'''
Skip the decorated test or fixture if the package with the passed name is
**unsatisfied** (i.e., either dynamically unimportable *or* importable but
of a version less than the passed minimum version if non-``None``).
Parameters
----------
package_name : str
Fully-qualified name of the package required by the decorated test or fixture.
minimum_version : Optional[str]
Optional minimum version of this package as a dot-delimited string
(e.g., ``0.4.0``) to be tested for if any *or* ``None`` otherwise, in
which case any version is acceptable. Defaults to ``None``.
Returns
----------
pytest.skipif
Decorator describing these requirements if unmet *or* the identity
decorator reducing to a noop otherwise.
'''
assert isinstance(package_name, str), (
f'{repr(package_name)} not string.')
# Skip the decorated test or fixture unless the requisite dunder submodule
# declared by this package satisfies these requirements.
return skip_unless_module(
module_name=f'{package_name}.__init__',
minimum_version=minimum_version,
)
def skip_unless_module(
module_name: str, minimum_version: 'Optional[str]' = None):
'''
Skip the decorated test or fixture if the module with the passed name is
**unsatisfied** (i.e., either dynamically unimportable *or* importable but
of a version less than the passed minimum version if non-``None``).
Caveats
----------
**This decorator should never be passed the fully-qualified name of a
package.** Consider calling the :func:`skip_unless_package` decorator
instead to skip unsatisfied packages. Calling this decorator with package
names guarantees those packages to be skipped, as packages are *not*
directly importable as modules.
Parameters
----------
module_name : str
Fully-qualified name of the module required by the decorated test or fixture.
minimum_version : Optional[str]
Optional minimum version of this module as a dot-delimited string
(e.g., ``0.4.0``) to be tested for if any *or* ``None`` otherwise, in
which case any version is acceptable. Defaults to ``None``.
Returns
----------
pytest.skipif
Decorator describing these requirements if unmet *or* the identity
decorator reducing to a noop otherwise.
'''
assert isinstance(module_name, str), (
f'{repr(module_name)} not string.')
assert isinstance(minimum_version, (str, _NoneType)), (
f'{repr(minimum_version)} neither string nor "None".')
return _skip_if_callable_raises_exception(
exception_type=Skipped,
func=pytest.importorskip,
args=(module_name, minimum_version),
)
# ....................{ SKIP ~ private }....................
def _skip_if_callable_raises_exception(
# Mandatory parameters.
exception_type: type,
func: FunctionType,
# Optional parameters.
args: 'Optional[Sequence]' = None,
kwargs: 'Optional[Mapping]' = None,
):
'''
Skip the decorated test or fixture if calling the passed callable with the
passed positional and keyword arguments raises an exception of the passed
type.
Specifically, if calling this callable raises:
* The passed type of exception, this test is marked as skipped.
* Any other type of exception, this test is marked as a failure.
* No exception, this test continues as expected.
Parameters
----------
exception_type : type
Type of exception expected to be raised by this callable.
func : FunctionType
Callable to be called.
args : Optional[Sequence]
Sequence of all positional arguments to unconditionally pass to the
passed callable if any *or* ``None`` otherwise. Defaults to ``None``.
kwargs : Optional[Mapping]
Mapping of all keyword arguments to unconditionally pass to the passed
callable if any *or* ``None`` otherwise. Defaults to ``None``.
Returns
----------
pytest.skipif
Decorator skipping this test if this callable raises this exception
*or* the identity decorator reducing to a noop otherwise.
'''
# Default all unpassed arguments to sane values.
if args is None:
args = ()
if kwargs is None:
kwargs = {}
# Validate *AFTER* defaulting these arguments.
assert isinstance(exception_type, type), (
f'{repr(exception_type)} not type.')
assert callable(func), f'{repr(func)} uncallable.'
assert isinstance(args, Sequence), f'{repr(args)} not sequence.'
assert isinstance(kwargs, Mapping), f'{repr(kwargs)} not mapping.'
# Attempt to call this callable with these arguments.
try:
func(*args, **kwargs)
# If this callable raises an expected exception, skip this test.
except exception_type as exception:
return skip(str(exception))
# Else if this callable raises an unexpected exception, fail this test by
# permitting this exception to unwind the current call stack.
# Else, this callable raised no exception. Silently reduce to a noop.
return pytmark.noop
```
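A hedged sketch of how the decorators defined above are typically applied to tests, assuming the `beartype_test` package layout shown here; the decorated test bodies are hypothetical placeholders:
```python
from beartype_test.util.mark.pytskip import (
    skip,
    skip_if_python_version_less_than,
    skip_unless_module,
)

@skip_if_python_version_less_than('3.9.0')
def test_pep585_sequence_hint() -> None:
    # Runs only under Python >= 3.9, where PEP 585 hints are available.
    assert list[str] is not None

@skip_unless_module('numpy')
def test_numpy_integration() -> None:
    # Runs only if NumPy is importable; skipped otherwise.
    import numpy
    assert numpy.zeros(3).ndim == 1

@skip('Temporarily disabled pending an upstream fix.')
def test_flaky_behaviour() -> None:
    assert False
```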
#### File: beartype_test/util/pyterror.py
```python
from contextlib import contextmanager
from pytest import raises
from typing import Type
# ....................{ CONTEXTS }....................
@contextmanager
def raises_uncached(exception_cls: Type[Exception]) -> 'ExceptionInfo':
'''
Context manager validating that the block exercised by this manager raises
a **cached exception** (i.e., whose message previously contained one or
more instances of the magic
:data:`beartype._util.cache.utilcacheerror.EXCEPTION_CACHED_PLACEHOLDER`
substring since replaced by the
:func:`beartype._util.cache.utilcacheerror.reraise_exception_cached`
function) of the passed type.
Parameters
----------
exception_cls : Type[Exception]
Type of cached exception expected to be raised by this block.
Returns
----------
:class:`pytest.nodes.ExceptionInfo`
:mod:`pytest`-specific object collecting metadata on the cached
exception of this type raised by this block.
See Also:
----------
https://docs.pytest.org/en/stable/reference.html#pytest._code.ExceptionInfo
Official :class:`pytest.nodes.ExceptionInfo` documentation.
'''
# Defer heavyweight imports.
from beartype._util.cache.utilcacheerror import (
EXCEPTION_CACHED_PLACEHOLDER)
# With a "pytest"-specific context manager validating this contextual block
# to raise an exception of this type...
with raises(exception_cls) as exception_info:
yield exception_info
# Assert this exception message does *NOT* contain this magic substring.
assert EXCEPTION_CACHED_PLACEHOLDER not in str(exception_info.value)
```
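A hedged sketch of `raises_uncached` in use. The decorated callable and the `BeartypeCallHintPepParamException` import are assumptions for illustration (exception names differ across beartype versions); the point is that the context manager wraps `pytest.raises` and additionally asserts that the cached placeholder substring has been substituted out of the message:
```python
from typing import List

from beartype_test.util.pyterror import raises_uncached

def test_decorated_callable_rejects_bad_parameter() -> None:
    # Deferred imports, mirroring the test suite's convention.
    from beartype import beartype
    from beartype.roar import BeartypeCallHintPepParamException

    @beartype
    def total(values: List[int]) -> int:
        return sum(values)

    # Assert the wrapper raises the expected exception *AND* that the
    # exception message no longer contains the cached placeholder substring.
    with raises_uncached(BeartypeCallHintPepParamException):
        total('not a list of integers')
```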
#### File: beartype/vale/_valeisabc.py
```python
from abc import ABCMeta
from beartype.roar import BeartypeValeSubscriptionException
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ SUPERCLASSES }....................
class _IsABC(object, metaclass=ABCMeta):
'''
Abstract base class of all **beartype validator factory subclasses**
(i.e., subclasses that, when subscripted (indexed) by subclass-specific
objects, create new :class:`_SubscriptedIs` objects encapsulating those
objects, themselves suitable for subscripting (indexing)
:attr:`typing.Annotated` type hints, themselves enforcing subclass-specific
validation constraints and contracts on :mod:`beartype`-decorated callable
parameters and returns annotated by those hints).
'''
# ..................{ INITIALIZERS }..................
# Ideally, this class method should be typed as returning "NoReturn", but
# doing so causes MyPy to vociferously complain: e.g.,
# beartype/vale/_valeisabc.py:43: error: "__new__" must return a class
# instance (got "NoReturn") [misc]
def __new__(cls, *args, **kwargs) -> '_IsABC':
'''
Prohibit direct instantiation by unconditionally raising an exception.
Like standard type hints (e.g., :attr:`typing.Union`), this class is
*only* intended to be subscripted (indexed).
Raises
----------
BeartypeValeSubscriptionException
Always.
'''
# Murderbot would know what to do here.
raise BeartypeValeSubscriptionException(
f'{repr(cls)} not instantiable; '
f'like most "typing" classes (e.g., "typing.Annotated"), '
f'this class is only intended to be subscripted (indexed).'
)
```
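The `__new__` override above means validator factories are meant to be subscripted, never instantiated. A hedged sketch of both sides of that contract using the public `beartype.vale.Is` factory, as suggested by the code above (the exact exception message may differ):
```python
from beartype.roar import BeartypeValeSubscriptionException
from beartype.vale import Is

# Subscripting a validator factory is the supported usage: it yields a
# validator object suitable for subscripting "typing.Annotated" hints.
IsNonEmpty = Is[lambda text: bool(text)]

# Directly instantiating a factory is prohibited by the __new__() override
# above and raises immediately.
try:
    Is()
except BeartypeValeSubscriptionException as exception:
    print(f'As expected: {exception}')
```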
#### File: beartype/vale/_valeisobj.py
```python
from beartype.roar import BeartypeValeSubscriptionException
from beartype.vale._valeisabc import _IsABC
from beartype._vale._valesub import _SubscriptedIs
from beartype._util.cache.utilcachecall import callable_cached
from beartype._util.data.utildatadict import update_mapping
from beartype._util.func.utilfuncscope import (
CallableScope,
add_func_scope_attr,
)
from beartype._util.text.utiltextmagic import (
CODE_INDENT_1,
# LINE_RSTRIP_INDEX_AND,
)
from beartype._util.text.utiltextrepr import represent_object
from beartype._util.utilobject import SENTINEL
from typing import Any, Tuple
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ CLASSES ~ subscriptable }....................
class IsAttr(_IsABC):
'''
**Beartype object attribute validator factory** (i.e., class that, when
subscripted (indexed) by both the name of any object attribute *and* any
:class:`_SubscriptedIs` object created by subscripting any
:mod:`beartype.vale` class for validating that attribute, creates another
:class:`_SubscriptedIs` object suitable for subscripting (indexing)
:attr:`typing.Annotated` type hints, which validates that
:mod:`beartype`-decorated callable parameters and returns annotated by
those hints define an attribute with that name satisfying that attribute
validator).
This class efficiently validates that callable parameters and returns
define arbitrary object attributes satisfying arbitrary validators
subscripting (indexing) this class. Any :mod:`beartype`-decorated callable
parameter or return annotated by a :attr:`typing.Annotated` type hint
subscripted (indexed) by this class subscripted (indexed) by any object
attribute name and validator (e.g., ``typing.Annotated[{cls},
beartype.vale.IsAttr[{attr_name}, {attr_validator}]]`` for any class
``{cls}``, object attribute name ``{attr_name}``, and object attribute
validator ``{attr_validator}``) validates that parameter or return value to
be an instance of that class defining an attribute with that name
satisfying that attribute validator.
**This class incurs no time performance penalties at call time.** Whereas
the general-purpose :class:`beartype.vale.Is` class necessarily calls the
caller-defined callable subscripting that class at call time and thus
incurs a minor time performance penalty, this class efficiently reduces to
one-line tests in :mod:`beartype`-generated wrapper functions *without*
calling any callables and thus incurs *no* time performance penalties.
Examples
----------
.. code-block:: python
# Import the requisite machinery.
>>> import numpy as np
>>> from beartype import beartype
>>> from beartype.vale import IsAttr, IsEqual
>>> from typing import Annotated
# Type hint matching only two-dimensional NumPy arrays of 64-bit floats,
# generating code resembling:
# (isinstance(array, np.ndarray) and
# array.ndim == 2 and
# array.dtype == np.dtype(np.float64))
>>> Numpy2DArrayOfFloats = Annotated[
... np.ndarray,
... IsAttr['ndim', IsEqual[2]],
... IsAttr['dtype', IsEqual[np.dtype(np.float64)]],
... ]
# Type hint matching only one-dimensional NumPy arrays of 64-bit floats,
# generating code resembling:
# (isinstance(array, np.ndarray) and
# array.ndim == 1 and
# array.dtype.type == np.float64)
>>> Numpy1DArrayOfFloats = Annotated[
... np.ndarray,
... IsAttr['ndim', IsEqual[1]],
... # Nested attribute validators test equality against a "."-delimited
... # attribute lookup (e.g., "dtype.type"), as expected.
... IsAttr['dtype', IsAttr['type', IsEqual[np.float64]]],
... ]
# NumPy arrays of well-known real number series.
>>> FAREY_2D_ARRAY_OF_FLOATS = np.array(
... [[0/1, 1/8,], [1/7, 1/6,], [1/5, 1/4], [2/7, 1/3], [3/8, 2/5]])
>>> FAREY_1D_ARRAY_OF_FLOATS = np.array(
... [3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8])
# Annotate callables by those type hints.
>>> @beartype
... def sqrt_sum_2d(
... array: Numpy2DArrayOfFloats) -> Numpy1DArrayOfFloats:
... """
... One-dimensional NumPy array of 64-bit floats produced by first
... summing the passed two-dimensional NumPy array of 64-bit floats
... along its second dimension and then square-rooting those sums.
... """
... return np.sqrt(array.sum(axis=1))
# Call those callables with parameters satisfying those hints.
>>> sqrt_sum_2d(FAREY_2D_ARRAY_OF_FLOATS)
[0.35355339 0.55634864 0.67082039 0.78679579 0.88034084]
# Call those callables with parameters not satisfying those hints.
>>> sqrt_sum_2d(FAREY_1D_ARRAY_OF_FLOATS)
beartype.roar._roarexc.BeartypeCallHintPepParamException: @beartyped
sqrt_sum_2d() parameter array="array([0.42857143, 0.5, 0.57142857, 0.6,
0.625, ...])" violates type hint typing.Annotated[numpy.ndarray,
IsAttr['ndim', IsEqual[2]], IsAttr['dtype', IsEqual[dtype('float64')]]],
as value "array([0.42857143, 0.5, 0.57142857, 0.6, 0.625, ...])"
violates data constraint IsAttr['ndim', IsEqual[2]].
See Also
----------
:class:`beartype.vale.Is`
Further commentary.
'''
# ..................{ DUNDERS }..................
@callable_cached
def __class_getitem__(
cls, args: Tuple[str, _SubscriptedIs]) -> _SubscriptedIs:
'''
`PEP 560`_-compliant dunder method creating and returning a new
:class:`_SubscriptedIs` object validating object attributes with the
passed name satisfying the passed validator, suitable for subscripting
`PEP 593`_-compliant :attr:`typing.Annotated` type hints.
This method is memoized for efficiency.
Parameters
----------
args : Tuple[str, _SubscriptedIs]
2-tuple ``(attr_name, attr_validator)``, where:
* ``attr_name`` is the arbitrary attribute name to validate that
parameters and returns define satisfying the passed validator.
* ``attr_validator`` is the attribute validator to validate that
attributes with the passed name of parameters and returns
satisfy.
Returns
----------
_SubscriptedIs
New object encapsulating this validation.
Raises
----------
BeartypeValeSubscriptionException
If this class was subscripted by either:
* *No* arguments.
* One argument.
* Three or more arguments.
See Also
----------
:class:`IsAttr`
Usage instructions.
.. _PEP 560:
https://www.python.org/dev/peps/pep-0560
.. _PEP 593:
https://www.python.org/dev/peps/pep-0593
'''
# If this class was subscripted by one non-tuple argument, raise an
# exception.
if not isinstance(args, tuple):
raise BeartypeValeSubscriptionException(
f'{repr(cls)} subscripted by one non-tuple argument:\n'
f'{represent_object(args)}'
)
# Else, this class was subscripted by either no *OR* two or more
# arguments (contained in this tuple).
#
# If this class was *NOT* subscripted by two arguments...
elif len(args) != 2:
# If this class was subscripted by one or more arguments, then by
# deduction this class was subscripted by three or more arguments.
# In this case, raise a human-readable exception.
if args:
raise BeartypeValeSubscriptionException(
f'{repr(cls)} subscripted by three or more arguments:\n'
f'{represent_object(args)}'
)
# Else, this class was subscripted by *NO* arguments. In this case,
# raise a human-readable exception.
else:
raise BeartypeValeSubscriptionException(
f'{repr(cls)} subscripted by empty tuple.')
# Else, this class was subscripted by exactly two arguments.
# Localize these arguments to human-readable local variables.
attr_name, attr_validator = args
# Representer (i.e., callable accepting *NO* arguments returning a
# machine-readable representation of this validator), defined *AFTER*
# localizing these validator arguments.
get_repr = lambda: (
f'{cls.__name__}[{repr(attr_name)}, {repr(attr_validator)}]')
# If this name is *NOT* a string, raise an exception.
if not isinstance(attr_name, str):
raise BeartypeValeSubscriptionException(
f'{get_repr()} subscripted first argument '
f'{repr(attr_name)} not string.'
)
# Else, this name is a string.
#
# If this name is the empty string, raise an exception.
elif not attr_name:
raise BeartypeValeSubscriptionException(
f'{get_repr()} subscripted first argument '
f'{repr(attr_name)} empty.'
)
# Else, this name is a non-empty string.
#
# Note that this name has *NOT* yet been validated to be valid Python
# identifier. While we could do so here by calling our existing
# is_identifier() tester, doing so would inefficiently repeat
# the split on "." characters performed below. Instead, we iteratively
# validate each split substring to be a valid Python identifier below.
# Callable inefficiently validating object attributes with this name
# against this validator.
# is_valid: SubscriptedIsValidator = None # type: ignore[assignment]
# Code snippet efficiently validating object attributes with this name
# against this validator.
is_valid_code = ''
# Dictionary mapping from the name to value of each local attribute
# referenced in the "is_valid_code" snippet defined below.
is_valid_code_locals: CallableScope = {}
# If this attribute name is unqualified (i.e., contains no "."
# delimiters), prefer an efficient optimization avoiding iteration.
if '.' not in attr_name:
# If this name is *NOT* a valid Python identifier, raise an
# exception.
if not attr_name.isidentifier():
raise BeartypeValeSubscriptionException(
f'{get_repr()} subscripted first argument '
f'{repr(attr_name)} syntactically invalid '
f'(i.e., not valid Python identifier).'
)
# Else, this name is a valid Python identifier.
def is_valid(pith: Any) -> bool:
f'''
``True`` only if the passed object defines an attribute named
{repr(attr_name)} whose value satisfies the validator
{repr(attr_validator)}.
'''
# Attribute of this object with this name if this object
# defines such an attribute *OR* a sentinel placeholder
# otherwise (i.e., if this object defines *NO* such attribute).
pith_attr = getattr(pith, attr_name, SENTINEL)
# Return true only if...
return (
# This object defines an attribute with this name *AND*...
pith_attr is not SENTINEL and
# This attribute satisfies this validator.
attr_validator.is_valid(pith_attr)
)
# Names of new parameters added to the signature of wrapper
# functions enabling this validator to be tested in those functions
# *WITHOUT* additional stack frames whose values are:
# * The sentinel placeholder.
local_name_sentinel = add_func_scope_attr(
attr=SENTINEL, attr_scope=is_valid_code_locals)
# Generate locals safely merging the locals required by both the
# code generated below *AND* the code validating this attribute.
update_mapping(
is_valid_code_locals, attr_validator._is_valid_code_locals)
#FIXME: Unfortunately, this still isn't sufficiently unique,
#because "IsAttr['name', IsAttr['name', IsEqual[True]]]" is a
#trivial counter-example where the current approach breaks down.
#For true uniquification here, we're going to need to instead:
#* Define a global private counter:
# _local_name_obj_attr_value_counter = Counter(0)
#* Replace the assignment below with:
# local_name_obj_attr_value = (
# f'{{obj}}_isattr_'
# f'{next(_local_name_obj_attr_value_counter)}'
# )
# Name of a local variable in this code whose:
# * Name is sufficiently obfuscated as to be hopefully unique to
# the code generated by this validator.
# * Value is the value of this attribute of the arbitrary object
# being validated by this code.
local_name_obj_attr_value = f'{{obj}}_isattr_{attr_name}'
# Code validating this attribute's value, formatted so as to be
# safely embeddable in the larger code expression defined below.
obj_attr_value_is_valid_expr = (
attr_validator._is_valid_code.format(
# Replace the placeholder substring "{obj}" in this code
# with the local variable whose value is the value of the
# desired object attribute.
obj=local_name_obj_attr_value,
# Replace the placeholder substring "{index}" in this code
# with an indentation increased by one level.
indent=f'{{indent}}{CODE_INDENT_1}',
))
# Code snippet efficiently validating against this object.
is_valid_code = VALE_CODE_CHECK_ISATTR_format(
attr_name_expr=repr(attr_name),
local_name_obj_attr_value=local_name_obj_attr_value,
obj_attr_value_is_valid_expr=obj_attr_value_is_valid_expr,
local_name_sentinel=local_name_sentinel,
)
# Else, this attribute name is qualified (i.e., contains one or more
# "." delimiters), fallback to a general solution performing iteration.
else:
#FIXME: Implement us up when we find the time, please. We currently
#raise an exception simply because we ran out of time for this. :{
raise BeartypeValeSubscriptionException(
f'{get_repr()} subscripted first argument '
f'{repr(attr_name)} not unqualified Python identifier '
f'(i.e., contains one or more "." characters).'
)
# Create and return this subscription.
return _SubscriptedIs(
is_valid=is_valid,
is_valid_code=is_valid_code,
is_valid_code_locals=is_valid_code_locals,
get_repr=get_repr,
)
# ....................{ CONSTANTS }....................
#FIXME: Shift into a new "_valesnip" submodule, please.
VALE_CODE_CHECK_ISATTR = '''(
{{indent}} # True only if this pith defines an attribute with this name.
{{indent}} ({local_name_obj_attr_value} := getattr(
{{indent}} {{obj}}, {attr_name_expr}, {local_name_sentinel}))
{{indent}} is not {local_name_sentinel} and
{{indent}} {obj_attr_value_is_valid_expr}
{{indent}})'''
'''
:mod:`beartype.vale.IsAttr`-specific code snippet validating an arbitrary
object to define an attribute with an arbitrary name satisfying an arbitrary
expression evaluating to a boolean.
'''
# Format methods of the code snippets declared above as a microoptimization.
VALE_CODE_CHECK_ISATTR_format = VALE_CODE_CHECK_ISATTR.format
```
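The docstring example above needs NumPy; the same machinery can be exercised with plain Python objects. A hedged, dependency-free sketch combining `IsAttr` with `typing.Annotated` (assumes Python >= 3.9 for `typing.Annotated`; `Matrix` and `rows` are hypothetical names):
```python
from dataclasses import dataclass
from typing import Annotated

from beartype import beartype
from beartype.vale import IsAttr, IsEqual

@dataclass
class Matrix:
    '''Hypothetical container carrying an "ndim" attribute.'''
    ndim: int
    data: list

# Hint matching only Matrix instances whose "ndim" attribute equals 2.
Matrix2D = Annotated[Matrix, IsAttr['ndim', IsEqual[2]]]

@beartype
def rows(matrix: Matrix2D) -> int:
    return len(matrix.data)

# Instances satisfying the validator are accepted...
assert rows(Matrix(ndim=2, data=[[1, 2], [3, 4]])) == 2

# ...while instances violating it are rejected at call time.
try:
    rows(Matrix(ndim=1, data=[1, 2, 3]))
except Exception as exception:
    print(f'Rejected as expected: {type(exception).__name__}')
```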
#### File: beartype/_vale/_valesub.py
```python
from beartype.roar import BeartypeValeSubscriptionException
from beartype._util.cache.utilcachecall import callable_cached
from beartype._util.data.utildatadict import merge_mappings_two
from beartype._util.func.utilfuncarg import get_func_args_len_standard
from beartype._util.func.utilfuncscope import CallableScope
from beartype._util.func.utilfunctest import is_func_python
from beartype._util.text.utiltextrepr import represent_object
from typing import Any, Callable
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ HINTS }....................
SubscriptedIsValidator = Callable[[Any,], bool]
'''
PEP-compliant type hint matching a **validator** (i.e., caller-defined callable
accepting a single arbitrary object and returning either ``True`` if that
object satisfies an arbitrary constraint *or* ``False`` otherwise).
Data validators are suitable for subscripting the :class:`Is` class.
'''
# ....................{ CLASSES ~ subscripted }....................
class _SubscriptedIs(object):
'''
**Beartype validator** (i.e., object encapsulating a caller-defined
validation callable returning ``True`` when an arbitrary object passed to
that callable satisfies an arbitrary constraint, suitable for subscripting
(indexing) `PEP 593`_-compliant :attr:`typing.Annotated` type hints
enforcing that validation on :mod:`beartype`-decorated callable parameters
and returns annotated by those hints).
Caveats
----------
**This low-level class is not intended to be externally instantiated**
(e.g., by calling the :meth:`__init__` constructor). This class is *only*
intended to be internally instantiated by subscripting (indexing) the
higher-level :class:`Is` class factory.
Attributes
----------
is_valid : Callable[[Any,], bool]
**Validator** (i.e., caller-defined callable accepting a single
arbitrary object and returning either ``True`` if that object satisfies
an arbitrary constraint *or* ``False`` otherwise).
_is_valid_code : str
**Validator code** (i.e., Python code snippet validating the
previously localized parameter or return value against the same
validation performed by the :meth:`is_valid` function). For efficiency,
callers validating data through dynamically generated code (e.g., the
:func:`beartype.beartype` decorator) rather than standard function
calls (e.g., the private :mod:`beartype._decor._hint._pep._error`
subpackage) should prefer :attr:`is_valid_code` to :meth:`is_valid`.
Despite performing the same validation as the :meth:`is_valid`
callable, this code avoids the additional stack frame imposed by
calling that callable and thus constitutes an optimization.
_is_valid_code_locals : CallableScope
**Validator code local scope** (i.e., dictionary mapping from the name
to value of each local attribute referenced in :attr:`code`) required
to dynamically compile this validator code into byte code at runtime.
_get_repr : Callable[[], str]
**Representer** (i.e., caller-defined callable accepting *no* arguments
returning a machine-readable representation of this validator).
Technically, that representation *could* be passed by the caller rather
than this callable dynamically generating that representation.
Pragmatically, generating that representation is sufficiently slow for
numerous types of validators that deferring their generation until
required by a call to the :meth:`__repr__` dunder method externally
called by a call to the :func:`repr` builtin on this validator is
effectively mandatory. Data validators whose representations are
particularly slow to generate include:
* The :class:`Is` class subscripted by a lambda rather than non-lambda
function. Generating the representation of that class subscripted by
a non-lambda function only requires introspecting the name of that
function and is thus trivially fast. However, lambda functions have
no names and are thus *only* distinguishable by their source code;
ergo, generating the representation of that class subscripted by a
lambda function requires parsing the source code of the file
declaring that lambda for the exact substring of that code declaring
that lambda and is thus non-trivially slow.
See Also
----------
:class:`Is`
Class docstring for further details.
.. _PEP 593:
https://www.python.org/dev/peps/pep-0593
'''
# ..................{ CLASS VARIABLES }..................
# Slot all instance variables defined on this object to minimize the time
# complexity of both reading and writing variables across frequently called
# cache dunder methods. Slotting has been shown to reduce read and write
# costs by approximately ~10%, which is non-trivial.
__slots__ = (
'is_valid',
'_is_valid_code',
'_is_valid_code_locals',
'_get_repr',
)
# ..................{ INITIALIZERS }..................
def __init__(
self,
is_valid: SubscriptedIsValidator,
is_valid_code: str,
is_valid_code_locals: CallableScope,
get_repr: Callable[[], str],
) -> None:
'''
Initialize this object with the passed validation callable, code, and
code local scope.
See the class docstring for usage instructions.
Parameters
----------
is_valid : Callable[[Any,], bool]
**Validator** (i.e., caller-defined callable accepting a single
arbitrary object and returning either ``True`` if that object
satisfies an arbitrary constraint *or* ``False`` otherwise).
is_valid_code : str
**Validator code** (i.e., Python code snippet validating the
previously localized parameter or return value against the same
validation performed by the :func:`is_valid` function). This code:
* *Must* contain one or more ``"{obj}"`` substrings, which external
code generators (e.g., the :func:`beartype.beartype` decorator)
will globally replace at evaluation time with the actual test
subject object to be validated by this code.
* *May* contain one or more ``"{indent}"`` substrings, which such
code generators will globally replace at evaluation time with the
line-oriented indentation required to generate a
valid Python statement embedding this code. For consistency with
`PEP 8`_-compliant and well-established Python style guides, any
additional indentation hard-coded into this code should be
aligned to **four-space indentation.**
is_valid_code_locals : Optional[CallableScope]
**Validator code local scope** (i.e., dictionary mapping from the
name to value of each local attribute referenced in
:attr:`is_valid_code` code) required to dynamically compile this
validator code into byte code at runtime.
get_repr : Callable[[], str]
**Representer** (i.e., caller-defined callable accepting *no*
arguments returning a machine-readable representation of this
validator). Technically, that representation rather than this
callable dynamically generating that representation could be passed
by the caller. Pragmatically, generating that representation is
sufficiently slow for various types of validators that deferring
their generation until required by a call to the :meth:`__repr__`
dunder method externally called by a call to the :func:`repr`
            builtin passed this validator is effectively mandatory. Data
validators whose representations are slow to generate include:
            * The :class:`Is` class subscripted by a lambda rather than a
non-lambda function. Generating the representation of that class
subscripted by a non-lambda function only requires introspecting
the name of that function and is thus trivially fast. However,
lambda functions have no names and are thus *only*
distinguishable by their source code; ergo, generating the
representation of that class subscripted by a lambda function
requires parsing the source code of the file declaring that
lambda for the exact substring of that code declaring that lambda
and is thus non-trivially slow.
Raises
----------
BeartypeValeSubscriptionException
If either:
* ``is_valid`` is either:
* *Not* callable.
* A C-based rather than pure-Python callable.
* A pure-Python callable accepting two or more arguments.
* ``is_valid_code`` is either:
* *Not* a string.
* A string either:
* Empty.
* Non-empty but **invalid** (i.e., *not* containing the test
subject substring ``{obj}``).
            * ``is_valid_code_locals`` is *not* a dictionary.
* ``get_repr`` is either:
* *Not* callable.
* A C-based rather than pure-Python callable.
* A pure-Python callable accepting one or more arguments.
.. _PEP 8:
https://www.python.org/dev/peps/pep-0008
'''
# If this validator is uncallable, raise an exception.
if not callable(is_valid):
raise BeartypeValeSubscriptionException(
f'Class "beartype.vale.Is" subscripted argument '
f'{represent_object(is_valid)} not callable.'
)
# Else, this validator is callable.
#
# If this validator is C-based, raise an exception.
elif not is_func_python(is_valid):
raise BeartypeValeSubscriptionException(
f'Class "beartype.vale.Is" subscripted callable '
f'{repr(is_valid)} not pure-Python (e.g., C-based).'
)
# Else, this validator is pure-Python.
#
# If this validator does *NOT* accept exactly one argument, raise an
# exception.
elif get_func_args_len_standard(
func=is_valid,
exception_cls=BeartypeValeSubscriptionException,
) != 1:
raise BeartypeValeSubscriptionException(
f'Class "beartype.vale.Is" subscripted callable '
f'{repr(is_valid)} positional or keyword argument count '
f'{get_func_args_len_standard(is_valid)} != 1.'
)
# Else, this validator accepts exactly one argument. Since no further
# validation can be performed on this callable without unsafely calling
# that callable, we accept this callable as is for now.
#
# Note that we *COULD* technically inspect annotations if defined on
# this callable as well. Since this callable is typically defined as a
# lambda, annotations are typically *NOT* defined on this callable.
# If this code is *NOT* a string, raise an exception.
if not isinstance(is_valid_code, str):
raise BeartypeValeSubscriptionException(
f'Data validator code not string:\n'
f'{represent_object(is_valid_code)}'
)
# Else, this code is a string.
#
# If this code is the empty string, raise an exception.
elif not is_valid_code:
raise BeartypeValeSubscriptionException(
'Data validator code empty.')
# Else, this code is a non-empty string.
#
# If this code does *NOT* contain the test subject substring
# "{obj}" and is invalid, raise an exception.
elif '{obj}' not in is_valid_code:
raise BeartypeValeSubscriptionException(
f'Data validator code invalid (i.e., test subject '
f'substring "{{obj}}" not found):\n{is_valid_code}'
)
# Else, this code is hopefully valid.
#
# If this code is *NOT* explicitly prefixed by "(" and suffixed by
# ")", do so to ensure this code remains safely evaluable when
# embedded in parent expressions.
elif not (
is_valid_code[ 0] == '(' and
is_valid_code[-1] == ')'
):
is_valid_code = f'({is_valid_code})'
# Else, this code is explicitly prefixed by "(" and suffixed by ")".
# If this dictionary of code locals is *NOT* a dictionary, raise an
# exception.
if not isinstance(is_valid_code_locals, dict):
raise BeartypeValeSubscriptionException(
f'Data validator locals '
f'{represent_object(is_valid_code_locals)} not '
f'dictionary.'
)
# Else, this dictionary of code locals is a dictionary.
# If this representer is either uncallable, a C-based callable, *OR* a
# pure-Python callable accepting one or more arguments, raise an
# exception.
if get_func_args_len_standard(
func=get_repr,
exception_cls=BeartypeValeSubscriptionException,
) != 0:
raise BeartypeValeSubscriptionException(
f'Representer {repr(get_repr)} positional or keyword argument '
f'count {get_func_args_len_standard(get_repr)} != 0.'
)
# Else, this representer is a pure-Python callable accepting *NO*
# arguments.
# Classify this validation function, effectively binding this callable
# to this object as an object-specific static method.
self.is_valid = is_valid
self._is_valid_code = is_valid_code
self._is_valid_code_locals = is_valid_code_locals
self._get_repr = get_repr
# ..................{ DUNDERS ~ operator }..................
# Define a domain-specific language (DSL) enabling callers to dynamically
    # combine and invert validators via the overloaded &, |, and ~ operators.
def __and__(self, other: '_SubscriptedIs') -> (
'_SubscriptedIs'):
'''
**Conjunction** (i.e., ``self & other``), synthesizing a new
:class:`_SubscriptedIs` object whose validator returns ``True`` only
when the validators of both this *and* the passed
:class:`_SubscriptedIs` objects all return ``True``.
Parameters
----------
other : _SubscriptedIs
Object to conjunctively synthesize with this object.
Returns
----------
_SubscriptedIs
New object conjunctively synthesized with this object.
Raises
----------
BeartypeValeSubscriptionException
If the passed object is *not* also an instance of the same class.
'''
# If the passed object is *NOT* also an instance of this class, raise
# an exception.
if not isinstance(other, _SubscriptedIs):
raise BeartypeValeSubscriptionException(
f'Subscripted "beartype.vale.Is*" class & operand '
f'{represent_object(other)} not '
f'subscripted "beartype.vale.Is*" class.'
)
# Else, the passed object is also an instance of this class.
# Generate code conjunctively performing both validations.
is_valid_code = f'({self._is_valid_code} and {other._is_valid_code})'
# Generate locals safely merging the locals required by the code
# provided by both this and that validator.
is_valid_code_locals = merge_mappings_two(
self._is_valid_code_locals, other._is_valid_code_locals)
# Closures for great justice.
return _SubscriptedIs(
is_valid=lambda obj: self.is_valid(obj) and other.is_valid(obj),
is_valid_code=is_valid_code,
is_valid_code_locals=is_valid_code_locals, # type: ignore[arg-type]
get_repr=lambda: f'{repr(self)} & {repr(other)}',
)
def __or__(self, other: '_SubscriptedIs') -> (
'_SubscriptedIs'):
'''
**Disjunction** (i.e., ``self | other``), synthesizing a new
:class:`_SubscriptedIs` object whose validator returns ``True`` only
when the validators of either this *or* the passed
:class:`_SubscriptedIs` objects return ``True``.
Parameters
----------
other : _SubscriptedIs
Object to disjunctively synthesize with this object.
Returns
----------
_SubscriptedIs
New object disjunctively synthesized with this object.
'''
# If the passed object is *NOT* also an instance of this class, raise
# an exception.
if not isinstance(other, _SubscriptedIs):
raise BeartypeValeSubscriptionException(
f'Subscripted "beartype.vale.Is*" class | operand '
f'{represent_object(other)} not '
f'subscripted "beartype.vale.Is*" class.'
)
# Else, the passed object is also an instance of this class.
# Generate code disjunctively performing both validations.
is_valid_code = f'({self._is_valid_code} or {other._is_valid_code})'
# Generate locals safely merging the locals required by the code
# provided by both this and that validator.
is_valid_code_locals = merge_mappings_two(
self._is_valid_code_locals, other._is_valid_code_locals)
# Closures for great justice.
return _SubscriptedIs(
is_valid=lambda obj: self.is_valid(obj) or other.is_valid(obj),
is_valid_code=is_valid_code,
is_valid_code_locals=is_valid_code_locals, # type: ignore[arg-type]
get_repr=lambda: f'{repr(self)} | {repr(other)}',
)
#FIXME: Fun optimization: if inverting something that's already been
#inverted, return the original "_SubscriptedIs" object sans inversion. :p
def __invert__(self) -> '_SubscriptedIs':
'''
**Negation** (i.e., ``~self``), synthesizing a new
:class:`_SubscriptedIs` object whose validator returns ``True`` only
        when the validator of this :class:`_SubscriptedIs` object returns
``False``.
Returns
----------
_SubscriptedIs
New object negating this object.
'''
# Closures for profound lore.
return _SubscriptedIs(
is_valid=lambda obj: not self.is_valid(obj),
# Inverted validator code, defined as the trivial boolean negation
# of this validator.
is_valid_code=f'(not {self._is_valid_code})',
is_valid_code_locals=self._is_valid_code_locals,
get_repr=lambda: f'~{repr(self)}',
)
# ..................{ DUNDERS ~ str }..................
@callable_cached
def __repr__(self) -> str:
'''
Machine-readable representation of this validator.
This function is memoized for efficiency.
Warns
----------
BeartypeValeLambdaWarning
If this validator is implemented as a pure-Python lambda function
whose definition is *not* parsable from the script or module
defining that lambda.
'''
# Fight the dark power with... power.
return self._get_repr()
``` |
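The overloaded ``__and__``, ``__or__``, and ``__invert__`` dunders above are what make these validators composable. The following is a minimal usage sketch, not taken from this file: it assumes beartype >= 0.6 (where the public `beartype.vale.Is` factory wraps this class) and Python 3.9+ for `typing.Annotated`.
```python
# Minimal sketch (assumes beartype >= 0.6 and Python 3.9+); names are made up.
from typing import Annotated

from beartype import beartype
from beartype.vale import Is

# Each subscription of "Is" produces a validator object wrapping the lambda.
IsNonEmpty = Is[lambda text: bool(text)]
IsLower = Is[lambda text: text.islower()]

@beartype
def greet(name: Annotated[str, IsNonEmpty & ~IsLower]) -> str:
    # "&" conjoins two validators and "~" negates one, exactly as implemented
    # by the __and__ and __invert__ dunders shown above.
    return f"Hello, {name}!"

print(greet("Jonathan"))   # passes both validators
# greet("jonathan")        # would raise a beartype type-checking exception
```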
{
"source": "jonathanmorley/HR-XSL",
"score": 2
} |
#### File: dbtexmf/core/error.py
```python
import sys
import traceback
class ErrorHandler(object):
"""
    Object in charge of handling any error that occurs during the dblatex
transformation process. The first mandatory argument is the <object>
that signaled the error.
"""
def __init__(self):
pass
def signal(self, object, *args, **kwargs):
failure_track("Unexpected error occured")
_current_handler = None
_dump_stack = False
#
# Dblatex Error Handler API
#
# In a complex use of the API, a locking mechanism (thread.lock) should
# be used. The current implementation assumes that setup is done before
# any get().
#
def get_errhandler():
global _current_handler
# If nothing set, use a default handler that does nothing
if not(_current_handler):
_current_handler = ErrorHandler()
return _current_handler
def set_errhandler(handler):
global _current_handler
if not(isinstance(handler, ErrorHandler)):
raise ValueError("%s is not an ErrorHandler" % handler)
_current_handler = handler
def signal_error(*args, **kwargs):
get_errhandler().signal(*args, **kwargs)
def failure_track(msg):
global _dump_stack
print >>sys.stderr, (msg)
if _dump_stack:
traceback.print_exc()
def failed_exit(msg, rc=1):
failure_track(msg)
sys.exit(rc)
def dump_stack():
global _dump_stack
_dump_stack = True
```
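A hypothetical way to customize the error-handling API above: register an `ErrorHandler` subclass so that any `signal_error()` call aborts the run instead of only printing a message. The handler name and exit code are illustrative assumptions (note the module itself targets Python 2).
```python
# Sketch only: a stricter handler plugged into the API defined above.
from dbtexmf.core import error

class StrictHandler(error.ErrorHandler):
    def signal(self, object, *args, **kwargs):
        # Abort immediately with a non-zero return code.
        error.failed_exit("error signaled by %s" % object, rc=2)

error.dump_stack()                    # also print tracebacks on failures
error.set_errhandler(StrictHandler())
error.signal_error("some-module")     # now exits the process with rc=2
```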
#### File: dblatex/grubber/makeidx.py
```python
import sys
from index import Index
class Module (Index):
def __init__ (self, doc, dict):
"""
Initialize the module, checking if there is already an index.
"""
Index.__init__(self, doc, "idx", "ind", "ilg")
```
#### File: dblatex/grubber/maker.py
```python
import os
import time
import subprocess
from msg import _, msg
class Depend (object): #{{{2
"""
This is a base class to represent file dependencies. It provides the base
functionality of date checking and recursive making, supposing the
existence of a method `run()' in the object. This method is supposed to
rebuild the files of this node, returning zero on success and something
else on failure.
"""
def __init__ (self, env, prods=None, sources={}, loc={}):
"""
Initialize the object for a given set of output files and a given set
of sources. The argument `prods' is a list of file names, and the
argument `sources' is a dictionary that associates file names with
dependency nodes. The optional argument `loc' is a dictionary that
describes where in the sources this dependency was created.
"""
self.env = env
if prods:
self.prods = prods
else:
self.prods = []
self.set_date()
self.sources = sources
self.making = 0
self.failed_dep = None
self.loc = loc
def set_date (self):
"""
Define the date of the last build of this node as that of the most
recent file among the products. If some product does not exist or
        there are no products, the date is set to None.
"""
if self.prods == []:
# This is a special case used in rubber.Environment
self.date = None
else:
try:
# We set the node's date to that of the most recently modified
# product file, assuming all other files were up to date then
# (though not necessarily modified).
self.date = max(map(os.path.getmtime, self.prods))
except OSError:
# If some product file does not exist, set the last
# modification date to None.
self.date = None
def should_make (self):
"""
Check the dependencies. Return true if this node has to be recompiled,
i.e. if some dependency is modified. Nothing recursive is done here.
"""
if not self.date:
return 1
for src in self.sources.values():
if src.date > self.date:
return 1
return 0
def make (self, force=0):
"""
Make the destination file. This recursively makes all dependencies,
then compiles the target if dependencies were modified. The semantics
of the return value is the following:
- 0 means that the process failed somewhere (in this node or in one of
its dependencies)
- 1 means that nothing had to be done
- 2 means that something was recompiled (therefore nodes that depend
on this one have to be remade)
"""
if self.making:
print "FIXME: cyclic make"
return 1
self.making = 1
# Make the sources
self.failed_dep = None
must_make = force
for src in self.sources.values():
ret = src.make()
if ret == 0:
self.making = 0
self.failed_dep = src.failed_dep
return 0
if ret == 2:
must_make = 1
# Make this node if necessary
if must_make or self.should_make():
if force:
ret = self.force_run()
else:
ret = self.run()
if ret:
self.making = 0
self.failed_dep = self
return 0
# Here we must take the integer part of the value returned by
# time.time() because the modification times for files, returned
# by os.path.getmtime(), is an integer. Keeping the fractional
# part could lead to errors in time comparison with the main log
# file when the compilation of the document is shorter than one
# second...
self.date = int(time.time())
self.making = 0
return 2
self.making = 0
return 1
def force_run (self):
"""
This method is called instead of 'run' when rebuilding this node was
forced. By default it is equivalent to 'run'.
"""
return self.run()
def failed (self):
"""
Return a reference to the node that caused the failure of the last
call to "make". If there was no failure, return None.
"""
return self.failed_dep
def get_errors (self):
"""
Report the errors that caused the failure of the last call to run.
"""
if None:
yield None
def clean (self):
"""
Remove the files produced by this rule and recursively clean all
dependencies.
"""
for file in self.prods:
if os.path.exists(file):
msg.log(_("removing %s") % file)
os.unlink(file)
for src in self.sources.values():
src.clean()
self.date = None
def reinit (self):
"""
        Reinitialization depends on the actual dependency leaf
"""
pass
def leaves (self):
"""
Return a list of all source files that are required by this node and
cannot be built, i.e. the leaves of the dependency tree.
"""
if self.sources == {}:
return self.prods
ret = []
for dep in self.sources.values():
ret.extend(dep.leaves())
return ret
class DependLeaf (Depend): #{{{2
"""
This class specializes Depend for leaf nodes, i.e. source files with no
dependencies.
"""
def __init__ (self, env, *dest, **args):
"""
Initialize the node. The arguments of this method are the file
names, since one single node may contain several files.
"""
Depend.__init__(self, env, prods=list(dest), **args)
def run (self):
# FIXME
if len(self.prods) == 1:
msg.error(_("%r does not exist") % self.prods[0], **self.loc)
else:
msg.error(_("one of %r does not exist") % self.prods, **self.loc)
return 1
def clean (self):
pass
class DependShell (Depend): #{{{2
"""
This class specializes Depend for generating files using shell commands.
"""
def __init__ (self, env, cmd, **args):
Depend.__init__(self, env, **args)
self.cmd = cmd
def run (self):
msg.progress(_("running %s") % self.cmd[0])
rc = subprocess.call(self.cmd, stdout=msg.stdout)
if rc != 0:
msg.error(_("execution of %s failed") % self.cmd[0])
return 1
return 0
class Maker:
"""
Very simple builder environment. Much simpler than the original rubber
Environment.
"""
def __init__(self):
self.dep_nodes = []
def dep_last(self):
if not(self.dep_nodes):
return None
else:
return self.dep_nodes[-1]
def dep_append(self, dep):
self.dep_nodes.append(dep)
def make(self, force=0):
if not(self.dep_nodes):
return 0
# Just ask the last one to compile
rc = self.dep_nodes[-1].make(force=force)
if (rc == 0):
return -1
else:
return 0
def reinit(self):
# Forget the old dependency nodes
self.__init__()
```
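To show how the classes above fit together, here is a hypothetical two-node build: a leaf source file and a shell rule that regenerates an output from it. The pandoc command and file names are assumptions; `env` is only stored by `Depend`, so any placeholder object works.
```python
# Sketch: chaining one leaf and one shell rule through Maker (names assumed).
from maker import Maker, DependLeaf, DependShell

maker = Maker()
env = object()  # Depend only stores "env", so any placeholder will do here

src = DependLeaf(env, "report.md")             # leaf: cannot be rebuilt
html = DependShell(
    env,
    ["pandoc", "report.md", "-o", "report.html"],
    prods=["report.html"],
    sources={"report.md": src},
)
maker.dep_append(html)

rc = maker.make()   # 0 on success, -1 on failure (see Maker.make above)
```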
#### File: dblatex/grubber/msg.py
```python
import os, os.path
import sys
import logging
def _(txt): return txt
class Message (object):
"""
All messages in the program are output using the `msg' object in the
main package. This class defines the interface for this object.
"""
def __init__ (self, level=1, write=None):
"""
Initialize the object with the specified verbosity level and an
optional writing function. If no such function is specified, no
message will be output until the 'write' field is changed.
"""
self.level = level
self.write = self.write_stdout
if write:
self.write = write
self.short = 0
self.path = ""
self.cwd = "./"
self.pos = []
self._log = logging.getLogger("dblatex")
level = self._log.getEffectiveLevel()
if level >= logging.WARNING:
self.stdout = open(os.devnull, "w")
else:
self.stdout = None
def write_stdout(self, text, level=0):
print text
def write_stderr(self, text, level=0):
print >>sys.stderr, text
def push_pos (self, pos):
self.pos.append(pos)
def pop_pos (self):
del self.pos[-1]
def __call__ (self, level, text):
"""
This is the low level printing function, it receives a line of text
with an associated verbosity level, so that output can be filtered
depending on command-line options.
"""
if self.write and level <= self.level:
self.write(text, level=level)
def display (self, kind, text, **info):
"""
Print an error or warning message. The argument 'kind' indicates the
kind of message, among "error", "warning", "abort", the argument
'text' is the main text of the message, the other arguments provide
additional information, including the location of the error.
"""
if kind == "error":
if text[0:13] == "LaTeX Error: ":
text = text[13:]
self._log.error(self.format_pos(info, text))
if info.has_key("code") and info["code"] and not self.short:
self._log.error(self.format_pos(info,
_("leading text: ") + info["code"]))
elif kind == "abort":
if self.short:
msg = _("compilation aborted ") + info["why"]
else:
msg = _("compilation aborted: %s %s") % (text, info["why"])
self._log.error(self.format_pos(info, msg))
# elif kind == "warning":
# self._log.warning(self.format_pos(info, text))
def error (self, text, **info):
self.display(kind="error", text=text, **info)
def warn (self, what, **where):
self._log.warning(self.format_pos(where, what))
def progress (self, what, **where):
self._log.info(self.format_pos(where, what + "..."))
def info (self, what, **where):
self._log.info(self.format_pos(where, what))
def log (self, what, **where):
self._log.debug(self.format_pos(where, what))
def debug (self, what, **where):
self._log.debug(self.format_pos(where, what))
def format_pos (self, where, text):
"""
Format the given text into a proper error message, with file and line
information in the standard format. Position information is taken from
the dictionary given as first argument.
"""
if len(self.pos) > 0:
if where is None or not where.has_key("file"):
where = self.pos[-1]
elif where is None or where == {}:
return text
if where.has_key("file") and where["file"] is not None:
pos = self.simplify(where["file"])
if where.has_key("line") and where["line"]:
pos = "%s:%d" % (pos, int(where["line"]))
if where.has_key("last"):
if where["last"] != where["line"]:
pos = "%s-%d" % (pos, int(where["last"]))
pos = pos + ": "
else:
pos = ""
if where.has_key("page"):
text = "%s (page %d)" % (text, int(where["page"]))
if where.has_key("pkg"):
text = "[%s] %s" % (where["pkg"], text)
return pos + text
def simplify (self, name):
"""
        Simplify a path name by removing the current directory if the
specified path is in a subdirectory.
"""
path = os.path.normpath(os.path.join(self.path, name))
if path[:len(self.cwd)] == self.cwd:
return path[len(self.cwd):]
return path
def display_all (self, generator, writer=None):
if writer:
write = self.write
self.write = writer
something = 0
for msg in generator:
self.display(**msg)
something = 1
if writer:
self.write = write
return something
msg = Message()
```
#### File: dblatex/grubber/plugins.py
```python
import imp
from os.path import *
from msg import _, msg
import sys
class TexModule (object):
"""
This is the base class for modules. Each module should define a class
named 'Module' that derives from this one. The default implementation
provides all required methods with no effects.
"""
def __init__ (self, env, dict):
"""
The constructor receives two arguments: 'env' is the compiling
environment, 'dict' is a dictionary that describes the command that
caused the module to load.
"""
def pre_compile (self):
"""
This method is called before the first LaTeX compilation. It is
supposed to build any file that LaTeX would require to compile the
document correctly. The method must return true on failure.
"""
return 0
def post_compile (self):
"""
This method is called after each LaTeX compilation. It is supposed to
process the compilation results and possibly request a new
compilation. The method must return true on failure.
"""
return 0
def last_compile (self):
"""
This method is called after the last LaTeX compilation.
It is supposed to terminate the compilation for its specific needs.
The method must return true on failure.
"""
return 0
def clean (self):
"""
This method is called when cleaning the compiled files. It is supposed
to remove all the files that this modules generates.
"""
def command (self, cmd, args):
"""
This is called when a directive for the module is found in the source.
The method can raise 'AttributeError' when the directive does not
exist and 'TypeError' if the syntax is wrong. By default, when called
with argument "foo" it calls the method "do_foo" if it exists, and
fails otherwise.
"""
getattr(self, "do_" + cmd)(*args)
def get_errors (self):
"""
This is called if something has failed during an operation performed
by this module. The method returns a generator with items of the same
form as in LaTeXDep.get_errors.
"""
if None:
yield None
class Plugins (object):
"""
This class gathers operations related to the management of external Python
modules. Modules are requested through the `register' method, and
they are searched for first in the current directory, then in the
(possibly) specified Python package (using Python's path).
"""
def __init__ (self, path=None):
"""
Initialize the module set, possibly setting a path name in which
modules will be searched for.
"""
self.modules = {}
if not path:
self.path = [dirname(__file__)]
sys.path.append(self.path[0])
else:
self.path = path
def __getitem__ (self, name):
"""
Return the module object of the given name.
"""
return self.modules[name]
def register (self, name):
"""
Attempt to register a module with the specified name. If an
appropriate module is found, load it and store it in the object's
dictionary. Return 0 if no module was found, 1 if a module was found
and loaded, and 2 if the module was found but already loaded.
"""
if self.modules.has_key(name):
return 2
try:
file, path, descr = imp.find_module(name, [""])
except ImportError:
if not self.path:
return 0
try:
file, path, descr = imp.find_module(name, self.path)
except ImportError:
return 0
module = imp.load_module(name, file, path, descr)
file.close()
self.modules[name] = module
return 1
def clear(self):
"""
Empty the module table, unregistering every module registered. No
        modules are actually unloaded, however; this has no effect other than
        speeding up registration if the modules are loaded again.
"""
self.modules.clear()
class Modules (Plugins):
"""
This class gathers all operations related to the management of modules.
The modules are searched for first in the current directory, then as
    scripts in the 'modules' directory in the program's data directory, then
as a Python module in the package `rubber.latex'.
"""
def __init__ (self, env):
#Plugins.__init__(self, rubber.rules.latex.__path__)
Plugins.__init__(self)
self.env = env
self.objects = {}
self.commands = {}
def __getitem__ (self, name):
"""
Return the module object of the given name.
"""
return self.objects[name]
def has_key (self, name):
"""
Check if a given module is loaded.
"""
return self.objects.has_key(name)
def register (self, name, dict={}):
"""
Attempt to register a package with the specified name. If a module is
found, create an object from the module's class called `Module',
passing it the environment and `dict' as arguments, and execute all
delayed commands for this module. The dictionary describes the
command that caused the registration.
"""
if self.has_key(name):
msg.debug(_("module %s already registered") % name)
return 2
# First look for a script
moddir = ""
mod = None
for path in "", join(moddir, "modules"):
file = join(path, name + ".rub")
if exists(file):
mod = ScriptModule(self.env, file)
msg.log(_("script module %s registered") % name)
break
# Then look for a Python module
if not mod:
if Plugins.register(self, name) == 0:
msg.debug(_("no support found for %s") % name)
return 0
mod = self.modules[name].Module(self.env, dict)
msg.log(_("built-in module %s registered") % name)
# Run any delayed commands.
if self.commands.has_key(name):
for (cmd, args, vars) in self.commands[name]:
msg.push_pos(vars)
try:
mod.command(cmd, args)
except AttributeError:
msg.warn(_("unknown directive '%s.%s'") % (name, cmd))
except TypeError:
msg.warn(_("wrong syntax for '%s.%s'") % (name, cmd))
msg.pop_pos()
del self.commands[name]
self.objects[name] = mod
return 1
def clear (self):
"""
Unregister all modules.
"""
Plugins.clear(self)
self.objects = {}
self.commands = {}
def command (self, mod, cmd, args):
"""
Send a command to a particular module. If this module is not loaded,
        store the command so that it will be sent when the module is registered.
"""
if self.objects.has_key(mod):
self.objects[mod].command(cmd, args)
else:
if not self.commands.has_key(mod):
self.commands[mod] = []
self.commands[mod].append((cmd, args, self.env.vars.copy()))
```
#### File: dblatex/grubber/ps2pdf.py
```python
import sys
import os
from msg import _, msg
from maker import DependShell
from plugins import TexModule
class Module (TexModule):
def __init__ (self, doc, dict):
env = doc.env
ps = env.dep_last().prods[0]
root, ext = os.path.splitext(ps)
if ext != ".ps":
msg.error(_("I can't use ps2pdf when not producing a PS"))
sys.exit(2)
pdf = root + ".pdf"
cmd = ["ps2pdf"]
for opt in doc.paper.split():
cmd.append("-sPAPERSIZE=" + opt)
cmd.extend([ps, pdf])
dep = DependShell(env, cmd, prods=[pdf], sources={ ps: env.dep_last() })
env.dep_append(dep)
```
#### File: dblatex/grubber/util.py
```python
try:
import hashlib
except ImportError:
# Fallback for python 2.4:
import md5 as hashlib
import os
from msg import _, msg
def md5_file(fname):
"""
Compute the MD5 sum of a given file.
"""
m = hashlib.md5()
file = open(fname)
for line in file.readlines():
m.update(line)
file.close()
return m.digest()
class Watcher:
"""
    Watch for any changes in the surveyed files by checking their MD5 sums.
"""
def __init__(self):
self.files = {}
def watch(self, file):
if os.path.exists(file):
self.files[file] = md5_file(file)
else:
self.files[file] = None
def update(self):
"""
        Update the MD5 sums of all watched files and return the list of files
        that changed (an empty list if none changed).
"""
changed = []
for file in self.files.keys():
if os.path.exists(file):
new = md5_file(file)
if self.files[file] != new:
msg.debug(_("%s MD5 checksum changed") % \
os.path.basename(file))
changed.append(file)
self.files[file] = new
return changed
```
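A hypothetical use of the `Watcher` class above, in the spirit of deciding whether another LaTeX pass is needed (file names are made up):
```python
# Sketch: detect whether auxiliary files changed between two LaTeX passes.
from util import Watcher

watcher = Watcher()
watcher.watch("report.aux")
watcher.watch("report.idx")

# ... run one latex pass here ...

changed = watcher.update()
if changed:
    print("Changed since last pass: %s" % ", ".join(changed))
```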
#### File: dbtexmf/dblatex/runtex.py
```python
import os
import re
import shutil
from grubber.texbuilder import LatexBuilder
class RunLatex:
def __init__(self):
self.fig_paths = []
self.index_style = ""
self.backend = "pdftex"
self.texpost = ""
self.texer = LatexBuilder()
def set_fig_paths(self, paths):
# Assume the paths are already absolute
if not(paths):
return
# Use TEXINPUTS to handle paths containing spaces
paths_blank = []
paths_input = []
for p in paths:
if p.find(" ") != -1:
paths_blank.append(p + "//")
else:
paths_input.append(p)
if paths_blank:
texinputs = os.pathsep.join(paths_blank)
os.environ["TEXINPUTS"] = os.getenv("TEXINPUTS") + os.pathsep + \
texinputs
paths = paths_input
# Unixify the paths when under Windows
if os.sep != "/":
paths = [p.replace(os.sep, "/") for p in paths]
# Protect from tilde active char (maybe others?)
self.fig_paths = [p.replace("~", r"\string~") for p in paths]
def set_bib_paths(self, bibpaths, bstpaths=None):
# Just set BIBINPUTS and/or BSTINPUTS
if bibpaths:
os.environ["BIBINPUTS"] = os.pathsep.join(bibpaths +
[os.getenv("BIBINPUTS", "")])
if bstpaths:
os.environ["BSTINPUTS"] = os.pathsep.join(bstpaths +
[os.getenv("BSTINPUTS", "")])
def set_backend(self, backend):
if not(backend in ("dvips", "pdftex", "xetex")):
raise ValueError("'%s': invalid backend" % backend)
self.backend = backend
def _clear_params(self):
self._param_started = 0
self._param_ended = 0
self._params = {}
def _set_params(self, line):
# FIXME
if self._param_ended:
return
if not(self._param_started):
if line.startswith("%%<params>"): self._param_started = 1
return
if line.startswith("%%</params>"):
self._param_ended = 1
return
p = line.split()
self._params[p[1]] = p[2]
def compile(self, texfile, binfile, format, batch=1):
root = os.path.splitext(texfile)[0]
tmptex = root + "_tmp" + ".tex"
texout = root + "." + format
# The temporary file contains the extra paths
f = file(tmptex, "w")
if self.fig_paths:
paths = "{" + "//}{".join(self.fig_paths) + "//}"
f.write("\\makeatletter\n")
f.write("\\def\\input@path{%s}\n" % paths)
f.write("\\makeatother\n")
# Copy the original file and collect parameters embedded in the tex file
self._clear_params()
input = file(texfile)
for line in input:
self._set_params(line)
f.write(line)
f.close()
input.close()
# Replace the original file with the new one
shutil.move(tmptex, texfile)
# Build the output file
try:
self.texer.batch = batch
self.texer.texpost = self.texpost
self.texer.encoding = self._params.get("latex.encoding", "latin-1")
self.texer.set_format(format)
self.texer.set_backend(self.backend)
if self.index_style:
self.texer.set_index_style(self.index_style)
self.texer.compile(texfile)
except:
# On error, dump the log errors and raise again
self.texer.print_errors()
raise
if texout != binfile:
shutil.move(texout, binfile)
def clean(self):
self.texer.clean()
def reinit(self):
self.texer.reinit()
```
#### File: dblatex/xetex/fcfallback.py
```python
from fontspec import FontSpec
from fcmanager import FcManager
class DefaultFontSpec(FontSpec):
"""
The default fontspec gives priority to its children, and
contains any character.
"""
def __init__(self):
FontSpec.__init__(self, subfont_first=True)
def contains(self, char):
return True
class FcFallbackFontSpec(DefaultFontSpec):
"""
Default fontspec that finds fonts from fontconfig
if the preexisting fontspecs don't match.
Currently this class is the only interface between the
two worlds (fontspec and fontconfig).
"""
def __init__(self):
DefaultFontSpec.__init__(self)
self.fcmanager = FcManager()
self.fccache = {}
self.fcmissed = []
try:
self.fcmanager.build_fonts(partial=True)
except:
self.fcmanager = None
def _loghas(self, id, char):
pass
def _loghas2(self, id, char):
DefaultFontSpec._loghas(self, id, char)
def match(self, char, excluded=None):
fontspec = DefaultFontSpec.match(self, char, excluded)
if fontspec != self or not(self.fcmanager):
self._loghas2(fontspec.id, char)
return fontspec
if self.isignored(char):
self._loghas2(self.id, char)
return self
# Scan again the predefined fontspecs and check with fontconfig
# if their charset can be extended
for fontspec in self.fontspecs:
if fontspec in self.fcmissed:
print "missed!"
continue
fcfont = self.fccache.get(fontspec.mainfont()) or \
self.fcmanager.get_font(fontspec.mainfont())
if not(fcfont):
self.fcmissed.append(fontspec)
continue
if fcfont.has_char(char):
fontspec.add_char(char)
self._loghas2(fontspec.id + "[fc]", char)
return fontspec
# Find the first fcfont that has this char in its charset
fcfont = self.fcmanager.get_font_handling(char)
if not(fcfont):
self._loghas2(self.id + "[?fc]", char)
return self
# Extend the fontspec group
fontspec = self.spawn_fontspec_from_fcfont(fcfont, char)
self._loghas2(fontspec.id + "[A fc]", char)
return fontspec
def spawn_fontspec_from_fcfont(self, fcfont, char):
print "New fontspec '%s' matching U%X from fontconfig"\
% (fcfont.family, ord(char))
# Create a new font
fontspec = FontSpec()
fontspec.id = fcfont.family
fontspec.transitions["enter"]["main"] = fcfont.family
fontspec.add_char(char)
# Register the font and its related fontconfig object
self.fccache[fcfont.name] = fcfont
self.add_subfont(fontspec)
return fontspec
```
#### File: dbtexmf/xslt/xsltproc.py
```python
import os
import logging
import re
from subprocess import call, Popen, PIPE
class XsltProc:
def __init__(self):
self.catalogs = os.getenv("SGML_CATALOG_FILES")
self.use_catalogs = 1
self.log = logging.getLogger("dblatex")
self.run_opts = ["--xinclude"]
# If --xincludestyle is supported we *must* use it to support external
# listings (see mklistings.xsl and pals)
if self._has_xincludestyle():
self.run_opts.append("--xincludestyle")
def get_deplist(self):
return ["xsltproc"]
def run(self, xslfile, xmlfile, outfile, opts=None, params=None):
cmd = ["xsltproc", "-o", outfile] + self.run_opts
if self.use_catalogs and self.catalogs:
cmd.append("--catalogs")
if params:
for param, value in params.items():
cmd += ["--param", param, "'%s'" % value]
if opts:
cmd += opts
cmd += [xslfile, xmlfile]
self.system(cmd)
def system(self, cmd):
self.log.debug(" ".join(cmd))
rc = call(cmd)
if rc != 0:
raise ValueError("xsltproc failed")
def _has_xincludestyle(self):
        # Check the help output to see whether the option is listed
p = Popen(["xsltproc"], stdout=PIPE)
data = p.communicate()[0]
m = re.search("--xincludestyle", data, re.M)
if not(m):
return False
else:
return True
class Xslt(XsltProc):
"Plugin Class to load"
``` |
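A hypothetical call to the wrapper above. It assumes the `xsltproc` binary is on the PATH; the stylesheet and file names are invented for illustration.
```python
# Sketch: transform a DocBook document with the XsltProc wrapper above.
from dbtexmf.xslt.xsltproc import Xslt

engine = Xslt()
engine.run(
    "docbook-to-latex.xsl",             # stylesheet
    "manual.xml",                       # input document
    "manual.tex",                       # output file
    params={"latex.encoding": "utf8"},  # forwarded as --param key 'value'
)
```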
{
"source": "JonathanMortlock/RPi-Beam-Profiler",
"score": 3
} |
#### File: RPiBeamProfilerApp/beamprofiler/pkl_to_image.py
```python
import numpy as np
import matplotlib.pyplot as plt
import glob, time, sys
import cPickle as pickle
# update matplotlib fonts etc
plt.rc('font',**{'family':'Serif','serif':['Times New Roman']})
params={'axes.labelsize':13,'xtick.labelsize':12,'ytick.labelsize':12,'legend.fontsize': 11,'mathtext.fontset':'cm','mathtext.rm':'serif'}
plt.rcParams.update(params)
# Colours
d_purple=[126.0/255.0,49.0/255.0,123.0/255.0] # Palatinate Purple 255C
d_black = [35./255,31./255,32./255] # Black BlackC
try:
cmap = plt.cm.inferno
except:
cmap = plt.cm.CMRmap
def main(filename):
# read-in data from pkl file
imgdata = pickle.load(open(filename, 'rb'))
X = np.arange(len(imgdata[0,:]))
Y = np.arange(len(imgdata[:,0]))
# x and y axis integrated slices
imgX = imgdata.sum(axis=0,dtype=float)
imgY = imgdata.sum(axis=1,dtype=float)
# Set up figure
fig = plt.figure()
axmain = plt.subplot2grid((6,6),(0,0),colspan=5, rowspan=5)
axH = plt.subplot2grid((6,6),(5,0),colspan=5, rowspan=1, sharex=axmain)
axV = plt.subplot2grid((6,6),(0,5),colspan=1, rowspan=5, sharey=axmain)
axmain.imshow(imgdata,aspect='auto',extent=[0, len(imgX), len(imgY), 0], cmap=cmap)
axH.plot(X, imgX/imgX.max(), 'o', mec=d_purple, ms=5, mfc=None)
axV.plot(imgY/imgY.max(), Y, 'o', mec=d_purple, ms=5, mfc=None)
# Format figure
axH.set_xlabel('X position (px)')
axV.set_ylabel('Y position (px)')
axV.yaxis.set_label_position('right')
plt.setp(axmain.get_xticklabels(),visible=False)
plt.setp(axV.get_yticklabels(),visible=False)
axmain.tick_params(direction='in',color='w',bottom=1,top=1,left=1,right=1)
axH.tick_params(direction='in',bottom=1,top=1,left=1,right=1,color=2*np.array(d_black))
axV.tick_params(direction='in',bottom=1,top=1,left=1,right=1,color=2*np.array(d_black))
# Fill the figure canvas with the plot panels
plt.tight_layout()
# Save
plt.savefig(filename[:-4]+'.png', dpi=300)
plt.savefig(filename[:-4]+'.pdf')
if __name__ == '__main__':
main(sys.argv[1])
``` |
{
"source": "jonathan-moulds-sb/aws-research-workshops",
"score": 2
} |
#### File: aws-research-workshops/lib/workshop.py
```python
import logging
import os
import time
import boto3
import argparse
import botocore.session
import botocore.exceptions
import uuid
import sys
import tarfile
from six.moves import urllib
from dateutil import parser
def create_and_configure_vpc(tag='research-workshop'):
"""Create VPC"""
ec2 = boto3.resource('ec2')
ec2_client = boto3.client('ec2')
session = boto3.session.Session()
region = session.region_name
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
vpc.modify_attribute(EnableDnsSupport={'Value':True})
vpc.modify_attribute(EnableDnsHostnames={'Value':True})
tag = vpc.create_tags(
Tags=[
{
'Key': 'Name',
'Value': tag
},
])
subnet = vpc.create_subnet(CidrBlock='10.0.0.0/24', AvailabilityZone=region + 'a')
subnet.meta.client.modify_subnet_attribute(
SubnetId=subnet.id,
MapPublicIpOnLaunch={"Value": True}
)
subnet2 = vpc.create_subnet(CidrBlock='10.0.1.0/24', AvailabilityZone=region + 'b')
subnet2.meta.client.modify_subnet_attribute(SubnetId=subnet2.id, MapPublicIpOnLaunch={"Value": True})
igw = ec2.create_internet_gateway()
igw.attach_to_vpc(VpcId=vpc.id)
public_route_table = list(vpc.route_tables.all())[0]
# add a default route, for Public Subnet, pointing to Internet Gateway
ec2_client.create_route(RouteTableId=public_route_table.id,DestinationCidrBlock='0.0.0.0/0',GatewayId=igw.id)
public_route_table.associate_with_subnet(SubnetId=subnet.id)
public_route_table.associate_with_subnet(SubnetId=subnet2.id)
return vpc, subnet, subnet2
def vpc_cleanup(vpcid):
"""Cleanup VPC"""
print('Removing VPC ({}) from AWS'.format(vpcid))
ec2 = boto3.resource('ec2')
ec2_client = ec2.meta.client
vpc = ec2.Vpc(vpcid)
# detach default dhcp_options if associated with the vpc
dhcp_options_default = ec2.DhcpOptions('default')
if dhcp_options_default:
dhcp_options_default.associate_with_vpc(
VpcId=vpc.id
)
# detach and delete all gateways associated with the vpc
for gw in vpc.internet_gateways.all():
vpc.detach_internet_gateway(InternetGatewayId=gw.id)
gw.delete()
# delete all route table associations
for rt in vpc.route_tables.all():
if not rt.associations:
rt.delete()
else:
for rta in rt.associations:
if not rta.main:
rta.delete()
# delete any instances
for subnet in vpc.subnets.all():
for instance in subnet.instances.all():
instance.terminate()
# delete our endpoints
for ep in ec2_client.describe_vpc_endpoints(
Filters=[{
'Name': 'vpc-id',
'Values': [vpcid]
}])['VpcEndpoints']:
ec2_client.delete_vpc_endpoints(VpcEndpointIds=[ep['VpcEndpointId']])
# delete our security groups
for sg in vpc.security_groups.all():
if sg.group_name != 'default':
sg.delete()
# delete any vpc peering connections
for vpcpeer in ec2_client.describe_vpc_peering_connections(
Filters=[{
'Name': 'requester-vpc-info.vpc-id',
'Values': [vpcid]
}])['VpcPeeringConnections']:
ec2.VpcPeeringConnection(vpcpeer['VpcPeeringConnectionId']).delete()
# delete non-default network acls
for netacl in vpc.network_acls.all():
if not netacl.is_default:
netacl.delete()
# delete network interfaces
for subnet in vpc.subnets.all():
for interface in subnet.network_interfaces.all():
interface.delete()
subnet.delete()
# finally, delete the vpc
ec2_client.delete_vpc(VpcId=vpcid)
def get_latest_amazon_linux():
"""Search EC2 Images for Amazon Linux"""
ec2_client = boto3.client('ec2')
filters = [ {
'Name': 'name',
'Values': ['amzn-ami-hvm-*']
},{
'Name': 'description',
'Values': ['Amazon Linux AMI*']
},{
'Name': 'architecture',
'Values': ['x86_64']
},{
'Name': 'owner-alias',
'Values': ['amazon']
},{
'Name': 'owner-id',
'Values': ['137112412989']
},{
'Name': 'state',
'Values': ['available']
},{
'Name': 'root-device-type',
'Values': ['ebs']
},{
'Name': 'virtualization-type',
'Values': ['hvm']
},{
'Name': 'hypervisor',
'Values': ['xen']
},{
'Name': 'image-type',
'Values': ['machine']
} ]
response = ec2_client.describe_images(Owners=['amazon'], Filters=filters)
source_image = newest_image(response['Images'])
return source_image['ImageId']
def newest_image(list_of_images):
"""Get Newest Amazon Linux Image from list"""
latest = None
for image in list_of_images:
if not latest:
latest = image
continue
if parser.parse(image['CreationDate']) > parser.parse(latest['CreationDate']):
latest = image
return latest
def create_role(iam, policy_name, assume_role_policy_document, inline_policy_name=None, policy_str=None, managed_policy=None):
"""Creates a new role if there is not already a role by that name"""
if role_exists(iam, policy_name):
logging.info('Role "%s" already exists. Assuming correct values.', policy_name)
return get_role_arn(iam, policy_name)
else:
response = iam.create_role(RoleName=policy_name,
AssumeRolePolicyDocument=assume_role_policy_document)
if policy_str is not None:
iam.put_role_policy(RoleName=policy_name,
PolicyName=inline_policy_name, PolicyDocument=policy_str)
if managed_policy is not None:
iam.attach_role_policy(RoleName=policy_name, PolicyArn=managed_policy)
logging.info('response for creating role = "%s"', response)
return response['Role']['Arn']
def role_exists(iam, role_name):
"""Checks if the role exists already"""
try:
iam.get_role(RoleName=role_name)
except botocore.exceptions.ClientError:
return False
return True
def get_role_arn(iam, role_name):
"""Gets the ARN of role"""
response = iam.get_role(RoleName=role_name)
return response['Role']['Arn']
def create_bucket_name(bucket_prefix):
# The generated bucket name must be between 3 and 63 chars long
return ''.join([bucket_prefix, str(uuid.uuid4())])
def create_bucket(region, session, bucket_prefix):
bucket = create_bucket_name(bucket_prefix)
if region != 'us-east-1':
session.resource('s3').create_bucket(Bucket=bucket, CreateBucketConfiguration={'LocationConstraint': region})
else:
session.resource('s3').create_bucket(Bucket=bucket)
return bucket
def delete_bucket_completely(bucket_name):
"""Remove all objects from S3 bucket and delete"""
client = boto3.client('s3')
response = client.list_objects_v2(
Bucket=bucket_name,
)
while response['KeyCount'] > 0:
print('Deleting %d objects from bucket %s' % (len(response['Contents']),bucket_name))
response = client.delete_objects(
Bucket=bucket_name,
Delete={
'Objects':[{'Key':obj['Key']} for obj in response['Contents']]
}
)
response = client.list_objects_v2(
Bucket=bucket_name,
)
print('Now deleting bucket %s' % bucket_name)
response = client.delete_bucket(
Bucket=bucket_name
)
def create_db(glue_client, account_id, database_name, description):
"""Create the specified Glue database if it does not exist"""
try:
glue_client.get_database(
CatalogId=account_id,
Name=database_name
)
except glue_client.exceptions.EntityNotFoundException:
print("Creating database: %s" % database_name)
glue_client.create_database(
CatalogId=account_id,
DatabaseInput={
'Name': database_name,
'Description': description
}
)
``` |
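The helpers above are intended to be called from the workshop notebooks. A hypothetical end-to-end sequence, assuming boto3 credentials and a default region are already configured (the tag and bucket prefix are made up):
```python
# Sketch: provision, use, and tear down workshop resources with the helpers above.
import boto3
from lib import workshop

session = boto3.session.Session()
region = session.region_name

vpc, subnet1, subnet2 = workshop.create_and_configure_vpc(tag='demo-workshop')

bucket = workshop.create_bucket(region, session, 'research-demo-')
print('Created bucket:', bucket)

# ... run the workshop steps here ...

workshop.delete_bucket_completely(bucket)
workshop.vpc_cleanup(vpc.id)
```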
{
"source": "JonathanMurray/dialog-tree-py",
"score": 3
} |
#### File: dialog-tree-py/dialog_tree/graph.py
```python
from typing import List, Optional, Dict
from constants import Vec2, Millis
class DialogChoice:
def __init__(self, text: str, leads_to_id: str):
self.text = text
self.leads_to_id = leads_to_id
class NodeGraphics:
def __init__(self, animation_id: Optional[str] = None, image_ids: Optional[List[str]] = None,
offset: Optional[Vec2] = None, screen_shake: Optional[Millis] = None,
instant_text: bool = False):
self.animation_id = animation_id
self.image_ids = image_ids
self.offset: Vec2 = offset or (0, 0)
self.screen_shake = screen_shake
self.instant_text = instant_text
class DialogNode:
def __init__(self, node_id: str, text: str, choices: List[DialogChoice], graphics: Optional[NodeGraphics] = None,
sound_id: Optional[str] = None):
if not node_id:
raise ValueError("Invalid node config (missing ID)")
self.node_id = node_id
self.text = text
self.choices = choices
self.graphics = graphics
self.sound_id = sound_id
class DialogGraph:
"""
A graph representation of a dialog
This class is very central. One instance represents a full dialog. It keeps track of where you are as you progress
through a dialog.
"""
def __init__(self, root_node_id: str, nodes: List[DialogNode], title: Optional[str] = None,
background_image_id: Optional[str] = None):
self.title = title
self.background_image_id = background_image_id
self._nodes_by_id: Dict[str, DialogNode] = {}
self._active_node_id = root_node_id
for node in nodes:
node_id = node.node_id
if node_id in self._nodes_by_id:
raise ValueError(f"Duplicate node ID found: {node_id}")
self._nodes_by_id[node_id] = node
for node in nodes:
for choice in node.choices:
if choice.leads_to_id not in self._nodes_by_id:
raise ValueError(
f"Dialog choice leading to missing node: {choice.leads_to_id}")
if root_node_id not in self._nodes_by_id:
raise ValueError(f"No node found with ID: {root_node_id}")
def current_node(self) -> DialogNode:
return self._nodes_by_id[self._active_node_id]
def make_choice(self, choice_index: int):
node = self._nodes_by_id[self._active_node_id]
self._active_node_id = node.choices[choice_index].leads_to_id
def nodes(self) -> List[DialogNode]:
""" Return the nodes of this graph as a list. Should not needed for normal usage,
but is used when visualizing the graph with graphviz. """
return list(self._nodes_by_id.values())
def __repr__(self):
return str(self.__dict__)
```
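A minimal sketch of building and traversing a `DialogGraph` with the classes above; the node IDs and texts are invented for illustration.
```python
# Sketch: a three-node dialog built from the classes defined above.
from graph import DialogChoice, DialogGraph, DialogNode

graph = DialogGraph(
    root_node_id="start",
    nodes=[
        DialogNode("start", "A stranger approaches. What do you do?",
                   [DialogChoice("Say hello", "greet"),
                    DialogChoice("Walk away", "end")]),
        DialogNode("greet", "The stranger smiles back.",
                   [DialogChoice("Leave", "end")]),
        DialogNode("end", "The encounter is over.", []),
    ],
    title="Demo dialog",
)

print(graph.current_node().text)     # "A stranger approaches. ..."
graph.make_choice(0)                 # pick "Say hello"
print(graph.current_node().node_id)  # "greet"
```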
#### File: dialog_tree/runners/graph_visualizer.py
```python
import os
import sys
from pathlib import Path
from graphviz import Digraph
from config_file import load_dialog_from_file
from graph import DialogGraph
from text_util import layout_text_in_area
TMP_DIR = Path(".tmpfiles")
def generate_graphviz(graph_name: str, dialog_graph: DialogGraph) -> Digraph:
graph = Digraph(
name=graph_name,
comment=f"generated with Graphviz from {graph_name}",
node_attr={"shape": "box", "style": "filled", "color": "#BBCCFF"},
edge_attr={"fontsize": "11"}
)
for node in dialog_graph.nodes():
node_label = _add_newlines(node.text, 30)
graph.node(node.node_id, node_label)
for choice in node.choices:
edge_label = _add_newlines(choice.text, 20)
graph.edge(node.node_id, choice.leads_to_id, label=edge_label)
return graph
def _add_newlines(text: str, max_chars_per_line: int) -> str:
lines_iterator = layout_text_in_area(text, len, max_chars_per_line)
text_with_newlines = next(lines_iterator)
for additional_line in lines_iterator:
text_with_newlines += f"\n{additional_line}"
return text_with_newlines
def _init_tmp_dir():
if not TMP_DIR.exists():
print(f"Creating directory: {TMP_DIR}")
os.mkdir(TMP_DIR)
for existing_file in [TMP_DIR.joinpath(f) for f in os.listdir(TMP_DIR)]:
print(f"Cleaning up old file: {existing_file}")
os.remove(existing_file)
def main():
args = sys.argv[1:]
dialog_filepath = args[0]
dialog_graph = load_dialog_from_file(dialog_filepath)
_init_tmp_dir()
graph = generate_graphviz(Path(dialog_filepath).name, dialog_graph)
graph.render(directory=TMP_DIR, view=True)
print(f"Saved rendered outputs in: {TMP_DIR}")
if __name__ == '__main__':
main()
```
#### File: dialog-tree-py/dialog_tree/timing.py
```python
from typing import Callable, Any
from constants import Millis
class PeriodicAction:
def __init__(self, cooldown: Millis, callback: Callable[[], Any]):
self._cooldown = cooldown
self._callback = callback
self._time_since_last_action = 0
def update(self, elapsed_time: Millis):
self._time_since_last_action += elapsed_time
if self._time_since_last_action >= self._cooldown:
self._time_since_last_action -= self._cooldown
self._callback()
```
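A small sketch of driving `PeriodicAction` from a frame loop, assuming elapsed time is fed in milliseconds as elsewhere in this project:
```python
# Sketch: a callback fired roughly every 500 ms of simulated game time.
from constants import Millis
from timing import PeriodicAction

blink = PeriodicAction(Millis(500), lambda: print("blink"))

elapsed_per_frame = Millis(16)  # ~60 FPS
for _ in range(100):
    blink.update(elapsed_per_frame)
```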
#### File: dialog-tree-py/dialog_tree/ui.py
```python
from abc import ABC
from random import randint
from typing import Tuple, List, Optional, Dict
import pygame
from pygame.font import Font
from pygame.rect import Rect
from pygame.surface import Surface
from constants import WHITE, GREEN, BLACK, Vec2, Vec3, Millis
from graph import DialogNode
from sound import SoundPlayer
from text_util import layout_text_in_area
from timing import PeriodicAction
class _Component(ABC):
def __init__(self, surface: Surface):
self.surface = surface
def update(self, elapsed_time: Millis):
pass
class _ScreenShake:
def __init__(self):
self.x = 0
self.y = 0
self._remaining = 0
def start(self, duration: Millis):
self._remaining = duration
def update(self, elapsed_time: Millis):
self._remaining = max(0, self._remaining - elapsed_time)
if self._remaining == 0:
self.x, self.y = 0, 0
else:
self.x, self.y = randint(-10, 10), randint(-10, 10)
class Ui:
"""The graphical user interface used for presenting a dialog on the screen"""
def __init__(self, surface: Surface, picture_size: Vec2, dialog_node: DialogNode, dialog_font: Font,
choice_font: Font, images: Dict[str, Surface], animations: Dict[str, List[Surface]], sound_player: SoundPlayer,
background: Optional[Surface], select_blip_sound_id: str):
self.surface = surface
self._picture_size = picture_size
self._width = surface.get_width()
self._dialog_font = dialog_font
self._choice_font = choice_font
self._images = images
self._animations = animations
self._sound_player = sound_player
self._background = background
self._select_blip_sound_id = select_blip_sound_id
# MUTABLE STATE BELOW
self._dialog_node = dialog_node
self._components: List[Tuple[_Component, Vec2]] = []
self._dialog_box = None
self._choice_buttons = []
self._highlighted_choice_index = 0
self._screen_shake = _ScreenShake()
self.set_dialog(dialog_node)
def set_dialog(self, dialog_node: DialogNode):
self._dialog_node = dialog_node
graphics = dialog_node.graphics
if graphics.image_ids:
animation = _Animation([self._images[i] for i in graphics.image_ids], graphics.offset)
else:
animation = _Animation(self._animations[graphics.animation_id], graphics.offset)
margin = 5
dialog_box_size = (self._width - margin * 2, 120)
self._components = [(_Picture(Surface(self._picture_size), self._background, animation), (0, 0))]
if dialog_node.text:
self._dialog_box = _TextBox(
self._dialog_font, dialog_box_size, dialog_node.text,
border_color=(150, 150, 150), text_color=(255, 255, 255), sound_player=self._sound_player)
self._components.append(
(self._dialog_box, (margin, self._picture_size[1] - dialog_box_size[1] - margin)))
if dialog_node.graphics.instant_text:
self._dialog_box.set_cursor_to_end()
self._choice_buttons = []
if graphics.screen_shake:
self._screen_shake.start(graphics.screen_shake)
def _add_choice_buttons(self):
button_height = 40
self._choice_buttons = [_ChoiceButton(self._choice_font, (self._width, button_height), choice.text)
for choice in self._dialog_node.choices]
self._highlighted_choice_index = 0
if self._choice_buttons:
self._choice_buttons[self._highlighted_choice_index].set_highlighted(True)
for i, button in enumerate(self._choice_buttons):
j = len(self._choice_buttons) - i
position = (0, self.surface.get_height() - button_height * j - 5 * j)
self._components.append((button, position))
def redraw(self):
self.surface.fill(BLACK)
dx, dy = (self._screen_shake.x, self._screen_shake.y)
for component, (x, y) in self._components:
self.surface.blit(component.surface, (x + dx, y + dy))
def update(self, elapsed_time: Millis):
self._screen_shake.update(elapsed_time)
for component, _ in self._components:
component.update(elapsed_time)
if self._dialog_box.is_cursor_at_end() and not self._choice_buttons:
self._add_choice_buttons()
def move_choice_highlight(self, delta: int):
if self._choice_buttons and len(self._choice_buttons) > 1:
new_index = (self._highlighted_choice_index + delta) % len(self._choice_buttons)
self.set_highlighted_choice(new_index)
def set_highlighted_choice(self, choice_index: int):
if choice_index != self._highlighted_choice_index:
self._sound_player.play(self._select_blip_sound_id)
self._choice_buttons[self._highlighted_choice_index].set_highlighted(False)
self._highlighted_choice_index = choice_index
self._choice_buttons[self._highlighted_choice_index].set_highlighted(True)
def highlighted_choice(self) -> Optional[int]:
if self._choice_buttons:
return self._highlighted_choice_index
def skip_text(self):
self._dialog_box.set_cursor_to_end()
def choice_button_at_position(self, target_position: Vec2) -> Optional[int]:
for choice_index, (component, position) in enumerate((c for c in self._components
if isinstance(c[0], _ChoiceButton))):
rect = Rect(position, component.surface.get_size())
if rect.collidepoint(target_position):
return choice_index
class _Animation:
def __init__(self, frames: List[Surface], offset: Vec2):
if not frames:
raise ValueError("Cannot instantiate animation without frames!")
self._frames = frames
self._frame_index = 0
self.offset = offset
def change_frame(self):
self._frame_index = (self._frame_index + 1) % len(self._frames)
def image(self) -> Surface:
return self._frames[self._frame_index]
class _Picture(_Component):
def __init__(self, surface: Surface, background: Optional[Surface], animation: _Animation):
super().__init__(surface)
self._background = background
self._animation = animation
self._redraw()
self._periodic_frame_change = PeriodicAction(Millis(130), self._change_frame)
def _redraw(self):
self.surface.fill(BLACK)
if self._background:
self.surface.blit(self._background, (0, 0))
self.surface.blit(self._animation.image(), self._animation.offset)
def _change_frame(self):
self._animation.change_frame()
self._redraw()
def update(self, elapsed_time: Millis):
self._periodic_frame_change.update(elapsed_time)
class _ChoiceButton(_Component):
def __init__(self, font: Font, size: Vec2, text: str, highlighted: bool = False):
super().__init__(Surface(size))
self._font = font
self._text = text
self._highlighted = highlighted
self._container_rect = Rect((0, 0), size)
self._redraw()
def _redraw(self):
self.surface.fill(BLACK)
if self._highlighted:
self.surface.fill((60, 60, 60), self._container_rect)
pygame.draw.rect(self.surface, GREEN, self._container_rect, width=2, border_radius=4)
else:
pygame.draw.rect(self.surface, WHITE, self._container_rect, width=1, border_radius=4)
rendered_text = self._font.render(self._text, True, WHITE)
text_position = (self._container_rect.x + (self._container_rect.w - rendered_text.get_width()) // 2,
self._container_rect.y + (self._container_rect.h - rendered_text.get_height()) // 2)
self.surface.blit(rendered_text, text_position)
def set_highlighted(self, highlighted: bool):
self._highlighted = highlighted
self._redraw()
class _TextBox(_Component):
def __init__(self, font: Font, size: Vec2, text: str, border_color: Vec3, text_color: Vec3,
sound_player: SoundPlayer):
super().__init__(Surface(size))
self.surface.set_alpha(180)
self._container_rect = Rect((0, 0), size)
pad = 30
self._text_area = self._container_rect.inflate(-pad, -pad)
self._font = font
self._border_color = border_color
self._text_color = text_color
self._sound_player = sound_player
self._lines = self._split_into_lines(text)
self._cursor = 0
self._max_cursor_position = max(0, len(text) - 1)
self._periodic_cursor_advance = PeriodicAction(Millis(40), self._advance_cursor)
self._redraw()
def _advance_cursor(self):
if self._cursor < self._max_cursor_position:
self._sound_player.play_text_blip()
self._cursor += 1
self._redraw()
def update(self, elapsed_time: Millis):
self._periodic_cursor_advance.update(elapsed_time)
def set_cursor_to_end(self):
self._cursor = self._max_cursor_position
self._redraw()
def is_cursor_at_end(self) -> bool:
return self._cursor == self._max_cursor_position
def _split_into_lines(self, text) -> List[str]:
return list(layout_text_in_area(text, lambda t: self._font.size(t)[0], self._text_area.width))
def _redraw(self):
self.surface.fill(BLACK)
pygame.draw.rect(self.surface, self._border_color, self._container_rect, width=1, border_radius=2)
x, y = self._text_area.topleft
num_chars_rendered = 0
for line in self._lines:
remaining = self._cursor + 1 - num_chars_rendered
if remaining <= 0:
return
part_of_line = line[:remaining]
rendered_line = self._font.render(part_of_line, True, self._text_color)
self.surface.blit(rendered_line, (x, y))
y += rendered_line.get_height()
num_chars_rendered += len(part_of_line)
``` |
{
"source": "jonathanmusila/ah-cli",
"score": 3
} |
#### File: ah-cli/app/articles.py
```python
import requests
import click
import pandas as pd
from halo import Halo
from pandas.io.json import json_normalize
spinner = Halo(text='Loading', spinner='dots', text_color='magenta')
url = "https://ah-django-staging.herokuapp.com/api"
@click.group()
def main():
"""
Simple CLI for consuming Authors Haven App 😍
"""
@main.command()
@click.option("--csv")
@click.argument("slug")
def get(slug, csv):
"""
    This returns a particular article for the given slug from the Authors Haven API
"""
url_format = url + "/articles/{}/"
click.echo(slug)
spinner.start()
response = requests.get(url_format.format(slug))
spinner.stop()
spinner.clear()
if response.status_code == 404:
spinner.warn("Article with that slug not found ❎")
click.echo("Status code: {}".format(response.status_code))
elif response.status_code == 200:
spinner.succeed("Article found ✅")
click.echo("Status code: {}".format(response.status_code))
click.echo(response.json())
if csv:
df = json_normalize(response.json())
df.to_csv('articles.csv')
@main.command()
@click.option("--articles",)
def get_list(articles):
"""
    This returns all the articles from the Authors Haven API
"""
url_format = url + "/articles/feed/"
spinner.start()
response = requests.get(url_format)
spinner.stop()
spinner.clear()
spinner.succeed("Done fetching articles ✅")
click.echo("Status code: {}".format(response.status_code))
click.echo(response.json())
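# Hedged usage sketch (not part of the original file): click's CliRunner can
# drive these commands in-process, e.g. for a quick smoke test. The slug below
# is made up, and the invocation still performs a real HTTP request.
def _demo():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(main, ["get", "some-article-slug"])
    click.echo(result.output)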
``` |
{
"source": "jonathanmusila/FlaskREsPlus",
"score": 3
} |
#### File: jonathanmusila/FlaskREsPlus/app.py
```python
from flask import Flask
from flask_restplus import Api, Resource, fields
app = Flask(__name__)
api = Api(app)
a_language = api.model('Language', {'language': fields.String('The language')})
languages = []
python = {'language':'Python'}
languages.append(python)
@api.route('/language')
class Language(Resource):
def get(self):
return languages
@api.expect(a_language)
def post(self):
languages.append(api.payload)
return {'results': "Language added"}, 201
if __name__ == '__main__':
app.run(debug=True)
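# Hedged usage sketch (not part of the original file): exercising the endpoints
# with Flask's built-in test client instead of a running server. Assumes a
# Flask/Werkzeug version whose test client accepts a json= keyword.
def _demo_requests():
    client = app.test_client()
    client.post('/language', json={'language': 'Rust'})   # add a language
    print(client.get('/language').get_json())             # list all languages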
``` |
{
"source": "jonathanmusila/simple-logic-tests",
"score": 4
} |
#### File: simple-logic-tests/fibonacci/fib.py
```python
def fibonacci(n):
    if n <= 1:
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)
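# Illustrative addition (not part of the original file): the recursion above
# recomputes subproblems exponentially; this iterative version produces the
# same values in linear time and constant space.
def fibonacci_iterative(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a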
n = int(input("Enter number of terms:"))
print("Fibonacci sequence:")
for i in range(n):
print (fibonacci(i))
```
#### File: simple-logic-tests/tests/test_reverse.py
```python
import pytest
from reverse_word import reverse
def test_number_is_reversed():
pass
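    # Placeholder body. A concrete assertion might look like the following,
    # assuming reverse() takes an int and returns its digits reversed:
    #     assert reverse(123) == 321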
``` |
{
"source": "jonathanmusila/StackOverflow-Lite",
"score": 3
} |
#### File: site-packages/astroid/decorators.py
```python
import functools
import wrapt
from astroid import context as contextmod
from astroid import exceptions
from astroid import util
@wrapt.decorator
def cached(func, instance, args, kwargs):
"""Simple decorator to cache result of method calls without args."""
cache = getattr(instance, '__cache', None)
if cache is None:
instance.__cache = cache = {}
try:
return cache[func]
except KeyError:
cache[func] = result = func(*args, **kwargs)
return result
class cachedproperty(object):
""" Provides a cached property equivalent to the stacking of
@cached and @property, but more efficient.
After first usage, the <property_name> becomes part of the object's
__dict__. Doing:
del obj.<property_name> empties the cache.
Idea taken from the pyramid_ framework and the mercurial_ project.
.. _pyramid: http://pypi.python.org/pypi/pyramid
.. _mercurial: http://pypi.python.org/pypi/Mercurial
"""
__slots__ = ('wrapped',)
def __init__(self, wrapped):
try:
wrapped.__name__
except AttributeError:
util.reraise(TypeError('%s must have a __name__ attribute'
% wrapped))
self.wrapped = wrapped
@property
def __doc__(self):
doc = getattr(self.wrapped, '__doc__', None)
return ('<wrapped by the cachedproperty decorator>%s'
% ('\n%s' % doc if doc else ''))
def __get__(self, inst, objtype=None):
if inst is None:
return self
val = self.wrapped(inst)
setattr(inst, self.wrapped.__name__, val)
return val
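# Illustrative sketch (not part of astroid): typical cachedproperty behaviour.
#
#     class Demo(object):
#         @cachedproperty
#         def value(self):
#             return compute_expensive_value()   # hypothetical helper
#
# The first read of demo.value calls the wrapped function and stores the result
# in demo.__dict__, so later reads bypass the descriptor entirely; `del demo.value`
# clears that cache and the next access recomputes it.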
def path_wrapper(func):
"""return the given infer function wrapped to handle the path
Used to stop inference if the node has already been looked
at for a given `InferenceContext` to prevent infinite recursion
"""
# TODO: switch this to wrapt after the monkey-patching is fixed (ceridwen)
@functools.wraps(func)
def wrapped(node, context=None, _func=func, **kwargs):
"""wrapper function handling context"""
if context is None:
context = contextmod.InferenceContext()
if context.push(node):
return
yielded = set()
generator = _func(node, context, **kwargs)
try:
while True:
res = next(generator)
# unproxy only true instance, not const, tuple, dict...
if res.__class__.__name__ == 'Instance':
ares = res._proxied
else:
ares = res
if ares not in yielded:
yield res
yielded.add(ares)
except StopIteration as error:
# Explicit StopIteration to return error information, see
# comment in raise_if_nothing_inferred.
if error.args:
raise StopIteration(error.args[0])
else:
raise StopIteration
return wrapped
@wrapt.decorator
def yes_if_nothing_inferred(func, instance, args, kwargs):
inferred = False
for node in func(*args, **kwargs):
inferred = True
yield node
if not inferred:
yield util.Uninferable
@wrapt.decorator
def raise_if_nothing_inferred(func, instance, args, kwargs):
'''All generators wrapped with raise_if_nothing_inferred *must*
explicitly raise StopIteration with information to create an
appropriate structured InferenceError.
'''
# TODO: Explicitly raising StopIteration in a generator will cause
# a RuntimeError in Python >=3.7, as per
# http://legacy.python.org/dev/peps/pep-0479/ . Before 3.7 is
# released, this code will need to use one of four possible
# solutions: a decorator that restores the current behavior as
# described in
# http://legacy.python.org/dev/peps/pep-0479/#sub-proposal-decorator-to-explicitly-request-current-behaviour
# , dynamic imports or exec to generate different code for
# different versions, drop support for all Python versions <3.3,
# or refactoring to change how these decorators work. In any
# event, after dropping support for Python <3.3 this code should
# be refactored to use `yield from`.
inferred = False
try:
generator = func(*args, **kwargs)
while True:
yield next(generator)
inferred = True
except StopIteration as error:
if not inferred:
if error.args:
# pylint: disable=not-a-mapping
raise exceptions.InferenceError(**error.args[0])
else:
raise exceptions.InferenceError(
'StopIteration raised without any error information.')
```
#### File: blueprint/backend/files.py
```python
import base64
from collections import defaultdict
import errno
import glob
import grp
import hashlib
import logging
import os.path
import pwd
import re
import stat
import subprocess
from blueprint import util
# An extra list of pathnames and MD5 sums that will be checked after no
# match is found in `dpkg`(1)'s list. If a pathname is given as the value
# then that file's contents will be hashed.
#
# Many of these files are distributed with packages and copied from
# `/usr/share` in the `postinst` program.
#
# XXX Update `blueprintignore`(5) if you make changes here.
MD5SUMS = {'/etc/adduser.conf': ['/usr/share/adduser/adduser.conf'],
'/etc/apparmor.d/tunables/home.d/ubuntu':
['2a88811f7b763daa96c20b20269294a4'],
'/etc/apt/apt.conf.d/00CDMountPoint':
['cb46a4e03f8c592ee9f56c948c14ea4e'],
'/etc/apt/apt.conf.d/00trustcdrom':
['a8df82e6e6774f817b500ee10202a968'],
'/etc/chatscripts/provider': ['/usr/share/ppp/provider.chatscript'],
'/etc/default/console-setup':
['0fb6cec686d0410993bdf17192bee7d6',
'b684fd43b74ac60c6bdafafda8236ed3',
'/usr/share/console-setup/console-setup'],
'/etc/default/grub': ['ee9df6805efb2a7d1ba3f8016754a119',
'ad9283019e54cedfc1f58bcc5e615dce'],
'/etc/default/irqbalance': ['7e10d364b9f72b11d7bf7bd1cfaeb0ff'],
'/etc/default/keyboard': ['06d66484edaa2fbf89aa0c1ec4989857'],
'/etc/default/locale': ['164aba1ef1298affaa58761647f2ceba',
'7c32189e775ac93487aa4a01dffbbf76'],
'/etc/default/rcS': ['/usr/share/initscripts/default.rcS'],
'/etc/environment': ['44ad415fac749e0c39d6302a751db3f2'],
'/etc/hosts.allow': ['8c44735847c4f69fb9e1f0d7a32e94c1'],
'/etc/hosts.deny': ['92a0a19db9dc99488f00ac9e7b28eb3d'],
'/etc/initramfs-tools/modules':
['/usr/share/initramfs-tools/modules'],
'/etc/inputrc': ['/usr/share/readline/inputrc'],
'/etc/iscsi/iscsid.conf': ['6c6fd718faae84a4ab1b276e78fea471'],
'/etc/kernel-img.conf': ['f1ed9c3e91816337aa7351bdf558a442'],
'/etc/ld.so.conf': ['4317c6de8564b68d628c21efa96b37e4'],
'/etc/ld.so.conf.d/nosegneg.conf':
['3c6eccf8f1c6c90eaf3eb486cc8af8a3'],
'/etc/networks': ['/usr/share/base-files/networks'],
'/etc/nsswitch.conf': ['/usr/share/base-files/nsswitch.conf'],
'/etc/pam.d/common-account': ['9d50c7dda6ba8b6a8422fd4453722324'],
'/etc/pam.d/common-auth': ['<PASSWORD>'],
'/etc/pam.d/common-password': ['<PASSWORD>'],
'/etc/pam.d/common-session': ['e2b72dd3efb2d6b29698f944d8723ab1'],
'/etc/pam.d/common-session-noninteractive':
['<PASSWORD>'],
'/etc/pam.d/fingerprint-auth-ac':
['<PASSWORD>'],
'/etc/pam.d/fingerprint-auth': ['<PASSWORD>'],
'/etc/pam.d/password-auth-ac': ['<PASSWORD>'],
'/etc/pam.d/password-auth': ['<PASSWORD>'],
'/etc/pam.d/smartcard-auth-ac':
['dfa6696dc19391b065c45<PASSWORD>2<PASSWORD>'],
'/etc/pam.d/smartcard-auth': ['<KEY>'],
'/etc/pam.d/system-auth-ac': ['e<PASSWORD>1<PASSWORD>a6<PASSWORD>a<PASSWORD>'],
'/etc/pam.d/system-auth': ['<PASSWORD>'],
'/etc/ppp/chap-secrets': ['faac59e116399eadbb37644de6494cc4'],
'/etc/ppp/pap-secrets': ['698c4d412deedc43dde8641f84e8b2fd'],
'/etc/ppp/peers/provider': ['/usr/share/ppp/provider.peer'],
'/etc/profile': ['/usr/share/base-files/profile'],
'/etc/python/debian_config': ['7f4739eb8858d231601a5ed144099ac8'],
'/etc/rc.local': ['10fd9f051accb6fd1f753f2d48371890'],
'/etc/rsyslog.d/50-default.conf':
['/usr/share/rsyslog/50-default.conf'],
'/etc/security/opasswd': ['<PASSWORD>'],
'/etc/selinux/restorecond.conf':
['b5b371cb8c7b33e17bdd0d327fa69b60'],
'/etc/selinux/targeted/modules/semanage.trans.LOCK':
['d41d8cd98f00b204e9800998ecf8427e'],
'/etc/selinux/targeted/modules/active/file_contexts.template':
['bfa4d9e76d88c7dc49ee34ac6f4c3925'],
'/etc/selinux/targeted/modules/active/file_contexts':
['1622b57a3b85db3112c5f71238c68d3e'],
'/etc/selinux/targeted/modules/active/users_extra':
['daab665152753da1bf92ca0b2af82999'],
'/etc/selinux/targeted/modules/active/base.pp':
['6540e8e1a9566721e70953a3cb946de4'],
'/etc/selinux/targeted/modules/active/modules/fetchmail.pp':
['0b0c7845f10170a76b9bd4213634cb43'],
'/etc/selinux/targeted/modules/active/modules/usbmuxd.pp':
['72a039c5108de78060651833a073dcd1'],
'/etc/selinux/targeted/modules/active/modules/pulseaudio.pp':
['d9c4f1abf8397d7967bb3014391f7b61'],
'/etc/selinux/targeted/modules/active/modules/screen.pp':
['c343b6c4df512b3ef435f06ed6cfd8b4'],
'/etc/selinux/targeted/modules/active/modules/cipe.pp':
['4ea2d39babaab8e83e29d13d7a83e8da'],
'/etc/selinux/targeted/modules/active/modules/rpcbind.pp':
['48cdaa5a31d75f95690106eeaaf855e3'],
'/etc/selinux/targeted/modules/active/modules/nut.pp':
['d8c81e82747c85d6788acc9d91178772'],
'/etc/selinux/targeted/modules/active/modules/mozilla.pp':
['405329d98580ef56f9e525a66adf7dc5'],
'/etc/selinux/targeted/modules/active/modules/openvpn.pp':
['110fe4c59b7d7124a7d33fda1f31428a'],
'/etc/selinux/targeted/modules/active/modules/denyhosts.pp':
['d12dba0c7eea142c16abd1e0424dfda4'],
'/etc/selinux/targeted/modules/active/modules/rhcs.pp':
['e7a6bf514011f39f277d401cd3d3186a'],
'/etc/selinux/targeted/modules/active/modules/radius.pp':
['a7380d93d0ac922364bc1eda85af80bf'],
'/etc/selinux/targeted/modules/active/modules/policykit.pp':
['1828a7a89c5c7a9cd0bd1b04b379e2c0'],
'/etc/selinux/targeted/modules/active/modules/varnishd.pp':
['260ef0797e6178de4edeeeca741e2374'],
'/etc/selinux/targeted/modules/active/modules/bugzilla.pp':
['c70402a459add46214ee370039398931'],
'/etc/selinux/targeted/modules/active/modules/java.pp':
['ac691d90e755a9a929c1c8095d721899'],
'/etc/selinux/targeted/modules/active/modules/courier.pp':
['d6eb2ef77d755fd49d61e48383867ccb'],
'/etc/selinux/targeted/modules/active/modules/userhelper.pp':
['787e5ca0ee1c9e744e9116837d73c2b9'],
'/etc/selinux/targeted/modules/active/modules/sssd.pp':
['aeb11626d9f34af08e9cd50b1b5751c7'],
'/etc/selinux/targeted/modules/active/modules/munin.pp':
['db2927d889a3dfbe439eb67dfdcba61d'],
'/etc/selinux/targeted/modules/active/modules/ppp.pp':
['7c6f91f4aae1c13a3d2a159a4c9b8553'],
'/etc/selinux/targeted/modules/active/modules/xfs.pp':
['6b3be69f181f28e89bfcffa032097dcb'],
'/etc/selinux/targeted/modules/active/modules/consolekit.pp':
['ef682e07a732448a12f2e93da946d655'],
'/etc/selinux/targeted/modules/active/modules/telnet.pp':
['43fd78d022e499bcb6392da33ed6e28d'],
'/etc/selinux/targeted/modules/active/modules/nagios.pp':
['9c9e482867dce0aa325884a50a023a83'],
'/etc/selinux/targeted/modules/active/modules/sysstat.pp':
['0fc4e6b3472ce5e8cfd0f3e785809552'],
'/etc/selinux/targeted/modules/active/modules/tor.pp':
['2c926e3c5b79879ed992b72406544394'],
'/etc/selinux/targeted/modules/active/modules/qpidd.pp':
['959d4763313e80d8a75bc009094ea085'],
'/etc/selinux/targeted/modules/active/modules/radvd.pp':
['a7636d3df0f431ad421170150e8a9d2e'],
'/etc/selinux/targeted/modules/active/modules/aiccu.pp':
['c0eafc1357cd0c07be4034c1a27ada98'],
'/etc/selinux/targeted/modules/active/modules/tgtd.pp':
['55da30386834e60a10b4bab582a1b689'],
'/etc/selinux/targeted/modules/active/modules/sectoolm.pp':
['6f8fba8d448da09f85a03622de295ba9'],
'/etc/selinux/targeted/modules/active/modules/unconfineduser.pp':
['0bc2f6faf3b38a657c4928ec7b611d7a'],
'/etc/selinux/targeted/modules/active/modules/sambagui.pp':
['31a5121c80a6114b25db4984bdf8d999'],
'/etc/selinux/targeted/modules/active/modules/mpd.pp':
['cdabce7844a227a81c2334dec0c49e9b'],
'/etc/selinux/targeted/modules/active/modules/hddtemp.pp':
['76d85610a7e198c82406d850ccd935e1'],
'/etc/selinux/targeted/modules/active/modules/clamav.pp':
['f8f5b60e3f5b176810ea0666b989f63d'],
'/etc/selinux/targeted/modules/active/modules/tvtime.pp':
['886dc0a6e9ebcbb6787909851e7c209f'],
'/etc/selinux/targeted/modules/active/modules/cgroup.pp':
['9e1cd610b6fde0e9b42cabd7f994db46'],
'/etc/selinux/targeted/modules/active/modules/rshd.pp':
['e39cec5e9ade8a619ecb91b85a351408'],
'/etc/selinux/targeted/modules/active/modules/roundup.pp':
['133b9b3b2f70422953851e18d6c24276'],
'/etc/selinux/targeted/modules/active/modules/virt.pp':
['9ae34fca60c651c10298797c1260ced0'],
'/etc/selinux/targeted/modules/active/modules/asterisk.pp':
['f823fdcb2c6df4ddde374c9edb11ef26'],
'/etc/selinux/targeted/modules/active/modules/livecd.pp':
['8972e6ef04f490b8915e7983392b96ce'],
'/etc/selinux/targeted/modules/active/modules/netlabel.pp':
['91fc83e5798bd271742823cbb78c17ff'],
'/etc/selinux/targeted/modules/active/modules/qemu.pp':
['e561673d5f9e5c19bcae84c1641fa4a7'],
'/etc/selinux/targeted/modules/active/modules/unconfined.pp':
['3acd5dceb6b7a71c32919c29ef920785'],
'/etc/selinux/targeted/modules/active/modules/postgresql.pp':
['3ecc9f2c7b911fa37d8ab6cc1c6b0ea7'],
'/etc/selinux/targeted/modules/active/modules/apache.pp':
['c0089e4472399e9bc5237b1e0485ac39'],
'/etc/selinux/targeted/modules/active/modules/abrt.pp':
['09e212789d19f41595d7952499236a0c'],
'/etc/selinux/targeted/modules/active/modules/rsync.pp':
['e2567e8716c116ea6324c77652c97137'],
'/etc/selinux/targeted/modules/active/modules/git.pp':
['7904fd9fbae924be5377ccd51036248e'],
'/etc/selinux/targeted/modules/active/modules/amanda.pp':
['594eddbbe3b4530e79702fc6a882010e'],
'/etc/selinux/targeted/modules/active/modules/cvs.pp':
['62cf7b7d58f507cc9f507a6c303c8020'],
'/etc/selinux/targeted/modules/active/modules/chronyd.pp':
['a4ff3e36070d461771230c4019b23440'],
'/etc/selinux/targeted/modules/active/modules/gpm.pp':
['ed3f26e774be81c2cbaaa87dcfe7ae2d'],
'/etc/selinux/targeted/modules/active/modules/modemmanager.pp':
['840d4da9f32a264436f1b22d4d4a0b2a'],
'/etc/selinux/targeted/modules/active/modules/podsleuth.pp':
['67e659e9554bc35631ee829b5dc71647'],
'/etc/selinux/targeted/modules/active/modules/publicfile.pp':
['0f092d92c326444dc9cee78472c56655'],
'/etc/selinux/targeted/modules/active/modules/postfix.pp':
['a00647ad811c22810c76c1162a97e74b'],
'/etc/selinux/targeted/modules/active/modules/exim.pp':
['8c3cd1fbd8f68e80ac7707f243ac1911'],
'/etc/selinux/targeted/modules/active/modules/telepathy.pp':
['9b32f699beb6f9c563f06f6b6d76732c'],
'/etc/selinux/targeted/modules/active/modules/amtu.pp':
['1b87c9fef219244f80b1f8f57a2ce7ea'],
'/etc/selinux/targeted/modules/active/modules/bitlbee.pp':
['cf0973c8fff61577cf330bb74ef75eed'],
'/etc/selinux/targeted/modules/active/modules/memcached.pp':
['0146491b4ab9fbd2854a7e7fb2092168'],
'/etc/selinux/targeted/modules/active/modules/sandbox.pp':
['82502d6d11b83370d1a77343f20d669f'],
'/etc/selinux/targeted/modules/active/modules/dictd.pp':
['6119d37987ea968e90a39d96866e5805'],
'/etc/selinux/targeted/modules/active/modules/pingd.pp':
['16c40af7785c8fa9d40789284ce8fbb9'],
'/etc/selinux/targeted/modules/active/modules/milter.pp':
['acaec7d2ee341e97ac5e345b55f6c7ae'],
'/etc/selinux/targeted/modules/active/modules/snort.pp':
['25f360aa5dec254a8fc18262bbe40510'],
'/etc/selinux/targeted/modules/active/modules/cups.pp':
['5323d417895d5ab508048e2bc45367bf'],
'/etc/selinux/targeted/modules/active/modules/rdisc.pp':
['5bed79cb1f4d5a2b822d6f8dbf53fe97'],
'/etc/selinux/targeted/modules/active/modules/rlogin.pp':
['6f88cc86985b4bc79d4b1afbffb1a732'],
'/etc/selinux/targeted/modules/active/modules/openct.pp':
['884f078f5d12f7b1c75cf011a94746e1'],
'/etc/selinux/targeted/modules/active/modules/dbskk.pp':
['caa93f24bfeede892fd97c59ee8b61da'],
'/etc/selinux/targeted/modules/active/modules/bluetooth.pp':
['ce4f1b34168c537b611783033316760e'],
'/etc/selinux/targeted/modules/active/modules/gpsd.pp':
['dd15485b8c6e5aeac018ddbe0948464c'],
'/etc/selinux/targeted/modules/active/modules/tuned.pp':
['5fc9de20402245e4a1a19c5b31101d06'],
'/etc/selinux/targeted/modules/active/modules/piranha.pp':
['fcedf8588c027633bedb76b598b7586f'],
'/etc/selinux/targeted/modules/active/modules/vhostmd.pp':
['0ca7152ed8a0ae393051876fe89ed657'],
'/etc/selinux/targeted/modules/active/modules/corosync.pp':
['20518dface3d23d7408dd56a51c8e6e1'],
'/etc/selinux/targeted/modules/active/modules/clogd.pp':
['533994a32ecf847a3162675e171c847c'],
'/etc/selinux/targeted/modules/active/modules/samba.pp':
['c7cd9b91a5ba4f0744e3f55a800f2831'],
'/etc/selinux/targeted/modules/active/modules/howl.pp':
['fef7dd76a97921c3e5e0e66fbac15091'],
'/etc/selinux/targeted/modules/active/modules/shutdown.pp':
['55f36d9820dcd19c66729d446d3ce6b2'],
'/etc/selinux/targeted/modules/active/modules/oddjob.pp':
['54d59b40e7bc0dc0dee3882e6c0ce9f3'],
'/etc/selinux/targeted/modules/active/modules/pcscd.pp':
['e728f332850dfcb5637c4e8f220af2fc'],
'/etc/selinux/targeted/modules/active/modules/canna.pp':
['de4f1a3ada6f9813da36febc31d2a282'],
'/etc/selinux/targeted/modules/active/modules/arpwatch.pp':
['0ddc328fa054f363a035ba44ec116514'],
'/etc/selinux/targeted/modules/active/modules/seunshare.pp':
['64844bbf79ee23e087a5741918f3a7ad'],
'/etc/selinux/targeted/modules/active/modules/rhgb.pp':
['c9630cc5830fcb4b775985c5740f5a71'],
'/etc/selinux/targeted/modules/active/modules/prelude.pp':
['2b85511c571c19751bb79b288267661c'],
'/etc/selinux/targeted/modules/active/modules/portmap.pp':
['231abe579c0370f49cac533c6057792b'],
'/etc/selinux/targeted/modules/active/modules/logadm.pp':
['980b1345ef8944a90b6efdff0c8b3278'],
'/etc/selinux/targeted/modules/active/modules/ptchown.pp':
['987fc8a6ff50ef7eed0edc79f91b1ec5'],
'/etc/selinux/targeted/modules/active/modules/vmware.pp':
['8cf31ec8abd75f2a6c56857146caf5a1'],
'/etc/selinux/targeted/modules/active/modules/portreserve.pp':
['0354f017b429dead8de0d143f7950fcc'],
'/etc/selinux/targeted/modules/active/modules/awstats.pp':
['c081d3168b28765182bb4ec937b4c0b1'],
'/etc/selinux/targeted/modules/active/modules/tmpreaper.pp':
['ac0173dd09a54a87fdcb42d3a5e29442'],
'/etc/selinux/targeted/modules/active/modules/postgrey.pp':
['68013352c07570ac38587df9fb7e88ee'],
'/etc/selinux/targeted/modules/active/modules/tftp.pp':
['a47fb7872bfb06d80c8eef969d91e6f9'],
'/etc/selinux/targeted/modules/active/modules/rgmanager.pp':
['1cee78e1ff3f64c4d013ce7b820e534b'],
'/etc/selinux/targeted/modules/active/modules/aisexec.pp':
['95e70fd35e9cb8284488d6bf970815b7'],
'/etc/selinux/targeted/modules/active/modules/xguest.pp':
['d8df4b61df93008cd594f98c852d4cba'],
'/etc/selinux/targeted/modules/active/modules/cobbler.pp':
['6978d8b37b1da384130db5c5c2144175'],
'/etc/selinux/targeted/modules/active/modules/mysql.pp':
['d147af479531042f13e70d72bd58a0e9'],
'/etc/selinux/targeted/modules/active/modules/amavis.pp':
['7fc17b2f47c1d8226a9003df1ef67bb5'],
'/etc/selinux/targeted/modules/active/modules/fprintd.pp':
['d58f18b496f69a74ece1f1b1b9432405'],
'/etc/selinux/targeted/modules/active/modules/nis.pp':
['d696b167de5817226298306c79761fa2'],
'/etc/selinux/targeted/modules/active/modules/squid.pp':
['3f9e075e79ec5aa59609a7ccebce0afe'],
'/etc/selinux/targeted/modules/active/modules/smokeping.pp':
['98b83cac4488d7dd18c479b62dd3cf15'],
'/etc/selinux/targeted/modules/active/modules/ktalk.pp':
['afe14e94861782679305c91da05e7d5e'],
'/etc/selinux/targeted/modules/active/modules/certwatch.pp':
['bf13c9a642ded8354ba26d5462ddd60c'],
'/etc/selinux/targeted/modules/active/modules/games.pp':
['3bcd17c07699d58bd436896e75a24520'],
'/etc/selinux/targeted/modules/active/modules/zabbix.pp':
['5445ccfec7040ff1ccf3abf4de2e9a3c'],
'/etc/selinux/targeted/modules/active/modules/rwho.pp':
['710e29c8e621de6af9ca74869624b9f0'],
'/etc/selinux/targeted/modules/active/modules/w3c.pp':
['aea6b9518cb3fa904cc7ee82239b07c2'],
'/etc/selinux/targeted/modules/active/modules/cyphesis.pp':
['dccb3f009cd56c5f8856861047d7f2ff'],
'/etc/selinux/targeted/modules/active/modules/kismet.pp':
['f2d984e007275d35dd03a2d59ade507e'],
'/etc/selinux/targeted/modules/active/modules/zosremote.pp':
['77a2681c4b1c3c001faeca9874b58ecf'],
'/etc/selinux/targeted/modules/active/modules/pads.pp':
['76b7413009a202e228ee08c5511f3f42'],
'/etc/selinux/targeted/modules/active/modules/avahi.pp':
['b59670ba623aba37ab8f0f1f1127893a'],
'/etc/selinux/targeted/modules/active/modules/apcupsd.pp':
['81fae28232730a49b7660797ef4354c3'],
'/etc/selinux/targeted/modules/active/modules/usernetctl.pp':
['22850457002a48041d885c0d74fbd934'],
'/etc/selinux/targeted/modules/active/modules/finger.pp':
['5dd6b44358bbfabfdc4f546e1ed34370'],
'/etc/selinux/targeted/modules/active/modules/dhcp.pp':
['7e63b07b64848a017eec5d5f6b88f22e'],
'/etc/selinux/targeted/modules/active/modules/xen.pp':
['67086e8e94bdaab8247ac4d2e23162d1'],
'/etc/selinux/targeted/modules/active/modules/plymouthd.pp':
['1916027e7c9f28430fa2ac30334e8964'],
'/etc/selinux/targeted/modules/active/modules/uucp.pp':
['5bec7a345a314a37b4a2227bdfa926f1'],
'/etc/selinux/targeted/modules/active/modules/daemontools.pp':
['aad7633adfc8b04e863b481deebaf14a'],
'/etc/selinux/targeted/modules/active/modules/kdumpgui.pp':
['66e08b4187623fa1c535972a35ec058c'],
'/etc/selinux/targeted/modules/active/modules/privoxy.pp':
['f13c986051659fa900786ea54a59ceae'],
'/etc/selinux/targeted/modules/active/modules/unprivuser.pp':
['a0d128b495a6ea5da72c849ac63c5848'],
'/etc/selinux/targeted/modules/active/modules/ada.pp':
['a75fd52c873e2c9326ad87f7515a664f'],
'/etc/selinux/targeted/modules/active/modules/lircd.pp':
['3cc5cc5b24d40416f9d630a80005d33b'],
'/etc/selinux/targeted/modules/active/modules/openoffice.pp':
['522c3ee13bc37cbe9903d00f0cbccd1d'],
'/etc/selinux/targeted/modules/active/modules/puppet.pp':
['9da4c553f40f3dea876171e672168044'],
'/etc/selinux/targeted/modules/active/modules/wine.pp':
['31c470eabd98c5a5dbc66ba52ad64de0'],
'/etc/selinux/targeted/modules/active/modules/ulogd.pp':
['065551ea63de34a7257ecec152f61552'],
'/etc/selinux/targeted/modules/active/modules/mplayer.pp':
['f889dbfa3d9ef071d8e569def835a2f3'],
'/etc/selinux/targeted/modules/active/modules/ftp.pp':
['75a9f3563903eb8126ffbcc9277e1d8c'],
'/etc/selinux/targeted/modules/active/modules/gnome.pp':
['b859e2d45123f60ff27a90cdb0f40e1b'],
'/etc/selinux/targeted/modules/active/modules/ethereal.pp':
['8963c6b80025b27850f0cdf565e5bd54'],
'/etc/selinux/targeted/modules/active/modules/iscsi.pp':
['7786cb4a84889010751b4d89c72a2956'],
'/etc/selinux/targeted/modules/active/modules/chrome.pp':
['cb44c1c7b13cc04c07c4e787a259b63f'],
'/etc/selinux/targeted/modules/active/modules/guest.pp':
['308d614589af73e39a22e5c741e9eecb'],
'/etc/selinux/targeted/modules/active/modules/inn.pp':
['8d60592dcd3bf4d2fa97f0fefa9374ca'],
'/etc/selinux/targeted/modules/active/modules/gitosis.pp':
['21c79a711157224bebba0a2cccbe8881'],
'/etc/selinux/targeted/modules/active/modules/ksmtuned.pp':
['8f985e777c206d2bde3fc2ac6a28cd24'],
'/etc/selinux/targeted/modules/active/modules/sosreport.pp':
['9b4780d27555e94335f80a0bb2ab4f14'],
'/etc/selinux/targeted/modules/active/modules/ipsec.pp':
['68cacb8c78796957fb4a181390033b16'],
'/etc/selinux/targeted/modules/active/modules/comsat.pp':
['1cecb3f5cbe24251017908e14838ee2a'],
'/etc/selinux/targeted/modules/active/modules/gpg.pp':
['75358ddabb045e91010d80f1ab68307a'],
'/etc/selinux/targeted/modules/active/modules/gnomeclock.pp':
['a4e74df48faab3af8f4df0fa16c65c7e'],
'/etc/selinux/targeted/modules/active/modules/sasl.pp':
['5ba9be813a7dd4236fc2d37bc17c5052'],
'/etc/selinux/targeted/modules/active/modules/vpn.pp':
['32ae00c287432ae5ad4f8affbc9e44fe'],
'/etc/selinux/targeted/modules/active/modules/accountsd.pp':
['308057b48c6d70a45e5a603dbe625c2d'],
'/etc/selinux/targeted/modules/active/modules/devicekit.pp':
['1f5a8f12ebeebfed2cfeb3ee4648dd13'],
'/etc/selinux/targeted/modules/active/modules/psad.pp':
['b02f11705249c93735f019f5b97fdf7b'],
'/etc/selinux/targeted/modules/active/modules/mono.pp':
['8bba1cc6826e8300c140f9c393ad07e9'],
'/etc/selinux/targeted/modules/active/modules/cachefilesd.pp':
['82b93ba87b5920ecc8a7388f4cf8ea43'],
'/etc/selinux/targeted/modules/active/modules/usbmodules.pp':
['20c3a57da3c1311a75a63f1c6ae91bf3'],
'/etc/selinux/targeted/modules/active/modules/certmonger.pp':
['b9fe8ba6abc5204cd8eec546f5614ff5'],
'/etc/selinux/targeted/modules/active/modules/pegasus.pp':
['bb0ec4379c28b196d1794d7310111d98'],
'/etc/selinux/targeted/modules/active/modules/ntop.pp':
['99b46fe44ccf3c4e045dbc73d2a88f59'],
'/etc/selinux/targeted/modules/active/modules/zebra.pp':
['12adcaae458d18f650578ce25e10521a'],
'/etc/selinux/targeted/modules/active/modules/soundserver.pp':
['583abd9ccef70279bff856516974d471'],
'/etc/selinux/targeted/modules/active/modules/stunnel.pp':
['2693ac1bf08287565c3b4e58d0f9ea55'],
'/etc/selinux/targeted/modules/active/modules/ldap.pp':
['039baf0976f316c3f209a5661174a72e'],
'/etc/selinux/targeted/modules/active/modules/fail2ban.pp':
['ce13513c427ff140bf988b01bd52e886'],
'/etc/selinux/targeted/modules/active/modules/spamassassin.pp':
['e02232992676b0e1279c54bfeea290e3'],
'/etc/selinux/targeted/modules/active/modules/procmail.pp':
['d5c58e90fac452a1a6d68cc496e7f1ae'],
'/etc/selinux/targeted/modules/active/modules/afs.pp':
['6e7a4bf08dc7fa5a0f97577b913267ad'],
'/etc/selinux/targeted/modules/active/modules/ricci.pp':
['8b1d44245be204907c82c3580a43901d'],
'/etc/selinux/targeted/modules/active/modules/qmail.pp':
['ea08eb2172c275598d4f85c9b78182cd'],
'/etc/selinux/targeted/modules/active/modules/ccs.pp':
['cad223d57f431e2f88a1d1542c2ac504'],
'/etc/selinux/targeted/modules/active/modules/audioentropy.pp':
['19f6fd5e3ee2a3726a952631e993a133'],
'/etc/selinux/targeted/modules/active/modules/ncftool.pp':
['c15f4833a21e9c8cd1237ee568aadcf3'],
'/etc/selinux/targeted/modules/active/modules/nx.pp':
['3677983206101cfcd2182e180ef3876b'],
'/etc/selinux/targeted/modules/active/modules/rtkit.pp':
['0eaae15f4c12522270b26769487a06e0'],
'/etc/selinux/targeted/modules/active/modules/ntp.pp':
['141339ee3372e07d32575c6777c8e466'],
'/etc/selinux/targeted/modules/active/modules/likewise.pp':
['b5f0d18f8b601e102fd9728fbb309692'],
'/etc/selinux/targeted/modules/active/modules/aide.pp':
['69600bc8a529f8128666a563c7409929'],
'/etc/selinux/targeted/modules/active/modules/nslcd.pp':
['5c87b1c80bdd8bbf60c33ef51a765a93'],
'/etc/selinux/targeted/modules/active/modules/slocate.pp':
['fdea88c374382f3d652a1ac529fbd189'],
'/etc/selinux/targeted/modules/active/modules/execmem.pp':
['44cc2d117e3bf1a33d4e3516aaa7339d'],
'/etc/selinux/targeted/modules/active/modules/cpufreqselector.pp':
['7da9c9690dc4f076148ef35c3644af13'],
'/etc/selinux/targeted/modules/active/modules/cmirrord.pp':
['084b532fa5ccd6775c483d757bcd0920'],
'/etc/selinux/targeted/modules/active/modules/bind.pp':
['5560f5706c8c8e83d8a2ac03a85b93fb'],
'/etc/selinux/targeted/modules/active/modules/uml.pp':
['a0841bc9ffca619fe5d44c557b70d258'],
'/etc/selinux/targeted/modules/active/modules/staff.pp':
['bdf16ee0fa0721770aa31c52e45227c3'],
'/etc/selinux/targeted/modules/active/modules/certmaster.pp':
['bc589a4f0dd49a05d52b9ffda7bdd149'],
'/etc/selinux/targeted/modules/active/modules/webalizer.pp':
['c99ccad469be3c901ede9da9a87e44b2'],
'/etc/selinux/targeted/modules/active/modules/hal.pp':
['c75783ec2dd49d437a242e0c69c31c96'],
'/etc/selinux/targeted/modules/active/modules/kdump.pp':
['d731820c7b5bb711566ea23970106b7a'],
'/etc/selinux/targeted/modules/active/modules/firewallgui.pp':
['ee3522a0072989ed08f70b03f7fd69d9'],
'/etc/selinux/targeted/modules/active/modules/tcpd.pp':
['b1f7db819812da14c4e836a9d9e79980'],
'/etc/selinux/targeted/modules/active/modules/mailman.pp':
['4116cbe11d943a076dd06cea91993745'],
'/etc/selinux/targeted/modules/active/modules/smartmon.pp':
['45d6440b436d8ac3f042e80c392dd672'],
'/etc/selinux/targeted/modules/active/modules/smoltclient.pp':
['dcfd6ecd62ee7191abda39315ec6ef1b'],
'/etc/selinux/targeted/modules/active/modules/kerberos.pp':
['936533081cfbe28eb9145fde86edb4f8'],
'/etc/selinux/targeted/modules/active/modules/lockdev.pp':
['e2da620d3272f296dd90bff8b921d203'],
'/etc/selinux/targeted/modules/active/modules/automount.pp':
['a06d3d617c6d8c29e29ce3fb0db48c9c'],
'/etc/selinux/targeted/modules/active/modules/webadm.pp':
['4ac9b2f95f8d8218ec93f001995fd8ba'],
'/etc/selinux/targeted/modules/active/modules/pyzor.pp':
['c2b00c08d77d7d5a8588dd82c489e354'],
'/etc/selinux/targeted/modules/active/modules/rssh.pp':
['aacef6c826e9d699e84a1dd564b68105'],
'/etc/selinux/targeted/modules/active/modules/nsplugin.pp':
['0c90d308f5e956900150eb6ed84b0b54'],
'/etc/selinux/targeted/modules/active/modules/lpd.pp':
['5bf17a46aa2d3e2ecc0daffcf092054e'],
'/etc/selinux/targeted/modules/active/modules/dcc.pp':
['84749af337d72ba6bbbe54b013c6c62c'],
'/etc/selinux/targeted/modules/active/modules/irc.pp':
['42897f214251c7ca9bc04379c4abff5e'],
'/etc/selinux/targeted/modules/active/modules/icecast.pp':
['962c81fc8ef5fd49c925a2249d229d1d'],
'/etc/selinux/targeted/modules/active/modules/dnsmasq.pp':
['ec4a8a50eb5806e450d97a77cbe8a8b4'],
'/etc/selinux/targeted/modules/active/modules/jabber.pp':
['5a528d52f7337d44bfc867333f2b1921'],
'/etc/selinux/targeted/modules/active/modules/remotelogin.pp':
['68c22a0bc6e4d5031153cf10d75ba76a'],
'/etc/selinux/targeted/modules/active/modules/boinc.pp':
['a70386e9ffdaccd04cbb565e6fe5c822'],
'/etc/selinux/targeted/modules/active/modules/mrtg.pp':
['7e6f395e72768d350d259c15d22a1cbb'],
'/etc/selinux/targeted/modules/active/modules/snmp.pp':
['fc5166e3066504601037054874fe0487'],
'/etc/selinux/targeted/modules/active/modules/cyrus.pp':
['d2e792bf111ce4a6ffdb87fe11d89d16'],
'/etc/selinux/targeted/modules/active/modules/dovecot.pp':
['b716de8b77f0dfeb9212d5cf36bddfa1'],
'/etc/selinux/targeted/modules/active/modules/cdrecord.pp':
['24c0325480e2f1d6cf1ce31c25d5f10a'],
'/etc/selinux/targeted/modules/active/modules/calamaris.pp':
['c7ec43f01369524db32249fb755f4e7f'],
'/etc/selinux/targeted/modules/active/modules/kerneloops.pp':
['2493d3308dfcd34e94308af9d5c888c3'],
'/etc/selinux/targeted/modules/active/modules/razor.pp':
['06425e50a31f14cec090c30e05fb9827'],
'/etc/selinux/targeted/modules/active/netfilter_contexts':
['d41d8cd98f00b204e9800998ecf8427e'],
'/etc/selinux/targeted/modules/active/seusers.final':
['fdf1cdf1d373e4583ca759617a1d2af3'],
'/etc/selinux/targeted/modules/active/file_contexts.homedirs':
['d7c4747704e9021ec2e16c7139fedfd9'],
'/etc/selinux/targeted/modules/active/commit_num':
['c08cc266624f6409b01432dac9576ab0'],
'/etc/selinux/targeted/modules/active/policy.kern':
['5398a60f820803049b5bb7d90dd6196b'],
'/etc/selinux/targeted/modules/active/homedir_template':
['682a31c8036aaf9cf969093d7162960a'],
'/etc/selinux/targeted/modules/semanage.read.LOCK':
['d41d8cd98f00b204e9800998ecf8427e'],
'/etc/selinux/targeted/contexts/failsafe_context':
['940b12538b676287b3c33e68426898ac'],
'/etc/selinux/targeted/contexts/virtual_domain_context':
['1e28f1b8e58e56a64c852bd77f57d121'],
'/etc/selinux/targeted/contexts/removable_context':
['e56a6b14d2bed27405d2066af463df9f'],
'/etc/selinux/targeted/contexts/netfilter_contexts':
['d41d8cd98f00b204e9800998ecf8427e'],
'/etc/selinux/targeted/contexts/userhelper_context':
['53441d64f9bc6337e3aac33f05d0954c'],
'/etc/selinux/targeted/contexts/virtual_image_context':
['b21a69d3423d2e085d5195e25922eaa1'],
'/etc/selinux/targeted/contexts/securetty_types':
['ee2445f940ed1b33e778a921cde8ad9e'],
'/etc/selinux/targeted/contexts/default_type':
['d0f63fea19ee82e5f65bdbb1de899c5d'],
'/etc/selinux/targeted/contexts/dbus_contexts':
['b1c42884fa5bdbde53d64cff469374fd'],
'/etc/selinux/targeted/contexts/files/file_contexts':
['1622b57a3b85db3112c5f71238c68d3e'],
'/etc/selinux/targeted/contexts/files/file_contexts.homedirs':
['d7c4747704e9021ec2e16c7139fedfd9'],
'/etc/selinux/targeted/contexts/files/media':
['3c867677892c0a15dc0b9e9811cc2c49'],
'/etc/selinux/targeted/contexts/initrc_context':
['99866a62735a38b2bf839233c1a1689d'],
'/etc/selinux/targeted/contexts/x_contexts':
['9dde3f5e3ddac42b9e99a4613c972b97'],
'/etc/selinux/targeted/contexts/customizable_types':
['68be87281cf3d40cb2c4606cd2b1ea2b'],
'/etc/selinux/targeted/contexts/users/xguest_u':
['e26010a418df86902332c57434370246'],
'/etc/selinux/targeted/contexts/users/unconfined_u':
['ee88bed48d9601ff2b11f68f97d361ac'],
'/etc/selinux/targeted/contexts/users/staff_u':
['f3412f7cbf441078a9de40fcaab93254'],
'/etc/selinux/targeted/contexts/users/root':
['328e08341d1ff9296573dd43c355e283'],
'/etc/selinux/targeted/contexts/users/user_u':
['2fe911f440282fda0590cd99540da579'],
'/etc/selinux/targeted/contexts/users/guest_u':
['61e7e7e7403b2eac30e312342e66e4cd'],
'/etc/selinux/targeted/contexts/default_contexts':
['0888c75fc814058bb3c01ef58f7a1f47'],
'/etc/selinux/targeted/policy/policy.24':
['5398a60f820803049b5bb7d90dd6196b'],
'/etc/selinux/targeted/setrans.conf':
['ae70362b6fa2af117bd6e293ce232069'],
'/etc/selinux/targeted/seusers':
['fdf1cdf1d373e4583ca759617a1d2af3'],
'/etc/selinux/config': ['91081ef6d958e79795d0255d7c374a56'],
'/etc/selinux/restorecond_user.conf':
['4e1b5b5e38c660f87d5a4f7d3a998c29'],
'/etc/selinux/semanage.conf': ['f33b524aef1a4df2a3d0eecdda041a5c'],
'/etc/sgml/xml-core.cat': ['bcd454c9bf55a3816a134f9766f5928f'],
'/etc/shells': ['0e85c87e09d716ecb03624ccff511760'],
'/etc/ssh/sshd_config': ['e24f749808133a27d94fda84a89bb27b',
'8caefdd9e251b7cc1baa37874149a870',
'874fafed9e745b14e5fa8ae71b82427d'],
'/etc/sudoers': ['02f74ccbec48997f402a063a172abb48'],
'/etc/ufw/after.rules': ['/usr/share/ufw/after.rules'],
'/etc/ufw/after6.rules': ['/usr/share/ufw/after6.rules'],
'/etc/ufw/before.rules': ['/usr/share/ufw/before.rules'],
'/etc/ufw/before6.rules': ['/usr/share/ufw/before6.rules'],
'/etc/ufw/ufw.conf': ['/usr/share/ufw/ufw.conf']}
for pathname, overrides in MD5SUMS.iteritems():
for i in range(len(overrides)):
if '/' != overrides[i][0]:
continue
try:
overrides[i] = hashlib.md5(open(overrides[i]).read()).hexdigest()
except IOError:
pass
def files(b, r):
logging.info('searching for configuration files')
# Visit every file in `/etc` except those on the exclusion list above.
for dirpath, dirnames, filenames in os.walk('/etc'):
# Determine if this entire directory should be ignored by default.
ignored = r.ignore_file(dirpath)
# Collect up the full pathname to each file, `lstat` them all, and
# note which ones will probably be ignored.
files = []
for filename in filenames:
pathname = os.path.join(dirpath, filename)
try:
files.append((pathname,
os.lstat(pathname),
r.ignore_file(pathname, ignored)))
except OSError as e:
logging.warning('{0} caused {1} - try running as root'.
format(pathname, errno.errorcode[e.errno]))
# Track the ctime of each file in this directory. Weed out false
# positives by ignoring files with common ctimes.
ctimes = defaultdict(lambda: 0)
# Map the ctimes of each directory entry that isn't being ignored.
for pathname, s, ignored in files:
if not ignored:
ctimes[s.st_ctime] += 1
for dirname in dirnames:
try:
ctimes[os.lstat(os.path.join(dirpath, dirname)).st_ctime] += 1
except OSError:
pass
for pathname, s, ignored in files:
# Always ignore block special files, character special files,
# pipes, and sockets. They end up looking like deadlocks.
if stat.S_ISBLK(s.st_mode) \
or stat.S_ISCHR(s.st_mode) \
or stat.S_ISFIFO(s.st_mode) \
or stat.S_ISSOCK(s.st_mode):
continue
# Make sure this pathname will actually be able to be included
# in the blueprint. This is a bit of a cop-out since the file
# could be important but at least it's not a crashing bug.
try:
pathname = unicode(pathname)
except UnicodeDecodeError:
logging.warning('{0} not UTF-8 - skipping it'.
format(repr(pathname)[1:-1]))
continue
# Ignore ignored files and files that share their ctime with other
# files in the directory. This is a very strong indication that
# the file is original to the system and should be ignored.
if ignored \
or 1 < ctimes[s.st_ctime] and r.ignore_file(pathname, True):
continue
# Check for a Mustache template and an optional shell script
# that templatize this file.
try:
template = open(
'{0}.blueprint-template.mustache'.format(pathname)).read()
except IOError:
template = None
try:
data = open(
'{0}.blueprint-template.sh'.format(pathname)).read()
except IOError:
data = None
# The content is used even for symbolic links to determine whether
# it has changed from the packaged version.
try:
content = open(pathname).read()
except IOError:
#logging.warning('{0} not readable'.format(pathname))
continue
# Ignore files that are unchanged from their packaged version.
if _unchanged(pathname, content, r):
continue
# Resolve the rest of the file's metadata from the
# `/etc/passwd` and `/etc/group` databases.
try:
pw = pwd.getpwuid(s.st_uid)
owner = pw.pw_name
except KeyError:
owner = s.st_uid
try:
gr = grp.getgrgid(s.st_gid)
group = gr.gr_name
except KeyError:
group = s.st_gid
mode = '{0:o}'.format(s.st_mode)
# A symbolic link's content is the link target.
if stat.S_ISLNK(s.st_mode):
content = os.readlink(pathname)
# Ignore symbolic links providing backwards compatibility
# between SystemV init and Upstart.
if '/lib/init/upstart-job' == content:
continue
# Ignore symbolic links into the Debian alternatives system.
# These are almost certainly managed by packages.
if content.startswith('/etc/alternatives/'):
continue
b.add_file(pathname,
content=content,
encoding='plain',
group=group,
mode=mode,
owner=owner)
# A regular file is stored as plain text only if it is valid
# UTF-8, which is required for JSON serialization.
else:
kwargs = dict(group=group,
mode=mode,
owner=owner)
try:
if template:
if data:
kwargs['data'] = data.decode('utf_8')
kwargs['template'] = template.decode('utf_8')
else:
kwargs['content'] = content.decode('utf_8')
kwargs['encoding'] = 'plain'
except UnicodeDecodeError:
if template:
if data:
kwargs['data'] = base64.b64encode(data)
kwargs['template'] = base64.b64encode(template)
else:
kwargs['content'] = base64.b64encode(content)
kwargs['encoding'] = 'base64'
b.add_file(pathname, **kwargs)
            # If this file is a service init script or config, create a
# service resource.
try:
manager, service = util.parse_service(pathname)
if not r.ignore_service(manager, service):
b.add_service(manager, service)
b.add_service_package(manager,
service,
'apt',
*_dpkg_query_S(pathname))
b.add_service_package(manager,
service,
'yum',
*_rpm_qf(pathname))
except ValueError:
pass
def _dpkg_query_S(pathname):
"""
Return a list of package names that contain `pathname` or `[]`. This
really can be a list thanks to `dpkg-divert`(1).
"""
# Cache the pathname-to-package mapping.
if not hasattr(_dpkg_query_S, '_cache'):
_dpkg_query_S._cache = defaultdict(set)
cache_ref = _dpkg_query_S._cache
for listname in glob.iglob('/var/lib/dpkg/info/*.list'):
package = os.path.splitext(os.path.basename(listname))[0]
for line in open(listname):
cache_ref[line.rstrip()].add(package)
# Return the list of packages that contain this file, if any.
if pathname in _dpkg_query_S._cache:
return list(_dpkg_query_S._cache[pathname])
# If `pathname` isn't in a package but is a symbolic link, see if the
# symbolic link is in a package. `postinst` programs commonly display
# this pattern.
try:
return _dpkg_query_S(os.readlink(pathname))
except OSError:
pass
return []
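# Illustrative note (not part of the original file): the cache built above maps a
# pathname to the set of packages that ship it, e.g. (example entry, not verified)
#   _dpkg_query_S._cache['/etc/ssh/sshd_config'] == set(['openssh-server'])
# so later calls avoid re-reading every /var/lib/dpkg/info/*.list file.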
def _dpkg_md5sum(package, pathname):
"""
Find the MD5 sum of the packaged version of pathname or `None` if the
`pathname` does not come from a Debian package.
"""
# Cache any MD5 sums stored in the status file. These are typically
# conffiles and the like.
if not hasattr(_dpkg_md5sum, '_status_cache'):
_dpkg_md5sum._status_cache = {}
cache_ref = _dpkg_md5sum._status_cache
try:
pattern = re.compile(r'^ (\S+) ([0-9a-f]{32})')
for line in open('/var/lib/dpkg/status'):
match = pattern.match(line)
if not match:
continue
cache_ref[match.group(1)] = match.group(2)
except IOError:
pass
# Return this file's MD5 sum, if it can be found.
try:
return _dpkg_md5sum._status_cache[pathname]
except KeyError:
pass
# Cache the MD5 sums for files in this package.
if not hasattr(_dpkg_md5sum, '_cache'):
_dpkg_md5sum._cache = defaultdict(dict)
if package not in _dpkg_md5sum._cache:
cache_ref = _dpkg_md5sum._cache[package]
try:
for line in open('/var/lib/dpkg/info/{0}.md5sums'.format(package)):
md5sum, rel_pathname = line.split(None, 1)
cache_ref['/{0}'.format(rel_pathname.rstrip())] = md5sum
except IOError:
pass
# Return this file's MD5 sum, if it can be found.
try:
return _dpkg_md5sum._cache[package][pathname]
except KeyError:
pass
return None
def _rpm_qf(pathname):
"""
Return a list of package names that contain `pathname` or `[]`. RPM
might not actually support a single pathname being claimed by more
than one package but `dpkg` does so the interface is maintained.
"""
try:
p = subprocess.Popen(['rpm', '--qf=%{NAME}', '-qf', pathname],
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
return []
stdout, stderr = p.communicate()
if 0 != p.returncode:
return []
return [stdout]
def _rpm_md5sum(pathname):
"""
Find the MD5 sum of the packaged version of pathname or `None` if the
`pathname` does not come from an RPM.
"""
if not hasattr(_rpm_md5sum, '_cache'):
_rpm_md5sum._cache = {}
symlinks = []
try:
p = subprocess.Popen(['rpm', '-qa', '--dump'],
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
pattern = re.compile(r'^(/etc/\S+) \d+ \d+ ([0-9a-f]+) ' # No ,
'(0\d+) \S+ \S+ \d \d \d (\S+)$')
for line in p.stdout:
match = pattern.match(line)
if match is None:
continue
if '0120777' == match.group(3):
symlinks.append((match.group(1), match.group(4)))
else:
_rpm_md5sum._cache[match.group(1)] = match.group(2)
# Find the MD5 sum of the targets of any symbolic links, even
# if the target is outside of /etc.
pattern = re.compile(r'^(/\S+) \d+ \d+ ([0-9a-f]+) ' # No ,
'(0\d+) \S+ \S+ \d \d \d (\S+)$')
for pathname, target in symlinks:
if '/' != target[0]:
target = os.path.normpath(os.path.join(
os.path.dirname(pathname), target))
if target in _rpm_md5sum._cache:
_rpm_md5sum._cache[pathname] = _rpm_md5sum._cache[target]
else:
p = subprocess.Popen(['rpm', '-qf', '--dump', target],
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in p.stdout:
match = pattern.match(line)
if match is not None and target == match.group(1):
_rpm_md5sum._cache[pathname] = match.group(2)
except OSError:
pass
return _rpm_md5sum._cache.get(pathname, None)
def _unchanged(pathname, content, r):
"""
Return `True` if a file is unchanged from its packaged version.
"""
# Ignore files that are from the `base-files` package (which
# doesn't include MD5 sums for every file for some reason).
apt_packages = _dpkg_query_S(pathname)
if 'base-files' in apt_packages:
return True
# Ignore files that are unchanged from their packaged version,
# or match in MD5SUMS.
md5sums = MD5SUMS.get(pathname, [])
md5sums.extend([_dpkg_md5sum(package, pathname)
for package in apt_packages])
md5sum = _rpm_md5sum(pathname)
if md5sum is not None:
md5sums.append(md5sum)
if (hashlib.md5(content).hexdigest() in md5sums \
or 64 in [len(md5sum or '') for md5sum in md5sums] \
and hashlib.sha256(content).hexdigest() in md5sums) \
and r.ignore_file(pathname, True):
return True
return False
```
#### File: blueprint/backend/yum.py
```python
import logging
import subprocess
from blueprint import util
def yum(b, r):
logging.info('searching for Yum packages')
# Try for the full list of packages. If this fails, don't even
# bother with the rest because this is probably a Debian-based
# system.
try:
p = subprocess.Popen(['rpm',
'--qf=%{NAME}\x1E%{GROUP}\x1E%{EPOCH}' # No ,
'\x1E%{VERSION}-%{RELEASE}\x1E%{ARCH}\n',
'-qa'],
close_fds=True, stdout=subprocess.PIPE)
except OSError:
return
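    # Illustrative note (not part of the original file): each line from rpm looks
    # roughly like (example values made up)
    #   'bash\x1ESystem Environment/Shells\x1E(none)\x1E4.2.46-34.el7\x1Ex86_64\n'
    # which the split('\x1E') below turns into the (package, group, epoch,
    # version, arch) tuple used to record 'bash' at version '4.2.46-34.el7.x86_64'.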
for line in p.stdout:
package, group, epoch, version, arch = line.strip().split('\x1E')
if r.ignore_package('yum', package):
continue
if '(none)' != epoch:
version = '{0}:{1}'.format(epoch, version)
if '(none)' != arch:
version = '{0}.{1}'.format(version, arch)
b.add_package('yum', package, version)
# Create service resources for each service init script or config
# in this package.
p = subprocess.Popen(['rpm', '-ql', package],
close_fds=True, stdout=subprocess.PIPE)
for line in p.stdout:
try:
manager, service = util.parse_service(line.rstrip())
if not r.ignore_service(manager, service):
b.add_service(manager, service)
b.add_service_package(manager, service, 'yum', package)
except ValueError:
pass
```
#### File: blueprint/io/http.py
```python
import errno
import httplib
import socket
import urlparse
from blueprint import cfg
def _connect(server=None):
if server is None:
server = cfg.get('io', 'server')
url = urlparse.urlparse(server)
if -1 == url.netloc.find(':'):
port = url.port or 443 if 'https' == url.scheme else 80
else:
port = None
if 'https' == url.scheme:
return httplib.HTTPSConnection(url.netloc, port)
else:
return httplib.HTTPConnection(url.netloc, port)
def _request(verb, path, body=None, headers={}, server=None):
c = _connect(server)
try:
c.request(verb, path, body, headers)
except socket.error as e:
if errno.EPIPE != e.errno:
raise e
return c.getresponse()
def delete(path, server=None):
return _request('DELETE', path, server=server)
def get(path, headers={}, server=None):
c = _connect(server)
c.request('GET', path, None, headers)
r = c.getresponse()
while r.status in (301, 302, 307):
url = urlparse.urlparse(r.getheader('Location'))
r = get(url.path,
{'Content-Type': r.getheader('Content-Type')},
urlparse.urlunparse((url.scheme, url.netloc, '', '', '', '')))
return r
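# Note (not part of the original file): get() follows 301/302/307 redirects by
# re-issuing the request against whatever host the Location header names, so
# callers always receive the final, non-redirect response object.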
def post(path, body, headers={}, server=None):
return _request('POST', path, body, headers, server)
def put(path, body, headers={}, server=None):
return _request('PUT', path, body, headers, server)
```
#### File: io/server/backend.py
```python
import boto
import boto.exception
import httplib
import socket
from blueprint import cfg
import librato
import statsd
access_key = cfg.get('s3', 'access_key')
bucket = cfg.get('s3', 'bucket')
protocol = 'https' if cfg.getboolean('s3', 'use_https') else 'http'
region = cfg.get('s3', 'region')
s3_region = 's3' if 'US' == region else 's3-{0}'.format(region)
secret_key = cfg.get('s3', 'secret_key')
def delete(key):
"""
Remove an object from S3. DELETE requests are free but this function
still makes one billable request to account for freed storage.
"""
content_length = head(key)
if content_length is None:
return None
librato.count('blueprint-io-server.requests.delete')
statsd.increment('blueprint-io-server.requests.delete')
c = boto.connect_s3(access_key, secret_key)
b = c.get_bucket(bucket, validate=False)
try:
b.delete_key(key)
# TODO librato.something('blueprint-io-server.storage', -content_length)
statsd.update('blueprint-io-server.storage', -content_length)
except (boto.exception.BotoClientError,
boto.exception.BotoServerError,
boto.exception.S3ResponseError,
httplib.HTTPException,
socket.error,
socket.gaierror):
return False
def delete_blueprint(secret, name):
return delete(key_for_blueprint(secret, name))
def delete_tarball(secret, name, sha):
return delete(key_for_tarball(secret, name, sha))
def get(key):
"""
Fetch an object from S3. This function makes one billable request.
"""
librato.count('blueprint-io-server.requests.get')
statsd.increment('blueprint-io-server.requests.get')
c = boto.connect_s3(access_key, secret_key)
b = c.get_bucket(bucket, validate=False)
k = b.new_key(key)
try:
return k.get_contents_as_string()
except boto.exception.S3ResponseError:
return None
except (boto.exception.BotoClientError,
boto.exception.BotoServerError,
httplib.HTTPException,
socket.error,
socket.gaierror):
return False
def get_blueprint(secret, name):
return get(key_for_blueprint(secret, name))
def get_tarball(secret, name, sha):
return get(key_for_tarball(secret, name, sha))
def head(key):
"""
Make a HEAD request for an object in S3. This is needed to find the
object's length so it can be accounted. This function makes one
billable request and anticipates another.
"""
librato.count('blueprint-io-server.requests.head')
statsd.increment('blueprint-io-server.requests.head')
c = boto.connect_s3(access_key, secret_key)
b = c.get_bucket(bucket, validate=False)
try:
k = b.get_key(key)
if k is None:
return None
return k.size
except (boto.exception.BotoClientError,
boto.exception.BotoServerError,
httplib.HTTPException,
socket.error,
socket.gaierror):
return False
def head_blueprint(secret, name):
return head(key_for_blueprint(secret, name))
def head_tarball(secret, name, sha):
return head(key_for_tarball(secret, name, sha))
def key_for_blueprint(secret, name):
return '{0}/{1}/{2}'.format(secret,
name,
'blueprint.json')
def key_for_tarball(secret, name, sha):
return '{0}/{1}/{2}.tar'.format(secret,
name,
sha)
def list(key):
"""
List objects in S3 whose keys begin with the given prefix. This
function makes at least one billable request.
"""
librato.count('blueprint-io-server.requests.list')
statsd.increment('blueprint-io-server.requests.list')
c = boto.connect_s3(access_key, secret_key)
b = c.get_bucket(bucket, validate=False)
    try:
        return b.list(key)
    except (boto.exception.BotoClientError,
            boto.exception.BotoServerError,
            httplib.HTTPException,
            socket.error,
            socket.gaierror):
        return False
def put(key, data):
"""
Store an object in S3. This function makes one billable request.
"""
librato.count('blueprint-io-server.requests.put')
statsd.increment('blueprint-io-server.requests.put')
# TODO librato.something('blueprint-io-server.storage', len(data))
statsd.update('blueprint-io-server.storage', len(data))
c = boto.connect_s3(access_key, secret_key)
b = c.get_bucket(bucket, validate=False)
k = b.new_key(key)
try:
k.set_contents_from_string(data,
policy='public-read',
reduced_redundancy=True)
return True
except (boto.exception.BotoClientError,
boto.exception.BotoServerError,
httplib.HTTPException,
socket.error,
socket.gaierror):
return False
def put_blueprint(secret, name, data):
return put(key_for_blueprint(secret, name), data)
def put_tarball(secret, name, sha, data):
return put(key_for_tarball(secret, name, sha), data)
def url_for(key):
return '{0}://{1}.{2}.amazonaws.com/{3}'.format(protocol,
bucket,
s3_region,
key)
def url_for_blueprint(secret, name):
return url_for(key_for_blueprint(secret, name))
def url_for_tarball(secret, name, sha):
return url_for(key_for_tarball(secret, name, sha))
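# Illustrative note (not part of the original file): with made-up settings
# bucket='example-bucket', region='US' and use_https=True,
#   url_for(key_for_blueprint('0123456789abcdef', 'web'))
# evaluates to
#   'https://example-bucket.s3.amazonaws.com/0123456789abcdef/web/blueprint.json'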
```
#### File: site-packages/blueprint/rules.py
```python
from collections import defaultdict
import fnmatch
import glob
import json
import logging
import os
import os.path
import re
import subprocess
from blueprint import deps
from blueprint import util
# The default list of ignore patterns. Typically, the value of each key
# will be False. Providing True will negate the meaning of the pattern
# and cause matching files to be included in blueprints.
#
# XXX Update `blueprintignore`(5) if you make changes here.
IGNORE = {'*~': False,
'*.blueprint-template.*': False,
'*.dpkg-*': False,
'/etc/.git': False,
'/etc/.pwd.lock': False,
'/etc/X11/default-display-manager': False,
'/etc/adjtime': False,
'/etc/alternatives': False,
'/etc/apparmor': False,
'/etc/apparmor.d': False,
'/etc/blkid/blkid.tab': False,
'/etc/ca-certificates.conf': False,
'/etc/console-setup': False,
# TODO Only if it's a symbolic link to ubuntu.
'/etc/dpkg/origins/default': False,
'/etc/fstab': False,
'/etc/group-': False,
'/etc/group': False,
'/etc/gshadow-': False,
'/etc/gshadow': False,
'/etc/hostname': False,
'/etc/init.d/.legacy-bootordering': False,
'/etc/initramfs-tools/conf.d/resume': False,
'/etc/ld.so.cache': False,
'/etc/localtime': False,
'/etc/lvm/cache': False,
'/etc/mailcap': False,
'/etc/mtab': False,
'/etc/modules': False,
# TODO Only if it's a symbolic link to /var/run/motd.
'/etc/motd': False,
'/etc/network/interfaces': False,
'/etc/passwd-': False,
'/etc/passwd': False,
'/etc/pki/rpm-gpg': True,
'/etc/popularity-contest.conf': False,
'/etc/prelink.cache': False,
'/etc/resolv.conf': False, # Most people use the defaults.
'/etc/rc.d': False,
'/etc/rc0.d': False,
'/etc/rc1.d': False,
'/etc/rc2.d': False,
'/etc/rc3.d': False,
'/etc/rc4.d': False,
'/etc/rc5.d': False,
'/etc/rc6.d': False,
'/etc/rcS.d': False,
'/etc/shadow-': False,
'/etc/shadow': False,
'/etc/ssh/ssh_host_key*': False,
'/etc/ssh/ssh_host_*_key*': False,
'/etc/ssl/certs': False,
'/etc/sysconfig/clock': False,
'/etc/sysconfig/i18n': False,
'/etc/sysconfig/keyboard': False,
'/etc/sysconfig/network': False,
'/etc/sysconfig/network-scripts': False,
'/etc/timezone': False,
'/etc/udev/rules.d/70-persistent-*.rules': False,
'/etc/yum.repos.d': True}
CACHE = '/tmp/blueprintignore'
def defaults():
"""
Parse `/etc/blueprintignore` and `~/.blueprintignore` to build the
default `Rules` object.
"""
r = None
# Check for a fresh cache of the complete blueprintignore(5) rules.
if _mtime('/etc/blueprintignore') < _mtime(CACHE) \
and _mtime(os.path.expanduser('~/.blueprintignore')) < _mtime(CACHE) \
and _mtime(__file__) < _mtime(CACHE):
try:
r = Rules(json.load(open(CACHE)))
logging.info('using cached blueprintignore(5) rules')
return r
except (OSError, ValueError):
pass
# Cache things that are ignored by default first.
r = Rules({
'file': IGNORE.items(),
'package': [('apt', package, False) for package in _apt()] +
[('yum', package, False) for package in _yum()],
'service': [('sysvinit', 'skeleton', False)],
'source': [('/', False),
('/usr/local', True)],
})
# Cache the patterns stored in the blueprintignore files.
logging.info('parsing blueprintignore(5) rules')
try:
for pathname in ['/etc/blueprintignore',
os.path.expanduser('~/.blueprintignore')]:
r.parse(open(pathname), negate=True)
except IOError:
pass
# Store the cache to disk.
f = _cache_open(CACHE, 'w')
json.dump(r, f, indent=2, sort_keys=True)
f.close()
return r
def none():
"""
Build a `Rules` object that ignores every resource.
"""
return Rules({'file': [('*', False)],
'package': [('*', '*', False)],
'service': [('*', '*', False)],
'source': [('/', False)]})
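# Illustrative sketch (not part of the original file): typical use of these rules.
#
#     r = defaults()
#     r.ignore_file('/etc/passwd')            # True: matched by the IGNORE table above
#     r.ignore_file('/etc/nginx/nginx.conf')  # False, unless a blueprintignore rule matches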
def _apt():
"""
Return the set of packages that should never appear in a blueprint because
they're already guaranteed (to some degree) to be there.
"""
CACHE = '/tmp/blueprint-apt-exclusions'
# Read from a cached copy.
try:
return set([line.rstrip() for line in open(CACHE)])
except IOError:
pass
logging.info('searching for APT packages to exclude')
# Start with the root packages for the various Ubuntu installations.
s = set(['grub-pc',
'installation-report',
'language-pack-en',
'language-pack-gnome-en',
'linux-generic-pae',
'linux-server',
'os-prober',
'ubuntu-desktop',
'ubuntu-minimal',
'ubuntu-standard',
'wireless-crda'])
# Find the essential and required packages. Every server's got 'em, no
# one wants to muddle their blueprint with 'em.
for field in ('Essential', 'Priority'):
try:
p = subprocess.Popen(['dpkg-query',
'-f=${{Package}} ${{{0}}}\n'.format(field),
'-W'],
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
_cache_open(CACHE, 'w').close()
return s
for line in p.stdout:
try:
package, property = line.rstrip().split()
if property in ('yes', 'important', 'required', 'standard'):
s.add(package)
except ValueError:
pass
# Walk the dependency tree all the way to the leaves.
s = deps.apt(s)
# Write to a cache.
logging.info('caching excluded APT packages')
f = _cache_open(CACHE, 'w')
for package in sorted(s):
f.write('{0}\n'.format(package))
f.close()
return s
def _yum():
"""
Return the set of packages that should never appear in a blueprint because
they're already guaranteed (to some degree) to be there.
"""
CACHE = '/tmp/blueprint-yum-exclusions'
# Read from a cached copy.
try:
return set([line.rstrip() for line in open(CACHE)])
except IOError:
pass
logging.info('searching for Yum packages to exclude')
# Start with a few groups that install common packages.
s = set(['gpg-pubkey'])
pattern = re.compile(r'^ (\S+)')
try:
p = subprocess.Popen(['yum', 'groupinfo',
'core', 'base', 'gnome-desktop'],
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
_cache_open(CACHE, 'w').close()
return s
for line in p.stdout:
match = pattern.match(line)
if match is not None:
s.add(match.group(1))
# Walk the dependency tree all the way to the leaves.
s = deps.yum(s)
# Write to a cache.
logging.info('caching excluded Yum packages')
f = _cache_open(CACHE, 'w')
for package in sorted(s):
f.write('{0}\n'.format(package))
f.close()
return s
def _cache_open(pathname, mode):
f = open(pathname, mode)
if util.via_sudo():
uid = int(os.environ['SUDO_UID'])
gid = int(os.environ['SUDO_GID'])
os.fchown(f.fileno(), uid, gid)
return f
def _mtime(pathname):
try:
return os.stat(pathname).st_mtime
except OSError:
return 0
class Rules(defaultdict):
"""
Ordered lists of rules for ignoring/unignoring particular resources.
This is used for both `blueprintignore`(5) and `blueprint-rules`(1).
"""
def __init__(self, *args, **kwargs):
super(Rules, self).__init__(list, *args, **kwargs)
def _ignore_pathname(self, restype, dirname, pathname, ignored=False):
"""
Return `True` if the `gitignore`(5)-style `~/.blueprintignore`
file says the given file should be ignored. The starting state
of the file may be overridden by setting `ignored` to `True`.
"""
pathname = util.unicodeme(pathname)
# Determine if the `pathname` matches the `pattern`. `filename` is
# given as a convenience. See `gitignore`(5) for the rules in play.
def match(filename, pathname, pattern):
dir_only = '/' == pattern[-1]
pattern = pattern.rstrip('/')
if '/' not in pattern:
if fnmatch.fnmatch(filename, pattern):
return os.path.isdir(pathname) if dir_only else True
else:
for p in glob.glob(os.path.join(dirname, pattern)):
p = util.unicodeme(p)
if pathname == p or pathname.startswith('{0}/'.format(p)):
return os.path.isdir(pathname) if dir_only else True
return False
# Iterate over exclusion rules until a match is found. Then iterate
# over inclusion rules that appear later. If there are no matches,
# include the file. If only an exclusion rule matches, exclude the
# file. If an inclusion rule also matches, include the file.
filename = os.path.basename(pathname)
for pattern, negate in self[restype]:
if ignored != negate or not match(filename, pathname, pattern):
continue
ignored = not ignored
return ignored
def ignore_file(self, pathname, ignored=False):
"""
Return `True` if the given pathname should be ignored.
"""
return self._ignore_pathname('file', '/etc', pathname, ignored)
def ignore_package(self, manager, package, ignored=False):
"""
Iterate over package exclusion rules looking for exact matches. As
with files, search for a negated rule after finding a match. Return
`True` to indicate the package should be ignored.
"""
for m, p, negate in self['package']:
if ignored != negate \
or manager != m and '*' != m \
or package != p and '*' != p:
continue
ignored = not ignored
return ignored
def ignore_service(self, manager, service, ignored=False):
"""
Return `True` if a given service should be ignored.
"""
for m, s, negate in self['service']:
if ignored != negate \
or manager != m and '*' != m \
or service != s and '*' != s:
continue
ignored = not ignored
return ignored
def ignore_source(self, pathname, ignored=False):
"""
Return `True` if the given pathname should be ignored. Negated rules
on directories will create new source tarballs. Other rules will
ignore files within those tarballs.
"""
return self._ignore_pathname('source', '/', pathname, ignored)
def parse(self, f, negate=False):
"""
Parse rules from the given file-like object. This is used both for
`blueprintignore`(5) and for `blueprint-rules`(1).
"""
for pattern in f:
pattern = pattern.rstrip()
# Comments and blank lines.
if '' == pattern or '#' == pattern[0]:
continue
# Negated lines.
if '!' == pattern[0]:
pattern = pattern[1:]
ignored = negate
else:
ignored = not negate
# Normalize file resources, which don't need the : and type
# qualifier, into the same format as others, like packages.
if ':' == pattern[0]:
try:
restype, pattern = pattern[1:].split(':', 2)
except ValueError:
continue
else:
restype = 'file'
# Ignore a package and its dependencies or unignore a single
# package. Empirically, the best balance of power and
# granularity comes from this arrangement. Take
# build-essential's mutual dependence with dpkg-dev as an
# example of why.
if 'package' == restype:
try:
manager, package = pattern.split('/')
except ValueError:
logging.warning('invalid package rule "{0}"'.
format(pattern))
continue
self['package'].append((manager, package, ignored))
if not ignored:
for dep in getattr(deps,
manager,
lambda(arg): [])(package):
self['package'].append((manager, dep, ignored))
elif 'service' == restype:
try:
manager, service = pattern.split('/')
except ValueError:
logging.warning('invalid service rule "{0}"'.
format(pattern))
continue
self['service'].append((manager, service, ignored))
# Ignore or unignore a file, glob, or directory tree.
else:
self[restype].append((pattern, ignored))
return self
```
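The package rules are plain list scans with no filesystem access, so they can be tried out in isolation. A minimal sketch, assuming the module above is importable as `blueprint.rules` (it is Python 2 code); the rule tuples here are hand-built rather than parsed from a blueprintignore file:
```python
# Hedged sketch: assumes blueprint.rules is importable (Python 2 code base).
# A wildcard exclusion for apt packages, followed by an explicit unignore.
from blueprint.rules import Rules

r = Rules({'package': [('apt', '*', False),       # ignore every apt package...
                       ('apt', 'nginx', True)]})  # ...except nginx
print(r.ignore_package('apt', 'vim'))    # True  - caught by the wildcard rule
print(r.ignore_package('apt', 'nginx'))  # False - unignored by the later rule
```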
#### File: site-packages/blueprint/walk.py
```python
import os.path
import re
import git
import managers
import util
def walk(b, **kwargs):
"""
Walk an entire blueprint in the appropriate order, executing callbacks
along the way. See blueprint(5) for details on the algorithm. The
callbacks are passed directly from this method to the resource
type-specific methods and are documented there.
"""
walk_sources(b, **kwargs)
walk_files(b, **kwargs)
walk_packages(b, **kwargs)
walk_services(b, **kwargs)
def walk_sources(b, **kwargs):
"""
Walk a blueprint's source tarballs and execute callbacks.
* `before_sources():`
Executed before source tarballs are enumerated.
* `source(dirname, filename, gen_content, url):`
Executed when a source tarball is enumerated. Either `gen_content` or
`url` will be `None`. `gen_content`, when not `None`, is a callable
that will return the file's contents.
* `after_sources():`
Executed after source tarballs are enumerated.
"""
kwargs.get('before_sources', lambda *args: None)()
pattern = re.compile(r'^(?:file|ftp|https?)://', re.I)
callable = kwargs.get('source', lambda *args: None)
for dirname, filename in sorted(b.get('sources', {}).iteritems()):
if pattern.match(filename) is None:
def gen_content():
# It's a good thing `gen_content` is never called by the
# `Blueprint.__init__` callbacks, since this would always
# raise `AttributeError` on the fake blueprint structure
# used to initialize a real `Blueprint` object.
tree = git.tree(b._commit)
blob = git.blob(tree, filename)
return git.content(blob)
callable(dirname, filename, gen_content, None)
else:
url = filename
filename = os.path.basename(url)
if '' == filename:
filename = 'blueprint-downloaded-tarball.tar.gz'
callable(dirname, filename, None, url)
kwargs.get('after_sources', lambda *args: None)()
def walk_files(b, **kwargs):
"""
Walk a blueprint's files and execute callbacks.
* `before_files():`
Executed before files are enumerated.
* `file(pathname, f):`
Executed when a file is enumerated.
* `after_files():`
Executed after files are enumerated.
"""
kwargs.get('before_files', lambda *args: None)()
callable = kwargs.get('file', lambda *args: None)
for pathname, f in sorted(b.get('files', {}).iteritems()):
# AWS cfn-init templates may specify file content as JSON, which
# must be converted to a string here, lest each frontend have to
# do so.
if 'content' in f and not isinstance(f['content'], basestring):
f['content'] = util.json_dumps(f['content'])
callable(pathname, f)
kwargs.get('after_files', lambda *args: None)()
def walk_packages(b, managername=None, **kwargs):
"""
Walk a package tree and execute callbacks along the way. This is a bit
like iteration but can't match the iterator protocol due to the varying
argument lists given to each type of callback. The available callbacks
are:
* `before_packages(manager):`
Executed before a package manager's dependencies are enumerated.
* `package(manager, package, version):`
Executed when a package version is enumerated.
* `after_packages(manager):`
Executed after a package manager's dependencies are enumerated.
"""
# Walking begins with the system package managers, `apt`, `rpm`,
# and `yum`.
if managername is None:
walk_packages(b, 'apt', **kwargs)
walk_packages(b, 'rpm', **kwargs)
walk_packages(b, 'yum', **kwargs)
return
# Get the full manager from its name.
manager = managers.PackageManager(managername)
# Give the manager a chance to setup for its dependencies.
kwargs.get('before_packages', lambda *args: None)(manager)
# Each package gets its chance to take action. Note which packages
# are themselves managers so they may be visited recursively later.
next_managers = []
callable = kwargs.get('package', lambda *args: None)
for package, versions in sorted(b.get('packages',
{}).get(manager,
{}).iteritems()):
if 0 == len(versions):
callable(manager, package, None)
elif isinstance(versions, basestring):
callable(manager, package, versions)
else:
for version in versions:
callable(manager, package, version)
if managername != package and package in b.get('packages', {}):
next_managers.append(package)
# Give the manager a chance to clean up after itself.
kwargs.get('after_packages', lambda *args: None)(manager)
# Now recurse into each manager that was just installed. Recursing
# here is safer because there may be secondary dependencies that are
# not expressed in the hierarchy (for example the `mysql2` gem
# depends on `libmysqlclient-dev` in addition to its manager).
for managername in next_managers:
walk_packages(b, managername, **kwargs)
def walk_services(b, managername=None, **kwargs):
"""
Walk a blueprint's services and execute callbacks.
* `before_services(manager):`
Executed before a service manager's dependencies are enumerated.
* `service(manager, service):`
Executed when a service is enumerated.
* `after_services(manager):`
Executed after a service manager's dependencies are enumerated.
"""
# Unless otherwise specified, walk all service managers.
if managername is None:
for managername in sorted(b.get('services', {}).iterkeys()):
walk_services(b, managername, **kwargs)
return
manager = managers.ServiceManager(managername)
kwargs.get('before_services', lambda *args: None)(manager)
callable = kwargs.get('service', lambda *args: None)
for service, deps in sorted(b.get('services',
{}).get(manager,
{}).iteritems()):
callable(manager, service)
walk_service_files(b, manager, service, **kwargs)
walk_service_packages(b, manager, service, **kwargs)
walk_service_sources(b, manager, service, **kwargs)
kwargs.get('after_services', lambda *args: None)(manager)
def walk_service_files(b, manager, servicename, **kwargs):
"""
Walk a service's file dependencies and execute callbacks.
* `service_file(manager, servicename, pathname):`
Executed when a file service dependency is enumerated.
"""
deps = b.get('services', {}).get(manager, {}).get(servicename, {})
if 'files' not in deps:
return
callable = kwargs.get('service_file', lambda *args: None)
for pathname in list(deps['files']):
callable(manager, servicename, pathname)
def walk_service_packages(b, manager, servicename, **kwargs):
"""
Walk a service's package dependencies and execute callbacks.
* `service_package(manager,
servicename,
package_managername,
package):`
Executed when a file service dependency is enumerated.
"""
deps = b.get('services', {}).get(manager, {}).get(servicename, {})
if 'packages' not in deps:
return
callable = kwargs.get('service_package', lambda *args: None)
for package_managername, packages in deps['packages'].iteritems():
for package in packages:
callable(manager, servicename, package_managername, package)
def walk_service_sources(b, manager, servicename, **kwargs):
"""
Walk a service's source tarball dependencies and execute callbacks.
* `service_source(manager, servicename, dirname):`
Executed when a source tarball service dependency is enumerated.
"""
deps = b.get('services', {}).get(manager, {}).get(servicename, {})
if 'sources' not in deps:
return
callable = kwargs.get('service_source', lambda *args: None)
for dirname in list(deps['sources']):
callable(manager, servicename, dirname)
```
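Each `walk_*` helper only reads top-level keys via `b.get(...)`, so a plain dict can stand in for a real `Blueprint` when experimenting with the callback protocol. A rough sketch under that assumption (Python 2, to match the `iteritems` calls above):
```python
# Hedged sketch: `fake_blueprint` is a hand-built dict, not a real Blueprint.
from blueprint import walk

fake_blueprint = {'files': {'/etc/motd': {'content': 'hello\n', 'mode': '0644'}}}

def on_file(pathname, f):
    print('%s (mode %s)' % (pathname, f['mode']))

walk.walk_files(fake_blueprint, file=on_file)  # prints: /etc/motd (mode 0644)
```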
#### File: site-packages/flask_api/mediatypes.py
```python
from __future__ import unicode_literals
class MediaType(object):
def __init__(self, media_type):
self.main_type, self.sub_type, self.params = self._parse(media_type)
@property
def full_type(self):
return self.main_type + '/' + self.sub_type
@property
def precedence(self):
"""
Precedence is determined by how specific a media type is:
3. 'type/subtype; param=val'
2. 'type/subtype'
1. 'type/*'
0. '*/*'
"""
if self.main_type == '*':
return 0
elif self.sub_type == '*':
return 1
elif not self.params or list(self.params.keys()) == ['q']:
return 2
return 3
def satisfies(self, other):
"""
Returns `True` if this media type is a superset of `other`.
Some examples of cases where this holds true:
'application/json; version=1.0' >= 'application/json; version=1.0'
'application/json' >= 'application/json; indent=4'
'text/*' >= 'text/plain'
'*/*' >= 'text/plain'
"""
for key in self.params.keys():
if key != 'q' and other.params.get(key, None) != self.params.get(key, None):
return False
if self.sub_type != '*' and other.sub_type != '*' and other.sub_type != self.sub_type:
return False
if self.main_type != '*' and other.main_type != '*' and other.main_type != self.main_type:
return False
return True
def _parse(self, media_type):
"""
Parse a media type string, like "application/json; indent=4" into a
three-tuple, like: ('application', 'json', {'indent': 4})
"""
full_type, sep, param_string = media_type.partition(';')
params = {}
for token in param_string.strip().split(','):
key, sep, value = [s.strip() for s in token.partition('=')]
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
if key:
params[key] = value
main_type, sep, sub_type = [s.strip() for s in full_type.partition('/')]
return (main_type, sub_type, params)
def __repr__(self):
return "<%s '%s'>" % (self.__class__.__name__, str(self))
def __str__(self):
"""
Return a canonical string representing the media type.
Note that this ensures the params are sorted.
"""
if self.params:
params_str = ', '.join([
'%s="%s"' % (key, val)
for key, val in sorted(self.params.items())
])
return self.full_type + '; ' + params_str
return self.full_type
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
# Compare two MediaType instances, ignoring parameter ordering.
return (
self.full_type == other.full_type and
self.params == other.params
)
def parse_accept_header(accept):
"""
Parses the value of a client's Accept header and returns a list of sets
of media types it included, ordered by precedence.
For example, 'application/json, application/xml, */*' would return:
[
set([<MediaType "application/xml">, <MediaType "application/json">]),
set([<MediaType "*/*">])
]
"""
ret = [set(), set(), set(), set()]
for token in accept.split(','):
media_type = MediaType(token.strip())
ret[3 - media_type.precedence].add(media_type)
return [media_types for media_types in ret if media_types]
```
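A short illustration of the precedence grouping and the `satisfies` relation, assuming `flask_api` is installed; the cases mirror the ones listed in the `satisfies` docstring:
```python
# Hedged sketch: exercising MediaType and parse_accept_header directly.
from flask_api.mediatypes import MediaType, parse_accept_header

print(MediaType('application/json').satisfies(
    MediaType('application/json; indent=4')))                  # True
print(MediaType('application/json; version=1.0').satisfies(
    MediaType('application/json; version=2.0')))               # False

for group in parse_accept_header('application/json, text/*;q=0.5, */*;q=0.1'):
    print([m.full_type for m in group])
# -> ['application/json'], then ['text/*'], then ['*/*'] (most specific first)
```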
#### File: site-packages/flask_api/negotiation.py
```python
from __future__ import unicode_literals
from flask import request
from flask_api import exceptions
from flask_api.mediatypes import MediaType, parse_accept_header
class BaseNegotiation(object):
def select_parser(self, parsers):
msg = '`select_parser()` method must be implemented for class "%s"'
raise NotImplementedError(msg % self.__class__.__name__)
def select_renderer(self, renderers):
msg = '`select_renderer()` method must be implemented for class "%s"'
raise NotImplementedError(msg % self.__class__.__name__)
class DefaultNegotiation(BaseNegotiation):
def select_parser(self, parsers):
"""
Determine which parser to use for parsing the request body.
Returns a two-tuple of (parser, content type).
"""
content_type_header = request.content_type
client_media_type = MediaType(content_type_header)
for parser in parsers:
server_media_type = MediaType(parser.media_type)
if server_media_type.satisfies(client_media_type):
return (parser, client_media_type)
raise exceptions.UnsupportedMediaType()
def select_renderer(self, renderers):
"""
Determine which renderer to use for rendering the response body.
Returns a two-tuple of (renderer, content type).
"""
accept_header = request.headers.get('Accept', '*/*')
for client_media_types in parse_accept_header(accept_header):
for renderer in renderers:
server_media_type = MediaType(renderer.media_type)
for client_media_type in client_media_types:
if client_media_type.satisfies(server_media_type):
if server_media_type.precedence > client_media_type.precedence:
return (renderer, server_media_type)
else:
return (renderer, client_media_type)
raise exceptions.NotAcceptable()
```
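Parsers and renderers only need a `media_type` attribute for negotiation, so minimal stand-ins are enough to see which media type comes back. A sketch, assuming Flask and `flask_api` are importable; `JSONRenderer` is an illustrative stub, not part of the library:
```python
# Hedged sketch: a bare-bones renderer stand-in inside a test request context.
from flask import Flask
from flask_api.negotiation import DefaultNegotiation

class JSONRenderer(object):
    media_type = 'application/json'

app = Flask(__name__)
with app.test_request_context(headers={'Accept': 'application/*'}):
    renderer, media_type = DefaultNegotiation().select_renderer([JSONRenderer()])
    print(media_type)  # application/json - the more specific server type wins
```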
#### File: nose/plugins/errorclass.py
```python
from nose.pyversion import make_instancemethod
from nose.plugins.base import Plugin
from nose.result import TextTestResult
from nose.util import isclass
class MetaErrorClass(type):
"""Metaclass for ErrorClassPlugins that allows error classes to be
set up in a declarative manner.
"""
def __init__(self, name, bases, attr):
errorClasses = []
for name, detail in attr.items():
if isinstance(detail, ErrorClass):
attr.pop(name)
for cls in detail:
errorClasses.append(
(cls, (name, detail.label, detail.isfailure)))
super(MetaErrorClass, self).__init__(name, bases, attr)
self.errorClasses = tuple(errorClasses)
class ErrorClass(object):
def __init__(self, *errorClasses, **kw):
self.errorClasses = errorClasses
try:
for key in ('label', 'isfailure'):
setattr(self, key, kw.pop(key))
except KeyError:
raise TypeError("%r is a required named argument for ErrorClass"
% key)
def __iter__(self):
return iter(self.errorClasses)
class ErrorClassPlugin(Plugin):
"""
Base class for ErrorClass plugins. Subclass this class and declare the
exceptions that you wish to handle as attributes of the subclass.
"""
__metaclass__ = MetaErrorClass
score = 1000
errorClasses = ()
def addError(self, test, err):
err_cls, a, b = err
if not isclass(err_cls):
return
classes = [e[0] for e in self.errorClasses]
if filter(lambda c: issubclass(err_cls, c), classes):
return True
def prepareTestResult(self, result):
if not hasattr(result, 'errorClasses'):
self.patchResult(result)
for cls, (storage_attr, label, isfail) in self.errorClasses:
if cls not in result.errorClasses:
storage = getattr(result, storage_attr, [])
setattr(result, storage_attr, storage)
result.errorClasses[cls] = (storage, label, isfail)
def patchResult(self, result):
result.printLabel = print_label_patch(result)
result._orig_addError, result.addError = \
result.addError, add_error_patch(result)
result._orig_wasSuccessful, result.wasSuccessful = \
result.wasSuccessful, wassuccessful_patch(result)
if hasattr(result, 'printErrors'):
result._orig_printErrors, result.printErrors = \
result.printErrors, print_errors_patch(result)
if hasattr(result, 'addSkip'):
result._orig_addSkip, result.addSkip = \
result.addSkip, add_skip_patch(result)
result.errorClasses = {}
def add_error_patch(result):
"""Create a new addError method to patch into a result instance
that recognizes the errorClasses attribute and deals with
errorclasses correctly.
"""
return make_instancemethod(TextTestResult.addError, result)
def print_errors_patch(result):
"""Create a new printErrors method that prints errorClasses items
as well.
"""
return make_instancemethod(TextTestResult.printErrors, result)
def print_label_patch(result):
"""Create a new printLabel method that prints errorClasses items
as well.
"""
return make_instancemethod(TextTestResult.printLabel, result)
def wassuccessful_patch(result):
"""Create a new wasSuccessful method that checks errorClasses for
exceptions that were put into other slots than error or failure
but that still count as not success.
"""
return make_instancemethod(TextTestResult.wasSuccessful, result)
def add_skip_patch(result):
"""Create a new addSkip method to patch into a result instance
that delegates to addError.
"""
return make_instancemethod(TextTestResult.addSkip, result)
if __name__ == '__main__':
import doctest
doctest.testmod()
```
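The declarative style the metaclass enables looks roughly like the sketch below, patterned on nose's own skip/deprecated plugins; `TodoError`, the plugin name, and the label are illustrative assumptions rather than anything defined above:
```python
# Hedged sketch: a plugin that routes TodoError into its own result bucket
# instead of counting it as an error or a failure.
# (The __metaclass__ hook shown above follows Python 2 semantics.)
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin

class TodoError(Exception):
    """Marker exception raised by tests that are known-incomplete."""

class TodoPlugin(ErrorClassPlugin):
    name = 'todo'
    enabled = True
    todo = ErrorClass(TodoError, label='TODO', isfailure=False)
```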
#### File: psycopg2/tests/test_cursor.py
```python
import time
import pickle
import psycopg2
import psycopg2.extensions
from testutils import (unittest, ConnectingTestCase, skip_before_postgres,
skip_if_no_namedtuple, skip_if_no_getrefcount, slow, skip_if_no_superuser,
skip_if_windows)
import psycopg2.extras
class CursorTests(ConnectingTestCase):
def test_close_idempotent(self):
cur = self.conn.cursor()
cur.close()
cur.close()
self.assert_(cur.closed)
def test_empty_query(self):
cur = self.conn.cursor()
self.assertRaises(psycopg2.ProgrammingError, cur.execute, "")
self.assertRaises(psycopg2.ProgrammingError, cur.execute, " ")
self.assertRaises(psycopg2.ProgrammingError, cur.execute, ";")
def test_executemany_propagate_exceptions(self):
conn = self.conn
cur = conn.cursor()
cur.execute("create temp table test_exc (data int);")
def buggygen():
yield 1 // 0
self.assertRaises(ZeroDivisionError,
cur.executemany, "insert into test_exc values (%s)", buggygen())
cur.close()
def test_mogrify_unicode(self):
conn = self.conn
cur = conn.cursor()
# test consistency between execute and mogrify.
# unicode query containing only ascii data
cur.execute(u"SELECT 'foo';")
self.assertEqual('foo', cur.fetchone()[0])
self.assertEqual(b"SELECT 'foo';", cur.mogrify(u"SELECT 'foo';"))
conn.set_client_encoding('UTF8')
snowman = u"\u2603"
def b(s):
if isinstance(s, unicode):
return s.encode('utf8')
else:
return s
# unicode query with non-ascii data
cur.execute(u"SELECT '%s';" % snowman)
self.assertEqual(snowman.encode('utf8'), b(cur.fetchone()[0]))
self.assertQuotedEqual(("SELECT '%s';" % snowman).encode('utf8'),
cur.mogrify(u"SELECT '%s';" % snowman))
# unicode args
cur.execute("SELECT %s;", (snowman,))
self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0]))
self.assertQuotedEqual(("SELECT '%s';" % snowman).encode('utf8'),
cur.mogrify("SELECT %s;", (snowman,)))
# unicode query and args
cur.execute(u"SELECT %s;", (snowman,))
self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0]))
self.assertQuotedEqual(("SELECT '%s';" % snowman).encode('utf8'),
cur.mogrify(u"SELECT %s;", (snowman,)))
def test_mogrify_decimal_explodes(self):
# issue #7: explodes on windows with python 2.5 and psycopg 2.2.2
try:
from decimal import Decimal
except:
return
conn = self.conn
cur = conn.cursor()
self.assertEqual(b'SELECT 10.3;',
cur.mogrify("SELECT %s;", (Decimal("10.3"),)))
@skip_if_no_getrefcount
def test_mogrify_leak_on_multiple_reference(self):
# issue #81: reference leak when a parameter value is referenced
# more than once from a dict.
cur = self.conn.cursor()
foo = (lambda x: x)('foo') * 10
import sys
nref1 = sys.getrefcount(foo)
cur.mogrify("select %(foo)s, %(foo)s, %(foo)s", {'foo': foo})
nref2 = sys.getrefcount(foo)
self.assertEqual(nref1, nref2)
def test_modify_closed(self):
cur = self.conn.cursor()
cur.close()
sql = cur.mogrify("select %s", (10,))
self.assertEqual(sql, b"select 10")
def test_bad_placeholder(self):
cur = self.conn.cursor()
self.assertRaises(psycopg2.ProgrammingError,
cur.mogrify, "select %(foo", {})
self.assertRaises(psycopg2.ProgrammingError,
cur.mogrify, "select %(foo", {'foo': 1})
self.assertRaises(psycopg2.ProgrammingError,
cur.mogrify, "select %(foo, %(bar)", {'foo': 1})
self.assertRaises(psycopg2.ProgrammingError,
cur.mogrify, "select %(foo, %(bar)", {'foo': 1, 'bar': 2})
def test_cast(self):
curs = self.conn.cursor()
self.assertEqual(42, curs.cast(20, '42'))
self.assertAlmostEqual(3.14, curs.cast(700, '3.14'))
try:
from decimal import Decimal
except ImportError:
self.assertAlmostEqual(123.45, curs.cast(1700, '123.45'))
else:
self.assertEqual(Decimal('123.45'), curs.cast(1700, '123.45'))
from datetime import date
self.assertEqual(date(2011, 1, 2), curs.cast(1082, '2011-01-02'))
self.assertEqual("who am i?", curs.cast(705, 'who am i?')) # unknown
def test_cast_specificity(self):
curs = self.conn.cursor()
self.assertEqual("foo", curs.cast(705, 'foo'))
D = psycopg2.extensions.new_type((705,), "DOUBLING", lambda v, c: v * 2)
psycopg2.extensions.register_type(D, self.conn)
self.assertEqual("foofoo", curs.cast(705, 'foo'))
T = psycopg2.extensions.new_type((705,), "TREBLING", lambda v, c: v * 3)
psycopg2.extensions.register_type(T, curs)
self.assertEqual("foofoofoo", curs.cast(705, 'foo'))
curs2 = self.conn.cursor()
self.assertEqual("foofoo", curs2.cast(705, 'foo'))
def test_weakref(self):
from weakref import ref
curs = self.conn.cursor()
w = ref(curs)
del curs
import gc
gc.collect()
self.assert_(w() is None)
def test_null_name(self):
curs = self.conn.cursor(None)
self.assertEqual(curs.name, None)
def test_invalid_name(self):
curs = self.conn.cursor()
curs.execute("create temp table invname (data int);")
for i in (10, 20, 30):
curs.execute("insert into invname values (%s)", (i,))
curs.close()
curs = self.conn.cursor(r'1-2-3 \ "test"')
curs.execute("select data from invname order by data")
self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
def _create_withhold_table(self):
curs = self.conn.cursor()
try:
curs.execute("drop table withhold")
except psycopg2.ProgrammingError:
self.conn.rollback()
curs.execute("create table withhold (data int)")
for i in (10, 20, 30):
curs.execute("insert into withhold values (%s)", (i,))
curs.close()
def test_withhold(self):
self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
withhold=True)
self._create_withhold_table()
curs = self.conn.cursor("W")
self.assertEqual(curs.withhold, False)
curs.withhold = True
self.assertEqual(curs.withhold, True)
curs.execute("select data from withhold order by data")
self.conn.commit()
self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
curs.close()
curs = self.conn.cursor("W", withhold=True)
self.assertEqual(curs.withhold, True)
curs.execute("select data from withhold order by data")
self.conn.commit()
self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
curs = self.conn.cursor()
curs.execute("drop table withhold")
self.conn.commit()
def test_withhold_no_begin(self):
self._create_withhold_table()
curs = self.conn.cursor("w", withhold=True)
curs.execute("select data from withhold order by data")
self.assertEqual(curs.fetchone(), (10,))
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
self.conn.commit()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
self.assertEqual(curs.fetchone(), (20,))
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
curs.close()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
def test_withhold_autocommit(self):
self._create_withhold_table()
self.conn.commit()
self.conn.autocommit = True
curs = self.conn.cursor("w", withhold=True)
curs.execute("select data from withhold order by data")
self.assertEqual(curs.fetchone(), (10,))
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
self.conn.commit()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
curs.close()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
def test_scrollable(self):
self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
scrollable=True)
curs = self.conn.cursor()
curs.execute("create table scrollable (data int)")
curs.executemany("insert into scrollable values (%s)",
[(i,) for i in range(100)])
curs.close()
for t in range(2):
if not t:
curs = self.conn.cursor("S")
self.assertEqual(curs.scrollable, None)
curs.scrollable = True
else:
curs = self.conn.cursor("S", scrollable=True)
self.assertEqual(curs.scrollable, True)
curs.itersize = 10
# complex enough to make postgres cursors declare without
# scroll/no scroll to fail
curs.execute("""
select x.data
from scrollable x
join scrollable y on x.data = y.data
order by y.data""")
for i, (n,) in enumerate(curs):
self.assertEqual(i, n)
curs.scroll(-1)
for i in range(99, -1, -1):
curs.scroll(-1)
self.assertEqual(i, curs.fetchone()[0])
curs.scroll(-1)
curs.close()
def test_not_scrollable(self):
self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
scrollable=False)
curs = self.conn.cursor()
curs.execute("create table scrollable (data int)")
curs.executemany("insert into scrollable values (%s)",
[(i,) for i in range(100)])
curs.close()
curs = self.conn.cursor("S") # default scrollability
curs.execute("select * from scrollable")
self.assertEqual(curs.scrollable, None)
curs.scroll(2)
try:
curs.scroll(-1)
except psycopg2.OperationalError:
return self.skipTest("can't evaluate non-scrollable cursor")
curs.close()
curs = self.conn.cursor("S", scrollable=False)
self.assertEqual(curs.scrollable, False)
curs.execute("select * from scrollable")
curs.scroll(2)
self.assertRaises(psycopg2.OperationalError, curs.scroll, -1)
@slow
@skip_before_postgres(8, 2)
def test_iter_named_cursor_efficient(self):
curs = self.conn.cursor('tmp')
# if these records are fetched in the same roundtrip their
# timestamp will not be influenced by the pause in Python world.
curs.execute("""select clock_timestamp() from generate_series(1,2)""")
i = iter(curs)
t1 = (i.next())[0] # the brackets work around a 2to3 bug
time.sleep(0.2)
t2 = (i.next())[0]
self.assert_((t2 - t1).microseconds * 1e-6 < 0.1,
"named cursor records fetched in 2 roundtrips (delta: %s)"
% (t2 - t1))
@skip_before_postgres(8, 0)
def test_iter_named_cursor_default_itersize(self):
curs = self.conn.cursor('tmp')
curs.execute('select generate_series(1,50)')
rv = [(r[0], curs.rownumber) for r in curs]
# everything swallowed in one gulp
self.assertEqual(rv, [(i, i) for i in range(1, 51)])
@skip_before_postgres(8, 0)
def test_iter_named_cursor_itersize(self):
curs = self.conn.cursor('tmp')
curs.itersize = 30
curs.execute('select generate_series(1,50)')
rv = [(r[0], curs.rownumber) for r in curs]
# everything swallowed in two gulps
self.assertEqual(rv, [(i, ((i - 1) % 30) + 1) for i in range(1, 51)])
@skip_before_postgres(8, 0)
def test_iter_named_cursor_rownumber(self):
curs = self.conn.cursor('tmp')
# note: this fails if itersize < dataset: internally we check
# rownumber == rowcount to detect when to read another page, so we
# would need an extra attribute to have a monotonic rownumber.
curs.itersize = 20
curs.execute('select generate_series(1,10)')
for i, rec in enumerate(curs):
self.assertEqual(i + 1, curs.rownumber)
@skip_if_no_namedtuple
def test_namedtuple_description(self):
curs = self.conn.cursor()
curs.execute("""select
3.14::decimal(10,2) as pi,
'hello'::text as hi,
'2010-02-18'::date as now;
""")
self.assertEqual(len(curs.description), 3)
for c in curs.description:
self.assertEqual(len(c), 7) # DBAPI happy
for a in ('name', 'type_code', 'display_size', 'internal_size',
'precision', 'scale', 'null_ok'):
self.assert_(hasattr(c, a), a)
c = curs.description[0]
self.assertEqual(c.name, 'pi')
self.assert_(c.type_code in psycopg2.extensions.DECIMAL.values)
self.assert_(c.internal_size > 0)
self.assertEqual(c.precision, 10)
self.assertEqual(c.scale, 2)
c = curs.description[1]
self.assertEqual(c.name, 'hi')
self.assert_(c.type_code in psycopg2.STRING.values)
self.assert_(c.internal_size < 0)
self.assertEqual(c.precision, None)
self.assertEqual(c.scale, None)
c = curs.description[2]
self.assertEqual(c.name, 'now')
self.assert_(c.type_code in psycopg2.extensions.DATE.values)
self.assert_(c.internal_size > 0)
self.assertEqual(c.precision, None)
self.assertEqual(c.scale, None)
def test_pickle_description(self):
curs = self.conn.cursor()
curs.execute('SELECT 1 AS foo')
description = curs.description
pickled = pickle.dumps(description, pickle.HIGHEST_PROTOCOL)
unpickled = pickle.loads(pickled)
self.assertEqual(description, unpickled)
@skip_before_postgres(8, 0)
def test_named_cursor_stealing(self):
# you can use a named cursor to iterate on a refcursor created
# somewhere else
cur1 = self.conn.cursor()
cur1.execute("DECLARE test CURSOR WITHOUT HOLD "
" FOR SELECT generate_series(1,7)")
cur2 = self.conn.cursor('test')
# can call fetch without execute
self.assertEqual((1,), cur2.fetchone())
self.assertEqual([(2,), (3,), (4,)], cur2.fetchmany(3))
self.assertEqual([(5,), (6,), (7,)], cur2.fetchall())
@skip_before_postgres(8, 0)
def test_named_noop_close(self):
cur = self.conn.cursor('test')
cur.close()
@skip_before_postgres(8, 0)
def test_scroll(self):
cur = self.conn.cursor()
cur.execute("select generate_series(0,9)")
cur.scroll(2)
self.assertEqual(cur.fetchone(), (2,))
cur.scroll(2)
self.assertEqual(cur.fetchone(), (5,))
cur.scroll(2, mode='relative')
self.assertEqual(cur.fetchone(), (8,))
cur.scroll(-1)
self.assertEqual(cur.fetchone(), (8,))
cur.scroll(-2)
self.assertEqual(cur.fetchone(), (7,))
cur.scroll(2, mode='absolute')
self.assertEqual(cur.fetchone(), (2,))
# on the boundary
cur.scroll(0, mode='absolute')
self.assertEqual(cur.fetchone(), (0,))
self.assertRaises((IndexError, psycopg2.ProgrammingError),
cur.scroll, -1, mode='absolute')
cur.scroll(0, mode='absolute')
self.assertRaises((IndexError, psycopg2.ProgrammingError),
cur.scroll, -1)
cur.scroll(9, mode='absolute')
self.assertEqual(cur.fetchone(), (9,))
self.assertRaises((IndexError, psycopg2.ProgrammingError),
cur.scroll, 10, mode='absolute')
cur.scroll(9, mode='absolute')
self.assertRaises((IndexError, psycopg2.ProgrammingError),
cur.scroll, 1)
@skip_before_postgres(8, 0)
def test_scroll_named(self):
cur = self.conn.cursor('tmp', scrollable=True)
cur.execute("select generate_series(0,9)")
cur.scroll(2)
self.assertEqual(cur.fetchone(), (2,))
cur.scroll(2)
self.assertEqual(cur.fetchone(), (5,))
cur.scroll(2, mode='relative')
self.assertEqual(cur.fetchone(), (8,))
cur.scroll(9, mode='absolute')
self.assertEqual(cur.fetchone(), (9,))
def test_bad_subclass(self):
# check that we get an error message instead of a segfault
# for badly written subclasses.
# see http://stackoverflow.com/questions/22019341/
class StupidCursor(psycopg2.extensions.cursor):
def __init__(self, *args, **kwargs):
# I am stupid so not calling superclass init
pass
cur = StupidCursor()
self.assertRaises(psycopg2.InterfaceError, cur.execute, 'select 1')
self.assertRaises(psycopg2.InterfaceError, cur.executemany,
'select 1', [])
def test_callproc_badparam(self):
cur = self.conn.cursor()
self.assertRaises(TypeError, cur.callproc, 'lower', 42)
# It would be inappropriate to test callproc's named parameters in the
# DBAPI2.0 test section because they are a psycopg2 extension.
@skip_before_postgres(9, 0)
def test_callproc_dict(self):
# This parameter name tests for injection and quote escaping
paramname = '''
Robert'); drop table "students" --
'''.strip()
escaped_paramname = '"%s"' % paramname.replace('"', '""')
procname = 'pg_temp.randall'
cur = self.conn.cursor()
# Set up the temporary function
cur.execute('''
CREATE FUNCTION %s(%s INT)
RETURNS INT AS
'SELECT $1 * $1'
LANGUAGE SQL
''' % (procname, escaped_paramname))
# Make sure callproc works right
cur.callproc(procname, {paramname: 2})
self.assertEquals(cur.fetchone()[0], 4)
# Make sure callproc fails right
failing_cases = [
({paramname: 2, 'foo': 'bar'}, psycopg2.ProgrammingError),
({paramname: '2'}, psycopg2.ProgrammingError),
({paramname: 'two'}, psycopg2.ProgrammingError),
({u'bj\xc3rn': 2}, psycopg2.ProgrammingError),
({3: 2}, TypeError),
({self: 2}, TypeError),
]
for parameter_sequence, exception in failing_cases:
self.assertRaises(exception, cur.callproc, procname, parameter_sequence)
self.conn.rollback()
@skip_if_no_superuser
@skip_if_windows
@skip_before_postgres(8, 4)
def test_external_close_sync(self):
# If a "victim" connection is closed by a "control" connection
# behind psycopg2's back, psycopg2 always handles it correctly:
# raise OperationalError, set conn.closed to 2. This reproduces
# issue #443, a race between control_conn closing victim_conn and
# psycopg2 noticing.
control_conn = self.conn
connect_func = self.connect
wait_func = lambda conn: None
self._test_external_close(control_conn, connect_func, wait_func)
@skip_if_no_superuser
@skip_if_windows
@skip_before_postgres(8, 4)
def test_external_close_async(self):
# Issue #443 is in the async code too. Since the fix is duplicated,
# so is the test.
control_conn = self.conn
connect_func = lambda: self.connect(async_=True)
wait_func = psycopg2.extras.wait_select
self._test_external_close(control_conn, connect_func, wait_func)
def _test_external_close(self, control_conn, connect_func, wait_func):
# The short sleep before using victim_conn the second time makes it
# much more likely to lose the race and see the bug. Repeating the
# test several times makes it even more likely.
for i in range(10):
victim_conn = connect_func()
wait_func(victim_conn)
with victim_conn.cursor() as cur:
cur.execute('select pg_backend_pid()')
wait_func(victim_conn)
pid1 = cur.fetchall()[0][0]
with control_conn.cursor() as cur:
cur.execute('select pg_terminate_backend(%s)', (pid1,))
time.sleep(0.001)
def f():
with victim_conn.cursor() as cur:
cur.execute('select 1')
wait_func(victim_conn)
self.assertRaises(psycopg2.OperationalError, f)
self.assertEqual(victim_conn.closed, 2)
@skip_before_postgres(8, 2)
def test_rowcount_on_executemany_returning(self):
cur = self.conn.cursor()
cur.execute("create table execmany(id serial primary key, data int)")
cur.executemany(
"insert into execmany (data) values (%s)",
[(i,) for i in range(4)])
self.assertEqual(cur.rowcount, 4)
cur.executemany(
"insert into execmany (data) values (%s) returning data",
[(i,) for i in range(5)])
self.assertEqual(cur.rowcount, 5)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
```
#### File: psycopg2/tests/test_psycopg2_dbapi20.py
```python
import dbapi20
import dbapi20_tpc
from testutils import skip_if_tpc_disabled
from testutils import unittest, decorate_all_tests
import psycopg2
from testconfig import dsn
class Psycopg2Tests(dbapi20.DatabaseAPI20Test):
driver = psycopg2
connect_args = ()
connect_kw_args = {'dsn': dsn}
lower_func = 'lower' # For stored procedure test
def test_callproc(self):
# Until DBAPI 2.0 compliance, callproc should return None or it's just
# misleading. Therefore, we will skip the return value test for
# callproc and only perform the fetch test.
#
# For what it's worth, the DBAPI2.0 test_callproc doesn't actually
# test for DBAPI2.0 compliance! It doesn't check for modified OUT and
# IN/OUT parameters in the return values!
con = self._connect()
try:
cur = con.cursor()
if self.lower_func and hasattr(cur,'callproc'):
cur.callproc(self.lower_func,('FOO',))
r = cur.fetchall()
self.assertEqual(len(r),1,'callproc produced no result set')
self.assertEqual(len(r[0]),1,
'callproc produced invalid result set'
)
self.assertEqual(r[0][0],'foo',
'callproc produced invalid results'
)
finally:
con.close()
def test_setoutputsize(self):
# psycopg2's setoutputsize() is a no-op
pass
def test_nextset(self):
# psycopg2 does not implement nextset()
pass
class Psycopg2TPCTests(dbapi20_tpc.TwoPhaseCommitTests, unittest.TestCase):
driver = psycopg2
def connect(self):
return psycopg2.connect(dsn=dsn)
decorate_all_tests(Psycopg2TPCTests, skip_if_tpc_disabled)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main()
```
#### File: test/functional/invalid_name.py
```python
AAA = 24
try:
import collections
except ImportError:
collections = None
aaa = 42 # [invalid-name]
try:
import time
except ValueError:
time = None # [invalid-name]
try:
from sys import argv, executable as python
except ImportError:
argv = 42
python = 24
def test():
""" Shouldn't emit an invalid-name here. """
try:
import re
except ImportError:
re = None
return re
def a(): # [invalid-name]
"""yo"""
```
#### File: test/functional/monkeypatch_method.py
```python
class Clazz(object):
'test class'
def __init__(self, value):
self.value = value
def func(arg1, arg2):
'function that will be used as a method'
return arg1.value + arg2
Clazz.method = func
VAR = Clazz(1).method(2)
```
#### File: test/functional/non_iterator_returned.py
```python
import six
class FirstGoodIterator(object):
""" yields in iterator. """
def __iter__(self):
for index in range(10):
yield index
class SecondGoodIterator(object):
""" __iter__ and next """
def __iter__(self):
return self
def __next__(self):
""" Infinite iterator, but still an iterator """
return 1
def next(self):
"""Same as __next__, but for Python 2."""
return 1
class ThirdGoodIterator(object):
""" Returns other iterator, not the current instance """
def __iter__(self):
return SecondGoodIterator()
class FourthGoodIterator(object):
""" __iter__ returns iter(...) """
def __iter__(self):
return iter(range(10))
class IteratorMetaclass(type):
def __next__(cls):
return 1
def next(cls):
return 2
@six.add_metaclass(IteratorMetaclass)
class IteratorClass(object):
"""Iterable through the metaclass."""
class FifthGoodIterator(object):
"""__iter__ returns a class which uses an iterator-metaclass."""
def __iter__(self):
return IteratorClass
class FileBasedIterator(object):
def __init__(self, path):
self.path = path
self.file = None
def __iter__(self):
if self.file is not None:
self.file.close()
self.file = open(self.path)
# self.file has two inferred values: None and <instance of 'file'>
# we don't want to emit error in this case
return self.file
class FirstBadIterator(object):
""" __iter__ returns a list """
def __iter__(self): # [non-iterator-returned]
return []
class SecondBadIterator(object):
""" __iter__ without next """
def __iter__(self): # [non-iterator-returned]
return self
class ThirdBadIterator(object):
""" __iter__ returns an instance of another non-iterator """
def __iter__(self): # [non-iterator-returned]
return SecondBadIterator()
class FourthBadIterator(object):
"""__iter__ returns a class."""
def __iter__(self): # [non-iterator-returned]
return ThirdBadIterator
```
#### File: test/functional/stop_iteration_inside_generator.py
```python
import asyncio
class RebornStopIteration(StopIteration):
"""
A class inheriting from StopIteration exception
"""
# This one is ok
def gen_ok():
yield 1
yield 2
yield 3
return
# pylint should warn about this one
# because of a direct raising of StopIteration inside generator
def gen_stopiter():
yield 1
yield 2
yield 3
raise StopIteration # [stop-iteration-return]
# pylint should warn about this one
# because of a direct raising of an exception inheriting from StopIteration inside generator
def gen_stopiterchild():
yield 1
yield 2
yield 3
raise RebornStopIteration # [stop-iteration-return]
# pylint should warn here
# because of the possibility that next raises a StopIteration exception
def gen_next_raises_stopiter():
g = gen_ok()
while True:
yield next(g) # [stop-iteration-return]
# This one is the same as gen_next_raises_stopiter
# but is ok because the next function is inside
# a try/except block handling StopIteration
def gen_next_inside_try_except():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
return
# This one is the same as gen_next_inside_try_except
# but is not ok because the next function is inside
# a try/except block that doesn't handle StopIteration
def gen_next_inside_wrong_try_except():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
return
# This one is the same as gen_next_inside_try_except
# but is not ok because the next function is inside
# a try/except block that handles StopIteration but reraises it
def gen_next_inside_wrong_try_except2():
g = gen_ok()
while True:
try:
yield next(g)
except StopIteration:
raise StopIteration # [stop-iteration-return]
# Those two last are ok
def gen_in_for():
for el in gen_ok():
yield el
def gen_yield_from():
yield from gen_ok()
def gen_dont_crash_on_no_exception():
g = gen_ok()
while True:
try:
yield next(g) # [stop-iteration-return]
except ValueError:
raise
def gen_dont_crash_on_uninferable():
# https://github.com/PyCQA/pylint/issues/1779
yield from iter()
raise asyncio.TimeoutError()
```
#### File: test/functional/too_many_return_statements.py
```python
def stupid_function(arg): # [too-many-return-statements]
if arg == 1:
return 1
elif arg == 2:
return 2
elif arg == 3:
return 3
elif arg == 4:
return 4
elif arg == 5:
return 5
elif arg == 6:
return 6
elif arg == 7:
return 7
elif arg == 8:
return 8
elif arg == 9:
return 9
elif arg == 10:
return 10
return None
def many_yield(text):
"""Not a problem"""
if text:
yield " line 1: %s\n" % text
yield " line 2\n"
yield " line 3\n"
yield " line 4\n"
yield " line 5\n"
else:
yield " line 6\n"
yield " line 7\n"
yield " line 8\n"
yield " line 9\n"
yield " line 10\n"
```
#### File: test/functional/unused_import_assigned_to.py
```python
import uuid
import foo
from .a import x
class Y(object):
x = x[0]
def test(default=None):
return default
class BaseModel(object):
uuid = test(default=uuid.uuid4)
class bar(object):
foo = foo.baz
```
#### File: pylint/test/test_func.py
```python
import sys
import re
import pytest
from os.path import abspath, dirname, join
from pylint.testutils import _get_tests_info, linter
PY3K = sys.version_info >= (3, 0)
SYS_VERS_STR = '%d%d%d' % sys.version_info[:3]
# Configure paths
INPUT_DIR = join(dirname(abspath(__file__)), 'input')
MSG_DIR = join(dirname(abspath(__file__)), 'messages')
FILTER_RGX = None
UPDATE = False
INFO_TEST_RGX = re.compile(r'^func_i\d\d\d\d$')
# Classes
quote = "'" if sys.version_info >= (3, 3) else ''
def exception_str(self, ex): # pylint: disable=unused-argument
"""function used to replace default __str__ method of exception instances"""
return 'in %s\n:: %s' % (ex.file, ', '.join(ex.args))
class LintTestUsingModule(object):
INPUT_DIR = None
DEFAULT_PACKAGE = 'input'
package = DEFAULT_PACKAGE
linter = linter
module = None
depends = None
output = None
_TEST_TYPE = 'module'
# def runTest(self):
# # This is a hack to make ./test/test_func.py work under pytest.
# pass
def _test_functionality(self):
tocheck = [self.package+'.'+self.module]
# pylint: disable=not-an-iterable; can't handle boolean checks for now
if self.depends:
tocheck += [self.package+'.%s' % name.replace('.py', '')
for name, _ in self.depends]
self._test(tocheck)
def _check_result(self, got):
assert self._get_expected().strip()+'\n' == got.strip()+'\n'
def _test(self, tocheck):
if INFO_TEST_RGX.match(self.module):
self.linter.enable('I')
else:
self.linter.disable('I')
try:
self.linter.check(tocheck)
except Exception as ex:
# need finalization to restore a correct state
self.linter.reporter.finalize()
ex.file = tocheck
print(ex)
ex.__str__ = exception_str
raise
self._check_result(self.linter.reporter.finalize())
def _has_output(self):
return not self.module.startswith('func_noerror_')
def _get_expected(self):
if self._has_output() and self.output:
with open(self.output, 'U') as fobj:
return fobj.read().strip() + '\n'
else:
return ''
class LintTestUpdate(LintTestUsingModule):
_TEST_TYPE = 'update'
def _check_result(self, got):
if self._has_output():
try:
expected = self._get_expected()
except IOError:
expected = ''
if got != expected:
with open(self.output, 'w') as fobj:
fobj.write(got)
def gen_tests(filter_rgx):
if filter_rgx:
is_to_run = re.compile(filter_rgx).search
else:
is_to_run = lambda x: 1
tests = []
for module_file, messages_file in (
_get_tests_info(INPUT_DIR, MSG_DIR, 'func_', '')
):
if not is_to_run(module_file) or module_file.endswith(('.pyc', "$py.class")):
continue
base = module_file.replace('.py', '').split('_')[1]
dependencies = _get_tests_info(INPUT_DIR, MSG_DIR, base, '.py')
tests.append((module_file, messages_file, dependencies))
if UPDATE:
return tests
assert len(tests) < 196, "Please do not add new test cases here."
return tests
@pytest.mark.parametrize("module_file,messages_file,dependencies", gen_tests(FILTER_RGX),
ids=[o[0] for o in gen_tests(FILTER_RGX)])
def test_functionality(module_file, messages_file, dependencies,):
LT = LintTestUpdate() if UPDATE else LintTestUsingModule()
LT.module = module_file.replace('.py', '')
LT.output = messages_file
LT.depends = dependencies or None
LT.INPUT_DIR = INPUT_DIR
LT._test_functionality()
if __name__ == '__main__':
if '-u' in sys.argv:
UPDATE = True
sys.argv.remove('-u')
if len(sys.argv) > 1:
FILTER_RGX = sys.argv[1]
del sys.argv[1]
pytest.main(sys.argv)
```
#### File: pylint/test/test_regr.py
```python
import sys
import os
from os.path import abspath, dirname, join
import pytest
import astroid
import pylint.testutils as testutils
from pylint import epylint
REGR_DATA = join(dirname(abspath(__file__)), 'regrtest_data')
sys.path.insert(1, REGR_DATA)
try:
PYPY_VERSION_INFO = sys.pypy_version_info
except AttributeError:
PYPY_VERSION_INFO = None
@pytest.fixture(scope="module")
def reporter(reporter):
return testutils.TestReporter
@pytest.fixture(scope="module")
def disable(disable):
return ['I']
@pytest.fixture
def finalize_linter(linter):
"""call reporter.finalize() to cleanup
pending messages if a test finished badly
"""
yield linter
linter.reporter.finalize()
def Equals(expected):
return lambda got: got == expected
@pytest.mark.parametrize("file_name, check", [
("package.__init__", Equals("")),
("precedence_test", Equals("")),
("import_package_subpackage_module", Equals("")),
("pylint.checkers.__init__", lambda x: '__path__' not in x),
(join(REGR_DATA, "classdoc_usage.py"), Equals("")),
(join(REGR_DATA, "module_global.py"), Equals("")),
(join(REGR_DATA, "decimal_inference.py"), Equals("")),
(join(REGR_DATA, 'absimp', 'string.py'), Equals("")),
(join(REGR_DATA, 'bad_package'),
lambda x: "Unused import missing" in x),
])
def test_package(finalize_linter, file_name, check):
finalize_linter.check(file_name)
got = finalize_linter.reporter.finalize().strip()
assert check(got)
@pytest.mark.parametrize("file_name", [
join(REGR_DATA, 'import_assign.py'),
join(REGR_DATA, 'special_attr_scope_lookup_crash.py'),
join(REGR_DATA, 'try_finally_disable_msg_crash'),
])
def test_crash(finalize_linter, file_name):
finalize_linter.check(file_name)
@pytest.mark.parametrize("fname", [x for x in os.listdir(REGR_DATA)
if x.endswith('_crash.py')])
def test_descriptor_crash(fname, finalize_linter):
finalize_linter.check(join(REGR_DATA, fname))
finalize_linter.reporter.finalize().strip()
@pytest.fixture
def modify_path():
cwd = os.getcwd()
sys.path.insert(0, '')
yield
sys.path.pop(0)
os.chdir(cwd)
@pytest.mark.usefixtures("modify_path")
def test_check_package___init__(finalize_linter):
filename = 'package.__init__'
finalize_linter.check(filename)
checked = list(finalize_linter.stats['by_module'].keys())
assert checked == [filename]
os.chdir(join(REGR_DATA, 'package'))
finalize_linter.check('__init__')
checked = list(finalize_linter.stats['by_module'].keys())
assert checked == ['__init__']
@pytest.mark.skipif(PYPY_VERSION_INFO and PYPY_VERSION_INFO < (4, 0),
reason="On older PyPy versions, sys.executable was set to a value "
"that is not supported by the implementation of this function. "
"( https://bitbucket.org/pypy/pypy/commits/19e305e27e67 )")
def test_epylint_does_not_block_on_huge_files():
path = join(REGR_DATA, 'huge.py')
out, err = epylint.py_run(path, return_std=True)
assert hasattr(out, 'read')
assert hasattr(err, 'read')
output = out.read(10)
assert isinstance(output, str)
def test_pylint_config_attr():
mod = astroid.MANAGER.ast_from_module_name('pylint.lint')
pylinter = mod['PyLinter']
expect = ['OptionsManagerMixIn', 'object', 'MessagesHandlerMixIn',
'ReportsHandlerMixIn', 'BaseTokenChecker', 'BaseChecker',
'OptionsProviderMixIn']
assert [c.name for c in pylinter.ancestors()] == expect
assert list(astroid.Instance(pylinter).getattr('config'))
inferred = list(astroid.Instance(pylinter).igetattr('config'))
assert len(inferred) == 1
assert inferred[0].root().name == 'optparse'
assert inferred[0].name == 'Values'
``` |
{
"source": "JonathanNdambaPro/MDO",
"score": 3
} |
#### File: JonathanNdambaPro/MDO/deploy.py
```python
import click
import os
from pathlib import Path
@click.group()
def cli():
pass
@click.command(help="convention pep8")
@click.option("--pep8", default=True, help="mise en forme des differents fichiers, convention pep8")
def autopep8(pep8: bool):
if pep8:
p = Path('.')
py_file_to_pep8 = list(p.glob('Mahanalobis_Detection_Outliers/*.py'))
for py_file in py_file_to_pep8:
command = f"black {py_file}"
os.system(command)
@click.command(help="lancement des tests")
@click.option("--test", default=True, help="Effectue les tests avec pytest")
def lancement_test(test: bool):
if test:
command = f"pytest"
os.system(command)
@click.command(help="package la solution")
@click.option("--packaging", default=True, help="Similaire au librarie")
def packaging_solution(packaging: bool):
if packaging:
step = "python3 setup.py bdist_wheel"
os.system(f"{step}")
else:
pass
@click.command(help="push la solution sur git ou gitlab")
@click.option("--push", default=True, help="d'abord set le git")
def push_to_git(push: bool):
if push:
message_commit = str(input("Git message :"))
step_1 = "git add ."
step_2 = f'git commit -m "{message_commit}"'
setp_3 = "git push"
all_step_packaging = [step_1, step_2, setp_3]
for step in all_step_packaging:
os.system(f"{step}")
else:
pass
@click.command(help="Execute toutes les action en un fois")
@click.option(
"--global_",
default=True,
help="convention_code -> packaging_solution -> push_to_git",
)
def global_CI(global_: bool):
if global_:
global_commande = []
p = Path('.')
py_file_to_pep8 = list(p.glob('Mahanalobis_Detection_Outliers/*.py'))
for py_file in py_file_to_pep8:
command_0 = f"black {py_file}"
global_commande.append(command_0)
command_1 = "pytest"
command_2 = "python3 setup.py bdist_wheel"
message_commit = str(input("Git message :"))
command_3 = "git add ."
command_4 = f'git commit -m "{message_commit}"'
command_5 = "git push"
global_commande.extend([command_1, command_2, command_3, command_4, command_5])
for command in global_commande:
os.system(command)
else:
pass
cli.add_command(packaging_solution)
cli.add_command(push_to_git)
cli.add_command(global_CI)
cli.add_command(autopep8)
cli.add_command(lancement_test)
if __name__ == "__main__":
cli()
``` |
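Because these are ordinary `click` commands, they can be exercised in-process with click's test runner rather than from a shell. A sketch, assuming the file is importable as `deploy`:
```python
# Hedged sketch: drive the CLI group without spawning a shell.
from click.testing import CliRunner
from deploy import cli

runner = CliRunner()
result = runner.invoke(cli, ['--help'])   # list the registered commands
print(result.output)
print(result.exit_code)                   # 0 on success
```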
{
"source": "jonathanneo/databricks-unit-testing",
"score": 3
} |
#### File: databricks-unit-testing/functions/cleaning_utils.py
```python
from pyspark.sql import DataFrame, functions as F
def lowercase_all_column_names(df:DataFrame)->DataFrame:
"""
Convert all column names to lower case.
"""
for col in df.columns:
df = df.withColumnRenamed(col, col.lower())
return df
def uppercase_all_column_names(df:DataFrame)->DataFrame:
"""
Convert all column names to upper case.
"""
for col in df.columns:
df = df.withColumnRenamed(col, col.upper())
return df
def add_metadata(df:DataFrame, field_dict:dict)->DataFrame:
for pair in field_dict.items():
df = df.withColumn(pair[0], F.lit(pair[1]))
return df
```
#### File: functions/tests/test_cleaning_utils.py
```python
from pyspark.sql import Row, SparkSession
import pandas as pd
from datetime import datetime
from ..cleaning_utils import *
def test_lowercase_all_columns():
# ASSEMBLE
test_data = [
{
"ID": 1,
"First_Name": "Bob",
"Last_Name": "Builder",
"Age": 24
},
{
"ID": 2,
"First_Name": "Sam",
"Last_Name": "Smith",
"Age": 41
}
]
spark = SparkSession.builder.getOrCreate()
test_df = spark.createDataFrame(map(lambda x: Row(**x), test_data))
# ACT
output_df = lowercase_all_column_names(test_df)
output_df_as_pd = output_df.toPandas()
expected_output_df = pd.DataFrame({
"id": [1, 2],
"first_name": ["Bob", "Sam"],
"last_name": ["Builder", "Smith"],
"age": [24, 41]
})
# ASSERT
pd.testing.assert_frame_equal(left=expected_output_df,right=output_df_as_pd, check_exact=True)
def test_uppercase_all_columns():
# ASSEMBLE
test_data = [
{
"ID": 1,
"First_Name": "Bob",
"Last_Name": "Builder",
"Age": 24
},
{
"ID": 2,
"First_Name": "Sam",
"Last_Name": "Smith",
"Age": 41
}
]
spark = SparkSession.builder.getOrCreate()
test_df = spark.createDataFrame(map(lambda x: Row(**x), test_data))
# ACT
output_df = uppercase_all_column_names(test_df)
output_df_as_pd = output_df.toPandas()
expected_output_df = pd.DataFrame({
"ID": [1, 2],
"FIRST_NAME": ["Bob", "Sam"],
"LAST_NAME": ["Builder", "Smith"],
"AGE": [24, 41]
})
# ASSERT
pd.testing.assert_frame_equal(left=expected_output_df,right=output_df_as_pd, check_exact=True)
def test_add_metadata():
# ASSEMBLE
test_data = [
{
"id": 1,
"first_name": "Bob",
"last_name": "Builder",
"age": 24
},
{
"id": 2,
"first_name": "Sam",
"last_name": "Smith",
"age": 41
}
]
now = datetime.now()
field_dict = {
"task_id": 1,
"ingested_at": now
}
spark = SparkSession.builder.getOrCreate()
test_df = spark.createDataFrame(map(lambda x: Row(**x), test_data))
# ACT
output_df = add_metadata(df=test_df, field_dict=field_dict)
output_df_as_pd = output_df.toPandas()
expected_output_df = pd.DataFrame({
"id": [1, 2],
"first_name": ["Bob", "Sam"],
"last_name": ["Builder", "Smith"],
"age": [24, 41],
"task_id": [1, 1],
"ingested_at": [now, now]
})
# ASSERT
pd.testing.assert_frame_equal(left=expected_output_df,right=output_df_as_pd, check_exact=True, check_dtype=False)
``` |
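The tests above each call `SparkSession.builder.getOrCreate()` inline. A common alternative is a session-scoped pytest fixture so all tests share one local session; the sketch below is illustrative only (the `conftest.py` placement and the `spark` fixture name are assumptions, not part of the original repository):

```python
# conftest.py -- illustrative sketch, not part of the original repository.
import pytest
from pyspark.sql import SparkSession

@pytest.fixture(scope="session")
def spark():
    # One local SparkSession shared by every test in the session.
    session = (
        SparkSession.builder
        .master("local[1]")
        .appName("cleaning-utils-tests")
        .getOrCreate()
    )
    yield session
    session.stop()
```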
{
"source": "Jonathanngundu/covid-predict",
"score": 3
} |
#### File: Jonathanngundu/covid-predict/data.py
```python
import pandas
from sklearn import linear_model
def predict(x: str, y: str, c: str, day: str):
    df = pandas.read_csv(x)
    # c is the feature column (e.g. "Day"), y is the target column (e.g. "cases")
    features = df[[c]]
    target = df[[y]]
    linear = linear_model.LinearRegression()
    linear.fit(features, target)
    global cases_predict
    cases_predict = linear.predict([[float(day)]])
    print(cases_predict)
us = predict("us.csv", "cases", "Day", "15")
us_cases = int(cases_predict)
sa = predict("SA.csv", "cases", "Day", "16")
sa_cases = int(cases_predict)
uk = predict("uk.csv", "cases", "Day", "16")
uk_cases = int(cases_predict)
us_next = predict("us.csv", "cases", "Day", "23")
us_next_week = int(cases_predict)
sa_next = predict("SA.csv", "cases", "Day", "23")
sa_next_week = int(cases_predict)
uk_next = predict("uk.csv", "cases", "Day", "23")
uk_next_week =int(cases_predict)
``` |
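The `predict` helper above hands its result back through the module-level `cases_predict` global, so each caller has to read that global immediately after the call. Below is a minimal sketch of the same linear-regression idea that simply returns the value; the reshaping and `float` conversion are assumptions, while the column and file names are the ones used above:

```python
# Sketch only: same idea as predict(), but returning the prediction
# instead of writing it to a global.
import pandas
from sklearn import linear_model

def predict_cases(csv_path: str, target_col: str, feature_col: str, day: float) -> float:
    df = pandas.read_csv(csv_path)
    model = linear_model.LinearRegression()
    model.fit(df[[feature_col]], df[[target_col]])
    return float(model.predict([[day]])[0][0])

# e.g. us_cases = predict_cases("us.csv", "cases", "Day", 15)
```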
{
"source": "jonathannjeunje/DynamicProgramming_Starter_Fibonacci",
"score": 4
} |
#### File: jonathannjeunje/DynamicProgramming_Starter_Fibonacci/Fibonacci.py
```python
def fibonacci(n, cache = {}):
if n <= 1:
print(f"Base case: {n}")
return n
elif n not in cache:
print(f"Calculating: {n}")
print(f"Cache state:{cache}")
cache[n] = fibonacci(n-1) + fibonacci(n-2)
print(f"Cache state:{cache}")
print(f"Cache retrieval: {n}")
return cache[n]
# %%
last = fibonacci(10)
# %%
# %%
``` |
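The mutable default argument `cache={}` persists across calls and acts as the memo table. For comparison, here is a standard-library sketch of the same top-down memoization using `functools.lru_cache` (not part of the original file):

```python
# Alternative sketch: top-down memoization via functools.lru_cache.
from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n: int) -> int:
    if n <= 1:
        return n
    return fib(n - 1) + fib(n - 2)

# fib(10) -> 55
```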
{
"source": "jonathannocek/cs256-sim",
"score": 4
} |
#### File: jonathannocek/cs256-sim/256sim.py
```python
from S20_SIM import Simulator
def read_cmd():
# Show a prompt and read a command from the terminal
cmd = input("[1;32mCommand[0;32m (H)elp | (L)oad machine code | Change (B)utton state | (S)tep | (R)eset | (Q)uit[1;32m:[m ")
parts = cmd.strip().split()
return parts[0].upper(), parts[1:]
def print_help():
print("""
Commands:
(H)elp -- Print this help message.
(L)oad machine code
-- Load machine code from a file into instruction memory.
Optionally write the filename after the command (e.g., "l
test.bin"). If a filename is not given, you will be prompted
to enter one separately.
Change (B)utton state
-- Change the state (pressed or not pressed) of the simulated
buttons. Each button is set to 1 (pressed) or 0 (not pressed).
Optionally write the buttons state after the command (e.g., "b
0101"). If only the command is given, you will be prompted to
enter the state separately.
(S)tep -- Step the simulation forward one clock cycle / one instruction.
Optionally specify a number of cycles to simulate after the
command (e.g., "s 10").
(R)eset -- Reset the state of the CPU, clearing all memory elements except
the instruction memory.
(Q)uit -- Exit the simulation.
Commands are case insensitive.""")
def main():
# Instantiate the Simulator object
sim = Simulator()
# REPL:
# Read a command
# Evaluate that command (potentially running
# one or more steps of simulation)
# Print the current state of the simulation
# Loop
while True:
print()
cmd, args = read_cmd()
if cmd[0] == 'H':
print_help()
continue
elif cmd[0] == 'L':
filename = args[0] if args else input("[1;32mBinary file:[m ")
try:
sim.load_bin(filename)
except Exception as e:
print(f"[1;31mError loading file:[m {e}")
continue
elif cmd[0] == 'B':
buttons = args[0] if args else input("[1;32mNew state[0;32m (4 buttons; 0 or 1 each; e.g. '0010' to press just the third button)[1;32m:[m ")
try:
sim.change_buttons(buttons)
except Exception as e:
print(f"[1;31mInvalid button string:[m {e}")
continue
elif cmd[0] == 'S':
n = int(args[0]) if args else 1
sim.step_n(n)
elif cmd[0] == 'R':
sim.reset()
elif cmd[0] == 'Q':
break
sim.print()
if __name__ == "__main__":
main()
``` |
{
"source": "JonathanOsAlc/academy",
"score": 2
} |
#### File: academy/controllers/academy_controller.py
```python
from odoo import http
class Academy(http.Controller):
@http.route('/academy/academy/', auth='public', website=True)
def index(self, **kw):
Teachers = http.request.env['academy.teachers']
return http.request.render('academy.index', {
'teachers': Teachers.search([])
})
@http.route('/academy/<model("academy.teachers"):teacher>/', auth='public', website=True)
def teacher(self, teacher):
return http.request.render('academy.biography', {
'person': teacher
})
``` |
{
"source": "JonathanPartain/PyChess",
"score": 3
} |
#### File: JonathanPartain/PyChess/Tile.py
```python
import pygame
class Tile:
def __init__(self, letter, number, piece):
self.letter = letter
self.number = number
self.piece = piece
self.posx1 = None
self.posx2 = None
self.posy1 = None
self.posy2 = None
global busy
``` |
{
"source": "JonathanPartain/SubtitleBOX",
"score": 3
} |
#### File: JonathanPartain/SubtitleBOX/sbox.py
```python
import os
import re
import time
import hashlib
import requests
import platform
import sys
banner = r'''
___ ___ ___ __ __
/ __| | _ ) / _ \ \ \/ /
\__ \ | _ \ | (_) | > <
|___/ |___/ \___/ /_/\_\
_|"""""|_|"""""|_|"""""|_|"""""|
"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'
Subtitles BOX
'''
print(banner)
time.sleep(1)
def tk_get_file_path():
try:
import tkinter as tk
from tkinter import filedialog
except:
print("Error: tkinter is not installed/available. Please install and try again")
sys.exit()
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename()
try:
with open(file_path, 'r') as f:
pass
except:
print("Cancelled")
sys.exit()
return file_path
# got this from https://stackoverflow.com/a/58861718/13276219
def file_path():
# Get operating system
operating_system = platform.system()
if operating_system == 'Windows': # Windows, use default
import ctypes
co_initialize = ctypes.windll.ole32.CoInitialize
co_initialize(None)
import clr
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import OpenFileDialog
file_dialog = OpenFileDialog()
ret = file_dialog.ShowDialog()
if ret != 1:
print("Cancelled")
sys.exit()
return file_dialog.FileName
else: # posix/linux/macos, use tkinter
return tk_get_file_path()
file_path = file_path()
languages = {
"en" : "English",
"es" : "Spanish",
"fr" : "French",
"it" : "Italian",
"nl" : "Dutch",
"pl" : "Polish",
"pt" : "Portuguese",
"ro" : "Romanian",
"sv" : "Swedish",
"tr" : "Turkish"
}
def get_hash(name):
readsize = 64 * 1024
with open(name, 'rb') as f:
size = os.path.getsize(name)
data = f.read(readsize)
f.seek(-readsize, os.SEEK_END)
data += f.read(readsize)
return hashlib.md5(data).hexdigest()
def create_url():
film_hash = get_hash(name=file_path)
url = "http://api.thesubdb.com/?action=search&hash={}".format(film_hash)
return url
def request_subtitle():
    url = create_url()
    header = { "user-agent": "SubDB/1.0 (SubtitleBOX/1.0; https://github.com/sameera-madushan/SubtitleBOX.git)" }
    req = requests.get(url, headers=header)
    if req.status_code == 200:
        k = req.content.decode('utf-8')
        global l
        l = k.split(",")
        print("\nSubtitle files are available in following languages...\n")
        for i in l:
            for k,v in languages.items():
                if i == k:
                    print(" " + k + " (" + v + ")")
    else:
        print("Oops!! Subtitle not found.")
        sys.exit()
request_subtitle()
def download(data):
filename = file_path[:-4]
with open(filename + ".srt", 'wb') as f:
f.write(data)
f.close()
while True:
try:
        select_language = input("\nChoose your language (Please use language codes): ").lower()
        if select_language in l:
            url = create_url()
            search = re.sub(r'search', "download", url)
            final_url = search + "&language={}".format(select_language)
header = { "user-agent": "SubDB/1.0 (SubtitleBOX/1.0; https://github.com/sameera-madushan/SubtitleBOX.git)" }
req = requests.get(final_url, headers=header)
if req.status_code == 200:
data = req.content
download(data=data)
print("\nSubtitle downloaded successfully")
break
else:
print("\nUnknown Error")
break
else:
print("\nInvalid language code selected. Please try again.")
except KeyboardInterrupt:
print("\nProgramme Interrupted")
break
``` |
{
"source": "jonathanpascoe/chalice",
"score": 2
} |
#### File: functional/cli/test_cli.py
```python
import json
import zipfile
import os
import sys
import re
import pytest
from click.testing import CliRunner
import mock
from botocore.exceptions import ClientError
from chalice import cli
from chalice.cli import factory
from chalice.config import Config, DeployedResources
from chalice.utils import record_deployed_values
from chalice.utils import PipeReader
from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME
from chalice.logs import LogRetriever
from chalice.invoke import LambdaInvokeHandler
from chalice.invoke import UnhandledLambdaError
from chalice.awsclient import ReadTimeout
from chalice.deploy.validate import ExperimentalFeatureError
class FakeConfig(object):
def __init__(self, deployed_resources):
self._deployed_resources = deployed_resources
def deployed_resources(self, chalice_stage_name):
return self._deployed_resources
@pytest.fixture
def runner():
return CliRunner()
@pytest.fixture
def mock_cli_factory():
cli_factory = mock.Mock(spec=factory.CLIFactory)
cli_factory.create_config_obj.return_value = Config.create(project_dir='.')
cli_factory.create_botocore_session.return_value = mock.sentinel.Session
return cli_factory
def assert_chalice_app_structure_created(dirname):
app_contents = os.listdir(os.path.join(os.getcwd(), dirname))
assert 'app.py' in app_contents
assert 'requirements.txt' in app_contents
assert '.chalice' in app_contents
assert '.gitignore' in app_contents
def _run_cli_command(runner, function, args, cli_factory=None):
# Handles passing in 'obj' so we can get commands
# that use @pass_context to work properly.
# click doesn't support this natively so we have to duplicate
# what 'def cli(...)' is doing.
if cli_factory is None:
cli_factory = factory.CLIFactory('.')
result = runner.invoke(
function, args, obj={'project_dir': '.', 'debug': False,
'factory': cli_factory})
return result
def test_create_new_project_creates_app(runner):
with runner.isolated_filesystem():
result = runner.invoke(cli.new_project, ['testproject'])
assert result.exit_code == 0
# The 'new-project' command creates a directory based on
# the project name
assert os.listdir(os.getcwd()) == ['testproject']
assert_chalice_app_structure_created(dirname='testproject')
def test_create_project_with_prompted_app_name(runner):
with runner.isolated_filesystem():
result = runner.invoke(cli.new_project, input='testproject')
assert result.exit_code == 0
assert os.listdir(os.getcwd()) == ['testproject']
assert_chalice_app_structure_created(dirname='testproject')
def test_error_raised_if_dir_already_exists(runner):
with runner.isolated_filesystem():
os.mkdir('testproject')
result = runner.invoke(cli.new_project, ['testproject'])
assert result.exit_code == 1
assert 'Directory already exists: testproject' in result.output
def test_can_load_project_config_after_project_creation(runner):
with runner.isolated_filesystem():
result = runner.invoke(cli.new_project, ['testproject'])
assert result.exit_code == 0
config = factory.CLIFactory('testproject').load_project_config()
assert config == {
'version': '2.0',
'app_name': 'testproject',
'stages': {
'dev': {'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME},
}
}
def test_default_new_project_adds_index_route(runner):
with runner.isolated_filesystem():
result = runner.invoke(cli.new_project, ['testproject'])
assert result.exit_code == 0
app = factory.CLIFactory('testproject').load_chalice_app()
assert '/' in app.routes
def test_gen_policy_command_creates_policy(runner):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = runner.invoke(cli.cli, ['gen-policy'], obj={})
assert result.exit_code == 0
# The output should be valid JSON.
parsed_policy = json.loads(result.output)
# We don't want to validate the specific parts of the policy
# (that's tested elsewhere), but we'll check to make sure
# it looks like a policy document.
assert 'Version' in parsed_policy
assert 'Statement' in parsed_policy
def test_can_package_command(runner):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(runner, cli.package, ['outdir'])
assert result.exit_code == 0, result.output
assert os.path.isdir('outdir')
dir_contents = os.listdir('outdir')
assert 'sam.json' in dir_contents
assert 'deployment.zip' in dir_contents
def test_can_package_with_single_file(runner):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(
runner, cli.package, ['--single-file', 'package.zip'])
assert result.exit_code == 0, result.output
assert os.path.isfile('package.zip')
with zipfile.ZipFile('package.zip', 'r') as f:
assert sorted(f.namelist()) == ['deployment.zip', 'sam.json']
def test_debug_flag_enables_logging(runner):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = runner.invoke(
cli.cli, ['--debug', 'package', 'outdir'], obj={})
assert result.exit_code == 0
assert re.search('[DEBUG].*Creating deployment package',
result.output) is not None
def test_does_deploy_with_default_api_gateway_stage_name(runner,
mock_cli_factory):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
# This isn't perfect as we're assuming we know how to
# create the config_obj like the deploy() command does,
# it should give us more confidence that the api gateway
# stage defaults are still working.
cli_factory = factory.CLIFactory('.')
config = cli_factory.create_config_obj(
chalice_stage_name='dev',
autogen_policy=None,
api_gateway_stage=None
)
assert config.api_gateway_stage == DEFAULT_APIGATEWAY_STAGE_NAME
def test_can_specify_api_gateway_stage(runner, mock_cli_factory):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(runner, cli.deploy,
['--api-gateway-stage', 'notdev'],
cli_factory=mock_cli_factory)
assert result.exit_code == 0
mock_cli_factory.create_config_obj.assert_called_with(
autogen_policy=None, chalice_stage_name='dev',
api_gateway_stage='notdev'
)
def test_can_deploy_specify_connection_timeout(runner, mock_cli_factory):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(runner, cli.deploy,
['--connection-timeout', 100],
cli_factory=mock_cli_factory)
assert result.exit_code == 0
mock_cli_factory.create_botocore_session.assert_called_with(
connection_timeout=100
)
def test_can_retrieve_url(runner, mock_cli_factory):
deployed_values_dev = {
"schema_version": "2.0",
"resources": [
{"rest_api_url": "https://dev-url/",
"name": "rest_api",
"resource_type": "rest_api"},
]
}
deployed_values_prod = {
"schema_version": "2.0",
"resources": [
{"rest_api_url": "https://prod-url/",
"name": "rest_api",
"resource_type": "rest_api"},
]
}
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
deployed_dir = os.path.join('.chalice', 'deployed')
os.makedirs(deployed_dir)
record_deployed_values(
deployed_values_dev,
os.path.join(deployed_dir, 'dev.json')
)
record_deployed_values(
deployed_values_prod,
os.path.join(deployed_dir, 'prod.json')
)
result = _run_cli_command(runner, cli.url, [],
cli_factory=mock_cli_factory)
assert result.exit_code == 0
assert result.output == 'https://dev-url/\n'
prod_result = _run_cli_command(runner, cli.url, ['--stage', 'prod'],
cli_factory=mock_cli_factory)
assert prod_result.exit_code == 0
assert prod_result.output == 'https://prod-url/\n'
def test_error_when_no_deployed_record(runner, mock_cli_factory):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(runner, cli.url, [],
cli_factory=mock_cli_factory)
assert result.exit_code == 2
assert 'not find' in result.output
@pytest.mark.skipif(sys.version_info[:2] == (3, 7),
reason="Cannot generate pipeline for python3.7.")
def test_can_generate_pipeline_for_all(runner):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(
runner, cli.generate_pipeline, ['pipeline.json'])
assert result.exit_code == 0, result.output
assert os.path.isfile('pipeline.json')
with open('pipeline.json', 'r') as f:
template = json.load(f)
# The actual contents are tested in the unit
# tests. Just a sanity check that it looks right.
assert "AWSTemplateFormatVersion" in template
assert "Outputs" in template
def test_no_errors_if_override_codebuild_image(runner):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(
runner, cli.generate_pipeline,
['-i', 'python:3.6.1', 'pipeline.json'])
assert result.exit_code == 0, result.output
assert os.path.isfile('pipeline.json')
with open('pipeline.json', 'r') as f:
template = json.load(f)
# The actual contents are tested in the unit
# tests. Just a sanity check that it looks right.
image = template['Parameters']['CodeBuildImage']['Default']
assert image == 'python:3.6.1'
def test_can_configure_github(runner):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
# The -i option is provided so we don't have to skip this
# test on python3.6
result = _run_cli_command(
runner, cli.generate_pipeline,
            ['--source', 'github', '-i', 'python:3.6.1', 'pipeline.json'])
assert result.exit_code == 0, result.output
assert os.path.isfile('pipeline.json')
with open('pipeline.json', 'r') as f:
template = json.load(f)
# The template is already tested in the unit tests
# for template generation. We just want a basic
# sanity check to make sure things are mapped
# properly.
assert 'GithubOwner' in template['Parameters']
assert 'GithubRepoName' in template['Parameters']
def test_can_extract_buildspec_yaml(runner):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(
runner, cli.generate_pipeline,
['--buildspec-file', 'buildspec.yml',
'-i', 'python:3.6.1',
'pipeline.json'])
assert result.exit_code == 0, result.output
assert os.path.isfile('buildspec.yml')
with open('buildspec.yml') as f:
data = f.read()
# The contents of this file are tested elsewhere,
# we just want a basic sanity check here.
assert 'chalice package' in data
def test_can_specify_profile_for_logs(runner, mock_cli_factory):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(
runner, cli.logs, ['--profile', 'my-profile'],
cli_factory=mock_cli_factory
)
assert result.exit_code == 0
assert mock_cli_factory.profile == 'my-profile'
def test_can_provide_lambda_name_for_logs(runner, mock_cli_factory):
deployed_resources = DeployedResources({
"resources": [
{"name": "foo",
"lambda_arn": "arn:aws:lambda::app-dev-foo",
"resource_type": "lambda_function"}]
})
mock_cli_factory.create_config_obj.return_value = FakeConfig(
deployed_resources)
log_retriever = mock.Mock(spec=LogRetriever)
log_retriever.retrieve_logs.return_value = []
mock_cli_factory.create_log_retriever.return_value = log_retriever
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(
runner, cli.logs, ['--name', 'foo'],
cli_factory=mock_cli_factory
)
assert result.exit_code == 0
log_retriever.retrieve_logs.assert_called_with(
include_lambda_messages=False, max_entries=None)
mock_cli_factory.create_log_retriever.assert_called_with(
mock.sentinel.Session, 'arn:aws:lambda::app-dev-foo'
)
def test_can_call_invoke(runner, mock_cli_factory, monkeypatch):
invoke_handler = mock.Mock(spec=LambdaInvokeHandler)
mock_cli_factory.create_lambda_invoke_handler.return_value = invoke_handler
mock_reader = mock.Mock(spec=PipeReader)
mock_reader.read.return_value = 'barbaz'
mock_cli_factory.create_stdin_reader.return_value = mock_reader
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
cli_factory=mock_cli_factory)
assert result.exit_code == 0
assert invoke_handler.invoke.call_args == mock.call('barbaz')
def test_invoke_does_raise_if_service_error(runner, mock_cli_factory):
deployed_resources = DeployedResources({"resources": []})
mock_cli_factory.create_config_obj.return_value = FakeConfig(
deployed_resources)
invoke_handler = mock.Mock(spec=LambdaInvokeHandler)
invoke_handler.invoke.side_effect = ClientError(
{
'Error': {
'Code': 'LambdaError',
'Message': 'Error message'
}
},
'Invoke'
)
mock_cli_factory.create_lambda_invoke_handler.return_value = invoke_handler
mock_reader = mock.Mock(spec=PipeReader)
mock_reader.read.return_value = 'barbaz'
mock_cli_factory.create_stdin_reader.return_value = mock_reader
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
cli_factory=mock_cli_factory)
assert result.exit_code == 1
assert invoke_handler.invoke.call_args == mock.call('barbaz')
assert (
"Error: got 'LambdaError' exception back from Lambda\n"
"Error message"
) in result.output
def test_invoke_does_raise_if_unhandled_error(runner, mock_cli_factory):
deployed_resources = DeployedResources({"resources": []})
mock_cli_factory.create_config_obj.return_value = FakeConfig(
deployed_resources)
invoke_handler = mock.Mock(spec=LambdaInvokeHandler)
invoke_handler.invoke.side_effect = UnhandledLambdaError('foo')
mock_cli_factory.create_lambda_invoke_handler.return_value = invoke_handler
mock_reader = mock.Mock(spec=PipeReader)
mock_reader.read.return_value = 'barbaz'
mock_cli_factory.create_stdin_reader.return_value = mock_reader
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
cli_factory=mock_cli_factory)
assert result.exit_code == 1
assert invoke_handler.invoke.call_args == mock.call('barbaz')
assert 'Unhandled exception in Lambda function, details above.' \
in result.output
def test_invoke_does_raise_if_read_timeout(runner, mock_cli_factory):
mock_cli_factory.create_lambda_invoke_handler.side_effect = \
ReadTimeout('It took too long')
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
cli_factory=mock_cli_factory)
assert result.exit_code == 1
assert 'It took too long' in result.output
def test_invoke_does_raise_if_no_function_found(runner, mock_cli_factory):
mock_cli_factory.create_lambda_invoke_handler.side_effect = \
factory.NoSuchFunctionError('foo')
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
os.chdir('testproject')
result = _run_cli_command(runner, cli.invoke, ['-n', 'foo'],
cli_factory=mock_cli_factory)
assert result.exit_code == 2
assert 'foo' in result.output
def test_error_message_displayed_when_missing_feature_opt_in(runner):
with runner.isolated_filesystem():
cli.create_new_project_skeleton('testproject')
sys.modules.pop('app', None)
with open(os.path.join('testproject', 'app.py'), 'w') as f:
# Rather than pick an existing experimental feature, we're
# manually injecting a feature flag into our app. This ensures
# we don't have to update this test if a feature graduates
# from trial to accepted. The '_features_used' is a "package
# private" var for chalice code.
f.write(
'from chalice import Chalice\n'
'app = Chalice("myapp")\n'
'app._features_used.add("MYTESTFEATURE")\n'
)
os.chdir('testproject')
result = _run_cli_command(runner, cli.package, ['out'])
assert isinstance(result.exception, ExperimentalFeatureError)
assert 'MYTESTFEATURE' in str(result.exception)
@pytest.mark.parametrize(
"path",
[
None,
'.',
os.getcwd,
],
)
def test_cli_with_absolute_path(runner, path):
with runner.isolated_filesystem():
if callable(path):
path = path()
result = runner.invoke(
cli.cli,
['--project-dir', path, 'new-project', 'testproject'],
obj={})
assert result.exit_code == 0
assert os.listdir(os.getcwd()) == ['testproject']
assert_chalice_app_structure_created(dirname='testproject')
``` |
{
"source": "jonathanpascoe/raster-vision",
"score": 2
} |
#### File: data/label_source/semantic_segmentation_label_source_config.py
```python
from typing import Optional, Union
from rastervision.core.data.raster_source import (RasterSourceConfig,
RasterizedSourceConfig)
from rastervision.core.data.label_source import (
LabelSourceConfig, SemanticSegmentationLabelSource)
from rastervision.core.data.class_config import (ClassConfig)
from rastervision.pipeline.config import (register_config, Field)
@register_config('semantic_segmentation_label_source')
class SemanticSegmentationLabelSourceConfig(LabelSourceConfig):
"""Config for a read-only label source for semantic segmentation."""
raster_source: Union[RasterSourceConfig, RasterizedSourceConfig] = Field(
..., description='The labels in the form of rasters.')
rgb_class_config: Optional[ClassConfig] = Field(
None,
description=
('If set, will infer the class_ids for the labels using the colors field. This '
'assumes the labels are stored as RGB rasters.'))
    def update(self, pipeline=None, scene=None):
        super().update()
        if self.rgb_class_config is not None:
            self.rgb_class_config.ensure_null_class()
def build(self, class_config, crs_transformer, extent, tmp_dir):
if isinstance(self.raster_source, RasterizedSourceConfig):
rs = self.raster_source.build(class_config, crs_transformer,
extent)
else:
rs = self.raster_source.build(tmp_dir)
return SemanticSegmentationLabelSource(
rs,
class_config.get_null_class_id(),
rgb_class_config=self.rgb_class_config)
```
#### File: data/raster_source/multi_raster_source.py
```python
from typing import Optional, Sequence
from pydantic import conint
import numpy as np
from rastervision.core.box import Box
from rastervision.core.data import ActivateMixin
from rastervision.core.data.raster_source import (RasterSource, CropOffsets)
from rastervision.core.data.crs_transformer import CRSTransformer
from rastervision.core.data.utils import all_equal
class MultiRasterSourceError(Exception):
pass
class MultiRasterSource(ActivateMixin, RasterSource):
"""A RasterSource that combines multiple RasterSources by concatenting
their output along the channel dimension (assumed to be the last dimension).
"""
def __init__(self,
raster_sources: Sequence[RasterSource],
raw_channel_order: Sequence[conint(ge=0)],
allow_different_extents: bool = False,
force_same_dtype: bool = False,
channel_order: Optional[Sequence[conint(ge=0)]] = None,
crs_source: conint(ge=0) = 0,
raster_transformers: Sequence = [],
extent_crop: Optional[CropOffsets] = None):
"""Constructor.
Args:
raster_sources (Sequence[RasterSource]): Sequence of RasterSources.
raw_channel_order (Sequence[conint(ge=0)]): Channel ordering that
will always be applied before channel_order.
allow_different_extents (bool):
When true, the sub-rasters are allowed to be of different sizes. The
purpose of this flag is to allow use of rasters that cover the same area
but are of slightly different size (due to reprojection differences).
No special reprojection logic is triggered by this parameter. It is
assumed that the underlying raster sources are guaranteed to supply chips
of the same size. Use with caution.
force_same_dtype (bool): If true, force all subchips to have the same dtype
as the first subchip. No careful conversion is done, just a quick cast.
Use with caution.
channel_order (Sequence[conint(ge=0)], optional): Channel ordering
that will be used by .get_chip(). Defaults to None.
raster_transformers (Sequence, optional): Sequence of transformers.
Defaults to [].
extent_crop (CropOffsets, optional): Relative
offsets (top, left, bottom, right) for cropping the extent.
Useful for using splitting a scene into different datasets.
Defaults to None i.e. no cropping.
"""
num_channels = len(raw_channel_order)
if not channel_order:
channel_order = list(range(num_channels))
super().__init__(channel_order, num_channels, raster_transformers)
self.allow_different_extents = allow_different_extents
self.force_same_dtype = force_same_dtype
self.raster_sources = raster_sources
self.raw_channel_order = list(raw_channel_order)
self.crs_source = crs_source
self.extent_crop = extent_crop
self.validate_raster_sources()
def validate_raster_sources(self) -> None:
dtypes = [rs.get_dtype() for rs in self.raster_sources]
if not self.force_same_dtype and not all_equal(dtypes):
raise MultiRasterSourceError(
'dtypes of all sub raster sources must be equal. '
f'Got: {dtypes} '
                '(carefully consider using force_same_dtype)')
extents = [rs.get_extent() for rs in self.raster_sources]
if not self.allow_different_extents and not all_equal(extents):
raise MultiRasterSourceError(
'extents of all sub raster sources must be equal. '
f'Got: {extents} '
'(carefully consider using allow_different_extents)')
sub_num_channels = sum(
len(rs.channel_order) for rs in self.raster_sources)
if sub_num_channels != self.num_channels:
raise MultiRasterSourceError(
f'num_channels ({self.num_channels}) != sum of num_channels '
f'of sub raster sources ({sub_num_channels})')
    def _subcomponents_to_activate(self) -> Sequence[RasterSource]:
return self.raster_sources
def get_extent(self) -> Box:
rs = self.raster_sources[0]
extent = rs.get_extent()
if self.extent_crop is not None:
h, w = extent.get_height(), extent.get_width()
skip_top, skip_left, skip_bottom, skip_right = self.extent_crop
ymin, xmin = int(h * skip_top), int(w * skip_left)
ymax, xmax = h - int(h * skip_bottom), w - int(w * skip_right)
return Box(ymin, xmin, ymax, xmax)
return extent
def get_dtype(self) -> np.dtype:
rs = self.raster_sources[0]
dtype = rs.get_dtype()
return dtype
def get_crs_transformer(self) -> CRSTransformer:
rs = self.raster_sources[self.crs_source]
return rs.get_crs_transformer()
def _get_chip(self, window: Box) -> np.ndarray:
"""Return the raw chip located in the window.
Get raw chips from sub raster sources, concatenate them and
apply raw_channel_order.
Args:
window: Box
Returns:
[height, width, channels] numpy array
"""
chip_slices = [rs._get_chip(window) for rs in self.raster_sources]
if self.force_same_dtype:
for i in range(1, len(chip_slices)):
chip_slices[i] = chip_slices[i].astype(chip_slices[0].dtype)
chip = np.concatenate(chip_slices, axis=-1)
chip = chip[..., self.raw_channel_order]
return chip
def get_chip(self, window: Box) -> np.ndarray:
"""Return the transformed chip in the window.
Get raw chips from sub raster sources, concatenate them,
apply raw_channel_order, followed by channel_order, followed
by transformations.
Args:
window: Box
Returns:
np.ndarray with shape [height, width, channels]
"""
chip_slices = [rs.get_chip(window) for rs in self.raster_sources]
if self.force_same_dtype:
for i in range(1, len(chip_slices)):
chip_slices[i] = chip_slices[i].astype(chip_slices[0].dtype)
chip = np.concatenate(chip_slices, axis=-1)
chip = chip[..., self.raw_channel_order]
chip = chip[..., self.channel_order]
for transformer in self.raster_transformers:
chip = transformer.transform(chip, self.channel_order)
return chip
``` |
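The chip assembly in `get_chip` boils down to concatenating the sub-chips along the channel axis and then applying two index reorderings in sequence. Here is a NumPy-only illustration of that composition; the shapes and orderings are made up purely for the example:

```python
# Illustration of the channel handling in get_chip(): concatenate along the
# channel axis, then apply raw_channel_order followed by channel_order.
import numpy as np

chip_a = np.zeros((4, 4, 3))  # e.g. an RGB sub-raster
chip_b = np.ones((4, 4, 1))   # e.g. a single-band sub-raster
chip = np.concatenate([chip_a, chip_b], axis=-1)  # shape (4, 4, 4)

raw_channel_order = [0, 1, 2, 3]  # fixed reordering applied first
channel_order = [3, 0]            # channels exposed to downstream code
chip = chip[..., raw_channel_order][..., channel_order]
print(chip.shape)  # (4, 4, 2)
```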
{
"source": "JonathanPerkins/agile2stl",
"score": 4
} |
#### File: JonathanPerkins/agile2stl/agile2stl.py
```python
import argparse
import numpy as np
import matplotlib.tri as mtri
from stl import mesh
def do_conversion(infile, outfile):
''' Convert the image infile to a STL outfile '''
print("\nConverting Agile price data to STL\n")
# Open data file and read into a numpy array
in_data = np.loadtxt(infile, delimiter=",")
in_width = len(in_data)
in_depth = len(in_data[0])
# At this point, data array width is 365/366 days and
# depth of 48 half hour slots per day.
# In order to be able to map to a 3d column, expand each
# data point to a 3x2 grid.
# This will also help to easily create the STL triangles
# later, without turning each data value into a sharp point.
scale_depth = 3
scale_width = 2
# And also add a border around the data, this improves the
# appearance and more importantly will also close off the edges
# and help complete the bottom box
border_size = 10
# The height of the base
base_height = 5
# The maximum -ve offset supported (values below this will be clamped)
# Must be less than base_height! This is how far down into the base
# the lowest (clamped) value will go.
max_neg = -3
# This is therefore the final size of our data array
scaled_width = (in_width * scale_width) + (2 * border_size)
scaled_depth = (in_depth * scale_depth) + (2 * border_size)
# Create the scaled up integer array, including the border
# Note that we fill this with z values of base_height, as that is
# our zero cost plane and the top of the base box.
scaled = np.full((scaled_width, scaled_depth), base_height, dtype='uint16')
for x in range(in_width):
for y in range(in_depth):
# Scale the z component, convert to integer, adding top of box offset
if in_data[x][y] >= max_neg:
z = int(in_data[x][y] + base_height + 0.5)
else:
z = base_height + max_neg
# Store, scaling and applying the border offset
for xx in range(scale_width):
for yy in range(scale_depth):
scaled[((scale_width * x) + border_size + xx), ((scale_depth * y) + border_size + yy)] = z
# To close the bottom of the image and create a box of height base_height, we need
# to add an extra 5 sides made up of 4 vertices and 10 triangles
num_box_extra_vertices = 4
num_box_extra_triangles = 10
# Create an array of the (x,y,z) values of all the vertices in the surface
num_x = len(scaled[0])
num_y = len(scaled)
num_vertices = (num_x * num_y) + num_box_extra_vertices
vertices = np.zeros((num_vertices, 3), dtype=int)
# Start by loading the extra 4 base vertices
vertices[0:4] = [[0, 0, 0], [num_x-1, 0, 0], [0, num_y-1, 0], [num_x-1, num_y-1, 0]]
# Then the prepared data from above
i = num_box_extra_vertices
for y in range(num_y):
for x in range(num_x):
vertices[i] = [x, y, scaled[y][x]]
i = i + 1
    # Create an array of the triangle vertex indexes
    # Each triangle is defined by a vector of 3 indexes into the vertices array
    # We can make some assumptions based on our known grid shape, ie:
    # (x, y)   * n            n+1       * (x+1, y)
    #
    # (x, y+1) * n+num_x      n+num_x+1 * (x+1, y+1)
    # We can form 2 triangles from the above (using the right-hand rule, anti-clockwise):
    # [n+1, n, n+num_x] and [n+1, n+num_x, n+num_x+1]
def get_vertice_index(x_pos, y_pos):
''' Given the image pixel (x,y) position, return its index in the vertices array '''
return x_pos + (y_pos * num_x) + num_box_extra_vertices
num_triangles = (((num_x - 1) * (num_y - 1)) * 2) + num_box_extra_triangles
triangles = np.zeros((num_triangles, 3), dtype=int)
x = 0
y = 0
for i in range(0, len(triangles), 2):
triangles[i] = [get_vertice_index(x+1, y), get_vertice_index(x, y), get_vertice_index(x, y+1)]
triangles[i+1] = [get_vertice_index(x+1, y), get_vertice_index(x, y+1), get_vertice_index(x+1, y+1)]
x = x + 1
if x == (num_x - 1):
y = y + 1
x = 0
i = num_triangles - num_box_extra_triangles
# Add the base box triangles
# [[0, 0, 0], [num_x-1, 0, 0], [0, num_y-1, 0], [num_x-1, num_y-1, 0]]
# Viewed top down:
# bottom vertices: 0 1 next layer up: above_0 above_1
# 2 3 above_2 above_3
# Viewed from below, this is how we build the triangles:
# bottom vertices: 2 3 next layer up: above_2 above_3
# 0 1 above_0 above_1
above_0 = 4
above_1 = 4 + num_x - 1
above_2 = 4 + num_x * (num_y - 1)
above_3 = 4 + num_x * num_y - 1
triangles[i:num_triangles] = [
[3, 2, 1], [0, 1, 2], # base 2_triangles (viewed from below)
[above_0, 0, 2], [2, above_2, above_0], # side 1
[above_2, 2, 3], [3, above_3, above_2], # side 2
[above_3, 3, 1], [1, above_1, above_3], # side 3
[above_1, 1, 0], [0, above_0, above_1] # side 4
]
tris = mtri.Triangulation(vertices[:, 0], vertices[:, 1], triangles=triangles)
# Use the Mesh module to write the STL file
data = np.zeros(len(tris.triangles), dtype=mesh.Mesh.dtype)
image_mesh = mesh.Mesh(data, remove_empty_areas=False)
image_mesh.x[:] = vertices[:, 1][tris.triangles]
image_mesh.y[:] = vertices[:, 0][tris.triangles]
image_mesh.z[:] = vertices[:, 2][tris.triangles]
image_mesh.save(outfile)
# -------------------------------------------------------------------------
# Main entry point
# -------------------------------------------------------------------------
# Create an options parser
PARSER = argparse.ArgumentParser(description="Convert Octopus Agile historical price data to a 3D visualisation",
fromfile_prefix_chars='@')
PARSER.add_argument('input_file', nargs=1,
help='the input AGILE data file')
PARSER.add_argument('output_file', nargs=1,
help='the output STL file')
# Run the parser, exiting on error
ARGS = PARSER.parse_args()
# Parsed OK, run the command
do_conversion(ARGS.input_file[0], ARGS.output_file[0])
``` |
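The vertex indexing above maps a grid position `(x, y)` to `x + y * num_x` plus the four base vertices. A tiny standalone check of the two triangles produced for one grid cell (the grid size is chosen only for illustration and the base offset is set to zero):

```python
# Tiny illustration of the grid-to-triangle indexing used above.
num_x, num_y, num_box_extra_vertices = 3, 2, 0

def get_vertice_index(x_pos, y_pos):
    return x_pos + (y_pos * num_x) + num_box_extra_vertices

# The cell whose top-left corner is (0, 0) yields two triangles:
t1 = [get_vertice_index(1, 0), get_vertice_index(0, 0), get_vertice_index(0, 1)]
t2 = [get_vertice_index(1, 0), get_vertice_index(0, 1), get_vertice_index(1, 1)]
print(t1, t2)  # [1, 0, 3] [1, 3, 4]
```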
{
"source": "jonathanperrie/iscDNase-seq",
"score": 3
} |
#### File: src/Figure3/generate_rand_peak_sets.py
```python
import numpy as np
import pandas as pd
from pybedtools import BedTool
import random
def calc_gc(peaks,hg18_path):
"""calculate the GC content in peak set.
Parameters
----------
peaks : peak set
hg18_path : genome path
Returns
-------
np.mean(gc) : average GC content over all peaks
"""
peaks=BedTool(peaks).nucleotide_content(hg18_path)
gc=[]
for peak in peaks:
gc+=[float(peak[7])]
return(np.mean(gc))
def is_valid(cand,bp_df):
"""check if there is overlap between some candidate peak and the peaks in the blacklist regions.
Parameters
----------
cand : candidate peak
bp_df : blacklist regions as a data frame
Returns
-------
Bool : Boolean describing if match was valid
"""
pos_matches=bp_df[bp_df.chr==cand[0]]
if len(pos_matches[(pos_matches.start<cand[1]) & (pos_matches.end>cand[1])]):
return False
elif len(pos_matches[(pos_matches.start<cand[2]) & (pos_matches.end>cand[2])]):
return False
elif len(pos_matches[(pos_matches.start>cand[1]) & (pos_matches.start<cand[2])]):
return False
elif len(pos_matches[(pos_matches.start<cand[1]) & (pos_matches.start>cand[2])]):
return False
else:
return True
def find_new_peaks(peaks,hg18_path,bpath,gl_path,width,threshold):
"""find a new set of peaks with equal proportions of peaks at each chromosome.
Parameters
----------
peaks : reference peak set
bpath : blacklist path
gl_path : genome length path
Returns
-------
pseudo_rand_peaks : new set of random peaks
"""
# all proposed peaks should have a start point a standard peak-width away from the end of the chromosome
genome_len=read_peaks(gl_path)
genome_len={x[0]:int(x[1])-width for x in genome_len}
blacklist_peaks=read_peaks(bpath)
bp_df=pd.DataFrame(blacklist_peaks)
bp_df.rename(index=str,columns={0:"chr",1:"start",2:"end"},inplace=True)
bp_df.start=bp_df.start.astype(int)
bp_df.end=bp_df.end.astype(int)
rand_peaks=[]
# generate initial random peak set that has same chromosome proportions
for peak in peaks:
chrom=peak[0]
start=random.randint(0,genome_len[chrom])
end=start+width
while not is_valid((chrom,start,end),bp_df):
chrom=peak[0]
start=random.randint(0,genome_len[chrom])
end=start+width
rand_peaks+=[[chrom,start,end,'.','.','*']]
# adjust random peak set so that it's gc-content is close to true peaks threshold
true_gc=calc_gc(peaks,hg18_path)
rand_gc=calc_gc(rand_peaks,hg18_path)
i=0
while(np.abs(true_gc-rand_gc)>threshold):
        # pick a random peak in the current random set to consider replacing
        i=np.random.choice(len(peaks))
peak=rand_peaks[i]
rand_gc_peak=float(BedTool([rand_peaks[i]]).nucleotide_content(hg18_path)[0][7])
chrom=peak[0]
# evaluate a new candidate peak
start=random.randint(0,genome_len[chrom])
end=start+width
while not is_valid((chrom,start,end),bp_df):
chrom=peak[0]
start=random.randint(0,genome_len[chrom])
end=start+width
cand_gc_peak=float(BedTool([[chrom,start,end]]).nucleotide_content(hg18_path)[0][4])
# if change in GC content will make the random peak set more closely related to the reference peak
# set, then go ahead with the update
if true_gc>=rand_gc and cand_gc_peak>rand_gc_peak:
rand_peaks[i]=[chrom,start,end,'.','.','*']
rand_gc=rand_gc-rand_gc_peak/len(peaks)+cand_gc_peak/len(peaks)
elif true_gc<rand_gc and cand_gc_peak<rand_gc_peak:
rand_peaks[i]=[chrom,start,end,'.','.','*']
rand_gc=rand_gc-rand_gc_peak/len(peaks)+cand_gc_peak/len(peaks)
else:
continue
return rand_peaks
def read_peaks(path):
"""read in list.
Parameters
----------
path : list path
Returns
-------
a : list with elements on each line separated
"""
with open(path,"rt") as f:
a=f.readlines()
a=[x.split() for x in a]
return a
def find_rand_peaks(peak_path,other_args):
"""downsample peaks and find a random set of peaks with comparable chromosome proportions and GC content.
Parameters
----------
peak_path : peak set path
other_args : other paths and program specs
hg18_path : genome path
bpath : blacklist path
gl_path : genome length path
width : 500
threshold : GC-content proportion
Returns
-------
    rand_peaks : random peak set with chromosome proportions and GC content similar to the input peak set
"""
peaks=read_peaks(peak_path)
peaks=[x for x in peaks if x[0]!='chrM']
hg18_path=other_args[0]
bpath=other_args[1]
gl_path=other_args[2]
width=other_args[3]
threshold=other_args[4]
rand_peaks=find_new_peaks(peaks,hg18_path,bpath,gl_path,width,threshold)
print(peak_path)
return(rand_peaks)
```
#### File: src/Figure3/make_rand_peaks.py
```python
from multiprocessing import Pool
import signal
from itertools import product
import glob
import os
import sys
sys.path.append('../../src/Figure3/')
from generate_rand_peak_sets import *
peak_paths=glob.glob("peaks/*specific*.bed")+glob.glob("peaks/inter_peaks/*specific*.bed")
bpath="peaks/hg18.blacklist.bed"
hg18_path="/fdb/indexes/hg18/hg18.fa"
# outside scope of program
gl_path="/data/perriejv/genome_len/hg18_chrlen.txt"
width=500
threshold=0.05
def init_worker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
nproc = int(os.environ.get("SLURM_CPUS_PER_TASK", "2"))
p = Pool(nproc, init_worker)
try:
rand_peaks=p.starmap(find_rand_peaks,product(peak_paths,[(hg18_path,bpath,gl_path,width,threshold)]))
except (KeyboardInterrupt,SystemExit):
p.terminate()
p.join()
sys.exit(1)
else:
p.close()
p.join()
def write_peaks(path,peaks):
    """write a peak set to peaks/rand_peaks/, deriving the output filename from the input path.
    Parameters
    ----------
    path : original peak set path
    peaks : list of peaks to write
    """
if "inter" in path:
filename="peaks/rand_peaks/random_inter_"+path.split('/')[-1]
else:
filename="peaks/rand_peaks/random_"+path.split('/')[-1]
with open(filename,"w") as f:
for p in peaks:
# add tabs
p[1]=str(p[1])
p[2]=str(p[2])
f.write("\t".join(p)+"\n")
[write_peaks(peak_paths[i],rand_peaks[i]) for i in range(len(rand_peaks))]
``` |
{
"source": "JonathanPeterCole/Tech-Support-Site",
"score": 3
} |
#### File: Tech-Support-Site/FlaskApp/__init__.py
```python
import os, requests, validation, config
from flask import Flask, redirect, request, render_template, send_from_directory, escape
from flask_mail import Mail, Message
app = Flask (__name__)
app.config.from_object(config.BaseConfig)
mail = Mail(app)
version = "2.1.3"
@app.route("/")
def index():
return render_template('index.html.j2')
@app.route("/book")
def book():
return render_template('book.html.j2', site_key = app.config.get("RECAPTCHA_SITE_KEY"))
@app.route("/book/submit", methods = ['POST'])
def submit_booking():
# Get the form data
json_data = request.get_json()
# Check for data in dictionary
if not json_data:
return "Request missing JSON data"
# Check the reCAPTCHA
if "g-recaptcha-response" not in json_data.keys():
return "Recaptcha response missing"
if not check_recaptcha(json_data.pop("g-recaptcha-response"), request.remote_addr):
return "Recaptcha check failed"
# Validate the received data
if not validation.validate(json_data):
return "Data validation check failed"
# Prepare the data
escape_values(json_data)
convert_newlines(json_data)
# Attempt to send the mail
if not send_booking_mail(json_data):
return "Send mail error"
# If this point is reached, everything completed successfully
return "success"
def check_recaptcha(response, ip):
# Prepare the recaptcha verification_data
verification_data = {
"secret": app.config.get("RECAPTCHA_SECRET_KEY"),
"response": response,
"remoteip": ip
}
# Make the request
request = requests.get(
app.config.get("RECAPTCHA_VERIFY_URL"),
params = verification_data
)
# Check the request results
if request.json()["success"]:
return True
else:
return False
def convert_newlines(data):
    # Replace carriage returns with <br>
for key, value in data.items():
data[key] = "<br>".join(value.split("\r"))
def escape_values(data):
# Escape all the values in the dictionary
for key, value in data.items():
data[key] = escape(value)
def send_booking_mail(booking_info):
# Prepare the body of the message
mail_body = render_template("emails/new-booking.html.j2", data = booking_info)
# Prepare the email
email = Message("New Booking from " + booking_info["name"],
html = mail_body,
recipients = [app.config["BOOKING_EMAIL_TARGET"]])
# Send the email
try:
mail.send(email)
except:
return False
return True
@app.route('/robots.txt')
@app.route('/sitemap.xml')
def static_from_root():
# Get Robots.txt and Sitemap.xml from the static folder
return send_from_directory(app.static_folder, request.path[1:])
@app.context_processor
def set_variables():
# Set variables for all templates
visitor_message = app.config.get("VISITOR_MESSAGE")
resource_version = "?version=" + version
if visitor_message:
return dict(resource_version = resource_version, visitor_message = visitor_message)
else:
return dict(resource_version = resource_version)
@app.after_request
def add_header(response):
# Disable cache
# Reference: https://stackoverflow.com/questions/34066804/disabling-caching-in-flask
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, max-age=0"
response.headers["Pragma"] = "no-cache"
response.headers["Expires"] = "0"
return response
if __name__ == "__main__":
app.run()
``` |
{
"source": "JonathanPetit/Parser-Renamer-torrentfile",
"score": 3
} |
#### File: Parser-Renamer-torrentfile/MovieSerieTorrent/formatting.py
```python
try:
from parser import Parser
except:
from .parser import Parser
from colorama import Fore, Style, init
from tabulate import tabulate
import os
class Formatting:
def __init__(self):
        self.headers_movie = self.headers = ['N°', 'Title', 'Part', 'Year', 'Language', 'Quality']
self.headers_serie = ['N°', 'Title', 'Season', 'Episode', 'Language', 'Quality']
self.table = None
self.infos = None
init(autoreset=True)
def _list_for_formatting(self, files):
parse_file = Parser().parse(files)
self.infos = parse_file[0]
if self.infos['type'] == 'serie':
return ['{title}', '{season}', '{episode}', '{languages}', '{quality}']
else:
return ['{title}', '{Part}', '{year}', '{languages}', '{quality}']
def formatting(self, path):
list_movies = []
list_serie = []
self.path = path
j = 1
for files in os.listdir(self.path):
i = 0
self.files = self._list_for_formatting(files)
            if not files.endswith('.DS_Store'):
for elements in self.files:
try:
self.files[i] = self.files[i].format(**self.infos)
except KeyError:
self.files[i] = ''
i += 1
                if self.infos['type'] == 'serie':
                    list_serie.append(self.files)
                else:
                    list_movies.append(self.files)
                    # number movies as they are found; series are numbered after this loop
                    self.files.insert(0, j)
                    j += 1
for files in list_serie:
files.insert(0, j)
j += 1
print(Fore.RED + 'MOVIE:')
print(tabulate(list_movies, headers=self.headers_movie))
print('\n')
print(Fore.RED + 'SERIE:')
print(tabulate(list_serie, headers=self.headers_serie))
print('\n')
```
#### File: Parser-Renamer-torrentfile/MovieSerieTorrent/renamer.py
```python
import os
from fuzzywuzzy import fuzz
try:
from parser import Parser
except:
from .parser import Parser
class Renamer:
def __init__(self):
self.infos = None
self.excess = None
self.parse_file = None
self.rename_file = []
self.compteur = 0
self.filename = None
def extract(self, files):
self.parse_file = Parser().parse(files)
self.infos = self.parse_file[0]
self.excess = self.parse_file[1]
if self.infos['type'] == 'serie':
self.rename_file = ['{title}', ' {season}{episode} ', '-{languages}-', '{quality}', '.{extension}']
return self.rename_file
else:
self.rename_file = ['{title}', ' {Part}', ' ({year})', '-{languages}-', '{quality}', '.{extension}']
return self.rename_file
def preview(self, files):
self.rename_file = self.extract(files)
# Build liste for filename
for elements in self.rename_file:
try:
self.rename_file[self.compteur] = self.rename_file[self.compteur].format(**self.infos)
except KeyError:
self.rename_file[self.compteur] = ''
self.compteur += 1
        # Drop empty components (avoids mutating the list while iterating over it)
        self.rename_file = [element for element in self.rename_file if element != '']
# Rename
self.filename = ''.join(self.rename_file)
return self.filename
def renaming(self, path, filename):
filename = self.preview(filename)
for element in os.listdir(path):
if fuzz.token_set_ratio(filename, element) == 100:
path_file = os.path.join(path, element)
target = os.path.join(path, filename)
os.rename(path_file, target)
``` |
{
"source": "JonathanPierce/Algae",
"score": 3
} |
#### File: Algae/postprocessors/guassian.py
```python
import helpers.common as common
import helpers.io as io
from multiprocessing import Process
import math
# converts a JSON pair result into a Python object
def pairJSONToObject(json):
student1 = json["pair"][0]
student2 = json["pair"][1]
score = float(json["score"])
return common.PairResult(student1, student2, score)
# finds the mean of the data
def getMean(data):
total = 0.0
count = 0.0
for element in data:
total = total + element.score
count = count + 1.0
return total / count
# finds the std. deviation of the data
def getDeviation(data, mean):
totalDiff = 0.0
count = 0.0
for element in data:
totalDiff = totalDiff + (element.score - mean)**2.0
count = count + 1.0
normalized = totalDiff / count
return math.sqrt(normalized)
# gets the z-score of a data point
def zScore(score, mean, deviation):
return (score - mean) / deviation
# filters out result those that aren't suspicious
def filterData(data, mean, deviation, threshold, above, minThreshold):
results = []
for element in data:
z = zScore(element.score, mean, deviation)
if z <= threshold and not above:
results.append(element)
continue
if z >= threshold and above:
results.append(element)
continue
if minThreshold != None and element.score <= minThreshold and not above:
results.append(element)
continue
if minThreshold != None and element.score >= minThreshold and above:
results.append(element)
continue
return results
# creates clusters from the filtered data
def createClusters(data, filename, assignName, allowPartners, helpers):
clusters = []
for element in data:
cluster = common.Cluster(allowPartners, filename, element.score)
member1 = common.Member(element.pair[0], assignName, helpers)
member2 = common.Member(element.pair[1], assignName, helpers)
cluster.add(member1)
cluster.add(member2)
clusters.append(cluster)
return clusters
# runs an entry in parellel
def runEntry(filename, students, helpers, assignment, args, allowPartners):
# get the data
assignName = assignment.name
sourceSuffix = args["sourceSuffix"]
resultsSuffix = args["resultsSuffix"]
threshold = assignment.args["threshold"]
above = args["above"]
minThreshold = None
if assignment.args.has_key("minThreshold"):
minThreshold = assignment.args["minThreshold"]
safeFilename = common.makeFilenameSafe(filename) + sourceSuffix
filepath = helpers.getProcessedPath(assignName, safeFilename)
if filepath != None:
rawData = common.PairResults(assignName, safeFilename, helpers)
data = []
# convert into python objects
for pair in rawData.iterate():
data.append(pair)
# get the mean
mean = getMean(data)
# get the deviation
deviation = getDeviation(data, mean)
helpers.printf("{}/{}: mean {}, deviation {}\n".format(assignName, filename, mean, deviation))
# filter out data
filtered = filterData(data, mean, deviation, threshold, above, minThreshold)
# create the clusters
clusters = createClusters(filtered, filename, assignName, allowPartners, helpers)
# flush to disk
common.clustersToStandardJSON(clusters, assignName, common.makeFilenameSafe(filename) + resultsSuffix, helpers)
# all done!
helpers.printf("Finished '{}', with {} results!\n".format(assignName, len(clusters)))
# the main function
def run(students, assignments, args, helpers):
# threads to join later
threads = []
# for each assignment
for assignment in assignments:
# for each entry
assignName = assignment.name
allowPartners = assignment.args["allowPartners"]
# print progress
helpers.printf("postprocessing '{}' in parellel...\n".format(assignName))
# allow entry lists and file lists
entries = []
if assignment.args.has_key("entries"):
entries = assignment.args["entries"]
else:
if assignment.args.has_key("files"):
entries = assignment.args["files"]
for entry in entries:
            # use the first source as the filename in case of an entry
filename = entry
if assignment.args.has_key("entries"):
filename = entry["sources"][0]
# create the thread
t = Process(target=runEntry, args=(filename, students, helpers, assignment, args, allowPartners))
threads.append(t)
t.start()
# join all of the threads
for t in threads:
t.join()
# all done
return True
```
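The filtering step keeps a pair when its z-score crosses `threshold` on the side selected by `above`, or when its raw score crosses `minThreshold`. A small self-contained illustration of that decision with made-up scores:

```python
# Toy illustration of the z-score filter used in filterData().
import math

scores = [10.0, 12.0, 11.0, 30.0, 9.0]
mean = sum(scores) / len(scores)
deviation = math.sqrt(sum((s - mean) ** 2 for s in scores) / len(scores))

threshold, above = 1.5, True
flagged = [s for s in scores if (s - mean) / deviation >= threshold]
print(round(mean, 2), round(deviation, 2), flagged)  # only the 30.0 outlier is kept
```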
#### File: Algae/processors/bloom.py
```python
import helpers.common as common
from multiprocessing import Process
import math
from index import *
from token_index import weightFun as tokenWeightFun
from token_index import genKeys as tokenGenKeys
from ident_index import weightFun as identWeightFun
from ident_index import genKeys as identGenKeys
def runAssignment(students, assignment, args, helpers, weightFun, genKeys):
assignName = assignment.name
files = assignment.args["files"]
allowPartners = assignment.args["allowPartners"]
threshold = args["threshold"] * float(len(students))
sourceSuffixes = ["tokenized.txt", "identifiers.txt", "literals.txt"]
resultsSuffix = args["resultsSuffix"]
helpers.printf("Running assignment '{}' in parellel...\n".format(assignName))
for filename in files:
indexes = [InvertedIndex(), InvertedIndex(), InvertedIndex()]
# for each type of Data
for i in range(3):
sourceSuffix = sourceSuffixes[i]
curWeightFun = weightFun[i]
curGenKeys = genKeys[i]
index = indexes[i]
for student in students:
# try to read the file
safeFilename = common.makeFilenameSafe(filename) + sourceSuffix
text = helpers.readFromPreprocessed(student, assignName, safeFilename)
if text != None:
# generate the keys
keys = curGenKeys(text)
# add to the index
for key in keys:
index.add(key, student)
# prune and weight
index.prune(threshold)
index.weight(curWeightFun, len(students))
# build the denormalized pair results
resultFilename = common.makeFilenameSafe(filename) + "raw_" + resultsSuffix
results = common.PairResults(assignName, resultFilename, helpers)
seen = []
for student in students:
combined = {}
for i in range(3):
# retreive the keys
safeFilename = common.makeFilenameSafe(filename) + sourceSuffixes[i]
text = helpers.readFromPreprocessed(student, assignName, safeFilename)
index = indexes[i]
if text != None:
# generate the keys
keys = genKeys[i](text)
# get the member (for the partner)
member = common.Member(student, assignName, helpers)
partner = member.partner
# handle allowPartners
if not allowPartners:
partner = None
# get the score results
studentResults = index.scoreStudent(student, partner, keys)
# add to results
for other in studentResults:
if other in combined:
# add the score
combined[other] += studentResults[other]
else:
# create the entry
combined[other] = studentResults[other]
# add to pair results
for other in combined:
if other not in seen:
pair = common.PairResult(student, other, combined[other])
results.add(pair)
# prevent duplicates
seen.append(student)
# normalize the scores to range 0-100
results.finish()
biggest = 0.0
for pair in results.iterate():
if pair.score > biggest:
biggest = float(pair.score)
# flush to disk
finalResultFilename = common.makeFilenameSafe(filename) + resultsSuffix
finalResults = common.PairResults(assignName, finalResultFilename, helpers)
for pair in results.iterate():
pair.score = (float(pair.score) / biggest) * 100.0
finalResults.add(pair)
finalResults.finish()
# all done
helpers.printf("Finished '{}'!\n".format(assignName))
def run(students, assignments, args, helpers):
# threads to join later
threads = []
def literalWeightFun(key, students, total):
return 1.0 + ((1.0 - float(len(students))/total) * 5.0)
# for each assignment
for assignment in assignments:
weightFun = [tokenWeightFun, identWeightFun, literalWeightFun]
genKeys = [tokenGenKeys, identGenKeys, identGenKeys]
t = Process(target=runAssignment, args=(students, assignment, args, helpers, weightFun, genKeys))
threads.append(t)
t.start()
# wait for all to finish
for t in threads:
t.join()
# all done
return True
```
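For context, a minimal standalone sketch of the literal-key weighting defined inside `run` above (the function name and sample counts here are hypothetical, and it assumes the `students` argument of `literalWeightFun` is the collection of students sharing a given key): keys shared by few students are weighted up toward 6.0, while keys shared by everyone stay at 1.0.
```python
def literal_weight(num_students_with_key, total_students):
    # same formula as literalWeightFun above
    return 1.0 + ((1.0 - float(num_students_with_key) / total_students) * 5.0)

print(literal_weight(1, 100))    # rare key      -> 5.95
print(literal_weight(100, 100))  # shared by all -> 1.0
```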
#### File: Algae/processors/test.py
```python
def run(students, assignments, args, helpers):
helpers.printf("Processor!\n")
helpers.printf("{} students and {} assignments\n".format(len(students), len(assignments)))
helpers.printf(args['message'] + '\n')
return True
``` |
{
"source": "JonathanPlasse/industrial_training",
"score": 3
} |
#### File: detect_pump/nodes/detect_pump.py
```python
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2
import math
import numpy as np
# known pump geometry
# - units are pixels (of half-size image)
PUMP_DIAMETER = 360
PISTON_DIAMETER = 90
PISTON_COUNT = 7
def showImage(img):
cv2.imshow('image', img)
cv2.waitKey(1)
def plotCircles(img, circles, color):
if circles is None: return
for (x,y,r) in circles[0]:
cv2.circle(img, (int(x),int(y)), int(r), color, 2)
def ptDist(p1, p2):
dx=p2[0]-p1[0]; dy=p2[1]-p1[1]
return math.sqrt( dx*dx + dy*dy )
def ptMean(p1, p2):
return ((int(p1[0]+p2[0])/2, int(p1[1]+p2[1])/2))
def rect2centerline(rect):
p0=rect[0]; p1=rect[1]; p2=rect[2]; p3=rect[3];
width=ptDist(p0,p1); height=ptDist(p1,p2);
# centerline lies along longest median
if (height > width):
cl = ( ptMean(p0,p1), ptMean(p2,p3) )
else:
cl = ( ptMean(p1,p2), ptMean(p3,p0) )
return cl
def ptLineDist(pt, line):
x0=pt[0]; x1=line[0][0]; x2=line[1][0];
y0=pt[1]; y1=line[0][1]; y2=line[1][1];
return abs((x2-x1)*(y1-y0)-(x1-x0)*(y2-y1))/(math.sqrt((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)))
def findAngle(p1, p2, p3):
p1=np.array(p1); p2=np.array(p2); p3=np.array(p3);
v1=p1-p2; v2=p3-p2;
return math.atan2(-v1[0]*v2[1]+v1[1]*v2[0],v1[0]*v2[0]+v1[1]*v2[1]) * 180/3.14159
def process_image(msg):
try:
# convert sensor_msgs/Image to OpenCV Image
bridge = CvBridge()
orig = bridge.imgmsg_to_cv2(msg, "bgr8")
drawImg = orig
# resize image (half-size) for easier processing
resized = cv2.resize(orig, None, fx=0.5, fy=0.5)
drawImg = resized
# convert to single-channel image
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
drawImg = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
# threshold grayscale to binary (black & white) image
threshVal = 150
ret,thresh = cv2.threshold(gray, threshVal, 255, cv2.THRESH_BINARY)
drawImg = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
# detect outer pump circle
pumpRadiusRange = ( PUMP_DIAMETER/2-2, PUMP_DIAMETER/2+2)
pumpCircles = cv2.HoughCircles(thresh, cv2.HOUGH_GRADIENT, 1, PUMP_DIAMETER, param2=7, minRadius=pumpRadiusRange[0], maxRadius=pumpRadiusRange[1])
plotCircles(drawImg, pumpCircles, (255,0,0))
if (pumpCircles is None):
raise Exception("No pump circles found!")
        elif len(pumpCircles[0]) != 1:
raise Exception("Wrong # of pump circles: found {} expected {}".format(len(pumpCircles[0]),1))
else:
pumpCircle = pumpCircles[0][0]
# detect blobs inside pump body
pistonArea = 3.14159 * PISTON_DIAMETER**2 / 4
blobParams = cv2.SimpleBlobDetector_Params()
blobParams.filterByArea = True;
blobParams.minArea = 0.80 * pistonArea;
blobParams.maxArea = 1.20 * pistonArea;
blobDetector = cv2.SimpleBlobDetector_create(blobParams)
blobs = blobDetector.detect(thresh)
drawImg = cv2.drawKeypoints(drawImg, blobs, (), (0,255,0), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        if len(blobs) != PISTON_COUNT:
            raise Exception("Wrong # of pistons: found {} expected {}".format(len(blobs), PISTON_COUNT))
pistonCenters = [(int(b.pt[0]),int(b.pt[1])) for b in blobs]
# determine primary axis, using largest contour
im2, contours, h = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
maxC = max(contours, key=lambda c: cv2.contourArea(c))
boundRect = cv2.minAreaRect(maxC)
centerline = rect2centerline(cv2.boxPoints(boundRect))
cv2.line(drawImg, centerline[0], centerline[1], (0,0,255))
# find closest piston to primary axis
closestPiston = min( pistonCenters, key=lambda ctr: ptLineDist(ctr, centerline))
cv2.circle(drawImg, closestPiston, 5, (255,255,0), -1)
# calculate pump angle
p1 = (orig.shape[1], pumpCircle[1])
p2 = (pumpCircle[0], pumpCircle[1])
p3 = (closestPiston[0], closestPiston[1])
angle = findAngle(p1, p2, p3)
print "Found pump angle: {}".format(angle)
except Exception as err:
print err
# show results
showImage(drawImg)
def start_node():
rospy.init_node('detect_pump')
rospy.loginfo('detect_pump node started')
rospy.Subscriber("image", Image, process_image)
rospy.spin()
if __name__ == '__main__':
try:
start_node()
except rospy.ROSInterruptException:
pass
```
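As a quick sanity check of the geometry helper used above, here is a standalone copy of `findAngle` evaluated on a couple of arbitrary sample points (the points are made up, and `math.pi` is used instead of the 3.14159 literal). The result is the signed angle in degrees at the vertex `p2` between the rays toward `p1` and `p3`.
```python
import math
import numpy as np

def find_angle(p1, p2, p3):
    # same computation as findAngle in detect_pump.py
    p1, p2, p3 = np.array(p1), np.array(p2), np.array(p3)
    v1, v2 = p1 - p2, p3 - p2
    return math.atan2(-v1[0] * v2[1] + v1[1] * v2[0],
                      v1[0] * v2[0] + v1[1] * v2[1]) * 180 / math.pi

print(find_angle((1, 0), (0, 0), (1, 1)))   # -45.0
print(find_angle((1, 0), (0, 0), (0, -1)))  # 90.0
```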
#### File: detect_pump/nodes/image_pub.py
```python
import rospy
import sys
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
def rotateImg(img, angle):
rows,cols,ch = img.shape
M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)
return cv2.warpAffine(img,M,(cols,rows))
def start_node(filename):
rospy.init_node('image_pub')
rospy.loginfo('image_pub node started')
img = cv2.imread(filename)
# cv2.imshow("image", img)
# cv2.waitKey(2000)
bridge = CvBridge()
imgMsg = bridge.cv2_to_imgmsg(img, "bgr8")
pub = rospy.Publisher('image', Image, queue_size=10)
angle = 0
while not rospy.is_shutdown():
rotImg = rotateImg(img, angle)
imgMsg = bridge.cv2_to_imgmsg(rotImg, "bgr8")
pub.publish(imgMsg)
angle = (angle + 10) % 360
rospy.Rate(1.0).sleep() # 1 Hz
if __name__ == '__main__':
try:
start_node( rospy.myargv(argv=sys.argv)[1] )
except rospy.ROSInterruptException:
pass
``` |
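For reference, the rotation used by `rotateImg` above can be exercised on its own; this sketch rotates a synthetic black frame by 10 degrees about its center (the image data is a placeholder, the OpenCV calls are the same ones used in the node).
```python
import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)             # placeholder frame
rows, cols, _ = img.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 10, 1)  # 10 degrees, scale 1
rotated = cv2.warpAffine(img, M, (cols, rows))
print(rotated.shape)  # (480, 640, 3)
```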
{
"source": "jonathanplov/RealEstate",
"score": 3
} |
#### File: RealEstate/pythonScripts/dataScraper.py
```python
import urllib.request as urllib2
from bs4 import BeautifulSoup
import xml_calls
import re
import database_setup
import database_calls
data = []
primaryList = []
secondaryList = []
types = ['villa','raekkehus','ejerlejlighed']
estateBool = False
"""Url looks like this siteurl/type-city-postcode"""
site_url = 'https://www.boliga.dk/indeks/til-salg/'
page = urllib2.urlopen(site_url)
soup = BeautifulSoup(page, 'html.parser')
#Get data class
class getData:
def __init__(self,iterationIternal,iteration,primary,secondary,thumbnail,eBool):
self.iterationIternal = iterationIternal
self.iteration = iteration
self.primary = primary
self.secondary = secondary
self.thumbnail = thumbnail
self.eBool = eBool
def getPrice(self):
return self.primary[self.iteration+1].text.strip()
def getAddress(self):
return self.secondary[self.iteration].text.strip()
def getPriceSQM(self):
return self.secondary[self.iteration+1].text.strip()
def getImageUrl(self):
try:
return str(self.thumbnail).split("style=\"background-image:url('")[self.iterationIternal+1].split("')")[0]
except:
return 'placeholder.png'
def logEstates():
#Run database_setup file
try:
database_setup.run()
except:
print('Database run error')
estateAmnt = 0
iType = 0
while iType < len(types):
estateType = types[iType]
iXml = 0
while iXml < xml_calls.getDataAmnt():
instance = xml_calls.getXmlData(iXml)
url = site_url + estateType + '-' + instance.getDesc() + '-' + instance.getCode()
page = urllib2.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
allOccPrimary = soup.findAll('span', attrs={'class': 'primary-value'})
allOccSecondary = soup.findAll('span', attrs={'class': 'secondary-value'})
allOccThumbnail = soup.findAll('div', attrs={'class': 'thumbnail'})
for elem in allOccPrimary:
primaryList.append(elem)
print('[Number of type: ' + estateType + ' in ' + instance.getDesc() + ': ' + str(len(primaryList)/2) + ']')
iEstate = 0
iEstateInternal = 0
estateBool = False
while iEstate < len(primaryList):
if(iEstate % 2 == 0):
dataPoint = getData(iEstateInternal,iEstate,allOccPrimary,allOccSecondary,allOccThumbnail,estateBool)
dbInstance = database_calls.estateDB(estateType, dataPoint.getAddress(), instance.getDesc(), instance.getCode(), dataPoint.getPrice(), dataPoint.getPriceSQM(), dataPoint.getImageUrl())
dbInstance.addToDB()
iEstateInternal += 1
if estateBool == True:
estateBool = False
else:
estateBool = True
iEstate += 1
estateAmnt +=1
primaryList.clear()
secondaryList.clear()
iXml += 1
iType += 1
print("Total amount of estates counted: ")
print(estateAmnt/2)
logEstates()
``` |
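To make the "siteurl/type-city-postcode" pattern mentioned in the scraper concrete, here is a small sketch of the listing URL it builds (the city and postcode are made-up example values):
```python
site_url = 'https://www.boliga.dk/indeks/til-salg/'
estate_type = 'villa'
city, postcode = 'koebenhavn', '2100'   # hypothetical values
url = site_url + estate_type + '-' + city + '-' + postcode
print(url)  # https://www.boliga.dk/indeks/til-salg/villa-koebenhavn-2100
```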
{
"source": "jonathanpoelen/jln.mp",
"score": 3
} |
#### File: jln.mp/tools/update_meson_and_sources.py
```python
import os
import shutil
all_targets = set()
cpp_test_paths = []
meson_output_lines = []
def ordered_listdir(path):
l = os.listdir(path)
l.sort()
return l
def new_target(name):
while name in all_targets:
name += '_'
all_targets.add(name)
return name
def new_executable(name, path, prefix, i):
target = f'{prefix}{i}'
meson_output_lines.append(f"{target} = executable('{name}', '{path}', dependencies: test_dep)")
return target
def new_alias(name, targets):
deps = ", ".join(targets)
meson_output_lines.append(f"alias_target('{name}', {deps})")
def make_targets(path, prefix, target_prefix='t'):
l = []
for f in ordered_listdir(path):
newpath = os.path.join(path, f)
if os.path.isfile(newpath):
if f.endswith('.cpp'):
basename = f[:-4]
cpp_test_paths.append(f'{path}/{f}')
name = new_target(basename)
l.append(new_executable(name, newpath, target_prefix, f'_{basename}'))
else:
dirtarget = f'{prefix}.{f}'
targets = make_targets(newpath, dirtarget, f'{target_prefix}_{f}')
name = new_target(dirtarget)
new_alias(name, targets)
l += targets
return l
autogen_tests = []
def gentest(test_prefix, dir_path, filename):
test_name = f'{test_prefix}{filename[:-3]}cpp'.replace('/','@')
test_name = f'test/autogen/{test_name}'
autogen_tests.append(test_name)
with open(test_name, 'w') as ftest:
ftest.write(f'#include "jln/{dir_path}/{filename}"\n')
def genfiles(dir_path):
path = f'include/jln/{dir_path}'
seppos = dir_path.find('/')
test_prefix = '' if seppos == -1 else f'{dir_path[seppos+1:]}/'
with open(f'{path}.hpp', 'w') as outfile:
outfile.write("#pragma once\n\n")
for d in ordered_listdir(f'{path}'):
if d in ('smp', 'config'):
continue
newpath = f'{path}/{d}'
if os.path.isdir(newpath):
if d == 'detail':
for filename in os.listdir(newpath):
if filename.endswith('.hpp'):
gentest(test_prefix, dir_path, f'{d}/{filename}')
else:
outfile.write(f'#include <jln/{dir_path}/{d}.hpp>\n')
with open(f'{newpath}.hpp', 'w') as flist:
flist.write("#pragma once\n\n")
for filename in ordered_listdir(newpath):
if filename.endswith('.hpp'):
flist.write(f'#include <jln/{dir_path}/{d}/{filename}>\n')
gentest(test_prefix, dir_path, f'{d}/{filename}')
elif not os.path.isdir(newpath[:-4]):
if d != 'detail':
outfile.write(f'#include <jln/{dir_path}/{d}>\n')
gentest(test_prefix, dir_path, d)
if os.path.isdir('test/autogen/'):
shutil.rmtree('test/autogen/')
os.mkdir('test/autogen')
new_alias('mp', make_targets('test/src', 'mp'))
genfiles('mp')
genfiles('mp/smp')
with open(f'test/autogen/main.cpp', 'w') as f:
f.write('int main() {}')
autogen_tests.append(f'test/autogen/main.cpp')
with open('meson.build') as f:
content = f.read()
start_str = '# start tests\n'
stop_str = '# stop tests\n'
start = content.index(start_str)
stop = content.index(stop_str, start)
with open('meson.build', 'w') as f:
f.write(content[:start])
f.write(start_str)
f.write('\n'.join(meson_output_lines))
f.write('\n')
sources = "',\n '".join(autogen_tests)
f.write(f"executable('check_inc', [\n '{sources}'\n], dependencies: test_dep)\n")
f.write(stop_str)
f.write(content[stop + len(stop_str):])
with open('test/mp.cpp', 'w') as f:
for path in cpp_test_paths:
f.write(f'#include "{path[5:]}"\n')
``` |
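For illustration, a rough sketch of the kind of lines `new_executable` and `new_alias` above append to `meson.build` (the target, test name, and path are hypothetical):
```python
target = 't_src_always_true'          # made-up target name
name = 'always_true'                  # made-up test name
path = 'test/src/always_true.cpp'     # made-up source path
print(f"{target} = executable('{name}', '{path}', dependencies: test_dep)")
print(f"alias_target('mp', {target})")
```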
{
"source": "jonathanpoelen/qwebdriver",
"score": 2
} |
#### File: qwebdriver/qwebdriver/webdriver.py
```python
import sys
import json
from typing import Union, Optional
from PySide2.QtWebEngineCore import QWebEngineUrlRequestInterceptor
from PySide2.QtWebEngineWidgets import (QWebEngineDownloadItem,
QWebEngineSettings,
QWebEngineProfile,
QWebEngineView,
QWebEnginePage)
from PySide2.QtCore import (QCoreApplication,
QUrl,
QRect,
Qt,
Slot,
QTimer,
QEventLoop)
from PySide2.QtWidgets import QApplication, QShortcut
from PySide2.QtGui import QImage
_LOG_CAT = '\x1b[33m[driver]\x1b[0m'
class _UrlRequestInterceptor(QWebEngineUrlRequestInterceptor):
"""Wrapper to use a function with QWebEngineProfile::set_url_request_interceptor()"""
interceptor:Optional[callable] = None
def __init__(self, log:callable):
super().__init__()
self.log = log
def interceptRequest(self, info):
"""Block url when self.interceptor(url) is True"""
url = info.requestUrl().url(QUrl.FormattingOptions(QUrl.None_))
blocked = self.interceptor(url)
self.log(_LOG_CAT, 'rejected:\x1b[31m' if blocked else 'accepted:\x1b[32m', url, '\x1b[0m')
if blocked:
info.block(True)
class _WebPage(QWebEnginePage):
"""Wrapper to control javascript errors and console messages"""
js_error = None
js_trace = False
def javaScriptConsoleMessage(self, level, message, lineNumber, sourceID):
if self.js_trace:
# TODO check sourceID
if level == QWebEnginePage.ErrorMessageLevel:
self.js_error = f'{message} line {lineNumber}'
print(f'js:{lineNumber}:', message, file=sys.stderr)
class JsException(Exception):
pass
def _null_fn(*args):
"""A function that does nothing (used when logger is None)"""
pass
def _strerr_print(*args):
"""print on stderr"""
    print(*args, file=sys.stderr)
class AppDriver:
"""QApplication + WebDriver"""
_excep = None
def __init__(self, headless=True, logger=False):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
# QCoreApplication.setAttribute(Qt.AA_UseHighDpiPixmaps)
self._app = QApplication(sys.argv)
self.driver = WebDriver(headless, logger)
def run(self, f:callable) -> int:
"""
Call f(driver) then use quit()
"""
timer = QTimer()
timer.timeout.connect(lambda: self._run(f))
timer.setSingleShot(True)
timer.start(0)
r = self._app.exec_()
if self._excep:
raise self._excep
return r
def _run(self, f):
try:
f(self.driver)
except Exception as e:
self._excep = e
finally:
self.quit()
def exec_(self) -> int:
return self._app.exec_()
def quit(self):
self.driver.quit()
self._app.quit()
def __enter__(self):
return self.driver
def __exit__(self, type, value, traceback):
self.quit()
class WebDriver:
_result = None
_view = None
_last_js_error = None
_dev_view = None
_headless_view = None
def __init__(self, headless:bool=True, logger:Union[callable,bool,None]=False):
if logger == True:
self.log = _strerr_print
elif logger:
self.log = logger
else:
self.log = _null_fn
self._headless = headless
self._event_loop = QEventLoop()
self._timer = QTimer()
self._timer.timeout.connect(self._event_loop.exit)
self._timer.setSingleShot(True)
self._profile = QWebEngineProfile(qApp)
self._profile.downloadRequested.connect(self._download_request)
self._page = _WebPage(self._profile)
# self._page.loadFinished.connect(self._event_loop.exit)
self._interceptor = _UrlRequestInterceptor(self.log)
# use with execute_script for function results
self._json_decode = json.JSONDecoder().decode
self._view = QWebEngineView()
self._view.loadFinished.connect(self._event_result)
self._view.setPage(self._page)
self._view.setAttribute(Qt.WA_NoSystemBackground)
self._view.show()
if headless:
# Wide screen so that a maximum of things is visible in width
self._view.resize(4096, 8192)
self._page.settings().setAttribute(QWebEngineSettings.ShowScrollBars, False)
self._view.setAttribute(Qt.WA_Disabled)
self._view.setAttribute(Qt.WA_DontShowOnScreen)
else:
self._view.resize(1024, 750)
exit_act = QShortcut("Ctrl+Q", self._view)
exit_act.activated.connect(self.quit)
devtool_act = QShortcut("F12", self._view)
devtool_act.activated.connect(self._toggle_devtools)
def quit(self) -> None:
if not self._page:
return
if self._view:
self._view.deleteLater()
if self._dev_view:
self._dev_view.deleteLater()
if self._headless_view:
self._headless_view.deleteLater()
self._page.deleteLater()
self._profile.deleteLater()
self._page = None
def set_url_request_interceptor(self, interceptor:Optional[callable]) -> None:
"""
:param interceptor: A function that takes a url and returns True when
it needs to be blocked. When interceptor is None, it is disabled.
"""
self.log(_LOG_CAT, 'init interceptor:', interceptor)
self._interceptor.interceptor = interceptor
self._profile.setUrlRequestInterceptor(self._interceptor)
def get(self, url:str) -> None:
"""Open a url"""
self.log(_LOG_CAT, 'load:', url)
self._page.setUrl(url)
self._event_loop.exec_()
self.log(_LOG_CAT, 'loaded')
return self._result
def sleep_ms(self, ms:int) -> None:
"""Wait ms milliseconds"""
self.log(_LOG_CAT, 'sleep:', ms)
self._timer.start(ms)
self._event_loop.exec_()
def download(self, url:str, filename:str=None, with_progression:bool=False) -> None:
"""Download a url
:param with_progression: When True, progression is sent to logger
"""
self.log(_LOG_CAT, 'download:', url, 'to filename', filename)
self._with_progression = with_progression and self.log != _null_fn
self._page.download(url, filename)
self._event_loop.exec_()
def execute_script(self, script:str, raise_if_js_error:bool=True):
"""Execute a javascript code.
The javascript code can return a value with return which must be
compatible with JSON.stringify().
:param raise_if_js_error: When True, convert javascript error and
console.error message to JsException. Otherwise return None when
a javascript error is encountered.
"""
self.log(_LOG_CAT, 'script:', script)
# return an empty string when there is an error in the script
# return '[]' for nil/undefined value (no return value)
# otherwise return a json whose value is in a list
script = f'{{ const ___r = (()=>{{ {script} }})();(___r === null || ___r === undefined) ? "[]" : JSON.stringify([___r]); }}'
self._page.js_error = None
self._page.js_trace = True
self._page.runJavaScript(script, 0, self._event_result)
self._event_loop.exec_()
self._page.js_trace = False
self._last_js_error = self._page.js_error
self.log(_LOG_CAT, 'result:', (self._last_js_error, self._result))
if not self._result:
if raise_if_js_error:
raise JsException(self._last_js_error)
return None
if self._result != '[]':
return self._json_decode(self._result)[0]
return None
def get_last_js_error(self) -> str:
"""Get last javascript error."""
return self._last_js_error or ''
def grab(self, x:int=0, y:int=0, w:int=-1, h:int=-1,
frozen_after_ms:int=0, max_iter:int=10) -> QImage:
"""Get a QImage of the page.
If the image area is larger than the page, it is automatically truncated.
A negative value for w or h means page width/height.
:param frozen_after_ms: start a loop which stops when 2 captures
separated by a time limit are identical or when max_iter is reached.
:param max_iter: maximum number of iterations used with frozen_after_ms.
"""
self.log(_LOG_CAT, 'grab:', (x, y, w, h), 'delay:', frozen_after_ms, 'max_iter:', max_iter)
page_width, page_height, scroll_top, screen_h, scroll_left = self.execute_script(
f'''
e = document.documentElement;
return [e.scrollWidth, e.scrollHeight, e.scrollTop, e.clientHeight, window.scrollX];''')
# shrink w/h compared to page_width/page_height
w = page_width if w < 0 else min(w, page_width-x)
h = page_height - y if h < 0 else min(h, page_height-y)
w = min(w + min(0, x), page_width)
h = min(h + min(0, y), page_height)
x = max(0, x)
y = max(0, y)
if x >= page_width or y >= page_height or w <= 0 or h <= 0:
return QImage()
must_resize = (y < scroll_top or y + h > scroll_top + screen_h)
view = self._view
if must_resize:
old_size = view.size()
if not self._headless:
view = self._headless_view
if not view:
view = QWebEngineView()
view.setAttribute(Qt.WA_Disabled)
view.setAttribute(Qt.WA_NoSystemBackground)
view.setAttribute(Qt.WA_DontShowOnScreen)
self._headless_view = view
# no scroll when page_height == h
view_width = page_width if page_height == h else self._view.size().width()
view.resize(view_width, h)
if not self._headless:
view.setPage(self._page)
view.show()
self.sleep_ms(200)
if y != scroll_top:
self.execute_script(f'window.scroll({scroll_left}, {y})')
self.sleep_ms(50 + h//1000 * 20)
rect = QRect(x, 0, w, h)
else:
rect = QRect(x, y - scroll_top, w, h)
self.log(_LOG_CAT, 'computed rect:', rect)
img1 = view.grab(rect).toImage()
if frozen_after_ms:
for i in range(max_iter):
self.sleep_ms(frozen_after_ms)
img2 = self._view.grab(rect).toImage()
if img1 == img2:
break
img1 = img2
if must_resize:
if not self._headless:
self._view.setPage(self._page)
view.hide()
else:
view.resize(old_size)
self.execute_script(f'window.scroll({scroll_left}, {scroll_top})')
self.sleep_ms(200)
return img1
def take_screenshot(self, filename:str, format:Optional[str]=None,
quality:int=-1,
x:int=0, y:int=0, w:int=-1, h:int=-1,
frozen_after_ms:int=0, max_iter:int=10) -> bool:
"""Screenshot the page.
See self.grab()
"""
self.log(_LOG_CAT, 'screenshot:', filename, 'format:', format)
img = self.grab(x, y, w, h, frozen_after_ms, max_iter)
return img.save(filename, format, quality)
def resize(self, width:int=-1, height:int=-1) -> None:
"""Resize view
A negative value for width or height means contents size width/height.
"""
self.log(_LOG_CAT, 'resize:', width, height)
if width <= 0 or height <= 0:
size = self._page.contentsSize().toSize()
if width <= 0: width = size.width()
if height <= 0: height = size.height()
self.log(_LOG_CAT, 'resize(computed):', width, height)
self._view.resize(width, height)
def contents_size(self) -> tuple[int,int]:
"""Get contents size"""
size = self._page.contentsSize().toSize()
return (size.width(), size.height())
def scroll(self, x:int, y:int) -> None:
"""Scroll at position"""
self.execute_script(f'window.scroll({x},{y})')
def enable_devtools(self, enable:bool=True) -> None:
"""Enable or disable devtools"""
if bool(self._dev_view) == enable:
return
if enable:
self._dev_view = QWebEngineView()
page = self._dev_view.page()
self._page.setDevToolsPage(page)
self._dev_view.show()
else:
self._dev_view.deleteLater()
self._dev_view = None
self._page.setDevToolsPage(None)
def _toggle_devtools(self):
self.enable_devtools(not bool(self._dev_view))
def _event_result(self, result):
self._result = result
self._event_loop.exit()
def _download_request(self, item:QWebEngineDownloadItem):
# TODO check url origin
self._download_item = item
item.finished.connect(self._download_finished)
if self._with_progression:
item.downloadProgress.connect(self._download_progress)
item.accept()
def _download_finished(self):
self.log(_LOG_CAT, 'download, done')
state = self._download_item.state()
self._result = True
if state == QWebEngineDownloadItem.DownloadCompleted:
self._result = False
else:
self.log(_LOG_CAT, self._download_item.interruptReasonString())
self._download_item = None
self._event_loop.exit()
def _download_progress(self, bytesReceived, bytesTotal):
self.log(_LOG_CAT, f'download {bytesReceived}/{bytesTotal}')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.quit()
``` |
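A hedged usage sketch of the classes above (the URL and output filename are placeholders; this assumes a working PySide2/QtWebEngine environment): `AppDriver.run` executes the callback inside the Qt event loop, then quits.
```python
from qwebdriver.webdriver import AppDriver

def task(driver):
    driver.get('https://example.com')                        # placeholder URL
    title = driver.execute_script('return document.title')   # JSON-decoded result
    print(title)
    driver.take_screenshot('example.png', frozen_after_ms=200)

app = AppDriver(headless=True, logger=True)
app.run(task)
```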
{
"source": "JonathanPorta/browsepy",
"score": 2
} |
#### File: browsepy/tests/test_module.py
```python
import unittest
import re
import os
import os.path
import shutil
import tempfile
import tarfile
import xml.etree.ElementTree as ET
import io
import stat
import mimetypes
import flask
from werkzeug.exceptions import NotFound
import browsepy
import browsepy.file
import browsepy.manager
import browsepy.__main__
import browsepy.compat
import browsepy.tests.utils as test_utils
PY_LEGACY = browsepy.compat.PY_LEGACY
range = browsepy.compat.range # noqa
class FileMock(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class AppMock(object):
config = browsepy.app.config.copy()
class Page(object):
@classmethod
def itertext(cls, element):
'''
Compatible element.itertext()
'''
if element.text:
yield element.text
for child in element:
for text in cls.itertext(child):
yield text
if child.tail:
yield child.tail
def __init__(self, data, response=None):
self.data = data
self.response = response
@classmethod
def innerText(cls, element, sep=''):
return sep.join(cls.itertext(element))
@classmethod
def from_source(cls, source, response=None):
return cls(source, response)
class ListPage(Page):
path_strip_re = re.compile('\s+/\s+')
def __init__(self, path, directories, files, removable, upload, source,
response=None):
self.path = path
self.directories = directories
self.files = files
self.removable = removable
self.upload = upload
self.source = source
self.response = response
@classmethod
def from_source(cls, source, response=None):
html = ET.fromstring(source)
rows = [
(
row[0].attrib.get('class') == 'icon inode',
row[1].find('.//a').attrib['href'],
any(button.attrib.get('class') == 'button remove'
for button in row[2].findall('.//a'))
)
for row in html.findall('.//table/tbody/tr')
]
return cls(
cls.path_strip_re.sub(
'/',
cls.innerText(html.find('.//h1'), '/')
).strip(),
[url for isdir, url, removable in rows if isdir],
[url for isdir, url, removable in rows if not isdir],
all(removable
for isdir, url, removable in rows
) if rows else False,
html.find('.//form//input[@type=\'file\']') is not None,
source,
response
)
class ConfirmPage(Page):
def __init__(self, path, name, back, source, response=None):
self.path = path
self.name = name
self.back = back
self.source = source
self.response = response
@classmethod
def from_source(cls, source, response=None):
html = ET.fromstring(source)
name = cls.innerText(html.find('.//strong')).strip()
prefix = html.find('.//strong').attrib.get('data-prefix', '')
return cls(
prefix + name,
name,
html.find('.//form[@method=\'get\']').attrib['action'],
source,
response
)
class PageException(Exception):
def __init__(self, status, *args):
self.status = status
super(PageException, self).__init__(status, *args)
class Page404Exception(PageException):
pass
class Page302Exception(PageException):
pass
class TestApp(unittest.TestCase):
module = browsepy
generic_page_class = Page
list_page_class = ListPage
confirm_page_class = ConfirmPage
page_exceptions = {
404: Page404Exception,
302: Page302Exception,
None: PageException
}
def setUp(self):
self.app = self.module.app
self.base = tempfile.mkdtemp()
self.start = os.path.join(self.base, 'start')
self.remove = os.path.join(self.base, 'remove')
self.upload = os.path.join(self.base, 'upload')
self.exclude = os.path.join(self.base, 'exclude')
os.mkdir(self.start)
os.mkdir(self.remove)
os.mkdir(self.upload)
os.mkdir(self.exclude)
open(os.path.join(self.start, 'testfile.txt'), 'w').close()
open(os.path.join(self.remove, 'testfile.txt'), 'w').close()
open(os.path.join(self.exclude, 'testfile.txt'), 'w').close()
def exclude_fnc(path):
return path == self.exclude \
or path.startswith(self.exclude + os.sep)
self.app.config.update(
directory_base=self.base,
directory_start=self.start,
directory_remove=self.remove,
directory_upload=self.upload,
exclude_fnc=exclude_fnc,
SERVER_NAME='test',
)
self.base_directories = [
self.url_for('browse', path='remove'),
self.url_for('browse', path='start'),
self.url_for('browse', path='upload'),
]
self.start_files = [self.url_for('open', path='start/testfile.txt')]
self.remove_files = [self.url_for('open', path='remove/testfile.txt')]
self.upload_files = []
def clear(self, path):
assert path.startswith(self.base + os.sep), \
'Cannot clear directories out of base'
for sub in os.listdir(path):
sub = os.path.join(path, sub)
if os.path.isdir(sub):
shutil.rmtree(sub)
else:
os.remove(sub)
def tearDown(self):
shutil.rmtree(self.base)
test_utils.clear_flask_context()
def get(self, endpoint, **kwargs):
status_code = kwargs.pop('status_code', 200)
follow_redirects = kwargs.pop('follow_redirects', False)
if endpoint in ('index', 'browse'):
page_class = self.list_page_class
elif endpoint == 'remove':
page_class = self.confirm_page_class
elif endpoint == 'sort' and follow_redirects:
page_class = self.list_page_class
else:
page_class = self.generic_page_class
with kwargs.pop('client', None) or self.app.test_client() as client:
response = client.get(
self.url_for(endpoint, **kwargs),
follow_redirects=follow_redirects
)
if response.status_code != status_code:
raise self.page_exceptions.get(
response.status_code,
self.page_exceptions[None]
)(response.status_code)
result = page_class.from_source(response.data, response)
response.close()
test_utils.clear_flask_context()
return result
def post(self, endpoint, **kwargs):
status_code = kwargs.pop('status_code', 200)
data = kwargs.pop('data') if 'data' in kwargs else {}
with kwargs.pop('client', None) or self.app.test_client() as client:
response = client.post(
self.url_for(endpoint, **kwargs),
data=data,
follow_redirects=True
)
if response.status_code != status_code:
raise self.page_exceptions.get(
response.status_code,
self.page_exceptions[None]
)(response.status_code)
result = self.list_page_class.from_source(response.data, response)
test_utils.clear_flask_context()
return result
def url_for(self, endpoint, **kwargs):
with self.app.app_context():
return flask.url_for(endpoint, _external=False, **kwargs)
def test_index(self):
page = self.get('index')
self.assertEqual(page.path, '%s/start' % os.path.basename(self.base))
start = os.path.abspath(os.path.join(self.base, '..'))
self.app.config['directory_start'] = start
self.assertRaises(
Page404Exception,
self.get, 'index'
)
self.app.config['directory_start'] = self.start
def test_browse(self):
basename = os.path.basename(self.base)
page = self.get('browse')
self.assertEqual(page.path, basename)
self.assertEqual(page.directories, self.base_directories)
self.assertFalse(page.removable)
self.assertFalse(page.upload)
page = self.get('browse', path='start')
self.assertEqual(page.path, '%s/start' % basename)
self.assertEqual(page.files, self.start_files)
self.assertFalse(page.removable)
self.assertFalse(page.upload)
page = self.get('browse', path='remove')
self.assertEqual(page.path, '%s/remove' % basename)
self.assertEqual(page.files, self.remove_files)
self.assertTrue(page.removable)
self.assertFalse(page.upload)
page = self.get('browse', path='upload')
self.assertEqual(page.path, '%s/upload' % basename)
self.assertEqual(page.files, self.upload_files)
self.assertFalse(page.removable)
self.assertTrue(page.upload)
self.assertRaises(
Page404Exception,
self.get, 'browse', path='..'
)
self.assertRaises(
Page404Exception,
self.get, 'browse', path='start/testfile.txt'
)
self.assertRaises(
Page404Exception,
self.get, 'browse', path='exclude'
)
def test_open(self):
content = b'hello world'
with open(os.path.join(self.start, 'testfile3.txt'), 'wb') as f:
f.write(content)
page = self.get('open', path='start/testfile3.txt')
self.assertEqual(page.data, content)
self.assertRaises(
Page404Exception,
self.get, 'open', path='../shall_not_pass.txt'
)
def test_remove(self):
open(os.path.join(self.remove, 'testfile2.txt'), 'w').close()
page = self.get('remove', path='remove/testfile2.txt')
self.assertEqual(page.name, 'testfile2.txt')
self.assertEqual(page.path, 'remove/testfile2.txt')
self.assertEqual(page.back, self.url_for('browse', path='remove'))
basename = os.path.basename(self.base)
page = self.post('remove', path='remove/testfile2.txt')
self.assertEqual(page.path, '%s/remove' % basename)
self.assertEqual(page.files, self.remove_files)
os.mkdir(os.path.join(self.remove, 'directory'))
page = self.post('remove', path='remove/directory')
self.assertEqual(page.path, '%s/remove' % basename)
self.assertEqual(page.files, self.remove_files)
self.assertRaises(
Page404Exception,
self.get, 'remove', path='start/testfile.txt'
)
self.assertRaises(
Page404Exception,
self.post, 'remove', path='start/testfile.txt'
)
self.app.config['directory_remove'] = None
self.assertRaises(
Page404Exception,
self.get, 'remove', path='remove/testfile.txt'
)
self.app.config['directory_remove'] = self.remove
self.assertRaises(
Page404Exception,
self.get, 'remove', path='../shall_not_pass.txt'
)
self.assertRaises(
Page404Exception,
self.get, 'remove', path='exclude/testfile.txt'
)
def test_download_file(self):
binfile = os.path.join(self.base, 'testfile.bin')
bindata = bytes(range(256))
with open(binfile, 'wb') as f:
f.write(bindata)
page = self.get('download_file', path='testfile.bin')
os.remove(binfile)
self.assertEqual(page.data, bindata)
self.assertRaises(
Page404Exception,
self.get, 'download_file', path='../shall_not_pass.txt'
)
self.assertRaises(
Page404Exception,
self.get, 'download_file', path='start'
)
self.assertRaises(
Page404Exception,
self.get, 'download_file', path='exclude/testfile.txt'
)
def test_download_directory(self):
binfile = os.path.join(self.start, 'testfile.bin')
excfile = os.path.join(self.start, 'testfile.exc')
bindata = bytes(range(256))
exclude = self.app.config['exclude_fnc']
def tarball_files(path):
page = self.get('download_directory', path=path)
iodata = io.BytesIO(page.data)
with tarfile.open('p.tgz', mode="r:gz", fileobj=iodata) as tgz:
tgz_files = [
member.name
for member in tgz.getmembers()
if member.name
]
tgz_files.sort()
return tgz_files
for path in (binfile, excfile):
with open(path, 'wb') as f:
f.write(bindata)
self.app.config['exclude_fnc'] = None
self.assertEqual(
tarball_files('start'),
['testfile.%s' % x for x in ('bin', 'exc', 'txt')]
)
self.app.config['exclude_fnc'] = lambda p: p.endswith('.exc')
self.assertEqual(
tarball_files('start'),
['testfile.%s' % x for x in ('bin', 'txt')]
)
self.app.config['exclude_fnc'] = exclude
self.assertRaises(
Page404Exception,
self.get, 'download_directory', path='../../shall_not_pass'
)
self.assertRaises(
Page404Exception,
self.get, 'download_directory', path='exclude'
)
def test_upload(self):
def genbytesio(nbytes, encoding):
c = unichr if PY_LEGACY else chr # noqa
return io.BytesIO(''.join(map(c, range(nbytes))).encode(encoding))
files = {
'testfile.txt': genbytesio(127, 'ascii'),
'testfile.bin': genbytesio(255, 'utf-8'),
}
output = self.post(
'upload',
path='upload',
data={
'file%d' % n: (data, name)
for n, (name, data) in enumerate(files.items())
}
)
expected_links = sorted(
self.url_for('open', path='upload/%s' % i)
for i in files
)
self.assertEqual(sorted(output.files), expected_links)
self.clear(self.upload)
self.assertRaises(
Page404Exception,
self.post, 'upload', path='start', data={
'file': (genbytesio(127, 'ascii'), 'testfile.txt')
}
)
def test_upload_duplicate(self):
c = unichr if PY_LEGACY else chr # noqa
files = (
('testfile.txt', 'something'),
('testfile.txt', 'something_new'),
)
output = self.post(
'upload',
path='upload',
data={
'file%d' % n: (io.BytesIO(data.encode('ascii')), name)
for n, (name, data) in enumerate(files)
}
)
self.assertEqual(len(files), len(output.files))
first_file_url = self.url_for('open', path='upload/%s' % files[0][0])
self.assertIn(first_file_url, output.files)
file_contents = []
for filename in os.listdir(self.upload):
with open(os.path.join(self.upload, filename), 'r') as f:
file_contents.append(f.read())
file_contents.sort()
expected_file_contents = sorted(content for filename, content in files)
self.assertEqual(file_contents, expected_file_contents)
self.clear(self.upload)
def test_sort(self):
self.assertRaises(
Page404Exception,
self.get, 'sort', property='text', path='exclude'
)
files = {
'a.txt': 'aaa',
'b.png': 'aa',
'c.zip': 'a'
}
by_name = [
self.url_for('open', path=name)
for name in sorted(files)
]
by_name_desc = list(reversed(by_name))
by_type = [
self.url_for('open', path=name)
for name in sorted(files, key=lambda x: mimetypes.guess_type(x)[0])
]
by_type_desc = list(reversed(by_type))
by_size = [
self.url_for('open', path=name)
for name in sorted(files, key=lambda x: len(files[x]))
]
by_size_desc = list(reversed(by_size))
for name, content in files.items():
path = os.path.join(self.base, name)
with open(path, 'wb') as f:
f.write(content.encode('ascii'))
client = self.app.test_client()
page = self.get('browse', client=client)
self.assertListEqual(page.files, by_name)
self.assertRaises(
Page302Exception,
self.get, 'sort', property='text', client=client
)
page = self.get('browse', client=client)
self.assertListEqual(page.files, by_name)
page = self.get('sort', property='-text', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_name_desc)
page = self.get('sort', property='type', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_type)
page = self.get('sort', property='-type', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_type_desc)
page = self.get('sort', property='size', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_size)
page = self.get('sort', property='-size', client=client,
follow_redirects=True)
self.assertListEqual(page.files, by_size_desc)
        # We're unable to test modified sorting due to filesystem time resolution
page = self.get('sort', property='modified', client=client,
follow_redirects=True)
page = self.get('sort', property='-modified', client=client,
follow_redirects=True)
def test_sort_cookie_size(self):
files = [chr(i) * 150 for i in range(97, 123)]
for name in files:
path = os.path.join(self.base, name)
os.mkdir(path)
client = self.app.test_client()
for name in files:
page = self.get('sort', property='modified', path=name,
client=client, status_code=302)
for cookie in page.response.headers.getlist('set-cookie'):
if cookie.startswith('browse-sorting='):
self.assertLessEqual(len(cookie), 4000)
def test_endpoints(self):
        # test endpoint function for the library use-case
        # likely not to happen when serving due to flask's routing protections
with self.app.app_context():
self.assertIsInstance(
self.module.sort(property='name', path='..'),
NotFound
)
self.assertIsInstance(
self.module.browse(path='..'),
NotFound
)
self.assertIsInstance(
self.module.open_file(path='../something'),
NotFound
)
self.assertIsInstance(
self.module.download_file(path='../something'),
NotFound
)
self.assertIsInstance(
self.module.download_directory(path='..'),
NotFound
)
self.assertIsInstance(
self.module.remove(path='../something'),
NotFound
)
self.assertIsInstance(
self.module.upload(path='..'),
NotFound
)
class TestFile(unittest.TestCase):
module = browsepy.file
def setUp(self):
self.app = browsepy.app # FIXME
self.workbench = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workbench)
test_utils.clear_flask_context()
def textfile(self, name, text):
tmp_txt = os.path.join(self.workbench, name)
with open(tmp_txt, 'w') as f:
f.write(text)
return tmp_txt
def test_iter_listdir(self):
directory = self.module.Directory(path=self.workbench)
tmp_txt = self.textfile('somefile.txt', 'a')
content = list(directory._listdir(precomputed_stats=True))
self.assertEqual(len(content), 1)
self.assertEqual(content[0].size, '1 B')
self.assertEqual(content[0].path, tmp_txt)
content = list(directory._listdir(precomputed_stats=False))
self.assertEqual(len(content), 1)
self.assertEqual(content[0].size, '1 B')
self.assertEqual(content[0].path, tmp_txt)
def test_check_forbidden_filename(self):
cff = self.module.check_forbidden_filename
self.assertFalse(cff('myfilename', destiny_os='posix'))
self.assertTrue(cff('.', destiny_os='posix'))
self.assertTrue(cff('..', destiny_os='posix'))
self.assertTrue(cff('::', destiny_os='posix'))
self.assertTrue(cff('/', destiny_os='posix'))
self.assertTrue(cff('com1', destiny_os='nt'))
self.assertTrue(cff('LPT2', destiny_os='nt'))
self.assertTrue(cff('nul', destiny_os='nt'))
self.assertFalse(cff('com1', destiny_os='posix'))
def test_secure_filename(self):
sf = self.module.secure_filename
self.assertEqual(sf('a/a'), 'a')
self.assertEqual(sf('//'), '')
self.assertEqual(sf('c:\\', destiny_os='nt'), '')
self.assertEqual(sf('c:\\COM1', destiny_os='nt'), '')
self.assertEqual(sf('COM1', destiny_os='nt'), '')
self.assertEqual(sf('COM1', destiny_os='posix'), 'COM1')
def test_mime(self):
f = self.module.File('non_working_path', app=self.app)
self.assertEqual(f.mimetype, 'application/octet-stream')
f = self.module.File('non_working_path_with_ext.txt', app=self.app)
self.assertEqual(f.mimetype, 'text/plain')
tmp_txt = self.textfile('ascii_text_file', 'ascii text')
# test file command
if browsepy.compat.which('file'):
f = self.module.File(tmp_txt, app=self.app)
self.assertEqual(f.mimetype, 'text/plain; charset=us-ascii')
self.assertEqual(f.type, 'text/plain')
self.assertEqual(f.encoding, 'us-ascii')
# test non-working file command
bad_path = os.path.join(self.workbench, 'path')
os.mkdir(bad_path)
bad_file = os.path.join(bad_path, 'file')
with open(bad_file, 'w') as f:
f.write('#!/usr/bin/env bash\nexit 1\n')
os.chmod(bad_file, os.stat(bad_file).st_mode | stat.S_IEXEC)
old_path = os.environ['PATH']
os.environ['PATH'] = bad_path
try:
f = self.module.File(tmp_txt, app=self.app)
self.assertEqual(f.mimetype, 'application/octet-stream')
finally:
os.environ['PATH'] = old_path
def test_size(self):
test_file = os.path.join(self.workbench, 'test.csv')
with open(test_file, 'wb') as f:
f.write(b',\n' * 512)
f = self.module.File(test_file, app=self.app)
default = self.app.config['use_binary_multiples']
self.app.config['use_binary_multiples'] = True
self.assertEqual(f.size, '1.00 KiB')
self.app.config['use_binary_multiples'] = False
self.assertEqual(f.size, '1.02 KB')
self.app.config['use_binary_multiples'] = default
def test_properties(self):
empty_file = os.path.join(self.workbench, 'empty.txt')
open(empty_file, 'w').close()
f = self.module.File(empty_file, app=self.app)
self.assertEqual(f.name, 'empty.txt')
self.assertEqual(f.can_download, True)
self.assertEqual(f.can_remove, False)
self.assertEqual(f.can_upload, False)
self.assertEqual(f.parent.path, self.workbench)
self.assertEqual(f.is_directory, False)
def test_choose_filename(self):
f = self.module.Directory(self.workbench, app=self.app)
first_file = os.path.join(self.workbench, 'testfile.txt')
filename = f.choose_filename('testfile.txt', attempts=0)
self.assertEqual(filename, 'testfile.txt')
open(first_file, 'w').close()
filename = f.choose_filename('testfile.txt', attempts=0)
self.assertNotEqual(filename, 'testfile (2).txt')
filename = f.choose_filename('testfile.txt', attempts=2)
self.assertEqual(filename, 'testfile (2).txt')
second_file = os.path.join(self.workbench, filename)
open(second_file, 'w').close()
filename = f.choose_filename('testfile.txt', attempts=3)
self.assertEqual(filename, 'testfile (3).txt')
filename = f.choose_filename('testfile.txt', attempts=2)
self.assertNotEqual(filename, 'testfile (2).txt')
class TestFileFunctions(unittest.TestCase):
module = browsepy.file
def test_fmt_size(self):
fnc = self.module.fmt_size
for n, unit in enumerate(self.module.binary_units):
self.assertEqual(fnc(2**(10 * n)), (1, unit))
for n, unit in enumerate(self.module.standard_units):
self.assertEqual(fnc(1000**n, False), (1, unit))
def test_secure_filename(self):
self.assertEqual(self.module.secure_filename('/path'), 'path')
self.assertEqual(self.module.secure_filename('..'), '')
self.assertEqual(self.module.secure_filename('::'), '')
self.assertEqual(self.module.secure_filename('\0'), '_')
self.assertEqual(self.module.secure_filename('/'), '')
self.assertEqual(self.module.secure_filename('C:\\'), '')
self.assertEqual(
self.module.secure_filename('COM1.asdf', destiny_os='nt'),
'')
self.assertEqual(
self.module.secure_filename('\xf1', fs_encoding='ascii'),
'_')
if PY_LEGACY:
expected = unicode('\xf1', encoding='latin-1') # noqa
self.assertEqual(
self.module.secure_filename('\xf1', fs_encoding='utf-8'),
expected)
self.assertEqual(
self.module.secure_filename(expected, fs_encoding='utf-8'),
expected)
else:
self.assertEqual(
self.module.secure_filename('\xf1', fs_encoding='utf-8'),
'\xf1')
def test_alternative_filename(self):
self.assertEqual(
self.module.alternative_filename('test', 2),
'test (2)')
self.assertEqual(
self.module.alternative_filename('test.txt', 2),
'test (2).txt')
self.assertEqual(
self.module.alternative_filename('test.tar.gz', 2),
'test (2).tar.gz')
self.assertEqual(
self.module.alternative_filename('test.longextension', 2),
'test (2).longextension')
self.assertEqual(
self.module.alternative_filename('test.tar.tar.tar', 2),
'test.tar (2).tar.tar')
self.assertNotEqual(
self.module.alternative_filename('test'),
'test')
def test_relativize_path(self):
self.assertEqual(
self.module.relativize_path(
'/parent/child',
'/parent',
'/'),
'child')
self.assertEqual(
self.module.relativize_path(
'/grandpa/parent/child',
'/grandpa/parent',
'/'),
'child')
self.assertEqual(
self.module.relativize_path(
'/grandpa/parent/child',
'/grandpa',
'/'),
'parent/child')
self.assertRaises(
browsepy.OutsideDirectoryBase,
self.module.relativize_path, '/other', '/parent', '/'
)
def test_under_base(self):
self.assertTrue(
self.module.check_under_base('C:\\as\\df\\gf', 'C:\\as\\df', '\\'))
self.assertTrue(self.module.check_under_base('/as/df', '/as', '/'))
self.assertFalse(
self.module.check_under_base('C:\\cc\\df\\gf', 'C:\\as\\df', '\\'))
self.assertFalse(self.module.check_under_base('/cc/df', '/as', '/'))
class TestMain(unittest.TestCase):
module = browsepy.__main__
def setUp(self):
self.app = browsepy.app
self.parser = self.module.ArgParse(sep=os.sep)
self.base = tempfile.mkdtemp()
self.exclude_file = os.path.join(self.base, '.ignore')
with open(self.exclude_file, 'w') as f:
f.write('.ignore\n')
def tearDown(self):
shutil.rmtree(self.base)
def test_defaults(self):
result = self.parser.parse_args([])
self.assertEqual(result.host, '127.0.0.1')
self.assertEqual(result.port, 8080)
self.assertEqual(result.directory, os.getcwd())
self.assertEqual(result.initial, None)
self.assertEqual(result.removable, None)
self.assertEqual(result.upload, None)
self.assertListEqual(result.exclude, [])
self.assertListEqual(result.exclude_from, [])
self.assertEqual(result.plugin, [])
def test_params(self):
plugins = ['plugin_1', 'plugin_2', 'namespace.plugin_3']
result = self.parser.parse_args([
'127.1.1.1',
'5000',
'--directory=%s' % self.base,
'--initial=%s' % self.base,
'--removable=%s' % self.base,
'--upload=%s' % self.base,
'--exclude=a',
'--exclude-from=%s' % self.exclude_file,
] + [
'--plugin=%s' % plugin
for plugin in plugins
])
self.assertEqual(result.host, '127.1.1.1')
self.assertEqual(result.port, 5000)
self.assertEqual(result.directory, self.base)
self.assertEqual(result.initial, self.base)
self.assertEqual(result.removable, self.base)
self.assertEqual(result.upload, self.base)
self.assertListEqual(result.exclude, ['a'])
self.assertListEqual(result.exclude_from, [self.exclude_file])
self.assertEqual(result.plugin, plugins)
result = self.parser.parse_args([
'--directory', self.base,
'--plugin', ','.join(plugins),
'--exclude', '/.*'
])
self.assertEqual(result.directory, self.base)
self.assertEqual(result.plugin, plugins)
self.assertListEqual(result.exclude, ['/.*'])
result = self.parser.parse_args([
'--directory=%s' % self.base,
'--initial='
])
self.assertEqual(result.host, '127.0.0.1')
self.assertEqual(result.port, 8080)
self.assertEqual(result.directory, self.base)
self.assertIsNone(result.initial)
self.assertIsNone(result.removable)
self.assertIsNone(result.upload)
self.assertListEqual(result.exclude, [])
self.assertListEqual(result.exclude_from, [])
self.assertListEqual(result.plugin, [])
self.assertRaises(
SystemExit,
self.parser.parse_args,
['--directory=%s' % __file__]
)
self.assertRaises(
SystemExit,
self.parser.parse_args,
['--exclude-from=non-existing']
)
def test_exclude(self):
result = self.parser.parse_args([
'--exclude', '/.*',
'--exclude-from', self.exclude_file,
])
extra = self.module.collect_exclude_patterns(result.exclude_from)
self.assertListEqual(extra, ['.ignore'])
match = self.module.create_exclude_fnc(
result.exclude + extra, '/b', sep='/')
self.assertTrue(match('/b/.a'))
self.assertTrue(match('/b/.a/b'))
self.assertFalse(match('/b/a/.a'))
self.assertTrue(match('/b/a/.ignore'))
match = self.module.create_exclude_fnc(
result.exclude + extra, 'C:\\b', sep='\\')
self.assertTrue(match('C:\\b\\.a'))
self.assertTrue(match('C:\\b\\.a\\b'))
self.assertFalse(match('C:\\b\\a\\.a'))
self.assertTrue(match('C:\\b\\a\\.ignore'))
def test_main(self):
params = {}
self.module.main(
argv=[],
run_fnc=lambda app, **kwargs: params.update(kwargs)
)
defaults = {
'host': '127.0.0.1',
'port': 8080,
'debug': False,
'threaded': True
}
params_subset = {k: v for k, v in params.items() if k in defaults}
self.assertEqual(defaults, params_subset)
class TestMimetypePluginManager(unittest.TestCase):
module = browsepy.manager
def test_mimetype(self):
manager = self.module.MimetypePluginManager()
self.assertEqual(
manager.get_mimetype('potato'),
'application/octet-stream'
)
self.assertEqual(
manager.get_mimetype('potato.txt'),
'text/plain'
)
manager.register_mimetype_function(
lambda x: 'application/xml' if x == 'potato' else None
)
self.assertEqual(
manager.get_mimetype('potato.txt'),
'text/plain'
)
self.assertEqual(
manager.get_mimetype('potato'),
'application/xml'
)
class TestPlugins(unittest.TestCase):
app_module = browsepy
manager_module = browsepy.manager
def setUp(self):
self.app = self.app_module.app
self.original_namespaces = self.app.config['plugin_namespaces']
self.plugin_namespace, self.plugin_name = __name__.rsplit('.', 1)
self.app.config['plugin_namespaces'] = (self.plugin_namespace,)
self.manager = self.manager_module.PluginManager(self.app)
def tearDown(self):
self.app.config['plugin_namespaces'] = self.original_namespaces
self.manager.clear()
test_utils.clear_flask_context()
def test_manager(self):
self.manager.load_plugin(self.plugin_name)
self.assertTrue(self.manager._plugin_loaded)
endpoints = sorted(
action.endpoint
for action in self.manager.get_widgets(FileMock(mimetype='a/a'))
)
self.assertEqual(
endpoints,
sorted(('test_x_x', 'test_a_x', 'test_x_a', 'test_a_a'))
)
self.assertEqual(
self.app.view_functions['test_plugin.root'](),
'test_plugin_root'
)
self.assertIn('test_plugin', self.app.blueprints)
self.assertRaises(
self.manager_module.PluginNotFoundError,
self.manager.load_plugin,
'non_existent_plugin_module'
)
self.assertRaises(
self.manager_module.InvalidArgumentError,
self.manager.register_widget
)
def test_namespace_prefix(self):
self.assertTrue(self.manager.import_plugin(self.plugin_name))
self.app.config['plugin_namespaces'] = (
self.plugin_namespace + '.test_',
)
self.assertTrue(self.manager.import_plugin('module'))
def register_plugin(manager):
manager._plugin_loaded = True
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_x_x',
filter=lambda f: True
)
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_a_x',
filter=lambda f: f.mimetype.startswith('a/')
)
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_x_a',
filter=lambda f: f.mimetype.endswith('/a')
)
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_a_a',
filter=lambda f: f.mimetype == 'a/a'
)
manager.register_widget(
type='button',
place='entry-actions',
endpoint='test_b_x',
filter=lambda f: f.mimetype.startswith('b/')
)
test_plugin_blueprint = flask.Blueprint(
'test_plugin',
__name__,
url_prefix='/test_plugin_blueprint')
test_plugin_blueprint.add_url_rule(
'/',
endpoint='root',
view_func=lambda: 'test_plugin_root')
manager.register_blueprint(test_plugin_blueprint)
``` |
{
"source": "jonathanpotter/thunderx-smoke",
"score": 4
} |
#### File: jonathanpotter/thunderx-smoke/numeric-integration.py
```python
from pyspark import SparkContext
sc = SparkContext(appName = "NumericIntegration")
import numpy as np
from scipy.integrate import quad
# Use NumPy to define a simple function and sample it between 0 and 10 at 200 points
def f(x):
return (x-3)*(x-5)*(x-7)+85
x = np.linspace(0, 10, 200)
#x = np.linspace(0, 10, 200000)
#x = np.linspace(0, 10, 200000000)
y = f(x)
# Use NumPy to choose a region to integrate over and take only a few points in that region
a, b = 1, 8 # the left and right boundaries
N = 5 # the number of points
xint = np.linspace(a, b, N)
yint = f(xint)
# Compute the integral both at high accuracy and with the trapezoid approximation
# Use SciPy to calculate the integral
integral, error = quad(f, a, b)
print("The integral is:", integral, "+/-", error)
# Use NumPy to calculate the area with the trapezoid approximation
integral_trapezoid = sum( (xint[1:] - xint[:-1]) * (yint[1:] + yint[:-1])
) / 2
print("The trapezoid approximation with", len(xint),
"points is:", integral_trapezoid)
print("############################################################################")
print("Python Version {0}".format(sc.pythonVer))
print("NumPy Version {0}".format(np.__version__))
print("NumPy and SciPy are working!")
print("############################################################################")
```
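As a cross-check of the manual trapezoid sum above, the same five sample points can be fed to NumPy's built-in `np.trapz`; both give about 559.89 for this function, versus the high-accuracy value of 565.25 reported by `quad`.
```python
import numpy as np

def f(x):
    return (x - 3) * (x - 5) * (x - 7) + 85

xint = np.linspace(1, 8, 5)
yint = f(xint)
manual = np.sum((xint[1:] - xint[:-1]) * (yint[1:] + yint[:-1])) / 2
print(manual, np.trapz(yint, xint))  # 559.890625 559.890625
```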
#### File: jonathanpotter/thunderx-smoke/word-count.py
```python
from pyspark import SparkContext
sc = SparkContext(appName = "WordCountExample")
# Create an RDD in PySpark from large text file in HDFS
rdd = sc.textFile("/data/complete-works-of-shakespeare.txt")
# Create function to make it all lower-case and split the lines into words,
# creating a new RDD with each element being a word.
def Func(lines):
lines = lines.lower()
lines = lines.split()
return lines
rdd_flat = rdd.flatMap(Func)
# Do a word count using a map-reduce like function.
# Map each word with a count of 1 like a key-value pair where the value is 1.
rdd_mapped = rdd_flat.map(lambda x: (x,1))
# Then group each count by key.
rdd_grouped = rdd_mapped.groupByKey()
# Take the sum of each word, then swap the key value pair order,
# then sort by value instead of key.
rdd_frequency = rdd_grouped.mapValues(sum).map(lambda x: (x[1],x[0])).sortByKey(False)
# Print the 10 most frequent words.
print("The 10 most frequent words: {0}".format(rdd_frequency.take(10)))
``` |
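A common variant of the same count (a sketch reusing the `sc` context created above, on a tiny in-memory RDD) replaces `groupByKey` plus `mapValues(sum)` with `reduceByKey`, which combines partial counts on each partition before the shuffle:
```python
sample = sc.parallelize(["to be or not to be", "to be is to do"])
counts = (
    sample.flatMap(lambda line: line.lower().split())
          .map(lambda word: (word, 1))
          .reduceByKey(lambda a, b: a + b)   # combine counts per word
          .map(lambda kv: (kv[1], kv[0]))    # swap to (count, word)
          .sortByKey(False)                  # most frequent first
)
print(counts.take(3))  # e.g. [(4, 'to'), (3, 'be'), (1, 'or')]
```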
{
"source": "jonathanpwolfe/Tests_Demo",
"score": 3
} |
#### File: Tests_Demo/POMs/Login.py
```python
from DriverStuff import DriverClass
import os
class Login:
def __init__(self):
self.driver = DriverClass.Driver().driver
def getSignIn(self):
return self.driver.find_element_by_link_text("Sign in")
def clickSignIn(self):
self.getSignIn().click()
def getUsernameField(self):
return self.driver.find_element_by_id('login_field')
def getPasswordField(self):
return self.driver.find_element_by_id('password')
def enterUsername(self):
        self.getUsernameField().send_keys(os.getenv('GITHUB_USER'))
def enterPassword(self):
        self.getPasswordField().send_keys(os.getenv('GITHUB_PASS'))
```
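A hypothetical usage of the page object above, assuming GITHUB_USER and GITHUB_PASS are exported in the environment and the DriverClass wrapper can start a browser:
```python
login = Login()
login.clickSignIn()
login.enterUsername()
login.enterPassword()
```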
#### File: Tests_Demo/Tests/conftest.py
```python
import pytest
from Navigation import Navigation
@pytest.fixture()
def setUp(self):
self.Nav = Navigation.Navigation()
``` |
{
"source": "jonathanqv/pygsflow",
"score": 2
} |
#### File: pygsflow/autotest/test_015.py
```python
import os
import numpy as np
from gsflow.utils import Raster
from gsflow.output import PrmsDiscretization
ws = os.path.abspath(os.path.dirname(__file__))
def test_raster_sampling_methods():
import gsflow
from gsflow.utils import Raster
rws = os.path.join(ws, "..", "examples", "data", "geospatial")
iws = os.path.join(ws, "..", "examples", "data", "sagehen", "modflow")
raster_name = "dem.img"
try:
rio = Raster.load(os.path.join(rws, raster_name))
except:
return
ml = gsflow.modflow.Modflow.load(
"saghen.nam", version="mfnwt",
model_ws=iws
)
xoff = 214110
yoff = 4366620
ml.modelgrid.set_coord_info(xoff, yoff)
x0, x1, y0, y1 = rio.bounds
x0 += 3000
y0 += 3000
x1 -= 3000
y1 -= 3000
shape = np.array([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)])
rio.crop(shape)
methods = {
"min": 2088.52343,
"max": 2103.54882,
"mean": 2097.05035,
"median": 2097.36254,
"mode": 2088.52343,
"nearest": 2097.81079,
"linear": 2097.81079,
"cubic": 2097.81079,
}
for method, value in methods.items():
data = rio.resample_to_grid(
ml.modelgrid, band=rio.bands[0], method=method, no_numba=True
)
print(data[34, 37])
if np.abs(data[34, 37] - value) > 1e-05:
raise AssertionError(
f"{method} resampling returning incorrect values"
)
def test_raster_sampling_methods_numba():
try:
from numba import jit
except ImportError:
return
import gsflow
from gsflow.utils import Raster
rws = os.path.join(ws, "..", "examples", "data", "geospatial")
iws = os.path.join(ws, "..", "examples", "data", "sagehen", "modflow")
raster_name = "dem.img"
try:
rio = Raster.load(os.path.join(rws, raster_name))
except:
return
ml = gsflow.modflow.Modflow.load(
"saghen.nam", version="mfnwt",
model_ws=iws
)
xoff = 214110
yoff = 4366620
ml.modelgrid.set_coord_info(xoff, yoff)
x0, x1, y0, y1 = rio.bounds
x0 += 3000
y0 += 3000
x1 -= 3000
y1 -= 3000
shape = np.array([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)])
rio.crop(shape)
methods = {
"min": 2088.52343,
"max": 2103.54882,
"mean": 2097.05035,
"median": 2097.36254,
"mode": 2088.0, # note some precision is lost in the C mode routine
"nearest": 2097.81079,
"linear": 2097.81079,
"cubic": 2097.81079,
}
for method, value in methods.items():
data = rio.resample_to_grid(
ml.modelgrid, band=rio.bands[0], method=method
)
print(data[34, 37])
if np.abs(data[34, 37] - value) > 1e-05:
raise AssertionError(
f"{method} resampling returning incorrect values"
)
if __name__ == "__main__":
test_raster_sampling_methods()
test_raster_sampling_methods_numba()
```
#### File: examples/frontiers/sagehen_50m.py
```python
import os
import utm
import platform
import flopy
import numpy as np
import shapefile
import matplotlib.pyplot as plt
from gsflow.utils import Raster
from flopy.plot import styles
from gsflow import GsflowModel, PrmsModel, PrmsData
from gsflow.builder import (
GenerateFishnet,
ModflowBuilder,
ControlFileBuilder,
PrmsBuilder,
FlowAccumulation
)
import gsflow.builder.builder_utils as bu
def nash_sutcliffe_efficiency(qsim, qobs, flg):
if flg:
qsim = np.log(qsim)
qobs = np.log(qobs)
qsim[np.isinf(qsim)] = np.nan
qobs[np.isinf(qobs)] = np.nan
numerator = np.nansum((qsim - qobs) ** 2)
denominator = np.nansum((qobs - np.nanmean(qobs)) ** 2)
nse = 1 - (numerator / denominator)
return nse
def build_lut(f, dtype=int):
d = {}
with open(f) as foo:
for line in foo:
temp = line.strip().split("#")[0]
if not temp:
continue
else:
l = temp.split(":")
d[dtype(l[0])] = float(l[1])
return d
if __name__ == "__main__":
sample_grid = True
# set file names here
ws = os.path.abspath(os.path.dirname(__file__))
iws = os.path.join(ws, "..", "data", "geospatial")
ows = os.path.join(ws, "temp")
if not os.path.exists(ows):
os.mkdir(ows)
dem_file = os.path.join(iws, 'dem.img')
pour_point_file = os.path.join(iws, "model_points.shp")
resampled_dem = os.path.join(ows, "sagehen_50m_med.txt")
stream_threshold = 810000 # m3 of drainage area
cellsize = 50
# generate a "Fishnet"
modelgrid = GenerateFishnet(dem_file, xcellsize=cellsize, ycellsize=cellsize)
    # resample DEM to the model grid using median elevation
if sample_grid:
raster = Raster.load(dem_file)
dem = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="median",
multithread=True,
thread_pool=12
)
np.savetxt(resampled_dem, dem, delimiter=" ")
else:
dem = np.genfromtxt(resampled_dem)
hru_type = np.ones((modelgrid.nrow, modelgrid.ncol), dtype=int)
fa = FlowAccumulation(dem,
modelgrid.xcellcenters,
modelgrid.ycellcenters,
hru_type=hru_type, verbose=True)
flow_dir = fa.flow_directions(dijkstra=True, breach=0.001)
flow_acc = fa.flow_accumulation()
# read in pour point from shapefile and set watershed boundary
with shapefile.Reader(pour_point_file) as r:
shape = r.shape(0)
pour_point = shape.points
watershed = fa.define_watershed(pour_point, modelgrid, fmt='xy')
threshold = stream_threshold / (cellsize ** 2)
strm_obj = fa.make_streams(flow_dir, flow_acc, threshold)
cascades = fa.get_cascades(strm_obj)
i = np.floor(3646 / modelgrid.ncol)
j = 3646 % modelgrid.ncol
mfbuild = ModflowBuilder(modelgrid, dem, "sagehen_50m")
botm = dem - 100
botm.shape = (1, modelgrid.nrow, modelgrid.ncol)
ml = mfbuild.build_all(strm_obj.reach_data,
strm_obj.segment_data,
strm_obj.irunbnd,
finf=np.ones(dem.shape),
botm=botm,
ibound=watershed.astype(int),
iuzfbnd=watershed.astype(int)
)
# update dis file to create a transient model
flopy.modflow.ModflowDis(
ml,
nlay=ml.dis.nlay,
nrow=ml.dis.nrow,
ncol=ml.dis.ncol,
nper=2,
delr=ml.dis.delr,
delc=ml.dis.delc,
laycbd=ml.dis.laycbd,
top=ml.dis.top,
botm=ml.dis.botm,
perlen=[1, 5356],
nstp=[1, 5356],
tsmult=[1, 1],
steady=[True, False],
itmuni=ml.dis.itmuni,
lenuni=ml.dis.lenuni
)
# update a few SFR parameters for GSFLOW!
ml.sfr.segment_data[0]["flow"] *= 0
ml.sfr.segment_data[0]["roughch"] = 0.04
ml.sfr.reach_data["strhc1"] = 0.1
# tune some of the other MODFLOW parameters
ml.upw.hk *= 1.75e-03
ml.upw.ss *= 1.0
prms_outfile = os.path.join(ows, "sagehen.param")
prmsbuild = PrmsBuilder(
strm_obj,
cascades,
modelgrid,
fa.get_dem_data().ravel(),
hru_type=watershed,
hru_subbasin=watershed
)
param_obj = prmsbuild.build()
lat, lon = utm.to_latlon(
modelgrid.xcellcenters.ravel(),
modelgrid.ycellcenters.ravel(),
10,
"N"
)
param_obj.set_values("hru_lat", lat)
param_obj.set_values("hru_lon", lon)
sample_rasters = True
nhru = modelgrid.nrow * modelgrid.ncol
# load in rasters and luts for parameterizing prms
veg_type_raster = os.path.join(iws, "us_140evt_utm.img")
veg_cov_raster = os.path.join(iws, "us_140evc_utm.img")
awc_raster = os.path.join(iws, "awc.img")
clay_raster = os.path.join(iws, "clay.img")
ksat_raster = os.path.join(iws, "ksat.img")
sand_raster = os.path.join(iws, "sand.img")
impervious_raster = os.path.join(iws, "nlcd2011_imp_utm.img")
prism = {"ppt_utm": [], "tmax_utm": [], "tmin_utm": []}
for folder in prism.keys():
for f in os.listdir(os.path.join(iws, "climate", folder)):
if os.path.isfile(os.path.join(iws, "climate", folder, f)) and f.endswith(".img"):
prism[folder].append(os.path.join(iws, "climate", folder, f))
resampled_veg_type = os.path.join(ows, "veg_type_nearest_50.txt")
resampled_veg_cov = os.path.join(ows, "veg_cov_nearest_50.txt")
resampled_awc = os.path.join(ows, "awc_median_50.txt")
resampled_clay = os.path.join(ows, "clay_median_50.txt")
resampled_ksat = os.path.join(ows, "ksat_median_50.txt")
resampled_sand = os.path.join(ows, "sand_median_50.txt")
resampled_impervious = os.path.join(ows, "impervious_median_50.txt")
resampled_ppt = os.path.join(ows, "ppt_bilinear_50.txt")
resampled_tmax = os.path.join(ows, 'tmax_bilinear_50.txt')
resampled_tmin = os.path.join(ows, 'tmin_bilinear_50.txt')
covtype_remap = os.path.join(iws, "..", 'remaps', "landfire", "covtype.rmp")
covden_sum_remap = os.path.join(iws, "..", 'remaps', "landfire", "covdensum.rmp")
covden_win_remap = os.path.join(iws, "..", 'remaps', "landfire", "covdenwin.rmp")
root_depth_remap = os.path.join(iws, "..", 'remaps', "landfire", 'rtdepth.rmp')
snow_intcp_remap = os.path.join(iws, "..", "remaps", "landfire", "snow_intcp.rmp")
srain_intcp_remap = os.path.join(iws, "..", "remaps", "landfire", "srain_intcp.rmp")
climate_dataframe = os.path.join(iws, 'climate', "sagehen_climate.csv")
climate_lapse_rates = os.path.join(iws, 'climate', 'sagehen_lapse_rates.csv')
if sample_rasters:
ibound = watershed.astype(int)
raster = Raster.load(veg_type_raster)
veg_type = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="nearest",
multithread=True,
thread_pool=12
)
veg_type[ibound == 0] = 0
veg_type = veg_type.astype(int)
np.savetxt(resampled_veg_type, veg_type, fmt="%d")
raster = Raster.load(veg_cov_raster)
veg_cov = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="nearest",
multithread=True,
thread_pool=12
)
veg_cov[ibound == 0] = 0
veg_cov = veg_cov.astype(int)
np.savetxt(resampled_veg_cov, veg_cov, fmt="%d")
raster = Raster.load(awc_raster)
awc = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="median",
multithread=True,
thread_pool=12
)
awc[ibound == 0] = 0
awc[awc == raster.nodatavals[0]] = np.nanmedian(awc)
np.savetxt(resampled_awc, awc)
raster = Raster.load(ksat_raster)
ksat = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="median",
multithread=True,
thread_pool=12
)
ksat[ibound == 0] = 0
ksat[ksat == raster.nodatavals[0]] = np.nanmedian(ksat)
np.savetxt(resampled_ksat, ksat)
raster = Raster.load(sand_raster)
sand = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="median",
multithread=True,
thread_pool=12
)
sand[ibound == 0] = 0
sand[sand == raster.nodatavals[0]] = np.nanmedian(sand)
sand /= 100
np.savetxt(resampled_sand, sand)
raster = Raster.load(clay_raster)
clay = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="median",
multithread=True,
thread_pool=12
)
clay[ibound == 0] = 0
clay[clay == raster.nodatavals[0]] = np.nanmedian(clay)
clay /= 100
np.savetxt(resampled_clay, clay)
raster = Raster.load(impervious_raster)
impervious = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="median",
multithread=True,
thread_pool=12
)
impervious[ibound == 0] = 0
impervious /= 100
np.savetxt(resampled_impervious, impervious)
ppt = []
for rstr in prism["ppt_utm"]:
raster = Raster.load(rstr)
tppt = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="linear",
multithread=True,
thread_pool=12
)
ppt.append(tppt.ravel())
ppt = np.array(ppt)
np.savetxt(resampled_ppt, ppt)
tmin = []
for rstr in prism["tmin_utm"]:
raster = Raster.load(rstr)
ttmin = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="linear",
multithread=True,
thread_pool=12
)
tmin.append(ttmin.ravel())
tmin = np.array(tmin)
np.savetxt(resampled_tmin, tmin)
tmax = []
for rstr in prism["tmax_utm"]:
raster = Raster.load(rstr)
ttmax = raster.resample_to_grid(
modelgrid,
band=raster.bands[0],
method="linear",
multithread=True,
thread_pool=12
)
tmax.append(ttmax.ravel())
tmax = np.array(tmax)
np.savetxt(resampled_tmax, tmax)
else:
veg_type = np.genfromtxt(resampled_veg_type, dtype=int)
veg_cov = np.genfromtxt(resampled_veg_cov, dtype=int)
awc = np.genfromtxt(resampled_awc)
ksat = np.genfromtxt(resampled_ksat)
sand = np.genfromtxt(resampled_sand)
clay = np.genfromtxt(resampled_clay)
impervious = np.genfromtxt(resampled_impervious)
ppt = np.genfromtxt(resampled_ppt)
tmax = np.genfromtxt(resampled_tmax)
tmin = np.genfromtxt(resampled_tmin)
ppt.shape = (12, nhru)
tmax.shape = (12, nhru)
tmin.shape = (12, nhru)
covtype_lut = build_lut(covtype_remap)
covden_sum_lut = build_lut(covden_sum_remap)
covden_win_lut = build_lut(covden_win_remap)
root_depth_lut = build_lut(root_depth_remap)
snow_intcp_lut = build_lut(snow_intcp_remap)
srain_intcp_lut = build_lut(srain_intcp_remap)
# read in "climate dataframe"
import pandas as pd
cdf = pd.read_csv(climate_dataframe)
ldf = pd.read_csv(climate_lapse_rates)
    # build vegetative cover parameters
covtype = bu.covtype(veg_type, covtype_lut)
covden_sum = bu.covden_sum(veg_cov, covden_sum_lut)
covden_win = bu.covden_win(covtype.values, covden_win_lut)
rad_trncf = bu.rad_trncf(covden_win.values)
snow_intcp = bu.snow_intcp(veg_type, snow_intcp_lut)
srain_intcp = bu.srain_intcp(veg_type, srain_intcp_lut)
wrain_intcp = bu.wrain_intcp(veg_type, snow_intcp_lut)
# add veg to param_obj
param_obj.add_record_object(covtype, True)
param_obj.add_record_object(covden_sum, True)
param_obj.add_record_object(covden_win, True)
param_obj.add_record_object(rad_trncf, True)
param_obj.add_record_object(snow_intcp, True)
param_obj.add_record_object(srain_intcp, True)
param_obj.add_record_object(wrain_intcp, True)
# build soil parameters
root_depth = bu.root_depth(veg_type, root_depth_lut)
hru_aspect = bu.d8_to_hru_aspect(flow_dir)
hru_slope = bu.d8_to_hru_slope(
flow_dir,
dem,
modelgrid.xcellcenters,
modelgrid.ycellcenters
)
soil_type = bu.soil_type(clay, sand)
soil_moist_max = bu.soil_moist_max(awc, root_depth)
soil_moist_init = bu.soil_moist_init(soil_moist_max.values)
soil_rech_max = bu.soil_rech_max(awc, root_depth)
ssr2gw_rate = bu.ssr2gw_rate(ksat, sand, soil_moist_max.values)
ssr2gw_sq = bu.ssr2gw_exp(nhru)
soil_rech_init = bu.soil_rech_init(soil_rech_max.values)
slowcoef_lin = bu.slowcoef_lin(ksat, hru_aspect.values, cellsize, cellsize)
slowcoef_sq = bu.slowcoef_sq(
ksat,
hru_aspect.values,
sand,
soil_moist_max.values,
cellsize,
cellsize
)
# add soil parameters to prms object
param_obj.add_record_object(hru_slope, replace=True)
param_obj.add_record_object(hru_aspect, replace=True)
param_obj.add_record_object(soil_type, replace=True)
param_obj.add_record_object(soil_moist_max, replace=True)
param_obj.add_record_object(soil_moist_init, replace=True)
param_obj.add_record_object(soil_rech_max, replace=True)
param_obj.add_record_object(soil_rech_init, replace=True)
param_obj.add_record_object(ssr2gw_rate, replace=True)
param_obj.add_record_object(ssr2gw_sq, replace=True)
param_obj.add_record_object(slowcoef_lin, replace=True)
param_obj.add_record_object(slowcoef_sq, replace=True)
# imperviousness parameters
hru_percent_imperv = bu.hru_percent_imperv(impervious)
carea_max = bu.carea_max(impervious)
# add imperv to prms obj
param_obj.add_record_object(hru_percent_imperv, replace=True)
param_obj.add_record_object(carea_max, replace=True)
# climate parameters
param_obj.add_record(name="nobs", values=[1,])
outlet_sta = modelgrid.intersect(pour_point[0][0], pour_point[0][1])
outlet_sta = modelgrid.get_node([(0,) + outlet_sta])
print(outlet_sta)
cdf = bu.add_prms_date_columns_to_df(cdf, "date")
cdf.rename(
columns={
'precip': 'precip_0',
'tmin': 'tmin_0',
'tmax': 'tmax_0',
'runoff': 'runoff_0',
'date': 'Date'
},
inplace=True
)
# reorder dataframe to later build a prms Data object from it
cdfcols = [
"Year", "Month", "Day", "Hour", "Minute", "Second",
"tmax_0", "tmin_0", "precip_0", "runoff_0", "Date"
]
cdf = cdf[cdfcols]
# start climate parameter calculations
mean_ppt = bu.get_mean_monthly_from_df(cdf, 'precip_0')
cdf["tmax_0"] = bu.fahrenheit_to_celsius(cdf["tmax_0"].values)
cdf["tmin_0"] = bu.fahrenheit_to_celsius(cdf["tmin_0"].values)
mean_tmax = bu.get_mean_monthly_from_df(cdf, "tmax_0", temperature=True)
mean_tmin = bu.get_mean_monthly_from_df(cdf, "tmin_0", temperature=True)
rain_adj = bu.rain_adj(ppt, mean_ppt)
snow_adj = bu.snow_adj(ppt, mean_ppt)
tmin_lapse = bu.tmin_lapse(ldf.tmin_lapse.values * (5 / 9))
tmax_lapse = bu.tmax_lapse(ldf.tmax_lapse.values * (5 / 9))
tmax_adj = bu.tmax_adj(nhru)
tmin_adj = bu.tmin_adj(nhru)
jh_coef = bu.calculate_jensen_haise(dem, mean_tmin, mean_tmax)
# add climate parameters to param obj
param_obj.add_record_object(rain_adj, replace=True)
param_obj.add_record_object(snow_adj, replace=True)
param_obj.add_record_object(tmin_lapse, replace=True)
param_obj.add_record_object(tmax_lapse, replace=True)
param_obj.add_record_object(tmax_adj, replace=True)
param_obj.add_record_object(tmin_adj, replace=True)
param_obj.add_record_object(jh_coef, replace=True)
param_obj.add_record(
"outlet_sta",
values=[outlet_sta[0] + 1,],
dimensions=[["one", 1]],
datatype=1
)
param_obj.add_record(
"id_obsrunoff",
values=[outlet_sta[0] + 1, ],
dimensions=[["one", 1]],
datatype=1
)
param_obj.add_record(
"tsta_elev",
values=[1932.4,],
dimensions=[["ntemp", 1]],
datatype=2
)
# build the prms data file
prmsdata = PrmsData(data_df=cdf)
control_obj = ControlFileBuilder().build("saghen_50m", param_obj, ml)
# build the PrmsModel
prms = PrmsModel(control_obj, parameters=param_obj, data=prmsdata)
gsf = GsflowModel(control=control_obj, prms=prms, mf=ml)
gsf.control.set_values("start_time", [1982, 10, 1, 0, 0, 0])
gsf.control.add_record("end_time", values=[1996, 9, 31, 0, 0, 0])
gsf.control.add_record("print_debug", values=[0, ])
gsf.control.add_record("modflow_time_zero", values=[1982, 10, 1, 0, 0, 0])
gsf.control.add_record("data_file", values=["sagehen_50m.data", ])
gsf.control.add_record("srunoff_module", values=["srunoff_smidx"])
gsf.control.set_values("model_mode", values=["GSFLOW5"])
gsf.control.set_values("subbasin_flag", values=[0, ])
gsf.control.set_values("parameter_check_flag", values=[0, ])
gsf.control.add_record("statsON_OFF", values=[1])
gsf.control.add_record("nstatVars", values=[6])
gsf.control.add_record("statVar_element", values=["1", "1", "1", "1", "1", "1"])
gsf.control.add_record("statVar_names",
values=["runoff",
"basin_cfs",
"basin_ssflow_cfs",
"basin_gwflow_cfs",
"basin_sroff_cfs",
"basin_dunnian"])
gsf.control.add_record("stat_var_file", values=["statvar.dat"])
    # Modify PRMS parameters for calibration
# temp dist
tmax_lapse = gsf.prms.parameters.get_values('tmax_lapse')
tmin_lapse = gsf.prms.parameters.get_values('tmin_lapse')
tmax_lapse = tmax_lapse + 1.2 #0.7
tmin_lapse = tmin_lapse + 1.2 #0.7
gsf.prms.parameters.set_values("tmax_lapse", values=tmax_lapse)
gsf.prms.parameters.set_values("tmin_lapse", values=tmin_lapse)
max_missing = gsf.prms.parameters.get_values('max_missing')
max_missing = max_missing*2
gsf.prms.parameters.set_values("max_missing", values=max_missing)
# snow
tmax_allsnow = gsf.prms.parameters.get_values('tmax_allsnow')
tmax_allsnow[:] = 0.7
gsf.prms.parameters.set_values("tmax_allsnow", values=tmax_allsnow)
value = [2.1, 2.1, 2.1, 2.1, 2.1, 2.1, 2.1, 2.1, 2.1, 2.1, 2.1, 2.1]
gsf.prms.parameters.add_record("tmax_allrain_offset", values=value, dimensions=[('nmonths', 12)])
covden_win = gsf.prms.parameters.get_values('covden_win')
rad_trncf = gsf.prms.parameters.get_values('rad_trncf')
rad_trncf = 0.8 * covden_win # correlated to covden_win
gsf.prms.parameters.set_values("rad_trncf", values=rad_trncf)
# ET
soil_moist_max = gsf.prms.parameters.get_values('soil_moist_max')
soil_moist_max = soil_moist_max * 3.0
gsf.prms.parameters.set_values("soil_moist_max", values=soil_moist_max)
value = [0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03]
gsf.prms.parameters.add_record("jh_coef", values=value, dimensions=[('nmonths', 12)])
# runoff
snowinfil_max = gsf.prms.parameters.get_values('snowinfil_max')
snowinfil_max = snowinfil_max * 5.0
gsf.prms.parameters.set_values("snowinfil_max", values=snowinfil_max)
smidx_coef = gsf.prms.parameters.get_values('smidx_coef')
smidx_coef = smidx_coef / 100.0
smidx_exp = gsf.prms.parameters.get_values('smidx_exp')
smidx_exp = smidx_exp / 100.0
carea_max = gsf.prms.parameters.get_values('carea_max')
carea_max = carea_max / 100.0
gsf.prms.parameters.set_values("smidx_coef", values=smidx_coef)
gsf.prms.parameters.set_values("smidx_exp", values=smidx_exp)
gsf.prms.parameters.set_values("carea_max", values=carea_max)
# interflow
slowcoef_sq = gsf.prms.parameters.get_values('slowcoef_sq')
slowcoef_sq = slowcoef_sq * 0.1
gsf.prms.parameters.set_values("slowcoef_sq", values=slowcoef_sq)
slowcoef_lin = gsf.prms.parameters.get_values('slowcoef_lin')
slowcoef_lin = slowcoef_lin * 3.0
gsf.prms.parameters.set_values("slowcoef_lin", values=slowcoef_lin)
# Recharge
ssr2gw_rate = gsf.prms.parameters.get_values('ssr2gw_rate')
ssr2gw_rate = ssr2gw_rate * 500.0
gsf.prms.parameters.set_values("ssr2gw_rate", values=ssr2gw_rate)
sat_threshold = gsf.prms.parameters.get_values('sat_threshold')
sat_threshold = sat_threshold / 3
gsf.prms.parameters.set_values("sat_threshold", values=sat_threshold)
# clean unused parameters
par_to_remove = ["gw_up_id", "gw_down_id", "gw_strmseg_down_id", "gw_pct_up"]
for par_ in par_to_remove:
gsf.prms.parameters.remove_record(par_)
gsf.write_input(basename="sagehen_50m",
workspace=ows)
gsf.prms.parameters.remove_record("adjmix_rain")
exe_name = os.path.join("..", "..", "bin", "gsflow")
if platform.system().lower() == "windows":
exe_name += ".exe"
    # reload the model to ensure that it has valid formatting
gsf = GsflowModel.load_from_file(os.path.join(ows, "sagehen_50m_cont.control"))
gsf.run_model(gsflow_exe=exe_name)
# load PRMS output with simulated and measured streamflow and flow components
stats = gsf.prms.get_StatVar()
# get data from 10/1/1985 onward
stats = stats[1096:]
stats.reset_index(inplace=True, drop=True)
# Calculate N-S and Log(N-S)
nse_val = nash_sutcliffe_efficiency(stats.basin_cfs_1, stats.runoff_1,
False)
nse_val_log = nash_sutcliffe_efficiency(stats.basin_cfs_1, stats.runoff_1,
True)
nse_val_log_str = str(nse_val_log)
print(nse_val, nse_val_log)
gw_seepage = stats.basin_cfs_1.values.copy() - (
stats.basin_ssflow_cfs_1.values.copy() +
stats.basin_sroff_cfs_1.values.copy() +
stats.basin_dunnian_1.values.copy()
)
with styles.USGSMap():
fig, axis = plt.subplots(2, 1, figsize=(10, 6))
plt.rcParams.update({'font.size': 100})
axis[0].plot(stats.Date, stats.basin_cfs_1, color='r', linewidth=2.2, label='simulated 50m calibration, NSE=0.74')
axis[0].plot(stats.Date, stats.runoff_1, '--', color='b', linewidth=1.5, label='measured')
handles, labels = axis[0].get_legend_handles_labels()
axis[0].legend(handles, labels, bbox_to_anchor=(0.25, 0.65))
axis[0].set_xlabel("Date")
axis[0].set_ylabel("Streamflow, in cfs")
axis[0].set_ylim(0, 300)
plt.xlabel("Date")
plt.ylabel("Streamflow, in cfs")
plt.ylim(0, 300)
with styles.USGSMap():
axis[1].set_xlabel("Date")
axis[1].set_ylabel("Flow Components, in cfs")
axis[1].set_yscale("log")
plt.xlabel("Date")
plt.ylabel("Flow Components, in cfs")
plt.yscale("log")
plt.ylim(1.0e-3, 1.0e4)
axis[1].plot(stats.Date, stats.basin_ssflow_cfs_1, color='r', linewidth=1.5, label='Interflow')
axis[1].plot(stats.Date, gw_seepage, color='purple', linewidth=1.5, label='Groundwater seepage')
axis[1].plot(stats.Date, stats.basin_sroff_cfs_1, color='y', linewidth=1.5, label='Hortonian runoff')
axis[1].plot(stats.Date, stats.basin_dunnian_1, color='b', linewidth=1.5, label='Dunnian runoff')
handles, labels = axis[1].get_legend_handles_labels()
axis[1].legend(handles, labels, bbox_to_anchor=(0.25, 0.65))
plt.tight_layout()
plt.show()
```
#### File: gsflow/builder/builder_utils.py
```python
import os
import numpy as np
import pandas as pd
from ..prms import ParameterRecord
def build_lut(f, dtype=int):
"""
Method to load a remap file into a lut
Parameters
----------
f : str
file name
dtype : type
int or float type, defaults to int
Returns
-------
dict
"""
d = {}
with open(f) as foo:
for line in foo:
temp = line.strip().split("#")[0]
if not temp:
continue
else:
l = temp.split(":")
d[dtype(l[0])] = float(l[1])
return d
def covtype(cov_resampled, lut):
"""
    Method to associate resampled vegetation type to PRMS covtype
Parameters
----------
cov_resampled : np.ndarray
lut : dict
Returns
-------
gsflow.prms.ParameterRecord object
"""
values = np.array([lut[i] for i in cov_resampled.ravel()], dtype=int)
record = ParameterRecord(
"cov_type",
values,
dimensions=[
["nhru", values.size],
],
datatype=1,
)
return record
def root_depth(cov_resampled, lut):
"""
Method to get a rooting depth from cover type. This is an intermediate
array used to calculate other parameters
Parameters
----------
cov_resampled : np.ndarray
    lut : dict
Returns
-------
np.ndarray of rooting depths
"""
values = np.array([lut[i] for i in cov_resampled.ravel()], dtype=float)
return values
def covden_sum(cov_resampled, lut):
"""
    Method to associate resampled vegetation coverage to PRMS covden_sum
Parameters
----------
cov_resampled : np.ndarray
lut : dict
Returns
-------
gsflow.prms.ParameterRecord object
"""
covden = np.array([lut[i] for i in cov_resampled.ravel()], dtype=float)
covden = covden / 100.0
covden = ParameterRecord(
"covden_sum",
covden,
dimensions=[
["nhru", covden.size],
],
datatype=2,
)
return covden
def covden_win(cov_resampled, lut):
"""
    Method to associate resampled vegetation coverage to PRMS covden_win
Parameters
----------
cov_resampled : np.ndarray
lut : dict
Returns
-------
gsflow.prms.ParameterRecord object
"""
covden = covden_sum(cov_resampled, lut)
covden.name = "covden_win"
return covden
def rad_trncf(covden_win):
"""
Method to calculate rad_trncf from the covden_win parameter values
Parameters
----------
covden_win : np.ndarray
Returns
-------
gsflow.prms.ParameterRecord object
"""
values = 0.9917 * np.exp(-2.7557 * covden_win)
values[values < 1e-5] = 0
record = ParameterRecord(
"rad_trncf",
values,
dimensions=[
["nhru", values.size],
],
datatype=2,
)
return record
def snow_intcp(cov_resampled, lut):
"""
    Method to associate resampled vegetation type to PRMS snow_intcp
Parameters
----------
cov_resampled : np.ndarray
lut : dict
Returns
-------
gsflow.prms.ParameterRecord object
"""
values = np.array([lut[i] for i in cov_resampled.ravel()], dtype=float)
record = ParameterRecord(
"snow_intcp",
values,
dimensions=[
["nhru", values.size],
],
datatype=2,
)
return record
def srain_intcp(cov_resampled, lut):
"""
    Method to associate resampled vegetation type to PRMS srain_intcp
Parameters
----------
cov_resampled : np.ndarray
lut : dict
Returns
-------
gsflow.prms.ParameterRecord object
"""
record = snow_intcp(cov_resampled, lut)
record.name = "srain_intcp"
return record
def wrain_intcp(cov_resampled, lut):
"""
    Method to associate resampled vegetation type to PRMS wrain_intcp
Parameters
----------
cov_resampled : np.ndarray
lut : dict
Returns
-------
gsflow.prms.ParameterRecord object
"""
record = snow_intcp(cov_resampled, lut)
record.name = "wrain_intcp"
return record
def soil_type(clay, sand):
"""
Method to determine soil type from resampled clay and sand percentages
Parameters
----------
clay : np.ndarray
sand : np.ndarray
Returns
-------
gsflow.prms.ParameterRecord
"""
values = np.zeros(clay.size, dtype=int)
values = np.where(clay.ravel() >= 0.40, 3, values)
values = np.where(sand.ravel() >= 0.50, 1, values)
values[values == 0] = 2
record = ParameterRecord(
"soil_type",
values,
dimensions=[
["nhru", values.size],
],
datatype=1,
)
return record
def soil_moist_max(awc, root_depth):
"""
Method to calculate the soil_moist_max parameter
Parameters
----------
awc : np.ndarray
available water content
root_depth : np.ndarray
Returns
-------
gsflow.prms.ParameterRecord object
"""
values = awc.ravel() * root_depth.ravel()
values[values < 3] = 3
values[values > 20] = 20
record = ParameterRecord(
"soil_moist_max",
values,
dimensions=[
["nhru", values.size],
],
datatype=2,
)
return record
def soil_moist_init(soil_moist_max, factor=0.1):
"""
    Method to calculate the soil_moist_init parameter
Parameters
----------
soil_moist_max : np.ndarray
maximum soil moisture water content
factor : float
scaling factor
Returns
-------
gsflow.prms.ParameterRecord object
"""
values = soil_moist_max * factor
record = ParameterRecord(
"soil_moist_init",
values,
dimensions=[
["nhru", values.size],
],
datatype=2,
)
return record
def soil_rech_max(awc, root_depth, max_depth=18):
"""
Method to calculate the soil_rech_max parameter
Parameters
----------
awc : np.ndarray
available water content
root_depth : np.ndarray
max_depth : float
maximum rooting depth to calculate recharge (default 18)
Returns
-------
gsflow.prms.ParameterRecord object
"""
values = np.where(
root_depth.ravel() > max_depth,
awc.ravel() * max_depth,
awc.ravel() * root_depth.ravel(),
)
values[values < 1e-05] = 1e-05
values[values > 20] = 20
record = ParameterRecord(
"soil_rechr_max",
values,
dimensions=[
["nhru", values.size],
],
datatype=2,
)
return record
def soil_rech_init(soil_rech_max, factor=0.1):
"""
Method to calculate the soil_rech_init parameter
Parameters
----------
soil_rech_max : np.ndarray
maximum recharge water content
factor : float
scaling factor
Returns
-------
gsflow.prms.ParameterRecord object
"""
record = soil_moist_init(soil_rech_max, factor)
record.name = "soil_rechr_init"
return record
def ssr2gw_rate(ksat, sand, soil_moist_max):
"""
Method to calculate the ssr2gw_rate from ksat (inches/day) and % sand
Parameters
----------
ksat : np.ndarray
sand : np.ndarray
soil_moist_max : np.ndarray
Return
------
gsflow.prms.ParameterRecord object
"""
values = ksat.ravel() / (sand.ravel() * soil_moist_max.ravel())
values[np.isnan(values)] = 1e-04
values[values > 999] = 999
values[values < 1e-04] = 1e-04
record = ParameterRecord(
"ssr2gw_rate",
values,
dimensions=[
["nssr", values.size],
],
datatype=2,
)
return record
def ssr2gw_exp(nhru):
"""
    Method to calculate the ssr2gw_sq parameter for PRMS
Parameters
----------
nhru : int
Return
------
gsflow.prms.ParameterRecord object
"""
values = np.zeros((nhru,))
record = ParameterRecord(
"ssr2gw_sq",
values,
dimensions=[
["nssr", nhru],
],
datatype=2,
)
return record
def slowcoef_lin(ksat, aspect, dx, dy):
"""
Method to calculate the slowcoef_lin parameter in prms
Parameters
----------
ksat : np.ndarray
aspect : np.ndarray
dx : float
grid dx dimension for a single cell
dy : float
grid dy dimension for a single cell
Returns
-------
gsflow.prms.ParameterRecord object
"""
hypot = np.sqrt(dx**2 + dy**2)
hru_len = np.where((aspect == 90) | (aspect == 270), dx, dy)
hru_len = np.where(
(aspect != 0) & (aspect != 90) & (aspect != 180) & (aspect != 270),
hypot,
hru_len,
)
values = (
0.1 * np.abs(ksat.ravel() * np.sin(aspect * np.pi / 180.0)) / hru_len
)
values[np.isnan(values)] = 0
values[values < 1e-05] = 0
record = ParameterRecord(
"slowcoef_lin",
values,
dimensions=[
["nhru", values.size],
],
datatype=2,
)
return record
def slowcoef_sq(ksat, aspect, sand, soil_moist_max, dx, dy):
"""
    Method to calculate the slowcoef_sq parameter in prms
Parameters
----------
ksat : np.ndarray
aspect : np.ndarray
sand : np.ndarray
fraction of sand in soil
soil_moist_max : np.ndarray
soil moist max
dx : float
grid dx dimension for a single cell
dy : float
grid dy dimension for a single cell
Returns
-------
gsflow.prms.ParameterRecord object
"""
hypot = np.sqrt(dx**2 + dy**2)
hru_len = np.where((aspect == 90) | (aspect == 270), dx, dy)
hru_len = np.where(
(aspect != 0) & (aspect != 90) & (aspect != 180) & (aspect != 270),
hypot,
hru_len,
)
values = 0.9 * np.abs(
(ksat.ravel() * np.sin(aspect * np.pi / 180.0))
/ (hru_len * (sand.ravel() * soil_moist_max.ravel()))
)
values[np.isnan(values)] = 0
values[values < 1e-05] = 0
record = ParameterRecord(
"slowcoef_sq",
values,
dimensions=[
["nhru", values.size],
],
datatype=2,
)
return record
def hru_percent_imperv(resampled_imperv):
"""
Method to set hru_percent_imperv parameter
Parameters
----------
resampled_imperv : np.ndarray
Returns
-------
gsflow.prms.ParameterRecord object
"""
record = ParameterRecord(
"hru_percent_imperv",
resampled_imperv.ravel(),
dimensions=[
["nhru", resampled_imperv.size],
],
datatype=2,
)
return record
def carea_max(resampled_imperv):
"""
Method to set carea_max parameter
Parameters
----------
resampled_imperv : np.ndarray
Returns
-------
gsflow.prms.ParameterRecord object
"""
record = hru_percent_imperv(resampled_imperv)
record.name = "carea_max"
return record
def rain_adj(resampled_ppt, mean_monthly_sta):
"""
Method to calculate rain_adj from a single station
Parameters
----------
resampled_ppt : np.ndarray
mean_monthly_sta : np.ndarray
Returns
-------
gsflow.prms.ParameterRecord object
"""
mean_monthly_sta.shape = (mean_monthly_sta.size, 1)
values = resampled_ppt / mean_monthly_sta
values[values > 10] = 10
values[values < 0.5] = 0.5
record = ParameterRecord(
"rain_adj",
values.ravel(),
dimensions=[
["nhru", values.shape[-1]],
["nmonths", mean_monthly_sta.size],
],
datatype=2,
)
return record
def snow_adj(resampled_ppt, mean_monthly_sta):
"""
    Method to calculate snow_adj from a single station
Parameters
----------
resampled_ppt : np.ndarray
mean_monthly_sta : np.ndarray
Returns
-------
gsflow.prms.ParameterRecord object
"""
record = rain_adj(resampled_ppt, mean_monthly_sta)
record.name = "snow_adj"
return record
def tmax_lapse(lapse_rates):
"""
Method to calculate tmax_lapse parameter
Parameters
----------
lapse_rates : np.ndarray
Returns
-------
gsflow.prms.ParameterRecord object
"""
record = ParameterRecord(
"tmax_lapse",
lapse_rates.ravel(),
dimensions=[["nmonths", len(lapse_rates)]],
datatype=2,
)
return record
def tmin_lapse(lapse_rates):
"""
    Method to calculate tmin_lapse parameter
Parameters
----------
lapse_rates : np.ndarray
Returns
-------
gsflow.prms.ParameterRecord object
"""
record = tmax_lapse(lapse_rates)
record.name = "tmin_lapse"
return record
def tmax_adj(nhru, nmonths=12):
"""
Method to calculate tmax_adj from a single station
Parameters
----------
nhru : int
    nmonths : int
Returns
-------
gsflow.prms.ParameterRecord object
"""
values = np.zeros((nhru * nmonths))
record = ParameterRecord(
"tmax_adj",
values,
dimensions=[["nhru", nhru], ["nmonths", nmonths]],
datatype=2,
)
return record
def tmin_adj(nhru, nmonths=12):
"""
Method to calculate tmin_adj from a single station
Parameters
----------
nhru : int
    nmonths : int
Returns
-------
gsflow.prms.ParameterRecord object
"""
values = np.zeros((nhru * nmonths))
record = ParameterRecord(
"tmin_adj",
values,
dimensions=[["nhru", nhru], ["nmonths", nmonths]],
datatype=2,
)
return record
def calculate_jensen_haise(dem, tmin_mean, tmax_mean):
"""
Method to calculate the Jensen Haise coefficient for PRMS
Parameters
----------
dem : np.ndarray
tmin_mean : np.ndarray
        array of monthly mean values for tmin
tmax_mean : np.ndarray
array of monthly mean values for tmax
Returns
-------
gsflow.prms.ParameterRecord object
"""
nhru = dem.size
dem = np.ravel(dem)
idx = np.where(tmax_mean == np.max(tmax_mean))[0]
tmax = tmax_mean[idx[0]]
tmin = tmin_mean[idx[0]]
jh_coef = 27.5 - 0.25 * (ea(tmax) - ea(tmin)) - (dem / 1000.0)
jh_coef = ParameterRecord(
"jh_coef_hru", jh_coef, dimensions=[["nhru", nhru]], datatype=2
)
return jh_coef
def ea(tempc):
"""
Method to calculate ea for Jensen Haise equation
Parameters
----------
tempc : np.ndarray
monthly mean temperatures
Returns
-------
np.ndarray
"""
tea = 6.1078 * np.exp((17.269 * tempc) / (tempc + 237.3))
return tea
def d8_to_hru_aspect(flow_directions):
"""
    Method to get cell aspect from a D8 flow direction array
Parameters
----------
flow_directions : np.ndarray
numpy array of flow directions
Returns
-------
gsflow.prms.ParameterRecord object of aspect angles (0 - 360 deg)
"""
d8_to_aspect = {
-2: 0,
-1: 0,
32: 315,
64: 0,
128: 45,
16: 270,
1: 90,
8: 225,
4: 180,
2: 135,
}
aspect = np.array(
[d8_to_aspect[i] for i in flow_directions.ravel()], dtype=float
)
aspect = ParameterRecord(
"hru_aspect",
aspect,
dimensions=[
["nhru", aspect.size],
],
datatype=2,
)
return aspect
def d8_to_hru_slope(flow_directions, dem, xcenters, ycenters):
"""
    Method to calculate slopes from a D8 flow direction array
Parameters
----------
flow_directions : np.ndarray
2d numpy array of flow directions
dem : np.ndarray
2d numpy array of dem elevations
xcenters : np.ndarray
modelgrid x-centers
ycenters : np.ndarray
modelgrid y-centers
Returns
-------
gsflow.prms.ParameterRecord object
"""
ioff = {
32: -1,
64: -1,
128: -1,
16: 0,
1: 0,
8: 1,
4: 1,
2: 1,
-1: 0,
-2: 0,
}
joff = {
32: -1,
64: 0,
128: 1,
16: -1,
1: 1,
8: -1,
4: 0,
2: 1,
-1: 0,
-2: 0,
}
slope = np.zeros(flow_directions.shape, dtype=float)
for i, row in enumerate(flow_directions):
for j, fdir in enumerate(row):
i1 = i + ioff[fdir]
j1 = j + joff[fdir]
asq = (xcenters[i, j] - xcenters[i1, j1]) ** 2
bsq = (ycenters[i, j] - ycenters[i1, j1]) ** 2
dist = np.sqrt(asq + bsq)
delta_elev = dem[i, j] - dem[i1, j1]
if dist == 0:
continue
else:
slope[i, j] = delta_elev / dist
slope[slope < 1e-05] = 0
slope[slope > 10] = 10
slope = ParameterRecord(
"hru_slope",
slope.ravel(),
dimensions=[
["nhru", slope.size],
],
datatype=2,
)
return slope
def add_prms_date_columns_to_df(df, column):
"""
Method to add date columns for PRMS to a pandas dataframe
Parameters
----------
df : pd.DataFrame
column : str
date column name
Returns
-------
pd.Dataframe
"""
df["Year"] = pd.DatetimeIndex(df[column]).year
df["Month"] = pd.DatetimeIndex(df[column]).month
df["Day"] = pd.DatetimeIndex(df[column]).day
df["Hour"] = pd.DatetimeIndex(df[column]).hour
df["Minute"] = pd.DatetimeIndex(df[column]).minute
df["Second"] = pd.DatetimeIndex(df[column]).second
return df
def get_mean_monthly_from_df(df, column, nodataval=-999, temperature=False):
"""
Method to calculate mean monthly values from a dataframe
Parameters
----------
df : pd.Dataframe
column : str
data column name
nodataval : float
        no data value (set to nan when calculating the means)
temperature : bool
boolean flag to indicate this is temperature (does not sum())
Returns
-------
np.ndarray
"""
means = []
for month in range(1, 13):
tdf = df[df.Month == month]
tdf[tdf[column] == nodataval] = np.nan
if temperature:
tdf = tdf.groupby(by=["Year"], as_index=False)[column].mean()
else:
tdf = tdf.groupby(by=["Year"], as_index=False)[column].sum()
means.append(tdf[column].mean())
return np.array(means)
def fahrenheit_to_celsius(deg_f, nodataval=-999.0):
"""
Method to convert fahrenheit to celsius
Parameters
----------
deg_f : float, np.ndarray
nodataval : float
value of nodata instances
Returns
-------
float, np.ndarray
"""
if isinstance(deg_f, (int, float)):
deg_f = np.array([deg_f])
elif isinstance(deg_f, list):
deg_f = np.array(deg_f)
deg_c = np.where(deg_f != nodataval, (deg_f - 32.0) * (5.0 / 9.0), -999.0)
if deg_c.size == 1:
return deg_c[0]
else:
return deg_c
``` |
{
"source": "Jonathan-R0/TeoriaDeAlgoritmos",
"score": 3
} |
#### File: TP1/src/ParseadorTest.py
```python
import pytest
import unittest
from Parseador import Parseador
class ParseadorTest(unittest.TestCase):
def testParseadorDevuelveContratoEsperado(self):
parser = Parseador("TestCases/Test1.txt")
contrato = parser.getProximoContrato()
assert contrato is not None
assert contrato.t_inicio == 1
assert contrato.t_final == 3
assert contrato.nombre == "Test"
def testParseadorOmiteContratoInvalido(self):
parser = Parseador("TestCases/Test2.txt")
contrato = parser.getProximoContrato()
assert contrato is None
def testParseadorObtenerTodosLosContratos(self):
parser = Parseador("TestCases/Test3.txt")
contratos = parser.getTodosLosContratos()
assert len(contratos) == 2
assert contratos[0].t_inicio == 1
assert contratos[0].t_final == 3
assert contratos[1].t_inicio == 4
assert contratos[1].t_final == 5
```
#### File: TP2/src/JuegoTest.py
```python
import pytest
import unittest
from Juego import JuegoCartas
class JuegoTest(unittest.TestCase):
def testSeisCartas(self):
juego = JuegoCartas([3, 8, 11, 1, 19, 12])
assert juego.getMaxPick() == 33
def testDosCartas(self):
juego = JuegoCartas([1, 7])
assert juego.getMaxPick() == 7
def testCuatroCartas(self):
juego = JuegoCartas([11, 12, 13, 14])
assert juego.getMaxPick() == 26
def testCincoCartas(self):
juego = JuegoCartas([2, 23, 20, 7, 2])
assert juego.getMaxPick() == 24
        # In this case the player inevitably loses.
``` |
{
"source": "JonathanRaiman/dali-cython-stub",
"score": 3
} |
#### File: dali-cython-stub/dali/activation.py
```python
from dali.core import MatOps as ops
class tanh_object(object):
def __call__(self, *args, **kwargs):
return ops.tanh(*args, **kwargs)
class relu_object(object):
def __call__(self, *args, **kwargs):
return ops.relu(*args, **kwargs)
class sigmoid_object(object):
def __call__(self, *args, **kwargs):
return ops.sigmoid(*args, **kwargs)
class identity_object(object):
def __call__(self, *args, **kwargs):
assert len(args) == 1
assert len(kwargs) == 0
return args[0]
tanh = tanh_object()
relu = relu_object()
sigmoid = sigmoid_object()
identity = identity_object()
__all__ = ["tanh", "relu", "sigmoid", "identity"]
```
#### File: dali/models/mlp.py
```python
import dali.core as D
class MLP(object):
def __init__(self, input_sizes, hiddens, nonlinearities):
self.input_sizes = input_sizes
self.hiddens = hiddens
self.input_nonlinearity, self.layer_nonlinearities = nonlinearities[0], nonlinearities[1:]
self.input_layer = D.StackedInputLayer(input_sizes, hiddens[0])
self.layers = [D.Layer(h_from, h_to) for h_from, h_to in zip(hiddens[:-1], hiddens[1:])]
def activate(self, inputs):
assert len(self.layers) == len(self.layer_nonlinearities)
hidden = self.input_nonlinearity(self.input_layer.activate(inputs))
for l, nonlinearity in zip(self.layers, self.layer_nonlinearities):
hidden = nonlinearity(l.activate(hidden))
return hidden
def parameters(self):
ret = self.input_layer.parameters()
for l in self.layers:
ret.extend(l.parameters())
return ret
def name_parameters(self, prefix):
self.input_layer.name_parameters(prefix + "_input_layer")
for layer_idx, layer in enumerate(self.layers):
layer.name_parameters(prefix + '_layer%d' % (layer_idx,))
```
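To make the constructor contract explicit: the first nonlinearity is applied after the stacked input layer, and the remaining ones pair with each hidden-to-hidden `Layer`. A hedged instantiation sketch (sizes and import paths are illustrative assumptions):
```python
# Hypothetical sketch; assumes a working dali install (dali.core is compiled).
from dali.activation import relu, identity
from dali.models.mlp import MLP

# Two input streams (sizes 10 and 20) projected to 64 hidden units,
# then 64 -> 32 -> 5; one nonlinearity per projection, three in total.
mlp = MLP(input_sizes=[10, 20],
          hiddens=[64, 32, 5],
          nonlinearities=[relu, relu, identity])
params = mlp.parameters()
```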
#### File: dali/utils/capture.py
```python
from collections import defaultdict
class Capture(object):
instances = set()
def __init__(self):
self.state = defaultdict(lambda: [])
@classmethod
def add(cls, name, value):
for instance in cls.instances:
instance.state[name].append(value)
def __enter__(self):
Capture.instances.add(self)
def __exit__(self, *args, **kwargs):
Capture.instances.remove(self)
```
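`Capture` registers itself as a class-level instance on `__enter__`, so any code that calls `Capture.add` while the context is open gets its values recorded. A small hypothetical sketch:
```python
# Hypothetical sketch of the Capture context manager defined above.
capture = Capture()
with capture:
    Capture.add("loss", 0.5)
    Capture.add("loss", 0.25)

print(capture.state["loss"])  # -> [0.5, 0.25]
```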
#### File: dali/utils/misc.py
```python
import dill as pickle
import inspect
import numpy as np
import types
from os import makedirs, listdir
from os.path import join, exists
import dali.core as D
class RunningAverage(object):
def __init__(self, alpha=0.95):
self.alpha = alpha
self.value = None
def update(self, measurement):
if self.value is None:
self.value = measurement
else:
self.value = (self.alpha * self.value +
(1.0 - self.alpha) * measurement)
def __float__(self):
return float(self.value)
def apply_recursively_on_type(x, f, target_type, list_callback=None):
if type(x) == target_type:
return f(x)
elif type(x) == list or isinstance(x, types.GeneratorType):
ret = [ apply_recursively_on_type(el, f, target_type, list_callback) for el in x]
if list_callback and all(type(el) == target_type for el in x):
ret = list_callback(ret)
return ret
elif type(x) == dict:
res = {}
for k,v in x.items():
res[k] = apply_recursively_on_type(v, f, target_type, list_callback)
return res
else:
return x
def integer_ceil(a, b):
return (a + b - 1) // b
def subsample(seq, maximum_length):
if seq == []:
return seq
return seq[::integer_ceil(len(seq), maximum_length)]
def median_smoothing(signal, window=10):
res = []
for i in range(window, len(signal)):
actual_window = signal[i-window:i]
res.append(np.median(actual_window))
return res
def pickle_from_scope(directory, variables, caller_globals=None, caller_locals=None):
if not exists(directory):
makedirs(directory)
if caller_globals is None or caller_locals is None:
stack = inspect.stack()
if caller_globals is None:
caller_globals = stack[1][0].f_globals
if caller_locals is None:
caller_locals = stack[1][0].f_locals
del stack
for var in variables:
with open(join(directory, var + ".pkz"), "wb") as f:
value = caller_locals.get(var) or caller_globals.get(var)
assert value is not None
pickle.dump(value, f)
def unpickle_as_dict(directory, whitelist=None, extension='.pkz'):
assert exists(directory)
res = {}
for file_name in listdir(directory):
if file_name.endswith(extension):
var_name = file_name[:-len(extension)]
if whitelist is None or var_name in whitelist:
with open(join(directory, file_name), "rb") as f:
res[var_name] = pickle.load(f)
return res
def add_device_args(parser):
parser.add_argument("--device", type=str, default='gpu', choices=['gpu','cpu'], help="Whether model should run on GPU or CPU.")
parser.add_argument("--gpu_id", type=int, default=0, help="Which GPU to use (zero-indexed just like in CUDA APIs)")
def set_device_from_args(args, verbose=False):
D.config.default_device = args.device
if args.device == 'gpu':
D.config.default_gpu = args.gpu_id
if verbose:
print("Using %s" % (D.config.gpu_id_to_name(args.gpu_id)))
__all__ = [
"apply_recursively_on_type",
"integer_ceil",
"subsample",
"median_smoothing",
"pickle_from_scope",
"unpickle_as_dict",
"RunningAverage",
"add_device_args",
"set_device_from_args"
]
```
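`apply_recursively_on_type` is the generic traversal reused elsewhere (for example by `Vocab` below); a hypothetical illustration of its behaviour on nested containers:
```python
# Hypothetical illustration; assumes dali is installed so the module imports cleanly.
from dali.utils.misc import apply_recursively_on_type

nested = {"words": ["cat", "dog"], "count": 2}
result = apply_recursively_on_type(nested, str.upper, str)
# Strings nested inside lists/dicts are transformed, other values pass through:
# result == {"words": ["CAT", "DOG"], "count": 2}
```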
#### File: dali/utils/throttled.py
```python
import time
class Throttled(object):
def __init__(self, min_time_since_last_run_s=5):
"""Used for simple throttled execution.
Here's a simple example:
@Throttled(1)
def lol(i):
print('epoch %d' % (i,), flush=True)
for i in range(100000000):
lol(i)
Above code will report the epoch every second.
Here's another way:
throttled = Throttled(1)
for i in range(100000000000):
if throttled.should_i_run():
print('epoch %d' % (i,), flush=True)
"""
self.last_time = None
self.min_time_since_last_run_s = min_time_since_last_run_s
def should_i_run(self, min_time_since_last_run_s=None):
min_time_since_last_run_s = min_time_since_last_run_s or self.min_time_since_last_run_s
now = time.time()
if self.last_time is None or (now - self.last_time) > min_time_since_last_run_s:
self.last_time = now
return True
else:
return False
def maybe_run(self, f, min_time_since_last_run_s=None):
if self.should_i_run(min_time_since_last_run_s):
return f()
else:
return None
def __call__(self, f):
def wrapper(*args, **kwargs):
return self.maybe_run(lambda: f(*args, **kwargs))
return wrapper
```
#### File: dali/utils/vocab.py
```python
from dali.utils import apply_recursively_on_type
class VocabEncoded(int):
pass
class Vocab(object):
UNK = '**UNK**'
EOS = '**EOS**'
def __init__(self, words=None, add_eos=True, add_unk=True):
self.index2word = []
self.word2index = {}
self.eos = None
self.unk = None
if add_unk:
self.add(Vocab.UNK)
if add_eos:
self.add(Vocab.EOS)
if words:
self.add(words)
def __contains__(self, key):
if isinstance(key, int):
return key in range(len(self.index2word))
elif isinstance(key, str):
return key in self.word2index
else:
raise ValueError("expected(index or string)")
def add(self, obj):
def add_f(word):
idx = self.word2index.get(word)
if idx is None:
idx = len(self.index2word)
self.index2word.append(word)
self.word2index[word] = idx
if word is Vocab.UNK:
self.unk = idx
if word is Vocab.EOS:
self.eos = idx
return word
apply_recursively_on_type(obj, add_f, str)
def words(self):
return self.word2index.keys()
def __len__(self):
return len(self.index2word)
def __getitem__(self, index):
if isinstance(index, int):
return self.index2word[index]
elif isinstance(index, str):
if self.unk is not None:
return VocabEncoded(self.word2index.get(index) or self.unk)
else:
return VocabEncoded(self.word2index[index])
else:
raise ValueError("expected(index or string)")
def decode(self, obj, strip_eos=False, decode_type=int):
def decode_f(word_idx):
return self.index2word[word_idx]
def decode_list_f(lst):
if strip_eos:
assert self.eos is not None
return [el for el in lst if el != Vocab.EOS]
else:
return lst
return apply_recursively_on_type(obj, decode_f, decode_type, list_callback=decode_list_f)
def encode(self, obj, add_eos=False, encode_type=int):
def encode_f(word):
if self.unk is not None:
return encode_type(self.word2index.get(word) or self.unk)
else:
return encode_type(self.word2index[word])
def encode_list_f(lst):
lst = [encode_f(word) for word in lst]
if add_eos:
assert self.eos is not None
lst.append(VocabEncoded(self.eos))
return lst
return apply_recursively_on_type(obj, lambda x:x, str, list_callback=encode_list_f)
__all__ = [
"VocabEncoded","Vocab"
]
```
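A hedged round trip through `Vocab` (indices shown assume the default constructor, where `**UNK**` takes index 0 and `**EOS**` index 1; the word list is made up):
```python
# Hypothetical sketch; import path inferred from the file layout.
from dali.utils.vocab import Vocab

vocab = Vocab(["the", "rhino", "sat"])
encoded = vocab.encode(["the", "rhino", "sat"])   # -> [2, 3, 4]
decoded = vocab.decode(encoded)                   # -> ["the", "rhino", "sat"]
unknown = vocab.encode(["hippo"])                 # -> [0], falls back to **UNK**
```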
#### File: examples/translation/predict.py
```python
import argparse
import random
import math
from dali.utils import (
set_device_from_args,
add_device_args,
unpickle_as_dict,
)
from dali.data.utils import split_punctuation
from translation import TranslationModel
def parse_args():
parser = argparse.ArgumentParser()
add_device_args(parser)
parser.add_argument("--path", type=str, required='True', help="Path to saved model")
parser.add_argument("--beam_width", type=int, default=5, help="Beam width used when prediction")
parser.add_argument("--max_output_length", type=int, default=40, help="Maximum number of words in the translation")
parser.add_argument("--show_beams", action='store_true', default=False,
help="If true shows all the beams and probabilities")
return parser.parse_args()
def show_reconstructions(model, example_pair, vocabs, max_sentence_length):
from_words, to_words = example_pair
from_vocab, to_vocab = vocabs
from_with_unk = ' '.join(from_vocab.decode(from_vocab.encode(from_words)))
to_with_unk = ' '.join(to_vocab.decode(to_vocab.encode(to_words)))
print('TRANSLATING: %s' % from_with_unk)
print('REFERENCE: %s' % to_with_unk)
print('')
def main(args):
set_device_from_args(args)
RELEVANT_VARIABLES = ["model", "vocabs"]
loaded = unpickle_as_dict(args.path, RELEVANT_VARIABLES)
model = loaded["model"]
from_vocab, to_vocab = loaded["vocabs"]
while True:
from_sentence = split_punctuation(input()).split(' ')
encoded = from_vocab.encode(list(reversed(from_sentence)), add_eos=False)
beams = model.predict(encoded,
eos_symbol=to_vocab.eos,
max_sequence_length=args.max_output_length + 1,
beam_width=args.beam_width)
if args.show_beams:
for solution, score, _ in beams:
score = math.exp(score.w[0])
# reveal the unks
solution = ' '.join(to_vocab.decode(solution, strip_eos=True))
                print('%f => %s' % (score, solution))
else:
print(' '.join(to_vocab.decode(beams[0].solution, strip_eos=True)))
if __name__ == '__main__':
main(parse_args())
``` |
{
"source": "JonathanRaiman/pytreebank",
"score": 3
} |
#### File: pytreebank/pytreebank/treelstm.py
```python
from .labeled_trees import LabeledTree
import codecs
def import_tree_corpus(labels_path, parents_path, texts_path):
"""
    Import dataset from the TreeLSTM data generation scripts.
Arguments:
----------
labels_path : str, where are labels are stored (should be in
data/sst/labels.txt).
parents_path : str, where the parent relationships are stored
(should be in data/sst/parents.txt).
texts_path : str, where are strings for each tree are stored
(should be in data/sst/sents.txt).
Returns:
--------
list<LabeledTree> : loaded example trees.
"""
with codecs.open(labels_path, "r", "UTF-8") as f:
label_lines = f.readlines()
with codecs.open(parents_path, "r", "UTF-8") as f:
parent_lines = f.readlines()
with codecs.open(texts_path, "r", "UTF-8") as f:
word_lines = f.readlines()
assert len(label_lines) == len(parent_lines)
assert len(label_lines) == len(word_lines)
trees = []
for labels, parents, words in zip(label_lines, parent_lines, word_lines):
labels = [int(l) + 2 for l in labels.strip().split(" ")]
parents = [int(l) for l in parents.strip().split(" ")]
words = words.strip().split(" ")
assert len(labels) == len(parents)
trees.append(read_tree(parents, labels, words))
return trees
def assign_texts(node, words, next_idx=0):
"""
Recursively assign the words to nodes by finding and
assigning strings to the leaves of a tree in left
to right order.
"""
if len(node.children) == 0:
node.text = words[next_idx]
return next_idx + 1
else:
for child in node.children:
next_idx = assign_texts(child, words, next_idx)
return next_idx
def read_tree(parents, labels, words):
"""
Take as input a list of integers for parents
and labels, along with a list of words, and
reconstruct a LabeledTree.
"""
trees = {}
root = None
for i in range(1, len(parents) + 1):
if i not in trees and parents[i - 1] != - 1:
idx = i
prev = None
while True:
parent = parents[idx - 1]
if parent == -1:
break
tree = LabeledTree()
if prev is not None:
tree.add_child(prev)
trees[idx] = tree
tree.label = labels[idx - 1]
if trees.get(parent) is not None:
trees[parent].add_child(tree)
break
elif parent == 0:
root = tree
break
else:
prev = tree
idx = parent
assert assign_texts(root, words) == len(words)
return root
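if __name__ == "__main__":
    # Hypothetical smoke test of read_tree; the arrays below are made up.
    # Nodes 1 and 2 attach to node 3; node 3 has parent 0, so it is the root.
    example_parents = [3, 3, 0]
    example_labels = [2, 4, 3]
    example_words = ["good", "movie"]  # leaves receive words left to right
    example_root = read_tree(example_parents, example_labels, example_words)
    print(example_root.label, [child.text for child in example_root.children])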
``` |
{
"source": "JonathanRaiman/xml_cleaner",
"score": 4
} |
#### File: xml_cleaner/ciseau/sentence_tokenizer.py
```python
from .regular_expressions import word_with_alpha_and_period
from .quoted_expressions import group_quoted_tokens
from .constants import (
PUNCT_SYMBOLS,
CONTINUE_PUNCT_SYMBOLS
)
from .word_tokenizer import tokenize
def is_end_symbol(symbol):
return (
symbol[:2] in PUNCT_SYMBOLS
)
def detect_sentence_boundaries(tokens):
"""
Subdivide an input list of strings (tokens)
into multiple lists according to detected
sentence boundaries.
```
detect_sentence_boundaries(
["Cat ", "sat ", "mat", ". ", "Cat ", "'s ", "named ", "Cool", "."]
)
#=> [
["Cat ", "sat ", "mat", ". "],
["Cat ", "'s ", "named ", "Cool", "."]
]
```
Arguments:
----------
tokens : list<str>
Returns:
--------
list<list<str>> : original list subdivided into multiple
lists according to (detected) sentence boundaries.
"""
tokenized = group_quoted_tokens(tokens)
words = []
sentences = []
for i in range(len(tokenized)):
        end_sentence = False
        # grouped quoted tokens come back as a nested list (a quoted "parenthetical"):
        if isinstance(tokenized[i], list):
if len(words) == 0:
# end if a sentence finishes inside quoted section,
# and no sentence was begun beforehand
if is_end_symbol(tokenized[i][-2].rstrip()):
end_sentence = True
else:
# end if a sentence finishes inside quote marks
if (tokenized[i][0][0] == '"' and
is_end_symbol(tokenized[i][-2].rstrip()) and
not tokenized[i][1][0].isupper()):
end_sentence = True
words.extend(tokenized[i])
else:
stripped_tokenized = tokenized[i].rstrip()
if is_end_symbol(stripped_tokenized):
words.append(tokenized[i])
not_last_word = i + 1 != len(tokenized)
next_word_lowercase = (
not_last_word and
tokenized[i+1][0].islower()
)
next_word_continue_punct = (
not_last_word and
tokenized[i+1][0] in CONTINUE_PUNCT_SYMBOLS
)
end_sentence = not (
not_last_word and
(
next_word_lowercase or
next_word_continue_punct
)
)
else:
words.append(tokenized[i])
if end_sentence:
sentences.append(words)
words = []
# add final sentence, if it wasn't added yet.
if len(words) > 0:
sentences.append(words)
# If the final word ends in a period:
if len(sentences) > 0 and sentences[-1][-1]:
alpha_word_piece = word_with_alpha_and_period.match(sentences[-1][-1])
if alpha_word_piece:
sentences[-1][-1] = alpha_word_piece.group(1)
sentences[-1].append(alpha_word_piece.group(2))
return sentences
def remove_whitespace(sentences):
"""
Clear out spaces and newlines
from the list of list of strings.
Arguments:
----------
sentences : list<list<str>>
Returns:
--------
list<list<str>> : same strings as input,
without spaces or newlines.
"""
return [[w.rstrip() for w in sent] for sent in sentences]
def sent_tokenize(text, keep_whitespace=False, normalize_ascii=True):
"""
Perform sentence + word tokenization on the input text
using regular expressions and english/french specific
rules.
Arguments:
----------
text : str, input string to tokenize
keep_whitespace : bool, whether to strip out spaces
and newlines.
normalize_ascii : bool, perform some replacements
on rare characters so that they become
easier to process in a ascii pipeline
(canonicalize dashes, replace œ -> oe, etc..)
Returns:
--------
list<list<str>> : sentences with their content held
in a list of strings for each token.
"""
sentences = detect_sentence_boundaries(
tokenize(
text,
normalize_ascii
)
)
if not keep_whitespace:
sentences = remove_whitespace(sentences)
return sentences
```
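A hedged usage sketch of `sent_tokenize` (the expected output is approximate and follows the `detect_sentence_boundaries` example above; exact tokens depend on the word tokenizer below):
```python
# Hypothetical sketch; assumes the package is importable as ciseau.
from ciseau import sent_tokenize

sentences = sent_tokenize("Cat sat mat. Cat's named Cool.")
# Roughly: [["Cat", "sat", "mat", "."], ["Cat", "'s", "named", "Cool", "."]]
```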
#### File: xml_cleaner/ciseau/word_tokenizer.py
```python
import re
from .constants import (
PUNCT_SYMBOLS,
ABBR,
MONTHS,
UNDECIDED,
SHOULD_SPLIT,
SHOULD_NOT_SPLIT
)
from .regular_expressions import (
word_with_period,
no_punctuation,
numerical_expression,
repeated_dash_converter,
dash_converter,
pure_whitespace,
left_quote_shifter,
left_quote_converter,
one_letter_long_or_repeating,
left_single_quote_converter,
remaining_quote_converter,
english_nots,
english_contractions,
english_specific_appendages,
french_appendages,
right_single_quote_converter,
simple_dash_finder,
advanced_dash_finder,
url_file_finder,
shifted_ellipses,
shifted_standard_punctuation,
multi_single_quote_finder
)
def protect_shorthand(text, split_locations):
"""
Annotate locations in a string that contain
periods as being true periods or periods
that are a part of shorthand (and thus should
not be treated as punctuation marks).
Arguments:
----------
text : str
split_locations : list<int>, same length as text.
"""
word_matches = list(re.finditer(word_with_period, text))
total_words = len(word_matches)
for i, match in enumerate(word_matches):
match_start = match.start()
match_end = match.end()
for char_pos in range(match_start, match_end):
if split_locations[char_pos] == SHOULD_SPLIT and match_end - char_pos > 1:
match_start = char_pos
word = text[match_start:match_end]
if not word.endswith('.'):
# ensure that words contained within other words:
# e.g. 'chocolate.Mountains of' -> 'chocolate. Mountains of'
if (not word[0].isdigit() and
split_locations[match_start] == UNDECIDED):
split_locations[match_start] = SHOULD_SPLIT
continue
period_pos = match_end - 1
# this is not the last word, abbreviation
# is not the final period of the sentence,
# moreover:
word_is_in_abbr = word[:-1].lower() in ABBR
is_abbr_like = (
word_is_in_abbr or
one_letter_long_or_repeating.match(word[:-1]) is not None
)
is_digit = False if is_abbr_like else word[:-1].isdigit()
is_last_word = i == (total_words - 1)
is_ending = is_last_word and (match_end == len(text) or text[match_end:].isspace())
is_not_ending = not is_ending
abbreviation_and_not_end = (
len(word) > 1 and
is_abbr_like and
is_not_ending
)
if abbreviation_and_not_end and (
(not is_last_word and word_matches[i+1].group(0)[0].islower()) or
(not is_last_word and word_matches[i+1].group(0) in PUNCT_SYMBOLS) or
word[0].isupper() or
word_is_in_abbr or
len(word) == 2):
# next word is lowercase (e.g. not a new sentence?), or next word
# is punctuation or next word is totally uppercase (e.g. 'Mister.
# ABAGNALE called to the stand')
if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations):
split_locations[period_pos + 1] = SHOULD_SPLIT
split_locations[period_pos] = SHOULD_NOT_SPLIT
elif (is_digit and
len(word[:-1]) <= 2 and
not is_last_word and
word_matches[i+1].group(0).lower() in MONTHS):
# a date or weird number with a period:
if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations):
split_locations[period_pos + 1] = SHOULD_SPLIT
split_locations[period_pos] = SHOULD_NOT_SPLIT
elif split_locations[period_pos] == UNDECIDED:
# split this period into its own segment:
split_locations[period_pos] = SHOULD_SPLIT
def split_with_locations(text, locations):
"""
Use an integer list to split the string
contained in `text`.
Arguments:
----------
text : str, same length as locations.
locations : list<int>, contains values
'SHOULD_SPLIT', 'UNDECIDED', and
'SHOULD_NOT_SPLIT'. Will create
strings between each 'SHOULD_SPLIT'
locations.
Returns:
--------
Generator<str> : the substrings of text
corresponding to the slices given
in locations.
"""
start = 0
for pos, decision in enumerate(locations):
if decision == SHOULD_SPLIT:
if start != pos:
yield text[start:pos]
start = pos
if start != len(text):
yield text[start:]
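# Hedged example of split_with_locations with hypothetical decisions:
# if locations is UNDECIDED everywhere except SHOULD_SPLIT at indices 5 and 11,
#   list(split_with_locations("hello world again", locations))
#   -> ["hello", " world", " again"]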
def mark_regex(regex, text, split_locations):
"""
Regex that adds a 'SHOULD_SPLIT' marker at the end
location of each matching group of the given regex.
Arguments
---------
regex : re.Expression
text : str, same length as split_locations
split_locations : list<int>, split decisions.
"""
for match in regex.finditer(text):
end_match = match.end()
if end_match < len(split_locations):
split_locations[end_match] = SHOULD_SPLIT
def mark_begin_end_regex(regex, text, split_locations):
"""
Regex that adds a 'SHOULD_SPLIT' marker at the end
location of each matching group of the given regex,
and adds a 'SHOULD_SPLIT' at the beginning of the
matching group. Each character within the matching
group will be marked as 'SHOULD_NOT_SPLIT'.
Arguments
---------
regex : re.Expression
text : str, same length as split_locations
split_locations : list<int>, split decisions.
"""
for match in regex.finditer(text):
end_match = match.end()
begin_match = match.start()
for i in range(begin_match+1, end_match):
split_locations[i] = SHOULD_NOT_SPLIT
if end_match < len(split_locations):
if split_locations[end_match] == UNDECIDED:
split_locations[end_match] = SHOULD_SPLIT
if split_locations[begin_match] == UNDECIDED:
split_locations[begin_match] = SHOULD_SPLIT
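# Hedged illustration of the two marking helpers: mark_regex only marks the
# position just past each match as SHOULD_SPLIT, whereas mark_begin_end_regex
# also marks the match start and forces every interior character to
# SHOULD_NOT_SPLIT, so a match found by e.g. url_file_finder survives as a
# single unbroken token.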
def tokenize(text, normalize_ascii=True):
"""
Convert a single string into a list of substrings
split along punctuation and word boundaries. Keep
whitespace intact by always attaching it to the
previous token.
Arguments:
----------
text : str
normalize_ascii : bool, perform some replacements
on non-ascii characters to canonicalize the
string (defaults to True).
Returns:
--------
list<str>, list of substring tokens.
"""
# 1. If there's no punctuation, return immediately
if no_punctuation.match(text):
return [text]
# 2. let's standardize the input text to ascii (if desired)
# Note: this will no longer respect input-to-output character positions
if normalize_ascii:
# normalize these greco-roman characters to ascii:
text = text.replace(u"œ", "oe").replace(u"æ", "ae")
# normalize dashes:
text = repeated_dash_converter.sub("-", text)
# 3. let's construct an integer array of the possible split locations:
split_locations = [UNDECIDED] * len(text)
regexes = (
pure_whitespace,
left_quote_shifter,
left_quote_converter,
left_single_quote_converter,
remaining_quote_converter,
# regex can't fix this -> regex ca n't fix this
english_nots,
# you'll dig this -> you 'll dig this
english_contractions,
# the rhino's horns -> the rhino 's horns
english_specific_appendages,
# qu'a tu fais au rhino -> qu ' a tu fais au rhino,
french_appendages
)
# 4. Mark end locations for specific regular expressions:
for regex in regexes:
mark_regex(regex, text, split_locations)
begin_end_regexes = (
multi_single_quote_finder,
right_single_quote_converter,
# use dashes as the breakpoint:
# the rhino--truck -> the rhino -- truck
simple_dash_finder if normalize_ascii else advanced_dash_finder,
numerical_expression,
url_file_finder,
shifted_ellipses,
# the #rhino! -> the # rhino ! ;
# the rino[sic] -> the rino [ sic ]
shifted_standard_punctuation
)
# 5. Mark begin and end locations for other regular expressions:
for regex in begin_end_regexes:
mark_begin_end_regex(regex, text, split_locations)
# 6. Remove splitting on exceptional uses of periods:
# I'm with Mr. -> I 'm with Mr. , I'm with Mister. -> I 'm with Mister .
protect_shorthand(text, split_locations)
if normalize_ascii:
text = dash_converter.sub("-", text)
# 7. Return the split string using the integer list:
return list(split_with_locations(text, split_locations))
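# Hedged usage sketch (exact boundaries depend on the regexes above):
#   tokens = tokenize("Mr. Smith, hello!")
# The abbreviation "Mr." keeps its period attached, the comma and the
# exclamation mark become separate tokens, and whitespace stays glued to
# the token that precedes it.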
``` |
{
"source": "jonathanrd/PySimPlot",
"score": 3
} |
#### File: PySimPlot/PySimPlot/export.py
```python
def csv(sequences, window, outputFile):
    """Write the per-position identity values to a CSV file.

    Note: the window argument is currently unused in this function.
    """
    import csv
    # First make the CSV header
    header = ["pointer"]
    for y in range(1, len(sequences.seqs)):
        header.append(sequences.seqs[y]["Name"])
    # Now read all of the identities
    # (the pointer positions are shared across comparisons, so the keys of the
    # last sequence's Identities dict provide the first column)
    rows = [sequences.seqs[y]["Identities"].keys()]
    for y in range(1, len(sequences.seqs)):
        rows.append(sequences.seqs[y]["Identities"].values())
rows = zip(*rows)
# Write it to the output file
with open(outputFile, "w") as f:
writer = csv.writer(f)
writer.writerow(header)
for row in rows:
writer.writerow(row)
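# Hedged sketch of the resulting CSV layout (values are illustrative only):
#   pointer,SeqB,SeqC
#   2,98.5,97.0
#   3,98.5,96.4
# One row per pointer position, one identity column per compared sequence.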
```
#### File: PySimPlot/PySimPlot/similarity.py
```python
class Pointer:
# Set all initial values using calculate() on load
def __init__(self, window, step, offset = 0):
self.window = window
self.step = step
self.iteration = 0
self.offset = offset
self.calculate()
# Read the current parameters and set values
def calculate(self):
self.window_lower = self.step * self.iteration + self.offset
self.window_upper = self.window + self.step * self.iteration + self.offset
# Is the window size odd or even? Set correct pointer location
if (self.window % 2) == 0:
self.pointer = int((self.window / 2) - 1 + self.step * self.iteration) + self.offset
else:
self.pointer = int((self.window - 1) / 2 + self.step * self.iteration) + self.offset
# Increase the iteration count and recalculate all values
def increment(self):
self.iteration += 1
self.calculate()
# Reset the iteration count to zero and recalculate all values
def reset(self):
self.iteration = 0
self.calculate()
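# Hedged example of the Pointer bookkeeping: Pointer(window=5, step=1) starts
# with window_lower=0, window_upper=5 and pointer=2 (the centre of an odd
# window); each call to increment() shifts all three values forward by step.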
def compare(sequenceA, sequenceB, gaps = False):
"""Compares two sequences and returns the identity. If gaps is set to True then count a shared gap as identical.
Args:
sequenceA (str): The first sequence.
        sequenceB (str): The second sequence.
gaps (bool): Should gaps count as identical? (Default: False)
Returns:
float: identity.
"""
    assert len(sequenceA) == len(sequenceB), "Sequence lengths do not match"
length = len(sequenceA)
# Initiate counters
identical = 0
gap_length = 0
# Look at each position in turn
for base in range(0, length):
# Deal with gaps (gap in ref and seq to compare)
        if (sequenceA[base] == "-" and sequenceB[base] == "-") and not gaps:
gap_length += 1
continue
# Is the base/residue the same?
if ( sequenceA[base] == sequenceB[base]):
# Increase the counter
identical += 1
# Avoid a divide by zero error
if (gap_length == length): length += 1
# Convert the count to a percentage
identity = identical / (length - gap_length) * 100
return identity
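# Hedged examples of compare() (values follow from the arithmetic above):
#   compare("ACGT", "ACGA")             -> 75.0   (3 of 4 positions match)
#   compare("AC-T", "AC-A", gaps=False) -> ~66.67 (shared gap excluded)
#   compare("AC-T", "AC-A", gaps=True)  -> 75.0   (shared gap counts as a match)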
def main(seqA, seqB, verbose, gaps, window, step):
sequenceA = seqA["Sequence"]
sequenceB = seqB["Sequence"]
offset = seqA["SeqStart"]
# Verbose output
if (verbose):
print("\n\nNow comparing: ",seqA["Name"],"with",seqB["Name"])
print("\n\n")
    # Initialize an empty dict to hold the identity values
combined_identities = {}
# Initiate the pointer class
pointer = Pointer(window, step, offset)
# Iterate through the sequences
# but stop when the window goes past the last base/residue
while pointer.window_upper <= seqA["SeqEnd"]:
# Extract the part each sequence to compare
lower = pointer.window_lower
upper = pointer.window_upper
compareA = sequenceA[lower:upper]
compareB = sequenceB[lower:upper]
# Compare the sequences and get the % identity
identity = compare(compareA, compareB, gaps)
# Verbose output
if (verbose):
print("Window: ",pointer.window_lower,pointer.window_upper)
print("A: ",compareA)
print("B: ",compareB)
print("Identity: ",round(identity,2),"\n")
# Add the identity to our array using the pointer position as a key
combined_identities[pointer.pointer] = identity
# Increment the pointer and window location
pointer.increment()
seqB["Identities"]=combined_identities
#print(combined_identities)
pointer.reset()
``` |
{
"source": "JonathanReeve/CommonsDownloader",
"score": 3
} |
#### File: CommonsDownloader/commonsdownloader/commonsdownloader.py
```python
import os
import logging
import argparse
from commonsdownloader.thumbnaildownload import download_file, DownloadException
from itertools import zip_longest
def get_category_files_from_api(category_name):
"""Yield the file names of a category by querying the MediaWiki API."""
import mwclient
site = mwclient.Site('commons.wikimedia.org')
category = site.Categories[category_name]
return (x.page_title for x in category.members(namespace=6))
def download_from_category(category_name, output_path, width):
"""Download files of a given category."""
file_names = get_category_files_from_api(category_name)
    # Pair every file name with the single requested width
    files_to_download = zip_longest(file_names, [], fillvalue=width)
download_files_if_not_in_manifest(files_to_download, output_path)
def get_files_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a text file handler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.rsplit(',', 1)
width = int(width)
except ValueError:
image_name = line
width = None
yield (image_name, width)
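# Hedged example of the accepted text-file syntax, one entry per line with an
# optional width:
#   Example.jpg,300
#   Another_file.png
# The first line yields ("Example.jpg", 300); the second yields
# ("Another_file.png", None), leaving the width unset.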
def download_from_file_list(file_list, output_path):
"""Download files from a given textfile list."""
files_to_download = get_files_from_textfile(file_list)
download_files_if_not_in_manifest(files_to_download, output_path)
def get_files_from_arguments(files, width):
"""Yield the file names and chosen width."""
return zip_longest(files, [], fillvalue=width)
def download_from_files(files, output_path, width):
"""Download files from a given file list."""
files_to_download = get_files_from_arguments(files, width)
download_files_if_not_in_manifest(files_to_download, output_path)
def get_local_manifest_path(output_path):
"""Return the path to the local downloading manifest."""
return os.path.join(output_path, '.manifest')
def read_local_manifest(output_path):
"""Return the contents of the local manifest, as a dictionary."""
local_manifest_path = get_local_manifest_path(output_path)
try:
with open(local_manifest_path, 'r') as f:
manifest = dict(get_files_from_textfile(f))
logging.debug('Retrieving %s elements from manifest', len(manifest))
return manifest
except IOError:
logging.debug('No local manifest at %s', local_manifest_path)
return {}
def is_file_in_manifest(file_name, width, manifest):
"""Whether the given file, in its given width, is in manifest."""
return (manifest.get(file_name, '-1') == width)
def write_file_to_manifest(file_name, width, manifest_fh):
"""Write the given file in manifest."""
manifest_fh.write("%s,%s\n" % (file_name, str(width)))
logging.debug("Wrote file %s to manifest", file_name)
def download_files_if_not_in_manifest(files_iterator, output_path):
"""Download the given files to the given path, unless in manifest."""
local_manifest = read_local_manifest(output_path)
with open(get_local_manifest_path(output_path), 'a') as manifest_fh:
for (file_name, width) in files_iterator:
if is_file_in_manifest(file_name, width, local_manifest):
logging.info('Skipping file %s', file_name)
continue
try:
download_file(file_name, output_path, width=width)
write_file_to_manifest(file_name, width, manifest_fh)
except DownloadException as e:
logging.error("Could not download %s: %s", file_name, e.message)
class Folder(argparse.Action):
"""An argparse action for directories."""
def __call__(self, parser, namespace, values, option_string=None):
"""Overriding call action."""
prospective_dir = values
if not os.path.isdir(prospective_dir):
msg = "Folder:{0} is not a valid path".format(prospective_dir)
raise argparse.ArgumentTypeError(msg)
else:
setattr(namespace, self.dest, prospective_dir)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
source_group = parser.add_mutually_exclusive_group()
source_group.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
source_group.add_argument("-c", "--category", metavar="CATEGORY",
dest="category_name",
type=str,
help='A category name (without prefix)')
parser.add_argument("files", nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-w", "--width",
dest="width",
type=int,
default=100,
help='The width of the thumbnail (default: 100)')
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument("-v",
action="count",
dest="verbose",
default=1,
help="Verbosity level. -v for DEBUG")
verbosity_group.add_argument("-q", "--quiet",
action="store_const",
dest="verbose",
const=0,
help="To silence the INFO messages")
args = parser.parse_args()
logging_map = {0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG}
logging_level = logging_map.get(args.verbose, logging.DEBUG)
logging.basicConfig(level=logging_level)
logging.info("Starting")
if args.file_list:
download_from_file_list(args.file_list, args.output_path)
elif args.category_name:
download_from_category(args.category_name, args.output_path, args.width)
elif args.files:
download_from_files(args.files, args.output_path, args.width)
else:
parser.print_help()
if __name__ == "__main__":
main()
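# Hedged invocation sketches (running this module directly; the option names
# come from the argparse definitions above, the file names are placeholders):
#   python commonsdownloader.py "Example.jpg" -w 300 -o ./thumbs
#   python commonsdownloader.py -c "Paris" -w 400 -o ./thumbs
#   python commonsdownloader.py -l files.txt -o ./thumbs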
``` |
{
"source": "JonathanREmery/EncoderDataNetworking",
"score": 3
} |
#### File: EncoderDataNetworking/Python/ProcessData.py
```python
import math
# ProcessData class to process the raw data received from the robot
class ProcessData():
    # Takes the raw data received from the robot (string or bytes) and returns it as a dictionary of floats
@staticmethod
def parseData(data):
try:
            # Strip the bytes-literal markers (b'...') left over from calling str() on bytes
            data = str(data).replace('b\'', '').replace('\'', '')
parsed = data.split(',')
parsedData = {}
for x in parsed:
parsedData[x.split(':')[0]] = float(x.split(':')[1])
return parsedData
        except Exception:
            # If parsing fails, return NaN for every expected field
            return {
                'leftEncoderValue': math.nan,
                'rightEncoderValue': math.nan,
                'leftEncoderVelocity': math.nan,
                'rightEncoderVelocity': math.nan,
                'leftEncoderAcceleration': math.nan,
                'rightEncoderAcceleration': math.nan
            }
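# Hedged example of the expected wire format and the parsed result:
#   ProcessData.parseData(b"leftEncoderValue:1.0,rightEncoderValue:2.0")
#   -> {'leftEncoderValue': 1.0, 'rightEncoderValue': 2.0}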
``` |
{
"source": "JonathanRennison/editor",
"score": 2
} |
#### File: editor/app/__init__.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import platform
from flask import Flask
app = Flask(__name__)
app.config.from_object("config")
from app import routes
from . import util
def print_version(out=sys.stderr):
branch = 'unknown'
revision = 'unknown'
try:
branch, revision = util.get_head_revision()
except IOError:
pass
print('2immerse authoring tool backend, branch %s, revision %s, python %s' % (branch, revision, platform.python_version()), file=out)
print_version()
```
#### File: editor/test/test_editing.py
```python
from __future__ import absolute_import
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
import unittest
import urllib.request, urllib.parse, urllib.error
import urllib.parse
import os
import json
import uuid
from . import pretest
from app.api import document
class TestEditing(unittest.TestCase):
def _buildUrl(self, extra=''):
myUrl = urllib.parse.urljoin(
u'file:', urllib.request.pathname2url(os.path.abspath(__file__))
)
docUrl = urllib.parse.urljoin(
myUrl,
u"fixtures/test_editing%s.xml" % (extra)
)
return docUrl
def _createDocument(self):
d = document.Document(uuid.uuid4())
d.setTestMode(True)
docUrl = self._buildUrl()
d.load(docUrl)
return d
def test_getChapters(self):
d = self._createDocument()
e = d.editing()
chapter = e.getChapters()
self.assertEqual(chapter['id'], 'rootchapterid')
self.assertEqual(len(chapter['chapters']), 1)
self.assertEqual(chapter['chapters'][0]['id'], 'subchapterid')
def test_getChapter(self):
d = self._createDocument()
e = d.editing()
chapter = e.getChapter('rootchapterid')
self.assertEqual(chapter['id'], 'rootchapterid')
self.assertEqual(len(chapter['tracks']), 1)
self.assertEqual(chapter['tracks'][0]['id'], 'trackid')
self.assertEqual(chapter['tracks'][0]['elements'][0]['asset'], 'assetid')
def test_getLayout(self):
d = self._createDocument()
e = d.editing()
layout = e.getLayout()
self.assertEqual(len(layout['devices']), 1)
self.assertEqual(len(layout['regions']), 1)
def test_getAssets(self):
d = self._createDocument()
e = d.editing()
assets = e.getAssets()
self.assertEqual(len(assets), 1)
self.assertEqual(assets[0]['id'], 'assetid')
def test_addChapterBefore(self):
d = self._createDocument()
e = d.editing()
oldCount = d._count()
rootChapter = e.getChapters()
subChapterId = rootChapter['chapters'][0]['id']
newId = e.addChapterBefore(subChapterId)
rootChapter = e.getChapters()
self.assertEqual(len(rootChapter['chapters']), 2)
self.assertEqual(rootChapter['chapters'][0]['id'], newId)
def test_addChapterAfter(self):
d = self._createDocument()
e = d.editing()
oldCount = d._count()
rootChapter = e.getChapters()
subChapterId = rootChapter['chapters'][0]['id']
newId = e.addChapterAfter(subChapterId)
rootChapter = e.getChapters()
self.assertEqual(len(rootChapter['chapters']), 2)
self.assertEqual(rootChapter['chapters'][1]['id'], newId)
def test_addSubChapter(self):
d = self._createDocument()
e = d.editing()
oldCount = d._count()
rootChapter = e.getChapters()
subChapterId = rootChapter['chapters'][0]['id']
newId = e.addSubChapter(subChapterId)
rootChapter = e.getChapters()
self.assertEqual(len(rootChapter['chapters'][0]['chapters']), 1)
self.assertEqual(rootChapter['chapters'][0]['chapters'][0]['id'], newId)
def test_renameChapter(self):
d = self._createDocument()
e = d.editing()
oldCount = d._count()
rootChapter = e.getChapters()
subChapterId = rootChapter['chapters'][0]['id']
newId = e.addSubChapter(subChapterId)
e.renameChapter(newId, 'Try Renaming')
rootChapter = e.getChapters()
self.assertEqual(len(rootChapter['chapters'][0]['chapters']), 1)
self.assertEqual(rootChapter['chapters'][0]['chapters'][0]['name'], 'Try Renaming')
def test_deleteChapter(self):
d = self._createDocument()
e = d.editing()
oldCount = d._count()
rootChapter = e.getChapters()
subChapterId = rootChapter['chapters'][0]['id']
newId = e.addSubChapter(subChapterId)
e.deleteChapter(subChapterId)
rootChapter = e.getChapters()
self.assertEqual(len(rootChapter['chapters']), 0)
def test_addTrack(self):
d = self._createDocument()
e = d.editing()
oldCount = d._count()
rootChapter = e.getChapters()
subChapterId = rootChapter['chapters'][0]['id']
trackId = e.addTrack(subChapterId, 'regionid')
rootChapter = e.getChapters()
self.assertEqual(len(rootChapter['chapters'][0]['tracks']), 1)
self.assertEqual(rootChapter['chapters'][0]['tracks'][0]['id'], trackId)
self.assertEqual(rootChapter['chapters'][0]['tracks'][0]['region'], 'regionid')
def test_deleteTrack(self):
d = self._createDocument()
e = d.editing()
oldCount = d._count()
rootChapter = e.getChapters()
subChapterId = rootChapter['chapters'][0]['id']
trackId = e.addTrack(subChapterId, 'regionid')
rootChapter = e.getChapters()
self.assertEqual(len(rootChapter['chapters'][0]['tracks']), 1)
e.deleteTrack(trackId)
rootChapter = e.getChapters()
self.assertEqual(len(rootChapter['chapters'][0]['tracks']), 0)
def test_addElement(self):
d = self._createDocument()
e = d.editing()
oldCount = d._count()
rootChapter = e.getChapters()
subChapterId = rootChapter['chapters'][0]['id']
trackId = e.addTrack(subChapterId, 'regionid')
elementId = e.addElement(trackId, 'assetid')
e.setElementBegin(elementId, 42)
e.setElementDuration(elementId, 43)
thisChapter = e.getChapter(subChapterId)
self.assertEqual(len(thisChapter['tracks']), 1)
self.assertDictEqual(thisChapter['tracks'][0]['elements'][0], {'asset':'assetid', 'begin':42.0, 'duration':43.0})
self.assertEqual(thisChapter['tracks'][0]['region'], 'regionid')
if __name__ == '__main__':
unittest.main()
``` |