{
"source": "jozhang97/mmsegmentation",
"score": 2
}
#### File: mmseg/datasets/box2seg.py
```python
import os.path as osp
from mmseg.datasets.builder import DATASETS
from mmseg.datasets.custom import CustomDataset
@DATASETS.register_module()
class Box2seg(CustomDataset):
CLASSES = ('background', 'foreground')
PALETTE = [[0, 0, 0], [255, 0, 0]]
def __init__(self, **kwargs):
super(Box2seg, self).__init__(
img_suffix='.jpg', seg_map_suffix='.png', **kwargs)
assert osp.exists(self.img_dir)
```
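For context, a dataset registered with `@DATASETS.register_module()` like this is normally referenced by its class name from an mmsegmentation config. The snippet below is an illustrative sketch, not part of the original repo; the data root, directory names, and pipeline entries are placeholders.
```python
# Hypothetical mmsegmentation config using the registered Box2seg dataset.
dataset_type = 'Box2seg'
data_root = 'data/box2seg'  # placeholder path

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='RandomFlip', prob=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/train',       # .jpg images, per img_suffix above
        ann_dir='annotations/train',  # .png masks, per seg_map_suffix above
        pipeline=train_pipeline),
)
```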
{
"source": "jozhang97/Side-tuning",
"score": 2
}
#### File: Side-tuning/configs/habitat.py
```python
import os

import numpy as np

# NOTE: `ex` (the Sacred Experiment) and LOG_DIR are expected to be defined by the
# repo's config-loading machinery before this file is evaluated.
@ex.named_config
def cfg_habitat():
uuid = 'habitat_core'
cfg = {}
cfg['learner'] = {
'algo': 'ppo', # Learning algorithm for RL agent. Currently PPO, SLAM, imitation_learning
'clip_param': 0.1, # Clip param for trust region in PPO
'entropy_coef': 1e-4, # Weighting of the entropy term in PPO
'eps': 1e-5, # Small epsilon to prevent divide-by-zero
'gamma': 0.99, # Discount factor for rewards
'internal_state_size': 512, # If using a recurrent policy, what state size to use (if no recurrent policy, make this small for memory savings)
'lr': 1e-4, # Learning rate for algorithm
'num_steps': 1000, # Length of each rollout (grab 'num_steps' consecutive samples to form rollout)
'num_mini_batch': 8, # Number of PPO minibatches (the rollout is split into this many minibatches when computing losses)
'num_stack': 4, # Frames that each cell (CNN) can see
'max_grad_norm': 0.5, # Clip grads
'ppo_epoch': 8, # Number of times PPO goes over the buffer
'recurrent_policy': False, # Use a recurrent version with the cell as the standard model
'tau': 0.95, # When using GAE
'use_gae': True, # Whether to use GAE
'value_loss_coef': 1e-3, # Weighting of value_loss in PPO
'perception_network_reinit': False, # reinitialize the perception_module, used when checkpoint is used
'perception_network': 'AtariNet',
'perception_network_kwargs': {
'extra_kwargs': {
'normalize_taskonomy': True
}
},
'test': False,
'use_replay': True,
'replay_buffer_size': 3000, # This is stored on CPU
'on_policy_epoch': 8, # Number of on policy rollouts in each update
'off_policy_epoch': 8,
'slam_class': None,
'slam_kwargs': {},
'loss_kwargs': { # Used for regularization losses (e.g. weight tying)
'intrinsic_loss_coefs': [],
'intrinsic_loss_types': [],
},
'deterministic': False,
'rollout_value_batch_multiplier': 2,
'cache_kwargs': {},
'optimizer_class': 'optim.Adam',
'optimizer_kwargs': {},
}
cfg['env'] = {
'add_timestep': False, # Add timestep to the observation
'env_name': 'Habitat_PointNav', # Environment to use
"env_specific_kwargs": {
'swap_building_k_episodes': 10,
'gpu_devices': [0],
'scenario_kwargs': { # specific to the scenario - pointnav or exploration
'use_depth': False,
'max_geodesic_dist': 99999 # For PointNav, we skip episodes that are too "hard"
},
'map_kwargs': {
'map_building_size': 22, # How large to make the IMU-based map
'map_max_pool': False, # Use max-pooling on the IMU-based map
'use_cuda': False,
'history_size': None, # How many prior steps to include on the map
},
'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1
# 'val_scenes': ['Denmark', 'Greigsville', 'Eudora', 'Pablo', 'Elmira', 'Mosquito', 'Sands', 'Swormville', 'Sisters', 'Scioto', 'Eastville', 'Edgemere', 'Cantwell', 'Ribera'],
'val_scenes': None,
'train_scenes': None,
# 'train_scenes': ['Beach'],
},
'sensors': {
'features': None,
'taskonomy': None,
'rgb_filled': None,
'map': None,
'target': None,
'depth': None,
'global_pos': None,
'pointgoal': None,
},
'transform_fn_pre_aggregation': None, # Deprecated
'transform_fn_pre_aggregation_fn': None, # Transformation to apply to each individual image (before batching)
'transform_fn_pre_aggregation_kwargs': {}, # Arguments - all string values must be eval()-able
'transform_fn_post_aggregation': None, # Deprecated
'transform_fn_post_aggregation_fn': None, # Transformation to apply to the batched observations
'transform_fn_post_aggregation_kwargs': {}, # Arguments - all string values must be eval()-able
'num_processes': 8,
'num_val_processes': 1,
'additional_repeat_count': 0,
}
cfg['saving'] = {
'checkpoint':None,
'checkpoint_num': None,
'checkpoint_configs': False, # copy the metadata of the checkpoint. YMMV.
'log_dir': LOG_DIR,
'log_interval': 10,
'save_interval': 100,
'save_dir': 'checkpoints',
'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'),
'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'),
'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'),
'vis_interval': 200,
'visdom_server': 'localhost',
'visdom_port': '8097',
'obliterate_logs': False,
}
cfg['training'] = {
'cuda': True,
'gpu_devices': None, # None uses all devices, otherwise give a list of devices
'seed': 42,
'num_frames': 1e8,
'resumable': False,
}
@ex.named_config
def cfg_test():
cfg = {}
cfg['saving'] = {
'resumable': True,
'checkpoint_configs': True,
}
override = {}
override['saving'] = {
'visdom_server': 'localhost',
}
override['env'] = {
'num_processes': 10,
'num_val_processes': 10,
'env_specific_kwargs': {
'test_mode': True,
'scenario_kwargs': { # specific to the scenario - pointnav or exploration
'max_geodesic_dist': 99999
}
}
}
override['learner'] = {
'test_k_episodes': 994,
'test': True,
}
####################################
# Active Tasks
####################################
@ex.named_config
def planning():
uuid = 'habitat_planning'
cfg = {}
cfg['learner'] = {
'perception_network_kwargs': { # while related to the agent, these are ENVIRONMENT specific
'n_map_channels': 3,
'use_target': True,
}
}
cfg['env'] = {
'env_name': 'Habitat_PointNav', # Environment to use
'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
'transform_fn_pre_aggregation_kwargs': { # the most basic sensors
'names_to_transforms': {
'map': 'identity_transform()',
'global_pos': 'identity_transform()',
'target': 'identity_transform()',
},
'keep_unnamed': False,
},
'transform_fn_post_aggregation_fn': 'TransformFactory.independent',
'transform_fn_post_aggregation_kwargs': {
'names_to_transforms': {
'map':'map_pool_collated((3,84,84))',
},
'keep_unnamed': True,
}
}
@ex.named_config
def exploration():
uuid = 'habitat_exploration'
cfg = {}
cfg['learner'] = {
'lr': 1e-3, # Learning rate for algorithm
'perception_network_kwargs': { # while related to the agent, these are ENVIRONMENT specific
'n_map_channels': 1,
'use_target': False,
}
}
cfg['env'] = {
'env_name': 'Habitat_Exploration', # Environment to use
'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
'transform_fn_pre_aggregation_kwargs': { # the most basic sensors
'names_to_transforms': {
'map': 'identity_transform()',
'global_pos': 'identity_transform()',
},
'keep_unnamed': False,
},
'transform_fn_post_aggregation_fn': 'TransformFactory.independent',
'transform_fn_post_aggregation_kwargs': {
'names_to_transforms': {
'map':'map_pool_collated((1,84,84))',
},
'keep_unnamed': True,
},
# For exploration, always map_pool in the pre_aggregation, do not in post_aggregation
# 'map':rescale_centercrop_resize((1,84,84)),
"env_specific_kwargs": {
'scenario_kwargs': {
'max_episode_steps': 1000,
},
'map_kwargs': {
'map_size': 84,
'fov': np.pi / 2,
'min_depth': 0,
'max_depth': 1.5,
'relative_range': True, # scale the map_range so centered at initial agent position
'map_x_range': [-11, 11],
'map_y_range': [-11, 11],
'fullvision': False, # robot unlocks all cells in what it sees, as opposed to just center to ground
},
'reward_kwargs': {
'slack_reward': 0,
}
},
}
####################################
# Settings
####################################
@ex.named_config
def small_settings5():
# hope this runs well on gestalt, one GPU (moderate RAM requirements)
uuid = 'habitat_small_settings5'
cfg = {}
cfg['learner'] = {
'num_steps': 512, # Length of each rollout (grab 'num_steps' consecutive samples to form rollout)
'replay_buffer_size': 1024, # This is stored on CPU
'on_policy_epoch': 5, # Number of on policy rollouts in each update
'off_policy_epoch': 10,
'num_mini_batch': 24, # Number of PPO minibatches (the rollout is split into this many minibatches when computing losses)
'rollout_value_batch_multiplier': 1,
}
cfg['env'] = {
'num_processes': 6,
'num_val_processes': 1,
}
@ex.named_config
def cvpr_settings():
# Settings we used for CVPR 2019 Habitat Challenge, see https://github.com/facebookresearch/habitat-challenge
uuid = 'habitat_cvpr_settings'
cfg = {}
cfg['learner'] = {
'num_steps': 512, # Length of each rollout (grab 'num_steps' consecutive samples to form rollout)
'replay_buffer_size': 4096, # This is stored on CPU
'on_policy_epoch': 8, # Number of on policy rollouts in each update
'off_policy_epoch': 8,
'num_mini_batch': 8, # Number of PPO minibatches (the rollout is split into this many minibatches when computing losses)
'rollout_value_batch_multiplier': 1,
}
cfg['env'] = {
'num_processes': 6,
'num_val_processes': 1,
}
####################################
# Development
####################################
@ex.named_config
def prototype():
uuid='test'
cfg = {}
cfg['env'] = {
'num_processes': 2,
'num_val_processes': 1,
'env_specific_kwargs': {
'train_scenes': ['Adrian'],
'val_scenes': ['Denmark'],
}
}
cfg['saving'] = {
'log_interval': 2,
'vis_interval': 1,
}
@ex.named_config
def debug():
# this does not use VectorizedEnv, so supports pdb
uuid='test'
cfg = {}
override = {}
cfg['learner'] = {
'num_steps': 100,
'replay_buffer_size': 300,
'deterministic': True,
}
cfg['env'] = {
'num_processes': 1,
'num_val_processes': 0,
'env_specific_kwargs': {
'train_scenes': ['Adrian'],
'debug_mode': True,
}
}
cfg['saving'] = {
'log_interval': 2,
'vis_interval': 1,
}
override['env'] = {
'num_processes': 1,
'num_val_processes': 0,
"env_specific_kwargs": {
'debug_mode': True,
}
}
```
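Each function above decorated with `@ex.named_config` is a Sacred named config: it contributes a partial `cfg` dict that Sacred recursively merges into the experiment's base config when that name is passed on the command line (e.g. `with cfg_habitat prototype`). A minimal self-contained sketch of the mechanism, with an illustrative experiment and keys that are not from the repo:
```python
from sacred import Experiment

ex = Experiment('named_config_sketch')  # stand-in for the repo's experiment object

@ex.config
def base():
    cfg = {'learner': {'lr': 1e-4, 'algo': 'ppo'}}

@ex.named_config
def small_settings():
    cfg = {'learner': {'lr': 1e-3}}  # only this leaf is overridden; 'algo' is kept

@ex.main
def main(cfg):
    print(cfg['learner'])  # with small_settings: {'lr': 0.001, 'algo': 'ppo'}

if __name__ == '__main__':
    ex.run_commandline()  # e.g. python named_config_sketch.py with small_settings
```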
#### File: Side-tuning/configs/icifar_cfg.py
```python
import numpy as np

# NOTE: `ex` (the Sacred Experiment) is expected to be provided by the repo's
# config-loading machinery before this file is evaluated.
@ex.named_config
def cifar10_data():
# HP borrowed from https://github.com/akamaster/pytorch_resnet_cifar10
cfg = {
'learner': {
'lr': 1e-1,
'optimizer_class': 'optim.SGD',
'optimizer_kwargs': {
'momentum': 0.9,
'weight_decay': 1e-4,
},
'lr_scheduler_method': 'optim.lr_scheduler.MultiStepLR',
'lr_scheduler_method_kwargs': {
'milestones': [100,150],
},
'max_grad_norm': None,
'use_feedback': False,
},
'training': {
'dataloader_fn': 'icifar_dataset.get_cifar_dataloaders',
'dataloader_fn_kwargs': {
'data_path': '/mnt/data/cifar10', # for docker
'num_workers': 8,
'pin_memory': True,
'epochlength': 20000,
'batch_size': 128,
'batch_size_val': 256,
},
'loss_fn': 'softmax_cross_entropy',
'loss_kwargs': {},
'use_masks': False,
'sources': [['rgb']], # Len of targets
'targets': [['cifar10']],
'masks': None,
'task_is_classification': [True],
'num_epochs': 1000,
},
'saving': {
'ticks_per_epoch': 5,
'log_interval': 1,
'save_interval': 200,
}
}
N_TASKS = 10
@ex.named_config
def icifar_data():
# 5000/1000 train/val images per class
n_epochs = 4
n_classes = 100
n_tasks = N_TASKS
n = 100 // n_tasks
chunked_classes = []
for i in range((n_classes + n - 1) // n):
chunked_classes.append(np.arange(i * n, (i + 1) * n))
chunked_names = [[f'cifar{cs.min()}-{cs.max()}'] for cs in chunked_classes]
cfg = {
'training': {
# 'split_to_use': 'splits.taskonomy_no_midlevel["debug"]',
'dataloader_fn': 'icifar_dataset.get_dataloaders',
'dataloader_fn_kwargs': {
'data_path': '/mnt/data/cifar100',
'load_to_mem': False, # if dataset small enough, can load activations to memory
'num_workers': 8,
'pin_memory': True,
'epochlength': 5000*n_epochs,
'epochs_until_cycle': 0,
'batch_size': 128,
'batch_size_val': 256,
},
'loss_fn': 'softmax_cross_entropy',
'loss_kwargs': {},
'use_masks': False,
'sources': [['rgb']] * len(chunked_classes), # Len of targets
'targets': chunked_names,
'masks': None,
'task_is_classification': [True] * len(chunked_classes),
'num_epochs': N_TASKS,
},
'saving': {
'ticks_per_epoch': 1,
'log_interval': 1,
'save_interval': 10, # why did s use 1000 here?
},
'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'dataset': 'icifar',
},
'use_feedback': False,
},
}
del n, n_tasks, n_classes, chunked_classes, i, chunked_names, n_epochs
# For Boosting experiment
N_TASKS = 10
@ex.named_config
def icifar0_10_data():
cfg = {
'training': {
# 'split_to_use': 'splits.taskonomy_no_midlevel["debug"]',
'dataloader_fn': 'icifar_dataset.get_dataloaders',
'dataloader_fn_kwargs': {
'data_path': '/mnt/data/cifar100', # for docker
'load_to_mem': False, # if dataset small enough, can load activations to memory
'num_workers': 8,
'pin_memory': True,
'epochlength': 20000,
'batch_size': 128,
'batch_size_val': 256,
},
'loss_fn': 'softmax_cross_entropy',
'loss_kwargs': {},
'use_masks': False,
'sources': [['rgb']] * N_TASKS, # Len of targets
'targets': [['cifar0-9']] * N_TASKS,
'masks': None,
'task_is_classification': [True] * N_TASKS,
}
}
@ex.named_config
def cifar_hp():
uuid = 'no_uuid'
cfg = {}
cfg['learner'] = {
'lr': 1e-3, # Learning rate for algorithm
'optimizer_kwargs' : {
# 'weight_decay': 2e-6
'weight_decay': 0e-6
},
}
@ex.named_config
def debug_cifar100():
cfg = {
'training': {
'dataloader_fn_kwargs': {
'epochlength': 50000 // 128,
},
},
'learner': {
'model_kwargs': {
'num_classes': 100,
}
}
}
##################
# Simple Models
##################
@ex.named_config
def model_resnet_cifar():
cfg = { 'learner': {
'model': 'ResnetiCifar44',
# 'model_kwargs': {'bsp': True, 'period': 1, 'debug': False},
},
'training': {
'resume_from_checkpoint_path': '/mnt/models/resnet44-nolinear-cifar.pth', # user needs to input
'resume_training': True,
}
}
##################
# Initializations
##################
@ex.named_config
def init_lowenergy_cifar():
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'side_class': 'FCN4Reshaped',
'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar-lowenergy.pth',
} } }
@ex.named_config
def init_xavier():
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'side_weights_path': None,
} } }
##################
# BSP - binary superposition
##################
@ex.named_config
def bsp_cifar():
# use binary superposition from https://arxiv.org/pdf/1902.05522
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': 'GenericSidetuneNetwork',
'base_kwargs': {
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar-bsp.pth', # user needs to input
'base_kwargs': {'bsp': True, 'period': 10},
'side_kwargs': {'bsp': True, 'period': 10},
},
} } }
@ex.named_config
def bsp_norecurse_cifar():
# use binary superposition from https://arxiv.org/pdf/1902.05522
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar-bsp.pth', # user needs to input
'base_kwargs': {'bsp': True, 'period': 10},
} } }
@ex.named_config
def bsp_debug():
cfg = { 'learner': {
'model_kwargs': {
'base_kwargs': {'bsp': True, 'debug': True},
}
}}
##################
# Models
##################
@ex.named_config
def model_boosted_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'BoostedNetwork',
'model_kwargs': {
'base_class': None,
'use_baked_encoding': False,
'side_class': 'FCN4Reshaped',
'side_kwargs': {},
'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
'decoder_class': None,
'decoder_weights_path': None, # user can input for smart initialization
'decoder_kwargs': {},
} } }
del n_channels_out
@ex.named_config
def model_boosted_wbase_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'BoostedNetwork',
'model_kwargs': {
'base_class': 'ResnetiCifar44NoLinear',
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': True},
'use_baked_encoding': False,
'side_class': 'FCN4Reshaped',
'side_kwargs': {},
'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
'decoder_class': None,
'decoder_weights_path': None, # user can input for smart initialization
'decoder_kwargs': {},
} } }
del n_channels_out
@ex.named_config
def model_resnet_icifar0_10():
n_channels_out = 3
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': 'ResnetiCifar44NoLinear',
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': False},
'use_baked_encoding': False,
'side_class': None,
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
'decoder_class': None,
'decoder_weights_path': None, # user can input for smart initialization
'decoder_kwargs': {},
} } }
del n_channels_out
@ex.named_config
def model_lifelong_independent_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'side_class': 'GenericSidetuneNetwork',
'side_kwargs': {
'n_channels_in': 3,
'n_channels_out': 8,
'base_class': 'ResnetiCifar44NoLinear',
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': False},
'use_baked_encoding': False,
'side_class': 'FCN4Reshaped',
'side_kwargs': {'eval_only': False},
'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
},
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
} } }
del n_channels_out
@ex.named_config
def model_lifelong_independent_resnet_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': None,
'base_weights_path': None, # user needs to input
'base_kwargs': {},
'use_baked_encoding': False,
'side_class': 'ResnetiCifar44NoLinear',
'side_kwargs': {'eval_only': False},
'side_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
'decoder_class': None,
'decoder_weights_path': None, # user can input for smart initialization
'decoder_kwargs': {},
} } }
del n_channels_out
@ex.named_config
def model_lifelong_independent_fcn4_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': None,
'base_weights_path': None, # user needs to input
'base_kwargs': {},
'use_baked_encoding': False,
'side_class': 'FCN4Reshaped',
'side_kwargs': {'eval_only': False},
'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
'decoder_class': None,
'decoder_weights_path': None, # user can input for smart initialization
'decoder_kwargs': {},
} } }
del n_channels_out
@ex.named_config
def model_lifelong_finetune_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': 'GenericSidetuneNetwork',
'base_kwargs': {
'n_channels_in': 3,
'n_channels_out': 8,
'base_class': 'ResnetiCifar44NoLinear',
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': False},
'use_baked_encoding': False,
'side_class': 'FCN4Reshaped',
'side_kwargs': {'eval_only': False},
'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
},
'use_baked_encoding': False,
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
} } }
del n_channels_out
@ex.named_config
def model_lifelong_finetune_resnet44_cifar():
cfg = {
'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': 'ResnetiCifar44NoLinear',
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': False},
'use_baked_encoding': False,
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
},
},
}
@ex.named_config
def model_lifelong_finetune_fcn4_cifar():
cfg = {
'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': 'FCN4Reshaped',
'base_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': False},
'use_baked_encoding': False,
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
},
},
}
@ex.named_config
def model_lifelong_sidetune_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': 'ResnetiCifar44NoLinear',
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': True },
'use_baked_encoding': False,
'side_class': 'FCN4Reshaped',
'side_kwargs': {'eval_only': False },
'side_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth',
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
} } }
del n_channels_out
@ex.named_config
def model_lifelong_features_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': 'ResnetiCifar44NoLinear',
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': True },
'use_baked_encoding': False,
'side_class': None,
'side_kwargs': {},
'side_weights_path': None,
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
} } }
del n_channels_out
@ex.named_config
def pnn_v2_cifar():
cfg = { 'learner': {
'model_kwargs': {
'base_class': 'ResnetiCifar44NoLinearWithCache',
'side_class': 'FCN4Progressive',
'side_kwargs': {},
'pnn': True,
} } }
@ex.named_config
def pnn_v4_cifar():
cfg = { 'learner': {
'model_kwargs': {
'base_class': 'ResnetiCifar44NoLinearWithCache',
'side_class': 'FCN4ProgressiveH',
'side_kwargs': {},
'pnn': True,
} } }
@ex.named_config
def model_lifelong_sidetune_reverse_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': 'FCN4Reshaped',
'base_weights_path': '/mnt/models/fcn4-from-resnet44-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': True},
'use_baked_encoding': False,
'side_class': 'ResnetiCifar44NoLinear',
'side_kwargs': {'eval_only': False},
'side_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
'decoder_class': None,
'decoder_weights_path': None, # user can input for smart initialization
'decoder_kwargs': {},
} } }
del n_channels_out
@ex.named_config
def model_lifelong_sidetune_double_resnet_cifar():
n_channels_out = 3
cfg = { 'learner': {
'model': 'LifelongSidetuneNetwork',
'model_kwargs': {
'base_class': 'ResnetiCifar44NoLinear',
'base_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth', # user needs to input
'base_kwargs': {'eval_only': True},
'use_baked_encoding': False,
'side_class': 'ResnetiCifar44NoLinear',
'side_kwargs': {'eval_only': False},
'side_weights_path': '/mnt/models/resnet44-nolinear-cifar.pth',
'transfer_class': 'nn.Linear',
'transfer_kwargs': {'in_features': 64, 'out_features': 10},
'transfer_weights_path': None,
'decoder_class': None,
'decoder_weights_path': None, # user can input for smart initialization
'decoder_kwargs': {},
} } }
del n_channels_out
```
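As a sanity check on the chunking logic in `icifar_data` above, the standalone snippet below reproduces the class-chunk computation for `N_TASKS = 10` and shows the task names it produces (`cifar0-9` through `cifar90-99`):
```python
import numpy as np

N_TASKS = 10
n_classes = 100
n = n_classes // N_TASKS  # 10 classes per task

chunked_classes = [np.arange(i * n, (i + 1) * n)
                   for i in range((n_classes + n - 1) // n)]
chunked_names = [[f'cifar{cs.min()}-{cs.max()}'] for cs in chunked_classes]

print(len(chunked_names))                   # 10 tasks
print(chunked_names[0], chunked_names[-1])  # ['cifar0-9'] ['cifar90-99']
```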
#### File: Side-tuning/configs/rl.py
```python
@ex.named_config
def taskonomy_features():
''' Implements an agent with some mid-level feature.
From the paper:
From Learning to Navigate Using Mid-Level Visual Priors (Sax et al. '19)
Taskonomy: Disentangling Task Transfer Learning
<NAME>, <NAME>*, <NAME>*, <NAME>, <NAME>, <NAME>.
2018
Viable feature options are:
[]
'''
uuid = 'habitat_taskonomy_feature'
cfg = {}
cfg['learner'] = {
'perception_network': 'TaskonomyFeaturesOnlyNet',
'perception_network_kwargs': {
'extra_kwargs': {
'main_perception_network': 'TaskonomyFeaturesOnlyNet', # for sidetune
}
}
}
cfg['env'] = {
'env_specific_kwargs': {
'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1
},
'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
'transform_fn_pre_aggregation_kwargs': {
'names_to_transforms': {
'taskonomy':'rescale_centercrop_resize((3,256,256))',
},
},
'transform_fn_post_aggregation_fn': 'TransformFactory.independent',
'transform_fn_post_aggregation_kwargs': {
'names_to_transforms': {
'taskonomy':"taskonomy_features_transform('/mnt/models/curvature_encoder.dat')",
},
'keep_unnamed': True,
}
}
@ex.named_config
def blind():
''' Implements a blinded agent. This has no visual input, but is still able to reason about its movement
via path integration.
'''
uuid = 'blind'
cfg = {}
cfg['learner'] = {
'perception_network': 'TaskonomyFeaturesOnlyNet',
}
cfg['env'] = {
'env_specific_kwargs': {
'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1
},
'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
'transform_fn_pre_aggregation_kwargs': {
'names_to_transforms': {
'taskonomy': 'blind((8,16,16))',
# 'rgb_filled': 'rescale_centercrop_resize((3,84,84))',
},
},
}
@ex.named_config
def midtune():
# Specific type of finetune where we train the policy then open the representation to be learned.
# Specifically, we take trained midlevel agents and finetune all the weights.
uuid = 'habitat_midtune'
cfg = {}
cfg['learner'] = {
'perception_network_reinit': True, # reinitialize the perception_module, used when checkpoint is used
'rollout_value_batch_multiplier': 1,
'perception_network': 'RLSidetuneWrapper',
'perception_network_kwargs': {
'extra_kwargs': {
'main_perception_network': 'TaskonomyFeaturesOnlyNet', # for sidetune
'sidetune_kwargs': {
'n_channels_in': 3,
'n_channels_out': 8,
'normalize_pre_transfer': False,
'base_class': 'FCN5',
'base_kwargs': {'normalize_outputs': False},
'base_weights_path': None, # user needs to specify
'side_class': 'FCN5',
'side_kwargs': {'normalize_outputs': False},
'side_weights_path': None, # user needs to specify
}
}
},
}
cfg['saving'] = {
'checkpoint': None,
}
cfg['env'] = {
'env_specific_kwargs': {
'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1
},
'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
'transform_fn_pre_aggregation_kwargs': {
'names_to_transforms': {
'rgb_filled': 'rescale_centercrop_resize((3,256,256))',
},
},
}
@ex.named_config
def finetune():
uuid = 'habitat_finetune'
cfg = {}
cfg['learner'] = {
'perception_network': 'RLSidetuneWrapper',
'perception_network_kwargs': {
'extra_kwargs': {
'main_perception_network': 'TaskonomyFeaturesOnlyNet', # for sidetune
'sidetune_kwargs': {
'n_channels_in': 3,
'n_channels_out': 8,
'normalize_pre_transfer': False,
'side_class': 'FCN5',
'side_kwargs': {'normalize_outputs': False},
'side_weights_path': None, # user needs to specify
}
}
},
'rollout_value_batch_multiplier': 1,
}
cfg['env'] = {
'env_specific_kwargs': {
'target_dim': 16, # Taskonomy reps: 16, scratch: 9, map_only: 1
},
'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
'transform_fn_pre_aggregation_kwargs': {
'names_to_transforms': {
'rgb_filled': 'rescale_centercrop_resize((3,256,256))',
},
},
}
@ex.named_config
def sidetune():
uuid = 'habitat_sidetune'
cfg = {}
cfg['learner'] = {
'perception_network': 'RLSidetuneWrapper',
'perception_network_kwargs': {
'extra_kwargs': {
'sidetune_kwargs': {
'n_channels_in': 3,
'n_channels_out': 8,
'normalize_pre_transfer': False,
'base_class': 'TaskonomyEncoder',
'base_weights_path': None,
'base_kwargs': {'eval_only': True, 'normalize_outputs': False},
'side_class': 'FCN5',
'side_kwargs': {'normalize_outputs': False},
'side_weights_path': None,
'alpha_blend': True,
},
'attrs_to_remember': ['base_encoding', 'side_output', 'merged_encoding'], # things to remember for supp. losses / visualization
}
},
'rollout_value_batch_multiplier': 1,
}
cfg['env'] = {
'transform_fn_pre_aggregation_fn': 'TransformFactory.independent',
'transform_fn_pre_aggregation_kwargs': {
'names_to_transforms': {
'rgb_filled': 'rescale_centercrop_resize((3,256,256))',
},
},
}
####################################
# Base Network
####################################
@ex.named_config
def rlgsn_base_resnet50():
# base is frozen by default
cfg = {}
cfg['learner'] = {
'perception_network': 'RLSidetuneWrapper',
'perception_network_kwargs': {
'extra_kwargs': {
'sidetune_kwargs': {
'base_class': 'TaskonomyEncoder',
'base_weights_path': None, # user needs to input
'base_kwargs': {'eval_only': True, 'normalize_outputs': False},
}
}
},
}
@ex.named_config
def rlgsn_base_fcn5s():
# base is frozen by default
cfg = {}
cfg['learner'] = {
'perception_network': 'RLSidetuneWrapper',
'perception_network_kwargs': {
'extra_kwargs': {
'sidetune_kwargs': {
'base_class': 'FCN5',
'base_weights_path': None, # user needs to input
'base_kwargs': {'eval_only': True, 'normalize_outputs': False},
}
}
},
}
@ex.named_config
def rlgsn_base_learned():
cfg = {}
cfg['learner'] = {
'perception_network': 'RLSidetuneWrapper',
'perception_network_kwargs': {
'extra_kwargs': {
'sidetune_kwargs': {
'base_kwargs': {'eval_only': False},
}
}
},
}
####################################
# Side Network
####################################
@ex.named_config
def rlgsn_side_resnet50():
# side is learned by default
cfg = {}
cfg['learner'] = {
'perception_network': 'RLSidetuneWrapper',
'perception_network_kwargs': {
'extra_kwargs': {
'sidetune_kwargs': {
'side_class': 'TaskonomyEncoder',
'side_weights_path': None, # user needs to input
'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
}
}
},
}
@ex.named_config
def rlgsn_side_fcn5s():
# side is learned by default
cfg = {}
cfg['learner'] = {
'perception_network': 'RLSidetuneWrapper',
'perception_network_kwargs': {
'extra_kwargs': {
'sidetune_kwargs': {
'side_class': 'FCN5',
'side_weights_path': None, # user needs to input
'side_kwargs': {'eval_only': False, 'normalize_outputs': False},
}
}
},
}
@ex.named_config
def rlgsn_side_frozen():
cfg = {}
cfg['learner'] = {
'perception_network': 'RLSidetuneWrapper',
'perception_network_kwargs': {
'extra_kwargs': {
'sidetune_kwargs': {
'side_kwargs': {'eval_only': True},
}
}
},
}
```
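The `transform_fn_*` entries in the configs above keep transforms as strings (e.g. `'rescale_centercrop_resize((3,256,256))'`), and the config comments require every string value to be `eval()`-able. A minimal sketch of how such string specs can be resolved into callables at load time; `resolve_transforms`, `identity_transform`, and `scale_transform` below are illustrative stand-ins, not the repo's actual factory functions:
```python
# Sketch: resolve string-valued transform specs from a config into callables.
def identity_transform():
    return lambda x: x

def scale_transform(factor):
    return lambda x: x * factor

def resolve_transforms(names_to_transforms, namespace):
    """eval() each string spec against the given namespace to obtain a callable."""
    return {name: eval(spec, namespace)
            for name, spec in names_to_transforms.items()}

cfg = {
    'names_to_transforms': {
        'map': 'identity_transform()',
        'rgb_filled': 'scale_transform(1.0 / 255.0)',
    },
}

transforms = resolve_transforms(cfg['names_to_transforms'],
                                {'identity_transform': identity_transform,
                                 'scale_transform': scale_transform})
print(transforms['rgb_filled'](255.0))  # ~1.0
```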
#### File: evkit/models/chain.py
```python
import copy
import torch.nn as nn
import torch
from torchsummary import summary
class ChainedModel(nn.Module):
def __init__(self, nets):
super().__init__()
self.nets = nets
def forward(self, x):
outputs = []
for net in self.nets:
x = net(x)
outputs.append(x)
return outputs
def ChainDirect(network_constructor, n_channels_list, universal_kwargs={}):
print("Universal kwargs", network_constructor, universal_kwargs)
return network_constructor(in_channels=n_channels_list[0],
out_channels=n_channels_list[1],
**universal_kwargs)
# class ChainDirect(nn.Module):
# def __init__(self, network_constructor, n_channels_list, universal_kwargs={}):
# super().__init__()
# print("Universal kwargs", network_constructor, universal_kwargs)
# self.net = network_constructor(in_channels=n_channels_list[0],
# out_channels=n_channels_list[1],
# **universal_kwargs)
# def initialize_from_checkpoints(self, checkpoint_paths, logger=None):
# for i, (net, ckpt_fpath) in enumerate(zip([self.net], checkpoint_paths)):
# if logger is not None:
# logger.info(f"Loading step {i} from {ckpt_fpath}")
# checkpoint = torch.load(ckpt_fpath)
# sd = {k.replace("module.", ""): v for k, v in checkpoint['state_dict'].items()}
# net.load_state_dict(sd)
# return self
# def forward(self, x):
# return self.net(x)
class ChainedEDModel(nn.Module):
def __init__(self, network_constructor, n_channels_list, universal_kwargs={}):
super().__init__()
self.nets = nn.ModuleList()
for i in range(len(n_channels_list) - 1):
net = network_constructor(in_channels=n_channels_list[i],
out_channels=n_channels_list[i + 1],
**universal_kwargs)
self.nets.append(net)
def initialize_from_checkpoints(self, checkpoint_paths, logger=None):
for i, (net, ckpt_fpath) in enumerate(zip(self.nets, checkpoint_paths)):
if logger is not None:
logger.info(f"Loading step {i} from {ckpt_fpath}")
checkpoint = torch.load(ckpt_fpath)
sd = {k.replace("module.", ""): v for k, v in checkpoint['state_dict'].items()}
net.load_state_dict(sd)
return self
def forward(self, x):
outputs = []
for net in self.nets:
x = net(x)
outputs.append(x)
# x = x[0]
return outputs
# class ChainedEDModelHomo(nn.Module):
# def __init__(self, network_constructor, n_channels_list, universal_kwargs={}):
# super().__init__()
# self.nets = nn.ModuleList()
# for i in range(len(n_channels_list) - 1):
# net = network_constructor(in_channels=n_channels_list[i],
# out_channels=n_channels_list[i+1],
# **universal_kwargs)
# self.nets.append(net)
# def initialize_from_checkpoints(self, checkpoint_paths, logger=None):
# for i, (net, ckpt_fpath) in enumerate(zip(self.nets, checkpoint_paths)):
# if logger is not None:
# logger.info(f"Loding step {i} from {ckpt_fpath}")
# checkpoint = torch.load(ckpt_fpath)
# sd = {k.replace("module.", ""): v for k, v in checkpoint['state_dict'].items()}
# net.load_state_dict(sd)
# return self
# def forward(self, x):
# outputs = []
# for net in self.nets:
# x = net(x)
# outputs.append(x)
# return outputs
class ChainedEDModelWithUncertaintyChannel(nn.Module):
def __init__(self, network_constructor, n_channels_list, universal_kwargs={}):
super().__init__()
self.nets = nn.ModuleList()
for i in range(len(n_channels_list) - 1):
in_channels = n_channels_list[i] if i == 0 else n_channels_list[i] + 1
net = network_constructor(in_channels=in_channels,
out_channels=n_channels_list[i + 1],
**universal_kwargs)
self.nets.append(net)
def __pad_with_zeros(self, weights):
new_shape = list(weights.shape)
new_shape[1] = new_shape[1] + 1
new_params = torch.zeros(new_shape)
new_params[:, :new_shape[1] - 1] = weights
print(new_params.shape)
return new_params
def initialize_from_checkpoints(self, checkpoint_paths, logger=None):
for i, (net, ckpt_fpath) in enumerate(zip(self.nets, checkpoint_paths)):
if logger is not None:
logger.info(f"Loading step {i} from {ckpt_fpath}")
checkpoint = torch.load(ckpt_fpath)
if logger is not None:
logger.info(f"Loaded epoch {checkpoint['epoch']} from {ckpt_fpath}")
sd = {k.replace("module.", ""): v for k, v in checkpoint['state_dict'].items()}
if i > 0:
sd['down1.conv1.weight'] = self.__pad_with_zeros(sd['down1.conv1.weight'])
net.load_state_dict(sd)
return self
def forward(self, x):
outputs = []
for net in self.nets:
x = net(x)
outputs.append(x)
x = torch.cat(x, dim=1)
return outputs
class ChainedEDModelWithLinearUncertaintyChannel(nn.Module):
def __init__(self, network_constructor, n_channels_list, universal_kwargs={}):
super().__init__()
self.nets = nn.ModuleList()
for i in range(len(n_channels_list) - 1):
in_channels = n_channels_list[i] if i == 0 else 2 * n_channels_list[i]
net = network_constructor(in_channels=in_channels,
out_channels=n_channels_list[i + 1],
**universal_kwargs)
self.nets.append(net)
def __pad_with_zeros(self, weights):
new_shape = list(weights.shape)
print(weights.shape)
new_shape[1] = 2 * new_shape[1]
new_params = torch.zeros(new_shape)
new_params[:, :weights.shape[1]] = weights
print(new_params.shape)
return new_params
def initialize_from_checkpoints(self, checkpoint_paths, logger=None):
for i, (net, ckpt_fpath) in enumerate(zip(self.nets, checkpoint_paths)):
if logger is not None:
logger.info(f"Loading step {i} from {ckpt_fpath}")
checkpoint = torch.load(ckpt_fpath)
sd = {k.replace("module.", ""): v for k, v in checkpoint['state_dict'].items()}
if i > 0:
sd['down1.conv1.weight'] = self.__pad_with_zeros(sd['down1.conv1.weight'])
net.load_state_dict(sd)
return self
def forward(self, x):
outputs = []
for net in self.nets:
x = net(x)
outputs.append(x)
x = torch.cat(x, dim=1)
return outputs
```
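`ChainedEDModel` above chains several encoder-decoder networks, feeding each network's output into the next and returning every intermediate output. A quick shape check with a toy constructor; the import assumes the Side-tuning repo is on the Python path, and `toy_net` is just a placeholder for a real encoder-decoder:
```python
import torch
import torch.nn as nn

from evkit.models.chain import ChainedEDModel  # assumes the Side-tuning repo is importable

# Toy constructor with the (in_channels, out_channels) signature ChainedEDModel expects;
# a real use would pass one of the repo's encoder-decoder constructors instead.
def toy_net(in_channels, out_channels):
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)

model = ChainedEDModel(toy_net, n_channels_list=[3, 8, 1])
outputs = model(torch.randn(2, 3, 64, 64))
print([tuple(o.shape) for o in outputs])  # [(2, 8, 64, 64), (2, 1, 64, 64)]
```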
#### File: evkit/models/expert.py
```python
import numpy as np
import os
import torch
from PIL import Image
import torchvision.transforms as transforms
from tlkit.data.img_transforms import MAKE_RESCALE_0_1_NEG1_POS1
SPLITS = ['train', 'val', 'test']
t_to_np = lambda x: (np.transpose(x.cpu().numpy(), (1,2,0)) * 255).astype(np.uint8)
show = lambda x: Image.fromarray(t_to_np(x))
rgb_t = transforms.Compose([
transforms.CenterCrop([256, 256]),
transforms.Resize(256),
transforms.ToTensor(),
MAKE_RESCALE_0_1_NEG1_POS1(3),
])
class Expert():
def __init__(self, data_dir, compare_with_saved_trajs=False, follower=None):
self.data_dir = data_dir
self.compare_with_saved_trajs = compare_with_saved_trajs # lets us compare all observations with the saved expert trajectories
self.traj_dir = None
self.action_idx = 0
self.same_as_il = True
self.follower = None
if follower is not None:
checkpoint_obj = torch.load(follower)
start_epoch = checkpoint_obj['epoch']
print('Loaded imitator (epoch {}) from {}'.format(start_epoch, follower))
self.follower = checkpoint_obj['model']
self.follower.eval()
def reset(self, envs):
scene_id = envs.env.env.env._env.current_episode.scene_id
scene_id = scene_id.split('/')[-1].split('.')[0]
episode_id = envs.env.env.env._env.current_episode.episode_id
self.traj_dir = self._find_traj_dir(scene_id, episode_id)
self.action_idx = 0
self.same_as_il = True
if self.follower is not None:
self.follower.reset()
def _find_traj_dir(self, scene_id, episode_id):
for split in SPLITS:
for building in os.listdir(os.path.join(self.data_dir, split)):
if building == scene_id:
for episode in os.listdir(os.path.join(self.data_dir, split, building)):
if str(episode_id) in episode:
return os.path.join(self.data_dir, split, building, episode)
assert False, f'Could not find scene {scene_id}, episode {episode_id} in {self.data_dir}'
def _load_npz(self, fn):
path = os.path.join(self.traj_dir, fn)
return np.load(path)['arr_0']
def _cmp_with_il(self, k, observations, img=False, save_png=False, printer=True):
if k not in observations:
if printer:
print(f'Key {k}: cannot find')
return 0
if img:
il_obj = Image.open(os.path.join(self.traj_dir, f'{k}_{self.action_idx:03d}.png'))
il_obj = rgb_t(il_obj).cuda()
else:
il_obj = torch.Tensor(self._load_npz(f'{k}_{self.action_idx:03d}.npz')).cuda()
num_channels = observations[k].shape[1] // 4
rl_obj = observations[k][0][-num_channels:]
if save_png:
debug_dir = os.path.join('/mnt/data/debug/', str(self.action_idx))
os.makedirs(debug_dir, exist_ok=True)
show(il_obj).save(os.path.join(debug_dir, f'il_{k}.png'))
show(rl_obj).save(os.path.join(debug_dir, f'rl_{k}.png'))
diff = torch.sum(il_obj - rl_obj)
if printer:
print(f'Key {k}: {diff}')
return diff
def _debug(self, observations):
self._cmp_with_il('map', observations, save_png=self.same_as_il, printer=self.same_as_il)
self._cmp_with_il('target', observations, printer=self.same_as_il)
self._cmp_with_il('rgb_filled', observations, img=True, save_png=self.same_as_il, printer=self.same_as_il)
self._cmp_with_il('taskonomy', observations, printer=self.same_as_il)
def act(self, observations, states, mask_done, deterministic=True):
action = self._load_npz(f'action_{self.action_idx:03d}.npz')
if len(action.shape) == 0:
action = int(action)
else:
action = np.argmax(action)
if self.compare_with_saved_trajs:
mapd = self._cmp_with_il('map', observations, printer=False)
taskonomyd = self._cmp_with_il('taskonomy', observations, printer=False)
targetd = self._cmp_with_il('target', observations, printer=False)
if abs(mapd) > 0.1 or abs(taskonomyd) > 0.1 or abs(targetd) > 1e-6:
print('-' * 50)
print(f'Running {self.traj_dir} on step {self.action_idx}. expert: {action}, targetd {targetd} mapdif {mapd}, taskdif {taskonomyd}')
self._debug(observations)
self.same_as_il = False
if self.follower is not None:
_, follower_action, _, _ = self.follower.act(observations, states, mask_done, True)
follower_action = follower_action.squeeze(1).cpu().numpy()[0]
if follower_action != action and action != 3:
print('-' * 50)
print(f'Running {self.traj_dir} on step {self.action_idx}. expert: {action}, follower: {follower_action}, mapdif {mapd}, taskdif {taskonomyd}')
self._debug(observations)
self.same_as_il = False
if action == 3:
action = 2
self.action_idx += 1
return 0, torch.Tensor([action]).unsqueeze(1), 0, states
def cuda(self, *inputs, **kwargs):
pass
def eval(self, *inputs, **kwargs):
pass
```
#### File: evkit/models/forward_inverse.py
```python
from gym import spaces
import multiprocessing.dummy as mp
import multiprocessing
import numpy as np
import os
import torch
import torch.nn as nn
from torch.nn import Parameter, ModuleList
import torch.nn.functional as F
from evkit.rl.utils import init, init_normc_
from evkit.utils.misc import is_cuda
from evkit.preprocess import transforms
import pickle as pkl
init_ = lambda m: init(m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
################################
# Forward Models
# Predict s_{t+1} | s_t, a_t
################################
class ForwardModel(nn.Module):
def __init__(self, state_shape, action_shape, hidden_size):
super().__init__()
self.fc1 = init_(nn.Linear(state_shape + action_shape[1], hidden_size))
self.fc2 = init_(nn.Linear(hidden_size, state_shape))
def forward(self, state, action):
x = torch.cat([state, action], 1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
################################
# Inverse Models
# Predict a_t | s_t, s_{t+1}
################################
class InverseModel(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.fc1 = init_(nn.Linear(input_size * 2, hidden_size))
# NOTE: remember to stop the gradient here
self.fc2 = init_(nn.Linear(hidden_size, output_size))
def forward(self, phi_t, phi_t_plus_1):
x = torch.cat([phi_t, phi_t_plus_1], 1)
x = F.relu(self.fc1(x))
logits = self.fc2(x)
return logits
# ainvprobs = F.softmax(logits, dim=-1)
```
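A quick shape check for the two dynamics models above; the dimensions are arbitrary and the import assumes the Side-tuning repo is importable:
```python
import torch

from evkit.models.forward_inverse import ForwardModel, InverseModel  # assumes repo importable

B, state_dim, action_dim, hidden = 4, 128, 3, 256

fwd = ForwardModel(state_shape=state_dim, action_shape=(B, action_dim), hidden_size=hidden)
inv = InverseModel(input_size=state_dim, hidden_size=hidden, output_size=action_dim)

s_t = torch.randn(B, state_dim)
a_t = torch.randn(B, action_dim)

s_t1_pred = fwd(s_t, a_t)         # (B, state_dim): predicted next-state features
a_t_logits = inv(s_t, s_t1_pred)  # (B, action_dim): logits over actions
print(s_t1_pred.shape, a_t_logits.shape)
```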
#### File: evkit/models/srl_architectures.py
```python
import torch.nn as nn
from torch.nn import Parameter, ModuleList
import torch.nn.functional as F
import torch
import multiprocessing
import numpy as np
import os
from gym import spaces
from torchvision.models import resnet18
from evkit.rl.utils import init, init_normc_
from evkit.preprocess import transforms
import torchvision as vision
from evkit.models.architectures import FrameStacked, Flatten, atari_conv
init_ = lambda m: init(m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
N_CHANNELS = 3
def getNChannels():
return N_CHANNELS
########################
# SRL
########################
class BaseModelSRL(nn.Module):
"""
Base Class for a SRL network
It implements a getState method to retrieve a state from observations
"""
def __init__(self):
super(BaseModelSRL, self).__init__()
def getStates(self, observations):
"""
:param observations: (th.Tensor)
:return: (th.Tensor)
"""
return self.forward(observations)
def forward(self, x):
raise NotImplementedError
class BaseModelAutoEncoder(BaseModelSRL):
"""
Base Class for a SRL network (autoencoder family)
It implements a getState method to retrieve a state from observations
"""
def __init__(self, n_frames, n_map_channels=0, use_target=True, output_size=512):
super(BaseModelAutoEncoder, self).__init__()
self.output_size = output_size
self.n_frames = n_frames
self.n_map_channels = n_map_channels
self.use_target = use_target
self.use_map = n_map_channels > 0
if self.use_map:
self.map_tower = nn.Sequential(
atari_conv(self.n_frames * self.n_map_channels),
nn.Conv2d(32, 64, kernel_size=4, stride=1), #, padding=3, bias=False),
nn.ReLU(inplace=True),
)
if self.use_target:
self.target_channels = 3
else:
self.target_channels = 0
# Inspired by ResNet:
# conv3x3 followed by BatchNorm2d
self.encoder_conv = nn.Sequential(
# 224x224xN_CHANNELS -> 112x112x64
nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1), # 56x56x64
conv3x3(in_planes=64, out_planes=64, stride=1), # 56x56x64
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2), # 27x27x64
conv3x3(in_planes=64, out_planes=64, stride=2), # 14x14x64
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2) # 6x6x64
)
self.decoder_conv = nn.Sequential(
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 13x13x64
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 27x27x64
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 55x55x64
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 111x111x64
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, getNChannels(), kernel_size=4, stride=2), # 224x224xN_CHANNELS
)
self.encoder = FrameStacked(self.encoder_conv, self.n_frames)
self.conv1 = nn.Conv2d(self.n_frames * (64 + self.target_channels), 64, 3, stride=1) # c4 s 4
self.flatten = Flatten()
self.fc1 = init_(nn.Linear(64 * 4 * 4 * (self.use_map) + 64 * 4 * 4 * (1), 1024))
self.fc2 = init_(nn.Linear(1024, self.output_size))
def getStates(self, observations):
"""
:param observations: (th.Tensor)
:return: (th.Tensor)
"""
return self.encode(observations)
def encode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
return self.encoder_conv(x)
def decode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
return self.decoder_conv(x)
def forward(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
x_taskonomy = x['taskonomy']
if self.use_target:
x_taskonomy = torch.cat([x_taskonomy, x["target"]], dim=1)
x_taskonomy = F.relu(self.conv1(x_taskonomy))
if self.use_map:
x_map = x['map']
x_map = self.map_tower(x_map)
x_taskonomy = torch.cat([x_map, x_taskonomy], dim=1)
x = self.flatten(x_taskonomy)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return x
# NOTE: the lines below were unreachable (dead code after the return above);
# they are kept only as a reference to the original autoencoder path.
# encoded = self.encode(x)
# decoded = self.decode(encoded).view(input_shape)
# return encoded  # , decoded
def conv3x3(in_planes, out_planes, stride=1):
"""
From PyTorch Resnet implementation
3x3 convolution with padding
:param in_planes: (int)
:param out_planes: (int)
:param stride: (int)
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def srl_features_transform(task_path, dtype=np.float32):
''' Builds an observation transform that encodes images with a small SRL conv encoder.
Args:
task_path: path to an encoder checkpoint, 'None' for randomly initialized weights,
or 'pixels_as_state' to pass pixels through unchanged
dtype: of the output (must be np, not torch)
Returns:
a thunk which takes the observation space and returns (transform, output_space)
'''
_rescale_thunk = transforms.rescale_centercrop_resize((3, 224, 224))
if task_path != 'pixels_as_state':
# net = TaskonomyEncoder().cuda()
net = nn.Sequential(
# 224x224xN_CHANNELS -> 112x112x64
nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1), # 56x56x64
conv3x3(in_planes=64, out_planes=64, stride=1), # 56x56x64
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2), # 27x27x64
conv3x3(in_planes=64, out_planes=64, stride=2), # 14x14x64
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2) # 6x6x64
).cuda()
net.eval()
if task_path != 'None':
checkpoint = torch.load(task_path)
# checkpoint = {k.replace('model.encoder_conv.', ''): v for k, v in checkpoint.items() if 'encoder_conv' in k}
checkpoint = {k.replace('model.conv_layers.', '').replace('model.encoder_conv.', ''): v for k, v in checkpoint.items() if 'encoder_conv' in k or 'conv_layers' in k}
net.load_state_dict(checkpoint)
def encode(x):
if task_path == 'pixels_as_state':
return x
with torch.no_grad():
return net(x)
def _features_transform_thunk(obs_space):
rescale, _ = _rescale_thunk(obs_space)
def pipeline(x):
# x = rescale(x).view(1, 3, 224, 224)
x = torch.Tensor(x).cuda()
x = encode(x)
return x.cpu()
if task_path == 'pixels_as_state':
raise NotImplementedError
# unreachable (and pixels_as_state_pipeline is undefined here):
# return pixels_as_state_pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)
else:
return pipeline, spaces.Box(-1, 1, (64, 6, 6), dtype)
return _features_transform_thunk
```
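The `(64, 6, 6)` output space returned at the end of `srl_features_transform` follows from the strides of the conv stack; the standalone check below rebuilds the same layer sequence and verifies the shape for a 224x224 RGB input:
```python
import torch
import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1):
    # same helper as in srl_architectures.py
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)

encoder = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),      # 224 -> 112
    nn.BatchNorm2d(64), nn.ReLU(inplace=True),
    nn.MaxPool2d(kernel_size=3, stride=2, padding=1),                      # 112 -> 56
    conv3x3(64, 64, stride=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True),  # 56 -> 56
    nn.MaxPool2d(kernel_size=3, stride=2),                                 # 56 -> 27
    conv3x3(64, 64, stride=2), nn.BatchNorm2d(64), nn.ReLU(inplace=True),  # 27 -> 14
    nn.MaxPool2d(kernel_size=3, stride=2),                                 # 14 -> 6
)
print(encoder(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 64, 6, 6])
```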
#### File: evkit/models/unet.py
```python
import os, sys, math, random, itertools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.checkpoint import checkpoint
# from utils import *
class UNet_up_block(nn.Module):
def __init__(self, prev_channel, input_channel, output_channel, up_sample=True):
super().__init__()
self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear')
self.conv1 = nn.Conv2d(prev_channel + input_channel, output_channel, 3, padding=1)
self.bn1 = nn.GroupNorm(8, output_channel)
self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn2 = nn.GroupNorm(8, output_channel)
self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn3 = nn.GroupNorm(8, output_channel)
self.relu = torch.nn.ReLU()
self.up_sample = up_sample
def forward(self, prev_feature_map, x):
if self.up_sample:
x = self.up_sampling(x)
x = torch.cat((x, prev_feature_map), dim=1)
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn2(self.conv2(x)))
x = self.relu(self.bn3(self.conv3(x)))
return x
class UNet_down_block(nn.Module):
def __init__(self, input_channel, output_channel, down_size=True):
super().__init__()
self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1)
self.bn1 = nn.GroupNorm(8, output_channel)
self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn2 = nn.GroupNorm(8, output_channel)
self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn3 = nn.GroupNorm(8, output_channel)
self.max_pool = nn.MaxPool2d(2, 2)
self.relu = nn.ReLU()
self.down_size = down_size
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn2(self.conv2(x)))
x = self.relu(self.bn3(self.conv3(x)))
if self.down_size:
x = self.max_pool(x)
return x
class UNet(nn.Module):
def __init__(self, downsample=6, in_channels=3, out_channels=3):
super().__init__()
self.in_channels, self.out_channels, self.downsample = in_channels, out_channels, downsample
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(0, downsample)]
)
bottleneck = 2 ** (4 + downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, downsample)]
)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.down1(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
x = self.relu(self.last_bn(self.last_conv1(x)))
# x = self.relu(self.last_conv2(x))
x = self.last_conv2(x)
# x = F.tanh(x)
x = x.clamp(min=-1.0, max=1.0) # -1, F.tanh(x)
return x
def loss(self, pred, target):
loss = torch.tensor(0.0, device=pred.device)
return loss, (loss.detach(),)
class UNetHeteroscedasticFull(nn.Module):
def __init__(self, downsample=6, in_channels=3, out_channels=3, eps=1e-5):
super().__init__()
self.in_channels, self.out_channels, self.downsample = in_channels, out_channels, downsample
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(0, downsample)]
)
bottleneck = 2 ** (4 + downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, downsample)]
)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
self.last_conv1_uncert = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn_uncert = nn.GroupNorm(8, 16)
self.last_conv2_uncert = nn.Conv2d(16, ((out_channels + 1) * out_channels) // 2, 1, padding=0) # integer division so the channel count is an int
self.relu = nn.ReLU()
self.eps = eps
def forward(self, x):
x = self.down1(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
mu = self.relu(self.last_bn(self.last_conv1(x)))
mu = self.last_conv2(mu)
# mu = F.tanh(mu)
mu = mu.clamp(min=-1.0, max=1.0)
scale = self.relu(self.last_bn_uncert(self.last_conv1_uncert(x)))
scale = self.last_conv2_uncert(scale)
scale = F.softplus(scale)
return mu, scale
class UNetHeteroscedasticIndep(nn.Module):
def __init__(self, downsample=6, in_channels=3, out_channels=3, eps=1e-5):
super().__init__()
self.in_channels, self.out_channels, self.downsample = in_channels, out_channels, downsample
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(0, downsample)]
)
bottleneck = 2 ** (4 + downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, downsample)]
)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
self.last_conv1_uncert = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn_uncert = nn.GroupNorm(8, 16)
self.last_conv2_uncert = nn.Conv2d(16, out_channels, 1, padding=0)
self.relu = nn.ReLU()
self.eps = eps
def forward(self, x):
x = self.down1(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
mu = self.relu(self.last_bn(self.last_conv1(x)))
mu = self.last_conv2(mu)
# mu = F.tanh(mu)
mu = mu.clamp(min=-1.0, max=1.0)
scale = self.relu(self.last_bn_uncert(self.last_conv1_uncert(x)))
scale = self.last_conv2_uncert(scale)
scale = F.softplus(scale)
return mu, scale
class UNetHeteroscedasticPooled(nn.Module):
def __init__(self, downsample=6, in_channels=3, out_channels=3, eps=1e-5, use_clamp=False):
super().__init__()
self.in_channels, self.out_channels, self.downsample = in_channels, out_channels, downsample
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(0, downsample)]
)
bottleneck = 2 ** (4 + downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, downsample)]
)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
self.last_conv1_uncert = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn_uncert = nn.GroupNorm(8, 16)
self.last_conv2_uncert = nn.Conv2d(16, 1, 1, padding=0)
self.relu = nn.ReLU()
self.eps = eps
self.use_clamp = use_clamp
def forward(self, x):
x = self.down1(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
mu = self.relu(self.last_bn(self.last_conv1(x)))
mu = self.last_conv2(mu)
if self.use_clamp:
mu = mu.clamp(min=-1.0, max=1.0)
else:
mu = F.tanh(mu)
# mu = mu.clamp(min=-1.0, max=1.0)
scale = self.relu(self.last_bn_uncert(self.last_conv1_uncert(x)))
scale = self.last_conv2_uncert(scale)
scale = F.softplus(scale)
return mu, scale
class UNetReshade(nn.Module):
def __init__(self, downsample=6, in_channels=3, out_channels=3):
super().__init__()
self.in_channels, self.out_channels, self.downsample = in_channels, out_channels, downsample
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(0, downsample)]
)
bottleneck = 2 ** (4 + downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, downsample)]
)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.down1(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
x = self.relu(self.last_bn(self.last_conv1(x)))
x = self.relu(self.last_conv2(x))
x = x.clamp(max=1, min=0).mean(dim=1, keepdim=True)
x = x.expand(-1, 3, -1, -1)
return x
def loss(self, pred, target):
loss = torch.tensor(0.0, device=pred.device)
return loss, (loss.detach(),)
class ConvBlock(nn.Module):
def __init__(self, f1, f2, kernel_size=3, padding=1, use_groupnorm=True, groups=8, dilation=1, transpose=False):
super().__init__()
self.transpose = transpose
self.conv = nn.Conv2d(f1, f2, (kernel_size, kernel_size), dilation=dilation, padding=padding * dilation)
if self.transpose:
self.convt = nn.ConvTranspose2d(
f1, f1, (3, 3), dilation=dilation, stride=2, padding=dilation, output_padding=1
)
if use_groupnorm:
self.bn = nn.GroupNorm(groups, f1)
else:
self.bn = nn.BatchNorm2d(f1)
def forward(self, x):
# x = F.dropout(x, 0.04, self.training)
x = self.bn(x)
if self.transpose:
# x = F.upsample(x, scale_factor=2, mode='bilinear')
x = F.relu(self.convt(x))
# x = x[:, :, :-1, :-1]
x = F.relu(self.conv(x))
return x
def load_from_file(net, checkpoint_path):
checkpoint = torch.load(checkpoint_path)
sd = {k.replace("module.", ""): v for k, v in checkpoint['state_dict'].items()}
net.load_state_dict(sd)
for p in net.parameters():
p.requires_grad = False
return net
```
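A minimal sketch (not the repo's exact loss) of how the `(mu, scale)` pair returned by the heteroscedastic UNets above can be trained: the negative log-likelihood of the target under a Laplace (double-exponential) distribution with a per-pixel scale. Shapes and the `eps` guard are illustrative assumptions.
```python
import torch
from torch.distributions import Laplace

def heteroscedastic_laplace_nll(mu, scale, target, eps=1e-5):
    # eps keeps the scale strictly positive before building the distribution
    dist = Laplace(mu, scale + eps)
    return -dist.log_prob(target).mean()  # average NLL over batch and pixels

# Illustrative tensors only; a real call would use UNetHeteroscedasticPooled outputs,
# whose single-channel scale broadcasts over the prediction channels.
mu = torch.zeros(2, 3, 64, 64)
scale = torch.nn.functional.softplus(torch.randn(2, 1, 64, 64))
target = torch.rand(2, 3, 64, 64) * 2 - 1
loss = heteroscedastic_laplace_nll(mu, scale, target)
```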
#### File: rl/algo/ppo_curiosity.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import random
class PPOCuriosity(object):
def __init__(self,
actor_critic,
clip_param,
ppo_epoch,
num_mini_batch,
value_loss_coef,
entropy_coef,
optimizer=None,
lr=None,
eps=None,
max_grad_norm=None,
amsgrad=True,
weight_decay=0.0):
self.actor_critic = actor_critic
self.clip_param = clip_param
self.ppo_epoch = ppo_epoch
self.num_mini_batch = num_mini_batch
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.forward_loss_coef = 0.2
self.inverse_loss_coef = 0.8
self.curiosity_coef = 0.2
self.original_task_reward_proportion = 1.0
self.max_grad_norm = max_grad_norm
self.optimizer = optimizer
if self.optimizer is None:
self.optimizer = optim.Adam(actor_critic.parameters(),
lr=lr,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad)
self.last_grad_norm = None
def update(self, rollouts):
advantages = rollouts.returns * self.original_task_reward_proportion - rollouts.value_preds
# advantages = (advantages - advantages.mean()) / (
# advantages.std() + 1e-5)
value_loss_epoch = 0
action_loss_epoch = 0
dist_entropy_epoch = 0
max_importance_weight_epoch = 0
self.forward_loss_epoch = 0
self.inverse_loss_epoch = 0
for e in range(self.ppo_epoch):
if hasattr(self.actor_critic.base, 'gru'):
data_generator = rollouts.recurrent_generator(
advantages, self.num_mini_batch)
raise NotImplementedError("PPOCuriosity has not implemented for recurrent networks because masking is undefined")
else:
# data_generator = rollouts.feed_forward_generator(
# advantages, self.num_mini_batch)
data_generator = rollouts.feed_forward_generator_with_next_state(
advantages, self.num_mini_batch)
for sample in data_generator:
observations_batch, next_observations_batch, rnn_history_state, actions_batch, \
return_batch, masks_batch, old_action_log_probs_batch, \
adv_targ = sample
# observations_batch, rnn_history_state, actions_batch, \
# return_batch, masks_batch, old_action_log_probs_batch, \
# adv_targ = sample
# Reshape to do in a single forward pass for all steps
values, action_log_probs, dist_entropy, next_rnn_history_state, state_features = self.actor_critic.evaluate_actions(
observations_batch, rnn_history_state,
masks_batch, actions_batch)
# import pdb
# pdb.set_trace()
                # masks_batch is from state_t but we need it for state_t_plus_1. Bad if recurrent!
value, next_state_features, _ = self.actor_critic.base(
next_observations_batch, next_rnn_history_state, masks_batch)
# Curiosity
# Inverse Loss
pred_action = self.actor_critic.base.inverse_model(state_features.detach(), next_state_features)
self.inverse_loss = F.cross_entropy(pred_action, actions_batch.squeeze(1))
# Forward Loss: Only works for categorical actions
one_hot_actions = torch.zeros((actions_batch.shape[0], self.actor_critic.dist.num_outputs), device=actions_batch.device)
one_hot_actions.scatter_(1, actions_batch, 1.0)
pred_next_state = self.actor_critic.base.forward_model(state_features.detach(), one_hot_actions)
self.forward_loss = F.mse_loss(pred_next_state, next_state_features.detach())
# Exploration bonus
curiosity_bonus = (1.0 - self.original_task_reward_proportion) * self.curiosity_coef * self.forward_loss
return_batch += curiosity_bonus
adv_targ += curiosity_bonus
adv_targ = (adv_targ - adv_targ.mean()) / (adv_targ.std() + 1e-5)
ratio = torch.exp(action_log_probs - old_action_log_probs_batch)
clipped_ratio = torch.clamp(ratio,
1.0 - self.clip_param,
1.0 + self.clip_param)
surr1 = ratio * adv_targ
surr2 = clipped_ratio * adv_targ
self.action_loss = -torch.min(surr1, surr2).mean()
# value_loss = torch.mean(clipped_ratio * (values - return_batch) ** 2)
self.value_loss = F.mse_loss(values, return_batch)
self.dist_entropy = dist_entropy
self.optimizer.zero_grad()
self.get_loss().backward()
                nn.utils.clip_grad_norm_(self.actor_critic.base.forward_model.parameters(), self.max_grad_norm)
                nn.utils.clip_grad_norm_(self.actor_critic.base.inverse_model.parameters(), self.max_grad_norm)
self.last_grad_norm = nn.utils.clip_grad_norm_(
self.actor_critic.parameters(),
self.max_grad_norm)
self.optimizer.step()
value_loss_epoch += self.value_loss.item()
action_loss_epoch += self.action_loss.item()
dist_entropy_epoch += self.dist_entropy.item()
self.forward_loss_epoch += self.forward_loss.item()
self.inverse_loss_epoch += self.inverse_loss.item()
max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
num_updates = self.ppo_epoch * self.num_mini_batch
value_loss_epoch /= num_updates
action_loss_epoch /= num_updates
dist_entropy_epoch /= num_updates
self.forward_loss_epoch /= num_updates
self.inverse_loss_epoch /= num_updates
self.last_update_max_importance_weight = max_importance_weight_epoch
return value_loss_epoch, action_loss_epoch, dist_entropy_epoch, {}
def get_loss(self):
return self.value_loss * self.value_loss_coef \
+ self.action_loss \
- self.dist_entropy * self.entropy_coef \
+ self.forward_loss * self.forward_loss_coef \
+ self.inverse_loss * self.inverse_loss_coef
class PPOReplayCuriosity(object):
def __init__(self,
actor_critic,
clip_param,
ppo_epoch,
num_mini_batch,
value_loss_coef,
entropy_coef,
on_policy_epoch,
off_policy_epoch,
lr=None,
eps=None,
max_grad_norm=None,
amsgrad=True,
weight_decay=0.0,
curiosity_reward_coef=0.1,
forward_loss_coef=0.2,
inverse_loss_coef=0.8):
self.actor_critic = actor_critic
self.clip_param = clip_param
self.ppo_epoch = ppo_epoch
self.on_policy_epoch = on_policy_epoch
self.off_policy_epoch = off_policy_epoch
self.num_mini_batch = num_mini_batch
self.value_loss_coef = value_loss_coef
self.entropy_coef = entropy_coef
self.forward_loss_coef = forward_loss_coef
self.inverse_loss_coef = inverse_loss_coef
self.curiosity_reward_coef = curiosity_reward_coef
self.max_grad_norm = max_grad_norm
self.optimizer = optim.Adam(actor_critic.parameters(),
lr=lr,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad)
self.last_grad_norm = None
def update(self, rollouts):
value_loss_epoch = 0
action_loss_epoch = 0
dist_entropy_epoch = 0
self.forward_loss_epoch = 0
self.inverse_loss_epoch = 0
max_importance_weight_epoch = 0
on_policy = [0] * self.on_policy_epoch
off_policy = [1] * self.off_policy_epoch
epochs = on_policy + off_policy
random.shuffle(epochs)
for e in epochs:
if e == 0:
data_generator = rollouts.feed_forward_generator_with_next_state(
None, self.num_mini_batch, on_policy=True)
else:
data_generator = rollouts.feed_forward_generator_with_next_state(
None, self.num_mini_batch, on_policy=False)
for sample in data_generator:
observations_batch, next_observations_batch, states_batch, actions_batch, \
return_batch, masks_batch, old_action_log_probs_batch, \
adv_targ = sample
actions_batch_long = actions_batch.type(torch.cuda.LongTensor)
# Reshape to do in a single forward pass for all steps
values, action_log_probs, dist_entropy, next_states_batch = self.actor_critic.evaluate_actions(
observations_batch, states_batch, masks_batch, actions_batch)
# Curiosity
# Inverse Loss
state_feats = self.actor_critic.base.perception_unit(observations_batch)
next_state_feats = self.actor_critic.base.perception_unit(next_observations_batch)
pred_action = self.actor_critic.base.inverse_model(
state_feats,
next_state_feats)
self.inverse_loss = F.cross_entropy(pred_action,
actions_batch_long.squeeze(1))
# Forward Loss: Only works for categorical actions
one_hot_actions = torch.zeros((actions_batch.shape[0], self.actor_critic.dist.num_outputs),
device=actions_batch.device)
one_hot_actions.scatter_(1, actions_batch_long, 1.0)
pred_next_state = self.actor_critic.base.forward_model(
state_feats,
one_hot_actions)
self.forward_loss = F.mse_loss(pred_next_state,
next_state_feats)
curiosity_bonus = self.curiosity_reward_coef * self.forward_loss
adv_targ += curiosity_bonus.detach()
adv_targ = (adv_targ - adv_targ.mean()) / (adv_targ.std() + 1e-5)
ratio = torch.exp(action_log_probs - old_action_log_probs_batch)
surr1 = ratio * adv_targ
surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
1.0 + self.clip_param) * adv_targ
self.action_loss = -torch.min(surr1, surr2).mean()
self.value_loss = F.mse_loss(values, return_batch)
self.dist_entropy = dist_entropy
self.optimizer.zero_grad()
self.get_loss().backward()
nn.utils.clip_grad_norm_(self.actor_critic.base.forward_model.parameters(), self.max_grad_norm)
nn.utils.clip_grad_norm_(self.actor_critic.base.inverse_model.parameters(), self.max_grad_norm)
self.last_grad_norm = nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
self.max_grad_norm)
self.optimizer.step()
value_loss_epoch += self.value_loss.item()
action_loss_epoch += self.action_loss.item()
dist_entropy_epoch += dist_entropy.item()
self.forward_loss_epoch += self.forward_loss.item()
self.inverse_loss_epoch += self.inverse_loss.item()
max_importance_weight_epoch = max(torch.max(ratio).item(), max_importance_weight_epoch)
self.last_update_max_importance_weight = max_importance_weight_epoch
num_updates = self.ppo_epoch * self.num_mini_batch
value_loss_epoch /= num_updates
action_loss_epoch /= num_updates
dist_entropy_epoch /= num_updates
return value_loss_epoch, action_loss_epoch, dist_entropy_epoch, max_importance_weight_epoch, {}
def get_loss(self):
return self.value_loss * self.value_loss_coef \
+ self.action_loss \
- self.dist_entropy * self.entropy_coef \
+ self.forward_loss * self.forward_loss_coef \
+ self.inverse_loss * self.inverse_loss_coef
```
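The curiosity updates above expect a `forward_model` and an `inverse_model` on `actor_critic.base`. Below is a minimal, self-contained sketch of those two heads and of how the intrinsic bonus is formed; the MLP sizes and coefficient are assumptions for illustration, not the repo's actual architecture.
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ForwardModel(nn.Module):
    def __init__(self, feat_dim, num_actions, hidden=256):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(feat_dim + num_actions, hidden), nn.ReLU(),
                                 nn.Linear(hidden, feat_dim))

    def forward(self, state_feats, one_hot_actions):
        # Predict next-state features from current features and the taken action
        return self.net(torch.cat([state_feats, one_hot_actions], dim=1))

class InverseModel(nn.Module):
    def __init__(self, feat_dim, num_actions, hidden=256):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(2 * feat_dim, hidden), nn.ReLU(),
                                 nn.Linear(hidden, num_actions))

    def forward(self, state_feats, next_state_feats):
        # Predict which action connected two consecutive feature vectors
        return self.net(torch.cat([state_feats, next_state_feats], dim=1))

# The intrinsic ("curiosity") bonus is the forward-model prediction error:
feats, next_feats = torch.randn(8, 128), torch.randn(8, 128)
actions = torch.randint(0, 4, (8,))
fwd, inv = ForwardModel(128, 4), InverseModel(128, 4)
one_hot = F.one_hot(actions, 4).float()
curiosity_bonus = 0.1 * F.mse_loss(fwd(feats, one_hot), next_feats)
inverse_loss = F.cross_entropy(inv(feats, next_feats), actions)
```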
#### File: evkit/utils/misc.py
```python
import collections
import re
import torch
import pprint
import string
from evkit.preprocess.transforms import rescale_centercrop_resize, rescale, grayscale_rescale, cross_modal_transform, \
identity_transform, rescale_centercrop_resize_collated, map_pool_collated, map_pool, taskonomy_features_transform, \
image_to_input_collated, taskonomy_multi_features_transform
from evkit.models.alexnet import alexnet_transform, alexnet_features_transform
from evkit.preprocess.baseline_transforms import blind, pixels_as_state
from evkit.models.srl_architectures import srl_features_transform
import warnings
remove_whitespace = str.maketrans('', '', string.whitespace)
def cfg_to_md(cfg, uuid):
''' Because tensorboard uses markdown'''
return uuid + "\n\n " + pprint.pformat((cfg)).replace("\n", " \n").replace("\n \'", "\n \'") + ""
def count_trainable_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_total_parameters(model):
return sum(p.numel() for p in model.parameters())
def is_interactive():
try:
ip = get_ipython()
return ip.has_trait('kernel')
except:
return False
def is_cuda(model):
return next(model.parameters()).is_cuda
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
self._keys, self._vals = zip(*adict.items())
self._keys, self._vals = list(self._keys), list(self._vals)
def keys(self):
return self._keys
def vals(self):
return self._vals
def compute_weight_norm(parameters):
''' no grads! '''
total = 0.0
count = 0
for p in parameters:
total += torch.sum(p.data**2)
# total += p.numel()
count += p.numel()
return (total / count)
def get_number(name):
"""
use regex to get the first integer in the name
if none exists, return -1
"""
try:
num = int(re.findall("[0-9]+", name)[0])
except:
num = -1
return num
def append_dict(d, u, stop_recurse_keys=[]):
for k, v in u.items():
if isinstance(v, collections.Mapping) and k not in stop_recurse_keys:
d[k] = append_dict(d.get(k, {}), v, stop_recurse_keys=stop_recurse_keys)
else:
if k not in d:
d[k] = []
d[k].append(v)
return d
def update_dict_deepcopy(d, u): # we need a deep dictionary update
for k, v in u.items():
if isinstance(v, collections.Mapping):
d[k] = update_dict_deepcopy(d.get(k, {}), v)
else:
d[k] = v
return d
def eval_dict_values(d):
for k in d.keys():
if isinstance(d[k], collections.Mapping):
d[k] = eval_dict_values(d[k])
elif isinstance(d[k], str):
d[k] = eval(d[k].replace("---", "'"))
return d
def search_and_replace_dict(model_kwargs, task_initial):
for k, v in model_kwargs.items():
if isinstance(v, collections.Mapping):
search_and_replace_dict(v, task_initial)
else:
if isinstance(v, str) and 'encoder' in v and task_initial not in v:
new_pth = v.replace('curvature', task_initial) # TODO make this the string between / and encoder
warnings.warn(f'BE CAREFUL - CHANGING ENCODER PATH: {v} is being replaced for {new_pth}')
model_kwargs[k] = new_pth
return
```
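A quick usage sketch of the dictionary helpers above (the config values are made up for illustration), assuming the functions are importable from `evkit.utils.misc`:
```python
from evkit.utils.misc import append_dict, update_dict_deepcopy

base_cfg = {'learner': {'lr': 1e-4, 'ppo_epoch': 8}, 'env': {'env_name': 'Habitat_PointNav'}}
override = {'learner': {'lr': 3e-4}}

merged = update_dict_deepcopy(base_cfg, override)
assert merged['learner'] == {'lr': 3e-4, 'ppo_epoch': 8}  # nested keys are merged, not replaced

history = {}
for step_stats in ({'loss': {'total': 0.9}}, {'loss': {'total': 0.7}}):
    history = append_dict(history, step_stats)
assert history == {'loss': {'total': [0.9, 0.7]}}  # leaves accumulate into lists
```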
#### File: utils/viz/core.py
```python
import numpy as np
from scipy.ndimage import zoom
from skimage.transform import resize
import skimage
import torchvision.utils as tvutils
import torch
import PIL
from PIL import Image
import torchvision
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = torch.tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3)
self.std = torch.tensor(std).unsqueeze(0).unsqueeze(2).unsqueeze(3)
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
# for t, m, s in zip(tensor, self.mean, self.std):
# t * s + m
# # The normalize code -> t.sub_(m).div_(s)
return tensor * self.std.to(tensor.device) + self.mean.to(tensor.device)
imagenet_unnormalize = UnNormalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
taskononomy_unnormalize = UnNormalize([0.5,0.5,0.5], [0.5, 0.5, 0.5])
def log_input_images(obs_unpacked, mlog, num_stack, key_names=['map'], meter_name='debug/input_images', step_num=0, reset_meter=True, phase='train', unnormalize=taskononomy_unnormalize):
# Plots the observations from the first process
stacked = []
for key_name in key_names:
if key_name not in obs_unpacked:
print(key_name, "not found")
continue
obs = obs_unpacked[key_name][0]
obs = (obs + 1.0) / 2.0
# obs = unnormalize(obs)
# obs = (obs * 2. - 1.)
try:
obs = obs.cpu()
except:
pass
obs_chunked = list(torch.chunk(obs, num_stack, dim=0))
if obs_chunked[0].shape[2] == 1 or obs_chunked[0].shape[2] == 3:
obs_chunked = [o.permute(2, 0, 1) for o in obs_chunked]
obs_chunked = [hacky_resize(obs) for obs in obs_chunked]
key_stacked = torchvision.utils.make_grid(obs_chunked, nrow=num_stack, padding=2)
stacked.append(key_stacked)
stacked = torch.cat(stacked, dim=1)
mlog.update_meter(stacked, meters={meter_name}, phase=phase)
if reset_meter:
mlog.reset_meter(step_num, meterlist={meter_name})
def hacky_resize(obs: torch.Tensor) -> torch.Tensor:
obs_img_format = np.transpose((255 * obs.cpu().numpy()).astype(np.uint8), (1,2,0))
obs_resized = torch.Tensor(np.array(Image.fromarray(obs_img_format).resize((84,84))).astype(np.float32)).permute((2,0,1))
return obs_resized / 255.
def rescale_for_display( batch, rescale=True, normalize=False ):
'''
Prepares network output for display by optionally rescaling from [-1,1],
and by setting some pixels to the min/max of 0/1. This prevents matplotlib
from rescaling the images.
'''
if rescale:
display_batch = [ rescale_image( im.copy(), new_scale=[0, 1], current_scale=[-1, 1] )
for im in batch ]
else:
display_batch = batch.copy()
if not normalize:
for im in display_batch:
im[0,0,0] = 1.0 # Adjust some values so that matplotlib doesn't rescale
im[0,1,0] = 0.0 # Now adjust the min
return display_batch
def rescale_image(im, new_scale=[-1.,1.], current_scale=None, no_clip=False):
"""
Rescales an image pixel values to target_scale
Args:
img: A np.float_32 array, assumed between [0,1]
new_scale: [min,max]
current_scale: If not supplied, it is assumed to be in:
[0, 1]: if dtype=float
[0, 2^16]: if dtype=uint
[0, 255]: if dtype=ubyte
Returns:
rescaled_image
"""
# im = im.astype(np.float32)
if current_scale is not None:
min_val, max_val = current_scale
if not no_clip:
im = np.clip(im, min_val, max_val)
im = im - min_val
im /= (max_val - min_val)
min_val, max_val = new_scale
im *= (max_val - min_val)
im += min_val
im = skimage.img_as_float(im)
return im
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
By kchen @ https://github.com/kchen92/joint-representation/blob/24b30ca6963d2ec99618af379c1e05e1f7026710/lib/data/input_pipeline_feed_dict.py
"""
if type(im) == PIL.PngImagePlugin.PngImageFile:
interps = [PIL.Image.NEAREST, PIL.Image.BILINEAR]
return skimage.util.img_as_float(im.resize(new_dims, interps[interp_order]))
if all( new_dims[i] == im.shape[i] for i in range( len( new_dims ) ) ):
resized_im = im #return im.astype(np.float32)
elif im.shape[-1] == 1 or im.shape[-1] == 3:
# # skimage is fast but only understands {1,3} channel images
resized_im = resize(im, new_dims, order=interp_order, preserve_range=True)
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
# resized_im = resized_im.astype(np.float32)
return resized_im
def resize_rescale_image(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, and rescale to be
between
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def pack_images(x, prediction, label, mask=None):
uncertainty = None
if isinstance(prediction, tuple):
prediction, uncertainty = prediction
if len(label.shape) == 4 and label.shape[1] == 2:
zeros = torch.zeros(label.shape[0], 1, label.shape[2], label.shape[3]).to(label.device)
label = torch.cat([label, zeros], dim=1)
prediction = torch.cat([prediction, zeros], dim=1)
if uncertainty is not None:
uncertainty = torch.cat([uncertainty, zeros], dim=1)
if mask is not None:
mask = torch.cat([mask, mask[:,0].unsqueeze(1)], dim=1)
if len(x.shape) == 4 and x.shape[1] == 2:
zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)
x = torch.cat([x, zeros], dim=1)
to_cat = []
if x.shape[1] <= 3:
to_cat.append(x)
shape_with_three_channels = list(x.shape)
shape_with_three_channels[1] = 3
to_cat.append(prediction.expand(shape_with_three_channels))
if uncertainty is not None:
print(uncertainty.min(), uncertainty.max())
uncertainty = 2*uncertainty - 1.0
uncertainty = uncertainty.clamp(min=-1.0, max=1.0)
to_cat.append(uncertainty.expand(shape_with_three_channels))
to_cat.append(label.expand(shape_with_three_channels))
if mask is not None:
to_cat.append(mask.expand(shape_with_three_channels))
# print([p.shape for p in to_cat])
im_samples = torch.cat(to_cat, dim=3)
im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)
return im_samples
def maybe_entriple(x, is_mask=False):
if x.shape[1] == 2:
if is_mask:
x = torch.cat([x, x[:,0].unsqueeze(1)], dim=1)
else:
zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)
x = torch.cat([x, zeros], dim=1)
shape_with_three_channels = list(x.shape)
shape_with_three_channels[1] = 3
return x.expand(shape_with_three_channels)
def pack_chained_images(x, predictions, labels, mask=None):
x = maybe_entriple(x)
if mask is not None:
mask = maybe_entriple(mask, is_mask=True)
tripled_predictions, uncertainties = [], []
for p in predictions:
if isinstance(p, tuple):
p, u = p
uncertainties.append(maybe_entriple(u))
else:
uncertainties.append(None)
tripled_predictions.append(maybe_entriple(p))
predictions = tripled_predictions
labels = [maybe_entriple(l) for l in labels]
to_cat = []
if x.shape[1] <= 3:
to_cat.append(x)
for pred, uncert, label in zip(predictions, uncertainties, labels):
to_cat.append(label)
to_cat.append(pred)
if uncert is not None:
print(uncert.min(), uncert.max())
uncert = 2*uncert - 1.0
uncert = uncert.clamp(min=-1.0, max=1.0)
to_cat.append(uncert)
if mask is not None:
to_cat.append(mask)
# print([p.shape for p in to_cat])
im_samples = torch.cat(to_cat, dim=3)
im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)
return im_samples
```
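As a small illustration of the display helpers above (the array is synthetic), `rescale_image` clips to the current range and then maps linearly into the new one:
```python
import numpy as np
from evkit.utils.viz.core import rescale_image

# Map a network output nominally in [-1, 1] back into [0, 1] for plotting;
# values outside the current scale are clipped first.
pred = np.random.uniform(-1.5, 1.5, size=(64, 64, 3)).astype(np.float32)
display = rescale_image(pred, new_scale=[0.0, 1.0], current_scale=[-1.0, 1.0])
assert display.min() >= 0.0 and display.max() <= 1.0
```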
#### File: scripts/prep/make_masks.py
```python
import copy
import cv2
from functools import partial
import logging
from multiprocessing import Pool
import multiprocessing
import numpy as np
import os
from sacred import Experiment
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from tqdm import tqdm as tqdm
from evkit.models.taskonomy_network import TaskonomyDecoder, TaskonomyNetwork
from tlkit.utils import get_parent_dirname, LIST_OF_TASKS, SINGLE_IMAGE_TASKS, TASKS_TO_CHANNELS
from tlkit.data.datasets.taskonomy_dataset import get_dataloaders, TRAIN_BUILDINGS, VAL_BUILDINGS, TEST_BUILDINGS
import tlkit.data.splits as splits
from evkit.models.taskonomy_network import TaskonomyEncoder
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
ex = Experiment(name="Save activations")
SOURCE_TASK = 'depth_zbuffer'
def save_as_png(file_path, decoding):
decoding = 0.5 * decoding + 0.5
decoding *= (2 ** 16 - 1)
decoding = decoding.astype(np.uint16)
    # This is fine, but the empty channel needs to be parsed out afterwards
decoding = np.transpose(decoding, (1,2,0))
if decoding.shape[2] > 1:
cv2.imwrite(file_path, cv2.cvtColor(decoding, cv2.COLOR_RGB2BGR))
else:
cv2.imwrite(file_path, decoding.astype(np.uint8))
return
def save_to_file(arr, original_image_fname, new_root, subfolder, filetype='.npy'):
abspath = os.path.abspath(original_image_fname)
base_name = os.path.basename(abspath).replace('.png', filetype)
parent_name = get_parent_dirname(abspath).replace(SOURCE_TASK, "mask_valid")
file_path = os.path.join(new_root, subfolder, parent_name, base_name)
os.makedirs(os.path.join(new_root, subfolder, parent_name), exist_ok=True)
if filetype == '.npy':
np.save(file_path, arr)
elif filetype == '.npz':
np.savez_compressed(file_path, arr)
elif filetype == '.png':
cv2.imwrite(file_path, np.uint8(arr[0]))
else:
raise NotImplementedError("Cannot save {}. Unrecognized filetype {}.".format(file_path, filetype))
def save_mappable(x):
return save_to_file(*x)
def build_mask(target, val=65000):
mask = (target >= val)
# mask = F.conv2d(mask.float(), torch.ones(1, 1, 5, 5, device=mask.device), padding=2, stride=2) != 0
# mask = F.conv2d(mask.float(), torch.ones(1, 1, 5, 5, device=mask.device), padding=2, stride=2) != 0
# mask2 = F.max_pool2d(mask.float(), 5, padding=2, stride=1) == 0
# mask = mask * 127 + mask2*127
mask = F.max_pool2d(mask.float(), 5, padding=2, stride=2) == 0
    return mask * 255
@ex.main
def make_mask(folders_to_convert,
split_to_convert,
data_dir,
save_dir,
n_dataloader_workers=4,
batch_size=64):
if folders_to_convert is None and split_to_convert is not None:
split_to_convert = eval(split_to_convert)
logger.info(f'Converting from split {split_to_convert}')
folders_to_convert = sorted(list(set(split_to_convert['train'] + split_to_convert['val'] + split_to_convert['test'])))
if folders_to_convert is None:
logger.info(f'Converting all folders in {data_dir}')
else:
logger.info(f'Converting folders {str(folders_to_convert)}')
dataloader = get_dataloaders(
data_path=data_dir, tasks=SOURCE_TASK,
batch_size=batch_size, batch_size_val=batch_size,
num_workers=n_dataloader_workers,
train_folders=None,
val_folders=folders_to_convert,
test_folders=None,
zip_file_name=True,
transform=transforms.Compose([transforms.ToTensor()]),
)['val']
pool = Pool(n_dataloader_workers)
for fpaths, x in tqdm(dataloader):
dirname = get_parent_dirname(fpaths[0])
with torch.no_grad():
x = build_mask(x)
pool.map(save_mappable, zip(x, fpaths,
[save_dir]*batch_size, ['mask_valid']*batch_size,
['.png']*batch_size))
# return
@ex.config
def cfg_base():
folders_to_convert = None
split_to_convert = None
batch_size = 64
n_dataloader_workers = 8
data_dir = '/mnt/data'
save_dir = '/mnt/data'
if __name__ == "__main__":
ex.run_commandline()
```
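A toy illustration of what `build_mask` produces (the depth values here are made up): pixels at or above `val` are treated as missing readings, dilated with a strided max-pool, and the result is inverted so valid regions come out as 255. This assumes `build_mask` from the script above is in scope.
```python
import torch

depth = torch.full((1, 1, 8, 8), 1000.0)   # plausible depth readings
depth[0, 0, :2, :2] = 65535.0              # a patch of invalid (missing) readings
mask = build_mask(depth, val=65000)
print(mask.shape)                          # torch.Size([1, 1, 4, 4]) -- stride-2 pooling halves H and W
print(mask.unique())                       # tensor([  0, 255]) -- 0 near the invalid patch, 255 elsewhere
```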
#### File: Side-tuning/scripts/train_lifelong.py
```python
import os
import GPUtil
# If you need one GPU, I will pick it here for you
if 'CUDA_VISIBLE_DEVICES' not in os.environ:
gpu = [str(g) for g in GPUtil.getAvailable(maxMemory=0.2)]
assert len(gpu) > 0, 'No available GPUs'
print('Using GPU', ','.join(gpu))
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(gpu)
import argparse
import copy
from docopt import docopt
import functools
import json
import logging
import math
import numpy as np
import pprint
import psutil
import random
import runpy
from sacred.arg_parser import get_config_updates
from sacred import Experiment
import subprocess
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.transforms as transforms
import torchvision.utils as tvutils
import torch.nn.functional as F
import torchsummary
from tqdm import tqdm as tqdm
from multiprocessing.pool import ThreadPool
import threading
import warnings
from tlkit.data.synset import synset_arr
from tlkit.models.ewc import EWC
from tlkit.models.student_models import FCN3, FCN4, FCN5, FCN8
from tlkit.models.lifelong_framework import load_submodule
from tlkit.logging_helpers import log, log_image, reset_log, add_classification_specific_logging, get_logger, write_logs
from tlkit.utils import update, var_to_numpy, index_to_image, load_state_dict_from_path
import tlkit.utils
import tlkit.data.datasets.taskonomy_dataset as taskonomy_dataset
import tlkit.data.datasets.fashion_mnist_dataset as fashion_mnist_dataset
import tlkit.data.datasets.imagenet_dataset as imagenet_dataset
import tlkit.data.datasets.icifar_dataset as icifar_dataset
import tlkit.data.splits as splits
from tlkit.utils import LIST_OF_TASKS, TASKS_TO_CHANNELS, SINGLE_IMAGE_TASKS
from evkit.saving.observers import FileStorageObserverWithExUuid
import evkit.saving.checkpoints as checkpoints
from evkit.utils.profiler import Profiler
from evkit.utils.random import set_seed
from evkit.utils.misc import cfg_to_md, count_trainable_parameters, count_total_parameters, search_and_replace_dict
from evkit.utils.parallel import _CustomDataParallel
from evkit.utils.losses import heteroscedastic_double_exponential, heteroscedastic_normal, weighted_mse_loss, softmax_cross_entropy, weighted_l1_loss, perceptual_l1_loss, perceptual_l2_loss, perceptual_cross_entropy_loss, identity_regularizer, transfer_regularizer, perceptual_regularizer, dense_cross_entropy, dense_softmax_cross_entropy, weighted_l2_loss
from evkit.utils.viz.core import pack_images, imagenet_unnormalize
from evkit.models.taskonomy_network import TaskonomyEncoder, TaskonomyDecoder, TaskonomyNetwork
from evkit.models.unet import UNet, UNetHeteroscedasticFull, UNetHeteroscedasticIndep, UNetHeteroscedasticPooled
from tlkit.models.student_models import FCN4Reshaped
from tlkit.models.resnet_cifar import ResnetiCifar44
from tlkit.models.sidetune_architecture import GenericSidetuneNetwork, TransferConv3, PreTransferedDecoder
from tlkit.models.models_additional import BoostedNetwork, ConstantModel
from tlkit.models.lifelong_framework import LifelongSidetuneNetwork
import tnt.torchnet as tnt
from tnt.torchnet.logger import FileLogger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
logger = logging.getLogger()
ex = Experiment(name="Train Lifelong Learning agent")
LOG_DIR = sys.argv[1]
sys.argv.pop(1)
runpy.run_module('configs.vision_lifelong', init_globals=globals())
runpy.run_module('configs.icifar_cfg', init_globals=globals())
runpy.run_module('configs.seq_taskonomy_cfg', init_globals=globals())
runpy.run_module('configs.seq_taskonomy_cfg_extra', init_globals=globals())
runpy.run_module('configs.shared', init_globals=globals())
@ex.command
def prologue(cfg, uuid):
os.makedirs(LOG_DIR, exist_ok=True)
assert not (cfg['saving']['obliterate_logs'] and cfg['training']['resume_training']), 'Cannot obliterate logs and resume training'
if cfg['saving']['obliterate_logs']:
assert LOG_DIR, 'LOG_DIR cannot be empty'
subprocess.call(f'rm -rf {LOG_DIR}', shell=True)
if cfg['training']['resume_training']:
checkpoints.archive_current_run(LOG_DIR, uuid)
@ex.main
def train(cfg, uuid):
set_seed(cfg['training']['seed'])
############################################################
# Logger
############################################################
logger.setLevel(logging.INFO)
logger.info(pprint.pformat(cfg))
logger.debug(f'Loaded Torch version: {torch.__version__}')
logger.debug(f'Using device: {device}')
logger.info(f"Training following tasks: ")
for i, (s, t) in enumerate(zip(cfg['training']['sources'], cfg['training']['targets'])):
logger.info(f"\tTask {i}: {s} -> {t}")
logger.debug(f'Starting data loaders')
############################################################
# Model (and possibly resume from checkpoint)
############################################################
logger.debug(f'Setting up model')
search_and_replace_dict(cfg['learner']['model_kwargs'], cfg['training']['targets'][0][0]) # switches to the proper pretrained encoder
model = eval(cfg['learner']['model'])(**cfg['learner']['model_kwargs'])
logger.info(f"Created model. Number of trainable parameters: {count_trainable_parameters(model)}. Number of total parameters: {count_total_parameters(model)}")
try:
logger.info(f"Number of trainable transfer parameters: {count_trainable_parameters(model.transfers)}. Number of total transfer parameters: {count_total_parameters(model.transfers)}")
if isinstance(model.encoder, nn.Module):
logger.info(f"Number of trainable encoder parameters: {count_trainable_parameters(model.base)}. Number of total encoder parameters: {count_total_parameters(model.base)}")
if isinstance(model.side_networks, nn.Module):
logger.info(f"Number of trainable side parameters: {count_trainable_parameters(model.sides)}. Number of total side parameters: {count_total_parameters(model.sides)}")
if isinstance(model.merge_operators, nn.Module):
logger.info(f"Number of trainable merge (alpha) parameters: {count_trainable_parameters(model.merge_operators)}. Number of total merge (alpha) parameters: {count_total_parameters(model.merge_operators)}")
except:
pass
ckpt_fpath = cfg['training']['resume_from_checkpoint_path']
loaded_optimizer = None
start_epoch = 0
if ckpt_fpath is not None and not cfg['training']['resume_training']:
warnings.warn('Checkpoint path provided but resume_training is set to False, are you sure??')
if ckpt_fpath is not None and cfg['training']['resume_training']:
if not os.path.exists(ckpt_fpath):
logger.warning(f'Trying to resume training, but checkpoint path {ckpt_fpath} does not exist. Starting training from beginning...')
else:
model, checkpoint = load_state_dict_from_path(model, ckpt_fpath)
start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
logger.info(f"Loaded model (epoch {start_epoch if 'epoch' in checkpoint else 'unknown'}) from {ckpt_fpath}")
if 'optimizer' in checkpoint:
loaded_optimizer = checkpoint['optimizer']
else:
warnings.warn('No optimizer in checkpoint, are you sure?')
try: # we do not use state_dict, do not let it take up precious CUDA memory
del checkpoint['state_dict']
except KeyError:
pass
model.to(device)
if torch.cuda.device_count() > 1:
logger.info(f"Using {torch.cuda.device_count()} GPUs!")
assert cfg['learner']['model'] != 'ConstantModel', 'ConstantModel (blind) does not operate with multiple devices'
model = nn.DataParallel(model, range(torch.cuda.device_count()))
model.to(device)
############################################################
# Data Loading
############################################################
for key in ['sources', 'targets', 'masks']:
cfg['training']['dataloader_fn_kwargs'][key] = cfg['training'][key]
dataloaders = eval(cfg['training']['dataloader_fn'])(**cfg['training']['dataloader_fn_kwargs'])
if cfg['training']['resume_training']:
if 'curr_iter_idx' in checkpoint and checkpoint['curr_iter_idx'] == -1:
warnings.warn(f'curr_iter_idx is -1, Guessing curr_iter_idx to be start_epoch {start_epoch}')
dataloaders['train'].start_dl = start_epoch
elif 'curr_iter_idx' in checkpoint:
logger.info(f"Starting dataloader at {checkpoint['curr_iter_idx']}")
dataloaders['train'].start_dl = checkpoint['curr_iter_idx']
else:
warnings.warn(f'Guessing curr_iter_idx to be start_epoch {start_epoch}')
dataloaders['train'].start_dl = start_epoch
############################################################
# Loss Functions
############################################################
loss_fn_lst = cfg['training']['loss_fn']
loss_kwargs_lst = cfg['training']['loss_kwargs']
if not isinstance(loss_fn_lst, list):
loss_fn_lst = [ loss_fn_lst ]
loss_kwargs_lst = [ loss_kwargs_lst ]
elif isinstance(loss_kwargs_lst, dict):
loss_kwargs_lst = [loss_kwargs_lst for _ in range(len(loss_fn_lst))]
loss_fns = []
assert len(loss_fn_lst) == len(loss_kwargs_lst), 'number of loss fn/kwargs not the same'
for loss_fn, loss_kwargs in zip(loss_fn_lst, loss_kwargs_lst):
if loss_fn == 'perceptual_l1':
loss_fn = perceptual_l1_loss(cfg['training']['loss_kwargs']['decoder_path'], cfg['training']['loss_kwargs']['bake_decodings'])
elif loss_fn == 'perceptual_l2':
loss_fn = perceptual_l2_loss(cfg['training']['loss_kwargs']['decoder_path'], cfg['training']['loss_kwargs']['bake_decodings'])
elif loss_fn == 'perceptual_cross_entropy':
loss_fn = perceptual_cross_entropy_loss(cfg['training']['loss_kwargs']['decoder_path'], cfg['training']['loss_kwargs']['bake_decodings'])
else:
loss_fn = functools.partial(eval(loss_fn), **loss_kwargs)
loss_fns.append(loss_fn)
if len(loss_fns) == 1 and len(cfg['training']['sources']) > 1:
loss_fns = [loss_fns[0] for _ in range(len(cfg['training']['sources']))]
if 'regularizer_fn' in cfg['training'] and cfg['training']['regularizer_fn'] is not None:
assert torch.cuda.device_count() <= 1, 'Regularization does not support multi GPU, unable to access model attributes from DataParallel wrapper'
bare_model = model.module if torch.cuda.device_count() > 1 else model
loss_fns = [eval(cfg['training']['regularizer_fn'])(loss_fn=loss_fn, model=bare_model, **cfg['training']['regularizer_kwargs']) for loss_fn in loss_fns]
############################################################
# More Logging
############################################################
flog = tnt.logger.FileLogger(cfg['saving']['results_log_file'], overwrite=True)
mlog = get_logger(cfg, uuid)
mlog.add_meter('config', tnt.meter.SingletonMeter(), ptype='text')
mlog.update_meter(cfg_to_md(cfg, uuid), meters={'config'}, phase='train')
for task, _ in enumerate(cfg['training']['targets']):
mlog.add_meter(f'alpha/task_{task}', tnt.meter.ValueSummaryMeter())
mlog.add_meter(f'output/task_{task}', tnt.meter.ValueSummaryMeter(), ptype='image')
mlog.add_meter(f'input/task_{task}', tnt.meter.ValueSummaryMeter(), ptype='image')
        mlog.add_meter(f'weight_histogram/task_{task}', tnt.meter.ValueSummaryMeter(), ptype='histogram')
for loss in cfg['training']['loss_list']:
mlog.add_meter(f'losses/{loss}_{task}', tnt.meter.ValueSummaryMeter())
if cfg['training']['task_is_classification'][task] :
mlog.add_meter(f'accuracy_top1/task_{task}', tnt.meter.ClassErrorMeter(topk=[1], accuracy=True))
mlog.add_meter(f'accuracy_top5/task_{task}', tnt.meter.ClassErrorMeter(topk=[5], accuracy=True))
mlog.add_meter(f'perplexity_pred/task_{task}', tnt.meter.ValueSummaryMeter())
mlog.add_meter(f'perplexity_label/task_{task}', tnt.meter.ValueSummaryMeter())
############################################################
# Training
############################################################
try:
if cfg['training']['train']:
# Optimizer
if cfg['training']['resume_training'] and loaded_optimizer is not None:
optimizer = loaded_optimizer
else:
optimizer = eval(cfg['learner']['optimizer_class'])(
[
{'params': [param for name, param in model.named_parameters() if 'merge_operator' in name or 'context' in name or 'alpha' in name], 'weight_decay': 0.0},
{'params': [param for name, param in model.named_parameters() if 'merge_operator' not in name and 'context' not in name and 'alpha' not in name]},
],
lr=cfg['learner']['lr'], **cfg['learner']['optimizer_kwargs']
)
# Scheduler
scheduler = None
if cfg['learner']['lr_scheduler_method'] is not None:
scheduler = eval(cfg['learner']['lr_scheduler_method'])(optimizer, **cfg['learner']['lr_scheduler_method_kwargs'])
model.start_training() # For PSP variant
# Mixed precision training
if cfg['training']['amp']:
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
logger.info("Starting training...")
context = train_model(cfg, model, dataloaders, loss_fns, optimizer, start_epoch=start_epoch,
num_epochs=cfg['training']['num_epochs'], save_epochs=cfg['saving']['save_interval'],
scheduler=scheduler, mlog=mlog, flog=flog)
finally:
print(psutil.virtual_memory())
GPUtil.showUtilization(all=True)
####################
# Final Test
####################
if cfg['training']['test']:
run_kwargs = {
'cfg': cfg,
'mlog': mlog,
'flog': flog,
'optimizer': None,
'loss_fns': loss_fns,
'model': model,
'use_thread': cfg['saving']['in_background'],
}
context, _ = run_one_epoch(dataloader=dataloaders['val'], epoch=0, train=False, **run_kwargs)
logger.info('Waiting up to 10 minutes for all files to save...')
mlog.flush()
[c.join(600) for c in context]
logger.info('All saving is finished.')
def train_model(cfg, model, dataloaders, loss_fns, optimizer, start_epoch=0, num_epochs=250, save_epochs=25, scheduler=None, mlog=None, flog=None):
'''
Main training loop. Multiple tasks might happen in the same epoch.
0 to 1 random validation only
1 to 2 train task 0 labeled as epoch 2, validate all
i to {i+1} train task {i-1} labeled as epoch {i+1}
'''
checkpoint_dir = os.path.join(cfg['saving']['log_dir'], cfg['saving']['save_dir'])
run_kwargs = {
'cfg': cfg,
'mlog': mlog,
'flog': flog,
'optimizer': optimizer,
'loss_fns': loss_fns,
'model': model,
'use_thread': cfg['saving']['in_background'],
}
context = []
log_interval = cfg['saving']['log_interval']
log_interval = int(log_interval) if log_interval > 1 else log_interval
end_epoch = start_epoch + num_epochs
print(f'training for {num_epochs} epochs')
for epoch in range(start_epoch, end_epoch):
# tlkit.utils.count_open() # Turn on to check for memory leak
torch.cuda.empty_cache()
if epoch == 0 or epoch % save_epochs == save_epochs - 1:
context += save_checkpoint(model, optimizer, epoch, dataloaders, checkpoint_dir, use_thread=cfg['saving']['in_background'])
should_run_validation = (epoch == 0) or (log_interval <= 1) or ((epoch % log_interval) == (log_interval - 1))
if should_run_validation:
assert math.isnan(mlog.peek_meter()['losses/total_0']), 'Loggers are not empty at the beginning of evaluation. Were training logs cleared?'
context1, loss_dict = run_one_epoch(dataloader=dataloaders['val'], epoch=epoch, train=False, **run_kwargs)
context += context1
if scheduler is not None:
try:
scheduler.step(loss_dict['total'])
except:
scheduler.step()
# training starts logging at epoch 1, val epoch 0 is fully random, each task should only last ONE epoch
context1, _ = run_one_epoch(dataloader=dataloaders['train'], epoch=epoch+1, train=True, **run_kwargs)
context += context1
# Compute needed after the end of an epoch - e.g. EWC computes ~Fisher info matrix
post_training_epoch(dataloader=dataloaders['train'], epoch=epoch, **run_kwargs)
context1, _ = run_one_epoch(dataloader=dataloaders['val'], epoch=end_epoch, train=False, **run_kwargs)
context += context1
context += save_checkpoint(model, optimizer, end_epoch, dataloaders, checkpoint_dir, use_thread=cfg['saving']['in_background'])
return context
def post_training_epoch(dataloader=None, epoch=-1, model=None, loss_fns=None, **kwargs):
post_training_cache = {}
if hasattr(loss_fns[dataloader.curr_iter_idx], 'post_training_epoch'): # this lets respective loss_fn compute F
loss_fns[dataloader.curr_iter_idx].post_training_epoch(model, dataloader, post_training_cache, **kwargs)
for i, loss_fn in enumerate(loss_fns):
if hasattr(loss_fn, 'post_training_epoch') and i != dataloader.curr_iter_idx:
loss_fn.post_training_epoch(model, dataloader, post_training_cache, **kwargs)
def run_one_epoch(model: LifelongSidetuneNetwork, dataloader, loss_fns, optimizer, epoch, cfg, mlog, flog, train=True, use_thread=False)->(list,dict):
# logs through the progress of the epoch from [epoch, epoch + 1)
start_time = time.time()
model.train(train)
params_with_grad = model.parameters()
phase = 'train' if train else 'val'
sources = cfg['training']['sources']
targets = cfg['training']['targets']
tasks = [t for t in SINGLE_IMAGE_TASKS if len([tt for tt in cfg['training']['targets'] if t in tt]) > 0]
cache = {'phase': phase, 'sources': sources, 'targets': targets, 'tasks': tasks}
context = []
losses = {x:[] for x in cfg['training']['loss_list']}
log_steps = []
log_interval = cfg['saving']['log_interval']
log_interval = int(log_interval) if log_interval >= 1 else log_interval
if log_interval < 1 and train:
num_logs_per_epoch = int(1 // log_interval)
log_steps = [i * int(len(dataloader)/num_logs_per_epoch) for i in range(1, num_logs_per_epoch)]
if cfg['training']['post_aggregation_transform_fn'] is not None:
post_agg_transform = eval(cfg['training']['post_aggregation_transform_fn'])
if cfg['learner']['use_feedback']:
num_passes = cfg['learner']['feedback_kwargs']['num_feedback_iter']
backward_kwargs = {'retain_graph': True}
else:
num_passes = 1
backward_kwargs = {}
if isinstance(model, _CustomDataParallel):
warnings.warn('DataParallel does not allow you to put part of the model on CPU')
model.cuda()
with torch.set_grad_enabled(train):
# print(type(model.encoder.encoder), torch.norm(next(model.encoder.encoder.parameters())))
# print(type(model.encoder.side_network), torch.norm(next(model.encoder.side_network.parameters())))
seen = set()
for i, (task_idx, batch_tuple) in enumerate(tqdm(dataloader, desc=f"Epoch {epoch} ({phase})")):
if cfg['training']['post_aggregation_transform_fn'] is not None:
batch_tuple = post_agg_transform(batch_tuple, **cfg['training']['post_aggregation_transform_fn_kwargs'])
# Determine and handle new task
old_size = len(seen)
seen.add(task_idx)
if len(seen) > old_size:
logger.info(f"Moving to task: {task_idx}")
model.start_task(task_idx, train, print_alpha=True)
# Decompose batch, Forward, Compute Loss
x, label, masks = tlkit.utils.process_batch_tuple(batch_tuple, task_idx, cfg)
for pass_i in range(num_passes):
prediction = model(x, task_idx=task_idx, pass_i=pass_i)
loss_dict = loss_fns[task_idx](prediction, label, masks, cache)
# If training, Backward
if train:
optimizer.zero_grad()
loss_dict['total'].backward(**backward_kwargs)
if cfg['learner']['max_grad_norm'] is not None:
torch.nn.utils.clip_grad_norm_(params_with_grad, cfg['learner']['max_grad_norm'])
optimizer.step()
# Logging
mlog.update_meter(model.merge_operator.param, meters={f'alpha/task_{task_idx}'}, phase=phase)
for loss in cfg['training']['loss_list']:
assert loss in loss_dict.keys(), f'Promised to report loss {loss}, but missing from loss_dict'
mlog.update_meter(loss_dict[loss].detach().item(), meters={f'losses/{loss}_{task_idx}'}, phase=phase)
if cfg['training']['task_is_classification'][task_idx]:
add_classification_specific_logging(cache, mlog, task_idx, phase)
if len(seen) > old_size:
log_image(mlog, task_idx, cfg, x, label, prediction, masks=masks, cache=cache)
# for super long epochs where we want some information between epochs
if i in log_steps:
step = epoch + i / len(dataloader)
step = int(np.floor(step * cfg['saving']['ticks_per_epoch']))
for loss in cfg['training']['loss_list']:
losses[loss].append(mlog.peek_meter(phase=phase)[f'losses/{loss}_{task_idx}'].item())
context += write_logs(mlog, flog, task_idx, step, cfg, cache, to_print=False)
for loss in cfg['training']['loss_list']:
losses[loss].append(mlog.peek_meter(phase=phase)[f'losses/{loss}_{task_idx}'].item())
if log_interval <= 1 or epoch % log_interval == log_interval - 1 or epoch == 0:
step = epoch + (len(dataloader) - 1) / len(dataloader)
step = int(np.floor(step * cfg['saving']['ticks_per_epoch']))
context += write_logs(mlog, flog, task_idx, step, cfg, cache, to_print=True)
assert len(losses['total']) > 0, 'Need to report loss'
for k in losses.keys():
losses[k] = sum(losses[k]) / len(losses[k])
loss_str = ''.join([' | ' + k + ' loss: {0:.6f} '.format(v) for k, v in losses.items()])
duration = int(time.time() - start_time)
logger.info(f'End of epoch {epoch} ({phase}) ({duration//60}m {duration%60}s) {loss_str}') # this is cumulative from previous train epochs in the same log_interval
return context, losses
def save_checkpoint(model, optimizer, epoch, dataloaders, checkpoint_dir, use_thread=False):
dict_to_save = {
'state_dict': model.state_dict(),
'epoch': epoch,
'model': model,
'optimizer': optimizer,
'curr_iter_idx': dataloaders['train'].curr_iter_idx,
}
checkpoints.save_checkpoint(dict_to_save, checkpoint_dir, epoch)
return []
if __name__ == '__main__':
assert LOG_DIR, 'log dir cannot be empty'
# Manually parse command line opts
short_usage, usage, internal_usage = ex.get_usage()
args = docopt(internal_usage, [str(a) for a in sys.argv[1:]], help=False)
config_updates, named_configs = get_config_updates(args['UPDATE'])
ex.run('prologue', config_updates, named_configs, options=args)
ex.observers.append(FileStorageObserverWithExUuid.create(LOG_DIR))
ex.run_commandline()
else:
print(__name__)
```
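One detail worth calling out from `train` above: the merge/alpha (blending) parameters are placed in their own optimizer group with weight decay disabled, presumably so that regularization does not bias the blend toward one branch independently of the task loss. A stripped-down sketch of that grouping follows; the module names are placeholders, not the repo's model.
```python
import torch.nn as nn
import torch.optim as optim

model = nn.ModuleDict({
    'encoder': nn.Linear(8, 8),
    'side': nn.Linear(8, 8),
    'merge_operator': nn.Linear(1, 1, bias=False),  # stands in for the alpha blending parameter
})

def is_blend_param(name):
    return 'merge_operator' in name or 'context' in name or 'alpha' in name

optimizer = optim.Adam(
    [
        {'params': [p for n, p in model.named_parameters() if is_blend_param(n)], 'weight_decay': 0.0},
        {'params': [p for n, p in model.named_parameters() if not is_blend_param(n)]},
    ],
    lr=1e-4, weight_decay=2e-6,  # illustrative values
)
```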
#### File: data/datasets/expert_dataset.py
```python
from evkit.models.taskonomy_network import task_mapping
import os
from PIL import Image
import torch.utils.data as data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from tlkit.data.img_transforms import MAKE_RESCALE_0_1_NEG1_POS1
import numpy as np
from tqdm import tqdm
KEYS =['rgb_filled', 'taskonomy', 'map', 'target', 'global_pos', 'action']
KEYS.extend([f'taskonomy_{task}' for task in task_mapping.values()])
class ExpertData(data.Dataset):
def __init__(self, data_path, keys, num_frames, split='train', transform: dict = {}, load_to_mem=False,
remove_last_step_in_traj=True, removed_actions=[]):
"""
data expected format
/path/to/data/
scenek/
trajj/
rgb_1.png
map_1.npz
action_1.npz
...
rgb_24.png
map_24.npz
action_24.npz
"""
if not os.path.isdir(data_path):
assert "bad directory"
# write down all the data paths
self.keys = keys
self.urls = {k: [] for k in self.keys}
print(f'Loading {split} data')
for scene in tqdm(sorted(os.listdir(os.path.join(data_path, split)))):
for traj in sorted(os.listdir(os.path.join(data_path, split, scene))):
for step in sorted(os.listdir(os.path.join(data_path, split, scene, traj))):
path = os.path.join(data_path, split, scene, traj, step)
key = [k for k in self.keys if k in path]
if len(key) != 1:
continue
self.urls[key[0]].append(path)
if remove_last_step_in_traj:
for k in self.keys:
self.urls[k].pop() # remove the stop action, which is the last one in the sequence
lens = [len(v) for k, v in self.urls.items()]
        assert max(lens) == min(lens), f'should have same number of each key: {keys} with lens {lens}'
self.load_to_mem = load_to_mem
if self.load_to_mem:
print('Loading trajectories to memory')
self.cached_data = {}
for k, objs in self.urls.items():
if 'rgb' in k:
self.cached_data[k] = [np.asarray(Image.open(obj)) for obj in objs]
else:
self.cached_data[k] = [np.load(obj) for obj in objs]
self.num_frames = num_frames
self.transform = transform
for k in self.transform.keys():
assert k in self.keys, f'transform {k} not in keys {self.keys}'
self.removed_actions = removed_actions
def __len__(self):
return len(self.urls[self.keys[0]])
def __getitem__(self, index):
episode_num = self._episode_num(index)
ret = [[] for _ in self.keys]
# stack previously seen frames. ret = [o_{t}, ..., o_{t-N}]
for i in range(self.num_frames):
if episode_num == self._episode_num(index - i):
for key_idx, data in enumerate(self._get_index(index - i)):
ret[key_idx].append(data)
else:
for key_idx in range(len(self.keys)):
ret[key_idx].append(np.zeros_like(ret[key_idx][0]))
for i in range(len(self.keys)):
if i == self.keys.index('action'):
ret[i] = ret[i][0] # pick only the last action - do not frame stack
if isinstance(ret[i], list) or (isinstance(ret[i], np.ndarray) and len(ret[i].shape) > 0):
num_acts = len(ret[i])
while np.argmax(ret[i]) in self.removed_actions: # we do not want to include some actions
rand_act = np.zeros(num_acts, dtype=np.uint8)
rand_act[np.random.randint(num_acts)] = 1
ret[i] = rand_act
keep_indices = [i for i in range(num_acts) if i not in self.removed_actions]
ret[i] = ret[i][keep_indices]
else:
if ret[i] in self.removed_actions:
ret[i] = np.array(np.random.randint(min(self.removed_actions))) # resample
else:
ret[i] = np.concatenate(ret[i][::-1], axis=0)
return ret
def _episode_num(self, index):
return self.urls[self.keys[0]][index].split('/')[-2]
def _get_index(self, index):
if self.load_to_mem:
            ret = [self.cached_data[k][index] for k in self.keys]
else:
ret = []
for k in self.keys:
path = self.urls[k][index]
if 'rgb' in k:
with open(path, 'rb') as f:
img = Image.open(f)
img.convert(img.mode)
ret.append(img)
else:
ret.append(np.load(path)['arr_0'])
for k, t in self.transform.items():
idx = self.keys.index(k)
ret[idx] = t(ret[idx])
return ret
def get_dataloaders(data_path,
tasks,
num_frames,
batch_size=64,
batch_size_val=4,
transform={},
num_workers=0,
load_to_mem=False,
pin_memory=False,
remove_last_step_in_traj=True,
removed_actions=[]):
if 'rgb_filled' in tasks:
transform['rgb_filled'] = transforms.Compose([
transforms.CenterCrop([256, 256]),
transforms.Resize(256),
transforms.ToTensor(),
MAKE_RESCALE_0_1_NEG1_POS1(3),
])
keys = [t for t in tasks if t in KEYS]
assert len(keys) == len(tasks), f'unrecognized task in {tasks} not in {KEYS}! cannot be added to Dataset'
dataloaders = {}
dataset = ExpertData(data_path, keys=keys, num_frames=num_frames, split='train', transform=transform, load_to_mem=load_to_mem, remove_last_step_in_traj=remove_last_step_in_traj, removed_actions=removed_actions)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['train'] = dataloader
dataset = ExpertData(data_path, keys=keys, num_frames=num_frames, split='val', transform=transform, load_to_mem=load_to_mem, remove_last_step_in_traj=remove_last_step_in_traj, removed_actions=removed_actions)
dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['val'] = dataloader
dataset = ExpertData(data_path, keys=keys, num_frames=num_frames, split='test', transform=transform, load_to_mem=load_to_mem, remove_last_step_in_traj=remove_last_step_in_traj, removed_actions=removed_actions)
dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['test'] = dataloader
return dataloaders
if __name__ == '__main__':
data_path = '/mnt/data/expert_trajs/largeplus'
keys = ['rgb_filled', 'taskonomy_denoising', 'map', 'target', 'action']
dataset = ExpertData(data_path, keys=keys, num_frames=4, split='train', transform={}, remove_last_step_in_traj=False)
```
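A small conceptual sketch of the frame stacking performed in `ExpertData.__getitem__` (toy arrays, not real observations): frames are gathered newest-first, zero-padded across episode boundaries, then reversed and concatenated along the channel axis so the stack reads oldest to newest; the `action` key is not stacked, only the most recent action is kept.
```python
import numpy as np

num_frames = 4
# Pretend each observation is a (3, 4, 4) image whose pixels equal its timestep,
# gathered newest-first as in __getitem__: [o_t, o_{t-1}, o_{t-2}, o_{t-3}]
obs_newest_first = [np.full((3, 4, 4), t, dtype=np.float32) for t in (3, 2, 1, 0)]
stacked = np.concatenate(obs_newest_first[::-1], axis=0)  # shape (12, 4, 4)
assert stacked.shape == (3 * num_frames, 4, 4)
assert stacked[0, 0, 0] == 0 and stacked[-1, 0, 0] == 3   # oldest channels first, newest last
```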
#### File: data/datasets/icifar_dataset.py
```python
from itertools import chain, cycle
import torch
import torchvision
from torchvision import transforms
import torchvision.transforms.functional as TF
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
import numpy as np
import threading
import math
from tqdm import tqdm
import warnings
from tlkit.data.sequential_tasks_dataloaders import KthDataLoader, CyclingDataLoader, ConcatenatedDataLoader
class iCIFAR100(torchvision.datasets.CIFAR100):
def __init__(self, root, class_idxs, train=True,
transform=None, target_transform=None,
download=False):
super().__init__(root, train, transform, target_transform, download)
self.class_idxs = list(class_idxs)
self.old_targets = self.targets
is_valid = np.isin(self.targets, self.class_idxs)
self.data = self.data[is_valid]
self.targets = np.int32(self.targets)[is_valid]
self.new_to_old_class_idx = np.sort(np.unique(self.targets))
self.old_to_new_class_idx = np.full((np.max(self.targets) + 1,), -1, dtype=np.int32)
self.old_to_new_class_idx[self.new_to_old_class_idx] = np.arange(len(self.new_to_old_class_idx))
self.targets = self.old_to_new_class_idx[self.targets]
self.targets = torch.LongTensor(self.targets)
self.classes = [c for c in self.classes if self.class_to_idx[c] in self.class_idxs]
# print(self.classes)
# self.data = self.data[:5]
# self.targets = self.targets[:5]
# print(len(self.data))
# return
# def __getitem__(self, index):
# """
# Args:
# index (int): Index
# Returns:
# tuple: (image, target) where target is index of the target class.
# """
# img, target = self.data[index], self.targets[index]
# # doing this so that it is consistent with all other datasets
# # to return a PIL Image
# img = Image.fromarray(img)
# if self.transform is not None:
# img = self.transform(img)
# if self.target_transform is not None:
# target = self.target_transform(target)
# return img, target
def get_dataloaders(data_path,
targets,
sources=None, # Ignored
masks=None, # Ignored
tasks=None, # Ignored
epochlength=20000,
epochs_until_cycle=1,
batch_size=64,
batch_size_val=4,
transform=None,
num_workers=0,
load_to_mem=False,
pin_memory=False,
imsize=256):
'''
Targets can either be of the form [iterable1, iterable2]
or of the form 'cifarXX-YY'
'''
if transform is None:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
# transform = transforms.Compose([
# transforms.CenterCrop(imsize),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# normalize,
# ])
dataloaders = {}
train_dataloaders = []
classes = []
for target in targets:
if isinstance(target[0], str):
start, end = [int(i) for i in target[0].lower().replace('cifar', '').split('-')]
classes.append(np.arange(start, end + 1))
else:
classes.append(target)
for i, task in enumerate(tqdm(classes, 'Loading training data')):
should_dl = int(i)==0
dataset = iCIFAR100(data_path, task, train=True, transform=transform, target_transform=None, download=should_dl)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
train_dataloaders.append(dataloader)
dataloaders['train'] = CyclingDataLoader(train_dataloaders, epochlength, epochs_until_cycle=epochs_until_cycle)
val_dataloaders = []
for task in tqdm(classes, 'Loading validation data'):
dataset = iCIFAR100(data_path, task, train=False, transform=transform, target_transform=None, download=False)
dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory) #,
val_dataloaders.append(dataloader)
dataloaders['val'] = ConcatenatedDataLoader(val_dataloaders)
# dataset = iCIFAR100(data_path, train=False, transform=transform, target_transform=None, download=False)
# dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['test'] = []
return dataloaders
def get_limited_dataloaders(data_path,
sources, # Ignored
targets,
masks, # Ignored
tasks=None, # Ignored
epochlength=20000,
batch_size=64,
batch_size_val=4,
transform=None,
num_workers=0,
load_to_mem=False,
pin_memory=False,
imsize=256):
'''
Targets can either be of the form [iterable1, iterable2]
or of the form 'cifarXX-YY'
'''
if transform is None:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
dataloaders = {}
train_dataloaders = []
classes = []
for target in targets:
if isinstance(target[0], str):
start, end = [int(i) for i in target[0].lower().replace('cifar', '').split('-')]
classes.append(np.arange(start, end + 1))
else:
classes.append(target)
for i, task in enumerate(tqdm(classes, 'Loading training data')):
should_dl = int(i)==0
dataset = iCIFAR100(data_path, task, train=True, transform=transform, target_transform=None, download=should_dl)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
train_dataloaders.append(dataloader)
dataloaders['train'] = KthDataLoader(train_dataloaders, k=0, epochlength=1000)
val_dataloaders = []
for task in tqdm(classes, 'Loading validation data'):
dataset = iCIFAR100(data_path, task, train=False, transform=transform, target_transform=None, download=False)
dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory) #,
val_dataloaders.append(dataloader)
dataloaders['val'] = KthDataLoader(val_dataloaders, k=0)
dataloaders['test'] = []
return dataloaders
def get_cifar_dataloaders(data_path,
sources, # Ignored
targets,
masks, # Ignored
tasks=None, # Ignored
epochlength=20000,
batch_size=64,
batch_size_val=4,
transform=None,
num_workers=0,
load_to_mem=False,
pin_memory=False,
imsize=256):
'''
Targets can either be of the form [iterable1, iterable2]
or of the form 'cifarXX-YY'
'''
if transform is None:
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
transform_val = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
else:
transform_train = transform
transform_val = transform
dataloaders = {}
dataset = torchvision.datasets.CIFAR10(data_path, train=True, transform=transform_train, target_transform=None, download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['train'] = dataloader
dataset = torchvision.datasets.CIFAR10(data_path, train=False, transform=transform_val, target_transform=None, download=False)
dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory) #,
dataloaders['val'] = dataloader
dataloaders['test'] = []
return dataloaders
```
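A minimal usage sketch of the incremental-CIFAR loaders above. The data path, the two ten-class blocks, and the import path are illustrative assumptions; `get_dataloaders` expects each target either as an iterable of class indices or as a single-element list like `['cifar00-09']`.
```python
# Sketch only: two sequential 10-class CIFAR-100 tasks, cycled by CyclingDataLoader.
from tlkit.data.datasets.icifar_dataset import get_dataloaders  # hypothetical module path

dataloaders = get_dataloaders(
    data_path='/tmp/cifar100',                     # hypothetical download/cache location
    targets=[['cifar00-09'], ['cifar10-19']],      # parsed into class index ranges 0-9 and 10-19
    epochlength=5000,
    batch_size=64,
)
for batch in dataloaders['train']:                 # each batch comes from the currently active task loader
    break
```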
#### File: data/datasets/taskonomy_dataset.py
```python
from collections import namedtuple, Counter, defaultdict
from tlkit.data.sequential_tasks_dataloaders import ConcatenatedDataLoader, CyclingDataLoader, ErrorPassingConcatenatedDataLoader, ErrorPassingCyclingDataLoader
from tlkit.utils import SINGLE_IMAGE_TASKS, TASKS_TO_CHANNELS
import torch
import torch.utils.data as utils
import torchvision.transforms as transforms
import torchvision.datasets as ds
import torch.utils.data as data
from tqdm import tqdm
from PIL import Image, ImageFile
import numpy as np
import os
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import warnings
from tlkit.data.img_transforms import default_loader, get_transform
from tlkit.data.splits import SPLIT_TO_NUM_IMAGES, taskonomy_no_midlevel as split_taskonomy_no_midlevel
TRAIN_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['train']
VAL_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['val']
TEST_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['test']
ImageFile.LOAD_TRUNCATED_IMAGES = True # TODO Test this
class TaskonomyData(data.Dataset):
'''
Loads data for the Taskonomy dataset.
This expects that the data is structured
/path/to/data/
rgb/
modelk/
point_i_view_j.png
...
depth_euclidean/
... (other tasks)
If one would like to use pretrained representations, then they can be added into the directory as:
/path/to/data/
rgb_encoding/
modelk/
point_i_view_j.npy
...
Basically, any other folder name will work as long as it is named the same way.
'''
def __init__(self, data_path,
tasks,
buildings,
transform=None,
load_to_mem=False,
zip_file_name=False,
max_images=None):
'''
data_path: Path to data
tasks: Which tasks to load. Any subfolder will work as long as data is named accordingly
buildings: Which models to include. See `splits.taskonomy`
transform: one transform per task.
Note: This assumes that all images are present in all (used) subfolders
'''
self.return_tuple = True
if isinstance(tasks, str):
tasks = [tasks]
transform = [transform]
self.return_tuple = False
self.buildings = buildings
self.cached_data = {}
self.data_path = data_path
self.load_to_mem = load_to_mem
self.tasks = tasks
self.zip_file_name = zip_file_name
self.urls = {task: make_dataset(os.path.join(data_path, task), buildings, max_images)
for task in tasks}
# Validate number of images
n_images_task = [(len(obs), task) for task, obs in self.urls.items()]
print("\t" + " | ".join(["{}: {}".format(k, task) for task, k in n_images_task]))
if max(n_images_task)[0] != min(n_images_task)[0]:
print("Each task must have the same number of images. However, the max != min ({} != {}). Number of images per task is: \n\t{}".format(
max(n_images_task)[0], min(n_images_task)[0], "\n\t".join([str(t) for t in n_images_task])))
# count number of frames per building per task
all_buildings = defaultdict(dict)
for task, obs in self.urls.items():
c = Counter([url.split("/")[-2] for url in obs])
for building in c:
all_buildings[building][task] = c[building]
# find where the number of distinct counts is more than 1
print('Removing data from the following buildings')
buildings_to_remove = []
for b, count in all_buildings.items():
if len(set(list(count.values()))) > 1:
print(f"\t{b}:", count)
buildings_to_remove.append(b)
# [(len(obs), task) for task, obs in self.urls.items()]
# redo the loading with fewer buildings
buildings_redo = [b for b in buildings if b not in buildings_to_remove]
self.urls = {task: make_dataset(os.path.join(data_path, task), buildings_redo)
for task in tasks}
n_images_task = [(len(obs), task) for task, obs in self.urls.items()]
print("\t" + " | ".join(["{}: {}".format(k, task) for task, k in n_images_task]))
assert max(n_images_task)[0] == min(n_images_task)[0], \
"Each task must have the same number of images. However, the max != min ({} != {}). Number of images per task is: \n\t{}".format(
max(n_images_task)[0], min(n_images_task)[0], "\n\t".join([str(t) for t in n_images_task]))
self.size = max(n_images_task)[0]
# Perhaps load some things into main memory
if load_to_mem:
print('Writing activations to memory')
for t, task in zip(transform, tasks):
self.cached_data[task] = [None] * len(self)
for i, url in enumerate(self.urls[task]):
self.cached_data[task][i] = t(default_loader(url))
self.cached_data[task] = torch.stack(self.cached_data[task])
# self.cached_data = torch.stack(self.cached_data)
print('Finished writing some activations to memory')
self.transform = transform
def __len__(self):
return self.size
def __getitem__(self, index):
fpaths = [self.urls[task][index] for task in self.tasks]
if self.load_to_mem:
result = tuple([self.cached_data[task][index] for task in self.tasks])
else:
result = [default_loader(path) for path in fpaths]
if self.transform is not None:
# result = [transform(tensor) for transform, tensor in zip(self.transform, result)]
result_post = []
for i, (transform, tensor) in enumerate(zip(self.transform, result)):
try:
result_post.append(transform(tensor))
except Exception as e:
print(self.tasks[i], transform, tensor)
raise e
result = result_post
# handle 2 channel outputs
for i in range(len(self.tasks)):
task = self.tasks[i]
base_task = [t for t in SINGLE_IMAGE_TASKS if t in task]
if len(base_task) == 0:
continue
else:
base_task = base_task[0]
num_channels = TASKS_TO_CHANNELS[base_task]
if 'decoding' in task and result[i].shape[0] != num_channels:
assert torch.sum(result[i][num_channels:,:,:]) < 1e-5, 'unused channels should be 0.'
result[i] = result[i][:num_channels,:,:]
if self.zip_file_name:
result = tuple(zip(fpaths, result))
if self.return_tuple:
return result
else:
return result[0]
def make_dataset(dir, folders=None, max_images=None):
# folders are building names. If None, get all the images (from both building folders and dir)
has_reached_capacity = lambda images, max_images: not max_images is None and len(images) >= max_images
images = []
dir = os.path.expanduser(dir)
if not os.path.isdir(dir):
assert "bad directory"
for subfolder in sorted(os.listdir(dir)):
subfolder_path = os.path.join(dir, subfolder)
if os.path.isdir(subfolder_path) and (folders is None or subfolder in folders):
for fname in sorted(os.listdir(subfolder_path)):
path = os.path.join(subfolder_path, fname)
if not has_reached_capacity(images, max_images):
images.append(path)
# If folders/buildings are not specified, use images in dir
if folders is None and os.path.isfile(subfolder_path) and not has_reached_capacity(images, max_images):
images.append(subfolder_path)
return images
def get_dataloaders(data_path,
tasks,
batch_size=64,
batch_size_val=4,
zip_file_name=False,
train_folders=TRAIN_BUILDINGS,
val_folders=VAL_BUILDINGS,
test_folders=TEST_BUILDINGS,
transform=None,
num_workers=0,
load_to_mem=False,
pin_memory=False,
max_images=None):
"""
:param data_path: directory that data is stored at
:param tasks: names of subdirectories to return observations from
:param batch_size:
:param zip_file_name: when returning an observation, this will zip the fpath to it. E.g. (/path/to/img.png, OBS)
:param train_folders: in a big data dir, which subfolders contain our training data
:param val_folders: in a big data dir, which subfolders contain our val data
:param max_images: maximum number of images in any dataset
:return: dictionary of dataloaders
"""
if transform is None:
if isinstance(tasks, str):
transform = get_transform(tasks)
else:
transform = [get_transform(task) if len(task.split(' ')) == 1 else get_transform(*task.split(' ')) for task in tasks]
tasks = [t.split(' ')[0] for t in tasks] # handle special data operations
if isinstance(train_folders, str):
train_folders = split_taskonomy_no_midlevel[train_folders]['train']
if isinstance(val_folders, str):
val_folders = split_taskonomy_no_midlevel[val_folders]['val']
if isinstance(test_folders, str):
test_folders = split_taskonomy_no_midlevel[test_folders]['test']
dataloaders = {}
print(f"Taskonomy dataset TRAIN folders: {train_folders}")
dataset = TaskonomyData(data_path, tasks, buildings=train_folders,
transform=transform, zip_file_name=zip_file_name,
load_to_mem=load_to_mem, max_images=max_images)
if len(dataset) == 0:
print(f'\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['train'] = dataloader
print(f"Taskonomy dataset VAL folders: {val_folders}")
dataset = TaskonomyData(data_path, tasks, buildings=val_folders,
transform=transform, zip_file_name=zip_file_name, load_to_mem=load_to_mem, max_images=max_images)
if len(dataset) == 0:
print(f'\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')
dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['val'] = dataloader
print(f"Taskonomy dataset TEST folders: {test_folders}")
dataset = TaskonomyData(data_path, tasks, buildings=test_folders,
transform=transform, zip_file_name=zip_file_name, load_to_mem=load_to_mem, max_images=max_images)
if len(dataset) == 0:
print(f'\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')
dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['test'] = dataloader
return dataloaders
def get_lifelong_dataloaders(data_path,
sources,
targets,
masks,
epochs_per_task=5,
epochs_until_cycle=0,
split='fullplus',
batch_size=64,
batch_size_val=4,
transform=None,
num_workers=0,
load_to_mem=False,
pin_memory=False,
speedup_no_rigidity=False,
max_images_per_task=None):
phases = ['train', 'val', 'test']
dataloaders = {phase: [] for phase in phases}
if isinstance(masks, bool):
masks = [masks] * len(sources)
masks = [['mask_valid'] if mask else [] for mask in masks]
for i, (source, target, mask) in enumerate(zip(sources, targets, masks)):
print(f'# Task {i} dataloader: {source} -> {target}')
tasks = source + target + mask
dl = get_dataloaders(
data_path,
tasks,
batch_size=batch_size,
batch_size_val=batch_size_val,
train_folders=split,
val_folders=split,
test_folders=split,
transform=transform,
num_workers=num_workers,
load_to_mem=load_to_mem,
pin_memory=pin_memory,
max_images=max_images_per_task,
)
for phase in phases:
dataloaders[phase].append(dl[phase])
if speedup_no_rigidity:
# For methods that do not forget (no intransigence) by construction.
# In validation, we only compute task performance for just-trained task and next-to-be-trained task
epoch_lengths = [len(dl.dataset) for dl in dataloaders['val']]
epoch_length = min(epoch_lengths) if min(epoch_lengths) == max(epoch_lengths) else None
dl_just_trained = CyclingDataLoader(dataloaders['val'], epochs_until_cycle=1, start_dl=0,
epoch_length_per_dl=epoch_length)
dl_next_to_be_trained = CyclingDataLoader(dataloaders['val'], epochs_until_cycle=0, start_dl=0,
epoch_length_per_dl=epoch_length)
dataloaders['val'] = ErrorPassingConcatenatedDataLoader([dl_just_trained, dl_next_to_be_trained], zip_idx=False)
else:
dataloaders['val'] = ErrorPassingConcatenatedDataLoader(dataloaders['val'])
train_epoch_length = SPLIT_TO_NUM_IMAGES[split] if split is not None else min([len(dl.dataset) for dl in dataloaders['train']])
dataloaders['train'] = ErrorPassingCyclingDataLoader(dataloaders['train'], epoch_length_per_dl=epochs_per_task * train_epoch_length, epochs_until_cycle=epochs_until_cycle)
dataloaders['test'] = ErrorPassingConcatenatedDataLoader(dataloaders['test'])
return dataloaders
```
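A usage sketch for the Taskonomy loaders above. The data directory is hypothetical; task names must match the subfolder layout described in the `TaskonomyData` docstring, and `'fullplus'` is resolved through `split_taskonomy_no_midlevel`.
```python
# Sketch only: rgb -> surface normals with a validity mask.
dataloaders = get_dataloaders(
    data_path='/mnt/data/taskonomy',        # hypothetical root containing rgb/, normal/, mask_valid/
    tasks=['rgb', 'normal', 'mask_valid'],
    batch_size=32,
    train_folders='fullplus',
    val_folders='fullplus',
    test_folders='fullplus',
)
rgb, normal, mask = next(iter(dataloaders['train']))   # one transformed tensor per requested task
```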
#### File: tlkit/data/img_transforms.py
```python
import os  # used by default_loader below
from PIL import Image
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from tlkit.utils import TASKS_TO_CHANNELS, FEED_FORWARD_TASKS
MAKE_RESCALE_0_1_NEG1_POS1 = lambda n_chan: transforms.Normalize([0.5]*n_chan, [0.5]*n_chan)
RESCALE_0_1_NEG1_POS1 = transforms.Normalize([0.5], [0.5]) # This needs to be different depending on num out chans
MAKE_RESCALE_0_MAX_NEG1_POS1 = lambda maxx: transforms.Normalize([maxx / 2.], [maxx * 1.0])
RESCALE_0_255_NEG1_POS1 = transforms.Normalize([127.5,127.5,127.5], [255, 255, 255])
shrinker = nn.Upsample(scale_factor=0.125, mode='nearest')
def get_transform(task, special=None):
if task in ['rgb', 'normal', 'reshading']:
if special is None:
return transform_8bit
elif special == 'compressed':
return lambda x: downsample_group_stack(transform_8bit(x))
elif task in ['mask_valid']:
return transforms.ToTensor()
elif task in ['keypoints2d', 'keypoints3d', 'depth_euclidean', 'depth_zbuffer', 'edge_texture', 'edge_occlusion']:
# return transform_16bit_int
return transform_16bit_single_channel
elif task in ['principal_curvature', 'curvature']:
if special is None:
return transform_8bit_n_channel(2)
elif special == 'compressed':
return lambda x: downsample_group_stack(transform_8bit_n_channel(2)(x))
elif task in ['segment_semantic']: # this is stored as 1 channel image (H,W) where each pixel value is a different class
return transform_dense_labels
elif len([t for t in FEED_FORWARD_TASKS if t in task]) > 0:
return torch.Tensor
elif 'decoding' in task:
return transform_16bit_n_channel(TASKS_TO_CHANNELS[task.replace('_decoding', '')])
elif 'encoding' in task:
return torch.Tensor
else:
raise NotImplementedError("Unknown transform for task {}".format(task))
def downsample_group_stack(img):
# (k, 256, 256) -> (k, 32, 32) -> (4*k, 16, 16)
no_batch = False
if len(img.shape) == 3:
no_batch = True
img = img.unsqueeze(dim=0)
assert len(img.shape) == 4
img = shrinker(img)
assert img.shape[2] == img.shape[3] == 32
img = F.unfold(img, kernel_size=2, stride=2).view(img.shape[0],-1,16,16)
# img = F.unfold(img, kernel_size=2, stride=2).view(1,-1,8,8)
img = img[:,:8,:,:] # keep only first 8 channels
if no_batch:
img = img.squeeze()
return img
transform_dense_labels = lambda img: torch.Tensor(np.array(img)).long() # avoids normalizing
transform_8bit = transforms.Compose([
transforms.ToTensor(),
MAKE_RESCALE_0_1_NEG1_POS1(3),
])
def transform_8bit_n_channel(n_channel=1):
crop_channels = lambda x: x[:n_channel] if x.shape[0] > n_channel else x
return transforms.Compose([
transforms.ToTensor(),
crop_channels,
MAKE_RESCALE_0_1_NEG1_POS1(n_channel),
])
def transform_16bit_single_channel(im):
im = transforms.ToTensor()(im)
im = im.float() / (2 ** 16 - 1.0)
return RESCALE_0_1_NEG1_POS1(im)
def transform_16bit_n_channel(n_channel=1):
if n_channel == 1:
return transform_16bit_single_channel # PyTorch handles these differently
else:
return transforms.Compose([
transforms.ToTensor(),
MAKE_RESCALE_0_1_NEG1_POS1(n_channel),
])
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
if '.npy' in path:
return np.load(path)
elif '.json' in path:
raise NotImplementedError("Not sure how to load files of type: {}".format(os.path.basename(path)))
else:
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
im = accimage_loader(path)
else:
im = pil_loader(path)
return im
def pil_loader(path):
    # NOTE: this second definition overrides the RGB-converting pil_loader above and keeps the image's original mode
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert(img.mode)
```
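As a quick illustration of how `get_transform` dispatches on the task name (a sketch with synthetic images):
```python
# Sketch only: 8-bit RGB is rescaled to [-1, 1]; 16-bit depth goes through transform_16bit_single_channel.
from PIL import Image
import numpy as np
from tlkit.data.img_transforms import get_transform

rgb_tf = get_transform('rgb')
depth_tf = get_transform('depth_zbuffer')

rgb = Image.fromarray(np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8))
depth = Image.fromarray(np.random.randint(0, 2**16, (256, 256)).astype(np.uint16))

print(rgb_tf(rgb).shape)      # torch.Size([3, 256, 256]), values in [-1, 1]
print(depth_tf(depth).shape)  # torch.Size([1, 256, 256])
```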
#### File: Side-tuning/tlkit/logging_helpers.py
```python
import numpy as np
import torch
from torch.distributions import Categorical
import torch.nn as nn
import torch.nn.functional as F
from evkit.utils.viz.core import pack_images
import tnt.torchnet as tnt
import warnings
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor
unorm = UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
def log(mlog, key, val, phase):
if mlog is not None:
# logger.add_scalar(tag=key, scalar_value=val, global_step=t)
if 'image' in key:
if isinstance(val, np.ndarray):
val = torch.from_numpy(val)
val = unorm(val)
elif 'histogram' in key:
if isinstance(val, torch.Tensor):
val = val.cpu().numpy()
outlier_bound = 10.
while np.count_nonzero(val > outlier_bound) > val.size * 0.01: # high number of outliers
val = val[ val < outlier_bound ]
outlier_bound -= 0.5
mlog.update_meter(val, meters={key}, phase=phase)
def reset_log(mlog, flog, epoch, phase, use_thread=False):
if use_thread:
warnings.warn('use_threads set to True, but done synchronously still')
if not mlog:
return
results = mlog.peek_meter(phase=phase) # need to be run before reset
mlog.reset_meter(epoch, mode=phase)
# Log to file
results_to_log = {}
results['step_num'] = epoch
for k in results.keys():
if 'input/task' in k or 'output/task' in k: # these are too big to log
continue
else:
results_to_log[k] = results[k]
if flog:
flog.log('all_results', results_to_log)
return []
def add_classification_specific_logging(cache, mlog, task=None, phase='train'):
''' Adds in top1 and top5
'''
prediction = cache['predictions']
label = cache['labels']
is_one_hot = len(label.shape) == 1
if is_one_hot: # one-hot labels:
top5_label = torch.stack([label] * 5, dim=-1)
else:
top5_label = torch.argsort(label, dim=1, descending=True)[:, :5]
meter_suffix = f'/task_{task}' if task is not None else ''
top1_label = top5_label[:,:1]
mlog.update_meter(prediction, target=top1_label, meters={f'accuracy_top1{meter_suffix}'}, phase=phase)
mlog.update_meter(prediction, target=top1_label, meters={f'accuracy_top5{meter_suffix}'}, phase=phase)
top5_pred = torch.argsort(prediction, dim=1, descending=True)[:, :5]
top1_pred = top5_pred[:,:1]
entropy_pred = -1 * torch.sum(torch.softmax(prediction, dim=1) * F.log_softmax(prediction, dim=1)) / prediction.shape[0]
perplexity_pred = torch.exp(entropy_pred).cpu()
mlog.update_meter(perplexity_pred, meters={f'perplexity_pred{meter_suffix}'}, phase=phase)
if is_one_hot:
perplexity_label = 1
else:
            if 0 <= torch.min(label) and torch.max(label) <= 1:  # labels already form a probability measure
entropy_label = -1 * torch.sum(label * torch.log(label)) / label.shape[0]
else:
entropy_label = -1 * torch.sum(torch.softmax(label, dim=1) * F.log_softmax(label, dim=1)) / label.shape[0]
perplexity_label = torch.exp(entropy_label).cpu()
mlog.update_meter(perplexity_label, meters={f'perplexity_label{meter_suffix}'}, phase=phase)
cache['top5_label'] = top5_label
cache['top5_pred'] = top5_pred
def add_imitation_specific_logging(prediction, label, mlog, phase):
perplexity = torch.mean(torch.exp(Categorical(logits=prediction).entropy()))
mlog.update_meter(perplexity.cpu(), meters={'diagnostics/perplexity'}, phase=phase)
if len(label.shape) == 2:
mlog.update_meter(prediction.cpu(), target=torch.argmax(label.cpu(), dim=1), meters={'diagnostics/accuracy'}, phase=phase)
elif len(label.shape) == 1:
mlog.update_meter(prediction.cpu(), target=label.cpu(), meters={'diagnostics/accuracy'}, phase=phase)
MAX_NUM_IMAGES = 64
def log_image(mlog, task, cfg, x, label, prediction, masks=None, cache={}):
targets = cache['targets']
phase = cache['phase']
encoding_only = all(['encoding' in t for t in targets]) and not isinstance(cfg['training']['loss_fn'], list) and not 'perceptual' in cfg['training']['loss_fn']
masks = masks.cpu() if masks is not None else None
if len(label.shape) == 4 and not encoding_only:
if not isinstance(x, torch.Tensor):
x = x[0]
if any(['encoding' in t for t in targets]): # there should have been something to do this earlier, where'd it go?
prediction = cache['inputs_decoded']
if 'targets_decoded' in cache:
label = cache['targets_decoded']
if 'class_object' in targets[task]: # handle classification tasks
warnings.warn('index_to_image will crash the program on k')
return
if not isinstance(x, torch.Tensor):
x = x[0]
_, _, img_size, _ = x.shape
label = index_to_image(cache['top5_label'].cpu(), synset_arr, img_size).cuda()
prediction = index_to_image(cache['top5_pred'].cpu(), synset_arr, img_size).cuda()
if prediction.shape[1] == 2: # handle 2 channels
zero_layer = torch.zeros_like(prediction)[:,:1,:,:]
prediction = torch.cat((prediction, zero_layer), dim=1)
label = torch.cat((label, zero_layer), dim=1)
if len(label.shape) == 4 and not encoding_only:
# Unnormalize
x_out = to_human(x.cpu())[:MAX_NUM_IMAGES]
prediction_out = to_human(prediction.cpu())[:MAX_NUM_IMAGES]
label_out = to_human(label.cpu())[:MAX_NUM_IMAGES]
if masks is not None:
masks_out = to_human(masks.cpu())[:MAX_NUM_IMAGES]
else:
masks_out = None
im_samples = pack_images(x_out, prediction_out, label_out, mask=masks_out)
log(mlog, f'output/task_{task}', im_samples, phase=phase)
if isinstance(x, list): # for more than single inputs (rgb, curv) ... can I do this earlier? not sure...
x = x[0]
if len(x.shape) == 4:
x_out = to_human(x.cpu())
log(mlog, f'input/task_{task}', x_out[0], phase=phase)
def write_logs(mlog, flog, task, step, cfg, cache={}, to_print=True)->list:
phase = cache['phase']
logs = mlog.peek_meter(phase=phase)
if to_print:
loss_str = ''
for loss in cfg['training']['loss_list']:
loss_name = f'losses/{loss}' if task is None else f'losses/{loss}_{task}'
loss_value = logs[loss_name] # warning: this will become nan if nothing has been logged
loss_value = loss_value.item() if not isinstance(loss_value, float) else loss_value
loss_str += ' | ' + loss + ' loss: {0:.6f} '.format(loss_value)
print(f'Logging step {step} ({phase}) {loss_str}')
context = reset_log(mlog, flog, step, phase)
return context
def get_logger(cfg, uuid):
if cfg['saving']['logging_type'] == 'visdom':
mlog = tnt.logger.VisdomMeterLogger(
title=uuid, env=uuid, server=cfg['saving']['visdom_server'],
port=cfg['saving']['visdom_port'],
log_to_filename=cfg['saving']['visdom_log_file']
)
elif cfg['saving']['logging_type'] == 'tensorboard':
mlog = tnt.logger.TensorboardMeterLogger(
env=uuid,
log_dir=cfg['saving']['log_dir'],
plotstylecombined=True
)
else:
assert False, 'no proper logger!'
return mlog
def multidim_apply(x, dims, fn):
if len(dims) == 0:
return x
else:
return multidim_apply(fn(x, dim=dims[0], keepdim=True)[0], dims[1:], fn)
def to_human(x):
# normalizes batch of image to (0,1)
assert len(x.shape) == 4, 'working with batched images only'
max_dim = multidim_apply(x, dims=[0, 2, 3], fn=torch.max)
min_dim = multidim_apply(x, dims=[0, 2, 3], fn=torch.min)
x_out = (x - min_dim) / (max_dim - min_dim)
return x_out
```
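A small sketch of the `to_human` helper above, which rescales a batch of images into [0, 1] per channel before logging (synthetic batch; the import path follows the file header):
```python
# Sketch only: min/max are reduced over batch, height and width (dims 0, 2, 3), one scale per channel.
import torch
from tlkit.logging_helpers import to_human

batch = torch.randn(8, 3, 64, 64)
viewable = to_human(batch)
print(viewable.min().item(), viewable.max().item())   # ~0.0 and ~1.0
```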
#### File: tlkit/models/resnet_cifar.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
import numpy as np
import warnings
from tlkit.utils import forward_sequential
from .superposition import HashBasicBlock
from tlkit.models.basic_models import EvalOnlyModel
from .basic_models import LambdaLayer
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A', batchnorm_kwargs={'track_running_stats': True}):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, **batchnorm_kwargs)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, **batchnorm_kwargs)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
                                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes, **batchnorm_kwargs)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(EvalOnlyModel):
def __init__(self, block, num_blocks, num_classes=10, period=None, debug=False, **kwargs):
super(ResNet, self).__init__(**kwargs)
self.in_planes = 16
self.num_classes = num_classes
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1, period=period, debug=debug)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2, period=period, debug=debug)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2, period=period, debug=debug)
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride, period, debug):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
try:
layers.append(block(self.in_planes, planes, period=period, stride=stride, debug=debug))
except TypeError:
layers.append(block(self.in_planes, planes, stride=stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def resnet20(num_classes=10):
return ResNet(BasicBlock, [3, 3, 3], num_classes)
def resnet32(num_classes=10):
return ResNet(BasicBlock, [5, 5, 5], num_classes)
def resnet44(num_classes=10):
return ResNet(BasicBlock, [7, 7, 7], num_classes)
def resnet56(num_classes=10):
return ResNet(BasicBlock, [9, 9, 9], num_classes)
def resnet110(num_classes=10):
return ResNet(BasicBlock, [18, 18, 18], num_classes)
def resnet1202(num_classes=10):
return ResNet(BasicBlock, [200, 200, 200], num_classes)
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
#### new stuff
class ResnetiCifar44(ResNet):
def __init__(self, bsp=False, **new_kwargs):
if bsp:
super().__init__(HashBasicBlock, [7, 7, 7], **new_kwargs)
else:
super().__init__(BasicBlock, [7, 7, 7], **new_kwargs)
self.bsp = bsp
def forward(self, x, task_idx:int=-1):
out = F.relu(self.bn1(self.conv1(x)))
if self.bsp: # bsp mode
out = forward_sequential(out, self.layer1, task_idx)
out = forward_sequential(out, self.layer2, task_idx)
out = forward_sequential(out, self.layer3, task_idx)
else:
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class ResnetiCifar(ResNet):
def __init__(self, **new_kwargs):
super().__init__(BasicBlock, [3, 3, 3], **new_kwargs)
def forward(self, x):
return super().forward(x)
class ResnetiCifar44NoLinear(ResNet):
def __init__(self, bsp=False, final_act=True, **new_kwargs):
if bsp:
super().__init__(HashBasicBlock, [7, 7, 7], **new_kwargs)
else:
super().__init__(BasicBlock, [7, 7, 7], **new_kwargs)
self.bsp = bsp
self.final_act = final_act
del self.linear
def forward(self, x, time:int=-1):
out = F.relu(self.bn1(self.conv1(x)))
if self.bsp: # bsp mode
out = forward_sequential(out, self.layer1, time)
out = forward_sequential(out, self.layer2, time)
out = forward_sequential(out, self.layer3, time)
else:
out = self.layer1(out)
out = self.layer2(out)
if self.final_act:
out = self.layer3(out)
else:
for i in range(6):
out = self.layer3[i](out)
basic_block = self.layer3[6]
block_input = out
out = F.relu(basic_block.bn1(basic_block.conv1(out)))
# out = basic_block.bn2(basic_block.conv2(out))
out = basic_block.conv2(out)
out += basic_block.shortcut(block_input)
# out = F.relu(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
return out
def start_training(self):
if self.bsp:
warnings.warn('Before training: Applying context to weights, Are you sure?')
for name, param in self.named_parameters():
if 'conv' in name and 'weight' in name and 'layer' in name:
o = torch.from_numpy( np.random.binomial(p=0.5, n=1, size=param.shape[1:]).astype(np.float32) * 2 - 1 ).cuda()
self.state_dict()[name] = param * o
class ResnetiCifar44NoLinearWithCache(ResnetiCifar44NoLinear):
def forward(self, x, time:int=-1):
self.x_pre_layer1 = F.relu(self.bn1(self.conv1(x))) # (16,32,32)
self.x_layer1 = self.layer1(self.x_pre_layer1) # (16,32,32)
self.x_layer2 = self.layer2(self.x_layer1) # (32,16,16)
self.x_layer3 = self.layer3(self.x_layer2) # (64, 8, 8)
out = F.avg_pool2d(self.x_layer3, self.x_layer3.size()[3])
out = out.view(out.size(0), -1)
return out, [self.x_pre_layer1.detach(), self.x_layer1.detach(), self.x_layer2.detach(), self.x_layer3.detach()]
```
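A minimal sketch exercising the CIFAR-style ResNet factories above, assuming `EvalOnlyModel` can be constructed without extra required keyword arguments:
```python
# Sketch only: a 44-layer CIFAR ResNet on a random 32x32 batch.
import torch
from tlkit.models.resnet_cifar import resnet44, test

net = resnet44(num_classes=100)
logits = net(torch.randn(2, 3, 32, 32))
print(logits.shape)   # expected: torch.Size([2, 100])
test(net)             # prints total parameter and layer counts
```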
#### File: torchnet/meter/medianimagemeter.py
```python
import torch
import numpy as np
class MedianImageMeter(object):
def __init__(self, bit_depth, im_shape, device='cpu'):
self.bit_depth = bit_depth
self.im_shape = list(im_shape)
self.device = device
if bit_depth == 8:
self.dtype = np.uint8
elif bit_depth == 16:
self.dtype = np.uint16
else:
raise NotImplementedError(
"MedianMeter cannot find the median of non 8/16 bit-depth images.")
self.reset()
def reset(self):
self.freqs = self.make_freqs_array()
def add(self, val, mask=1):
self.val = torch.LongTensor( val.astype(np.int64).flatten()[np.newaxis,:] ).to(self.device)
if type(mask) == int:
mask = torch.IntTensor(self.val.size()).fill_(mask).to(self.device)
else:
mask = torch.IntTensor(mask.astype(np.int32).flatten()[np.newaxis,:]).to(self.device)
self.freqs.scatter_add_(0, self.val, mask)
self.saved_val = val
def value(self):
self._avg = np.cumsum(
self.freqs.cpu().numpy(),
axis=0)
self._avg = np.apply_along_axis(
lambda a: a.searchsorted(a[-1] / 2.),
axis=0,
arr=self._avg)\
.reshape(tuple([-1] + self.im_shape))
return np.squeeze(self._avg, 0)
def make_freqs_array(self):
        # freqs is stored flattened with shape (2**bit_depth, prod(im_shape)): one histogram row per intensity value, one column per pixel/channel
shape = tuple([2**self.bit_depth] + self.im_shape)
freqs = torch.IntTensor(shape[0], int(np.prod(shape[1:]))).zero_()
return freqs.to(self.device)
``` |
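A usage sketch of `MedianImageMeter` (synthetic 8-bit frames; shapes and the import path are illustrative):
```python
# Sketch only: accumulate a few frames and read back the per-pixel median.
import numpy as np
from torchnet.meter.medianimagemeter import MedianImageMeter  # hypothetical import path based on the file header

meter = MedianImageMeter(bit_depth=8, im_shape=(4, 4, 3))
for _ in range(5):
    meter.add(np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8))
median_img = meter.value()   # np.ndarray of shape (4, 4, 3)
print(median_img.shape)
```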
{
"source": "JozielOliveira/estrtura-de-dados-2",
"score": 4
} |
#### File: estrtura-de-dados-2/Estrutura/Arvore.py
```python
import sys
class Tree :
def __init__(self):
self.item = None
self.direita = None
self.esquerda = None
def insert(self,item):
        if self.item == None: # check whether the root already holds an element
self.item = item
return
elif item < self.item:
if self.esquerda == None:
self.esquerda = Tree()
return self.esquerda.insert(item)
else:
if self.direita == None:
self.direita = Tree()
return self.direita.insert(item)
def search(self, item):
        if self.item == item: # check whether the searched element is at the root
return True
elif item < self.item:
if self.esquerda == None:
return False
return self.esquerda.search(item)
else:
if self.direita == None:
return False
return self.direita.search(item)
def inOrdem(self):
if self.item != None:
if self.esquerda != None :
self.esquerda.inOrdem()#E
print(self.item) #R
if self.direita != None :
self.direita.inOrdem() #D
def posOrdem(self) :
if self.item != None:
if self.esquerda != None :
self.esquerda.posOrdem()#E
if self.direita != None :
self.direita.posOrdem() #D
print(self.item) #R
def preOrdem(self) :
if self.item != None:
sys.stdout.write("(")
sys.stdout.write(str(self.item)) #R
if self.esquerda != None :
self.esquerda.preOrdem()#E
if self.direita != None :
self.direita.preOrdem() #D
sys.stdout.write(")")
def regra1(self):
if self.esquerda != None:
            return self.esquerda.regra2() # go left and look for the largest value to put in its place
elif self.direita != None :
            return self.direita.regra22() # go right and look for the smallest value to put in its place
else :
return None
def regra2(self):
if self.direita != None :
return self.direita.regra2()
elif self.esquerda != None :
value = self.item
self.item = self.esquerda.item
self.direita = self.esquerda.direita
self.esquerda = self.esquerda.esquerda
return value
else :
value = self.item
self.item = None
self.direita = None
self.esquerda = None
return value
def regra22(self):
if self.esquerda != None :
return self.esquerda.regra22()
elif self.direita != None :
value = self.item
self.item = self.direita.item
            # copy the left child from the old right node before overwriting self.direita
            self.esquerda = self.direita.esquerda
            self.direita = self.direita.direita
return value
else :
value = self.item
self.item = None
self.direita = None
self.esquerda = None
return value
def delete(self, item):
if self.item == item:
self.item = self.regra1()
elif item < self.item and self.esquerda != None:
self.esquerda.delete(item)
elif self.direita != None:
self.direita.delete(item)
return
arvore = Tree()
arvore.insert(10)
arvore.insert(5)
arvore.insert(4)
arvore.insert(7)
arvore.insert(6)
arvore.insert(3)
arvore.insert(13)
arvore.insert(12)
arvore.insert(11)
arvore.insert(14)
arvore.insert(15)
arvore.insert(8)
arvore.insert(9)
# Mostrar em preOrdem
print("Mostrar em preOrdem")
arvore.preOrdem()
# Mostrar em ordem
print()
print("Mostrar em ordem")
arvore.inOrdem()
# Mostrar em posOrdem
print("Mostrar em posOrdem")
arvore.posOrdem()
print("Buscar numero 3")
print(arvore.search(3))
print("Buscar numero 18")
print(arvore.search(18))
print("")
print("Delete 10")
arvore.delete(10)
print("Mostrar em preOrdem")
arvore.preOrdem()
```
#### File: estrtura-de-dados-2/EstruturaPython/Pilha.py
```python
class Pilha :
    # constructor
def __init__(self, lista):
self.lista = lista
    # insertion method
def push(self, elem):
self.lista.append(elem)
    # removal method (removes the element at the top of the stack)
def pop(self):
self.lista.pop()
    # listing method
def listar(self):
a = 0
for item in self.lista:
a += 1
print(a,' ', item)
``` |
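A short usage sketch of the stack above (the module file name in the import is an assumption):
```python
# Sketch only: push two items, pop the top, list what remains.
from Pilha import Pilha  # assuming the file is importable as a module named Pilha

pilha = Pilha([])
pilha.push('a')
pilha.push('b')
pilha.pop()      # removes 'b'
pilha.listar()   # prints: 1   a
```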
{
"source": "JoZimmer/Beam-Models",
"score": 2
} |
#### File: Beam-Models/2DBeam/element_functions.py
```python
import numpy as np
def get_stiffness_matrix_var(alpha, L=15.0, E=8750.0,Iy=17500.0):
k_yy_11 = 12.0 * E * Iy / L**3
k_yy_12 = -k_yy_11
k_gg_11 = 4.0 * E * Iy / L
k_gg_12 = 2.0 * E * Iy / L
k_yg = 6.0 * E * Iy / L**2
akyg = alpha * k_yg
k = np.array([[ k_yy_11, -akyg, k_yy_12, -akyg],
[-akyg, k_gg_11, akyg, k_gg_12],
[ k_yy_12, akyg, k_yy_11, akyg],
[-akyg, k_gg_12, akyg, k_gg_11]])
return k
def get_stiffness_matrix_tar():
return get_stiffness_matrix_var(alpha=1.0)
def get_mass_matrix_var(beta1, beta2, rho=150.0, L=15.0):
m_yy_11 = rho * L * 13./35.
m_yy_12 = rho * L * 9./70.
m_gg_11 = rho * L**3 /105.
m_gg_12 = rho * L**3 /140.
m_yg_11 = rho * (L**2) * 11./210.
m_yg_12 = rho * (L**2) * 13./420.
b1myg = beta1 * m_yg_11
b2myg = beta2 * m_yg_12
m = np.array([[ m_yy_11, -b1myg, m_yy_12, b2myg],
[-b1myg, m_gg_11, -m_yg_12, -m_gg_12],
[ m_yy_12, -m_yg_12, m_yy_11, b1myg],
[ b2myg, -m_gg_12, b1myg, m_gg_11 ]])
return m
def get_mass_matrix_tar():
return get_mass_matrix_var(beta1=1.0,beta2=1.0)
if __name__ == '__main__':
print('Stiffness target: ')
print(get_stiffness_matrix_tar())
print('Stiffness initial: ')
print(get_stiffness_matrix_var(alpha=0.00005))
print('Mass target: ')
print(get_mass_matrix_tar())
print('Mass initial: ')
print(get_mass_matrix_var(beta1=0.00025,beta2=0.00075))
```
#### File: Beam-Models/2DBeam/system_functions.py
```python
import numpy as np
from element_functions import get_stiffness_matrix_var, get_stiffness_matrix_tar, get_mass_matrix_var, get_mass_matrix_tar
def build_system_matrix(var_handles, n_el=3, n_dofs_per_node=2):
n_nodes = n_el + 1
nodes_per_elem = 2
k_sys = np.zeros((n_nodes * n_dofs_per_node,
n_nodes * n_dofs_per_node))
m_sys = np.zeros((n_nodes * n_dofs_per_node,
n_nodes * n_dofs_per_node))
for el_id in range(0,n_el,1):
k_el = get_stiffness_matrix_var(var_handles[0])
m_el = get_mass_matrix_var(var_handles[1],var_handles[2])
start = n_dofs_per_node * el_id
end = start + n_dofs_per_node * nodes_per_elem
k_sys[start: end, start: end] += k_el
m_sys[start: end, start: end] += m_el
return k_sys, m_sys
def apply_bc_by_reduction(matrix, dofs_of_bc=[0,1], n_el=3, n_dofs_per_node=2, axis='both'):
n_nodes = n_el + 1
nodes_per_elem = 2
n_dofs_total = np.arange(n_nodes * n_dofs_per_node)
dofs_to_keep = list(set(n_dofs_total) - set(dofs_of_bc))
if axis == 'both':
ixgrid = np.ix_(dofs_to_keep, dofs_to_keep)
# for a force vector
elif axis == 'row_vector':
ixgrid = np.ix_(dofs_to_keep, [0])
matrix = matrix.reshape([len(matrix), 1])
return matrix[ixgrid]
def recuperate_bc_by_extension(matrix, dofs_of_bc=[0,1], n_el=3, n_dofs_per_node=2, axis='both'):
n_nodes = n_el + 1
nodes_per_elem = 2
n_dofs_total = np.arange(n_nodes * n_dofs_per_node)
dofs_to_keep = list(set(n_dofs_total) - set(dofs_of_bc))
if axis == 'both':
rows = len(n_dofs_total)
cols = rows
ixgrid = np.ix_(dofs_to_keep,dofs_to_keep)
extended_matrix = np.zeros((rows,cols))
elif axis == 'row':
rows = len(n_dofs_total)
cols = matrix.shape[1]
# make a grid of indices on interest
ixgrid = np.ix_(dofs_to_keep, np.arange(matrix.shape[1]))
extended_matrix = np.zeros((rows, cols))
elif axis == 'column':
rows = matrix.shape[0]
cols = len(n_dofs_total)
# make a grid of indices on interest
ixgrid = np.ix_(np.arange(matrix.shape[0]), dofs_to_keep)
extended_matrix = np.zeros((rows, cols))
elif axis == 'row_vector':
rows = len(n_dofs_total)
cols = 1
ixgrid = np.ix_(dofs_to_keep, [0])
matrix = matrix.reshape([len(matrix), 1])
extended_matrix = np.zeros((rows, cols))
elif axis == 'column_vector':
rows = len(n_dofs_total)
cols = 1
ixgrid = np.ix_(dofs_to_keep)
extended_matrix = np.zeros((rows,))
extended_matrix[ixgrid] = matrix
return extended_matrix
if __name__ == '__main__':
print('Full system stiffness matrix: ')
k_full, m_full = build_system_matrix([1.0,1.0,1.0])
print(k_full)
print(np.shape(k_full))
print('Reduced system stiffness matrix: ')
k_red = apply_bc_by_reduction(k_full)
print(k_red)
print(np.shape(k_red))
print(np.array_equal(k_full[2:,2:],k_red))
print('Extended (padded by zeros) stiffness matrix: ')
k_ext = recuperate_bc_by_extension(k_red)
print(k_ext)
print(np.array_equal(np.array(np.shape(k_full)),np.array(np.shape(k_ext))))
print('Full system mass matrix: ')
print(m_full)
print(np.shape(m_full))
print('Reduced system stiffness matrix: ')
m_red = apply_bc_by_reduction(m_full)
print(m_red)
print(np.shape(m_red))
print(np.array_equal(m_full[2:,2:],m_red))
print('Extended (padded by zeros) mass matrix: ')
m_ext = recuperate_bc_by_extension(m_red)
print(m_ext)
print(np.array_equal(np.array(np.shape(m_full)),np.array(np.shape(m_ext))))
```
#### File: 3DBeam/source/optimizations.py
```python
import numpy as np
import matplotlib.pyplot as plt
from os.path import join as os_join
import scipy.optimize as spo
from scipy.optimize import minimize, minimize_scalar
from scipy import linalg
from functools import partial
import source.postprocess
from source.utilities import utilities as utils
class Optimizations(object):
def __init__(self, model, optimization_parameters=None):
'''
        an initial model is given to this object
'''
self.model = model
self.opt_geometric_props = {}
if optimization_parameters:
self.opt_params = optimization_parameters
self.consider_mode = optimization_parameters['consider_mode']
self.method = optimization_parameters['method']
self.weights = optimization_parameters['weights']
else:
self.opt_params = False
# FOR EIGENFREQUENCIES
'''
These are functions taken from ParOptBeam
'''
def adjust_sway_y_stiffness_for_target_eigenfreq(self, target_freq, target_mode, print_to_console=False):
'''
        displacement in z direction -> sway_y = oscillation about the y-axis
'''
initial_iy = list(e.Iy for e in self.model.elements)
        # using partial to fix some parameters for the objective function
self.optimizable_function = partial(self.bending_y_geometric_stiffness_objective_function,
target_freq,
target_mode,
initial_iy)
init_guess = 1.0
upper_bnd = self.model.elements[0].Iz / self.model.elements[0].Iy
bnds_iy = (0.001, upper_bnd)#100) # (1/8,8)
# minimization_result = minimize(self.optimizable_function,
# init_guess,
# method='L-BFGS-B', # 'SLSQP',#
# bounds=(bnds_iy, bnds_a_sz))
min_res = minimize_scalar(self.optimizable_function, tol=1e-06)#, options={'disp':True})
# returning only one value!
opt_fctr = min_res.x
# NOTE this is only for constant Iy over the height
self.opt_geometric_props['Iy'] = [min_res.x * iy_i for iy_i in initial_iy]
if print_to_console:
print('INITIAL Iy:', ', '.join([str(val) for val in initial_iy]))
print()
print('OPTIMIZED Iy: ', ', '.join([str(opt_fctr * val) for val in initial_iy]))
print()
print('FACTOR: ', opt_fctr)
print()
def bending_y_geometric_stiffness_objective_function(self, target_freq, target_mode, initial_iy, multiplier_fctr):
for e in self.model.elements:
e.Iy = multiplier_fctr * initial_iy[e.index]
# assuming a linear dependency of shear areas
# NOTE: do not forget to update further dependencies
e.evaluate_relative_importance_of_shear()
e.evaluate_torsional_inertia()
# re-evaluate
self.model.build_system_matricies(self.model.parameters['inital_params_yg'],
self.model.parameters['params_k_ya'],
self.model.parameters['params_m_ya'])
self.model.eigenvalue_solve()
eig_freq_cur = self.model.eigenfrequencies[target_mode]
return (eig_freq_cur - target_freq)**2 / target_freq**2
def adjust_sway_z_stiffness_for_target_eigenfreq(self, target_freq, target_mode, print_to_console=False):
'''
        sway_z = oscillation in y direction, about the z-axis
'''
initial_iz = list(e.Iz for e in self.model.elements)
        # using partial to fix some parameters for the objective function
self.optimizable_function = partial(self.bending_z_geometric_stiffness_objective_function,
target_freq,
target_mode,
initial_iz)
initi_guess = 1.0
# NOTE this is correct only for homogenous cross section along length
upper_bnd = self.model.elements[0].Iy / self.model.elements[0].Iz
bnds_iz = (0.001, upper_bnd)#(0.001, 100) # (1/8,8)
# minimization_result = minimize(self.optimizable_function,
# initi_guess,
# method ='L-BFGS-B',
# bounds = bnds_iz)
min_res = minimize_scalar(self.optimizable_function, method='Bounded', tol=1e-06, bounds=bnds_iz)#, options={'disp':True})
# returning only one value!
#opt_iz_fctr = minimization_result.x
opt_iz_fctr = min_res.x
self.opt_geometric_props['Iz'] = [min_res.x * iz_i for iz_i in initial_iz]
if print_to_console:
print(' INITIAL iz:', ', '.join([str(val) for val in initial_iz]))
print()
print(' OPTIMIZED iz: ', ', '.join(
[str(opt_iz_fctr * val) for val in initial_iz]))
print()
print(' FACTOR: ', opt_iz_fctr)
print (' Final Func:', min_res.fun)
print()
def bending_z_geometric_stiffness_objective_function(self, target_freq, target_mode, initial_iz, multiplier_fctr):
for e in self.model.elements:
e.Iz = multiplier_fctr * initial_iz[e.index]
# NOTE: do not forget to update further dependencies
e.evaluate_relative_importance_of_shear()
e.evaluate_torsional_inertia()
# re-evaluate
self.model.build_system_matricies(self.model.parameters['inital_params_yg'],
self.model.parameters['params_k_ya'],
self.model.parameters['params_m_ya'])
self.model.eigenvalue_solve()
eig_freq_cur = self.model.eigenfrequencies[target_mode] # mode_type_results is an ordered list
result = (eig_freq_cur - target_freq)**2 / target_freq**2
return result
def adjust_torsional_stiffness_for_target_eigenfreq(self, target_freq, target_mode, print_to_console=False):
initial_it = list(e.It for e in self.model.elements)
initial_ip = list(e.Ip for e in self.model.elements)
# NOTE: single parameter optimization seems not to be enough
        # using partial to fix some parameters for the objective function
self.optimizable_function = partial(self.torsional_geometric_stiffness_objective_function,
target_freq,
target_mode,
initial_it,
initial_ip)
self.weights = [0.0,0.0,0.0]
# NOTE: some additional reduction factor so that ip gets changes less
init_guess = (1.0, 1.0)
# NOTE: this seems not to be enough
# bnds_it = (1/OptimizableStraightBeam.OPT_FCTR, OptimizableStraightBeam.OPT_FCTR)
# bnds_ip = (1/OptimizableStraightBeam.OPT_FCTR, OptimizableStraightBeam.OPT_FCTR)
# NOTE: seems that the stiffness contribution takes lower bound, the inertia one the upper bound
bnds_it = (1/100, 10)
bnds_ip = (1/11, 20)
if self.opt_params:
init_guess = self.opt_params['init_guess']
bnds_it = self.opt_params['bounds']
bnds_ip = self.opt_params['bounds']
# NOTE: TNC, SLSQP, L-BFGS-B seems to work with bounds correctly, COBYLA not
min_res = minimize(self.optimizable_function,
init_guess,
method='L-BFGS-B',
bounds=(bnds_it, bnds_ip),
options={'disp':False})
# returning only one value!
opt_fctr = min_res.x
self.opt_geometric_props['It'] = [min_res.x[0] * it_i for it_i in initial_it]
self.opt_geometric_props['Ip'] = [min_res.x[1] * ip_i for ip_i in initial_ip]
if print_to_console:
print('\nFACTORS It, Ip: ', ', '.join([str(val) for val in opt_fctr]))
print ('final frequency: ', self.model.eigenfrequencies[target_mode])
print()
def torsional_geometric_stiffness_objective_function(self, target_freq, target_mode, initial_it, initial_ip, multiplier_fctr):
for e in self.model.elements:
e.It = multiplier_fctr[0] * initial_it[e.index]
e.Ip = multiplier_fctr[1] * initial_ip[e.index]
# re-evaluate
self.model.build_system_matricies(self.model.parameters['inital_params_yg'],
self.model.parameters['params_k_ya'],
self.model.parameters['params_m_ya'])
self.model.eigenvalue_solve()
weights = [0]
eig_freq_cur = self.model.eigenfrequencies[target_mode]
return (eig_freq_cur - target_freq)**2 *100# / target_freq**2
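    # A minimal usage sketch for the frequency-matching helpers above (hypothetical model object and target values):
    #   opt = Optimizations(model)
    #   opt.adjust_sway_z_stiffness_for_target_eigenfreq(target_freq=0.2, target_mode=0, print_to_console=True)
    #   opt.adjust_torsional_stiffness_for_target_eigenfreq(target_freq=0.5, target_mode=2)
    #   scaled section properties are then available in opt.opt_geometric_props ('Iz', 'It', 'Ip')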
# TORSION COUPLING OPTIMIZATIONS
'''
Coupling with one design variable.
Either y-a or g-a
'''
def eigen_ya_stiffness_opt(self, which = 'kya'):
'''
Optimizes EITHER 'kya' or 'kga' to couple the y-displacement (gamma-rotation) to the torsional twist.
The optimization target is hard coded in here -> see eigenmodes_target_*y*a
        The eigenfrequency_target is mostly not used (uncomment it in the objective function to see what happens).
which: 'kya' or 'kga'
'''
if self.model.parameters['params_k_ya'] != [0.0,0.0]:
raise Exception('inital parameters of ya are not 0 - check if sensible')
eigenmodes_target_y = self.model.eigenmodes['y'][self.consider_mode]*0.9 # an assumption: y gets less if a is also deforming
        eigenmodes_target_a = np.linspace(0, eigenmodes_target_y[-1] * 0.012, eigenmodes_target_y.shape[0]) # 0.012 is the ratio of caarc tip a / tip y 1st mode
eigenfreq_target = self.model.eigenfrequencies[self.consider_mode]
self.inital = {'y':self.model.eigenmodes['y'][self.consider_mode],'a':self.model.eigenmodes['a'][self.consider_mode]}
self.targets = {'y':eigenmodes_target_y, 'a':eigenmodes_target_a}
self.optimizable_function = partial(self.obj_func_eigen_ya_stiffnes, self.consider_mode,
eigenmodes_target_y, eigenmodes_target_a, eigenfreq_target,
which)
ids = ['kya','kga']
bounds = None
#bounds = self.opt_params['bounds'][ids.index(which)]
method_scalar = 'brent'
#bounds = (0.001, 100)#,(0.001, 100))#,(0.001, 100))
if bounds:
method_scalar = 'bounded'
res_scalar = minimize_scalar(self.optimizable_function, method=method_scalar, bounds= bounds, tol=1e-6)
# SLSQP works with bounds
#res_scalar = minimize(self.optimizable_function, x0= 0.0, method=self.method, bounds=bounds, tol=1e-6, options={'disp': True})
# SLSQP works with constraints as well
#res_scalar = minimize(self.optimizable_function, x0 = init_guess, method='SLSQP', constraints=cnstrts, tol=1e-3, options={'gtol': 1e-3, 'ftol': 1e-3, 'disp': True})
#print( 'final F: ', str(self.optimizable_function))
#self.optimized_design_params = res_scalar.x
if which == 'kya':
self.optimized_design_params = {'params_k_ya':[res_scalar.x, 0.0]}
elif which == 'kga':
self.optimized_design_params = {'params_k_ya':[0.0, res_scalar.x]}
print('\noptimization result for design variable k'+which+':', res_scalar.x)
def obj_func_eigen_ya_stiffnes(self, mode_id, eigenmodes_target_y, eigenmodes_target_a, eigenfreq_target, which, design_param):
'''
Objective function for one design variable (either kya or kga).
'''
if isinstance(design_param, np.ndarray):
if design_param.size == 2:
if design_param[0] == design_param[1]:
design_param = design_param[0]
else:
raise Exception('design parameter has 2 variables that differ')
else:
design_param = design_param[0]
if which == 'kya':
self.model.build_system_matricies(params_k_ya=[design_param, 0.0])
elif which == 'kga':
self.model.build_system_matricies(params_k_ya=[0.0, design_param])
self.model.eigenvalue_solve()
eigenmodes_cur = self.model.eigenmodes
eigenfreq_cur = self.model.eigenfrequencies[self.consider_mode]
f1 = utils.evaluate_residual(eigenmodes_cur['y'][mode_id], eigenmodes_target_y)
f2 = utils.evaluate_residual(eigenmodes_cur['a'][mode_id], eigenmodes_target_a)
#f3 = utils.evaluate_residual([eigenfreq_cur], [eigenfreq_target])
weights = self.weights
f = weights[0]*f1**2 + weights[1]*f2**2 # + weights[2] * f3**2
return f
'''
Coupling with two design variables.
Either y-a and g-a
'''
def eigen_vectorial_ya_opt(self, target_to_use = 'custom'):
'''
optimizing BOTH the stiffness coupling entries
K_ya
K_ga
and the mass coupling entries
        mostly mass coupling is not necessary or sensible - see also Thesis JZ ch. 3.3.3
in optimization_parameters a boolean for turning this option on and off is used
M_ya, M_yg (both with the same parameter )
target_to_use:
- 'custom': 0.9 times the initial lateral displacement & ratio alpha/disp = 0.012; a displacement is assumed linear
            - 'realistic': taking values from a full 3D FE simulation of an eccentric building (ARiedls work)
- 'semi_realistic': uses values from the optimization_params: 'ratio_a_y_tar', 'factor_y'; a twist displacement is amplified -> original shape is taken
'''
include_mass = self.opt_params['include_mass']
# defining bounds
# NOTE: k_ya takes lower bounds than 0.1
bnds = self.opt_params['bounds']
init_guess = self.opt_params['init_guess']#,1.0]#[0.0, 0.0,0.0]#[0.12, 0.15, 0.17]
self.n_iter = 0
self.optimization_history = {'iter':[0],'func':[], 'k_ya':[init_guess[0]], 'k_ga':[init_guess[1]]}
if include_mass:
self.optimization_history['m_ya_ga'] = [init_guess[2]]
def get_callback(x):
# not really working
self.n_iter += 1
#self.optimization_history['func'].append(self.optimizable_function(x))
self.optimization_history['k_ya'].append(x[0])
self.optimization_history['k_ga'].append(x[1])
self.optimization_history['iter'].append(self.n_iter)
if include_mass:
self.optimization_history['m_ya_ga'].append(x[2])
def print_callback(x):
print (x[0], x[1], x[2], self.optimizable_function(x))
if self.model.parameters['params_k_ya'] != [0.0,0.0]:
raise Exception('inital parameters of ya are not 0 - check if the targets are still sensible')
if target_to_use == 'custom':
eigenmodes_target_y = self.model.eigenmodes['y'][self.consider_mode]*0.9
eigenmodes_target_a = np.linspace(0, eigenmodes_target_y[-1] * self.opt_params['ratio_a_y_tar'], eigenmodes_target_y.shape[0]) # 0.012 is the ratio of caarc tip a / tip y 1st mode
eigenfreq_target = self.model.eigenfrequencies[self.consider_mode]
elif target_to_use == 'realistic':
modi = np.load(os_join(*['inputs', 'eigenvectors', 'EigenvectorsGid.npy']))
z_coords = np.load(os_join(*['inputs','eigenvectors', 'z_coords_gid_45.npy']))
# is only available with 45 nodes but is fitted if the current model has a different number of nodes
if self.model.nodal_coordinates['x0'].size == 46:
eigenmodes_target_y = modi[self.consider_mode][:,4]
eigenmodes_target_a = modi[self.consider_mode][:,2] # here the ratio is 0.00373
else:
modi_fitted = utils.get_eigenform_polyfit(modi[self.consider_mode], z_coords, self.model.nodal_coordinates['x0'], plot_compare=False)
eigenmodes_target_y = modi_fitted['eigenmodes']['y']
eigenmodes_target_a = -1*modi_fitted['eigenmodes']['a']
eigenfreq_target = self.opt_params['eigen_freqs_tar'] #self.model.eigenfrequencies[self.consider_mode]
elif target_to_use == 'semi_realistic':
'''
assumes a reduction of y displacement by a custom factor < 1
        uses the initial shape of a, scaled by a factor, to get the specified a-y tip ratio
        -> reason: see the initial uncoupled max-normed shapes: a has the typical torsion shape -> needs amplification
'''
ratio_a_y = self.opt_params['ratio_a_y_tar']
factor_y = self.opt_params['factor_y']
eigenmodes_target_y = self.model.eigenmodes['y'][self.consider_mode]*factor_y
a_factor = ratio_a_y * max(eigenmodes_target_y)/max(self.model.eigenmodes['a'][self.consider_mode])
eigenmodes_target_a = self.model.eigenmodes['a'][self.consider_mode] * a_factor
eigenfreq_target = self.opt_params['eigen_freqs_tar'] #self.model.eigenfrequencies[self.consider_mode]
self.inital = {'y':self.model.eigenmodes['y'][self.consider_mode],'a':self.model.eigenmodes['a'][self.consider_mode]}
self.targets = {'y':eigenmodes_target_y, 'a':eigenmodes_target_a}
self.optimizable_function = partial(self.obj_func_eigen_vectorial_k_ya, self.consider_mode, eigenmodes_target_y, eigenmodes_target_a, eigenfreq_target, include_mass)
self.optimization_history['func'].append(self.optimizable_function(init_guess))
if not include_mass:
print ('\nnot optimizing the mass entries, thus...')
if len(bnds) != 2:
bnds = bnds[:2]
print (' ...dropping the 3rd bound given')
if len(init_guess) != 2:
init_guess = init_guess[:2]
print (' ...dropping the 3rd initial guess given\n')
# alternatively inequality constraints
cnstrts = [{'type': 'ineq', 'fun': lambda x: 100 - x[0]},
{'type': 'ineq', 'fun': lambda x: 100 - x[1]},
{'type': 'ineq', 'fun': lambda x: 100 - x[2]},
{'type': 'ineq', 'fun': lambda x: x[0] - 0.001},
{'type': 'ineq', 'fun': lambda x: x[1] - 0.001},
{'type': 'ineq', 'fun': lambda x: x[2] - 0.001}]
# SLSQP works with bounds
res_scalar = minimize(self.optimizable_function,
x0 = init_guess,
method=self.method,
bounds=bnds,
callback=get_callback,
options={'ftol': 1e-6, 'disp': True})
evals = [0,10,10]
#print ('func with manual opt params: ', self.optimizable_function(evals))
self.optimized_design_params = {'params_k_ya':res_scalar.x[:2]}
if include_mass:
self.optimized_design_params['params_m_ya'] = [res_scalar.x[-1],res_scalar.x[-1],0.0]
self.optimization_history['k_ya'].append(res_scalar.x[0])
self.optimization_history['k_ga'].append(res_scalar.x[1])
self.optimization_history['iter'].append(self.n_iter+1)
digits = 5
# SLSQP works with constraints as well
# res_scalar = minimize(self.optimizable_function, x0 = init_guess, method='SLSQP', constraints=cnstrts, tol=1e-3, options={'gtol': 1e-3, 'ftol': 1e-3, 'disp': True})
print()
print('optimized parameters:')
print (' k_ya:', round(res_scalar.x[0],digits), 'absolute:', round(self.model.comp_k[1][3]))
print (' k_ga:', round(res_scalar.x[1],digits), 'absolute:', round(self.model.comp_k[3][5]))
if include_mass:
print (' m_ya:', round(res_scalar.x[2],digits+4), 'absolute m_ya_11:', round(self.model.comp_m[1][3]), 'absolute m_ya_12:', round(self.model.comp_m[1][9]))
def obj_func_eigen_vectorial_k_ya(self, mode_id, eigenmodes_target_y, eigenmodes_target_a, eigenfreq_target, include_mass, design_params):
'''
Objective function for more than one design variable (k_ya and k_ga, optionally the mass entries).
'''
if include_mass:
self.model.build_system_matricies(params_k_ya = design_params[:2], params_m_ya=[design_params[-1], design_params[-1],0])
else:
self.model.build_system_matricies(params_k_ya = design_params)
self.model.eigenvalue_solve()
eigenmodes_cur = self.model.eigenmodes
eigenfreq_cur = self.model.eigenfrequencies[mode_id]
f1 = utils.evaluate_residual(eigenmodes_cur['y'][mode_id], eigenmodes_target_y)
f2 = utils.evaluate_residual(eigenmodes_cur['a'][mode_id], eigenmodes_target_a)
f3 = utils.evaluate_residual([eigenfreq_cur], [eigenfreq_target])
weights = self.weights
gamma = 2
components = [weights[0]*f1**gamma, weights[1]*f2**gamma, weights[2]*f3**gamma]
f = sum(components)
return f
# MASS MATRIX OPTIMIZATIONS
def mass_entries_opt_ya(self):
target = np.eye(self.model.n_dofs_node * self.model.n_elems)
self.optimizable_function = partial(self.obj_func_gen_mass, target)
bounds = self.opt_params['bounds']#,(0.001, 100))
init_guess = self.opt_params['init_guess']#,1.0]#[0.0, 0.0,0.0]#[0.12, 0.15, 0.17]
#res_scalar = minimize_scalar(self.optimizable_function, method=method, bounds= bounds, options={'gtol': 1e-6, 'disp': True})
# SLSQP works with bounds
res_scalar = minimize(self.optimizable_function, x0= init_guess, method=self.method, bounds=bounds, options={'ftol': 1e-5, 'disp': True})
print ('optimization result:', res_scalar.x)
def obj_func_gen_mass(self, target, design_params):
'''
design_params are psi1, psi2 -> only the ya entries are set, the rest are 0
'''
self.model.build_system_matricies(params_m_ya=[design_params[0],design_params[1], 0.0])
eig_values_raw, eigen_modes_raw = linalg.eigh(self.model.comp_k, self.model.comp_m)
gen_mass_cur = np.matmul(np.matmul(np.transpose(eigen_modes_raw), self.model.comp_m), eigen_modes_raw)
f1 = utils.evaluate_residual(gen_mass_cur, target)
return f1**2
```
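The objective functions above take the design vector as their last argument, so they can be bound to fixed targets with `functools.partial` and handed to `scipy.optimize.minimize`, which is exactly the wiring used in `eigen_vectorial_ya_opt`. Below is a minimal, self-contained sketch of that pattern with a toy residual in place of the eigenvalue solve; all names and numbers are illustrative, not project values.
```python
from functools import partial

from scipy.optimize import minimize


def toy_objective(mode_id, target_y, target_a, weights, design_params):
    # stand-in residuals; the real objective rebuilds the system matrices and
    # solves the eigenvalue problem before calling utils.evaluate_residual
    f1 = abs(design_params[0] - target_y)
    f2 = abs(design_params[1] - target_a)
    return weights[0] * f1**2 + weights[1] * f2**2


objective = partial(toy_objective, 0, 0.9, 0.012, [0.5, 0.5])
res = minimize(objective, x0=[0.1, 0.1], method='SLSQP',
               bounds=[(0.001, 100), (0.001, 100)],
               options={'ftol': 1e-6, 'disp': False})
print(res.x)  # approaches (0.9, 0.012), the bound toy targets
```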
#### File: solving_strategies/strategies/linear_solver.py
```python
from source.solving_strategies.strategies.solver import Solver
class LinearSolver(Solver):
def __init__(self,
array_time, time_integration_scheme, dt,
comp_model,
initial_conditions,
force,
structure_model):
super().__init__(array_time, time_integration_scheme, dt,
comp_model, initial_conditions, force, structure_model)
def _print_solver_info(self):
print("Linear Solver")
def solve(self):
# time loop
for i in range(0, len(self.array_time)):
self.step = i
current_time = self.array_time[i]
#print("time: {0:.2f}".format(current_time))
self.scheme.solve_single_step(self.force[:, i])
# appending results to the list
self.displacement[:, i] = self.scheme.get_displacement()
self.velocity[:, i] = self.scheme.get_velocity()
self.acceleration[:, i] = self.scheme.get_acceleration()
# TODO: only calculate reaction when user wants it
# if self.structure_model is not None:
# self.dynamic_reaction[:, i] = self._compute_reaction()
# reaction computed in dynamic analysis
# TODO: only calculate reaction when user wants it
# moved reaction computation to dynamic analysis level
# AK . this doesnt considers the support reaction check
#if self.structure_model is not None:
# self.dynamic_reaction[:, i] = self._compute_reaction()
# update results
self.scheme.update()
```
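For context, the scheme object consumed by `LinearSolver` only has to provide `solve_single_step`, result getters and `update`. The following is a hedged sketch of that drive-loop contract with a dummy scheme; `MockScheme` is an illustration, not a class from this repository.
```python
import numpy as np


class MockScheme:
    """Trivial stand-in exposing the subset of the scheme interface used here."""

    def __init__(self):
        self.u = 0.0

    def solve_single_step(self, f):
        self.u = f          # pretend "integration": displacement follows the force

    def get_displacement(self):
        return self.u

    def update(self):
        pass                # a real scheme shifts its internal history here


array_time = np.linspace(0.0, 1.0, 11)
force = np.sin(array_time)
displacement = np.zeros_like(array_time)

scheme = MockScheme()
for i, _ in enumerate(array_time):
    scheme.solve_single_step(force[i])
    displacement[i] = scheme.get_displacement()
    scheme.update()
print(displacement)
```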
#### File: source/utilities/CAARC_utilities.py
```python
import numpy as np
from os.path import join as os_join
import matplotlib.pyplot as plt
import source.postprocess as post
from source.utilities import utilities as utils
from source.utilities import global_definitions as GD
from plot_settings import plot_settings
COLORS = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red']
params = plot_settings.get_params(w=7.3, h=5.2)
dest_folder = 'plots_new\\CAARC'
def get_CAARC_properties(src_file_name = 'CAARC_advanced_eigenmodes.txt', evaluate_at = None, interpolation_degree = 3):
'''
returns a dictionary with information about the building A:
- storey (number), storey_level, mass, frequencies, eigenmodes
the eigenmodes are appended to a list,
one list per dof, collected in a dictionary
'''
src = os_join(*['inputs','eigenvectors', src_file_name])
caarc = {}
caarc['storey'] = np.flip(np.loadtxt(src, usecols = (0,))) # [-]
caarc['storey_level'] = np.flip(np.loadtxt(src, usecols = (1,))) # [m]
caarc['dimensons'] = {'x':240,'y':24, 'z':72}
caarc['mass'] = 1231000.0
caarc['frequencies'] = [0.231, 0.429, 0.536]
caarc['eigenmodes'] = {'x':[],'y':[],'z':[],'a':[]}
caarc['eigenmodes_fitted'] = {'x':[],'y':[],'z':[],'a':[]}
for i in range (3):
caarc['eigenmodes']['x'].append(np.zeros(60))
caarc['eigenmodes']['y'].append(np.flip(np.loadtxt(src, usecols = (3+3*i,))))
caarc['eigenmodes']['z'].append(np.flip(np.loadtxt(src, usecols = (2+3*i,))))
caarc['eigenmodes']['a'].append(np.flip(np.loadtxt(src, usecols = (4+3*i,))))
if not evaluate_at:
x = caarc['storey_level']
else:
x = evaluate_at
for dof_label in ['y', 'z', 'a']:
for mode_id in range(3):
y = caarc['eigenmodes'][dof_label][mode_id]
current_polynomial = np.poly1d(np.polyfit(caarc['storey_level'] ,y , interpolation_degree))
values = []
for x_i in x:# evaluate the fitted eigenmode at certain intervals
values.append(current_polynomial(x_i))
caarc['eigenmodes_fitted'][dof_label].append(np.asarray(values))
return caarc
def get_CAARC_eigenform_polyfit (CAARC_eigenmodes, evaluate_at = None, degree = 5):
'''
returns the values of a fitted CAARC eigenmode.
evaluate_at must be a list of x coordinates at which the fitted curve should be evaluated.
if it is not provided, the fitted curve is evaluated at each storey level of CAARC.
'''
eigenmodes_fitted = {}
#CAARC_eigenmodes = self.structure_model.CAARC_eigenmodes
# returns the fitted polynomial and the discrete array of displacements
if not evaluate_at:
x = CAARC_eigenmodes['storey_level']
else:
x = evaluate_at
eigenmodes_fitted['storey_level'] = np.copy(x)
eigenmodes_fitted['eigenmodes'] = []
for mode_id in range(3):
eigenmodes_fitted['eigenmodes'].append({})
for dof_label in ['y', 'z', 'a']:
y = CAARC_eigenmodes['eigenmodes'][dof_label][mode_id]
current_polynomial = np.poly1d(np.polyfit(CAARC_eigenmodes['storey_level'],y,degree))
values = []
for x_i in x:# evaluate the fitted eigenmode at certain intervals
values.append(current_polynomial(x_i))
eigenmodes_fitted['eigenmodes'][mode_id][dof_label] = np.asarray(values)
return eigenmodes_fitted
def get_m_eff(eigenmodes_dict, mode_id, main_direction_only, print_to_console):
'''
returns the generalized mass and the participation factor of a mode.
prints the effective mass, which should be around 60% of the total mass (for the first modes)
'''
mass = eigenmodes_dict['mass'] # constant over height
phi_y = eigenmodes_dict['eigenmodes']['y'][mode_id]
phi_z = eigenmodes_dict['eigenmodes']['z'][mode_id]
if main_direction_only:
if mode_id == 1:
participation_factor = (mass * sum(phi_y))**2 # mass not in the sum since it is constant
elif mode_id == 2:
participation_factor = (mass * sum(phi_z))**2
else:
participation_factor = (mass * sum(np.add(phi_y, phi_z)))**2
if main_direction_only:
if mode_id == 1:
generalized_mass = mass * sum(np.square(phi_y))
elif mode_id == 2:
generalized_mass = mass * sum(np.square(phi_z))
else:
generalized_mass = mass * sum(np.add(np.square(phi_y), np.square(phi_z)))
total_mass = 60*mass
m_eff = participation_factor/generalized_mass
if print_to_console:
print('m_eff of mode_id', mode_id, ':', round(m_eff/total_mass, 4), 'of m_tot')
print ('generalized_mass:', round(generalized_mass, 2), 'should be 1 if mass normalized')
print ('participation_factor:', round(participation_factor,2))
print ()
return participation_factor, generalized_mass
def plot_caarc_eigenmodes(caarc_dict, number_of_modes = 3, dofs_to_plot = ['y','a','z'],
max_normed = False, do_rad_scale =False, use_caarc_fitted = False,
savefig = False, savefig_latex = False,
fig_title = '', filename_for_save = '0_no_name'):
c_norm = 1
rad_scale = np.sqrt(caarc_dict['dimensons']['y'] *caarc_dict['dimensons']['z'])
if max_normed:
do_rad_scale = False
if use_caarc_fitted:
c_modes = caarc_dict['eigenmodes_fitted']
else:
c_modes = caarc_dict['eigenmodes']
if number_of_modes == 1:
raise Exception('for 1 mode not implemented yet')
fig, ax = plt.subplots(ncols = number_of_modes, num='eigenmode results')#figsize=(2,3.5),
if not savefig and not savefig_latex:
fig.suptitle(fig_title)
x = beam_model.nodal_coordinates['x0']
ax.plot( beam_model.nodal_coordinates['y0'],
x,
#label = r'$structure$',
color = 'grey',
linestyle = '--')
ax.set_title(r'$mode\,1$ '+'\n' +r'$f = $ ' + r'${}$'.format(str(round(beam_model.eigenfrequencies[0],3))) + r' $Hz$' + weights)
for d_i, dof in enumerate(dofs_to_plot):
scale=1.0
if do_rad_scale:
if dof in ['a','b','g']:
scale = rad_scale
else:
scale = 1.0
y = utils.check_and_flip_sign_array(beam_model.eigenmodes[dof][0])
if include_caarc:
c_y = utils.check_and_flip_sign_array(c_modes[dof][0])
if max_normed:
norm = 1/max(y)
c_norm = 1/max(c_y)
if opt_targets:
if dof in opt_targets.keys():
y2 = opt_targets[dof] *scale
ax.plot(y2*norm,
x,
label = r'${}$'.format(GD.greek[dof]) + r'$_{target}$',
linestyle = '--',
color = COLORS[d_i])
if initial:
if dof in initial.keys():
y3 = initial[dof]
ax.plot(y3*norm*scale,
x,
label = r'${}$'.format(GD.greek[dof]) + r'$_{inital}$',
linestyle = ':',
color = COLORS[d_i])
lab = GD.greek[dof]
ax.plot(y*norm*scale,
x,
label = r'${}$'.format(GD.greek[dof]) + r'$_{max}:$' + r'${0:.2e}$'.format(max(y)),#'max: ' +
linestyle = '-',
color = COLORS[d_i])
if include_caarc:
ax.plot(c_y*c_norm*scale,
c_x,
label = r'${}$'.format(GD.greek[dof]) + r'$benchmark$',#'max: ' +
linestyle = ':',
color = caarc_cols[d_i])
ax.legend()
ax.grid()
ax.set_ylim(bottom=0)
ax.set_xlabel(r'$deflection$')
ax.set_ylabel(r'$x \, [m]$')
ratio = max(utils.check_and_flip_sign_array(beam_model.eigenmodes['a'][0])) / max(utils.check_and_flip_sign_array(beam_model.eigenmodes['y'][0]))
ax.plot(0,0, label = r'$\alpha_{max}/y_{max}: $' + str(round(ratio,3)), linestyle = 'None')
ax.legend()
else:
fig, ax = plt.subplots(ncols = number_of_modes, sharey=True, num='eigenmode results')#figsize=(5,4),
for i in range(number_of_modes):
x = caarc_dict['storey_level']
ax[i].plot( np.zeros(len(x)),
x,
#label = r'$structure$',
color = 'grey',
linestyle = '--')
ax[i].set_title(r'$mode$ ' + r'${}$'.format(str(i+1)) + '\n' +r'$f=$ ' + r'${}$'.format(str(round(caarc_dict['frequencies'][i],3))) +r' $Hz$')
for d_i, dof in enumerate(dofs_to_plot):
scale=1.0
if do_rad_scale:
if dof in ['a','b','g']:
scale = rad_scale
else:
scale = 1.0
y = utils.check_and_flip_sign_array(c_modes[dof][i])
if max_normed:
c_norm = 1/max(y)
ax[i].plot(y*c_norm*scale,
x,
label = r'${}$'.format(GD.greek[dof]),# + r'$_{max}:$ ' + r'${0:.2e}$'.format(max(y)),
linestyle = '-',
color = COLORS[d_i])
ax[i].legend(loc = 'lower right')
ax[i].grid()
ax[i].set_ylim(bottom=0)
ax[i].set_xlabel(r'$deflection$')
ax[0].set_ylabel(r'$x \, [m]$')
ratio = max(utils.check_and_flip_sign_array(caarc_dict['eigenmodes']['a'][0])) / max(utils.check_and_flip_sign_array(caarc_dict['eigenmodes']['y'][0]))
#ax[0].plot(0,0, label = r'$\alpha_{max}/y_{max} = $' + str(round(ratio,3)), linestyle = 'None')
ax[0].legend(loc= 'lower right')
#plt.tight_layout()
#if self.show_plots:
#plt.grid()
plt.show()
plt.close()
if __name__ == '__main__':
plt.rcParams.update({'axes.formatter.limits':(-3,3)})
plt.rcParams.update(params)
caarc_dict = get_CAARC_properties()
plot_caarc_eigenmodes(caarc_dict, do_rad_scale=True,
savefig=1, savefig_latex=1,
filename_for_save='caarc_modes')
```
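Both `get_CAARC_properties` and `get_CAARC_eigenform_polyfit` rely on the same `np.polyfit`/`np.poly1d` pattern to re-sample a discrete mode shape on a new discretization. A standalone illustration with made-up numbers follows; note that a `poly1d` object also accepts an array directly, so the per-point loops above are a readability choice rather than a requirement.
```python
import numpy as np

storey_level = np.linspace(0.0, 180.0, 10)             # original node heights
mode_shape = (storey_level / storey_level[-1])**1.5    # synthetic mode shape

poly = np.poly1d(np.polyfit(storey_level, mode_shape, 3))
evaluate_at = np.linspace(0.0, 180.0, 46)              # new beam discretization
fitted = poly(evaluate_at)                             # evaluated in one call
print(fitted[:5])
```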
#### File: source/utilities/utilities.py
```python
import numpy as np
import json
import os.path
import matplotlib.pyplot as plt
from os.path import join as os_join
from os.path import sep as os_sep
from source.utilities import statistics_utilities as stats_utils
caarc_freqs = [0.231, 0.429, 0.536]
# VALUES COPIED WITH FULL MATRICES CALCULATED
eigenmodes_target_2D_3_elems = {'y':[np.array([0. , 0.0040305 , 0.013317 , 0.02434817]),
np.array([ 0. , -0.01444802, -0.01037197, 0.02449357]),
np.array([ 0. , -0.01819986, 0.01604842, -0.02446053])],
'g':[np.array([ 0. , -0.0004894 , -0.00070823, -0.00074479]),
np.array([ 0. , 0.00095991, -0.00161083, -0.00260447]),
np.array([ 0. , -0.0009034 , -0.00068957, 0.00432069])]}
eigenmodes_target_2D_10_elems ={'y':[np.array([0.0, -0.000223647046255273, -0.0008516138734941783, -0.0018197756020471587, -0.0030651302400258222, -0.00452698257707041, -0.006148471228742031, -0.007878364112695452, -0.009673052432583382, -0.011498680245678633, -0.01333335614230312]),
np.array([ 0. , 0.00123514, 0.00401433, 0.00701557, 0.00911353, 0.00951618, 0.0078602 , 0.00422764, -0.00093387, -0.00698381, -0.01333421]),
np.array([ 0. , 0.00304246, 0.00806418, 0.01008837, 0.007016 , 0.00026276, -0.00632 , -0.00877015, -0.00526778, 0.00304816, 0.01334002])],
'g':[np.array([0.00000000e+00, 2.91028048e-05, 5.39127464e-05, 7.44741342e-05,9.08970510e-05, 1.03382795e-04, 1.12244221e-04, 1.17921246e-04,1.20991889e-04, 1.22179408e-04, 1.22356253e-04]),
np.array([ 0.00000000e+00, -1.49126242e-04, -2.06595585e-04, -1.80905410e-04,-8.99100753e-05, 4.02818206e-05, 1.79513859e-04, 2.99658399e-04,3.81148479e-04, 4.18652321e-04, 4.24986410e-04]),
np.array([ 0.00000000e+00, -3.34883542e-04, -2.77308592e-04, 3.15745412e-05,3.61060122e-04, 4.93762038e-04, 3.37172087e-04, -3.17237701e-05,-4.21134643e-04, -6.52640493e-04, -6.98021127e-04])]}
def evaluate_residual(a_cur, a_tar):
residual = np.linalg.norm(np.subtract(a_cur, a_tar)) / np.amax(np.absolute(a_tar))
# print ('current: ', a_cur)
# print ('target: ', a_tar)
# print('residual:', residual)
# print()
return residual
def cm2inch(value):
return value/2.54
def increasing_by(val_old, val_new):
'''
returns the increase in % from the origin = old
'''
increase = (val_new - val_old)/val_old * 100
return round(increase,2)
def check_and_flip_sign_dict(eigenmodes_dict):
'''
flips the sign of the y and a deformations of the modes to be positive at the first node above ground;
dependent/coupled dofs are flipped accordingly
'''
for idx in range(len(eigenmodes_dict['y'])):
if eigenmodes_dict['y'][idx][1] < 0:
eigenmodes_dict['y'][idx] = np.negative(eigenmodes_dict['y'][idx])
try:
eigenmodes_dict['g'][idx] = np.negative(eigenmodes_dict['g'][idx])
except KeyError:
pass
try:
if eigenmodes_dict['a'][idx][1] < 0:
eigenmodes_dict['a'][idx] = np.negative(eigenmodes_dict['a'][idx])
except KeyError:
pass
return eigenmodes_dict
def check_and_flip_sign_array(mode_shape_array):
'''
check_and_change_sign
change the sign of the mode shape such that the first entry is positive
'''
if mode_shape_array[1] < 0:
return mode_shape_array * -1
return mode_shape_array
def analytic_function_static_disp(parameters, x, load_type = 'single'):
l = parameters['lx_total_beam']
EI = parameters['E_Modul'] * parameters['Iy']
magnitude = parameters['static_load_magnitude']
if load_type == 'single':
#print (' w_max target:', magnitude*l**3/(3*EI))
return (magnitude/EI) * (0.5 * l * (x**2) - (x**3)/6)
elif load_type == 'continous':
#print (' w_max target:', magnitude*l**4/(8*EI))
return -(magnitude/EI) * (-x**4/24 + l * x**3 /6 - l**2 * x**2 /4)
def analytic_eigenfrequencies(beam_model):
# from https://me-lrt.de/eigenfrequenzen-eigenformen-beim-balken
parameters = beam_model.parameters
l = parameters['lx_total_beam']
EI = parameters['E_Modul'] * parameters['Iy']
A = parameters['cross_section_area']
rho = parameters['material_density']
lambdas = [1.875, 4.694, 7.855]
f_j = np.zeros(3)
for i, l_i in enumerate(lambdas):
f_j[i] = np.sqrt((l_i**4 * EI)/(l**4 * rho *A)) / (2*np.pi)
return f_j
def analytic_eigenmode_shapes(beam_model):
# from https://me-lrt.de/eigenfrequenzen-eigenformen-beim-balken
#parameters = beam_model.parameters
l = beam_model.parameters['lx_total_beam']
x = beam_model.nodal_coordinates['x0']
lambdas = [1.875, 4.694, 7.855] # could also be computed as seen in the link
w = []
for j in range(3):
zeta = lambdas[j] * x / l
a = (np.sin(lambdas[j]) - np.sinh(lambdas[j])) / (np.cos(lambdas[j]) + np.cosh(lambdas[j]))
w_j = np.cos(zeta) - np.cosh(zeta) + a*(np.sin(zeta) -np.sinh(zeta))
w.append(w_j)
reduced_m = beam_model.m[::2,::2]
gen_mass = np.dot(np.dot(w_j, reduced_m), w_j)
norm_fac = np.sqrt(gen_mass)
w_j /= norm_fac
is_unity = np.dot(np.dot(w_j, reduced_m), w_j)
if round(is_unity, 4) != 1.0:
raise Exception ('analytic mode shape normalization failed')
return w
def get_eigenform_polyfit(modeshape_i, z_coords, evaluate_at, degree = 5, plot_compare = False):
'''
- modeshape_i: all modal deformations as a 2D array, each column belongs to one dof
- z_coords: the original floor levels from the 45-floor model
- evaluate_at: nodal coordinates at which the fitted curve should be evaluated
-> this is returned
'''
eigenmodes_fitted = {}
#CAARC_eigenmodes = self.structure_model.CAARC_eigenmodes
# returns the fitted polynomial and the discrete array of displacements
if not evaluate_at.any():
raise Exception('provide evaluation coordinates of the eigenform')
else:
x = evaluate_at
eigenmodes_fitted['storey_level'] = np.copy(x)
eigenmodes_fitted['eigenmodes'] = {}
dof_direction_map = {'y':4, 'z':3,'a':2}
for dof_label in ['y', 'z', 'a']:
y = modeshape_i[:, dof_direction_map[dof_label]]
current_polynomial = np.poly1d(np.polyfit(z_coords,y,degree))
values = []
for x_i in x:# evaluate the fitted eigenmode at certain intervals
values.append(current_polynomial(x_i))
if values[0] != 0.0:
values[0] = 0.0
eigenmodes_fitted['eigenmodes'][dof_label] = np.asarray(values)
if plot_compare:
fig, ax = plt.subplots(ncols=3, num='fitted compared')
for d_i, dof_label in enumerate(['y', 'z', 'a']):
ax[d_i].plot(modeshape_i[:, dof_direction_map[dof_label]], z_coords, label = 'origin ' + dof_label)
ax[d_i].plot(eigenmodes_fitted['eigenmodes'][dof_label], x, label = 'fitted ' + dof_label)
ax[d_i].legend()
ax[d_i].grid()
plt.show()
return eigenmodes_fitted
def save_optimized_beam_parameters(opt_beam_model, fname):
new = 'optimized_parameters'+os_sep+fname+'.json'
if os.path.isfile(new):
print('WARNING', new, 'already exists!')
new = 'optimized_parameters'+os_sep+fname+'_1.json'
f = open(new,'w')
json.dump(opt_beam_model.parameters, f)
f.close()
print('\nsaved:', new)
def get_targets(beam_model, target='semi_realistic', opt_params =None):
'''
helper that returns the target eigenmodes; mainly used for the initial comparison
'''
if target == 'realistic':
modi = np.load(os_join(*['inputs', 'EigenvectorsGid.npy']))
z_coords = np.load(os_join(*['inputs', 'z_coords_gid_45.npy']))
modi_fitted = get_eigenform_polyfit(modi[0], z_coords, beam_model.nodal_coordinates['x0'], plot_compare=False)
eigenmodes_target_y = modi_fitted['eigenmodes']['y']
eigenmodes_target_a = -1*modi_fitted['eigenmodes']['a']
elif target == 'semi_realistic':
ratio_a_y = opt_params['ratio_a_y_tar']
factor_y = opt_params['factor_y']
eigenmodes_target_y = beam_model.eigenmodes['y'][0]*factor_y
a_factor = ratio_a_y * max(eigenmodes_target_y)/max(beam_model.eigenmodes['a'][0])
eigenmodes_target_a = beam_model.eigenmodes['a'][0] * a_factor
return {'y':eigenmodes_target_y, 'a':eigenmodes_target_a}
def prepare_string_for_latex(string):
greek = {'ya':'y'+r'\alpha','ga':r'\gamma' + r'\alpha'}
if '_' in string:
var, label = string.split('_')[0], string.split('_')[1]
latex = r'${}$'.format(var) + r'$_{{{}}}$'.format(greek[label])
#return string.replace('_','')
return latex
else:
return string
def join_whitespaced_string(string):
return string.replace(' ','_')
# # DYNAMIC ANALYSIS
def get_fft(given_series, sampling_freq):
'''
The function get_fft estimates the Fast Fourier transform of the given signal
sampling_freq = 1/dt
'''
signal_length=len(given_series)
freq_half = np.arange(0,
sampling_freq/2 - sampling_freq/signal_length + sampling_freq/signal_length,
sampling_freq/signal_length)
# single sided fourier
series_fft = np.fft.fft(given_series)
series_fft = np.abs(series_fft[0:int(np.floor(signal_length/2))])/np.floor(signal_length/2)
max_length = len(freq_half)
if max_length < len(series_fft):
max_length = len(series_fft)
freq_half = freq_half[:max_length-1]
series_fft = series_fft[:max_length-1]
return freq_half, series_fft
def extreme_value_analysis_nist(given_series, dt, response_label = None, type_of_return = 'estimate', P1 = 0.98):
'''
dynamic_analysis_solved: dynamic_analysis.solver object
response: label given as a dof_label; if given as a response label it is converted
type_of_return: whether the estimated or the quantile value of P1 is returned (both are computed)
'''
T_series = dt * len(given_series)
dur_ratio = 600 / T_series
# # MAXMINEST NIST
#P1 = 0.98
max_qnt, min_qnt, max_est, min_est, max_std, min_std, Nupcross = stats_utils.maxmin_qnt_est(given_series ,
cdf_p_max = P1 , cdf_p_min = 0.0001, cdf_qnt = P1, dur_ratio = dur_ratio)
abs_max_est = max([abs(max_est[0][0]), abs(min_est[0][0])])
abs_max_qnt = max([abs(max_qnt[0]), abs(min_qnt[0])])
if type_of_return == 'estimate':
extreme_response = abs_max_est
elif type_of_return == 'quantile':
extreme_response = abs_max_qnt
glob_max = max(abs(given_series))
return abs_max_qnt, abs_max_est
``` |
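As a quick sanity check of the single-sided spectrum idea behind `get_fft`, the snippet below performs the equivalent computation with `np.fft.rfft` (instead of calling the helper, so it runs standalone) and confirms that a pure 1.5 Hz sine peaks at 1.5 Hz.
```python
import numpy as np

dt = 0.01
t = np.arange(0.0, 10.0, dt)
series = np.sin(2 * np.pi * 1.5 * t)              # pure 1.5 Hz signal

n = len(series)
freq = np.fft.rfftfreq(n, d=dt)
amp = np.abs(np.fft.rfft(series)) / (n / 2)       # single-sided amplitude
print(round(float(freq[np.argmax(amp)]), 2))      # -> 1.5
```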
{
"source": "JoZimmer/ParOptBeam",
"score": 3
} |
#### File: source/analysis/analysis_type.py
```python
class AnalysisType(object):
"""
Base class for the different analysis types
"""
def __init__(self, structure_model, name="DefaultAnalysisType"):
self.name = name
# the structure model - geometry and physics - has the Dirichlet BC
# for the bottom node included
self.structure_model = structure_model
self.displacement = None
self.rotation = None
self.force = None
self.reaction = None
self.moment = None
def solve(self):
"""
Solve for something
"""
print("Solving for something in AnalysisType base class \n")
pass
def postprocess(self):
"""
Postprocess something
"""
print("Postprocessing in AnalysisType base class \n")
pass
```
#### File: source/auxiliary/other_utilities.py
```python
from os.path import sep as os_sep
def get_adjusted_path_string(path_string):
for separator in ['\\\\', '\\', '/', '//']:
path_string = path_string.replace(separator, os_sep)
return path_string[:]
```
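For reference, here is a standalone re-implementation of the separator normalization above (so it runs without the package on the path), applied to a mixed-separator path; the doubled '\\\\' pattern is handled before the single '\\' so doubled backslashes collapse first.
```python
from os.path import sep as os_sep


def adjust_path(path_string):
    # same replacement order as get_adjusted_path_string above
    for separator in ['\\\\', '\\', '/', '//']:
        path_string = path_string.replace(separator, os_sep)
    return path_string


print(adjust_path('inputs\\eigenvectors/EigenvectorsGid.npy'))
```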
#### File: postprocess/skin_model/line_structure_model.py
```python
import numpy as np
from source.postprocess.skin_model.node_model import Node
class LineStructure:
def __init__(self, params):
"""
initializing line structure with dofs
"""
# initializing beam info with param
self.params = params
self.beam_length = params["length"]
self.num_of_nodes = len(params["dofs_input"]["x0"])
self.dofs_input = params["dofs_input"]
self.steps = len(self.dofs_input["x"][0])
self.input_x = np.asarray(self.dofs_input["x"]).reshape(
self.steps * self.num_of_nodes)
self.input_y = np.asarray(self.dofs_input["y"]).reshape(
self.steps * self.num_of_nodes)
self.input_z = np.asarray(self.dofs_input["z"]).reshape(
self.steps * self.num_of_nodes)
self.input_a = np.asarray(self.dofs_input["a"]).reshape(
self.steps * self.num_of_nodes)
self.input_b = np.asarray(self.dofs_input["b"]).reshape(
self.steps * self.num_of_nodes)
self.input_g = np.asarray(self.dofs_input["g"]).reshape(
self.steps * self.num_of_nodes)
# initializing variables needed by LineStructure
self.nodes = np.empty(self.num_of_nodes, dtype=Node)
self.undeformed = np.ndarray((3, self.num_of_nodes), dtype=float)
self.deformed = np.ndarray((3, self.num_of_nodes), dtype=float)
self.displacement = np.ndarray((3, self.num_of_nodes), dtype=float)
self.angular_displacement = np.ndarray(
(3, self.num_of_nodes), dtype=float)
self.init()
self.print_line_structure_info()
def init(self):
for i in range(self.num_of_nodes):
position = [self.dofs_input["x0"][i],
self.dofs_input["y0"][i],
self.dofs_input["z0"][i]]
self.nodes[i] = Node(position)
self.undeformed[0][i] = self.nodes[i].undeformed[0]
self.undeformed[1][i] = self.nodes[i].undeformed[1]
self.undeformed[2][i] = self.nodes[i].undeformed[2]
self.deformed[0][i] = self.nodes[i].deformed[0]
self.deformed[1][i] = self.nodes[i].deformed[1]
self.deformed[2][i] = self.nodes[i].deformed[2]
self.update_dofs(0)
print("Undeformed Nodes added successfully!")
def update_dofs(self, step):
x = self.input_x[step::self.steps]
y = self.input_y[step::self.steps]
z = self.input_z[step::self.steps]
a = self.input_a[step::self.steps]
b = self.input_b[step::self.steps]
g = self.input_g[step::self.steps]
self.displacement = np.array([x, y, z])
self.angular_displacement = np.array([a, b, g])
displacement = self.displacement.transpose().reshape(self.num_of_nodes, 3)
angular_displacement = self.angular_displacement.transpose().reshape(self.num_of_nodes, 3)
def assign_nodal_dof(i):
self.nodes[i].assign_dofs(displacement[i], angular_displacement[i])
return self.nodes[i]
for i in range(self.num_of_nodes):
self.nodes[i].assign_dofs(displacement[i], angular_displacement[i])
def print_line_structure_info(self):
msg = "=============================================\n"
msg += "LINE STRUCTURE MODEL INFO \n"
msg += "NUMBER OF NODES:\t" + str(self.num_of_nodes) + "\n"
msg += "UNDEFORMED GEOMETRY:\t" + str(self.undeformed) + "\n"
msg += "=============================================\n"
print(msg)
def print_nodal_info(self):
for node in self.nodes:
node.print_info()
def apply_transformation_for_line_structure(self):
def apply_nodal_transformation(i):
self.nodes[i].apply_transformation()
self.deformed[0][i] = self.nodes[i].deformed[0]
self.deformed[1][i] = self.nodes[i].deformed[1]
self.deformed[2][i] = self.nodes[i].deformed[2]
merged = [self.nodes[i], self.deformed[0][i],
self.deformed[1][i], self.deformed[2][i]]
return merged
for i in range(self.num_of_nodes):
self.nodes[i].apply_transformation()
self.deformed[0][i] = self.nodes[i].deformed[0]
self.deformed[1][i] = self.nodes[i].deformed[1]
self.deformed[2][i] = self.nodes[i].deformed[2]
def test():
param = {"length": 100.0,
"dofs_input": {
"x0": [0.0, 25.0, 50.0, 75.0, 100.0],
"y0": [0.0, 0.0, 0.0, 0.0, 0.0],
"z0": [0.0, 0.0, 0.0, 0.0, 0.0],
"a0": [0.0, 0.0, 0.0, 0.0, 0.0],
"b0": [0.0, 0.0, 0.0, 0.0, 0.0],
"g0": [0.0, 0.0, 0.0, 0.0, 0.0],
"y": [[0.0, 1.0], [0.0, 2.0], [0.0, 3.0], [0.0, 4.0], [0.4, 5.0]],
"z": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [4.0, 0.0]],
"a": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
"b": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
"g": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [np.pi, np.pi / 2]],
"x": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]}}
ls = LineStructure(param)
ls.apply_transformation_for_line_structure()
solution = np.array([-100, 0.4, 4.0])
err = solution - ls.nodes[4].deformed
print(ls.nodes[4].deformed)
try:
assert all(err < 1e-12), "Transformation wrong"
print("passed test")
except AssertionError:
print("failed test")
if __name__ == "__main__":
test()
```
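The `flat[step::steps]` slicing in `update_dofs` works because each dof history is flattened row-major from shape `(num_of_nodes, steps)`, so striding by `steps` picks the value of every node at one time step. A minimal numpy check of that indexing (shapes chosen arbitrarily):
```python
import numpy as np

num_of_nodes, steps = 5, 3
per_node_histories = np.arange(num_of_nodes * steps).reshape(num_of_nodes, steps)
flat = per_node_histories.reshape(num_of_nodes * steps)

step = 1
print(flat[step::steps])             # values of all nodes at time step 1
print(per_node_histories[:, step])   # the same values, without flattening
```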
#### File: postprocess/skin_model/mapper.py
```python
from scipy.interpolate import CubicSpline
import numpy as np
from source.postprocess.skin_model.node_model import Node
from source.postprocess.skin_model.line_structure_model import LineStructure
# curvature - 2nd order deriv
DERIV_ORDER = 2
PRESCRIBED_2ND_ORDER_DERIV = [0, 0]
INTERPOLATION_DENSITY = 5
def interpolate_points(v, x, y):
interp_x = CubicSpline(x, y, bc_type=(
(DERIV_ORDER, PRESCRIBED_2ND_ORDER_DERIV[0]),
(DERIV_ORDER, PRESCRIBED_2ND_ORDER_DERIV[1])))
# normal = interp_x.derivative()(v)
return interp_x(v)
class Mapper:
def __init__(self, line_structure, structure):
self.structure = structure
self.line_structure = line_structure
self.map_line_structure_to_structure()
def map_line_structure_to_structure(self):
s_vec = self.line_structure.undeformed[int(
self.structure.beam_direction)]
disp_vec = self.line_structure.displacement
ang_disp_vec = self.line_structure.angular_displacement
for i in range(self.structure.num_of_elements):
mid_p = self._get_element_mid_points(self.structure.elements[i])
s = mid_p[int(self.structure.beam_direction)]
inter_disp, inter_ang_disp = self._interpolate_dofs(
s, s_vec, disp_vec, ang_disp_vec)
for node in self.structure.elements[i].nodes:
node.assign_dofs(inter_disp, inter_ang_disp)
for frame in self.structure.frames:
frame.nodes[i].assign_dofs(inter_disp, inter_ang_disp)
@staticmethod
def _get_element_mid_points(element):
mid_x = sum(element.undeformed[0]) / element.num_of_nodes
mid_y = sum(element.undeformed[1]) / element.num_of_nodes
mid_z = sum(element.undeformed[2]) / element.num_of_nodes
return [mid_x, mid_y, mid_z]
@staticmethod
def _interpolate_dofs(s, s_vec, displacement, angular_displacement):
dx = interpolate_points(s, s_vec, displacement[0])
dy = interpolate_points(s, s_vec, displacement[1])
dz = interpolate_points(s, s_vec, displacement[2])
inter_disp = [dx, dy, dz]
theta_x = interpolate_points(s, s_vec, angular_displacement[0])
theta_y = interpolate_points(s, s_vec, angular_displacement[1])
theta_z = interpolate_points(s, s_vec, angular_displacement[2])
inter_ang_disp = [theta_x, theta_y, theta_z]
return inter_disp, inter_ang_disp
def test():
param = {"length": 100.0, "num_of_elements": 5,
"geometry": [[0, 15.0, 3.0], [0, 6.0, 9.0], [0, -6.0, 9.0],
[0, -15.0, 3.0], [0, -6.0, -9.0], [0, 6.0, -9.0]
],
"contour_density": 1,
"record_animation": False,
"visualize_line_structure": True,
"beam_direction": "x",
"scaling_vector": [1.5, 1.0, 2.0],
"dofs_input": {
"x0": [0.0, 25.0, 50.0, 75.0, 100.0],
"y0": [0.0, 0.0, 0.0, 0.0, 0.0],
"z0": [0.0, 0.0, 0.0, 0.0, 0.0],
"a0": [0.0, 0.0, 0.0, 0.0, 0.0],
"b0": [0.0, 0.0, 0.0, 0.0, 0.0],
"g0": [0.0, 0.0, 0.0, 0.0, 0.0],
"y": [[0.0, 1.0], [0.0, 2.0], [0.0, 3.0], [0.0, 4.0], [0.4, 5.0]],
"z": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [4.0, 0.0]],
"a": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
"b": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
"g": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [np.pi, np.pi/2]],
"x": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]}}
from source.postprocess.skin_model.skin_components_model import Structure
from source.postprocess.skin_model.line_structure_model import LineStructure
s = Structure(param)
ls = LineStructure(param)
m = Mapper(ls, s)
if __name__ == "__main__":
test()
```
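`interpolate_points` builds a cubic spline with the second derivative prescribed to zero at both ends (the `bc_type=((2, 0), (2, 0))` form), which is the natural-spline boundary condition. A small self-contained example of the same call with made-up support values:
```python
import numpy as np
from scipy.interpolate import CubicSpline

x_support = np.array([0.0, 25.0, 50.0, 75.0, 100.0])
y_support = np.array([0.0, 0.1, 0.35, 0.7, 1.0])     # made-up displacements

# second derivative forced to 0 at both ends, equivalent to bc_type='natural'
spline = CubicSpline(x_support, y_support, bc_type=((2, 0.0), (2, 0.0)))
print(float(spline(37.5)))        # interpolated value between the supports
print(float(spline(0.0, 2)))      # second derivative at the end -> 0.0
```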
#### File: postprocess/skin_model/node_model.py
```python
from math import sin, cos
import numpy as np
class Node:
def __init__(self, undeformed):
self.undeformed = undeformed
self.deformed = undeformed
self.displacement = np.empty(3, dtype=float)
self.angular_displacement = np.empty(3, dtype=float)
def assign_dofs(self, displacement, angular_displacement):
self.displacement = displacement
self.angular_displacement = angular_displacement
def print_info(self):
msg = "########################################################\n"
msg += "Node Fix Position [" + str(self.undeformed[0]) + " " + \
str(self.undeformed[1]) + " " + \
str(self.undeformed[2]) + "]: \n"
try:
msg += "dx = " + str(self.displacement[0]) + "\t"
msg += "dy = " + str(self.displacement[1]) + "\t"
msg += "dz = " + str(self.displacement[2]) + "\n"
msg += "theta_x = " + str(self.angular_displacement[0]) + "\t"
msg += "theta_y = " + str(self.angular_displacement[1]) + "\t"
msg += "theta_z = " + str(self.angular_displacement[2]) + "\n"
msg += "Transformed Position: [" + str(self.deformed[0]) + " " + \
str(self.deformed[1]) + " " + str(self.deformed[2]) + "]:\n"
msg += "########################################################\n"
except AttributeError:
pass
print(msg)
def apply_transformation(self):
dx = self.displacement[0]
dy = self.displacement[1]
dz = self.displacement[2]
alpha = self.angular_displacement[0]
beta = self.angular_displacement[1]
gamma = self.angular_displacement[2]
# transformation matrix derived from the symbolic generation
M = np.array([
[1.0 * cos(beta) * cos(gamma),
-1.0 * cos(alpha) * sin(gamma) + 1.0 *
cos(gamma) * sin(alpha) * sin(beta),
1.0 * cos(alpha) * cos(gamma) * sin(beta) + 1.0 * sin(alpha) * sin(gamma), 1.0 * dx],
[1.0 * cos(beta) * sin(gamma), 1.0 * cos(alpha) * cos(gamma) + 1.0 * sin(alpha) * sin(beta) * sin(gamma),
1.0 * cos(alpha) * sin(beta) * sin(gamma) - 1.0 * cos(gamma) * sin(alpha), 1.0 * dy],
[-1.0 * sin(beta), 1.0 * cos(beta) * sin(alpha),
1.0 * cos(alpha) * cos(beta), 1.0 * dz],
[0, 0, 0, 1.0]
])
previous_coordinate = np.append(
self.undeformed, np.array(1.0)).reshape(4, 1)
new_coordinate = M.dot(previous_coordinate)
new_coordinate = new_coordinate.reshape(1, 4)
self.deformed = new_coordinate[0][0:3]
def test():
p = np.array([1.0, 1.0, 0.0])
displacement = np.array([0.0, 0.0, 3.0])
angular_displacement = np.array([0.0, 0.0, np.pi/2])
node = Node(p)
node.assign_dofs(displacement, angular_displacement)
node.apply_transformation()
node.print_info()
solution = np.array([-1.0, 1.0, 3.0])
err = solution - node.deformed
try:
assert all(err < 1e-12), "Transformation wrong"
print("passed test")
except AssertionError:
print("failed test")
if __name__ == "__main__":
test()
```
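The 4x4 matrix in `apply_transformation` is, apart from the appended translation column, the rotation `R = Rz(gamma) @ Ry(beta) @ Rx(alpha)` written out term by term. A numerical cross-check using the same values as `test()` above (yaw of pi/2 about z plus a shift of 3.0 in z):
```python
import numpy as np


def rot_x(a):
    return np.array([[1, 0, 0],
                     [0, np.cos(a), -np.sin(a)],
                     [0, np.sin(a), np.cos(a)]])


def rot_y(b):
    return np.array([[np.cos(b), 0, np.sin(b)],
                     [0, 1, 0],
                     [-np.sin(b), 0, np.cos(b)]])


def rot_z(g):
    return np.array([[np.cos(g), -np.sin(g), 0],
                     [np.sin(g), np.cos(g), 0],
                     [0, 0, 1]])


alpha, beta, gamma = 0.0, 0.0, np.pi / 2
d = np.array([0.0, 0.0, 3.0])
p = np.array([1.0, 1.0, 0.0])
print(rot_z(gamma) @ rot_y(beta) @ rot_x(alpha) @ p + d)   # -> [-1.  1.  3.]
```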
#### File: postprocess/skin_model/skin_components_model.py
```python
from typing import List
import numpy as np
from enum import IntEnum
from source.postprocess.skin_model.node_model import Node
from source.postprocess.skin_model.mapper import interpolate_points
DIRECTION_VECTOR = ["x", "y", "z", "x", "y", "z"]
class BeamDirection(IntEnum):
x = 0
y = 1
z = 2
class Element:
def __init__(self, geometry, current_length, beam_direction, scale=1.):
"""
creating single element based on the given element geometry with the element height
"""
self.num_of_nodes = len(geometry)
self.nodes = np.empty(self.num_of_nodes, dtype=Node)
self.undeformed = np.ndarray((3, self.num_of_nodes), dtype=float)
self.deformed = np.ndarray((3, self.num_of_nodes), dtype=float)
self.scale = scale
for i in range(len(geometry)):
x = geometry[i][0]
y = geometry[i][1]
z = geometry[i][2]
# Note: beam_direction won't be scaled
position = np.array([x, y, z], dtype=float) * scale
position[int(beam_direction)] = current_length
self.nodes[i] = Node(position)
self.undeformed[0][i] = self.nodes[i].undeformed[0]
self.undeformed[1][i] = self.nodes[i].undeformed[1]
self.undeformed[2][i] = self.nodes[i].undeformed[2]
self.deformed[0][i] = self.nodes[i].deformed[0]
self.deformed[1][i] = self.nodes[i].deformed[1]
self.deformed[2][i] = self.nodes[i].deformed[2]
def print_element(self):
for node in self.nodes:
node.print_info()
class Frame:
def __init__(self, elements, index):
"""
connecting all points from the same geometry point for each element
"""
self.num_of_nodes = len(elements)
self.nodes = np.empty(self.num_of_nodes, dtype=Node)
self.undeformed = np.ndarray((3, self.num_of_nodes), dtype=float)
self.deformed = np.ndarray((3, self.num_of_nodes), dtype=float)
for i in range(len(elements)):
self.nodes[i] = elements[i].nodes[index]
self.undeformed[0][i] = self.nodes[i].undeformed[0]
self.undeformed[1][i] = self.nodes[i].undeformed[1]
self.undeformed[2][i] = self.nodes[i].undeformed[2]
self.deformed[0][i] = self.nodes[i].deformed[0]
self.deformed[1][i] = self.nodes[i].deformed[1]
self.deformed[2][i] = self.nodes[i].deformed[2]
def print_frame(self):
for node in self.nodes:
node.print_info()
class Structure:
def __init__(self, params):
"""
initializing structure with geometry
"""
self.dofs = params["dofs_input"]
self.element_geometry = params["geometry"]
self.beam_length = params["length"]
self.scaling_vector = params["scaling_vector"]
self.num_of_frames = len(self.element_geometry)
self.num_of_elements = len(self.scaling_vector) + 1
self.beam_direction = BeamDirection[params["beam_direction"]]
self.element_length = self.beam_length / (self.num_of_elements - 1)
self.print_structure_info()
self.elements = np.empty(self.num_of_elements, dtype=Element)
self.create_elements()
self.frames = np.empty(self.num_of_frames, dtype=Frame)
self.create_frames()
def print_structure_info(self):
msg = "=============================================\n"
msg += "VISUALISING SKIN MODEL"
msg += "BEAM MODEL INFO \n"
msg += str(self.beam_direction) + "\n"
msg += "LENGTH:\t" + str(self.beam_length) + "\n"
msg += "#ELEMENTS:\t" + str(self.num_of_elements) + "\n"
msg += "ELEMENT LENGTH:\t" + str(self.element_length) + "\n"
msg += "============================================="
print(msg)
def create_elements(self):
element_vec = np.linspace(self.element_length / 2, self.beam_length - self.element_length / 2,
self.num_of_elements - 1)
def create_single_element(i):
current_length = i * self.element_length
current_scale = interpolate_points(
current_length, element_vec, self.scaling_vector)
element = Element(self.element_geometry, current_length,
self.beam_direction, current_scale)
return element
for i in range(self.num_of_elements):
current_length = i * self.element_length
current_scale = interpolate_points(
current_length, element_vec, self.scaling_vector)
element = Element(self.element_geometry, current_length,
self.beam_direction, current_scale)
self.elements[i] = element
def create_frames(self):
def create_single_frame(i):
frame = Frame(self.elements, i)
return frame
for i in range(self.num_of_frames):
frame = Frame(self.elements, i)
self.frames[i] = frame
def apply_transformation_for_structure(self):
for e in self.elements:
for i in range(len(e.nodes)):
e.nodes[i].apply_transformation()
e.deformed[0][i] = e.nodes[i].deformed[0]
e.deformed[1][i] = e.nodes[i].deformed[1]
e.deformed[2][i] = e.nodes[i].deformed[2]
for f in self.frames:
for i in range(len(f.nodes)):
f.nodes[i].apply_transformation()
f.deformed[0][i] = f.nodes[i].deformed[0]
f.deformed[1][i] = f.nodes[i].deformed[1]
f.deformed[2][i] = f.nodes[i].deformed[2]
def print_structure_element(self, element_id):
print("Printing element: " + str(element_id))
self.elements[element_id].print_element()
def print_structure_frame(self, frame_id):
print("Printing frame: " + str(frame_id))
self.frames[frame_id].print_frame()
def test():
param = {"length": 100.0,
"geometry": [[0, 15.0, 3.0], [0, 6.0, 9.0], [0, -6.0, 9.0],
[0, -15.0, 3.0], [0, -6.0, -9.0], [0, 6.0, -9.0]
],
"record_animation": False,
"visualize_line_structure": True,
"beam_direction": "x",
"scaling_vector": [1.01, 1.0, 1.02],
'result_path': '.',
'mode': '1',
'frequency': 0.4,
'period': 2.5,
"deformation_scaling_factor": 2.0,
"dofs_input": {
"x0": [0.0, 25.0, 50.0, 75.0, 100.0],
"y0": [0.0, 0.0, 0.0, 0.0, 0.0],
"z0": [0.0, 0.0, 0.0, 0.0, 0.0],
"a0": [0.0, 0.0, 0.0, 0.0, 0.0],
"b0": [0.0, 0.0, 0.0, 0.0, 0.0],
"g0": [0.0, 0.0, 0.0, 0.0, 0.0],
"y": [[0.1, 1.0], [0.2, 2.0], [0.3, 3.0], [0.4, 4.0], [0.4, 5.0]],
"z": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [4.0, 0.0]],
"a": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
"b": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
"g": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [np.pi, np.pi / 2]],
"x": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]}}
s = Structure(param)
s.apply_transformation_for_structure()
# s.print_structure_element(1)
# s.print_structure_frame(0)
if __name__ == "__main__":
test()
```
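One way to read the `Element`/`Frame` split above: an element holds all contour points at one position along the beam, while a frame holds the same contour point across all elements, so the two views are simply a transpose of one node grid. A shapes-only sketch without the repository classes:
```python
import numpy as np

num_elements, points_per_contour = 4, 6
node_grid = np.arange(num_elements * points_per_contour).reshape(
    num_elements, points_per_contour)

elements = [node_grid[i, :] for i in range(num_elements)]        # per height
frames = [node_grid[:, j] for j in range(points_per_contour)]    # per contour point
print(elements[0], frames[0])
```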
#### File: source/preprocess/load_type_TO_MAKE_FUNCTIONAL.py
```python
import numpy as np
# TODO: update script so that it works with current code structure
# =========================dynamic analysis ==========================
# generating signals in time domain
def load_type(force_dynamic, array_time, num_of_levels, freq=10, force_static=1):
"""
Choose from "signalSin", "signalRand", "signalConst", "signalSuperposed" or
for free vibration choose "signalNone"
"""
if (force_dynamic == "signalNone"):
# zero force -> free vibration
force_dynamic = np.zeros([num_of_levels, len(array_time)])
elif (force_dynamic == "signalSin"):
# sine with amplitude equal to the static force and the given frequency
amplSin = force_static[:, np.newaxis]
force_dynamic = amplSin * np.sin(2*np.pi*freq * array_time)
elif (force_dynamic == "signalRand"):
# normal random signal with given mean m = 0 and standard dev sd = 0.25 ->
# check out difference compared to unifrnd - uniform random distribution
force_dynamic = np.random.normal(
0, 0.25, [num_of_levels, len(array_time)])
elif (force_dynamic == "signalConst"):
# constant signal with amplitude equal to the static force
amplConst = force_static[:, np.newaxis]
force_dynamic = amplConst * np.ones(len(array_time))
elif (force_dynamic == "signalSuperposed"):
# superposing the signals
amplConst = force_static[:, np.newaxis]
signalConst = amplConst * np.ones(len(array_time))
amplSin = 1 * np.ones([num_of_levels, 1])
signalSin = amplSin * np.sin(2*np.pi*freq * array_time)
signalRand = np.random.normal(
0, 0.25, [num_of_levels, len(array_time)])
# superposition weighting
coefSignal1 = 1
coefSignal2 = 0.25
coefSignal3 = 0.25
force_dynamic = coefSignal1 * signalConst + \
coefSignal2 * signalSin + coefSignal3 * signalRand
else:
err_msg = "The requested dynamic load \"" + force_dynamic
err_msg += "\" is not available \n"
err_msg += "Choose one of: \"signalConst\", \"signalSin\", \"signalRand\", \"signalSuperposed\", \"signalNone\""
raise Exception(err_msg)
return force_dynamic
```
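The signal builders above lean on numpy broadcasting: `force_static[:, np.newaxis]` turns the per-level amplitudes into a column vector, so multiplying by the time signal yields a `(num_of_levels, len(array_time))` matrix in one step. A minimal shape check with made-up values:
```python
import numpy as np

freq = 10.0
array_time = np.linspace(0.0, 1.0, 5)
force_static = np.array([1.0, 2.0, 3.0])     # one amplitude per level

ampl = force_static[:, np.newaxis]           # shape (3, 1)
force_dynamic = ampl * np.sin(2 * np.pi * freq * array_time)
print(force_dynamic.shape)                   # (3, 5): levels x time steps
```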
#### File: solving_strategies/schemes/time_integration_scheme.py
```python
import numpy as np
class TimeIntegrationScheme(object):
def __init__(self, dt, comp_model, initial_conditions):
# time step
self.dt = dt
# mass, damping and spring stiffness
self.M = comp_model[0]
self.B = comp_model[1]
self.K = comp_model[2]
# initial displacement, velocity and acceleration
self.u0 = initial_conditions[0]
self.v0 = initial_conditions[1]
self.a0 = initial_conditions[2]
# initial previous step displacement, velocity and acceleration
self.un1 = self.u0
self.vn1 = self.v0
self.an1 = self.a0
# initial current step displacement, velocity and acceleration
self.u1 = self.u0
self.v1 = self.v0
self.a1 = self.a0
# force from a previous time step (initial force)
self.f0 = None
self.f1 = None
def _print_time_integration_setup(self):
pass
def predict_displacement(self):
return 2.0 * self.u1 - self.u0
def predict_velocity(self, u1):
pass
def predict_acceleration(self, v1):
pass
def solve_single_step(self, f1):
pass
def update(self):
pass
def update_displacement(self, u_new):
self.u1 = u_new
self.v1 = self.predict_velocity(self.u1)
self.a1 = self.predict_acceleration(self.v1)
def update_comp_model(self, new_comp_model):
self.M = new_comp_model[0]
self.B = new_comp_model[1]
self.K = new_comp_model[2]
def print_values_at_current_step(self, n):
print("Printing values at step no: ", n, " (+1)")
print("u0: ", self.u1)
print("v0: ", self.v1)
print("a0: ", self.a1)
print("f0: ", self.f1)
print(" ")
def get_displacement(self):
return self.u1
def get_velocity(self):
return self.v1
def get_acceleration(self):
return self.a1
def get_old_displacement(self):
return self.un1
def get_old_velocity(self):
return self.vn1
def get_old_acceleration(self):
return self.an1
```
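As an illustration of the u/v/a bookkeeping a concrete scheme performs on top of this interface, here is a hedged, standalone backward-Euler step for a single-dof system `M*a + B*v + K*u = f`; it is not one of the repository's scheme classes and the numbers are arbitrary.
```python
import numpy as np

M, B, K = 1.0, 0.1, 10.0
dt, steps = 0.01, 500
u, v = 0.0, 0.0                      # initial displacement and velocity
history = []
for n in range(steps):
    f1 = np.sin(2 * np.pi * 0.5 * n * dt)
    # backward Euler: v1 = (u1 - u0)/dt, a1 = (v1 - v0)/dt inserted into the ODE
    lhs = M / dt**2 + B / dt + K
    rhs = f1 + M * (u + dt * v) / dt**2 + B * u / dt
    u_new = rhs / lhs
    v, u = (u_new - u) / dt, u_new   # update velocity with the old u, then shift
    history.append(u)
print(history[-1])
```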
#### File: solving_strategies/strategies/symbolic_derivation.py
```python
from sympy import *
init_printing(use_unicode=True)
a_n1, a_n, u_n2, u_n1, u_n, u_nm1, u_nm2, u_nm3, t, dt = symbols(
'a1 an u2 u1 self.un1 self.un2 self.un3 self.un4 t dt')
f, C, M, K = symbols('f self.B self.M self.K')
def euler():
# ### euler ###
# v_n+1 = v_n + dt f(tn, v_n)
print("##### Euler #####")
v_n = (u_n1 - u_n) / dt
v_nm1 = (u_n - u_nm1) / dt
a_nm1 = (v_n - v_nm1) / dt
du, ru = symbols('du ru')
r_u = f - (M * a_nm1 + C * v_nm1 + K * u_nm1)
print("ru = ", r_u)
drudu = diff(r_u, u_n1)
eq_u = ru + drudu * du
sol = solve(eq_u, du)
du = (sol[0])
print("du = ", du)
def bdf1():
# ### BDF1 ###
# v_n+1 = v_n + dt f(tn+1, v_n+1)
print("##### BDF1 #####")
v_n1 = (u_n1 - u_n) / dt
v_n = (u_n - u_nm1) / dt
a_n1 = (v_n1 - v_n) / dt
du, ru = symbols('du ru')
r_u = f - (M * a_n1 + C * v_n1 + K * u_n1)
print("ru = ", r_u)
drudu = diff(r_u, u_n1)
eq_u = ru + drudu * du
sol = solve(eq_u, du)
du = (sol[0])
print("du = ", du)
def bdf2():
# ### BDF2 ###
# v_n+1 = 4/3 v_n - 1/3 v_n-1 + 2/3 dt f(tn+1, v_n+1)
print("##### BDF2 #####")
bdf0, bdf1, bdf2 = symbols('self.bdf0 self.bdf1 self.bdf2')
v_n1 = bdf0 * u_n1 + bdf1 * u_n + bdf2 * u_nm1
v_n = bdf0 * u_n + bdf1 * u_nm1 + bdf2 * u_nm2
v_nm1 = bdf0 * u_nm1 + bdf1 * u_nm2 + bdf2 * u_nm3
a_n1 = bdf0 * v_n1 + bdf1 * v_n + bdf2 * v_nm1
du, ru = symbols('du ru')
r_u = f - (M * a_n1 + C * v_n1 + K * u_n1)
print("ru = ", r_u)
drudu = diff(r_u, u_n1)
eq_u = ru + drudu * du
sol = solve(eq_u, du)
du = (sol[0])
print("du = ", du)
if __name__ == "__main__":
euler()
bdf1()
bdf2()
```
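For BDF1 the linearization produced by the script can be checked by hand: with `v_{n+1} = (u_{n+1} - u_n)/dt` and `a_{n+1} = (v_{n+1} - v_n)/dt`, the residual Jacobian is `-(M/dt**2 + C/dt + K)`, so the Newton increment is `du = r_u / (M/dt**2 + C/dt + K)`. A compact sympy verification with plain symbol names (independent of the naming used above):
```python
from sympy import symbols, diff, simplify

u1, u0, um1, dt, f, M, C, K = symbols('u1 u0 um1 dt f M C K')
v1 = (u1 - u0) / dt
v0 = (u0 - um1) / dt
a1 = (v1 - v0) / dt
r_u = f - (M * a1 + C * v1 + K * u1)
print(simplify(diff(r_u, u1)))   # -> -C/dt - K - M/dt**2
```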
#### File: ParOptBeam/test_scripts/test_residual_solver_dynamic.py
```python
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from source.solving_strategies.strategies.residual_based_newton_raphson_solver import ResidualBasedNewtonRaphsonSolver
from source.solving_strategies.strategies.residual_based_picard_solver import ResidualBasedPicardSolver
from source.model.structure_model import StraightBeam
np.set_printoptions(suppress=False, precision=2, linewidth=140)
params = {
"name": "CaarcBeamPrototypeOptimizable",
"domain_size": "3D",
"system_parameters": {
"element_params": {
"type": "CRBeam",
"is_nonlinear": True
},
"material": {
"density": 7850.0,
"youngs_modulus": 2069000000,
"poisson_ratio": 0.29,
"damping_ratio": 0.1
},
"geometry": {
"length_x": 1.2,
"number_of_elements": 1,
"defined_on_intervals": [{
"interval_bounds": [0.0, "End"],
"length_y": [1.0],
"length_z": [1.0],
"area": [0.0001],
"shear_area_y": [0.0],
"shear_area_z": [0.0],
"moment_of_inertia_y": [0.0001],
"moment_of_inertia_z": [0.0001],
"torsional_moment_of_inertia": [0.0001],
"outrigger_mass": [0.0],
"outrigger_stiffness": [0.0]}]
}
},
"boundary_conditions": "fixed-free"
}
dt = 0.1
tend = 10.
steps = int(tend / dt)
array_time = np.linspace(0.0, tend, steps + 1)
array_time_kratos = np.linspace(0.1, 10, 101)
def test_residual_based_solvers():
f_ext = np.array([np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 100.0 * np.sin(t), 0.0, 0.0, 0.0])
for t in np.sin(array_time)])
u0 = np.zeros(6)
v0 = np.zeros(6)
a0 = np.zeros(6)
scheme = "BackwardEuler1"
beam = StraightBeam(params)
f_ext = beam.apply_bc_by_reduction(f_ext, 'column').T
newton_solver = ResidualBasedNewtonRaphsonSolver(array_time, scheme, dt,
[beam.comp_m, beam.comp_b, beam.comp_k],
[u0, v0, a0], f_ext, beam)
picard_solver = ResidualBasedPicardSolver(array_time, scheme, dt,
[beam.comp_m, beam.comp_b, beam.comp_k],
[u0, v0, a0], f_ext, beam)
newton_solver.solve()
picard_solver.solve()
reference_file = "kratos_reference_results/dynamic_displacement_z.txt"
disp_z_soln = np.loadtxt(reference_file)[:, 1]
plt.plot(array_time, newton_solver.displacement[2, :], c='b', label='Newton Raphson')
plt.plot(array_time, picard_solver.displacement[2, :], c='g', label='Picard')
plt.plot(array_time_kratos, disp_z_soln, c='k', label='Kratos reference')
plt.grid()
plt.legend()
plt.show()
```
#### File: ParOptBeam/test_scripts/test_structure_model.py
```python
from source.model.structure_model import StraightBeam
import numpy as np
params = {
"name": "CaarcBeamPrototypeOptimizable",
"domain_size": "3D",
"system_parameters": {
"element_params": {
"type": "CRBeam",
"is_nonlinear": True
},
"material": {
"density": 7850.0,
"youngs_modulus": 2069000000,
"poisson_ratio": 0.29,
"damping_ratio": 1
},
"geometry": {
"length_x": 1.2,
"number_of_elements": 1,
"defined_on_intervals": [{
"interval_bounds": [0.0, "End"],
"length_y": [1.0],
"length_z": [1.0],
"area": [0.0001],
"shear_area_y": [0.0],
"shear_area_z": [0.0],
"moment_of_inertia_y": [0.0001],
"moment_of_inertia_z": [0.0001],
"torsional_moment_of_inertia": [0.0001],
"outrigger_mass": [0.0],
"outrigger_stiffness": [0.0]}]
}
},
"boundary_conditions": "fixed-free"
}
def test_structure_model():
beam = StraightBeam(params)
``` |
{
"source": "jozo/ioi-assignment",
"score": 3
} |
#### File: ioi-assignment/api/db.py
```python
import logging
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from sqlalchemy.future import select
from sqlalchemy.orm import sessionmaker
from api import settings
from api.models import Currency
log = logging.getLogger(__name__)
def get_session(db: AsyncEngine):
return sessionmaker(db, expire_on_commit=False, class_=AsyncSession)
async def save_price(currency: str, bid: float, timestamp: int, db: AsyncEngine):
try:
async with get_session(db)() as session:
session.add(Currency(currency=currency, price=bid, date_=timestamp))
await session.commit()
except SQLAlchemyError:
log.exception("Can't save price to DB")
async def fetch_history(page: int, db: AsyncEngine):
items = []
async with get_session(db)() as session:
stmt = (
select(Currency)
.order_by(Currency.id.desc())
.slice(page * settings.PAGE_SIZE, (page + 1) * settings.PAGE_SIZE)
)
rows = await session.execute(stmt)
for currency in rows.scalars().all():
items.append(
{
"currency": currency.currency,
"bid": currency.price,
"timestamp": currency.date_,
}
)
return items
``` |
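The `slice(page * PAGE_SIZE, (page + 1) * PAGE_SIZE)` call in `fetch_history` is the ORM equivalent of OFFSET/LIMIT pagination. Below is a hedged sketch of the same statement written with `offset()`/`limit()`; the `PAGE_SIZE` value and the passed-in model are stand-ins, not values from this project.
```python
from sqlalchemy.future import select

PAGE_SIZE = 20  # assumed page size, the real value lives in api.settings


def history_stmt(model, page: int):
    # same ordering and paging as fetch_history, expressed via offset/limit
    return (
        select(model)
        .order_by(model.id.desc())
        .offset(page * PAGE_SIZE)
        .limit(PAGE_SIZE)
    )
```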
{
"source": "jozsa/AirBnB_clone",
"score": 3
} |
#### File: tests/test_models/test_amenity.py
```python
import unittest
from datetime import datetime
from models.base_model import BaseModel
from models.amenity import Amenity
class TestAmenity(unittest.TestCase):
"""Testing Amenity"""
def setUp(self):
"""
Create a new instance of Amenity before each test
"""
self.a1 = Amenity()
def tearDown(self):
"""
Delete Amenity instance before next test
"""
del self.a1
def test_uniqueUUID(self):
"""
Make sure each UUID is unique
"""
a2 = Amenity()
self.assertNotEqual(self.a1.id, a2.id)
def test_id_type(self):
"""
Make sure id is a string not uuid data type
"""
self.assertEqual(type(self.a1.id), str)
def test_created_at_type(self):
"""
Make sure created_at is datetime data type
"""
self.assertEqual(type(self.a1.created_at), datetime)
def test_updated_at_type(self):
"""
Make sure updated_at is datetime data type
"""
self.assertEqual(type(self.a1.updated_at), datetime)
def test_name_type(self):
"""
Make sure name is str data type
"""
self.assertEqual(type(Amenity.name), str)
def test_save(self):
"""
Make sure save does update the updated_at attribute
"""
old_updated_at = self.a1.updated_at
self.a1.save()
self.assertNotEqual(old_updated_at, self.a1.updated_at)
def test_str(self):
"""
Testing return of __str__
"""
self.assertEqual(str(self.a1), "[Amenity] ({}) {}".
format(self.a1.id, self.a1.__dict__))
def test_to_dict(self):
"""
Make sure to_dict returns the right dictionary
and the dict has the right attributes with the right types.
"""
model_json = self.a1.to_dict()
self.assertEqual(type(model_json), dict)
self.assertTrue(hasattr(model_json, '__class__'))
self.assertEqual(type(model_json['created_at']), str)
self.assertEqual(type(model_json['updated_at']), str)
def test_kwargs(self):
"""
Test passing kwargs to Amenity instantation
"""
json_dict = self.a1.to_dict()
a2 = Amenity(**json_dict)
self.assertEqual(self.a1.id, a2.id)
self.assertEqual(self.a1.created_at, a2.created_at)
self.assertEqual(self.a1.updated_at, a2.updated_at)
self.assertNotEqual(self.a1, a2)
``` |
{
"source": "jozsa/pixguise",
"score": 2
} |
#### File: pixguise/app/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db.models.manager import BaseManager
from django.core.validators import RegexValidator
from uuid import uuid4
from django.conf import settings
import zipfile
import os
from PIL import Image
from io import BytesIO
from django.core.files.base import ContentFile
# Goal: Add this feature and uncomment it out
"""
phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
message="Mobile number must be entered in the format:"
" '+999999999'. Up to 15 digits allowed.")
"""
class MyUserManager(BaseUserManager):
"""
Custom UserManager with email normalization
"""
def create_user(self, email):
"""
Creates and saves User
"""
if not email:
raise ValueError('Email is required')
user = self.model(email=MyUserManager.normalize_email(email))
user.save(using=self._db)
return user
class CustomUser(AbstractBaseUser):
"""
Custom user model with email and email_verified
fields
"""
email = models.EmailField(max_length=255,
unique=True,
blank=True,
null=True)
email_verified = models.BooleanField(default=False)
# Goal: add this feature
"""
mobile = models.CharField(validators=[phone_regex], max_length=15, unique=True, blank=True, null=True)
mobile_verified = models.BooleanField(default=False)
"""
objects = MyUserManager()
USERNAME_FIELD = 'email'
class Base(models.Model):
"""
Base model for all models to inherit from.
"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class AlbumManager(models.Manager):
""" Naive manager """
def create(self, title, owner_id):
"""
Create album object with required parameters:
title and owner_id.
"""
if not title:
raise ValueError('Title is required')
if not owner_id:
raise ValueError('Owner id is required')
album = self.model(title=title,
owner_id=owner_id)
album.save(using=self._db)
return album
class Album(Base):
"""
Album objects - each photo is linked to one or more Album.
title and owner_id must be passed into object instantation.
archive_id is updated after archive of album is created.
is_private - True by default, users can choose to make their albums public.
"""
title = models.CharField(max_length=100,
null=True)
owner_id = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
archive_id = models.ForeignKey('app.Archive',
on_delete=models.CASCADE,
null=True)
is_private = models.BooleanField(default=True)
def add_archive(self, archive):
"""
Update album.archive_id with appropriate archive object.
"""
self.archive_id = archive
self.save()
class PhotoManager(models.Manager):
""" Naive manager. """
def create(self, filename, albums):
if not filename:
raise ValueError('Filename is required')
photo = self.model(filename=filename)
photo.save(using=self._db)
return photo
class Photo(Base):
"""
Photo class that stores all information
related to each uploaded image.
"""
# TODO: Figure out filepath. Currently this saves to uploads/uploads - we just want it to save to uploads/ - until we figure out S3 - when we will change the MEDIA_ROOT to s3.
filename = models.ImageField(upload_to='photos/',
default='image.jpg')
users = models.ManyToManyField(settings.AUTH_USER_MODEL)
albums = models.ManyToManyField(Album)
thumbnail = models.ImageField(upload_to='thumbnails/',
default='thumbnail.jpg')
def save(self, *args, **kwargs):
"""
Overrides default save() method by calling
create_thumbnail before Photo object is saved.
"""
if not self.create_thumbnail():
raise Exception('Invalid file type')
super(Photo, self).save(*args, **kwargs)
def create_thumbnail(self):
"""
Instance method to create thumbnail of Photo
"""
thumbnail_name, thumbnail_extension = os.path.splitext(self.filename.name)
thumbnail_extension = thumbnail_extension.lower()
if thumbnail_extension in ['.jpg', '.jpeg']:
FTYPE = 'JPEG'
elif thumbnail_extension == '.gif':
FTYPE = 'GIF'
elif thumbnail_extension == '.png':
FTYPE = 'PNG'
elif thumbnail_extension == '.bmp':
FTYPE = 'BMP'
else:
return False
image = Image.open(self.filename)
image.thumbnail((400, 400))
temp_thumbnail = BytesIO()
image.save(temp_thumbnail, FTYPE)
temp_thumbnail.seek(0)
self.thumbnail.save('{}_thumbnail{}'.format(thumbnail_name,
thumbnail_extension),
ContentFile(temp_thumbnail.read()),
save=False)
temp_thumbnail.close()
return True
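# Illustrative helper (an assumption, not used by the models above): exposes the
# extension -> Pillow format mapping that create_thumbnail relies on, so callers
# can check whether a file type is supported before saving a Photo.
_SUPPORTED_THUMBNAIL_FORMATS = {
    '.jpg': 'JPEG', '.jpeg': 'JPEG', '.gif': 'GIF', '.png': 'PNG', '.bmp': 'BMP',
}

def _thumbnail_format_for(filename):
    """Return the Pillow format create_thumbnail would pick, or None if unsupported."""
    _, extension = os.path.splitext(filename)
    return _SUPPORTED_THUMBNAIL_FORMATS.get(extension.lower())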
class ArchiveManager(models.Manager):
"""
Manager for Archive objects - creates zip archive
containing all photos within an album
"""
def create(self, album_id):
"""
Overrides create method of models.Manager
by including compression of images.
"""
if not album_id:
raise ValueError('Album id is required')
filename = os.path.join(settings.MEDIA_ROOT,
'zipfiles/{}.zip'.format(uuid4()))
# Get a list of all filenames where album id matches the requested album
photos = Photo.objects.filter(albums=album_id).values_list('filename',
flat=True)
# Compress all photos into one zip file
with zipfile.ZipFile(filename, 'w') as archive:
for photo in photos:
photo = os.path.join(settings.MEDIA_ROOT,
photo)
archive.write(photo, os.path.relpath(photo,
settings.MEDIA_ROOT))
archive = self.model(album_id=album_id,
filename=filename)
archive.save(using=self._db)
return archive
class Archive(Base):
"""
Class for archive objects containing
file name and foreign key for album.
"""
filename = models.URLField(unique=True)
album_id = models.ForeignKey(Album,
on_delete=models.CASCADE)
objects = ArchiveManager()
class LinkManager(models.Manager):
"""
Manager to create Link objects
"""
def create(self, archive):
"""
Overrides default create method to include url
creation for each archive.
"""
if not archive:
raise ValueError('Archive id is required')
# TODO - finalize URL name and edit download route and HTML based on final URL
url = 'http://localhost:8000/download/{}/{}/'.format(archive.id, uuid4())
link = self.model(url=url, archive_id=archive)
link.save(using=self._db)
return link
class Link(Base):
"""
Link class containing URL, is_expired,
and archive_id Foreign Key for related archives.
"""
is_expired = models.BooleanField(default=False)
url = models.URLField(unique=True)
archive_id = models.ForeignKey(Archive, on_delete=models.CASCADE)
objects = LinkManager()
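# Illustrative sketch (an assumption, not executed on import): the intended flow
# tying the managers together -- archive an album's photos, attach the archive,
# then mint a shareable download link. Exact argument shapes are assumptions.
def _example_share_album(album):
    archive = Archive.objects.create(album_id=album)
    album.add_archive(archive)
    link = Link.objects.create(archive)
    return link.url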
``` |
{
"source": "jozsinakhivnak/diacriticrestoration",
"score": 3
} |
#### File: jozsinakhivnak/diacriticrestoration/accent_ngram.py
```python
import operator
import re
import csv
from unicodedata import normalize
import os
from io import open
import xml.etree.ElementTree as ET
import sys
reload(sys) # Reload does the trick!
sys.setdefaultencoding('UTF8')
import getopt
import time
import pickle
import argparse
import codecs
import math
from sys import stdin
import unicodedata
from importlib import import_module
# Import common module for shared operations
common = import_module("common")
# Accents word with n-gram solution using local context
def accentWithNgram(buffer, deaccented, padding_char, diff, N, accents,words_dictionary):
# Remove first unnecessary element
buffer.pop(0)
# Append the new one
buffer.append(deaccented)
# Create local context
prevText = padding_char.join(buffer[0:diff])
follText = padding_char.join(buffer[diff+1:N])
word = buffer[diff]
# Invoke the shared NGram accent method
word = common.ngramAccent(word,words_dictionary, diff, accents,prevText, follText, padding_char)
return word
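# Illustrative helper (an assumption, not called by main): shows how the sliding
# buffer is split into (previous context, focus word, following context), which
# is exactly what accentWithNgram hands to common.ngramAccent.
def _example_context_window(buffer, diff, padding_char):
    prev_text = padding_char.join(buffer[0:diff])
    foll_text = padding_char.join(buffer[diff + 1:len(buffer)])
    return prev_text, buffer[diff], foll_text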
def main():
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--ngram", help="N value for N-gram, such as 1,2,3,4,5..",type=int, default=2)
parser.add_argument('--timer', dest='timer', help="Timer enabled", action='store_true')
parser.add_argument('-d', '--dict', dest='dict', help="Dictionary file name", default="../Resources/HU_2gram_dict")
parser.add_argument('-s', '--dsize', dest='dsize', help="Dictionary size in lines")
parser.add_argument('-a', '--accents', type=str, default='áaéeíióoöoőoúuüuűu',
help='accent mapping')
parser.set_defaults(feature=False)
args = parser.parse_args()
timer_enabled = args.timer
accents = args.accents
dictionary_size = int(args.dsize)
# N-gram parameter
N = (args.ngram*2)+1
diff = args.ngram
# Start timer if enabled
if (timer_enabled):
start = time.time()
# Get the dictionary for the ngrams
dictionary_filename = args.dict
# Declare the dictionary
words_dictionary = {}
#dictionary_temp = list(csv.reader(open(dictionary_filename,'r',encoding='utf8'), delimiter='\t'))
# Build dictionary
common.buildDict(words_dictionary, dictionary_filename, dictionary_size)
# Get the shared padding char
padding_char = common.getPaddingChar()
word_buffer = []
for i in range(0,N):
word_buffer.append("")
initCounter = 0
# read every line of the input
for l in stdin:
#TEXT = l.translate(None, '()?,.:{}[]')
TEXT = l.decode("utf-8")
TEXT = TEXT.rstrip('\n') # strip newline from the end of the line
if (common.isAccentable(TEXT, accents)):
TEXT = common.replace(TEXT)
deaccented = common.remove_accents(unicode(TEXT))
if (initCounter < diff):
initCounter += 1
word_buffer.pop(0)
word_buffer.append(deaccented)
else:
# Invoke the shared NGram accent method
word = accentWithNgram(word_buffer, deaccented, padding_char, diff,N,
accents, words_dictionary)
print (word)
# Last ngram_diff iterations
for i in range(0,diff):
#Invoke the shared NGram accent method
word = accentWithNgram(word_buffer, "", padding_char, diff,N,
accents, words_dictionary)
print (word)
# Print timer info
if (timer_enabled):
end = time.time()
print ("Finished in " + str(end-start)+" seconds.")
if __name__ == '__main__':
main()
```
#### File: diacriticrestoration/Helpers/dictionarify.py
```python
from sys import stdin
from collections import defaultdict
import re
def replace(text):
for ch in ['\\', ',', '"', '\'', '?',';',':','`','*','_','{','}','[',']','(',')','>','<','#','+','-','.','!','$','\'']:
text = text.replace(ch," ")
return text
def main():
counter = defaultdict(int)
for l in stdin: # every line in the input
text = l.decode('utf8') # decode from utf-8 encoded string
text = text.rstrip('\n') # strip newline from the end of the line
text = replace(text)
words = text.split()
for word in words:
print (word.encode('utf8').strip())
# if the script is called directly (as opposed to being imported)
# call the main function.
# This prevents it from being run when this module is imported
if __name__ == '__main__':
main()
``` |
{
"source": "jozz024/smash-amiibo-editor",
"score": 2
} |
#### File: jozz024/smash-amiibo-editor/ssbu_amiibo.py
```python
from amiibo import AmiiboDump
from amiibo.crypto import AmiiboBaseError
import copy
class InvalidAmiiboDump(AmiiboBaseError):
pass
class IncorrectGameDataIdException(Exception):
pass
class InvalidSsbuChecksum(Exception):
pass
class SettingsNotInitializedError(Exception):
pass
class SsbuAmiiboDump(AmiiboDump):
"""
Class that's a thin wrapper around AmiiboDump.
Checks the amiibo has the super smash bros game id in the game data section on unlock
Writes the HMAC for the game data before locking
"""
def __init__(self, master_keys, dump, is_locked=True):
super().__init__(master_keys, dump, is_locked)
self.dumpcopy = copy.deepcopy(self)
if is_locked == True:
self.dumpcopy.unlock()
def unlock(self, verify=True):
super().unlock(verify=verify)
# Checks if the amiibo has been initialized with an owner and name.
if not (self.data[0x14] >> 4) & 1:
raise SettingsNotInitializedError
# Checks if the amiibo's game is Super Smash Bros. Ultimate, and if not, we initialize it.
if bytes(self.data[266:270]).hex() != "34f80200":
self.data[0x14] = self.data[0x14] | (1 << 5)
self.data[266:270] = bytes.fromhex("34f80200")
self.data[0x100:0x108] = bytes.fromhex('01006A803016E000')
self.data[0x130:0x208] = bytes.fromhex("00" * 0xD8)
self.data[304:308] = self._calculate_crc32(self.data[308:520]).to_bytes(4, "little")
if self.data[304:308].hex() != self._calculate_crc32(self.data[308:520]).to_bytes(4, "little").hex():
raise InvalidSsbuChecksum(f'The checksum for this game data is not correct. Please use an untampered amiibo')
def lock(self):
if self.data[444:502] != self.dumpcopy.data[444:502]:
self.data[311] = self.data[311] | 1
if self.amiibo_nickname[-1] != '□':
if len(self.amiibo_nickname) == 10:
self.amiibo_nickname = self.amiibo_nickname[:-1] + '□'
else:
self.amiibo_nickname = self.amiibo_nickname + '□'
elif self.dumpcopy.amiibo_nickname[-1] == '□' and self.amiibo_nickname[-1] != '□':
if len(self.amiibo_nickname) == 10:
self.amiibo_nickname = self.amiibo_nickname[:-1] + '□'
else:
self.amiibo_nickname = self.amiibo_nickname + '□'
checksum = self._calculate_crc32(self.data[308:520])
mii_checksum = str(hex(self.crc16_ccitt_wii(self.data[0xA0:0xFE]))).lstrip('0x')
while len(mii_checksum) < 4:
mii_checksum = '0' + mii_checksum
self.data[304:308] = checksum.to_bytes(4, "little")
self.data[0xFE:0x100] = bytes.fromhex(mii_checksum)
super().lock()
@staticmethod
def _calculate_crc32(input):
# Setup CRC 32 table. Translated from js to python from amiibox codebase
# (should move this out so it sets up once, but it's quick enough as is)
p0 = 0xEDB88320 | 0x80000000
p0 = p0 >> 0
u0 = [0] * 0x100
i = 1
while (i & 0xFF):
t0 = i
for _ in range(8):
b = (t0 & 0x1) >> 0
t0 = (t0 >> 0x1) >> 0
if b:
t0 = (t0 ^ p0) >> 0
u0[i] = t0 >> 0
i += 1
# Calculate CRC32 from table
t = 0x0
for k in input:
t = ((t >> 0x8) ^ u0[(k ^ t) & 0xFF]) >> 0
return (t ^ 0xFFFFFFFF) >> 0
def crc16_ccitt_wii(self, data):
crc = 0
for byte in data:
byte = int.from_bytes([byte], 'big')
crc = crc ^ (byte << 8)
for _ in range(8):
crc = crc << 1
if (crc & 0x10000) > 0:
crc ^= 0x1021
return (crc & 0xFFFF)
@property
def amiibo_nickname(self):
# TODO: why is the Amiibo nickname big endian,
# but the Mii nickname little endian?
return self.data[0x020:0x034].decode('utf-16-be').rstrip('\x00')
@amiibo_nickname.setter
def amiibo_nickname(self, name):
utf16 = name.encode('utf-16-be')
if len(utf16) > 20:
raise ValueError
self.data[0x020:0x034] = utf16.ljust(20, b'\x00')
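# Illustrative sketch (an assumption, not part of the class): the CRC32 helper is
# a plain static method, so the game-data checksum can be verified without going
# through lock()/unlock(). The byte ranges mirror the ones used above.
def _example_verify_game_data_checksum(data):
    """Return True if bytes 304:308 hold the little-endian CRC32 of bytes 308:520."""
    expected = SsbuAmiiboDump._calculate_crc32(data[308:520]).to_bytes(4, 'little')
    return bytes(data[304:308]) == expected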
``` |
{
"source": "JP007-star/Django-Crud",
"score": 2
} |
#### File: Django-Crud/operations/views.py
```python
from django.http.response import HttpResponse
from operations.models import Employee
from django.shortcuts import redirect, render
from operations.forms import EmployeeForm
# Create your views here.
def index(request):
obj=Employee.objects.all()
return render(request, 'index.html',{'data':obj})
def create(request):
if request.method =="POST":
form=EmployeeForm(request.POST)
if form.is_valid():
form.save()
return redirect('read')
else:
form = EmployeeForm()
context={
'form':form
}
return render(request, 'create.html',context)
def update(request,id):
obj=Employee.objects.get(id=id)
if request.method =="POST":
form=EmployeeForm(request.POST, instance=obj)
if form.is_valid():
form.save()
return redirect('read')
else:
form = EmployeeForm()
context={
'form':form
}
return render(request, 'update.html',context)
def delete(request,id):
obj=Employee.objects.get(id=id)
obj.delete()
return redirect('read')
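# Illustrative sketch (assumed urls.py for this app, not part of this file): the
# route names below match the redirect('read') calls used above.
#
# from django.urls import path
# from operations import views
#
# urlpatterns = [
#     path('', views.index, name='read'),
#     path('create/', views.create, name='create'),
#     path('update/<int:id>/', views.update, name='update'),
#     path('delete/<int:id>/', views.delete, name='delete'),
# ]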
``` |
{
"source": "jp172/covid19-hospital-scheduler",
"score": 3
} |
#### File: src/schedulers/capacity_coefficient_scheduler.py
```python
from .utils import get_feasible_hospitals
from ..objects.proposal import RankedProposal
class CapacityScheduler:
def assign_request(self, instance, request):
hospitals = instance.get_hospitals_in_area(request.person.position)
# get feasible hospitals checks for vehicle range and free beds
feasible_hospitals = get_feasible_hospitals(
hospitals, request.person.position
)
# TODO: should we really take the min over all hospitals here?
if not feasible_hospitals:
best_hospital = min(instance.hospitals.values(), key=lambda h: h.capacity_coefficient)
return RankedProposal([best_hospital])
else:
feasible_hospitals = sorted(feasible_hospitals, key=lambda h: h.capacity_coefficient)[:3]
return RankedProposal(feasible_hospitals)
```
#### File: covid19-hospital-scheduler/src/visualize.py
```python
import plotly.express as px
import pandas as pd
def add_data(data, instance, hospital_scores, cur_time):
for ident, h in instance.hospitals.items():
data.append(
[cur_time, ident, h.position.lat, h.position.lon, hospital_scores[ident]]
)
x_min = 5.6
x_max = 15.35
y_min = 47.1
y_max = 55.1
def hospital_visualization(instance, start, end, ticks, index):
instance.snapshots = sorted(instance.snapshots, key=lambda s: s.filed_at)
data_list = []
hospital_scores = {ident: 0 for ident, h in instance.hospitals.items()}
i = 0
for t in range(ticks):
cur_time = round(start + t / ticks * (end - start))
while (
i + 1 < len(instance.snapshots)
and instance.snapshots[i + 1].filed_at <= cur_time
):
s = instance.snapshots[i]
hospital_scores[str(s.hospital_ident)] = s.capacity_coefficient
i += 1
add_data(data_list, instance, hospital_scores, cur_time)
df = pd.DataFrame(data_list, columns=["time", "id", "y", "x", "capacity_score"])
fig = px.scatter(
df,
x="x",
y="y",
animation_frame="time",
animation_group="id",
color="capacity_score",
color_continuous_scale=[(0.00, "green"), (0.5, "yellow"), (1, "red")],
range_color=[0, 2],
hover_name="id",
range_x=[x_min, x_max],
range_y=[y_min, y_max],
width=900,
height=1200,
)
fig.add_layout_image(
dict(
source="https://upload.wikimedia.org/wikipedia/commons/thumb/e/e3/Karte_Deutschland.svg/1000px-Karte_Deutschland.svg.png"
),
xref="x",
yref="y",
x=x_min,
y=y_max,
sizex=(x_max - x_min),
sizey=(y_max - y_min),
sizing="stretch",
opacity=0.5,
layer="below",
)
fig.update_traces(marker=dict(size=10))
for frame in fig.frames:
for i in range(len(frame.data)):
frame.data[i]["marker"]["symbol"] = "square"
# fig.show()
html_dump = fig.to_html()
with open(f"data/visualization/hospitals{index}.html", "w") as f:
f.write(html_dump)
def corona_visualization(instance, start, end, ticks, index):
# visible for more time steps
nbr_ticks_visible = 10
reqs = sorted(instance.requests.values(), key=lambda r: r.filed_at)
data_list = []
times = [round(start + t / ticks * (end - start)) for t in range(ticks)]
r_id = 0
for t in range(ticks):
cur_time = times[t]
while r_id + 1 < len(reqs) and reqs[r_id + 1].filed_at <= cur_time:
r = instance.requests[str(r_id)]
for i in range(nbr_ticks_visible):
if i + t >= ticks:
break
data_list.append(
[
times[i + t],
r.ident,
r.person.position.lon,
r.person.position.lat,
]
)
r_id += 1
df = pd.DataFrame(data_list, columns=["time", "id", "x", "y"])
fig = px.scatter(
df,
x="x",
y="y",
animation_frame="time",
animation_group="id",
hover_name="id",
range_x=[x_min, x_max],
range_y=[y_min, y_max],
width=900,
height=1200,
)
fig.add_layout_image(
dict(
source="https://upload.wikimedia.org/wikipedia/commons/thumb/e/e3/Karte_Deutschland.svg/1000px-Karte_Deutschland.svg.png"
),
xref="x",
yref="y",
x=x_min,
y=y_max,
sizex=(x_max - x_min),
sizey=(y_max - y_min),
sizing="stretch",
opacity=0.5,
layer="below",
)
# fig.show()
html_dump = fig.to_html()
with open(f"data/visualization/patients{index}.html", "w") as f:
f.write(html_dump)
def visualize(instance, snapshot_list):
for index in range(len(snapshot_list)):
instance.snapshots = snapshot_list[index]
time_frame_start = min(r.filed_at for r in instance.requests.values())
time_frame_end = max(r.filed_at for r in instance.requests.values())
nbr_ticks = 50
hospital_visualization(
instance, time_frame_start, time_frame_end, nbr_ticks, index
)
corona_visualization(
instance, time_frame_start, time_frame_end, nbr_ticks, index
)
``` |
{
"source": "jp19-lafa/IoT-Node",
"score": 2
} |
#### File: IoT-Node/pair/bluetoothManager.py
```python
import subprocess
import time
import pair.autopair as pairable
import pair.config as config
import pair.helper as helper
import pair.logger as logger
import pair.wifi as wifi
import pair.wpa as wpa
import bluetooth
from bluetooth.ble import DiscoveryService
# The MAC address of a Bluetooth adapter on the server. Leave blank to use default connection
hostMACAddress = ""
port = bluetooth.PORT_ANY
backlog = 1
size = 1024
server = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
class wifiConnection:
def __init__(self):
self.password = None
self.ssid = None
logger.log("WPA2 connection created")
def try_connect(self):
"""
Trying to connect to the network
"""
logger.log("Trying to connect to the network {}".format(self.ssid))
# TODO: try to connect to the network using the given credentials
cli = wpa.wpa(["ssid {}".format(self.ssid), "psk {}".format(self.password)])
cli.execute()
# give time to connect
time.sleep(config.WIFI_WAIT_UNTIL_CONNECTION)
return wifi.ConnectedToTheNetwork()
class wifiMCHAPConnection:
def __init__(self):
self.password = None
self.username = None
self.ssid = None
logger.log("MCHAP connection created")
def try_connect(self):
"""
Trying to connect to the network
"""
logger.log("Trying to connect to the network {}".format(self.ssid))
# TODO: try to connect to the network using the given credentials
cli = wpa.wpa(["ssid {}".format(self.ssid),
"key_mgmt WPA_EAP", "eap PEAP", "identity {}".format(self.username), "password {}".format(self.password)])
cli.execute()
# give time to connect
time.sleep(config.WIFI_WAIT_UNTIL_CONNECTION)
return wifi.ConnectedToTheNetwork()
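# Overview of the RFCOMM payload sequence getWifiData() below expects from the
# phone (the example values are placeholders, not taken from the project docs):
#   TYPE:wpa2            -> select wifiConnection (or TYPE:mchap for wifiMCHAPConnection)
#   SSID:YourNetwork     -> set connection.ssid
#   PWD:YourPassword     -> set connection.password (plus USER:... for MCHAP)
#   TRY:1                -> attempt to join; the node replies SUCCESS:1 or ERROR:<code>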
def pair():
"""
Function to try to pair with a phone.
Note that this function accepts all pair requests that come in
"""
logger.log("Trying to pair with a device", logger.LOG_DEBUG)
# only continue if there is no network
if wifi.ConnectedToTheNetwork():
return
# wait until we pair with a device
AutoPair = pairable.BtAutoPair()
logger.log("Configuring discoverability settings")
AutoPair.enable_pairing()
logger.log("Done configuring bluetooth settings")
# check if we paired with a new device
has_connected = subprocess.check_output("bluetoothctl info | head -n1",
shell=True).decode("utf-8")
logger.log(has_connected)
while "Missing" in has_connected:
has_connected = subprocess.check_output("bluetoothctl info | head -n1",
shell=True).decode("utf-8")
time.sleep(0.1)
logger.log("Connected with device: " + has_connected)
return has_connected.split(" ")[1]
def startup(server):
"""
Initialize a bluetooth server so that we can communicate with the client phone
@server is the bluetooth connection server
"""
logger.log("Starting up the bluetooth module", logger.LOG_DEBUG)
logger.log("Connected to bluetooth device: {} ".format(pair()),
logger.LOG_DEBUG)
if wifi.ConnectedToTheNetwork():
return
server.bind((hostMACAddress, port))
server.listen(backlog)
uuid = "94f39d29-7d6d-437d-973b-fba39e49d4ee"
bluetooth.advertise_service(
sock=server,
name="Bluetooth Speaker",
service_id=uuid,
service_classes=[bluetooth.SERIAL_PORT_CLASS],
profiles=[bluetooth.SERIAL_PORT_PROFILE],
)
return server
def idle(server):
"""
Wait until a bluetooth connection is made
@server is the bluetooth connection server
"""
logger.log("Waiting for a bluetooth connection", logger.LOG_DEBUG)
if wifi.ConnectedToTheNetwork():
return None, None
try:
client, clientInfo = server.accept()
except:
print("Closing socket")
server.close()
return None, None
return client, clientInfo
def extractData(command, data):
"""
We split the payload and extract the command and value from it.
If the command is equal to the expected command then we can return the value
Otherwise we return a nullptr
@command is a string containing the expected bluetooth command
@data is the payload send over bluetooth (also a string)
"""
split = data.replace("\r\n", "").split(":")
if len(split) < 2:
logger.log("Incoming data payload is too small, {}".format(split))
return None
if not split[0] == command:
logger.log(
"Extracted data doesn't match expected type, {} but got {} instead"
.format(command, split[0]))
return None
logger.log(
"Retreived data from bluetooth socker: {}".format("".join(split[1:])),
logger.LOG_DEBUG,
)
return "".join(split[1:])
def extractSSID(data):
"""
Retrieve the network ssid from the bluetooth connection.
The command should be as follows: SSID:Your_ssid
"""
return extractData("SSID", data)
def extractPassword(data):
"""
Retrieve the password from the bluetooth connection.
The command should be as follows: PWD:Your_password
"""
return extractData("PWD", data)
def extractUsername(data):
"""
Retrieve the username from the bluetooth connection.
The command should be as follows: USER:Your_username
"""
return extractData("USER", data)
def getWifiData(client, clientInfo, server):
"""
Communicate with the phone over bluetooth to gather the wifi credentials here
"""
if wifi.ConnectedToTheNetwork():
return
logger.log("Receiving wifi credentials", logger.LOG_DEBUG)
connection = None
while 1:
data = client.recv(size).decode("utf-8")
if data:
if "TYPE:" in data:
type = extractData("TYPE", data)
if type == "wpa2":
connection = wifiConnection()
elif type == "mchap":
connection = wifiMCHAPConnection()
else:
client.send("ERROR:2 - Server doesn't recognize wifi type")
if "SSID:" in data:
if connection:
connection.ssid = extractSSID(data)
else:
client.send("ERROR:1 - No connection specified")
elif "PWD:" in data:
if connection:
connection.password = extractPassword(data)
else:
client.send("ERROR:1 - No connection specified")
elif "USER:" in data:
if connection:
if isinstance(connection, wifiMCHAPConnection):
connection.username = extractUsername(data)
else:
client.send("ERROR:4 cannot set property that is not part of the connection type")
else:
client.send("ERROR:1 - No connection specified")
elif "TRY:1" in data:
if connection:
if connection.try_connect(
): # try to connect to the network
client.send("SUCCESS:1 - connected to a network")
else:
client.send("ERROR:3 - Network credentials are wrong")
else:
client.send("ERROR:1 - No connection specified")
else:
client.send(data) # Echo back to client
def set_name(name):
"""
We change the bluetooth friendly name (alias) here
"""
logger.log("Changing bluetooth name to {}".format(name))
subprocess.call("bluetoothctl <<EOF\nsystem-alias {}\nEOF".format(name), shell=True)
subprocess.call("bluetoothctl <<EOF\ndiscoverable-timeout 86400\nEOF", shell=True)
subprocess.call("bluetoothctl <<EOF\ndiscoverable on\nEOF", shell=True)
logger.log("Done setting bluetooth settings {}".format(name))
def EstablishConnection():
# TODO: make a unique id as the farm name
set_name(config.BLUETOOTH_NAME + helper.unique_id())
startup(server)
client, info = idle(server)
getWifiData(client, info, server)
logger.log("Established wifi data. Starting up farm now", logger.LOG_WARM)
```
#### File: IoT-Node/pair/bluezutils.py
```python
import dbus
SERVICE_NAME = "org.bluez"
ADAPTER_INTERFACE = SERVICE_NAME + ".Adapter1"
DEVICE_INTERFACE = SERVICE_NAME + ".Device1"
def get_managed_objects():
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object("org.bluez", "/"),
"org.freedesktop.DBus.ObjectManager")
return manager.GetManagedObjects()
def find_adapter(pattern=None):
return find_adapter_in_objects(get_managed_objects(), pattern)
def find_adapter_in_objects(objects, pattern=None):
bus = dbus.SystemBus()
for path, ifaces in objects.items():
adapter = ifaces.get(ADAPTER_INTERFACE)
if adapter is None:
continue
if not pattern or pattern == adapter["Address"] or \
path.endswith(pattern):
obj = bus.get_object(SERVICE_NAME, path)
return dbus.Interface(obj, ADAPTER_INTERFACE)
raise Exception("Bluetooth adapter not found")
def find_device(device_address, adapter_pattern=None):
return find_device_in_objects(get_managed_objects(), device_address,
adapter_pattern)
def find_device_in_objects(objects, device_address, adapter_pattern=None):
bus = dbus.SystemBus()
path_prefix = ""
if adapter_pattern:
adapter = find_adapter_in_objects(objects, adapter_pattern)
path_prefix = adapter.object_path
for path, ifaces in objects.items():
device = ifaces.get(DEVICE_INTERFACE)
if device is None:
continue
if (device["Address"] == device_address and
path.startswith(path_prefix)):
obj = bus.get_object(SERVICE_NAME, path)
return dbus.Interface(obj, DEVICE_INTERFACE)
raise Exception("Bluetooth device not found")
```
#### File: IoT-Node/src/sensordata.py
```python
import glob
import os
import time
import random
import src.config as config
import src.logger as logger
# TODO: uncomment when using rpi
import smbus
os.system("modprobe w1-gpio") # enable one wire gpio interface
os.system("modprobe w1-therm")
bus = smbus.SMBus(1) # RPI used i2C bus 1
class Payload:
"""
Payload to send data over a mqtt connection
"""
def __init__(self, payload, topic):
self.payload = payload
self.topic = topic
def getOneWireSensor():
"""
Returns the file that contains the one wire sensor data
"""
base_dir = "/sys/bus/w1/devices/"
device_folder = glob.glob(base_dir + "28*")[0]
return device_folder + "/w1_slave"
def read_temp_raw(file):
"""
Read out the temperature file and return the raw lines.
It returns a list of lines in the file (in string format utf-8)
"""
f = open(file, "r")
lines = f.readlines()
f.close()
return lines
def readLevel(addr):
"""
Read one byte from the I2C bus based on its address
"""
data = None
try:
time.sleep(0.1)
data = bus.read_byte(addr)
except Exception as e:
print(e)
# don't call to quick on the I2C bus
time.sleep(1.5)
if data != None:
return "{}".format(data)
return "0"
def readHumidity():
try:
bus.write_quick(0x27)
time.sleep(0.1)
# HIH6020 address, 0x27(39)
# Read data back from 0x00(00), 4 bytes
# humidity MSB, humidity LSB, temp MSB, temp LSB
data = bus.read_i2c_block_data(0x27, 0x00, 4)
# Convert the data to 14-bits
humidity = ((((data[0] & 0x3F) * 256.0) + data[1]) * 100.0) / 16382.0
temp = ((data[2] * 256) + (data[3] & 0xFC)) / 4
cTemp = (temp / 16382.0) * 165.0 - 40.0
# Output data to screen
return humidity, cTemp
except Exception as e:
print(e)
return -1, -1
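# Illustrative worked example (made-up raw bytes, no I2C access): repeats the
# HIH6020 conversion from readHumidity so the scaling can be sanity-checked.
def _example_hih6020_conversion(data=(0x1F, 0xFF, 0x64, 0x00)):
    """Returns roughly (50.0 %RH, 24.5 C) for the default bytes."""
    humidity = ((((data[0] & 0x3F) * 256.0) + data[1]) * 100.0) / 16382.0
    temp = ((data[2] * 256) + (data[3] & 0xFC)) / 4
    c_temp = (temp / 16382.0) * 165.0 - 40.0
    return humidity, c_temp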
def GetWaterLevel():
"""
Read from the 3 sensors and determin the worst level to relay back
This is the watersensor with the highest value
"""
water1 = int(readLevel(config.sensorPins[1]))
water2 = int(readLevel(config.sensorPins[2]))
water3 = int(readLevel(config.sensorPins[3]))
if water1 > water2 and water1 > water3:
return str(water1)
elif water2 > water3:
return str(water2)
return str(water3)
def readlight():
bus.write_byte_data(config.sensorPins[3], 0x00 | 0x80, 0x03)
# TSL2561 address, 0x39(57)
# Select timing register, 0x01(01) with command register, 0x80(128)
# 0x02(02) Nominal integration time = 402ms
bus.write_byte_data(config.sensorPins[3], 0x01 | 0x80, 0x02)
time.sleep(0.5) # poll light data
# pull data
data = bus.read_i2c_block_data(config.sensorPins[3], 0x0C | 0x80, 2)
# convert to lux
ch0 = data[1] * 256 + data[0]
return ch0
# Read section
def readMock():
"""
Used during testing.
This can be useful when not all sensors are present
"""
return [
Payload(str((random.random()*4) + 5), "/sensor/waterph"),
Payload(str((random.random()*10) + 14), "/sensor/watertemp"),
Payload(str((random.random()*600) + 100), "/sensor/lightstr"),
Payload(str((random.random()*20) + 40), "/sensor/airhumidity"),
Payload(str((random.random()*10) + 14), "/sensor/airtemp")
]
def readReal():
"""
Used to read real sensor data
"""
phValue = str(float(readLevel(config.sensorPins[-1]))/18.214)
phValue = str(random.random() + 7)
try:
waterTemp = str(readTemperature(getOneWireSensor()))
logger.log("Read from w1 the water temp {} C".format(waterTemp), logger.LOG_DEBUG)
except Exception as e:
waterTemp = "-1"
logger.log("Count not read temperature from w1", logger.LOG_ERROR)
light = readlight()
humidity, humidityTemp = readHumidity()
humidity = str((random.random()*20) + 40)
return [
Payload(phValue, "/sensor/waterph"),
Payload(waterTemp, "/sensor/watertemp"),
Payload(light, "/sensor/lightstr"),
Payload(humidity, "/sensor/airhumidity"),
Payload(waterTemp, "/sensor/airtemp")
]
def readAll():
"""
Read all sensors out. Build a MQTT payload and send it over
"""
return readReal()
def readTemperature(file):
"""
Returns the temperature of the one wire sensor.
Pass in the file containing the one wire data (ds18b20+)
"""
lines = read_temp_raw(file)
while lines[0].strip()[-3:] != "YES":
time.sleep(0.2)
lines = read_temp_raw(file)
equals_pos = lines[1].find("t=")
if equals_pos != -1:
temp_string = lines[1][equals_pos + 2:]
# convert temperature to C
temp_c = float(temp_string) / 1000.0
return temp_c
return -273.15 # absolute 0
if __name__ == "__main__":
file = getOneWireSensor()
while True:
time.sleep(1)
logger.log(readTemperature(file), logger.LOG_DEBUG)
logger.log(readLevel(config.sensorPins[0]), logger.LOG_DEBUG) # read out the first I2C sensor
``` |
{
"source": "jp2011/spatial-poisson-mixtures",
"score": 3
} |
#### File: src/data/create_crime_db.py
```python
import click
import logging
from dotenv import find_dotenv, load_dotenv
import os
import sqlite3
from sqlite3 import Error
import pandas as pd
TABLE_NAME = "LONDON"
CSV_COL_NAMES = ["Month", "Latitude", "Longitude", "Location", "Crime type"]
DB_COL_NAMES = ["MONTH", "LATITUDE", "LONGITUDE", "DESCRIPTION", "CRIME_TYPE"]
DB_COL_TYPES = ["TEXT", "REAL", "REAL", "TEXT", "TEXT"]
def list_files(startpath):
full_file_paths = []
for root, directories, filenames in os.walk(startpath):
for filename in filenames:
if filename.endswith('.csv'):
full_file_paths.append(os.path.join(root, filename))
return full_file_paths
def create_connection(db_file):
""" create a database connection to a SQLite database """
try:
conn = sqlite3.connect(db_file)
print(sqlite3.version)
return conn
except Error as e:
print(e)
def create_crime_table(db_conn):
cursor = db_conn.cursor()
cursor.execute("CREATE TABLE {tn} (ID INTEGER)".format(tn=TABLE_NAME))
for (col_name, col_type) in zip(DB_COL_NAMES, DB_COL_TYPES):
cursor.execute("ALTER TABLE {tn} ADD COLUMN {cn} {ct}".format(tn=TABLE_NAME, cn=col_name, ct=col_type))
def move_csv_to_sql(sql_conn, csv_file_paths):
for file_path in csv_file_paths:
print(file_path)
crime_df = pd.read_csv(file_path, usecols=CSV_COL_NAMES)
crime_df.columns = DB_COL_NAMES
print(crime_df.shape)
crime_df.to_sql(TABLE_NAME, sql_conn, if_exists='append', index_label="ID")
def get_specific_crimes(db_conn, crime_type, start_date, end_date):
query = """
SELECT * FROM {tn}
WHERE {tn}.CRIME_TYPE='{ct}' AND LATITUDE IS NOT NULL
""".format(tn=TABLE_NAME, ct=crime_type)
crime_all_period = pd.read_sql(query, db_conn, parse_dates=["MONTH"], index_col="ID")
return crime_all_period[(crime_all_period['MONTH'] >= start_date) & (crime_all_period['MONTH'] <= end_date)]
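# Illustrative usage sketch (path, crime type and dates are placeholders): query
# the database built by the CLI entry point below.
def _example_query(db_path="data/processed/crime.db"):
    conn = create_connection(db_path)
    burglaries = get_specific_crimes(conn, "Burglary", "2015-01-01", "2015-12-31")
    conn.close()
    return burglaries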
def bootstrap_scripts(input_filepath, output_filepath):
db_conn = create_connection(output_filepath)
try:
create_crime_table(db_conn)
except sqlite3.OperationalError as error:
print(error)
all_crime_csv_file_paths = list_files(input_filepath)
move_csv_to_sql(db_conn, all_crime_csv_file_paths)
db_conn.close()
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('Making final data set from raw data')
if os.path.isfile(output_filepath):
logger.info("Removing previous database.")
os.remove(output_filepath)
bootstrap_scripts(input_filepath, output_filepath)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
load_dotenv(find_dotenv())
main()
```
#### File: src/inference/priors.py
```python
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.gaussian_process.kernels import Matern, RBF
from src.numeric.kronecker_algebra import kron_invert_matrices, kron_mv_prod, kron_log_det
class BetaPriorWithIntercept:
"""
Normal-InvGamma prior for the regression coefficients. The intercept is given a uniform (improper) prior.
"""
def __init__(self, a=1, b=0.001):
self.a = a
self.b = b
def log_pdf(self, beta, J):
non_intercept_beta = np.delete(beta, np.arange(0, beta.shape[0], J))
output = (-self.a - 0.5) * np.sum(np.log(0.5 * np.square(non_intercept_beta) + self.b))
return output
def nabla_beta_log_pdf(self, beta, J):
"""
Return the gradient of the log of the pdf of the prior with respect to the beta values.
"""
part1 = (-self.a - 0.5) * beta
part2 = 0.5 * np.square(beta) + self.b
gradient = np.divide(part1, part2)
gradient[::J] = 0 # intercept has uniform prior
return gradient
class BetaPriorNoIntercept:
"""
Normal-InvGamma prior for the regression coefficients which do not include the intercept.
"""
def __init__(self, a=1, b=0.001):
self.a = a
self.b = b
def log_pdf(self, beta):
"""
Return the log of the pdf of the prior for the regression coefficients.
"""
output = (-self.a - 0.5) * np.sum(np.log(0.5 * np.square(beta) + self.b))
return output
def nabla_beta_log_pdf(self, beta):
"""
Return the gradient of the log of the pdf of the prior with respect to the beta values.
"""
part1 = (-self.a - 0.5) * beta
part2 = 0.5 * np.square(beta) + self.b
gradient = np.divide(part1, part2)
return gradient
class GaussianPrior:
"""A generic prior for N iid Gaussian random variables"""
def __init__(self, mean, variance):
self.mean = mean
self.variance = variance
def log_pdf(self, x):
output = -0.5 * np.dot(np.square(x - self.mean), 1 / self.variance)
return output
def nabla_x_log_pdf(self, x):
"""
The gradient of the log-pdf with respect to the values of x
"""
output = -1 * np.divide(x - self.mean, self.variance)
return output
class GPPrior:
"""
Generic class for the zero-mean Gaussian process prior with the covariance function that has parameters theta.
"""
def __init__(self):
pass
def get_cov_matrix(self, *, variance=1, lengthscale=1):
pass
def get_logpdf(self, *, variance=1, lengthscale=1, f=None):
pass
def get_nabla_f(self, *, variance=None, lengthscale=None, f=None):
pass
def get_nabla_theta(self, *, variance=None, lengthscale=None, f=None):
pass
class GPGridPriorMatern:
"""
Gaussian process prior for a 2D grid with zero-mean and Matern covariance function.
Given that the domain is a grid, Kronecker algebra can be used to speed up the computations. Before reading the
code, we recommend reading the relevant parts of the PhD thesis of Yunus Saatci:
Saatci, Yunus. 2012. ‘Scalable Inference for Structured Gaussian Process Models’. PhD Thesis, Citeseer.
"""
def __init__(self, coord_x=None, coord_y=None, smoothness=1.5):
self.smoothness = smoothness
self.x_coordinates = np.reshape(coord_x, (-1, 1))
self.y_coordinates = np.reshape(coord_y, (-1, 1))
def get_cov_matrix(self, *, variance=1, lengthscale=1, expanded=False):
k1 = Matern(length_scale=lengthscale, nu=self.smoothness)
k2 = Matern(length_scale=lengthscale, nu=self.smoothness)
# we need to split the signal variance into two parts
K1 = np.sqrt(variance) * k1(self.x_coordinates)
K2 = np.sqrt(variance) * k2(self.y_coordinates)
return np.kron(K1, K2) if expanded else [K1, K2]
def get_logpdf(self, *, variance=1, lengthscale=1, f=None):
"""
The log-pdf of the GP
"""
Ks = self.get_cov_matrix(variance=variance, lengthscale=lengthscale)
log_det_K = kron_log_det(Ks)
Ks_inv = kron_invert_matrices(Ks)
return -0.5 * log_det_K - 0.5 * np.dot(f, kron_mv_prod(Ks_inv, f))
def get_nabla_f(self, *, variance=None, lengthscale=None, f=None):
"""
Gradient of the log-pdf of the GP with respect to the GP values.
"""
Ks = self.get_cov_matrix(variance=variance, lengthscale=lengthscale)
Ks_inv = kron_invert_matrices(Ks)
output = -1 * kron_mv_prod(Ks_inv, f)
return output
def get_nabla_theta(self, *, variance=1, lengthscale=1, f=None):
"""
Gradient of the log-pdf of the GP with respect to the hyper-parameters.
"""
k1 = Matern(length_scale=lengthscale, nu=self.smoothness)
k2 = Matern(length_scale=lengthscale, nu=self.smoothness)
C1, C_grad_1 = k1(self.x_coordinates, eval_gradient=True)
C2, C_grad_2 = k2(self.y_coordinates, eval_gradient=True)
# we need to split the signal variance into two parts
K1 = np.sqrt(variance) * C1
K2 = np.sqrt(variance) * C2
Ks = [K1, K2]
K_invs = kron_invert_matrices(Ks)
K1_nabla_var = (0.5 / np.sqrt(variance)) * C1
K2_nabla_var = (0.5 / np.sqrt(variance)) * C2
K1_nabla_l = np.sqrt(variance) * C_grad_1[:, :, 0]
K2_nabla_l = np.sqrt(variance) * C_grad_2[:, :, 0]
trace_comp_lengtscale = -0.5 * (
np.trace(np.dot(K_invs[0], K1_nabla_l)) * np.trace(np.dot(K_invs[1], Ks[1])) + np.trace(
np.dot(K_invs[1], K2_nabla_l)) * np.trace(np.dot(K_invs[0], Ks[0])))
trace_comp_var = -0.5 * (
np.trace(np.dot(K_invs[0], K1_nabla_var)) * np.trace(np.dot(K_invs[1], Ks[1])) + np.trace(
np.dot(K_invs[1], K2_nabla_var)) * np.trace(np.dot(K_invs[0], Ks[0])))
# non-trace component l
temp = kron_mv_prod(K_invs, f)
temp = kron_mv_prod([K1_nabla_l, K2], temp) + kron_mv_prod([K1, K2_nabla_l], temp)
temp = kron_mv_prod(K_invs, temp)
non_trace_lengthscale = 0.5 * np.dot(f, temp)
# non-trace component var
temp = kron_mv_prod(K_invs, f)
temp = kron_mv_prod([K1_nabla_var, K2], temp) + kron_mv_prod([K1, K2_nabla_var], temp)
temp = kron_mv_prod(K_invs, temp)
non_trace_var = 0.5 * np.dot(f, temp)
return np.asarray([trace_comp_var + non_trace_var, trace_comp_lengtscale + non_trace_lengthscale])
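# Illustrative check (an assumption, not used by the priors): the Kronecker
# identity the grid priors exploit -- (K1 kron K2) f can be evaluated from the
# small factors without ever forming the full covariance matrix.
def _example_kron_identity(K1, K2, f):
    """Compare kron_mv_prod against the dense product (small inputs only)."""
    dense = np.dot(np.kron(K1, K2), f)
    structured = kron_mv_prod([K1, K2], f)
    return np.allclose(dense, structured)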
class GPNonGridPriorSqExp:
"""
Gaussian process prior for an arbitrary 2D domain with zero-mean and squared exponential covariance function.
"""
def __init__(self, coord_x=None, coord_y=None):
self.x_coordinates = coord_x
self.y_coordinates = coord_y
self.cov_func = RBF
def get_cov_matrix(self, *, variance=1, lengthscale=1):
self.XY = np.stack((self.x_coordinates, self.y_coordinates), axis=1)
distances = cdist(self.XY, self.XY) / lengthscale
K = np.sqrt(variance) * np.exp(-0.5 * np.square(distances))
return K
def get_logpdf(self, *, variance=1, lengthscale=1, f=None):
K = self.get_cov_matrix(variance=variance, lengthscale=lengthscale)
sign, val = np.linalg.slogdet(K)
log_det_K = sign * val
K_inv = np.linalg.pinv(K)
return -0.5 * log_det_K - 0.5 * np.dot(f, np.dot(K_inv, f))
def get_nabla_f(self, *, variance=None, lengthscale=None, f=None):
"""
Gradient of the log-pdf of the GP with respect to the GP values.
"""
K = self.get_cov_matrix(variance=variance, lengthscale=lengthscale)
K_inv = np.linalg.pinv(K)
output = -1 * np.dot(K_inv, f)
return output
def get_nabla_theta(self, *, variance=1, lengthscale=1, f=None):
"""
Gradient of the log-pdf of the GP with respect to the hyper-parameters.
"""
K = self.get_cov_matrix(variance=variance, lengthscale=lengthscale)
K_inv = np.linalg.pinv(K)
K_nabla_theta = (1 / lengthscale ** 3) * K
K_inv__time__K_nabla_theta = np.dot(K_inv, K_nabla_theta)
part1 = 0.5 * np.dot(f, np.dot(K_inv__time__K_nabla_theta, np.dot(K_inv, f)))
part2 = 0.5 * np.trace(K_inv__time__K_nabla_theta)
return np.asarray([part1 - part2])
class GPNonGridPriorSqExpFixed:
"""
Gaussian process prior for an arbitrary 2D domain with zero-mean and squared exponential covariance function, but
with fixed hyper-parameters.
"""
def __init__(self, coord_x=None, coord_y=None, variance=1, lengthscale=1):
self.cov_func = RBF
self.XY = np.stack((coord_x, coord_y), axis=1)
distances = cdist(self.XY, self.XY) / lengthscale
self.K = np.sqrt(variance) * np.exp(-0.5 * np.square(distances))
self.K_inv = np.linalg.pinv(self.K)
def get_logpdf(self, *, f=None):
return - 0.5 * np.dot(f, np.dot(self.K_inv, f))
def get_nabla_f(self, *, f=None):
"""
Gradient of the log-pdf of the GP with respect to the GP values.
"""
output = -1 * np.dot(self.K_inv, f)
return output
class GPGridPriorSqExp:
"""
Gaussian process prior for a 2D grid with zero-mean and squared exponential covariance function.
Given that the domain is a grid, Kronecker algebra can be used to speed up the computations. Before reading the
code, we recommend reading the relevant parts of the PhD thesis of Yunus Saatci:
Saatci, Yunus. 2012. ‘Scalable Inference for Structured Gaussian Process Models’. PhD Thesis, Citeseer.
"""
def __init__(self, coord_x=None, coord_y=None):
self.x_coordinates = np.reshape(coord_x, (-1, 1))
self.y_coordinates = np.reshape(coord_y, (-1, 1))
self.cov_func = RBF
def get_cov_matrix(self, *, variance=1, lengthscale=1, expanded=False):
distances_X = cdist(self.x_coordinates, self.x_coordinates) / lengthscale
K1 = np.sqrt(variance) * np.exp(-0.5 * np.square(distances_X))
distances_Y = cdist(self.y_coordinates, self.y_coordinates) / lengthscale
K2 = np.sqrt(variance) * np.exp(-0.5 * np.square(distances_Y))
return np.kron(K1, K2) if expanded else [K1, K2]
def get_logpdf(self, *, variance=1, lengthscale=1, f=None):
Ks = self.get_cov_matrix(variance=variance, lengthscale=lengthscale)
log_det_K = kron_log_det(Ks)
Ks_inv = kron_invert_matrices(Ks)
return -0.5 * log_det_K - 0.5 * np.dot(f, kron_mv_prod(Ks_inv, f))
def get_nabla_f(self, *, variance=None, lengthscale=None, f=None):
"""
Gradient of the log-pdf of the GP with respect to the GP values.
"""
Ks = self.get_cov_matrix(variance=variance, lengthscale=lengthscale)
Ks_inv = kron_invert_matrices(Ks)
output = -1 * kron_mv_prod(Ks_inv, f)
return output
def get_nabla_theta(self, *, variance=1, lengthscale=1, f=None):
"""
Gradient of the log-pdf of the GP with respect to the hyper-parameters.
"""
distances_X = cdist(self.x_coordinates, self.x_coordinates) / lengthscale
C1 = np.exp(-0.5 * np.square(distances_X))
C_grad_1 = np.multiply(C1, np.square(distances_X) / lengthscale)
distances_Y = cdist(self.y_coordinates, self.y_coordinates) / lengthscale
C2 = np.exp(-0.5 * np.square(distances_Y))
C_grad_2 = np.multiply(C2, np.square(distances_Y) / lengthscale)
# we need to split the signal variance into two parts
K1 = np.sqrt(variance) * C1
K2 = np.sqrt(variance) * C2
Ks = [K1, K2]
K_invs = kron_invert_matrices(Ks)
K1_nabla_var = (0.5 / np.sqrt(variance)) * C1
K2_nabla_var = (0.5 / np.sqrt(variance)) * C2
K1_nabla_l = np.sqrt(variance) * C_grad_1
K2_nabla_l = np.sqrt(variance) * C_grad_2
trace_comp_lengtscale = -0.5 * (
np.trace(np.dot(K_invs[0], K1_nabla_l)) * np.trace(np.dot(K_invs[1], Ks[1])) + np.trace(
np.dot(K_invs[1], K2_nabla_l)) * np.trace(np.dot(K_invs[0], Ks[0])))
trace_comp_var = -0.5 * (
np.trace(np.dot(K_invs[0], K1_nabla_var)) * np.trace(np.dot(K_invs[1], Ks[1])) + np.trace(
np.dot(K_invs[1], K2_nabla_var)) * np.trace(np.dot(K_invs[0], Ks[0])))
# non-trace component l
temp = kron_mv_prod(K_invs, f)
temp = kron_mv_prod([K1_nabla_l, K2], temp) + kron_mv_prod([K1, K2_nabla_l], temp)
temp = kron_mv_prod(K_invs, temp)
non_trace_lengthscale = 0.5 * np.dot(f, temp)
# non-trace component var
temp = kron_mv_prod(K_invs, f)
temp = kron_mv_prod([K1_nabla_var, K2], temp) + kron_mv_prod([K1, K2_nabla_var], temp)
temp = kron_mv_prod(K_invs, temp)
non_trace_var = 0.5 * np.dot(f, temp)
return np.asarray([trace_comp_var + non_trace_var, trace_comp_lengtscale + non_trace_lengthscale])
```
#### File: src/models/block_mixture_flat.py
```python
import logging
import os
import pickle
import sys
from pathlib import Path
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import zsampler
from dotenv import load_dotenv, find_dotenv
from src.inference.context_geo import GridContextGeo
from src.inference.hmc import HMCSampler
from src.inference.priors import BetaPriorWithIntercept
from src.experiment.model_management import build_block_mixture_flat_uid
from src.experiment.visualize import plot_traceplots
class FlatRegionMixture:
def __init__(self, *, uid=None,
grid_context=None,
K=1,
thinning=5,
n_info_interval=1000,
block_type="msoa"):
self.uid = uid
self.context = grid_context
self.K = K
self.NN = self.context.mask.shape[0]
self.thinning = thinning
self.n_info_interval = n_info_interval
N = grid_context.counts.shape[0]
# do a random assignment to mixtures
initial_Z = np.zeros((N, K), dtype=int)
initial_Z[np.arange(N), np.random.choice(K, N)] = 1
# Create an (N x 1) vector which gives the corresponding block for each cell.
if block_type == "lad":
block_assignment = np.asarray(grid_context.lads)
elif block_type == "msoa":
block_assignment = np.asarray(grid_context.msoas)
elif block_type == "ward":
block_assignment = np.asarray(grid_context.wards)
elif block_type == "lsoa":
block_assignment = np.asarray(grid_context.lsoas)
else:
block_assignment = np.repeat(1, N) # a single block
unique_block_labels = np.unique(block_assignment)
self.block_assignment_numeric = np.zeros(block_assignment.shape[0], dtype=np.int)
for idx_cell, block_label in enumerate(block_assignment):
self.block_assignment_numeric[idx_cell] = np.where(unique_block_labels == block_label)[0]
self.block_assignment = block_assignment
self.B = np.max(self.block_assignment_numeric) + 1
# Create B x K matrix which counts number of cells in a block b with assignment k.
self.block_label_counts = np.zeros(shape=(self.B, K), dtype=np.int64)
for i in range(N):
self.block_label_counts[self.block_assignment_numeric[i], :] += initial_Z[i, :]
# Beta prior
self.beta_prior = BetaPriorWithIntercept(a=1, b=0.01)
self.Z = initial_Z
self.Z_samples = []
self.alpha_samples = []
self.beta_samples = []
self.logger = logging.getLogger(__name__)
def loglik(self, estimand):
""" Compute log p(y | beta, Z) + log p(beta)"""
J = self.context.J
K = self.K
covariates = self.context.covariates
counts = self.context.counts
beta = estimand[:(J * K)]
beta_matrix = beta.reshape((J, K), order='F')
fixed_effects = np.sum(np.multiply(self.Z, np.dot(covariates, beta_matrix)), axis=1)
poisson_part = np.sum(np.multiply(counts, fixed_effects) - np.exp(fixed_effects))
self.logger.debug(f"Poisson part: {poisson_part}")
beta_part = self.beta_prior.log_pdf(beta, J)
output = poisson_part + beta_part
return output
def nabla_loglik(self, estimand):
""" Compute the gradient of log p(y | beta, Z) + log p(beta) with respect to beta"""
J = self.context.J
K = self.K
covariates = self.context.covariates
counts = self.context.counts
beta = estimand[:(J * K)]
beta_matrix = beta.reshape((J, K), order='F')
fixed_effects = np.sum(np.multiply(self.Z, np.dot(covariates, beta_matrix)), axis=1)
# nabla beta
nabla_beta_matrix = np.zeros(beta_matrix.shape)
nabla_beta_matrix += np.dot(covariates.T, self.Z * counts[:, np.newaxis])
temp = np.exp(fixed_effects)
nabla_beta_matrix += (- np.dot(covariates.T, self.Z * temp[:, np.newaxis]))
nabla_beta = nabla_beta_matrix.flatten('F')
nabla_beta += self.beta_prior.nabla_beta_log_pdf(beta, J)
output = nabla_beta
return output
def plot_traces(self, hmc_samples):
samples_array = np.asarray(hmc_samples)
S = samples_array.shape[0]
J = self.context.J
N = self.context.N
K = self.K
# discard irrelevant samples
self.Z_samples = self.Z_samples[(-S):]
Z_samples_array = np.asarray(self.Z_samples)
mixture_allocation = np.zeros((S, N, K))
mixture_allocation[np.repeat(range(S), N), np.tile(range(N), S), Z_samples_array.flatten(order='C')] = 1
average_alloc = np.mean(mixture_allocation, axis=0)
for k in range(self.K):
plt.figure()
self.context.plot_realisations(average_alloc[:, k], 111)
plt.show()
beta_k_samples = samples_array[:, (k * J):((k + 1) * J)]
plot_traceplots(beta_k_samples, self.context.covariates_names)
plt.show()
k_component_indices = np.where(average_alloc[:, k] > (1 / K))[0]
# Fitted surface
fitted_surface_map = np.multiply(average_alloc[:, k],
np.dot(self.context.covariates, np.mean(beta_k_samples, axis=0)))
plt.figure()
self.context.plot_realisations(fitted_surface_map, 111, plot_title="Log-intensity fitted")
plt.show()
# Correlation Matrix
crime_surface_k = np.dot(self.context.covariates[k_component_indices, :],
np.mean(beta_k_samples, axis=0))
surface_vars = np.concatenate((np.log(1 + self.context.counts[k_component_indices]).reshape(-1, 1),
crime_surface_k.reshape(-1, 1),
self.context.covariates[k_component_indices, 1:]), axis=1)
surface_vars_df = pd.DataFrame(surface_vars)
surface_vars_df.columns = ['log-y', 'log-fitted'] + self.context.covariates_names[1:]
corr = surface_vars_df.corr()
f, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(corr, annot=True, linewidths=.5, ax=ax)
plt.show()
def load_samples_snapshot(self, iteration_no):
beta_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"beta-samples--{self.uid}--{iteration_no}.npy"
z_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"Z-samples--{self.uid}--{iteration_no}.npy"
alpha_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"alpha-samples--{self.uid}--{iteration_no}.npy"
beta_samples = np.load(beta_filepath)
z_samples = np.load(z_filepath)
try:
alpha_samples = np.load(alpha_filepath)
return beta_samples, z_samples, alpha_samples
except FileNotFoundError:
alpha_samples = np.zeros((1, self.K))
return beta_samples, z_samples, alpha_samples
def get_initial_estimand(self):
beta = np.random.normal(0, 1, self.context.J * self.K)
return beta
def get_mass_matrix_diag(self):
beta_m_diag = 5e2 * np.ones(self.context.J * self.K)
return beta_m_diag
def __save_output(self, iteration):
folder_name = Path(os.getcwd()) / "models" / "snapshots"
if not os.path.exists(folder_name):
os.makedirs(folder_name)
Z_full_path = folder_name / f"Z-samples--{self.uid}--{iteration}"
Z_samples_array = np.asarray(self.Z_samples)
if Z_samples_array.shape[0] > 0:
np.save(Z_full_path, Z_samples_array[::self.thinning, :])
alpha_full_path = folder_name / f"alpha-samples--{self.uid}--{iteration}"
alpha_array = np.asarray(self.alpha_samples)
if alpha_array.shape[0] > 0:
np.save(alpha_full_path, alpha_array[::self.thinning, :])
beta_full_path = folder_name / f"beta-samples--{self.uid}--{iteration}"
beta_array = np.asarray(self.beta_samples)
if beta_array.shape[0] > 0:
np.save(beta_full_path, beta_array[::self.thinning, :])
def run_sampling(self, beta_sampler, number_of_iterations):
N = self.context.N
J = self.context.J
K = self.K
B = self.block_label_counts.shape[0]
alpha = np.repeat(1/K, K)
covariates = self.context.covariates
counts = self.context.counts
iteration = 0
while iteration < number_of_iterations:
##########################################################################################
# BOOKKEEPING
##########################################################################################
# The HMC sampler is adaptive and therefore will discard samples during the adaptive phase.
if len(beta_sampler.samples) < len(self.Z_samples):
num_current_samples = len(beta_sampler.samples)
self.Z_samples = self.Z_samples[(-num_current_samples):]
self.alpha_samples = self.alpha_samples[(-num_current_samples):]
self.beta_samples = beta_sampler.samples
if (iteration + 1) % self.n_info_interval == 0:
self.__save_output(iteration)
##########################################################################################
# SAMPLE ALPHA
##########################################################################################
# We fix alpha to 1/K so there is no need for sampling. If we chose to treat alpha
# as random, its samples would have to be saved, so we keep the bookkeeping below for that eventuality.
self.alpha_samples.append(alpha)
self.logger.debug(f"Alpha: {alpha[0]}")
##########################################################################################
# SAMPLE BETA
##########################################################################################
beta_sampler.sample_one()
##########################################################################################
# SAMPLE Z
##########################################################################################
current_hmc_estimand = beta_sampler.estimand
current_beta = current_hmc_estimand[:(J * K)].reshape((J, K), order='F')
Z_new = zsampler.sample_region(self.Z.astype(np.int64),
counts.astype(np.int64),
covariates.astype(np.float64),
current_beta.astype(np.float64),
alpha.astype(np.float64),
self.block_assignment_numeric.astype(np.int64),
self.block_label_counts)
self.Z_samples.append(np.where(Z_new > 0)[1])
self.Z = Z_new
iteration += 1
self.logger.info("Sampling completed - saving model.")
self.beta_samples = beta_sampler.samples
self.__save_output(iteration)
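# Illustrative post-processing sketch (an assumption; the iteration number is a
# placeholder): load a snapshot written by __save_output and summarise the
# posterior over beta as a (J x K) matrix, matching the ordering used in loglik.
def _example_posterior_beta_mean(model, iteration_no):
    beta_samples, z_samples, alpha_samples = model.load_samples_snapshot(iteration_no)
    beta_mean = beta_samples.mean(axis=0)
    return beta_mean.reshape((model.context.J, model.K), order='F')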
@click.command()
@click.option('--year', '-y', type=str, default='12015-122015')
@click.option('--type', '-t', default='burglary')
@click.option('--resolution', '-r', type=int, default=400)
@click.option('--model_name', '-m', type=str, default='burglary_raw_0')
@click.option('--interpolation', '-i', type=str, default='weighted')
@click.option('--num_mixtures', '-K', type=int, default=3)
@click.option('--uid', type=str, default=None)
@click.option('--verbose', is_flag=True)
@click.option('--block_type', type=str, default="msoa")
@click.option('--collection_unit', type=str, default="lsoa")
def main(year, type, resolution, model_name, interpolation, num_mixtures, uid, verbose,
block_type, collection_unit):
if uid is None:
uid = build_block_mixture_flat_uid(prefix="BLOCK-MIXTURE-FLAT", chain_no=1, block_scheme=block_type,
c_type=type, t_period=year, model_spec=model_name,
cov_interpolation=interpolation, resolution=resolution, K=num_mixtures)
log_fmt = '[%(levelname)s] [%(asctime)s] [%(name)s] %(message)s'
datefmt = '%H:%M:%S'
if verbose:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=log_fmt)
else:
logging.basicConfig(filename=Path('models') / f"log-{uid}.log",
filemode='a',
format=log_fmt,
datefmt=datefmt,
level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.info("Building the context.")
grid_context = GridContextGeo(interpolation=interpolation,
year=year,
resolution=resolution,
crime_type=type,
model_name=model_name,
cov_collection_unit=collection_unit,
covariates_type='raw')
logger.info("Writing sampling context into a file.")
context_filename = Path(os.getcwd()) / "models" / f"context--{uid}.pickle"
with open(context_filename, 'wb') as context_file:
context_info = {
'context': grid_context,
'K': num_mixtures
}
pickle.dump(context_info, context_file)
logger.info("Initialising the model with estimand and mass matrix diagonal")
model = FlatRegionMixture(uid=uid,
grid_context=grid_context,
K=num_mixtures,
n_info_interval=50_000,
thinning=10,
block_type=block_type)
init_estimand = model.get_initial_estimand()
mass_matrix_diag = model.get_mass_matrix_diag()
logger.info("Launching HMC sampler.")
hmc_all_iterations = 120_000
sampler = HMCSampler(func_lpdf=model.loglik,
func_nabla_lpdf=model.nabla_loglik,
func_plot=model.plot_traces if verbose else None,
init_estimand=init_estimand,
init_M_diag=mass_matrix_diag,
init_L=10,
init_epsilon=5.0e-2,
n_burnin=30_000,
n_calib=60_000,
S=hmc_all_iterations,
n_info_interval=model.n_info_interval,
thinning=model.thinning,
unique_estimation_id=uid,
adaptive=True)
if verbose:
plt.figure()
grid_context.plot_realisations(np.log(model.block_assignment_numeric + 1), 111)
plt.show()
model.run_sampling(sampler, hmc_all_iterations)
logger.info("Procedure finished.")
if __name__ == "__main__":
load_dotenv(find_dotenv())
main()
``` |
{
"source": "jp20indian/HacktoberFest2021",
"score": 3
} |
#### File: jp20indian/HacktoberFest2021/weather_notifier.py
```python
import requests
from bs4 import BeautifulSoup
from win10toast import ToastNotifier
# create an object to ToastNotifier class
n = ToastNotifier()
# helper that downloads the raw HTML for the given url
def getdata(url):
r = requests.get(url)
return r.text
htmldata = getdata("https://weather.com/en-IN/weather/today/l/25.59,85.14?par=google&temp=c/")
soup = BeautifulSoup(htmldata, 'html.parser')
current_temp = soup.find_all("span", class_= "_-_-components-src-organism-CurrentConditions-CurrentConditions--tempValue--MHmYY")
chances_rain = soup.find_all("div", class_= "_-_-components-src-organism-CurrentConditions-CurrentConditions--precipValue--2aJSf")
temp = (str(current_temp))
temp_rain = str(chances_rain)
result = "current_temp " + temp[128:-9] + " in patna bihar" + "\n" + temp_rain[131:-14]
n.show_toast("live Weather update",
result, duration = 10)
``` |
{
"source": "jp2321/dl4cv_dev",
"score": 3
} |
#### File: dl4cv_dev/exercises/solution_03_01.py
```python
from tensorflow.keras import layers, models, datasets
import numpy as np
#(X_train, y_train), (X_test,y_test) = datasets.cifar10.load_data()
X_train = np.zeros((60000,32,32))
X_train_reshape=np.reshape(X_train,(60000, 1024))
def model():
input_layer = layers.Input(shape=(1024,))
hidden_layer = layers.Dense(10, activation="relu") (input_layer)
hidden_layer = layers.Dense(20, activation="relu") (hidden_layer)
output_layer = layers.Dense(10, activation="softmax") (hidden_layer)
m = models.Model(input_layer, output_layer)
m.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["acc"])
return m
model = model() # get model
print(model.summary())
```
#### File: dl4cv_dev/exercises/solution_04_01.py
```python
from tensorflow.keras import layers, models, datasets, optimizers
import numpy as np
def neural_network():
input_ = layers.Input(shape=(32,32,3))
cnn = layers.Conv2D(16, (3,3), activation="relu") (input_)
cnn = layers.MaxPooling2D() (cnn)
cnn = layers.Conv2D(32, (3,3), activation="relu") (cnn)
cnn = layers.MaxPooling2D() (cnn)
flatten = layers.Flatten() (cnn)
dense = layers.Dense(32, activation="relu") (flatten)
dense = layers.Dense(16, activation="relu") (dense)
output = layers.Dense(10, activation="softmax") (dense)
opt = optimizers.Adam()
m= models.Model(input_, output)
m.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
return m
model = neural_network() # get model
print(model.summary())
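# A hypothetical training run for this CNN (assumes CIFAR-10 is loaded and the labels are
# one-hot encoded):
#   from tensorflow.keras.utils import to_categorical
#   (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
#   X_train, X_test = X_train / 255.0, X_test / 255.0
#   model.fit(X_train, to_categorical(y_train, 10), epochs=5, batch_size=64, validation_split=0.1)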
```
#### File: dl4cv_dev/exercises/test_05_02.py
```python
def test():
from tensorflow.keras import datasets
assert model.layers[3].get_config()["strides"] == (2,2), "Check the stride sizes"
assert model.layers[3].get_config()["filters"] == 16, "Check the number of filters"
    assert model.layers[4].get_config()["filters"] == 16, "Check the number of filters in the bottleneck"
    assert model.layers[4].get_config()["strides"] == (2,2), "Check the stride sizes"
    assert model.layers[7].get_config()["filters"] == 32, "Check the number of filters in the bottleneck"
    assert model.layers[7].get_config()["strides"] == (1,1), "Check the stride sizes"
    assert model.layers[10].get_config()["name"] == "concatenate", "Did you concatenate the results?"
assert model.layers[10].output.shape[3] == 128, "You might missed some layers in the concatenation"
assert model.layers[11].get_config()["strides"]==(1,1), "In the second inception module there is no downsampling anymore"
__msg__.good("WELL DONE!")
```
#### File: dl4cv_dev/exercises/test_05_03.py
```python
def test():
from tensorflow.keras import datasets
    for i in range(0, 10):
        assert transf_model.layers[i].trainable == False, "Are the first 10 layers frozen?"
assert transf_model.layers[19].get_config()["name"] == 'global_average_pooling2d', "Did you use a global average pooling?"
assert transf_model.layers[0].get_config()['batch_input_shape'] == (None, 224, 224, 3) , "Do you use the right input?"
__msg__.good("WELL DONE!")
```
#### File: dl4cv_dev/exercises/test_06_01.py
```python
def test():
from tensorflow.keras import datasets
assert model.get_layer("class_prediction").get_config()["units"]==43, "Check the number of output classes"
assert model.get_layer("class_prediction").get_config()["activation"]=="softmax", "Check your activation function"
    assert model.output[0].name == 'class_prediction/Identity:0', "What does the output look like?"
    assert model.output[2].name == 'y1_prediction/Identity:0', "What does the output look like?"
    assert model.output[3].name == 'x2_prediction/Identity:0', "What does the output look like?"
    assert model.output[4].name == 'y2_prediction/Identity:0', "What does the output look like?"
assert model.get_layer("y1_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("x2_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("y2_prediction").get_config()["units"]==1, "Check the number of outputs"
assert model.get_layer("x1_prediction").get_config()["units"]==1, "Check the number of outputs"
__msg__.good("WELL DONE!")
``` |
{
"source": "Jp29tkDg79/samplewebsite",
"score": 3
} |
#### File: samplewebsite/python/samplesite.py
```python
from flask import Flask
from flask import render_template
from flask import request
from flask import url_for
import os
from database import person
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
return render_template('login.html', info='')
if request.method == 'POST':
# get data and check data
username = request.form.get('username')
password = request.form.get('password')
info = ''
if username == '':
info = 'ユーザ名が未入力です'
elif password == '':
info = 'パスワードが未入力です'
else:
# create persons object
persondb = person.persondb()
# check login data
match_count = persondb.check_login(username, password)
if match_count == 1:
return viewhome(username)
else:
info = '登録されていません'
return render_template('login.html', info=info)
@app.route('/newentry', methods=['GET', 'POST'])
def newuser():
if request.method == 'GET':
return render_template('newentry.html', info='')
if request.method == 'POST':
# get data and check data
username = request.form.get('username')
password = request.form.get('password')
info = ''
if username == '':
info = 'ユーザ名が未入力です'
elif 14 < len(username):
info = 'ユーザ名は14文字内で入力してください'
elif password == '':
info = 'パスワードが未入力です'
elif password != request.form.get('re<PASSWORD>'):
info = '入力したパスワードが異なります 再度入力してください'
else:
# create persons object
persondb = person.persondb()
# insert data
err = persondb.insert(username, password)
if err == '':
return viewhome(username)
else:
info = '既に登録されています'
return render_template('newentry.html', info=info)
@app.route('/change_pw/<username>', methods=['GET', 'POST'])
def change_pw(username):
if request.method == 'GET':
return render_template('change_pw.html', username=username, info='')
if request.method == 'POST':
befor_pw = request.form.get('befor_pw')
after_pw = request.form.get('after_pw')
info = ''
if befor_pw == '':
info = '変更前のパスワードが入力されていません'
elif after_pw == '':
info = '変更後のパスワードが入力されていません'
# check password
elif after_pw != request.form.get('check_pw'):
info = '変更後と再確認のパスワードが相違しています'
else:
# create person object
persondb = person.persondb()
err = persondb.update(username, befor_pw, after_pw)
if err == '':
return viewhome(username)
else:
info = '変更前のパスワードが誤っています'
return render_template('change_pw.html', username=username, info=info)
@app.route('/home/<username>', methods=['GET'])
def viewhome(username):
return render_template('home.html', username=username)
@app.context_processor
def override_url_for():
return dict(url_for=dated_url_for)
def dated_url_for(endpoint, **values):
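    # Cache-busting helper: for static files, append the file's modification time as a
    # 'q' query parameter so browsers re-fetch an asset whenever it changes on disk.
    # For example (hypothetical filename), url_for('static', filename='style.css') could
    # yield '/static/style.css?q=1700000000'.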
if endpoint == 'static':
filename = values.get('filename', None)
if filename:
file_path = os.path.join(app.root_path, endpoint, filename)
values['q'] = int(os.stat(file_path).st_mtime)
return url_for(endpoint, **values)
def main(debug=False):
app.run(host='0.0.0.0', port='5000', debug=debug)
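# Typical (hypothetical) entry point for running the development server directly:
#   if __name__ == '__main__':
#       main(debug=True)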
``` |
{
"source": "jp30101995/AWS-practise",
"score": 2
} |
#### File: pythonDeployment/lib/sensim.py
```python
import pandas as pd
import numpy as np
import scipy
import math
import os
import matplotlib.pyplot as plt
import seaborn as sns
import requests
import nltk
import gensim
import csv
from sklearn.metrics.pairwise import cosine_similarity
from collections import Counter
import math
from nltk import word_tokenize
import gensim.downloader as api
from gensim.models import Word2Vec
from gensim.scripts.glove2word2vec import glove2word2vec
import argparse
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from nltk.corpus import wordnet as wn
import re
from subprocess import check_output
from nltk.metrics import edit_distance
from nltk.stem.porter import PorterStemmer
def callMe():
return 'you called me'
#method 1
def run_avg_benchmark(sentences1, sentences2, model=None, use_stoplist=False, doc_freqs=None):
if doc_freqs is not None:
N = doc_freqs["NUM_DOCS"]
sims = []
for (sent1, sent2) in zip(sentences1, sentences2):
tokens1 = sent1.tokens_without_stop if use_stoplist else sent1
tokens2 = sent2.tokens_without_stop if use_stoplist else sent2
tokens1 = [token for token in tokens1 if token in model]
tokens2 = [token for token in tokens2 if token in model]
if len(tokens1) == 0 or len(tokens2) == 0:
sims.append(0)
continue
tokfreqs1 = Counter(tokens1)
tokfreqs2 = Counter(tokens2)
weights1 = [tokfreqs1[token] * math.log(N/(doc_freqs.get(token, 0)+1))
for token in tokfreqs1] if doc_freqs else None
weights2 = [tokfreqs2[token] * math.log(N/(doc_freqs.get(token, 0)+1))
for token in tokfreqs2] if doc_freqs else None
embedding1 = np.average([model[token] for token in tokfreqs1], axis=0, weights=weights1).reshape(1, -1)
embedding2 = np.average([model[token] for token in tokfreqs2], axis=0, weights=weights2).reshape(1, -1)
sim = cosine_similarity(embedding1, embedding2)[0][0]
sims.append(sim)
print (sum(sims) / float(len(sims)))
return sims
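# Example usage (sketch; assumes a loaded word-vector model such as the `word2vec`
# KeyedVectors defined near the bottom of this file, and pre-tokenised sentences):
#   sims = run_avg_benchmark([["a", "dog", "runs"]], [["a", "cat", "sleeps"]], model=word2vec)
# Each entry of `sims` is the cosine similarity between the averaged word embeddings of a
# sentence pair.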
#method 3
def findSentiment(sentense):
client = language.LanguageServiceClient()
document = types.Document(
content=sentense,
type=enums.Document.Type.PLAIN_TEXT)
jsonStr = client.analyze_sentiment(document=document)
return jsonStr.document_sentiment.score
def download_sick(f):
response = requests.get(f).text
lines = response.split("\n")[1:]
lines = [l.split("\t") for l in lines if len(l) > 0]
lines = [l for l in lines if len(l) == 5]
df = pd.DataFrame(lines, columns=["idx", "sent_1", "sent_2", "sim", "label"])
df['sim'] = pd.to_numeric(df['sim'])
return df
def read_tsv(f):
frequencies = {}
with open(f) as tsv:
tsv_reader = csv.reader(tsv, delimiter="\t")
for row in tsv_reader:
frequencies[row[0]] = int(row[1])
return frequencies
def tokenize(q1, q2):
"""
q1 and q2 are sentences/questions. Function returns a list of tokens for both.
"""
return word_tokenize(q1), word_tokenize(q2)
def posTag(q1, q2):
"""
q1 and q2 are lists. Function returns a list of POS tagged tokens for both.
"""
return nltk.pos_tag(q1), nltk.pos_tag(q2)
def stemmer(tag_q1, tag_q2):
    """
    tag_q = tagged lists. Function returns a stemmed list.
    """
    porter = PorterStemmer()  # stem each token with NLTK's Porter stemmer
    stem_q1 = []
    stem_q2 = []
    for token in tag_q1:
        stem_q1.append(porter.stem(token))
    for token in tag_q2:
        stem_q2.append(porter.stem(token))
    return stem_q1, stem_q2
def path(set1, set2):
return wn.path_similarity(set1, set2)
def wup(set1, set2):
return wn.wup_similarity(set1, set2)
def edit(word1, word2):
if float(edit_distance(word1, word2)) == 0.0:
return 0.0
return 1.0 / float(edit_distance(word1, word2))
def computePath(q1, q2):
R = np.zeros((len(q1), len(q2)))
for i in range(len(q1)):
for j in range(len(q2)):
            if q1[i][1] is None or q2[j][1] is None:
sim = edit(q1[i][0], q2[j][0])
else:
sim = path(wn.synset(q1[i][1]), wn.synset(q2[j][1]))
            if sim is None:
sim = edit(q1[i][0], q2[j][0])
R[i, j] = sim
# print R
return R
def computeWup(q1, q2):
R = np.zeros((len(q1), len(q2)))
for i in range(len(q1)):
for j in range(len(q2)):
            if q1[i][1] is None or q2[j][1] is None:
sim = edit(q1[i][0], q2[j][0])
else:
sim = wup(wn.synset(q1[i][1]), wn.synset(q2[j][1]))
            if sim is None:
sim = edit(q1[i][0], q2[j][0])
R[i, j] = sim
# print R
return R
def overallSim(q1, q2, R):
sum_X = 0.0
sum_Y = 0.0
for i in range(len(q1)):
max_i = 0.0
for j in range(len(q2)):
if R[i, j] > max_i:
max_i = R[i, j]
sum_X += max_i
    # Best match for each word of q2 (mirror of the loop above, which covers q1).
    for j in range(len(q2)):
        max_j = 0.0
        for i in range(len(q1)):
            if R[i, j] > max_j:
                max_j = R[i, j]
        sum_Y += max_j
if (float(len(q1)) + float(len(q2))) == 0.0:
return 0.0
overall = (sum_X + sum_Y) / (2 * (float(len(q1)) + float(len(q2))))
return overall
def semanticSimilarity(q1, q2):
tokens_q1, tokens_q2 = tokenize(q1, q2)
# stem_q1, stem_q2 = stemmer(tokens_q1, tokens_q2)
tag_q1, tag_q2 = posTag(tokens_q1, tokens_q2)
sentence = []
for i, word in enumerate(tag_q1):
if 'NN' in word[1] or 'JJ' in word[1] or 'VB' in word[1]:
sentence.append(word[0])
sense1 = Lesk(sentence)
sentence1Means = []
for word in sentence:
sentence1Means.append(sense1.lesk(word, sentence))
sentence = []
for i, word in enumerate(tag_q2):
if 'NN' in word[1] or 'JJ' in word[1] or 'VB' in word[1]:
sentence.append(word[0])
sense2 = Lesk(sentence)
sentence2Means = []
for word in sentence:
sentence2Means.append(sense2.lesk(word, sentence))
# for i, word in enumerate(sentence1Means):
# print sentence1Means[i][0], sentence2Means[i][0]
R1 = computePath(sentence1Means, sentence2Means)
R2 = computeWup(sentence1Means, sentence2Means)
R = (R1 + R2) / 2
# print R
return overallSim(sentence1Means, sentence2Means, R)
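# Example (sketch; requires the NLTK corpora downloaded at the bottom of this file):
#   semanticSimilarity("How do I learn python?", "What is the best way to study python?")
# combines WordNet path and Wu-Palmer similarities between disambiguated word senses and
# returns a score in [0, 1]; higher means more semantically similar.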
def clean_sentence(val):
"remove chars that are not letters or numbers, downcase, then remove stop words"
    regex = re.compile(r'([^\s\w]|_)+')
sentence = regex.sub('', val).lower()
sentence = sentence.split(" ")
for word in list(sentence):
if word in STOP_WORDS:
sentence.remove(word)
sentence = " ".join(sentence)
return sentence
class Lesk(object):
def __init__(self, sentence):
self.sentence = sentence
self.meanings = {}
for word in sentence:
self.meanings[word] = ''
def getSenses(self, word):
# print word
return wn.synsets(word.lower())
def getGloss(self, senses):
gloss = {}
for sense in senses:
gloss[sense.name()] = []
for sense in senses:
gloss[sense.name()] += word_tokenize(sense.definition())
return gloss
def getAll(self, word):
senses = self.getSenses(word)
if senses == []:
return {word.lower(): senses}
return self.getGloss(senses)
def Score(self, set1, set2):
# Base
overlap = 0
# Step
for word in set1:
if word in set2:
overlap += 1
return overlap
def overlapScore(self, word1, word2):
gloss_set1 = self.getAll(word1)
if self.meanings[word2] == '':
gloss_set2 = self.getAll(word2)
else:
# print 'here'
gloss_set2 = self.getGloss([wn.synset(self.meanings[word2])])
# print gloss_set2
score = {}
for i in gloss_set1.keys():
score[i] = 0
for j in gloss_set2.keys():
score[i] += self.Score(gloss_set1[i], gloss_set2[j])
bestSense = None
max_score = 0
for i in gloss_set1.keys():
if score[i] > max_score:
max_score = score[i]
bestSense = i
return bestSense, max_score
def lesk(self, word, sentence):
maxOverlap = 0
context = sentence
word_sense = []
meaning = {}
senses = self.getSenses(word)
for sense in senses:
meaning[sense.name()] = 0
for word_context in context:
if not word == word_context:
score = self.overlapScore(word, word_context)
                if score[0] is None:
continue
meaning[score[0]] += score[1]
if senses == []:
return word, None, None
self.meanings[word] = max(meaning.keys(), key=lambda x: meaning[x])
return word, self.meanings[word], wn.synset(self.meanings[word]).definition()
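# Example (sketch): disambiguating "bank" against a money-related context.
#   sense = Lesk(["deposit", "money", "bank"]).lesk("bank", ["deposit", "money", "bank"])
# `sense` is a (word, synset_name, definition) triple; the synset actually chosen depends
# on the installed WordNet data.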
sick_train = download_sick("https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_train.txt")
sick_dev = download_sick("https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_trial.txt")
sick_test = download_sick("https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_test_annotated.txt")
sick_all = sick_train.append(sick_test).append(sick_dev)
PATH_TO_WORD2VEC = os.path.expanduser("/mount/data/GoogleNews-vectors-negative300.bin")
#PATH_TO_GLOVE = os.path.expanduser("D:\\Backup\\nlp-notebooks-master\\data\\sentence_similarity\\glove.840B.300d.txt")
# PATH_TO_FREQUENCIES_FILE = "D:\\Backup\\nlp-notebooks-master\\data\\sentence_similarity\\frequencies.tsv"
# PATH_TO_DOC_FREQUENCIES_FILE = "D:\\Backup\\nlp-notebooks-master\\data\\sentence_similarity\\doc_frequencies.tsv"
word2vec = gensim.models.KeyedVectors.load_word2vec_format(PATH_TO_WORD2VEC, binary=True)
# frequencies = read_tsv(PATH_TO_FREQUENCIES_FILE)
# doc_frequencies = read_tsv(PATH_TO_DOC_FREQUENCIES_FILE)
# doc_frequencies["NUM_DOCS"] = 1288431
word_vectors = api.load("glove-wiki-gigaword-100")
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="/mount/data/astral-shape-187315-e8e3ba35bd82.json"
nltk.download('stopwords')
STOP_WORDS = nltk.corpus.stopwords.words('english')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
``` |
{
"source": "jp3141/60Hz",
"score": 3
} |
#### File: jp3141/60Hz/60Hz.py
```python
import time
import sys
import os
import datetime as dt
import serial
from signal import signal, SIGINT
from sys import exit
def handler(signal_received, frame):
print('\nCtrl+C detected.')
TeensyFreq.reset_input_buffer()
TeensyFreq.close()
LOGFile.close()
exit(0)
signal(SIGINT, handler)
######################################### main ##################################
LineNum = 0
TeensyFreq = serial.Serial(baudrate=57600)
Interval = 5.0 # time between frequency requests
#Interval = 2.0 # time between frequency requests
#TeensyFreq.port = '/dev/ttyS5'
TeensyFreq.port = '/dev/serial0'
#TeensyFreq.port = 'COM5'
TeensyFreq.timeout = 0.5
TeensyFreq.open()
TeensyCMD = b' \n'
LOGName = "60Hz.log"
#LOGName = "/home/pi/networkdrive/" + LOGName
LOGName = "/home/pi/60HzDrive/" + LOGName
LOGFile = open(LOGName, 'a', 1) # buffered by line
TeensyFreq.reset_input_buffer()
TeensyFreq.reset_output_buffer()
# This is the header. includes a leading \n
TeensyFreq.write(b' \n') # write a space to make Teensy initialize
TimeStamp = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + ","
TeensyFreq.readline() # This is a blank line
F0 = 60.0
deltaF = 0.025
PlotSpan = 25
#print("# Plot resolution = %6.3f Hz < %+5.3f Hz ..............0.............. %+5.3f Hz >" % (deltaF/PlotSpan, -deltaF, deltaF))
print("# Plot resolution = %6.3f Hz < %+5.3f Hz ··············0·············· %+5.3f Hz >" % (deltaF/PlotSpan, -deltaF, deltaF))
#print("# < %+5.3f Hz ..............0.............. %+5.3f Hz >" % (-deltaF, deltaF))
#print("# <--------------------------------------------------->")
#LinePlot = "-" + PlotSpan * "." + "0" + PlotSpan * "." + "+"
LinePlot = "-" + PlotSpan * "·" + "0" + PlotSpan * "·" + "+"
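# The ASCII strip chart maps frequency error onto columns: with deltaF = 0.025 Hz and
# PlotSpan = 25, each column is 0.001 Hz, so a reading of e.g. 60.010 Hz gives
# PlotFreq = round(25 * 0.010 / 0.025) = 10, i.e. the marker lands 10 columns right of '0'.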
TotalTimeError = 0.0
#lastStep = datetime.now()
lastPlotFreq=0
print(TimeStamp, '#')
print(TimeStamp, '#', file = LOGFile)
FreqString=TeensyFreq.readline().decode("ascii", errors = "replace")[:-1]
print(TimeStamp, FreqString)
print(TimeStamp, FreqString, file = LOGFile)
starttime = int((time.time()+(Interval-1))/10)*10 # phase reference for the sampling loop, aligned to a whole 10 s boundary
while True:
LineNum += 1
TeensyFreq.write(b'\n')
TimeStamp = dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + ","
FreqString=TeensyFreq.readline().decode("ascii", errors = "replace")[:-1]
# print(TimeStamp, FreqString)
# print(TimeStamp, FreqString, file = LOGFile)
    try:
        ThisFreq = float(FreqString.split(", ")[2])  # parse inside the try so malformed lines hit the handler below
        PlotFreq = round(PlotSpan*(ThisFreq-F0)/deltaF)
    except ValueError:
print("# Missing Data %s" % TimeStamp)
#print("# Missing Data %s" % TimeStamp, file=LOGFile)
LineNum -= 1
TotalTimeError = 0
continue
PlotChar = "|"
if (PlotFreq > lastPlotFreq):
PlotChar = "\\"
if (PlotFreq < lastPlotFreq):
PlotChar = "/"
lastPlotFreq = PlotFreq
if (PlotFreq < -(PlotSpan+1)):
PlotFreq = -(PlotSpan+1)
PlotChar = "<"
if (PlotFreq > (PlotSpan+1)):
PlotFreq = (PlotSpan+1)
PlotChar = ">"
#print("%4i, %9.4f " % (PlotFreq, CounterString), end='')
ThisPlot=LinePlot[:(PlotSpan+PlotFreq+1)] + PlotChar + LinePlot[(PlotSpan+PlotFreq+2):]
# deltaT = (ThisStep-lastStep).total_seconds()
# lastStep = ThisStep
# TotalTimeError += (float(CounterString)-F0)*deltaT/F0
# print("%8d, %s, %11s, %10.6f" % (LineNum, now, CounterString, TotalTimeError), file=LOGFile)
print(TimeStamp, FreqString+",", ThisPlot)
print(TimeStamp, FreqString, file = LOGFile)
# should not be any line available to read
FreqString=TeensyFreq.readline().decode("ascii", errors = "replace")[:-1]
# should get a timeout here; avoids resetting buffer and losing partial lines.
# should never actually get a string
if (FreqString != ""): # but print anyway if one is read
print(TimeStamp, "#" + FreqString[1:])
print(TimeStamp, "#" + FreqString[1:], file = LOGFile)
time.sleep(Interval - ((time.time() - starttime) % Interval))
LOGFile.flush()
TeensyFreq.close()
LOGFile.close()
print("Done")
``` |
{
"source": "jp3141/Vector-Network-Analyzer",
"score": 3
} |
#### File: jp3141/Vector-Network-Analyzer/VNA.py
```python
import visa
import time
import math
import pdb
import sys
import numpy as np
import matplotlib.pyplot as plt
import re
import os
import keyboard
def HelpAndExit():
print("Usage: ", sys.argv[0], " [-b BeginF] [-e EndF] [-p Points/Decade] [-f FILE_Prefix]\
\noptional: [-n] Don't plot after gathering data\
\noptional: [-z] Set current-measuring resistance and plot Z data\
\nFILE_Prefix defaults to RG1054Z\
\n<NAME> Sep 2, 2018"\
)
sys.exit(1)
def NextArg(i): #Return the next command line argument (if there is one)
    if ((i+1) >= len(sys.argv)):
        sys.exit("%s: '%s' expected an argument" % (sys.argv[0], sys.argv[i]))
    return(1, sys.argv[i+1])
######################################### main ##################################
debug = 0
FILEPREFIX = "RG1054Z"
MDEPTH = 30000
#pdb.set_trace()
StartF = 1
StopF = 1e6
PlotOK = True
ListOnly = False
PointsPerDecade = 10 # Changed from 30, 8/28/2018 KEAP
HighFrequency = False # used to indicate crossed over max Sync frequency from SDG1025
SweepModeLog = True
Voltage = 1.0
Resistance = 0.0 # non-zero when -z resistance argument is used -> Z is plotted
Sine = True
# Parse command line
skip = 0
for i in range(1, len(sys.argv)):
if not skip:
if sys.argv[i][:2] == "-d": debug = 1
elif sys.argv[i][:2] == "-f": (skip,FILEPREFIX) = NextArg(i)
elif sys.argv[i][:2] == "-n": PlotOK = False
elif sys.argv[i][:2] == "-l": ListOnly = True
elif sys.argv[i][:2] == "-q": Sine = False
elif sys.argv[i][:2] == "-v": (skip,Voltage) = 1, float(NextArg(i)[1])
elif sys.argv[i][:2] == "-z": (skip,Resistance) = 1, float(NextArg(i)[1])
elif sys.argv[i][:2] == "-s": (skip,StepSizeF) = 1, float(NextArg(i)[1]); SweepModeLog = False
elif sys.argv[i][:2] == "-b": (skip,StartF) = 1, float(NextArg(i)[1])
elif sys.argv[i][:2] == "-e": (skip,StopF) = 1, float(NextArg(i)[1])
elif sys.argv[i][:2] == "-p": (skip,PointsPerDecade) = 1, int(NextArg(i)[1]); SweepModeLog = True
elif sys.argv[i][:2] == "-h": HelpAndExit()
elif sys.argv[i][:1] == "-":
sys.stderr.write("%s: Bad argument\n" % (sys.argv[0]))
sys.exit(1)
else: pass
else: skip = 0
#if (len(sys.argv) <= 1): HelpAndExit()
if (ListOnly): PlotOK = False # don't plot if just listing
GPIB = visa.ResourceManager()
#print(GPIB.list_resources())
GPIB_Resources = GPIB.list_resources()
for resource in GPIB_Resources: print(resource)
print()
LOGFile = open(FILEPREFIX+"_VNA.log" if (not ListOnly) else os.devnull, 'w')
#else: LOGFile = open(os.devnull, 'w')
print(time.strftime("# %Y-%m-%d %H:%M"), file = LOGFile)
#GPIB_BUS = GPIB.open_resource('GPIB0::INTFC')
SDG1025 = GPIB.open_resource([_ for _ in GPIB_Resources if re.search('^USB.*:SDG.*INSTR$', _)][0])
#SDG1025 = GPIB.open_resource('USB0::0xF4ED::0xEE3A::SDG00004120363::INSTR')
Q = SDG1025.query("*IDN?")
print("SDG1025:", Q, end='')
print("# SDG1025:", Q, end='', file=LOGFile)
DS1054Z = GPIB.open_resource([_ for _ in GPIB_Resources if re.search('^USB.*:DS.*INSTR$', _)][0])
#DS1054Z = GPIB.open_resource('USB0::0x1AB1::0x04CE::DS1ZA201003553::INSTR')
Q = DS1054Z.query("*IDN?")
print("DS1054Z:", Q)
print("# DS1054Z:", Q, end='', file=LOGFile)
DS1054Z.timeout = 2000 # ms
DecadesF = math.log10(StopF/StartF)
print(sys.argv)
if SweepModeLog:
print( "Analysing from %f Hz to %f Hz, %6.1f points/decade; %i decades" % (StartF, StopF, PointsPerDecade, DecadesF))
print("# Analysing from %f Hz to %f Hz, %6.1f points/decade; %i decades" % (StartF, StopF, PointsPerDecade, DecadesF), file = LOGFile)
else:
print( "Analysing from %f Hz to %f Hz, %f Hz steps; %i total steps" % (StartF, StopF, StepSizeF, 1 + math.ceil((StopF-StartF)/StepSizeF)))
print("# Analysing from %f Hz to %f Hz, %f Hz steps; %i total steps" % (StartF, StopF, StepSizeF, 1 + math.ceil((StopF-StartF)/StepSizeF)), file = LOGFile)
# Signal Generator
SYNCMax = 2.0e6
SDG1025.write("C1: BSWV FRQ, %11.3f" % StartF)
if (Sine): SDG1025.write("C1: BSWV WVTP,SINE,AMP,%5.2f,OFST,0,PHSE,0" % Voltage) # 1 Vpp
else: SDG1025.write("C1: BSWV WVTP,SQUARE,AMP,%5.2f,OFST,0,PHSE,0" % Voltage) # 1 Vpp
SDG1025.write("C1:OUTP ON,LOAD,HZ")
SDG1025.write("C1:SYNC ON")
DS1054Z.write(":WAV:FORMAT BYTE;:WAV:MODE RAW")
#DS1054Z.write(":SYSTEM:BEEPER OFF")
DS1054Z.write(":STOP") # so preamble can get YINCR
#Channel 1
DS1054Z.write(":CHANNEL1:COUPLING AC")
DS1054Z.write(":CHANNEL1:DISPLAY ON")
DS1054Z.write(":CHANNEL1:SCALE 5")
DS1054Z.write(":CHANNEL1:BWLimit 20M")
print("1: ",DS1054Z.query(":WAV:SOURCE CHAN1;:WAV:PREAMBLE?"), end='')
#Channel 2
DS1054Z.write(":CHANNEL2:COUPLING AC")
DS1054Z.write(":CHANNEL2:DISPLAY ON")
DS1054Z.write(":CHANNEL2:SCALE 5")
DS1054Z.write(":CHANNEL2:BWLimit 20M")
print("2: ",DS1054Z.query(":WAV:SOURCE CHAN2;:WAV:PREAMBLE?"))
#Channel 4 (Trigger)
DS1054Z.write(":CHANNEL4:COUPLING DC")
DS1054Z.write(":CHANNEL4:DISPLAY OFF")
DS1054Z.write(":CHANNEL4:SCALE 5.0")
DS1054Z.write(":TRIGGER:MODE EDGE")
DS1054Z.write(":TRIGGER:EDGE:SOURCE CHANNEL4")
DS1054Z.write(":TRIGGER:COUPLING DC")
DS1054Z.write(":TRIGGER:EDGE:SLOPE POSITIVE")
DS1054Z.write(":TRIGGER:EDGE:LEVEL 2.5")
DS1054Z.write(":RUN")
DS1054Z.write(":ACQUIRE:MDEPTH %i" % MDEPTH)
VNA=[]
#DS1054Z.chunk_size = MDEPTH
print("#Sample, Frequency, Mag1, Mag2, Ratio (dB), Phase", file = LOGFile)
if SweepModeLog:
LastTestPOINT = 1 + math.ceil(PointsPerDecade*math.log10(StopF/StartF))
else:
LastTestPOINT = 1 + math.ceil((StopF-StartF)/StepSizeF)
#for TestPOINT in range(-1, 1+math.ceil(PointsPerDecade*math.log10(StopF/StartF))):
for TestPOINT in range(-1, LastTestPOINT):
# 1st cycle which is used to initialize vertical scale isn't logged
POINT = max(0, TestPOINT) # -1 maps to 0
TestF = StartF*math.pow(10,POINT/PointsPerDecade) if SweepModeLog else StartF+POINT*StepSizeF
if (TestF > StopF): break
if (TestF > 25e6): break # Max frequency of SDG1025
if keyboard.is_pressed('q'):
print('Key pressed')
break
print("Sample %3i, %11.3f Hz" % (TestPOINT, TestF), end='')
if (ListOnly): print();continue # only list sample frequencies
if (TestF >= SYNCMax) and not HighFrequency: # Can't generate sync above 2 MHz -- so switch to Channel 1. This also allows S/s to double
SDG1025.write("C1:SYNC OFF")
DS1054Z.write(":CHANNEL4:DISPLAY OFF")
DS1054Z.write(":TRIGGER:COUPLING LFReject")
DS1054Z.write(":TRIGGER:MODE EDGE")
DS1054Z.write(":TRIGGER:EDGE:SOURCE CHANNEL1")
DS1054Z.write(":TRIGGER:EDGE:SLOPE POSITIVE")
DS1054Z.write(":TRIGGER:EDGE:LEVEL 0")
time.sleep(0.3) # wait for this to change
MDEPTH *= 2 # double sample rate when Ch4 is not used by trigger
HighFrequency = True # Only switchover once
SDG1025.write("C1: BSWV FRQ, %11.3f" % TestF)
#pdb.set_trace()
DS1054Z.write(":TIMEBASE:MAIN:SCALE %13.9f" % (1./TestF/12.)) # Scope rounds up
ActualTB = float(DS1054Z.query(":TIMEBASE:MAIN:SCALE?").rstrip())
ActualSs = min(250e6 if (TestF < SYNCMax) else 500e6, round(MDEPTH/(ActualTB*12),0))
ActualSs_ = str(int(ActualSs))
if (ActualSs_[-9:] == "000000000"): ActualSs_ = ActualSs_[:-9] + " G"
if (ActualSs_[-6:] == "000000"): ActualSs_ = ActualSs_[:-6] + " M"
if (ActualSs_[-3:] == "000"): ActualSs_ = ActualSs_[:-3] + " k"
print(", %sS/s" % ActualSs_, end='')
DS1054Z.write(":RUN;:TRIGGER:SWEEP SINGLE")
while (DS1054Z.query(":TRIGGER:STATUS?")[:4] != "STOP"): pass
PreambleList = DS1054Z.query(":WAV:SOURCE CHAN1;:WAV:PREAMBLE?").split(',')
XINCR = float(PreambleList[4])
YINCR1 = float(PreambleList[7])
YOFF1 = int(PreambleList[8]) + int(PreambleList[9])
NPOINTS_PerCycle = 1./(TestF*XINCR) # Number of points for 1 cycle of this frequency
NCYCLES = math.floor(MDEPTH/NPOINTS_PerCycle)
NPOINTS = int(round(NPOINTS_PerCycle * NCYCLES))
print(", %i points; %i cycle%s @ %10.1f/cycle" % (NPOINTS, NCYCLES, " " if (NCYCLES==1) else "s", NPOINTS_PerCycle))
SAMPLEPOINTS = np.linspace(0, NCYCLES*2*np.pi, NPOINTS)
SINEARRAY = np.sin(SAMPLEPOINTS)
COSARRAY = np.cos(SAMPLEPOINTS)
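    # Dotting the captured waveform with sine/cosine over a whole number of cycles acts as
    # a single-bin DFT (lock-in detection): it recovers the amplitude and phase of the
    # signal at exactly TestF while averaging out noise and harmonics.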
# DS1054Z has transfer errors over USB if over ~ 8200; download whole array and truncate is simplest
CURVE1=DS1054Z.query_binary_values(":WAV:START 1;:WAV:STOP %i;:WAV:DATA?" %(MDEPTH), datatype='b', container=np.array, header_fmt=u'ieee')[:NPOINTS]
CURVE1 = (CURVE1-YOFF1)*YINCR1
PreambleList = DS1054Z.query(":WAV:SOURCE CHAN2;:WAV:PREAMBLE?").split(',') # Refresh after range change
YINCR2 = float(PreambleList[7])
YOFF2 = int(PreambleList[8]) + int(PreambleList[9])
CURVE2=DS1054Z.query_binary_values(":WAV:START 1;:WAV:STOP %i;:WAV:DATA?" %(MDEPTH), datatype='b', container=np.array, header_fmt=u'ieee')[:NPOINTS]
CURVE2 = (CURVE2-YOFF2)*YINCR2
SINDOT1 = np.dot(CURVE1,SINEARRAY)/NPOINTS
COSDOT1 = np.dot(CURVE1,COSARRAY)/NPOINTS
CHANNEL1 = complex(SINDOT1, COSDOT1)
MAG1 = 2*abs(CHANNEL1)
PHASE1 = np.angle(CHANNEL1)*180/math.pi
DS1054Z.write(":CHANNEL1:SCALE %9.4f" % (MAG1/3))
SINDOT2 = np.dot(CURVE2,SINEARRAY)/NPOINTS
COSDOT2 = np.dot(CURVE2,COSARRAY)/NPOINTS
CHANNEL2 = complex(SINDOT2, COSDOT2)
MAG2 = 2*abs(CHANNEL2)
PHASE2 = np.angle(CHANNEL2)*180/math.pi
DS1054Z.write(":CHANNEL2:SCALE %9.4f" % (MAG2/3))
Channel_Z = CHANNEL2/(CHANNEL1-CHANNEL2)*Resistance
print("Ch1: Sin, Cos = %9.4f, %9.4f; Mag = %9.5f, Phase = %7.2f deg." % (SINDOT1, COSDOT1, MAG1, PHASE1))
print("Ch2: Sin, Cos = %9.4f, %9.4f; Mag = %9.5f, Phase = %7.2f deg." % (SINDOT2, COSDOT2, MAG2, PHASE2))
Mag_dB = 20*math.log10(MAG2/MAG1)
Phase = (PHASE2-PHASE1) % 360
if (Phase) > 180: Phase -= 360 # center around +/- 180
CHZ = '%12.4f %sj%12.4f' % (Channel_Z.real, '+-'[Channel_Z.imag < 0], abs(Channel_Z.imag))
print("Ch2:Ch1 = %7.2f dB @ %7.2f deg.; Z =" % (Mag_dB, Phase), CHZ, '\n')
if (TestPOINT >= 0): # only after 1st round
VNA.append((TestF, Mag_dB, Phase, Channel_Z))
print("%6i, %12.3f, %9.5f, %9.5f, %7.2f, %7.2f, " %\
(POINT, TestF, MAG1, MAG2, Mag_dB, Phase), CHZ, file = LOGFile)
LOGFile.close()
DS1054Z.close()
SDG1025.close()
GPIB.close()
print("Done")
if (PlotOK):
fig, ax1 = plt.subplots()
fig.canvas.set_window_title('VNA 2:1')
plt.title("Channel 2 : Channel 1")
color = 'tab:red'
ax1.set_xlabel('Frequency (Hz)')
ax1.set_ylabel('dB', color=color)
if SweepModeLog:
ax1.semilogx([_[0] for _ in VNA], [_[1] for _ in VNA], color=color)
else:
ax1.plot([_[0] for _ in VNA], [_[1] for _ in VNA], color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax1.grid(True)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('Phase (°)', color=color) # we already handled the x-label with ax1
    if SweepModeLog:
        ax2.semilogx([_[0] for _ in VNA], [_[2] for _ in VNA], color=color)
    else:
        ax2.plot([_[0] for _ in VNA], [_[2] for _ in VNA], color=color)
ax2.tick_params(axis='y', labelcolor=color)
#ax2.grid(True)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show(block=False) # should only block if R=0 ?
#plt.show(block=False)
if (PlotOK and Resistance != 0):
fig, ax1 = plt.subplots()
fig.canvas.set_window_title('VNA Z')
plt.title("Impedance (ohms)")
color = 'tab:green'
ax1.set_xlabel('Frequency (Hz)')
ax1.set_ylabel('|Z| (ohms)', color=color)
if SweepModeLog:
ax1.loglog( [_[0] for _ in VNA], [abs(_[3]) for _ in VNA], color=color)
else:
ax1.semilogy([_[0] for _ in VNA], [abs(_[3]) for _ in VNA], color=color) # Mag(Z)
ax1.tick_params(axis='y', labelcolor=color)
ax1.grid(True)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:purple'
ax2.set_ylabel('Z∠ (°)', color=color) # we already handled the x-label with ax1
if SweepModeLog:
ax2.semilogx([_[0] for _ in VNA], [np.angle(_[3])*180/math.pi for _ in VNA], color=color)
else:
ax2.plot( [_[0] for _ in VNA], [np.angle(_[3])*180/math.pi for _ in VNA], color=color)
ax2.tick_params(axis='y', labelcolor=color)
#ax2.grid(True)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show(block=True)
``` |
{
"source": "jp3477/curation",
"score": 3
} |
#### File: controlled_tier_qc/code/check_concept_suppression.py
```python
import pandas as pd
from utils.helpers import run_check_by_row
from sql.query_templates import (QUERY_SUPPRESSED_CONCEPT)
def check_concept_suppression(check_df, project_id, post_dataset_id, pre_deid_dataset=None, mapping_dataset=None):
"""Run concept suppression check
Parameters
----------
check_df: pd.DataFrame
Dataframe containing the checks that need to be done
project_id: str
Google Bigquery project_id
post_dataset_id: str
Bigquery dataset after de-id rules were run
pre_deid_dataset: str
Bigquery dataset before de-id rules were run
Returns
-------
pd.DataFrame
"""
concept_check = run_check_by_row(check_df, QUERY_SUPPRESSED_CONCEPT,
project_id, post_dataset_id)
return concept_check.reset_index(drop=True)
```
#### File: controlled_tier_qc/code/check_field_suppression.py
```python
import pandas as pd
from utils.helpers import run_check_by_row
from sql.query_templates import (QUERY_SUPPRESSED_NULLABLE_FIELD_NOT_NULL,
QUERY_SUPPRESSED_REQUIRED_FIELD_NOT_EMPTY,
QUERY_SUPPRESSED_NUMERIC_NOT_ZERO,
QUERY_VEHICLE_ACCIDENT_SUPPRESSION_ICD9, QUERY_VEHICLE_ACCIDENT_SUPPRESSION_ICD10,
QUERY_CANCER_CONCEPT_SUPPRESSION, QUERY_SUPPRESSED_FREE_TEXT_RESPONSE,
QUERY_GEOLOCATION_SUPPRESSION)
def check_field_suppression(check_df, project_id, post_dataset_id, pre_deid_dataset=None, mapping_dataset=None):
"""Run field suppression check
Parameters
----------
check_df: pd.DataFrame
Dataframe containing the checks that need to be done
project_id: str
Google Bigquery project_id
post_dataset_id: str
Bigquery dataset after de-id rules were run
pre_deid_dataset: str
Bigquery dataset before de-id rules were run
Returns
-------
pd.DataFrame
"""
nullable_field = check_df[check_df['is_nullable'] == 'YES']
required_numeric_field = check_df[(check_df['is_nullable'] == 'NO') & (check_df['data_type'] == 'INT64')]
required_other_field = check_df[(check_df['is_nullable'] == 'NO') & (check_df['data_type'] != 'INT64')]
nullable_field_check = run_check_by_row(nullable_field, QUERY_SUPPRESSED_NULLABLE_FIELD_NOT_NULL,
project_id, post_dataset_id)
required_numeric_field_check = run_check_by_row(required_numeric_field, QUERY_SUPPRESSED_NUMERIC_NOT_ZERO,
project_id, post_dataset_id)
required_other_field_check = run_check_by_row(required_other_field, QUERY_SUPPRESSED_REQUIRED_FIELD_NOT_EMPTY,
project_id, post_dataset_id)
return pd.concat([nullable_field_check, required_numeric_field_check, required_other_field_check], sort=True)
def check_vehicle_accident_suppression(check_df, project_id, post_deid_dataset, pre_deid_dataset=None, mapping_dataset=None):
"""Run motor vehicle accident suppression check
Parameters
----------
check_df: pd.DataFrame
Dataframe containing the checks that need to be done
project_id: str
Google Bigquery project_id
post_dataset_id: str
Bigquery dataset after de-id rules were run
pre_deid_dataset: str
Bigquery dataset before de-id rules were run
Returns
-------
pd.DataFrame
"""
icd9_vehicle_accident = run_check_by_row(check_df, QUERY_VEHICLE_ACCIDENT_SUPPRESSION_ICD9,
project_id, post_deid_dataset)
icd10_vehicle_accident = run_check_by_row(check_df, QUERY_VEHICLE_ACCIDENT_SUPPRESSION_ICD10,
project_id, post_deid_dataset)
return pd.concat([icd9_vehicle_accident, icd10_vehicle_accident], sort=True)
def check_field_cancer_concept_suppression(check_df, project_id, post_deid_dataset, pre_deid_dataset=None, mapping_dataset=None):
"""Run suppression check for some cancer concepts
Parameters
----------
check_df: pd.DataFrame
Dataframe containing the checks that need to be done
project_id: str
Google Bigquery project_id
post_dataset_id: str
Bigquery dataset after de-id rules were run
pre_deid_dataset: str
Bigquery dataset before de-id rules were run
Returns
-------
pd.DataFrame
"""
cancer_concept = run_check_by_row(check_df, QUERY_CANCER_CONCEPT_SUPPRESSION,
project_id, post_deid_dataset)
return cancer_concept
def check_field_freetext_response_suppression(check_df, project_id, post_deid_dataset, pre_deid_dataset=None, mapping_dataset=None):
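    """Run suppression check for free-text survey responses.

    Takes the same arguments and returns the same kind of pd.DataFrame as the
    other field-level checks in this module.
    """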
free_text_concept = run_check_by_row(check_df, QUERY_SUPPRESSED_FREE_TEXT_RESPONSE,
project_id, post_deid_dataset)
return free_text_concept
def check_field_geolocation_records_suppression(check_df, project_id, post_deid_dataset, pre_deid_dataset=None, mapping_dataset=None):
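    """Run suppression check for geolocation records.

    Takes the same arguments and returns the same kind of pd.DataFrame as the
    other field-level checks in this module.
    """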
return run_check_by_row(check_df, QUERY_GEOLOCATION_SUPPRESSION,
project_id, post_deid_dataset)
```
#### File: tools/email_generator/create_dqms.py
```python
from dictionaries_and_lists import \
metric_type_to_english_dict, data_quality_dimension_dict, \
columns_to_document_for_sheet_email, table_based_on_column_provided
from functions_to_create_dqm_objects import find_hpo_row, \
get_info
from data_quality_metric_class import DataQualityMetric
def create_dqm_objects_for_sheet(
dataframe, hpo_names, user_choice, metric_is_percent,
date):
"""
Function is used to create DataQualityMetric objects for all of
the pertinent values on the various sheets being loaded.
Parameters
---------
dataframe (df): contains the information for a particular dimension
of data quality on a particular date
    hpo_names (list): list of the strings that should go
        into an HPO ID column, for use in generating HPO objects.
user_choice (string): represents the sheet from the analysis reports
whose metrics will be compared over time
metric_is_percent (bool): determines whether the data will be seen
as 'percentage complete' or individual instances of a
particular error
date (datetime): datetime object that represents the time that the
data quality metric was documented (corresponding to the
title of the file from which it was extracted)
Returns
-------
dqm_objects (list): list of DataQualityMetrics objects
these are objects that all should have the same
metric_type, data_quality_dimension, and date attributes
    columns (list): the column names whose data will be extracted.
        These will eventually be converted to either the rows of
        dataframes or the names of the different dataframes to be
        output.
"""
# to instantiate dqm objects later on
metric_type = metric_type_to_english_dict[user_choice]
dqm_type = data_quality_dimension_dict[user_choice]
columns = columns_to_document_for_sheet_email[user_choice]
dqm_objects = []
# for each HPO (row) in the dataframe
for name in hpo_names:
row_number = find_hpo_row(sheet=dataframe, hpo=name)
data_dict = get_info(
sheet=dataframe, row_num=row_number,
percentage=metric_is_percent, sheet_name=user_choice,
columns_to_collect=columns)
# for each table / class (column) in the dataframe
for table, data in data_dict.items():
table_or_class_name = table_based_on_column_provided[table]
new_dqm_object = DataQualityMetric(
hpo=name, table_or_class=table_or_class_name,
metric_type=metric_type,
value=data, data_quality_dimension=dqm_type,
date=date)
dqm_objects.append(new_dqm_object)
return dqm_objects, columns
def create_dqm_list(dfs, file_names, datetimes, user_choice,
percent_bool, hpo_names):
"""
Function is used to create all of the possible 'DataQualityMetric'
objects that are needed given all of the inputted data.
Parameters
----------
dfs (list): list of pandas dataframes. each dataframe contains
info about data quality for all of the sites for a date. each
index of the list should represent a particular date's metrics.
file_names (list): list of the user-specified Excel files that are
in the current directory. Files are analytics reports to be
scanned.
datetimes (list): list of datetime objects that
represent the dates of the files that are being
ingested
    user_choice (string): represents the sheet from the analysis reports
whose metrics will be compared over time
percent_bool (bool): determines whether the data will be seen
as 'percentage complete' or individual instances of a
particular error
hpo_names (list): list of the strings that should go
        into an HPO ID column, for use in generating subsequent
dataframes.
Return
-------
dqm_list (lst): list of DataQualityMetric objects
"""
dqm_list = []
# creating the DQM objects and assigning to HPOs
for dataframe, file_name, date in zip(dfs, file_names, datetimes):
dqm_objects, col_names = create_dqm_objects_for_sheet(
dataframe=dataframe, hpo_names=hpo_names,
user_choice=user_choice, metric_is_percent=percent_bool,
date=date)
dqm_list.extend(dqm_objects)
return dqm_list
```
#### File: cleaning_rules/deid/genaralize_cope_insurance_answers.py
```python
import logging
# Project imports
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, OBSERVATION
from utils import pipeline_logging
LOGGER = logging.getLogger(__name__)
ANSWER_GENERALIZATION_QUERY = JINJA_ENV.from_string("""
UPDATE
`{{project_id}}.{{dataset_id}}.observation`
SET
value_source_concept_id = 1333127,
value_as_concept_id = 1333127,
value_source_value = 'cope_a_33'
WHERE
observation_source_concept_id = 1332737
AND value_source_concept_id IN (1332904,
1333140)
""")
REMOVE_DUPLICATE_GENERALIZED_ANSWERS = JINJA_ENV.from_string("""
DELETE
FROM
`{{project_id}}.{{dataset_id}}.observation`
WHERE
observation_id IN (
SELECT
observation_id
FROM (
SELECT
observation_id,
ROW_NUMBER() OVER(PARTITION BY person_id ORDER BY observation_date DESC) AS rn
FROM
`{{project_id}}.{{dataset_id}}.observation`
WHERE
(observation_source_concept_id = 1332737
AND value_source_concept_id = 1333127
AND value_as_concept_id = 1333127
AND value_source_value = 'cope_a_33'))
WHERE
rn <> 1)
""")
class GeneralizeCopeInsuranceAnswers(BaseCleaningRule):
def __init__(self, project_id, dataset_id, sandbox_dataset_id):
"""
Initialize the class with proper information.
Set the issue numbers, description and affected datasets. As other tickets may affect
this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = "This cleaning rules generalizes answers to COPE insurance question."
super().__init__(issue_numbers=['DC1665'],
description=desc,
affected_datasets=[cdr_consts.REGISTERED_TIER_DEID],
affected_tables=[OBSERVATION],
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id)
def get_sandbox_tablenames(self):
raise NotImplementedError("Please fix me.")
def get_query_specs(self, *args, **keyword_args):
"""
Interface to return a list of query dictionaries.
:returns: a list of query dictionaries. Each dictionary specifies
the query to execute and how to execute. The dictionaries are
stored in list order and returned in list order to maintain
an ordering.
"""
insurance_answers_generalization_query = dict()
insurance_answers_generalization_query[
cdr_consts.QUERY] = ANSWER_GENERALIZATION_QUERY.render(
project_id=self.project_id, dataset_id=self.dataset_id)
generalized_answers_deduplication_query = dict()
generalized_answers_deduplication_query[
cdr_consts.QUERY] = REMOVE_DUPLICATE_GENERALIZED_ANSWERS.render(
project_id=self.project_id, dataset_id=self.dataset_id)
return [
insurance_answers_generalization_query,
generalized_answers_deduplication_query
]
def validate_rule(self, client, *args, **keyword_args):
"""
Validates the cleaning rule which deletes or updates the data from the tables
Method to run validation on cleaning rules that will be updating the values.
For example:
        if your class updates all the datetime fields, you should implement
        validation that checks that the datetime values needing to be updated no
        longer exist in the table.
        if your class deletes a subset of rows in the tables, you should implement
        validation that checks that the final row count plus the number of deleted
        rows equals the initial row count of the affected tables.
        Raises RuntimeError if the validation fails.
"""
raise NotImplementedError("Please fix me.")
def setup_rule(self, client, *args, **keyword_args):
"""
Load required resources prior to executing cleaning rule queries.
Method to run data upload options before executing the first cleaning
rule of a class. For example, if your class requires loading a static
table, that load operation should be defined here. It SHOULD NOT BE
defined as part of get_query_specs().
"""
pass
def setup_validation(self, client, *args, **keyword_args):
"""
Run required steps for validation setup
Method to run to setup validation on cleaning rules that will be updating or deleting the values.
For example:
        if your class updates all the datetime fields, you should implement the
        logic to get the initial list of values which adhere to the condition we are looking for.
        if your class deletes a subset of rows in the tables, you should implement
        the logic to get the row counts of the tables prior to applying the cleaning rule.
"""
raise NotImplementedError("Please fix me.")
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(GeneralizeCopeInsuranceAnswers,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(GeneralizeCopeInsuranceAnswers,)])
```
#### File: cdr_cleaner/cleaning_rules/field_mapping.py
```python
import re
from collections import OrderedDict
from io import open
import resources
from cdr_cleaner.cleaning_rules import domain_mapping
from cdr_cleaner.cleaning_rules.domain_mapping import DOMAIN_TABLE_NAMES, get_field_mappings,\
exist_domain_mappings, NULL_VALUE
from resources import get_domain_id_field, get_domain
NAME_FIELD = 'name'
FIELD_MODE = 'mode'
FIELD_REQUIRED = 'required'
TYPE_CONCEPT_SUFFIX = '_type_concept_id'
DOMAIN_COMMON_FIELDS = 'common_fields'
DOMAIN_SPECIFIC_FIELDS = 'specific_fields'
DOMAIN_DATE_FIELDS = 'date_fields'
COMMON_DOMAIN_FIELD_SUFFIXES = [
'person_id', 'visit_occurrence_id', 'provider_id', '_concept_id',
'_type_concept_id', '_source_value', '_source_concept_id'
]
DATE_FIELD_SUFFIXES = [
'_start_date', '_start_datetime', '_end_date', '_end_datetime', '_date',
'_datetime'
]
ATTRIBUTE_MAPPING_TEMPLATE = '{src_table},{dest_table},{src_field},{dest_field},{translation}'
CDM_TABLE_SCHEMAS = resources.cdm_schemas(False, False)
FIELD_MAPPING_HEADER = 'src_table,dest_table,src_field,dest_field,translation\n'
def generate_field_mappings(_src_table, _dest_table, src_table_fields,
dest_table_fields):
"""
This functions generates a list of field mappings between the src_table and dest_table
:param _src_table: the source CDM table
:param _dest_table: the destination CDM table
:param src_table_fields: the dictionary that contains all the source fields (common fields, date fields and domain specific fields)
:param dest_table_fields: the dictionary that contains all the destination fields (common fields, date fields and domain specific fields)
:return: a list of field mappings between _src_table and _dest_table
"""
_field_mappings = OrderedDict()
for field_type in dest_table_fields:
if field_type == DOMAIN_SPECIFIC_FIELDS:
specific_field_mappings = resolve_specific_field_mappings(
_src_table, _dest_table,
dest_table_fields[DOMAIN_SPECIFIC_FIELDS])
_field_mappings.update(specific_field_mappings)
elif field_type == DOMAIN_DATE_FIELDS:
date_field_mappings = resolve_date_field_mappings(
src_table_fields[DOMAIN_DATE_FIELDS],
dest_table_fields[DOMAIN_DATE_FIELDS])
_field_mappings.update(date_field_mappings)
else:
common_field_mappings = resolve_common_field_mappings(
src_table_fields[DOMAIN_COMMON_FIELDS],
dest_table_fields[DOMAIN_COMMON_FIELDS])
_field_mappings.update(common_field_mappings)
return _field_mappings
def resolve_common_field_mappings(src_common_fields, dest_common_fields):
"""
This function generates a list of field mappings for common domain fields.
:param src_common_fields: a dictionary that contains the source common fields
:param dest_common_fields: a dictionary that contains the destination common fields
:return:
"""
common_field_mappings = OrderedDict()
for field_suffix in dest_common_fields:
_dest_field = dest_common_fields[field_suffix]
_src_field = src_common_fields[
field_suffix] if field_suffix in src_common_fields else NULL_VALUE
common_field_mappings[_dest_field] = _src_field
return common_field_mappings
def resolve_specific_field_mappings(_src_table, _dest_table,
_dest_specific_fields):
"""
This function generates a list of field mappings between _src_table and _dest_table for the domain specific fields.
E.g. The fields value_as_number and value_as_concept_id can be mapped between observation and measurement.
:param _src_table: the source CDM table
:param _dest_table: the destination CDM table
:param _dest_specific_fields: an array that contains the specific destination fields
:return:
"""
specific_field_mappings = OrderedDict()
# If the src_table and dest_table are the same, map all the fields onto themselves.
if _src_table == _dest_table:
for dest_specific_field in _dest_specific_fields:
specific_field_mappings[dest_specific_field] = dest_specific_field
else:
# Retrieve the field mappings and put them into the dict
specific_field_mappings.update(
get_field_mappings(_src_table, _dest_table))
# For dest_specific_field that is not defined, map it to NULL
for dest_specific_field in _dest_specific_fields:
if dest_specific_field not in specific_field_mappings:
specific_field_mappings[dest_specific_field] = NULL_VALUE
return specific_field_mappings
def resolve_date_field_mappings(src_date_fields, dest_date_fields):
"""
This function generates a list of date field mappings based on simple heuristics.
1. if numbers of date fields are equal between src_date_fields and dest_date_fields,
that means both have either two date fields (domain_date, domain_datetime)
or four date fields (domain_start_date, domain_start_datetime, domain_end_date, domain_end_datetime).
So we can map the corresponding date fields to each other.
2. if numbers of date fields are not equal, one must have two fields (domain_date, domain_datetime),
and the other must have four fields (domain_start_date, domain_start_datetime, domain_end_date, domain_end_datetime).
We need to map the domain_date to domain_start_date and domain_datetime to domain_start_datetime
:param src_date_fields: an array that contains the source date fields
:param dest_date_fields: an array that contains the destination date fields
:return: a list of date field mappings
"""
date_field_mappings = OrderedDict()
if len(src_date_fields) == len(dest_date_fields):
for src_date_suffix in src_date_fields:
if src_date_suffix in dest_date_fields:
_src_field = src_date_fields[src_date_suffix]
_dest_field = dest_date_fields[src_date_suffix]
date_field_mappings[_dest_field] = _src_field
else:
for dest_date_suffix in dest_date_fields:
if '_end' in dest_date_suffix:
src_date_suffix = None
elif '_start' in dest_date_suffix:
src_date_suffix = dest_date_suffix[len('_start'):]
else:
src_date_suffix = '_start{}'.format(dest_date_suffix)
_src_field = src_date_fields[
src_date_suffix] if src_date_suffix is not None else NULL_VALUE
_dest_field = dest_date_fields[dest_date_suffix]
date_field_mappings[_dest_field] = _src_field
return date_field_mappings
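# Worked example (sketch, using OMOP-style field names): a two-field source mapped onto a
# four-field destination.
#   src  = {'_date': 'observation_date', '_datetime': 'observation_datetime'}
#   dest = {'_start_date': 'drug_exposure_start_date',
#           '_start_datetime': 'drug_exposure_start_datetime',
#           '_end_date': 'drug_exposure_end_date',
#           '_end_datetime': 'drug_exposure_end_datetime'}
#   resolve_date_field_mappings(src, dest) maps the start fields to the source date fields
#   and the end fields to NULL_VALUE.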
def create_domain_field_dict():
"""
This function categorizes the CDM table fields and puts them into different 'buckets' of the dictionary.
The purpose of creating this dictionary is to facilitate the mapping of the fields in the downstream process.
:return: a dictionary that contains CDM table fields
"""
domain_fields = OrderedDict()
for domain_table in domain_mapping.DOMAIN_TABLE_NAMES:
_field_mappings = OrderedDict()
common_field_mappings = OrderedDict()
date_field_mappings = OrderedDict()
specific_fields = []
domain = get_domain(domain_table)
domain_id_field = get_domain_id_field(domain_table)
for field_name in [
field_name for field_name in get_domain_fields(domain_table)
if field_name != domain_id_field
]:
# Added a special check for drug_exposure because the drug_exposure columns don't follow the same pattern
# E.g. drug_exposure_start_time doesn't follow the pattern {domain}_start_datetime
if field_name.find(domain_table) != -1:
field_suffix = re.sub(domain_table, '', field_name)
else:
field_suffix = re.sub(domain.lower(), '', field_name)
# Put different types of fields into dictionary
if field_suffix in COMMON_DOMAIN_FIELD_SUFFIXES:
common_field_mappings[field_suffix] = field_name
elif field_suffix in DATE_FIELD_SUFFIXES:
date_field_mappings[field_suffix] = field_name
elif field_name in COMMON_DOMAIN_FIELD_SUFFIXES:
common_field_mappings[field_name] = field_name
elif field_name != domain_id_field:
specific_fields.append(field_name)
_field_mappings[DOMAIN_COMMON_FIELDS] = common_field_mappings
_field_mappings[DOMAIN_SPECIFIC_FIELDS] = specific_fields
_field_mappings[DOMAIN_DATE_FIELDS] = date_field_mappings
domain_fields[domain_table] = _field_mappings
return domain_fields
def get_domain_fields(_domain_table):
"""
This function retrieves all field names of a CDM table
:param _domain_table:
:return:
"""
fields = CDM_TABLE_SCHEMAS[_domain_table]
return [field[NAME_FIELD] for field in fields]
def is_field_required(_domain_table, field_name):
"""
The function checks if the field is nullable
:param _domain_table: the name of the domain table
:param field_name: the name of the field
:return:
"""
fields = CDM_TABLE_SCHEMAS[_domain_table]
for field in fields:
if field[NAME_FIELD] == field_name:
return field[FIELD_MODE] == FIELD_REQUIRED
return False
if __name__ == '__main__':
with open(resources.field_mappings_replaced_path, 'w') as fr:
fr.write(FIELD_MAPPING_HEADER)
field_dict = create_domain_field_dict()
for src_table in DOMAIN_TABLE_NAMES:
for dest_table in DOMAIN_TABLE_NAMES:
if src_table == dest_table or exist_domain_mappings(
src_table, dest_table):
field_mappings = generate_field_mappings(
src_table, dest_table, field_dict[src_table],
field_dict[dest_table])
for dest_field, src_field in field_mappings.items():
translation = 1 if TYPE_CONCEPT_SUFFIX in src_field \
and TYPE_CONCEPT_SUFFIX in dest_field \
and src_table != dest_table else 0
field_mapping = ATTRIBUTE_MAPPING_TEMPLATE.format(
src_table=src_table,
dest_table=dest_table,
src_field=src_field,
dest_field=dest_field,
translation=translation)
fr.write(field_mapping)
fr.write('\n')
fr.close()
```
#### File: cdr_cleaner/cleaning_rules/null_concept_ids_for_numeric_ppi.py
```python
import logging
# Project imports
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from common import OBSERVATION, JINJA_ENV
from constants.cdr_cleaner import clean_cdr as cdr_consts
LOGGER = logging.getLogger(__name__)
SAVE_TABLE_NAME = "dc_703_obs_changed_rows_saved"
# Query to create tables in sandbox with the rows that will be removed per cleaning rule
SANDBOX_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE
`{{project}}.{{sandbox_dataset}}.{{intermediary_table}}` AS (
SELECT *
FROM
`{{project}}.{{dataset}}.observation`
WHERE
questionnaire_response_id IS NOT NULL
AND
value_as_number IS NOT NULL
AND
(value_source_concept_id IS NOT NULL OR value_as_concept_id IS NOT NULL))
""")
CLEAN_NUMERIC_PPI_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project}}.{{dataset}}.observation` AS (
SELECT
observation_id,
person_id,
observation_concept_id,
observation_date,
observation_datetime,
observation_type_concept_id,
value_as_number,
CASE
WHEN
questionnaire_response_id IS NOT NULL AND value_as_number IS NOT NULL AND (value_source_concept_id IS NOT NULL OR value_as_concept_id IS NOT NULL) THEN NULL
ELSE value_as_string
END AS
value_as_string,
CASE
WHEN
questionnaire_response_id IS NOT NULL AND value_as_number IS NOT NULL AND (value_source_concept_id IS NOT NULL OR value_as_concept_id IS NOT NULL) THEN NULL
ELSE value_as_concept_id
END AS
value_as_concept_id,
qualifier_concept_id,
unit_concept_id,
provider_id,
visit_occurrence_id,
observation_source_value,
observation_source_concept_id,
unit_source_value,
qualifier_source_value,
CASE
WHEN
questionnaire_response_id IS NOT NULL AND value_as_number IS NOT NULL AND (value_source_concept_id IS NOT NULL OR value_as_concept_id IS NOT NULL) THEN NULL
ELSE value_source_concept_id
END AS
value_source_concept_id,
CASE
WHEN
questionnaire_response_id IS NOT NULL AND value_as_number IS NOT NULL AND (value_source_concept_id IS NOT NULL OR value_as_concept_id IS NOT NULL) THEN NULL
ELSE value_source_value
END AS
value_source_value,
questionnaire_response_id
FROM
{{project}}.{{dataset}}.observation
)""")
class NullConceptIDForNumericPPI(BaseCleaningRule):
"""
Nulls answer concept_ids for numeric PPI questions
"""
def __init__(self, project_id, dataset_id, sandbox_dataset_id):
"""
Initialize the class with proper information.
Set the issue numbers, description and affected datasets. As other tickets may affect
this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = (
'Nulls answer concept_ids for numeric PPI questions if:\n'
'(1) questionnaire_response_id is not null\n'
'(2) value_as_number is not null\n'
'(3) value_source_concept_id or value_as_concept_id is not null')
super().__init__(issue_numbers=['DC-537', 'DC-703', 'DC-1098'],
description=desc,
affected_datasets=[cdr_consts.RDR],
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
affected_tables=[OBSERVATION])
def get_query_specs(self):
"""
Return a list of dictionary query specifications.
:return: A list of dictionaries. Each dictionary contains a single query
and a specification for how to execute that query. The specifications
are optional but the query is required.
"""
save_changed_rows = {
cdr_consts.QUERY:
SANDBOX_QUERY.render(project=self.project_id,
dataset=self.dataset_id,
sandbox_dataset=self.sandbox_dataset_id,
intermediary_table=SAVE_TABLE_NAME),
}
clean_numeric_ppi_query = {
cdr_consts.QUERY:
CLEAN_NUMERIC_PPI_QUERY.render(project=self.project_id,
dataset=self.dataset_id),
}
return [save_changed_rows, clean_numeric_ppi_query]
def setup_rule(self, client):
"""
Function to run any data upload options before executing a query.
"""
pass
def get_sandbox_tablenames(self):
return [SAVE_TABLE_NAME]
def setup_validation(self, client):
"""
Run required steps for validation setup
This abstract method was added to the base class after this rule was authored.
        This rule needs to implement logic to set up validation for cleaning rules
        that will be updating or deleting the values.
        This has not been done yet; no tracking issue exists for it.
"""
raise NotImplementedError("Please fix me.")
def validate_rule(self, client):
"""
Validates the cleaning rule which deletes or updates the data from the tables
This abstract method was added to the base class after this rule was authored.
        This rule needs to implement logic to run validation on cleaning rules
        that will be updating or deleting the values.
        This has not been done yet; no tracking issue exists for it.
"""
raise NotImplementedError("Please fix me.")
if __name__ == '__main__':
import cdr_cleaner.clean_cdr_engine as clean_engine
import cdr_cleaner.args_parser as parser
ARGS = parser.parse_args()
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(NullConceptIDForNumericPPI,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(NullConceptIDForNumericPPI,)])
```
#### File: cdr_cleaner/cleaning_rules/remove_ehr_data_past_deactivation_date.py
```python
import logging
# Third party imports
import google.cloud.bigquery as gbq
# Project imports
from utils import bq, pipeline_logging
import utils.participant_summary_requests as psr
import retraction.retract_deactivated_pids as rdp
import retraction.retract_utils as ru
from constants.retraction.retract_deactivated_pids import DEACTIVATED_PARTICIPANTS
LOGGER = logging.getLogger(__name__)
DEACTIVATED_PARTICIPANTS_COLUMNS = [
'participantId', 'suspensionStatus', 'suspensionTime'
]
def remove_ehr_data_queries(client, api_project_id, project_id, dataset_id,
sandbox_dataset_id):
"""
Sandboxes and drops all EHR data found for deactivated participants after their deactivation date
:param client: BQ client
:param api_project_id: Project containing the RDR Participant Summary API
:param project_id: Identifies the project containing the target dataset
:param dataset_id: Identifies the dataset to retract deactivated participants from
:param sandbox_dataset_id: Identifies the sandbox dataset to store records for dataset_id
:returns queries: List of query dictionaries
"""
# gets the deactivated participant dataset to ensure it's up-to-date
df = psr.get_deactivated_participants(api_project_id,
DEACTIVATED_PARTICIPANTS_COLUMNS)
# To store dataframe in a BQ dataset table named _deactivated_participants
destination_table = f'{sandbox_dataset_id}.{DEACTIVATED_PARTICIPANTS}'
psr.store_participant_data(df, project_id, destination_table)
fq_deact_table = f'{project_id}.{destination_table}'
deact_table_ref = gbq.TableReference.from_string(f"{fq_deact_table}")
LOGGER.info(f"Retracting deactivated participants from '{dataset_id}'")
LOGGER.info(
f"Using sandbox dataset '{sandbox_dataset_id}' for '{dataset_id}'")
# creates sandbox and truncate queries to run for deactivated participant data drops
queries = rdp.generate_queries(client, project_id, dataset_id,
sandbox_dataset_id, deact_table_ref)
return queries
if __name__ == '__main__':
parser = rdp.get_base_parser()
parser.add_argument(
'-d',
'--dataset_id',
action='store',
dest='dataset_id',
help=
'Identifies the target dataset to retract deactivated participant data',
required=True)
parser.add_argument(
'-q',
'--api_project_id',
action='store',
dest='api_project_id',
help='Identifies the RDR project for participant summary API',
required=True)
parser.add_argument('-b',
'--sandbox_dataset_id',
action='store',
dest='sandbox_dataset_id',
help='Identifies sandbox dataset to store records',
required=True)
args = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG,
add_console_handler=args.console_log)
client = bq.get_client(args.project_id)
# keep only datasets existing in project
dataset_ids = ru.get_datasets_list(args.project_id, [args.dataset_id])
# dataset_ids should contain only one dataset (unioned_ehr)
if len(dataset_ids) == 1:
dataset_id = dataset_ids[0]
else:
raise RuntimeError(f'More than one dataset specified: {dataset_ids}')
LOGGER.info(
f"Dataset to retract deactivated participants from: {dataset_id}. "
f"Using sandbox dataset: {args.sandbox_dataset_id}")
deactivation_queries = remove_ehr_data_queries(client, args.api_project_id,
args.project_id, dataset_id,
args.sandbox_dataset_id)
job_ids = []
for query in deactivation_queries:
job_id = rdp.query_runner(client, query)
job_ids.append(job_id)
LOGGER.info(
f"Retraction of deactivated participants from {dataset_id} complete")
```
#### File: cdr_cleaner/cleaning_rules/remove_ehr_data_without_consent.py
```python
import logging
# Third party imports
from google.cloud.exceptions import GoogleCloudError
# Project imports
import common
import constants.cdr_cleaner.clean_cdr as cdr_consts
from resources import get_person_id_tables
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from cdr_cleaner.clean_cdr_utils import get_tables_in_dataset
LOGGER = logging.getLogger(__name__)
EHR_UNCONSENTED_PARTICIPANTS_LOOKUP_TABLE = '_ehr_unconsented_pids'
AFFECTED_TABLES = [
table for table in get_person_id_tables(common.AOU_REQUIRED)
if table not in [common.PERSON, common.DEATH]
]
UNCONSENTED_PID_QUERY = common.JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE
`{{project}}.{{sandbox_dataset}}.{{unconsented_lookup}}` AS (
WITH
ordered_response AS (
SELECT
person_id,
value_source_concept_id,
observation_datetime,
ROW_NUMBER() OVER(PARTITION BY person_id ORDER BY observation_datetime DESC, value_source_concept_id ASC) AS rn
FROM
`{{project}}.{{dataset}}.observation`
WHERE
observation_source_value = 'EHRConsentPII_ConsentPermission')
SELECT
person_id
FROM
`{{project}}.{{dataset}}.person`
WHERE
person_id NOT IN (
SELECT
person_id
FROM
ordered_response
WHERE
rn = 1
AND value_source_concept_id = 1586100)
)
""")
SANDBOX_ROWS = common.JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE
`{{project}}.{{sandbox_dataset}}.{{sandbox_table}}` AS (
SELECT
*
FROM
`{{project}}.{{dataset}}.{{domain_table}}` d
JOIN
`{{project}}.{{dataset}}.{{mapping_domain_table}}` md
USING
({{domain_table}}_id)
WHERE
person_id IN (
SELECT
person_id
FROM
`{{project}}.{{sandbox_dataset}}.{{unconsented_lookup}}`)
AND src_dataset_id LIKE '%ehr%'
)
""")
DROP_ROWS = common.JINJA_ENV.from_string("""
DELETE
FROM
`{{project}}.{{dataset}}.{{domain_table}}`
WHERE
{{domain_table}}_id IN (
SELECT
{{domain_table}}_id
FROM
`{{project}}.{{sandbox_dataset}}.{{sandbox_table}}`)
""")
class RemoveEhrDataWithoutConsent(BaseCleaningRule):
"""
    All EHR data associated with a participant whose EHR consent is not present in the observation
    table is to be sandboxed and dropped from the CDR.
"""
def __init__(self, project_id, dataset_id, sandbox_dataset_id):
"""
Initialize the class with proper information.
Set the issue numbers, description and affected datasets. As other tickets may affect
this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = (
            'All EHR data associated with a participant whose EHR consent is not present in the observation '
'table will be sandboxed and dropped from the CDR.')
super().__init__(issue_numbers=['DC1644'],
description=desc,
affected_datasets=[cdr_consts.COMBINED],
affected_tables=AFFECTED_TABLES,
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id)
def get_query_specs(self):
"""
Return a list of dictionary query specifications.
:return: A list of dictionaries. Each dictionary contains a single query
and a specification for how to execute that query. The specifications
are optional but the query is required.
"""
lookup_queries = []
sandbox_queries = []
drop_queries = []
unconsented_lookup_query = {
cdr_consts.QUERY:
UNCONSENTED_PID_QUERY.render(
project=self.project_id,
dataset=self.dataset_id,
sandbox_dataset=self.sandbox_dataset_id,
unconsented_lookup=EHR_UNCONSENTED_PARTICIPANTS_LOOKUP_TABLE
)
}
lookup_queries.append(unconsented_lookup_query)
for table in self.affected_tables:
sandbox_query = {
cdr_consts.QUERY:
SANDBOX_ROWS.render(
project=self.project_id,
dataset=self.dataset_id,
sandbox_dataset=self.sandbox_dataset_id,
sandbox_table=self.get_sandbox_tablenames(table),
domain_table=table,
unconsented_lookup=
EHR_UNCONSENTED_PARTICIPANTS_LOOKUP_TABLE,
mapping_domain_table=f'_mapping_{table}')
}
sandbox_queries.append(sandbox_query)
drop_query = {
cdr_consts.QUERY:
DROP_ROWS.render(
project=self.project_id,
dataset=self.dataset_id,
sandbox_dataset=self.sandbox_dataset_id,
domain_table=table,
sandbox_table=self.get_sandbox_tablenames(table))
}
drop_queries.append(drop_query)
return lookup_queries + sandbox_queries + drop_queries
def setup_rule(self, client):
"""
Function to run any data upload options before executing a query.
"""
try:
self.affected_tables = get_tables_in_dataset(
client, self.project_id, self.dataset_id, self.affected_tables)
except GoogleCloudError as error:
LOGGER.error(error)
raise
def setup_validation(self, client):
"""
Run required steps for validation setup
"""
raise NotImplementedError("Please fix me.")
def validate_rule(self, client):
"""
Validates the cleaning rule which deletes or updates the data from the tables
"""
raise NotImplementedError("Please fix me.")
def get_sandbox_tablenames(self, table_name):
"""
Generates sandbox table name for a given domain table
"""
return f'{self._issue_numbers[0].lower()}_{table_name}'
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ext_parser = parser.get_argument_parser()
ARGS = ext_parser.parse_args()
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(RemoveEhrDataWithoutConsent,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id, ARGS.cutoff_date,
[(RemoveEhrDataWithoutConsent,)])
```
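As a side note on the naming scheme above, here is a minimal sketch of how the per-table sandbox names are derived. The project/dataset ids are placeholders, and it is assumed the base class constructor simply records the identifiers and exposes `issue_numbers` as `self._issue_numbers`:
```python
# Hypothetical sketch, not part of the pipeline: illustrate get_sandbox_tablenames().
# 'my-project', 'my_dataset' and 'my_sandbox' are placeholder identifiers.
from cdr_cleaner.cleaning_rules.remove_ehr_data_without_consent import (
    RemoveEhrDataWithoutConsent)

rule = RemoveEhrDataWithoutConsent('my-project', 'my_dataset', 'my_sandbox')
print(rule.get_sandbox_tablenames('condition_occurrence'))
# expected output (issue number DC1644 lower-cased + table name):
# dc1644_condition_occurrence
```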
#### File: cdr_cleaner/cleaning_rules/round_ppi_values_to_nearest_integer.py
```python
import logging
import constants.cdr_cleaner.clean_cdr as cdr_consts
LOGGER = logging.getLogger(__name__)
ROUND_PPI_VALUES_QUERY = """
UPDATE
`{project}.{dataset}.observation`
SET
value_as_number = CAST(ROUND(value_as_number) AS INT64)
WHERE
observation_source_concept_id IN (1585889,
1585890,
1585795,
1585802,
1585820,
1585864,
1585870,
1585873,
1586159,
1586162)
AND value_as_number IS NOT NULL
"""
def get_round_ppi_values_queries(project_id,
dataset_id,
sandbox_dataset_id=None):
"""
    This function generates the query required to round the PPI numeric values to the nearest integer
:param project_id: Name of the project
:param dataset_id: Name of the dataset where the queries should be run
:param sandbox_dataset_id: Identifies the sandbox dataset to store rows
#TODO use sandbox_dataset_id for CR
:return:
"""
queries_list = []
query = dict()
query[cdr_consts.QUERY] = ROUND_PPI_VALUES_QUERY.format(dataset=dataset_id,
project=project_id)
queries_list.append(query)
return queries_list
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(get_round_ppi_values_queries,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
        clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
                                   ARGS.sandbox_dataset_id,
                                   [(get_round_ppi_values_queries,)])
```
#### File: data_steward/cdr_cleaner/reporter.py
```python
import csv
import logging
import os
from copy import copy
# Third party imports
from googleapiclient.errors import HttpError
from google.api_core.exceptions import BadRequest
from google.auth.exceptions import DefaultCredentialsError
# Project imports
import cdr_cleaner.args_parser as cleaning_parser
import cdr_cleaner.clean_cdr as control
import cdr_cleaner.clean_cdr_engine as engine
import constants.cdr_cleaner.clean_cdr as cdr_consts
import constants.cdr_cleaner.reporter as report_consts
LOGGER = logging.getLogger(__name__)
def parse_args(raw_args=None):
"""
Parse command line arguments for the cdr_cleaner package reporting utility.
:param raw_args: The argument to parse, if passed as a list form another
module. If None, the command line is parsed.
:returns: a namespace object for the given arguments.
"""
parser = cleaning_parser.get_report_parser()
return parser.parse_args(raw_args)
def get_function_info(func, fields_list):
"""
For a function, provide the info that can be provided.
Defaults all requested fields to 'unknown' values. Adds a 'name' and
'module' field even if not requested to give more information in the clean
rules report. Adds the documentation string of the function only if a
description is requested.
:param func: The function that is part of the clean rules report.
:param fields_list: The list of fields the user requested.
:return: A dictionary of values representing the known and requested
fields for the given function.
"""
func_info = dict()
for field in fields_list:
func_info[field] = report_consts.UNKNOWN
if report_consts.NAME not in fields_list:
LOGGER.info(
f"Adding '{report_consts.NAME}' field to notify report reader this "
f"function ({func.__qualname__}), "
"needs to be implemented as a class.")
func_info[report_consts.NAME] = func.__name__
if report_consts.MODULE not in fields_list:
LOGGER.info(
f"Adding '{report_consts.MODULE}' field to notify report reader this "
f"function ({func.__qualname__}) "
"needs to be implemented as a class.")
func_info[report_consts.MODULE] = func.__module__
if report_consts.DESCRIPTION in fields_list:
func_info[report_consts.DESCRIPTION] = func.__doc__
return func_info
def get_stage_elements(data_stage, fields_list):
"""
Return the field info for rules defined for this data_stage.
For the given data_stage, this will determine the values of the
requested fields. This information will be returned as a list
of dictionaries to preserve the information order. Each dictionary
contains values for the report for a single cleaning rule.
:param data_stage: The data stage to report for.
:param fields_list: The user defined fields to report back.
:returns: a list of dictionaries representing the requested fields.
"""
report_rows = []
for rule in control.DATA_STAGE_RULES_MAPPING.get(data_stage, []):
rule_info = {}
try:
clazz = rule[0]
instance = clazz('foo', 'bar', 'baz')
LOGGER.info(f"{clazz} ducktyped to a class")
except (RuntimeError, TypeError, HttpError, BadRequest,
DefaultCredentialsError):
LOGGER.info(f"{rule} did NOT ducktype to a class")
rule_info = get_function_info(clazz, fields_list)
report_rows.append(rule_info)
else:
try:
# this is a class
_ = instance.get_query_specs()
LOGGER.info(f"{clazz} is a class")
for field in fields_list:
try:
value = 'NO DATA'
if field in report_consts.FIELDS_PROPERTIES_MAP:
func = report_consts.FIELDS_PROPERTIES_MAP[field]
value = getattr(instance, func, 'no data')
elif field in report_consts.FIELDS_METHODS_MAP:
func = report_consts.FIELDS_METHODS_MAP[field]
value = getattr(instance, func, 'no data')()
elif field in report_consts.CLASS_ATTRIBUTES_MAP:
func = report_consts.CLASS_ATTRIBUTES_MAP[field]
value = None
for item in func.split('.'):
if not value:
value = getattr(instance, item)
else:
value = getattr(value, item)
rule_info[field] = value
except AttributeError:
# an error occurred trying to access an expected attribute.
# did the base class definition change recently?
LOGGER.exception(
f'An error occurred trying to get the value for {field}'
)
rule_info[field] = report_consts.UNKNOWN
except (TypeError, AttributeError):
# an error occurred indicating this is not a rule extending the
# base cleaning rule. provide the info we can and move on.
LOGGER.exception(f'{clazz} is not a class')
# this is a function
rule_info = get_function_info(clazz, fields_list)
report_rows.append(rule_info)
return report_rows
def separate_sql_statements(unformatted_values):
"""
Separate SQL statements into items with other identical fields.
This must maintain the SQL statement order. For example, if the user
requests the fields 'name module sql', the input for this function will be
    a list of dictionaries where each dictionary will have the keys,
'{name: <value>, module: <value>, sql: "unknown" or [{dictionary attributes}]}'.
    The purpose of this function is to break up the 'sql' values (because a
cleaning rule may contain more than one sql statement) and copy the other
field values.
For example, the following input:
[{'name': 'foo', 'sql':[{'query': 'q1',...},{'query': 'q2'...}]}]
Should be formatted as:
    [{'name': 'foo', 'sql': 'q1'},
     {'name': 'foo', 'sql': 'q2'}]
"""
formatted_values = []
for rule_values in unformatted_values:
sql_list = []
# gather the queries as a list
sql_value = rule_values.get(report_consts.SQL, [])
for query_dict in sql_value:
try:
sql_list.append(
query_dict.get(report_consts.QUERY, report_consts.UNKNOWN))
except AttributeError:
if sql_value == report_consts.UNKNOWN:
sql_list.append(report_consts.UNKNOWN)
break
else:
raise
if sql_list:
# generate a dictionary for each query
for query in sql_list:
# get a fresh copy for each rule.
separated = copy(rule_values)
separated[report_consts.SQL] = query.strip()
formatted_values.append(separated)
else:
separated = copy(rule_values)
separated[report_consts.SQL] = report_consts.UNKNOWN
formatted_values.append(separated)
return formatted_values
def format_values(rules_values, fields_list):
"""
Format the fields' values for input to the DictWriter.
This formats fields whose values are lists as joined strings.
    If the sql field is chosen, each sql statement is separated into its own row.
:param rules_values: The list of dictionaries containing field/value pairs for
each field specified via arguments for each cleaning rule.
"""
formatted_values = []
if report_consts.SQL in fields_list:
LOGGER.debug("SQL field exists")
rules_values = separate_sql_statements(rules_values)
for rule_values in rules_values:
field_values = {}
for field, value in rule_values.items():
if isinstance(value, list):
try:
value = ', '.join(value)
except TypeError:
LOGGER.exception(f"erroneous field is {field}\n"
f"erroneous value is {value}")
raise
field_values[field] = value
formatted_values.append(field_values)
return formatted_values
def check_field_list_validity(fields_list, required_fields_dict):
"""
Helper function to create a valid fields list for writing the CSV file.
The CSV writer is a dictionary writer.
:param fields_list: list of fields provided via the parse arguments
        command. These are user requested fields.
:param required_fields_dict: The list of dictionaries that are actually
generated by the get_stage_elements function. It may have some
additional fields that are not user specified.
:returns: a list of fields that should be written to the csv file
"""
known_fields = set()
for value_dict in required_fields_dict:
keys = value_dict.keys()
known_fields.update(keys)
final_fields = [field for field in fields_list]
for field in known_fields:
if field not in fields_list:
final_fields.append(field)
return final_fields
def write_csv_report(output_filepath, stages_list, fields_list):
"""
Write a csv file for the indicated stages and fields.
:param output_filepath: the filepath of a csv file.
:param stages_list: a list of strings indicating the data stage to
report for. Should match to a stage value in
curation/data_steward/constants/cdr_cleaner/clean_cdr.py DataStage.
:param fields_list: a list of string fields that will be added to the
csv file.
"""
if not output_filepath.endswith('.csv'):
raise RuntimeError(f"This file is not a csv file: {output_filepath}.")
required_fields_dict = [{}]
output_list = [{}]
for stage in stages_list:
# get the fields and values
required_fields_dict = get_stage_elements(stage, fields_list)
# format dictionaries for writing
required_fields_dict = format_values(required_fields_dict, fields_list)
output_list.extend(required_fields_dict)
fields_list = check_field_list_validity(fields_list, output_list)
# write the contents to a csv file
with open(output_filepath, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile,
fields_list,
delimiter=',',
lineterminator=os.linesep,
quoting=csv.QUOTE_ALL)
writer.writeheader()
for info in output_list:
writer.writerow(info)
def main(raw_args=None):
"""
Entry point for the clean rules reporter module.
If you provide a list of arguments and settings, these will be parsed.
If you leave this blank, the command line arguments are parsed. This allows
this module to be easily called from other python modules.
:param raw_args: The list of arguments to parse. Defaults to parsing the
command line.
"""
args = parse_args(raw_args)
engine.add_console_logging(args.console_log)
if cdr_consts.DataStage.UNSPECIFIED.value in args.data_stage:
args.data_stage = [
s.value
for s in cdr_consts.DataStage
if s is not cdr_consts.DataStage.UNSPECIFIED
]
LOGGER.info(
f"Data stage was {cdr_consts.DataStage.UNSPECIFIED.value}, so all stages "
f"will be reported on: {args.data_stage}")
write_csv_report(args.output_filepath, args.data_stage, args.fields)
LOGGER.info("Finished the reporting module")
if __name__ == '__main__':
# run as main
main()
```
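The reporter can also be driven programmatically rather than through the CLI; a minimal sketch, assuming the stage and field names below are valid values from the constants modules (they are placeholders here):
```python
# Hypothetical sketch: build a small clean-rules report without the command line.
# The stage name 'rdr' and the field names are assumptions; real values come from
# constants.cdr_cleaner.clean_cdr.DataStage and constants.cdr_cleaner.reporter.
from cdr_cleaner import reporter

reporter.write_csv_report(
    'clean_rules_report.csv',     # must end with .csv or a RuntimeError is raised
    ['rdr'],                      # data stages to report on (assumed value)
    ['name', 'module', 'sql'])    # requested fields (assumed values)
```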
#### File: data_steward/tools/clean_project_datasets.py
```python
import argparse
import logging
import sys
# Third party imports
from googleapiclient.errors import HttpError
# Project imports
from utils import bq
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
def get_datasets_with_substrings(datasets_list, name_substrings):
"""
Filters list of datasets with specified substrings (e.g. github usernames) in them
:param datasets_list: list of dataset_ids
:param name_substrings: identifies substrings that help identify datasets to delete
:return: list of dataset_ids with any substring in their dataset_id
"""
datasets_with_substrings = []
for dataset in datasets_list:
if any(name_substring in dataset for name_substring in name_substrings):
datasets_with_substrings.append(dataset)
return datasets_with_substrings
def delete_datasets(project_id, datasets_to_delete_list):
"""
Deletes datasets using their dataset_ids
:param project_id: identifies the project
:param datasets_to_delete_list: list of dataset_ids to delete
:return:
"""
failed_to_delete = []
for dataset in datasets_to_delete_list:
try:
bq.delete_dataset(project_id, dataset)
logging.info(f'Deleted dataset {dataset}')
except HttpError:
logging.exception(f'Could not delete dataset {dataset}')
failed_to_delete.append(dataset)
logging.info(
f'The following datasets could not be deleted: {failed_to_delete}')
def run_deletion(project_id, name_substrings):
"""
Deletes datasets from project containing any of the name_substrings
:param project_id: identifies the project
:param name_substrings: Identifies substrings that help identify datasets to delete
:return:
"""
all_datasets = [
dataset.dataset_id for dataset in bq.list_datasets(project_id)
]
datasets_with_substrings = get_datasets_with_substrings(
all_datasets, name_substrings)
logging.info(f'Datasets marked for deletion: {datasets_with_substrings}')
logging.info('Proceed?')
response = get_response()
if response == "Y":
delete_datasets(project_id, datasets_with_substrings)
elif response.lower() == "n":
logging.info("Aborting deletion")
# Make sure user types Y to proceed
def get_response():
"""Return input from user denoting yes/no"""
prompt_text = 'Please press Y/n\n'
response = input(prompt_text)
while response not in ('Y', 'n', 'N'):
response = input(prompt_text)
return response
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=
'Deletes datasets containing specific strings in the dataset_id.',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-p',
'--project_id',
action='store',
dest='project_id',
help='Identifies the project to delete datasets from',
required=True)
parser.add_argument(
'-n',
'--name_substrings',
nargs='+',
dest='name_substrings',
help='Identifies substrings that help identify datasets to delete. '
        'A dataset containing any of these substrings within its dataset_id will be deleted. ',
required=True)
args = parser.parse_args()
run_deletion(args.project_id, args.name_substrings)
```
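For completeness, the same deletion flow can be invoked from Python instead of the command line; a minimal sketch with a placeholder project id and substrings:
```python
# Hypothetical sketch: remove datasets whose dataset_id contains a given substring.
# 'my-test-project' and the substrings below are placeholder values.
from tools import clean_project_datasets

clean_project_datasets.run_deletion(
    project_id='my-test-project',
    name_substrings=['jdoe', 'old_branch'])
# run_deletion() lists the project's datasets, filters them with
# get_datasets_with_substrings(), and only deletes after the interactive
# Y/n confirmation returned by get_response().
```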
#### File: data_steward/utils/slack_alerts.py
```python
import sys
import os
import logging
# Third party imports
import slack
from slack.errors import SlackApiError
# environment variable names
SLACK_TOKEN = 'SLACK_TOKEN'
SLACK_CHANNEL = 'SLACK_CHANNEL'
UNSET_SLACK_TOKEN_MSG = 'Slack token not set in environment variable %s' % SLACK_TOKEN
UNSET_SLACK_CHANNEL_MSG = 'Slack channel not set in environment variable %s' % SLACK_CHANNEL
class SlackConfigurationError(RuntimeError):
"""
Raised when the required slack variables are not properly configured
"""
def __init__(self, msg):
super(SlackConfigurationError, self).__init__(msg)
self.msg = msg
def _get_slack_token():
"""
Get the token used to interact with the Slack API
:raises:
SlackConfigurationError: token is not configured
:return: configured Slack API token as str
"""
if SLACK_TOKEN not in os.environ.keys():
raise SlackConfigurationError(UNSET_SLACK_TOKEN_MSG)
return os.environ[SLACK_TOKEN]
def _get_slack_channel_name():
"""
Get name of the Slack channel to post notifications to
:raises:
SlackConfigurationError: channel name is not configured
:return: the configured Slack channel name as str
"""
if SLACK_CHANNEL not in os.environ.keys():
raise SlackConfigurationError(UNSET_SLACK_CHANNEL_MSG)
return os.environ[SLACK_CHANNEL]
def is_channel_available():
"""
Test if the Slack channel is available
:return:
"""
try:
client = _get_slack_client()
channel_name = _get_slack_channel_name()
response = client.conversations_list(limit=sys.maxsize)
if response.status_code == 200:
for channel in response.data['channels']:
if channel['name'] == channel_name:
return True
except (SlackConfigurationError, SlackApiError) as e:
# if the environment variables are missing or the slack api failed to identify the channel
logging.error(e)
return False
def _get_slack_client():
"""
Get web client for Slack
:return: WebClient object to communicate with Slack
"""
slack_token = _get_slack_token()
return slack.WebClient(slack_token)
def post_message(text):
"""
Post a system notification
:param text: the message to post
:return:
"""
slack_client = _get_slack_client()
slack_channel_name = _get_slack_channel_name()
return slack_client.chat_postMessage(channel=slack_channel_name,
text=text,
verify=False)
def log_event_factory(job_name=None):
# TODO: This is only a temporary solution. The problem is that slack_logging_handler is
# set to the WARNING level, calling logging.info would not send the message to the slack
# channel. The reason we set it to WARNING is that we don't want to flood the slack
# channel with trivial information and only want to get logs at the warning level and
# above. We need to replace the below code with the unified logging infrastructure in
# the future
def log_event(func):
display_job_name = job_name if job_name else func.__name__
start_message = f'The {display_job_name} job has started.'
end_message = f'The {display_job_name} job has completed successfully.'
def wrapper(*args, **kwargs):
try:
post_message(start_message)
logging.info(start_message)
returned_val = func(*args, **kwargs)
post_message(end_message)
logging.info(end_message)
return returned_val
except (SlackConfigurationError, SlackApiError) as e:
# if the environment variables are missing or the slack api failed to identify the
# channel
logging.exception(
f'Slack is not configured for posting messages, refer to playbook. {e}'
)
raise
return wrapper
return log_event
```
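A minimal usage sketch of the decorator factory above; the job name and wrapped function are hypothetical, and SLACK_TOKEN / SLACK_CHANNEL are assumed to be set in the environment:
```python
# Hypothetical sketch: post start/end notifications for a pipeline step to Slack.
from utils.slack_alerts import log_event_factory

@log_event_factory(job_name='vocabulary load')   # placeholder job name
def load_vocabulary():
    # ... real pipeline work would go here ...
    return 'done'

# Calling the wrapped function posts "The vocabulary load job has started." and,
# on success, "The vocabulary load job has completed successfully."
result = load_vocabulary()
```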
#### File: data_steward/tools/load_vocab_test.py
```python
import os
import shutil
import tempfile
import unittest
from pathlib import Path
import mock
from google.cloud import storage, bigquery
import app_identity
from common import CONCEPT, VOCABULARY
from tests.test_util import TEST_VOCABULARY_PATH
from tools import load_vocab as lv
class LoadVocabTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.project_id = app_identity.get_application_id()
self.dataset_id = os.environ.get('UNIONED_DATASET_ID')
self.staging_dataset_id = f'{self.dataset_id}_staging'
self.bucket = os.environ.get('BUCKET_NAME_FAKE')
self.bq_client = bigquery.Client(project=self.project_id)
self.gcs_client = storage.Client(project=self.project_id)
self.test_vocabs = [CONCEPT, VOCABULARY]
# copy files to temp dir where safe to modify
self.test_vocab_folder_path = Path(tempfile.mkdtemp())
for vocab in self.test_vocabs:
filename = lv._table_name_to_filename(vocab)
file_path = os.path.join(TEST_VOCABULARY_PATH, filename)
shutil.copy(file_path, self.test_vocab_folder_path)
# mock dataset_properties_from_file
# using the default properties
dataset = self.bq_client.create_dataset(
f'{self.project_id}.{self.dataset_id}', exists_ok=True)
mock_dataset_properties_from_file = mock.patch(
'tools.load_vocab.dataset_properties_from_file')
self.mock_bq_query = mock_dataset_properties_from_file.start()
self.mock_bq_query.return_value = {
'access_entries': dataset.access_entries
}
self.addCleanup(mock_dataset_properties_from_file.stop)
@mock.patch('tools.load_vocab.VOCABULARY_TABLES', [CONCEPT, VOCABULARY])
def test_upload_stage(self):
lv.main(self.project_id, self.bucket, self.test_vocab_folder_path,
self.dataset_id, 'fake_dataset_props.json')
expected_row_count = {CONCEPT: 101, VOCABULARY: 52}
for dataset in [self.staging_dataset_id, self.dataset_id]:
for vocab in self.test_vocabs:
content_query = f'SELECT * FROM `{self.project_id}.{dataset}.{vocab}`'
content_job = self.bq_client.query(content_query)
rows = content_job.result()
self.assertEqual(len(list(rows)), expected_row_count[vocab])
def tearDown(self) -> None:
# Delete files using a single API request
bucket = self.gcs_client.bucket(self.bucket)
blob_names = [lv._table_name_to_filename(t) for t in self.test_vocabs]
bucket.delete_blobs(blob_names)
# Drop tables using a single API request
drop_tables_query = '\n'.join([
f'''DROP TABLE IF EXISTS `{self.project_id}.{dataset}.{table}`;'''
for dataset in [self.dataset_id, self.staging_dataset_id]
for table in self.test_vocabs
])
self.bq_client.query(drop_tables_query).result()
# remove the temp dir
shutil.rmtree(self.test_vocab_folder_path)
```
#### File: data_steward/utils/sandbox_test.py
```python
import os
import unittest
import app_identity
# Project Imports
from utils import sandbox
from utils.bq import get_client, list_datasets, delete_dataset
class SandboxTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.project_id = app_identity.get_application_id()
self.dataset_id = os.environ.get('UNIONED_DATASET_ID')
self.sandbox_id = sandbox.get_sandbox_dataset_id(self.dataset_id)
# Removing any existing datasets that might interfere with the test
self.client = get_client(self.project_id)
self.client.delete_dataset(f'{self.project_id}.{self.sandbox_id}',
delete_contents=True,
not_found_ok=True)
def test_create_sandbox_dataset(self):
# Create sandbox dataset
sandbox_dataset = sandbox.create_sandbox_dataset(
self.project_id, self.dataset_id)
all_datasets_obj = list_datasets(self.project_id)
all_datasets = [d.dataset_id for d in all_datasets_obj]
self.assertTrue(sandbox_dataset in all_datasets)
# Try to create same sandbox, which now already exists
self.assertRaises(RuntimeError, sandbox.create_sandbox_dataset,
self.project_id, self.dataset_id)
# Remove fake dataset created in project
delete_dataset(self.project_id, sandbox_dataset)
```
#### File: data_steward/validation/achilles_test.py
```python
import os
import unittest
import bq_utils
import gcs_utils
import resources
import tests.test_util as test_util
from tests.test_util import FAKE_HPO_ID
from validation import achilles
import validation.sql_wrangle as sql_wrangle
# This may change if we strip out unused analyses
ACHILLES_LOOKUP_COUNT = 215
ACHILLES_RESULTS_COUNT = 2779
class AchillesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.hpo_bucket = gcs_utils.get_hpo_bucket(test_util.FAKE_HPO_ID)
test_util.empty_bucket(self.hpo_bucket)
test_util.delete_all_tables(bq_utils.get_dataset_id())
def tearDown(self):
test_util.delete_all_tables(bq_utils.get_dataset_id())
test_util.empty_bucket(self.hpo_bucket)
def _load_dataset(self):
for cdm_table in resources.CDM_TABLES:
cdm_file_name = os.path.join(test_util.FIVE_PERSONS_PATH,
cdm_table + '.csv')
if os.path.exists(cdm_file_name):
test_util.write_cloud_file(self.hpo_bucket, cdm_file_name)
else:
test_util.write_cloud_str(self.hpo_bucket, cdm_table + '.csv',
'dummy\n')
bq_utils.load_cdm_csv(FAKE_HPO_ID, cdm_table)
def test_load_analyses(self):
achilles.create_tables(FAKE_HPO_ID, True)
achilles.load_analyses(FAKE_HPO_ID)
cmd = sql_wrangle.qualify_tables(
'SELECT DISTINCT(analysis_id) FROM %sachilles_analysis' %
sql_wrangle.PREFIX_PLACEHOLDER, FAKE_HPO_ID)
result = bq_utils.query(cmd)
self.assertEqual(ACHILLES_LOOKUP_COUNT, int(result['totalRows']))
def test_run_analyses(self):
# Long-running test
self._load_dataset()
achilles.create_tables(FAKE_HPO_ID, True)
achilles.load_analyses(FAKE_HPO_ID)
achilles.run_analyses(hpo_id=FAKE_HPO_ID)
cmd = sql_wrangle.qualify_tables(
'SELECT COUNT(1) FROM %sachilles_results' %
sql_wrangle.PREFIX_PLACEHOLDER, FAKE_HPO_ID)
result = bq_utils.query(cmd)
self.assertEqual(int(result['rows'][0]['f'][0]['v']),
ACHILLES_RESULTS_COUNT)
```
#### File: data_steward/validation/export_test.py
```python
import json
import os
import unittest
import mock
import bq_utils
import common
import gcs_utils
from tests import test_util
from tests.test_util import FAKE_HPO_ID
from validation import export, main
BQ_TIMEOUT_RETRIES = 3
class ExportTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print(
'\n**************************************************************')
print(cls.__name__)
print('**************************************************************')
fake_bucket = gcs_utils.get_hpo_bucket(test_util.FAKE_HPO_ID)
dataset_id = bq_utils.get_dataset_id()
test_util.delete_all_tables(dataset_id)
test_util.get_synpuf_results_files()
test_util.populate_achilles(fake_bucket)
def setUp(self):
self.hpo_bucket = gcs_utils.get_hpo_bucket(FAKE_HPO_ID)
def _empty_bucket(self):
bucket_items = gcs_utils.list_bucket(self.hpo_bucket)
for bucket_item in bucket_items:
gcs_utils.delete_object(self.hpo_bucket, bucket_item['name'])
def _test_report_export(self, report):
data_density_path = os.path.join(export.EXPORT_PATH, report)
result = export.export_from_path(data_density_path, FAKE_HPO_ID)
return result
# TODO more strict testing of result payload. The following doesn't work because field order is random.
# actual_payload = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
# expected_path = os.path.join(test_util.TEST_DATA_EXPORT_SYNPUF_PATH, report + '.json')
# with open(expected_path, 'r') as f:
# expected_payload = f.read()
# self.assertEqual(actual_payload, expected_payload)
# return result
@mock.patch('validation.export.is_hpo_id')
def test_export_data_density(self, mock_is_hpo_id):
# INTEGRATION TEST
mock_is_hpo_id.return_value = True
export_result = self._test_report_export('datadensity')
expected_keys = [
'CONCEPTS_PER_PERSON', 'RECORDS_PER_PERSON', 'TOTAL_RECORDS'
]
for expected_key in expected_keys:
self.assertTrue(expected_key in export_result)
self.assertEqual(
len(export_result['TOTAL_RECORDS']['X_CALENDAR_MONTH']), 283)
@mock.patch('validation.export.is_hpo_id')
def test_export_person(self, mock_is_hpo_id):
# INTEGRATION TEST
mock_is_hpo_id.return_value = True
export_result = self._test_report_export('person')
expected_keys = [
'BIRTH_YEAR_HISTOGRAM', 'ETHNICITY_DATA', 'GENDER_DATA',
'RACE_DATA', 'SUMMARY'
]
for expected_key in expected_keys:
self.assertTrue(expected_key in export_result)
self.assertEqual(
len(export_result['BIRTH_YEAR_HISTOGRAM']['DATA']['COUNT_VALUE']),
72)
@mock.patch('validation.export.is_hpo_id')
def test_export_achillesheel(self, mock_is_hpo_id):
# INTEGRATION TEST
mock_is_hpo_id.return_value = True
export_result = self._test_report_export('achillesheel')
self.assertTrue('MESSAGES' in export_result)
self.assertEqual(len(export_result['MESSAGES']['ATTRIBUTENAME']), 14)
@mock.patch('validation.export.is_hpo_id')
def test_run_export(self, mock_is_hpo_id):
# validation/main.py INTEGRATION TEST
mock_is_hpo_id.return_value = True
folder_prefix = 'dummy-prefix-2018-03-24/'
main._upload_achilles_files(FAKE_HPO_ID, folder_prefix)
main.run_export(datasource_id=FAKE_HPO_ID, folder_prefix=folder_prefix)
bucket_objects = gcs_utils.list_bucket(self.hpo_bucket)
actual_object_names = [obj['name'] for obj in bucket_objects]
for report in common.ALL_REPORT_FILES:
prefix = folder_prefix + common.ACHILLES_EXPORT_PREFIX_STRING + FAKE_HPO_ID + '/'
expected_object_name = prefix + report
self.assertIn(expected_object_name, actual_object_names)
datasources_json_path = folder_prefix + common.ACHILLES_EXPORT_DATASOURCES_JSON
self.assertIn(datasources_json_path, actual_object_names)
datasources_json = gcs_utils.get_object(self.hpo_bucket,
datasources_json_path)
datasources_actual = json.loads(datasources_json)
datasources_expected = {
'datasources': [{
'name': FAKE_HPO_ID,
'folder': FAKE_HPO_ID,
'cdmVersion': 5
}]
}
self.assertDictEqual(datasources_expected, datasources_actual)
def test_run_export_without_datasource_id(self):
# validation/main.py INTEGRATION TEST
with self.assertRaises(RuntimeError):
main.run_export(datasource_id=None, target_bucket=None)
@mock.patch('validation.export.is_hpo_id')
def test_run_export_with_target_bucket_and_datasource_id(
self, mock_is_hpo_id):
# validation/main.py INTEGRATION TEST
mock_is_hpo_id.return_value = True
folder_prefix = 'dummy-prefix-2018-03-24/'
bucket_nyc = gcs_utils.get_hpo_bucket('nyc')
main.run_export(datasource_id=FAKE_HPO_ID,
folder_prefix=folder_prefix,
target_bucket=bucket_nyc)
bucket_objects = gcs_utils.list_bucket(bucket_nyc)
actual_object_names = [obj['name'] for obj in bucket_objects]
for report in common.ALL_REPORT_FILES:
prefix = folder_prefix + common.ACHILLES_EXPORT_PREFIX_STRING + FAKE_HPO_ID + '/'
expected_object_name = prefix + report
self.assertIn(expected_object_name, actual_object_names)
datasources_json_path = folder_prefix + common.ACHILLES_EXPORT_DATASOURCES_JSON
self.assertIn(datasources_json_path, actual_object_names)
datasources_json = gcs_utils.get_object(bucket_nyc,
datasources_json_path)
datasources_actual = json.loads(datasources_json)
datasources_expected = {
'datasources': [{
'name': FAKE_HPO_ID,
'folder': FAKE_HPO_ID,
'cdmVersion': 5
}]
}
self.assertDictEqual(datasources_expected, datasources_actual)
def tearDown(self):
self._empty_bucket()
bucket_nyc = gcs_utils.get_hpo_bucket('nyc')
test_util.empty_bucket(bucket_nyc)
@classmethod
def tearDownClass(cls):
dataset_id = bq_utils.get_dataset_id()
test_util.delete_all_tables(dataset_id)
```
#### File: cdr_cleaner/cleaning_rules/update_family_history_qa_codes_test.py
```python
import unittest
import constants.cdr_cleaner.clean_cdr as cdr_consts
from cdr_cleaner.cleaning_rules import update_family_history_qa_codes as family_history
class UpdateFamilyHistory(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.dataset_id = 'dataset_id'
self.project_id = 'project_id'
def test_get_update_family_history_qa_queries(self):
actual_dict = family_history.get_update_family_history_qa_queries(
self.project_id, self.dataset_id)
actual = actual_dict[0][cdr_consts.QUERY]
expected = family_history.UPDATE_FAMILY_HISTORY_QUERY.format(
project_id=self.project_id, dataset_id=self.dataset_id)
self.assertEqual(expected, actual)
``` |
{
"source": "jp3cyc/jam19_underwater_balloon_detect",
"score": 3
} |
#### File: jp3cyc/jam19_underwater_balloon_detect/detect_object.py
```python
import cv2
import numpy as np
# Detect objects in the given image (path) and output images of their bounding rectangles
def detect_contour(path):
    # Load the image
src = cv2.imread(path, cv2.IMREAD_COLOR)
hsvLower = np.array([90,100,100])
hsvUpper = np.array([150,255,255])
hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
hsvMask = cv2.inRange(hsv, hsvLower, hsvUpper)
srcMasked = cv2.bitwise_and(src, src, mask = hsvMask)
cv2.imshow('output1', srcMasked)
    # Convert to a grayscale image
gray = cv2.cvtColor(srcMasked, cv2.COLOR_BGR2GRAY)
    # Binarize
retval, bw = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # Extract contours
    # contours : [region][point no][0][x=0, y=1]
    # cv2.CHAIN_APPROX_NONE: keep intermediate points
    # cv2.CHAIN_APPROX_SIMPLE: discard intermediate points
contours, hierarchy = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    # Number of detected rectangles (starts at 0)
detect_count = 0
    # Process each contour
for i in range(0, len(contours)):
        # Compute the contour area
area = cv2.contourArea(contours[i])
        # Exclude noise (too-small regions) and the whole-image contour (too-large regions)
if area < 1e4 : #or 1e8 < area:
continue
        # Bounding rectangle
if len(contours[i]) > 0:
rect = contours[i]
x, y, w, h = cv2.boundingRect(rect)
cv2.rectangle(src, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # Save an image crop for each bounding rectangle
cv2.imwrite('photos/' + str(detect_count) + '.jpg', src[y:y + h, x:x + w])
detect_count = detect_count + 1
    # Display the image with bounding rectangles drawn
cv2.imshow('output', src)
cv2.waitKey(0)
    # Cleanup
cv2.destroyAllWindows()
if __name__ == '__main__':
detect_contour('photos/RGB.bmp')
``` |
{
"source": "jp43/DockingToolBox",
"score": 2
} |
#### File: DockingToolBox/dockbox/dock.py
```python
import os
import sys
import method
import shutil
import subprocess
from glob import glob
from mdkit.amber import ambertools
from mdkit.utility import reader
from mdkit.utility import mol2
from mdkit.utility import utils
required_programs = ['chimera', 'dms', 'sphgen_cpp', 'sphere_selector', 'showbox', 'grid', 'dock6']
default_settings = {'probe_radius': '1.4', 'minimum_sphere_radius': '1.4', 'maximum_sphere_radius': '4.0', \
'grid_spacing': '0.3', 'extra_margin': '2.0', 'attractive_exponent': '6', 'repulsive_exponent': '12', \
'max_orientations': '10000', 'num_scored_conformers': '5000', 'nposes': '20', 'charge_method': 'gas', 'rmsd': '2.0', 'grid_dir': None}
class Dock(method.DockingMethod):
def __init__(self, instance, site, options):
super(Dock, self).__init__(instance, site, options)
self.options['center'] = '\"' + ' '.join(map(str.strip, site[1].split(','))) + '\"' # set box center
self.options['site'] = site[0]
# set box size
self.options['boxsize'] = map(float, map(str.strip, site[2].split(',')))
self.options['sphgen_radius'] = str(max(self.options['boxsize'])/2)
if self.options['site'] is None:
self.options['dockdir'] = 'dock'
else:
self.options['dockdir'] = 'dock.' + self.options['site']
def write_rescoring_script(self, filename, file_r, files_l):
"""Rescore using DOCK6 grid scoring function"""
locals().update(self.options)
self.write_script_ligand_prep()
# cat mol2 files into a single mol2
file_all_poses = 'poses.mol2'
if self.options['charge_method']:
amber_version = utils.check_amber_version()
ambertools.run_antechamber(files_l[0], 'pose-1.mol2', at='sybyl', c=self.options['charge_method'], version=amber_version)
else:
shutil.copyfile(files_l[0], 'pose-1.mol2')
for idx, file_l in enumerate(files_l):
if idx > 0:
if self.options['charge_method']:
# if not first one, do not regenerate the charges, copy charges generated the first time
coords_l = mol2.get_coordinates(file_l)
struct = mol2.Reader('pose-1.mol2').next()
struct = mol2.replace_coordinates(struct, coords_l)
mol2.Writer().write('pose-%i.mol2'%(idx+1), struct)
else:
shutil.copyfile(file_l, 'pose-%i.mol2'%(idx+1))
subprocess.check_output("cat pose-%i.mol2 >> %s"%(idx+1, file_all_poses), shell=True)
if idx > 0:
os.remove('pose-%i.mol2'%(idx+1))
script ="""#!/bin/bash
set -e
# shift ligand coordinates
python prepare_ligand_dock.py pose-1.mol2 pose-1-centered.mol2 %(center)s\n"""%locals()
if self.options['grid_dir'] is None:
script += """\n# remove hydrogens from target
echo "delete element.H
write format pdb #0 target_noH.pdb" > removeH.cmd
chimera --nogui %(file_r)s removeH.cmd
rm -rf removeH.cmd
# prepare receptor (add missing h, add partial charges,...)
echo "import chimera
from DockPrep import prep
models = chimera.openModels.list(modelTypes=[chimera.Molecule])
prep(models)
from WriteMol2 import writeMol2
writeMol2(models, 'target.mol2')" > dockprep.py
chimera --nogui %(file_r)s dockprep.py
# generating receptor surface
dms target_noH.pdb -n -w %(probe_radius)s -v -o target_noH.ms
# generating spheres
echo "target_noH.ms
R
X
0.0
%(maximum_sphere_radius)s
%(minimum_sphere_radius)s
target_noH_site.sph" > INSPH
sphgen_cpp
# selecting spheres within a user-defined radius (sphgen_radius)
sphere_selector target_noH_site.sph pose-1-centered.mol2 %(sphgen_radius)s
# create box - the second argument in the file showbox.in
# is the extra margin to also be enclosed to the box (angstroms)
echo "Y
%(extra_margin)s
selected_spheres.sph
1
target_noH_box.pdb" > showbox.in
showbox < showbox.in
dock6path=`which dock6`
vdwfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/vdw_AMBER_parm99.defn'"`
flexfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/flex.defn'"`
flexdfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/flex_drive.tbl'"`
# create grid
echo "compute_grids yes
grid_spacing %(grid_spacing)s
output_molecule no
contact_score yes
energy_score yes
energy_cutoff_distance 9999
atom_model a
attractive_exponent %(attractive_exponent)s
repulsive_exponent %(repulsive_exponent)s
distance_dielectric yes
dielectric_factor 4
bump_filter yes
bump_overlap 0.75
receptor_file target.mol2
box_file target_noH_box.pdb
vdw_definition_file $vdwfile
score_grid_prefix grid
contact_cutoff_distance 4.5" > grid.in
grid -i grid.in\n"""%locals()
else:
# get directory where grid files are located
grid_prefix = self.options['grid_dir'] + '/' + self.options['dockdir'] + '/grid'
# check if grid file exists
if os.path.isfile(grid_prefix+'.in'):
# copy grid files to avoid opening the same file from multiple locations
for gridfile in glob(grid_prefix+'*'):
basename = os.path.basename(gridfile)
shutil.copyfile(gridfile, basename)
else:
raise ValueError('No grid file detected in specified location %s'%self.options['grid_dir'])
script += """\ndock6path=`which dock6`
vdwfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/vdw_AMBER_parm99.defn'"`
flexfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/flex.defn'"`
flexdfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/flex_drive.tbl'"`\n"""
script += """\necho "ligand_atom_file %(file_all_poses)s
limit_max_ligands no
skip_molecule no
read_mol_solvation no
calculate_rmsd no
use_database_filter no
orient_ligand no
use_internal_energy yes
internal_energy_rep_exp 12
flexible_ligand no
bump_filter no
score_molecules yes
contact_score_primary no
contact_score_secondary no
grid_score_primary yes
grid_score_secondary no
grid_score_rep_rad_scale 1
grid_score_vdw_scale 1
grid_score_es_scale 1
grid_score_grid_prefix grid
multigrid_score_secondary no
dock3.5_score_secondary no
continuous_score_secondary no
descriptor_score_secondary no
gbsa_zou_score_secondary no
gbsa_hawkins_score_secondary no
SASA_descriptor_score_secondary no
amber_score_secondary no
minimize_ligand no
atom_model all
vdw_defn_file $vdwfile
flex_defn_file $flexfile
flex_drive_file $flexdfile
ligand_outfile_prefix poses_out
write_orientations no
num_scored_conformers 1
rank_ligands no" > dock6.in
dock6 -i dock6.in > dock.out\n"""%locals()
# write DOCK6 rescoring script
with open(filename, 'w') as ff:
ff.write(script)
def write_docking_script(self, filename, file_r, file_l):
"""Dock using DOCK6 flexible docking with grid scoring as primary score"""
locals().update(self.options)
self.write_script_ligand_prep()
if self.options['charge_method']:
amber_version = utils.check_amber_version()
ambertools.run_antechamber(file_l, 'ligand-ref.mol2', at='sybyl', c=self.options['charge_method'], version=amber_version)
else:
shutil.copyfile(file_l, 'ligand-ref.mol2')
script ="""#!/bin/bash
set -e
# shift ligand coordinates
python prepare_ligand_dock.py ligand-ref.mol2 ligand-ref-centered.mol2 %(center)s\n"""%locals()
if self.options['grid_dir'] is None:
script += """\n# remove hydrogens from target
echo "delete element.H
write format pdb #0 target_noH.pdb" > removeH.cmd
chimera --nogui %(file_r)s removeH.cmd
rm -rf removeH.cmd
# prepare receptor (add missing h, add partial charges,...)
echo "import chimera
from DockPrep import prep
models = chimera.openModels.list(modelTypes=[chimera.Molecule])
prep(models)
from WriteMol2 import writeMol2
writeMol2(models, 'target.mol2')" > dockprep.py
chimera --nogui %(file_r)s dockprep.py
# generating receptor surface
dms target_noH.pdb -n -w %(probe_radius)s -v -o target_noH.ms
# generating spheres
echo "target_noH.ms
R
X
0.0
%(maximum_sphere_radius)s
%(minimum_sphere_radius)s
target_noH_site.sph" > INSPH
sphgen_cpp
# selecting spheres within a user-defined radius (sphgen_radius)
sphere_selector target_noH_site.sph ligand-ref-centered.mol2 %(sphgen_radius)s
# create box - the second argument in the file showbox.in
# is the extra margin to also be enclosed to the box (angstroms)
echo "Y
%(extra_margin)s
selected_spheres.sph
1
target_noH_box.pdb" > showbox.in
showbox < showbox.in
dock6path=`which dock6`
vdwfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/vdw_AMBER_parm99.defn'"`
flexfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/flex.defn'"`
flexdfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/flex_drive.tbl'"`
# create grid
echo "compute_grids yes
grid_spacing %(grid_spacing)s
output_molecule no
contact_score yes
energy_score yes
energy_cutoff_distance 9999
atom_model a
attractive_exponent %(attractive_exponent)s
repulsive_exponent %(repulsive_exponent)s
distance_dielectric yes
dielectric_factor 4
bump_filter yes
bump_overlap 0.75
receptor_file target.mol2
box_file target_noH_box.pdb
vdw_definition_file $vdwfile
score_grid_prefix grid
contact_cutoff_distance 4.5" > grid.in
grid -i grid.in
# create box - the second argument in the file showbox.in
# is the extra margin to also be enclosed to the box (angstroms)
echo "Y
%(extra_margin)s
selected_spheres.sph
1
target_noH_box.pdb" > showbox.in
showbox < showbox.in\n"""%locals()
else:
# get directory where grid files are located
grid_prefix = self.options['grid_dir'] + '/' + self.options['dockdir'] + '/grid'
# check if grid file exists
if os.path.isfile(grid_prefix+'.in'):
# copy grid files to avoid opening the same file from multiple locations
for gridfile in glob(grid_prefix+'*'):
basename = os.path.basename(gridfile)
shutil.copyfile(gridfile, basename)
else:
raise ValueError('No grid file detected in specified location %s'%self.options['grid_dir'])
sphfile = self.options['grid_dir'] + '/' + self.options['dockdir'] + '/selected_spheres.sph'
# check if sphere file exists
if os.path.isfile(sphfile):
shutil.copyfile(sphfile, 'selected_spheres.sph')
else:
raise ValueError('No selected_spheres.sph file detected in specified location %s'%self.options['grid_dir'])
script += """\ndock6path=`which dock6`
vdwfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/vdw_AMBER_parm99.defn'"`
flexfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/flex.defn'"`
flexdfile=`python -c "print '/'.join('$dock6path'.split('/')[:-2]) + '/parameters/flex_drive.tbl'"`\n"""
script += """\n# flexible docking using grid score as primary score and no secondary score
echo "ligand_atom_file ligand-ref-centered.mol2
limit_max_ligands no
skip_molecule no
read_mol_solvation no
calculate_rmsd no
use_database_filter no
orient_ligand yes
automated_matching yes
receptor_site_file selected_spheres.sph
max_orientations %(max_orientations)s
critical_points no
chemical_matching no
use_ligand_spheres no
use_internal_energy yes
internal_energy_rep_exp 12
flexible_ligand yes
user_specified_anchor no
limit_max_anchors no
min_anchor_size 5
pruning_use_clustering yes
pruning_max_orients 1000
pruning_clustering_cutoff 100
pruning_conformer_score_cutoff 100
use_clash_overlap yes
clash_overlap 0.5
write_growth_tree no
bump_filter yes
bump_grid_prefix grid
max_bumps_anchor 12
max_bumps_growth 12
score_molecules yes
contact_score_primary no
contact_score_secondary no
grid_score_primary yes
grid_score_secondary no
grid_score_rep_rad_scale 1
grid_score_vdw_scale 1
grid_score_es_scale 1
grid_score_grid_prefix grid
multigrid_score_secondary no
dock3.5_score_secondary no
continuous_score_secondary no
descriptor_score_secondary no
gbsa_zou_score_secondary no
gbsa_hawkins_score_secondary no
SASA_descriptor_score_secondary no
pbsa_score_secondary no
amber_score_secondary no
minimize_ligand yes
minimize_anchor yes
minimize_flexible_growth yes
use_advanced_simplex_parameters no
simplex_max_cycles 1
simplex_score_converge 0.1
simplex_cycle_converge 1.0
simplex_trans_step 1.0
simplex_rot_step 0.1
simplex_tors_step 10.0
simplex_anchor_max_iterations 1000
simplex_grow_max_iterations 1000
simplex_grow_tors_premin_iterations 0
simplex_random_seed 0
simplex_restraint_min no
atom_model all
vdw_defn_file $vdwfile
flex_defn_file $flexfile
flex_drive_file $flexdfile
ligand_outfile_prefix poses_out
write_orientations no
num_scored_conformers %(num_scored_conformers)s
write_conformations no
cluster_conformations yes
cluster_rmsd_threshold %(rmsd)s
rank_ligands no" > dock6.in
dock6 -i dock6.in\n"""%locals()
# write DOCK6 script
with open(filename, 'w') as ff:
ff.write(script)
def extract_docking_results(self, file_s, input_file_r, input_file_l):
# save scores
if os.path.isfile('poses_out_scored.mol2'):
with open('poses_out_scored.mol2', 'r') as ffin:
with open(file_s, 'w') as ffout:
idx = 0
for line in ffin:
if line.startswith('########## Grid Score:'):
ffout.write(line.split()[3]+'\n')
idx += 1
if idx == int(self.options['nposes']):
break
# create multiple mol2 files
ligname = reader.open('poses_out_scored.mol2').ligname
mol2.update_mol2file('poses_out_scored.mol2', 'pose-.mol2', ligname=ligname, multi=True, last=int(self.options['nposes']))
else:
open(file_s, 'w').close()
def extract_rescoring_results(self, filename, nligands=None):
with open(filename, 'a') as ff:
with open('dock.out', 'r') as outf:
for line in outf:
if line.strip().startswith('Grid Score:'):
line_s = line.split()
if len(line_s) > 2:
ff.write(line.split()[2]+'\n')
else:
ff.write('NaN\n')
elif line.strip().startswith('ERROR: Conformation could not be scored.'):
ff.write('NaN\n')
def write_script_ligand_prep(self):
with open('prepare_ligand_dock.py', 'w') as ff:
script ="""import os
import sys
import numpy as np
import shutil
from mdkit.utility import utils
from mdkit.utility import mol2
# read mol2 file
mol2file = sys.argv[1]
new_mol2file = sys.argv[2]
center = map(float,(sys.argv[3]).split())
coords = np.array(mol2.get_coordinates(mol2file))
cog = utils.center_of_geometry(coords)
coords = coords - (cog - center)
idx = 0
with open(new_mol2file, 'w') as nmol2f:
with open(mol2file, 'r') as mol2f:
is_structure = False
for line in mol2f:
if line.startswith('@<TRIPOS>ATOM'):
is_structure = True
nmol2f.write(line)
elif line.startswith('@<TRIPOS>'):
is_structure = False
nmol2f.write(line)
elif is_structure:
new_coords = [format(coord, '.4f') for coord in coords[idx]]
newline = line[:16] + ' '*(10-len(new_coords[0])) + str(new_coords[0]) + \
' '*(10-len(new_coords[1])) + str(new_coords[1]) + ' '*(10-len(new_coords[2])) + str(new_coords[2]) + line[46:]
nmol2f.write(newline)
idx += 1
else:
nmol2f.write(line)"""%locals()
ff.write(script)
```
#### File: DockingToolBox/dockbox/glide.py
```python
import os
import sys
import glob
import shutil
import subprocess
import numpy as np
import method
import license
from mdkit.utility import reader
from mdkit.utility import mol2
required_programs = ['prepwizard', 'glide', 'glide_sort', 'pdbconvert']
default_settings = {'poses_per_lig': '10', 'pose_rmsd': '0.5', 'precision': 'SP', 'use_prepwizard': 'True'}
known_settings = {'precision': ['SP', 'XP'], 'use_prepwizard': ['true', 'false', 'yes', 'no']}
class Glide(method.DockingMethod):
def __init__(self, instance, site, options):
super(Glide, self).__init__(instance, site, options)
# set box center
center = site[1] # set box
self.options['grid_center'] = ', '.join(map(str.strip, center.split(',')))
# set box size
boxsize = site[2]
boxsize = map(str.strip, boxsize.split(','))
self.options['innerbox'] = ', '.join(["%i"%int(float(boxsize[idx])) for idx in range(3)])
outerbox = []
for idx, xyz in enumerate(['x', 'y', 'z']):
self.options['act'+xyz+'range'] = str("%.1f"%float(boxsize[idx]))
outerbox.append(self.options['act'+xyz+'range'])
self.options['outerbox'] = ', '.join(outerbox)
self.tmpdirline = ""
if 'tmpdir' in self.options:
self.tmpdirline = "export SCHRODINGER_TMPDIR=%s"%self.options['tmpdir']
if self.options['use_prepwizard'].lower() in ['yes', 'true']:
self.use_prepwizard = True
elif self.options['use_prepwizard'].lower() in ['no', 'false']:
self.use_prepwizard = False
else:
raise ValueError("Value for use_prepwizard non recognized")
def write_docking_script(self, filename, file_r, file_l):
""" Write docking script for glide """
locals().update(self.options)
if self.use_prepwizard:
# prepare protein cmd (the protein structure is already assumed to be minimized/protonated with prepwizard)
prepwizard_cmd = license.wrap_command("prepwizard -fix %(file_r)s target.mae"%locals(), 'schrodinger')
else:
prepwizard_cmd = "structconvert -ipdb %(file_r)s -omae target.mae"%locals()
# prepare grid and docking cmd
glide_grid_cmd = license.wrap_command("glide grid.in", 'schrodinger')
glide_dock_cmd = license.wrap_command("glide dock.in", 'schrodinger')
tmpdirline = self.tmpdirline
# write glide script
with open(filename, 'w') as file:
script ="""#!/bin/bash
%(tmpdirline)s
# (A) Prepare receptor
%(prepwizard_cmd)s
# (B) Prepare grid
echo "USECOMPMAE YES
INNERBOX %(innerbox)s
ACTXRANGE %(actxrange)s
ACTYRANGE %(actyrange)s
ACTZRANGE %(actzrange)s
GRID_CENTER %(grid_center)s
OUTERBOX %(outerbox)s
ENTRYTITLE target
GRIDFILE grid.zip
RECEP_FILE target.mae" > grid.in
%(glide_grid_cmd)s
# (C) convert ligand to maestro format
structconvert -imol2 %(file_l)s -omae lig.mae
# (D) perform docking
echo "WRITEREPT YES
USECOMPMAE YES
DOCKING_METHOD confgen
POSES_PER_LIG %(poses_per_lig)s
POSE_RMSD %(pose_rmsd)s
GRIDFILE $PWD/grid.zip
LIGANDFILE $PWD/lig.mae
PRECISION %(precision)s" > dock.in
%(glide_dock_cmd)s"""% locals()
file.write(script)
def extract_docking_results(self, file_s, input_file_r, input_file_l):
"""Extract Glide docking results"""
if os.path.exists('dock_pv.maegz'):
# (1) cmd to extract results
subprocess.check_output('glide_sort -r sort.rept dock_pv.maegz -o dock_sorted.mae', shell=True, executable='/bin/bash')
# (2) convert to .mol2
subprocess.check_output('mol2convert -n 2: -imae dock_sorted.mae -omol2 dock_sorted.mol2', shell=True, executable='/bin/bash')
if os.path.exists('dock_sorted.mol2'):
ligname = reader.open(input_file_l).ligname
mol2.update_mol2file('dock_sorted.mol2', 'lig-.mol2', ligname=ligname, multi=True)
# extract scores
with open('dock.rept', 'r') as ffin:
with open(file_s, 'w') as ffout:
line = ffin.next()
while not line.startswith('===='):
line = ffin.next()
while True:
line = ffin.next()
if line.strip():
print >> ffout, line[43:51].strip()
else:
break
    def get_tmpdir_line(self):
        if 'tmpdir' in self.options:
            line = "export SCHRODINGER_TMPDIR=%s" % self.options['tmpdir']
        else:
            line = ""
        return line
def write_rescoring_script(self, filename, file_r, files_l):
"""Rescore using Glide SP scoring function"""
locals().update(self.options)
files_l_joined = ' '.join(files_l)
if self.use_prepwizard:
# prepare protein cmd (the protein structure is already assumed to be minimized/protonated with prepwizard)
prepwizard_cmd = license.wrap_command("prepwizard -fix %(file_r)s target.mae"%locals(), 'schrodinger')
else:
prepwizard_cmd = "structconvert -ipdb %(file_r)s -omae target.mae"%locals()
# prepare grid and scoring cmd
glide_grid_cmd = license.wrap_command("glide grid.in", 'schrodinger') # grid prepare
glide_dock_cmd = license.wrap_command("glide dock.in", 'schrodinger') # docking command
tmpdirline = self.tmpdirline
with open(filename, 'w') as file:
script ="""#!/bin/bash
%(tmpdirline)s
cat %(files_l_joined)s > lig.mol2
# (A) Prepare receptor
%(prepwizard_cmd)s
# (B) Prepare grid
echo "USECOMPMAE YES
INNERBOX %(innerbox)s
ACTXRANGE %(actxrange)s
ACTYRANGE %(actyrange)s
ACTZRANGE %(actzrange)s
GRID_CENTER %(grid_center)s
OUTERBOX %(outerbox)s
ENTRYTITLE target
GRIDFILE grid.zip
RECEP_FILE target.mae" > grid.in
%(glide_grid_cmd)s
# (C) convert ligand to maestro format
structconvert -imol2 lig.mol2 -omae lig.mae
# (D) perform rescoring
echo "WRITEREPT YES
USECOMPMAE YES
DOCKING_METHOD inplace
GRIDFILE $PWD/grid.zip
LIGANDFILE $PWD/lig.mae
PRECISION SP" > dock.in
%(glide_dock_cmd)s"""% locals()
file.write(script)
def extract_rescoring_results(self, filename, nligands=None):
idxs = []
scores = []
if os.path.exists('dock.scor'):
with open('dock.scor', 'r') as ffin:
line = ffin.next()
while not line.startswith('===='):
line = ffin.next()
while True:
line = ffin.next()
if line.strip():
idxs.append(int(line[36:42].strip()))
scores.append(line[43:51].strip())
else:
break
scores = np.array(scores)
scores = scores[np.argsort(idxs)]
else:
scores = [ 'NaN' for idx in range(nligands)]
with open(filename, 'w') as ffout:
for sc in scores:
print >> ffout, sc
```
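A minimal usage sketch for the `Glide` wrapper above. The import path, file names, box coordinates and option values are assumptions for illustration only; the site tuple follows the `(label, center, boxsize)` convention parsed in `__init__`.
```python
from dockbox.glide import Glide  # assumed package layout

# Binding site: label, box center and box size as comma-separated x, y, z strings.
site = ('site1', '12.0, 8.5, -3.2', '30.0, 30.0, 30.0')
options = {'poses_per_lig': '10', 'pose_rmsd': '0.5',
           'precision': 'SP', 'use_prepwizard': 'yes'}

glide = Glide('glide', site, options)
# Writes a bash script that builds grid.in and dock.in and runs Glide.
glide.write_docking_script('run_glide.sh', 'protein.pdb', 'ligand.mol2')
```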
#### File: DockingToolBox/dockbox/method.py
```python
import os
import sys
import stat
import shutil
import subprocess
from glob import glob
from mdkit.amber import minimization
from mdkit.utility import mol2
import configure
class DockingMethod(object):
def __init__(self, instance, site, options):
"""Initialize docking instance"""
self.instance = instance
self.site = site
self.options = options
self.program = self.__class__.__name__.lower()
def run_docking(self, file_r, file_l, minimize_options=None, cleanup=0, prepare_only=False, skip_docking=False):
"""Run docking one (file per ligand and receptor)"""
curdir = os.getcwd()
# find name for docking directory
if 'name' in self.options:
dockdir = self.options['name']
else:
dockdir = self.instance
if self.site[0]:
dockdir += '.' + self.site[0]
if not skip_docking:
# create directory for docking (remove directory if exists)
shutil.rmtree(dockdir, ignore_errors=True)
os.mkdir(dockdir)
os.chdir(dockdir)
if not skip_docking:
print "Starting docking with %s..."%self.program.capitalize()
print "The following options will be used:"
options_info = ""
for key, value in self.options.iteritems():
options_info += str(key) + ': ' + str(value) + ', '
print options_info[:-2]
# (A) run docking
script_name = "run_" + self.program + ".sh"
self.write_docking_script(script_name, file_r, file_l)
os.chmod(script_name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IXUSR)
if prepare_only:
return
try:
# try running docking procedure
subprocess.check_output('./' + script_name + " &> " + self.program + ".log", shell=True, executable='/bin/bash')
except subprocess.CalledProcessError as e:
print e
print "Error: check %s file for more details!"%(dockdir+'/'+self.program+'.log')
os.chdir(curdir)
return
if prepare_only:
return
# (B) extract docking results
self.extract_docking_results('score.out', file_r, file_l)
# (C) cleanup poses (minimization, remove out-of-box poses)
if minimize_options['minimization']:
self.backup_files('origin')
self.minimize_extracted_poses(file_r, 'score.out', cleanup=cleanup, **minimize_options)
self.remove_out_of_range_poses('score.out')
# (D) remove intermediate files if required
if cleanup >= 1:
self.cleanup()
os.chdir(curdir)
print "Docking with %s done."%self.program.capitalize()
def run_rescoring(self, file_r, files_l):
"""Rescore multiple ligands on one receptor"""
curdir = os.getcwd()
# get name of rescoring from instance
rescordir = self.instance
if self.site[0]:
rescordir += '.' + self.site[0]
# overwrite previous directory if exists
shutil.rmtree(rescordir, ignore_errors=True)
os.mkdir(rescordir)
# change directory
os.chdir(rescordir)
mol2files = files_l
if self.program in configure.single_run_scoring_programs or (self.program == 'colvar' and self.options['type'] == 'sasa'):
            # if the program rescores all poses in a single run, provide the whole list of files
mol2files = [mol2files]
if mol2files:
# iterate over all the poses
for idx, file_l in enumerate(mol2files):
# (A) write script
script_name = "run_scoring_" + self.program + ".sh"
self.write_rescoring_script(script_name, file_r, file_l)
os.chmod(script_name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IXUSR)
# (B) run scoring method
try:
subprocess.check_output('./' + script_name + ' &> ' + self.program + '.log', shell=True, executable='/bin/bash')
except subprocess.CalledProcessError as e:
print e.output
pass
# (C) extract rescoring results
if self.program in configure.single_run_scoring_programs:
nligands = len(file_l)
self.extract_rescoring_results('score.out', nligands=nligands)
else:
self.extract_rescoring_results('score.out')
else:
# if no files provided, create an empty score.out file
open('score.out', 'w').close()
os.chdir(curdir)
return rescordir + '/score.out'
def get_output_mol2files(self):
"""Get output mol2files sorted by pose ranking after docking"""
filenames_idxs = []
for filename in glob('pose-*.mol2'):
suffix, ext = os.path.splitext(filename)
filenames_idxs.append(int(suffix.split('-')[-1]))
filenames_idxs = sorted(filenames_idxs)
mol2files = []
for idx in filenames_idxs:
mol2files.append('pose-%s.mol2'%idx)
return mol2files
def backup_files(self, dir):
"""Do a backup of output mol2files"""
mol2files = self.get_output_mol2files()
shutil.rmtree(dir, ignore_errors=True)
os.mkdir(dir)
for filename in mol2files:
shutil.copyfile(filename, dir+'/'+filename)
def remove_scores_from_scorefile(self, file_s, indices, nligands=None):
"""Remove scores of bad poses (failed minimization, out of the box...) from score.out"""
if os.path.exists(file_s):
new_content = []
with open(file_s, 'r') as sf:
for idx, line in enumerate(sf):
if idx not in indices:
new_content.append(line)
if nligands:
# consistency check
assert nligands == idx+1, "number of ligand mol2files should be equal to number of lines in score.out"
with open(file_s, 'w') as sf:
for line in new_content:
sf.write(line)
def minimize_extracted_poses(self, file_r, file_s, cleanup=0, **minimize_options):
"""Perform AMBER minimization on extracted poses"""
mol2files = self.get_output_mol2files()
if mol2files:
# do energy minimization on ligand
minimization.do_minimization_after_docking(file_r, mol2files, keep_hydrogens=True, charge_method=minimize_options['charge_method'],\
ncyc=minimize_options['ncyc'], maxcyc=minimize_options['maxcyc'], cut=minimize_options['cut'], amber_version=minimize_options['amber_version'])
failed_idxs = []
# extract results from minimization and purge out
for idx, filename_before_min in enumerate(mol2files):
suffix, ext = os.path.splitext(filename_before_min)
filename = 'em/' + suffix + '-out' + ext
if os.path.isfile(filename): # the minimization succeeded
shutil.copyfile(filename, filename_before_min)
else: # the minimization failed
os.remove(filename_before_min)
failed_idxs.append(idx)
# remove scores of failed poses
self.remove_scores_from_scorefile(file_s, failed_idxs, nligands=len(mol2files))
if failed_idxs:
# display warning message
failed_mol2files = [mol2files[idx] for idx in failed_idxs]
print "Warning: minimization of poses %s failed, poses were removed!"%(', '.join(failed_mol2files))
if cleanup >= 1:
# if cleanup is more than 1, remove EM directory
shutil.rmtree('em', ignore_errors=True)
def remove_out_of_range_poses(self, file_s):
"""Get rid of poses which were predicted outside the box"""
mol2files = self.get_output_mol2files()
if mol2files:
sitename, center, boxsize = self.site
# get values of docking box center and boxsize
center = map(float, center.split(','))
boxsize = map(float, boxsize.split(','))
out_of_range_idxs = []
for jdx, filename in enumerate(mol2files):
is_out = False
for coord in mol2.get_coordinates(filename):
for idx, value in enumerate(coord):
# check if the pose is out of the box
if abs(value - center[idx]) > boxsize[idx]*1./2:
is_out = True
break
if is_out:
os.remove(filename)
out_of_range_idxs.append(jdx)
break
# remove scores of failed poses
self.remove_scores_from_scorefile(file_s, out_of_range_idxs, nligands=len(mol2files))
if out_of_range_idxs:
# display warning message
out_of_range_mol2files = [mol2files[idx] for idx in out_of_range_idxs]
print "Warning: poses %s were found out of the box, poses were removed!"%(', '.join(out_of_range_mol2files))
def cleanup(self):
"""Remove all intermediate files"""
for filename in glob('*'):
if os.path.isfile(filename) and not filename.startswith('pose-') and filename != 'score.out':
os.remove(filename)
def write_rescoring_script(self, script_name, file_r, file_l):
pass
    def extract_rescoring_results(self, filename, nligands=None):
pass
def write_docking_script(self, script_name, file_r, file_l):
pass
    def extract_docking_results(self, file_s, input_file_r, input_file_l):
pass
class ScoringMethod(DockingMethod):
def run_docking(self, file_r, file_l, minimize=False, cleanup=0, extract_only=False):
pass
def remove_out_of_range_poses(self, file_s):
pass
def minimize_extracted_poses(self, file_r):
pass
``` |
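To make the `DockingMethod` contract above concrete, here is a hypothetical minimal backend; the class name, command and file contents are illustrative only. A subclass only needs to write a run script that leaves `pose-*.mol2` files behind and to produce one score per line in the score file.
```python
from dockbox import method  # assumed package layout

class Mydock(method.DockingMethod):
    """Skeleton backend showing the hooks that run_docking() relies on."""

    def write_docking_script(self, script_name, file_r, file_l):
        # The script must leave poses named pose-1.mol2, pose-2.mol2, ... in the
        # working directory, because get_output_mol2files() globs 'pose-*.mol2'.
        with open(script_name, 'w') as ff:
            ff.write("#!/bin/bash\nmydock --receptor %s --ligand %s\n" % (file_r, file_l))

    def extract_docking_results(self, file_s, input_file_r, input_file_l):
        # One score per line, in the same order as the extracted pose files.
        with open(file_s, 'w') as ff:
            ff.write('0.0\n')
```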
{
"source": "jp43/mdtools",
"score": 3
} |
#### File: mdkit/namd/namdtools.py
```python
import os
def create_constrained_pdbfile(pdbfile, startpdb, ligname):
with open(startpdb, 'r') as startfile:
with open(pdbfile, 'w') as posresfile:
for line in startfile:
if line.startswith(('ATOM', 'HETATM')):
atom_name = line[12:16].strip()
res_name = line[17:20].strip()
if res_name == 'WAT': # water molecules
newline = line[0:30] + '%8.3f'%0.0 + line[38:]
elif res_name in ligname: # atoms of the ligand
if atom_name.startswith(('C', 'N', 'O')):
newline = line[0:30] + '%8.3f'%50.0 + line[38:]
else:
newline = line[0:30] + '%8.3f'%0.0 + line[38:]
else: # atoms of the protein
if atom_name in ['C', 'CA', 'N', 'O']:
newline = line[0:30] + '%8.3f'%50.0 + line[38:]
else:
newline = line[0:30] + '%8.3f'%0.0 + line[38:]
else:
newline = line
print >> posresfile, newline.replace('\n','')
def create_steered_constrained_pdbfile(pdbfile, startpdb, ligname):
has_found_first_atom = False
has_found_last_atom = False
# check pdb once to find first and last C alpha atoms
with open(startpdb, 'r') as startfile:
for line in startfile:
if line.startswith(('ATOM', 'HETATM')):
atom_name = line[12:16].strip()
atom_num = line[6:11].strip()
if not has_found_first_atom and atom_name == 'CA':
atom_num_first = atom_num
has_found_first_atom = True
elif atom_name == 'CA':
atom_num_last = atom_num
has_found_last_atom = True
if not has_found_first_atom:
raise ValueError("First CA atom not found in %s"%startpdb)
if not has_found_last_atom:
raise ValueError("Last CA atom not found in %s"%startpdb)
with open(startpdb, 'r') as startfile:
with open(pdbfile, 'w') as posresfile:
for line in startfile:
if line.startswith(('ATOM', 'HETATM')):
atom_num = line[6:11].strip()
if atom_num == atom_num_first:
newline = line[0:56] + "0.00 1.00\n"
elif atom_num == atom_num_last:
newline = line[0:56] + "1.00 0.00\n"
else:
newline = line[0:56] + "0.00 0.00\n"
else:
newline = line
posresfile.write(newline)
```
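A short usage sketch for the two helpers above; the file names and the ligand residue name are assumptions.
```python
from mdkit.namd import namdtools  # assumed package layout

# Reference PDB where restrained atoms (ligand heavy atoms and the protein
# backbone C, CA, N, O) carry a 50.0 force constant and all others 0.0.
namdtools.create_constrained_pdbfile('posres.pdb', 'start.pdb', ['LIG'])

# Reference PDB tagging the first and last C-alpha atoms for a steered run.
namdtools.create_steered_constrained_pdbfile('smd.pdb', 'start.pdb', ['LIG'])
```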
#### File: mdkit/utility/mol2.py
```python
import os
import sys
import shutil
import subprocess
import networkx as nx
from utils import center_of_geometry
# recognized sections
known_section = ['MOLECULE', 'ATOM', 'BOND', 'SUBSTRUCTURE']
class Reader(object):
def __init__(self, filename):
self.filename = filename
self.file = open(filename, 'r')
# move to first molecule
for line in self.file:
if line.startswith('@<TRIPOS>MOLECULE'):
break
@property
def ligname(self):
ff = open(self.filename, 'r')
first_atom = False
ligname = None
for line in ff:
if line.startswith('@<TRIPOS>ATOM'):
first_atom = True
elif first_atom:
line_s = line.split()
if not ligname:
ligname = line_s[7]
elif line_s[7] != ligname:
raise ValueError('Ligand name not consistent between structures')
first_atom = False
ff.close()
return ligname
@property
def nmolecules(self):
ff = open(self.filename, 'r')
nmolecules = 0
for line in ff:
if line.startswith('@<TRIPOS>MOLECULE'):
nmolecules += 1
ff.close()
return nmolecules
def read(self):
struct = self.next()
while struct is not None:
yield struct
struct = self.next()
def readlines(self):
structs = []
struct = self.next()
while struct is not None:
structs.append(struct)
struct = self.next()
return structs
def next(self):
struct = None
for idx, line in enumerate(self.file):
            # initialize structure
if idx == 0:
struct = {}
section = 'MOLECULE'
struct[section] = []
# new molecule detected
if line.startswith('@<TRIPOS>MOLECULE'):
break
elif line.startswith('@<TRIPOS>'):
section = line[9:].strip()
struct[section] = []
elif section and line.strip():
if section == 'ATOM':
atom = line.split()
if len(atom) > 9:
atom = atom[:9]
struct[section].append(atom)
else:
struct[section].append(line)
elif section and not line.strip():
struct[section].append(line)
section = None
return struct
def close(self):
self.file.close()
def __iter__(self):
return self.read()
readline = next
class Writer(object):
def write(self, filename, structs, mode='w', multi=False, last=None):
if not isinstance(structs, list):
structs = [structs]
if not isinstance(filename, str):
raise ValueError('filename should be a string!')
if multi:
suffix, ext = os.path.splitext(filename)
filename = []
for idx, struct in enumerate(structs):
if (last and (idx+1) <= last) or not last:
filename.append((idx, suffix+str(idx+1)+ext))
else:
if len(structs) == 1:
filename = [(0, filename)]
else:
raise ValueError(".mol2 writer not implemented to write a single file \
with multiple structures!")
for idx, fname in filename:
with open(fname, mode) as ff:
for section in known_section:
struct = structs[idx]
if section in struct:
ff.write('@<TRIPOS>'+section+'\n')
for line in struct[section]:
if section == 'ATOM':
newline = '%7s %-5s %9s %9s %9s %-5s %2s %-5s %9s\n'%tuple(line)
else:
newline = line
ff.write(newline)
def update_mol2file(inputfile, outputfile, ADupdate=None, multi=False, ligname=None, unique=False, mask=None, remove=None, last=None, shift=None):
f = Reader(inputfile)
structs = f.readlines()
f.close()
updated_structs = []
for struct in structs:
if ADupdate:
struct = update_AD_output_from_original_struct(struct, ADupdate)
if shift is not None:
struct = shift_coordinates(struct, shift)
if ligname:
struct = update_ligand_name(struct, ligname)
if unique:
struct = give_unique_atom_names(struct, mask)
if remove:
struct = remove_atoms(struct, remove)
updated_structs.append(struct)
Writer().write(outputfile, updated_structs, multi=multi, last=last)
def pdb2mol2(inputfile, outputfile, sample, keep_charges_from=None):
# get atom lines in PDB:
atom_lines_pdb = []
with open(inputfile, 'r') as pdbf:
for line in pdbf:
if line.startswith(('ATOM','HETATM')):
atom_lines_pdb.append(line)
f = Reader(sample)
structs = f.readlines()
f.close()
struct = structs[0]
new_struct = struct
for idx, line in enumerate(struct['ATOM']):
atom_name_mol2 = line[1].lower()
is_atom = False
for line_pdb in atom_lines_pdb:
atom_name_pdb = line_pdb[12:16].strip().lower()
if atom_name_pdb == atom_name_mol2:
if is_atom:
raise ValueError("Mol2 atom name already found in PDB file, your files should have unique atom names!")
is_atom = True
coords = [coord.strip() + '0' for coord in [line_pdb[30:38], line_pdb[38:46], line_pdb[46:54]]]
for jdx in range(3):
new_struct['ATOM'][idx][jdx+2] = coords[jdx]
if not is_atom:
raise IOError("Mol2 atom name not found in PDB file, check your input files!")
if keep_charges_from:
f_ref = Reader(keep_charges_from)
struct_charges = f_ref.readlines()[0]
f_ref.close()
new_struct['MOLECULE'] = struct_charges['MOLECULE']
for idx, line in enumerate(struct['ATOM']):
atom_name_mol2 = line[1].lower()
atom_name_mol2_charges = struct_charges['ATOM'][idx][1].lower()
if atom_name_mol2 != atom_name_mol2_charges:
raise ValueError("Atom names in ref mol2file for charges does not fit names in sample!")
else:
new_struct['ATOM'][idx][-1] = struct_charges['ATOM'][idx][-1]
Writer().write(outputfile, new_struct)
def get_graph(inputfile):
f = Reader(inputfile)
struct = f.next()
f.close()
G = nx.Graph()
for line in struct['ATOM']:
sybyl_atom_type = line[5]
atom_type = sybyl_atom_type.split('.')[0].upper()
G.add_node(int(line[0]), type=atom_type)
for line in struct['BOND']:
line_s = line.split()
node_1 = int(line_s[1])
node_2 = int(line_s[2])
G.add_edge(node_1, node_2)
return G
def update_ligand_name(struct, ligname):
ligname_p = struct['ATOM'][0][-2]
new_struct = struct
for idx, line in enumerate(struct['ATOM']):
new_struct['ATOM'][idx][-2] = ligname
if 'SUBSTRUCTURE' in struct:
for idx, line in enumerate(struct['SUBSTRUCTURE']):
new_struct['SUBSTRUCTURE'][idx] = line.replace(ligname_p, ligname)
return new_struct
def shift_coordinates(struct, shift):
coords = []
for line in struct['ATOM']:
coords.append(map(float, line[2:5]))
cog = center_of_geometry(coords)
center_x, center_y, center_z = cog - shift
new_struct = struct
for idx, line in enumerate(struct['ATOM']):
x, y, z = map(float, line[2:5])
new_struct['ATOM'][idx][2] = "%.4f"%(x-center_x)
new_struct['ATOM'][idx][3] = "%.4f"%(y-center_y)
new_struct['ATOM'][idx][4] = "%.4f"%(z-center_z)
return new_struct
def replace_coordinates(struct, coords):
new_struct = struct
for idx, line in enumerate(struct['ATOM']):
x, y, z = coords[idx]
new_struct['ATOM'][idx][2] = "%.4f"%x
new_struct['ATOM'][idx][3] = "%.4f"%y
new_struct['ATOM'][idx][4] = "%.4f"%z
return new_struct
def is_unique_name(struct):
known_atom_names = []
for line in struct['ATOM']:
atom_name = line[1]
if atom_name not in known_atom_names:
known_atom_names.append(atom_name)
else:
return False
return True
def update_AD_output_from_original_struct(struct1, filename):
# read original structure
f = Reader(filename)
struct2 = f.next()
new_struct = struct2
f.close()
for idx, struct in enumerate([struct1, struct2]):
if not is_unique_name(struct):
raise ValueError("Mol2 structure (%i) should have unique atom names"%(idx+1))
for idx, line2 in enumerate(struct2['ATOM']):
for line1 in struct1['ATOM']:
if line1[1] == line2[1]:
for jdx in range(2,5):
new_struct['ATOM'][idx][jdx] = line1[jdx]
return new_struct
def remove_atoms(struct, atomtype):
if not isinstance(atomtype, list):
atomtype = [atomtype]
new_struct = struct
atom_section = []
bond_section = []
jdx = 0
old_atoms_idxs = []
new_atoms_idxs = []
removed_atom_idxs = []
for idx, line in enumerate(struct['ATOM']):
old_atoms_idxs.append(line[0])
if line[5] in atomtype:
new_atoms_idxs.append('-1')
removed_atom_idxs.append(line[0])
else:
jdx += 1
new_atoms_idxs.append(str(jdx))
line[0] = jdx
atom_section.append(line)
natoms = jdx
jdx = 0
for line in struct['BOND']:
line_s = line.split()
origin_atom_id = line_s[1]
target_atom_id = line_s[2]
if (not origin_atom_id in removed_atom_idxs) and (not target_atom_id in removed_atom_idxs):
jdx += 1
line_s[0] = str(jdx)
line_s[1] = new_atoms_idxs[old_atoms_idxs.index(origin_atom_id)]
line_s[2] = new_atoms_idxs[old_atoms_idxs.index(target_atom_id)]
bond_section.append("%4s %4s %4s %-4s\n"%tuple(line_s))
nbonds = jdx
line_s = new_struct['MOLECULE'][1].split()
line_s[0] = str(natoms)
line_s[1] = str(nbonds)
new_struct['MOLECULE'][1] = ' ' + ' '.join(line_s) + '\n'
new_struct['ATOM'] = atom_section
new_struct['BOND'] = bond_section
return new_struct
def arrange_hydrogens(inputfile, outputfile, path=None):
if path and path not in sys.path:
sys.path.append(path)
from MolKit import Read
from PyBabel.atomTypes import AtomHybridization
from babel import ArrangeHydrogens
mol = Read(inputfile)
base, ext = os.path.splitext(inputfile)
# remove hydrogens from structure
inputfile_noH = base + '_noH' + ext
subprocess.check_output('babel -imol2 %s -omol2 %s -d &>/dev/null'%(inputfile,inputfile_noH), shell=True, executable='/bin/bash')
molnoH = Read(inputfile_noH)
allAtoms = mol.allAtoms
allAtomsNoH = molnoH.allAtoms
babel = AtomHybridization()
babel.assignHybridization(allAtoms)
babel.assignHybridization(allAtomsNoH)
# get mol2 ids of all atoms and all hydrogens
ff = Reader(inputfile)
struct = ff.next()
hat_mol2_ids = []
at_mol2_ids = []
for line in struct['ATOM']:
atom_name = line[5]
#print line[2]
if atom_name[0] in ['H', 'h']:
hat_mol2_ids.append(line[0])
at_mol2_ids.append(line[0])
hat_mol2_ids = map(int, hat_mol2_ids)
at_mol2_ids = map(int, at_mol2_ids)
# find out the heavy atom each hydrogen is bound with
at_with_hat_mol2_ids = []
for id in hat_mol2_ids:
for line in struct['BOND']:
line_s = line.split()
origin_atom_id = int(line_s[1])
target_atom_id = int(line_s[2])
#print origin_atom_id
if id == origin_atom_id:
at_with_hat_mol2_ids.append(target_atom_id)
elif id == target_atom_id:
at_with_hat_mol2_ids.append(origin_atom_id)
if len(at_with_hat_mol2_ids) != len(hat_mol2_ids):
raise ValueError("Each hydrogen should have only one bound! Check you .mol2 file")
addh = ArrangeHydrogens()
# at_with_hat_idxs are the indices of atoms related to each hydrogen of hat
hat, at_with_hat_idxs = addh.addHydrogens(allAtoms, allAtomsNoH)
hat_coords = []
hat_done_mol2_ids = []
for idx, at_with_hat_idx in enumerate(at_with_hat_idxs):
at_with_hat_mol2_id = at_mol2_ids[at_with_hat_idx]
hat_mol2_ids_cur = [hat_mol2_ids[jdx] for jdx, id in enumerate(at_with_hat_mol2_ids) \
if id == at_with_hat_mol2_id]
kdx = 0
while hat_mol2_ids_cur[kdx] in hat_done_mol2_ids:
kdx += 1
hat_done_mol2_ids.append(hat_mol2_ids_cur[kdx])
hat_coords.append(hat[idx][0])
for line in struct['ATOM']:
id = int(line[0])
if id in hat_done_mol2_ids:
idx = hat_done_mol2_ids.index(id)
line[2:5] = ["%.4f"%coords for coords in hat_coords[idx]]
Writer().write(outputfile, struct)
os.remove(inputfile_noH)
def give_unique_atom_names(struct, mask=None):
new_struct = struct
known_atom_types = {}
for jdx, line in enumerate(struct['ATOM']):
if not mask or line[5] in mask:
sybyl_atom_type = line[5]
atom_type = sybyl_atom_type.split('.')[0].upper()
if atom_type in known_atom_types:
known_atom_types[atom_type] += 1
else:
known_atom_types[atom_type] = 1
new_struct['ATOM'][jdx][1] = atom_type + str(known_atom_types[atom_type])
return new_struct
def get_atoms_names(filename):
atoms_names = []
with open(filename, 'r') as mol2f:
is_structure = False
for line in mol2f:
if line.startswith('@<TRIPOS>ATOM'):
is_structure = True
elif line.startswith('@<TRIPOS>'):
is_structure = False
elif is_structure:
line_s = line.split()
atoms_names.append(line_s[1])
return atoms_names
def get_coordinates(filename, keep_h=True):
coords = []
with open(filename, 'r') as mol2f:
is_structure = False
for line in mol2f:
if line.startswith('@<TRIPOS>ATOM'):
is_structure = True
elif line.startswith('@<TRIPOS>'):
is_structure = False
elif is_structure:
line_s = line.split()
if keep_h or line_s[5][0].lower() != 'h':
coords.append(map(float,line_s[2:5]))
return coords
```
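A brief usage sketch of `Reader` and `update_mol2file` from the module above; the file names and ligand residue name are assumptions.
```python
from mdkit.utility import mol2  # assumed package layout

# Inspect a multi-pose .mol2 file.
r = mol2.Reader('poses.mol2')
nposes = r.nmolecules
ligand_name = r.ligname
r.close()

# Split it into pose1.mol2, pose2.mol2, ... keeping the first 10 poses and
# renaming the ligand residue to LIG.
mol2.update_mol2file('poses.mol2', 'pose.mol2', ligname='LIG', multi=True, last=10)
```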
#### File: mdkit/utility/pdb.py
```python
import numpy as np
import itertools as it
_known_entries = [ "ATOM ", "HETATM", "ANISOU", "CRYST1",
"COMPND", "MODEL", "ENDMDL", "TER", "HEADER", "TITLE", "REMARK",
"CONECT"]
class PDBError(Exception):
pass
class Reader(object):
def __init__(self, filename, **kwargs):
self.filename = filename
self.file = open(filename, 'r')
def read(self):
struct = self.next()
while struct is not None:
yield struct
struct = self.next()
def next(self):
struct = None
for idx, line in enumerate(self.file):
            # initialize structure
if idx == 0:
struct = {}
struct['ATOM'] = []
if line.startswith(('ATOM','HETATM')):
line_s = [line[6:11], line[12:16], line[17:20], line[22:26], line[30:38], line[38:46], line[46:54]]
struct['ATOM'].append(map(str.strip, line_s))
elif line.startswith('END'):
break
return struct
def _skip(self):
try:
self.file.next()
except StopIteration:
return None
try:
natoms = int(self.file.next())
except StopIteration:
raise PDBError("File ended unexpectedly when reading number of atoms.")
for atom in it.izip(xrange(natoms), self.file):
pass
try:
self.file.next()
except StopIteration:
raise PDBError("File ended unexpectedly when reading box line.")
return None
def readlines(self, *args):
if len(args) == 0:
configs = []
config = self.next()
while config is not None:
configs.append(config)
config = self.next()
elif len(args) == 1:
lines = args[0]
if isinstance(lines, int):
lines = [lines]
else:
lines = list(set(lines))
lines.sort()
lines = np.array(lines)
lines = np.hstack((-1, lines))
sklines = np.diff(lines) - 1
configs = []
for skline in sklines:
for idx in xrange(skline):
self._skip()
config = self.next()
configs.append(config)
else:
raise PDBError("invalid number of arguments to readlines")
return np.array(configs)
def close(self):
self.file.close()
def __iter__(self):
return self.read()
readline = next
class Writer(object):
pass
def split_file_rl(file_r, file_l, file_rl, ligname):
with open(file_rl, 'r') as frl:
with open(file_r, 'w') as fr:
with open(file_l, 'w') as fl:
for line in frl:
if line.startswith(('HETATM', 'ATOM')) and line[17:20].strip() == ligname:
fl.write(line)
else:
fr.write(line)
``` |
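A small usage sketch for `split_file_rl`; the file names and the ligand residue name are assumptions.
```python
from mdkit.utility import pdb  # assumed package layout

# Split a receptor-ligand complex: ATOM/HETATM records whose residue name is
# 'LIG' go to the ligand file, everything else to the receptor file.
pdb.split_file_rl('receptor.pdb', 'ligand.pdb', 'complex.pdb', 'LIG')
```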
{
"source": "jp4jp4/Fall_Detect",
"score": 3
} |
#### File: jp4jp4/Fall_Detect/Fall.py
```python
import ui
def read_data(sender):
import motion, location
import time, datetime
import io
import numpy as np
import matplotlib.pyplot as plt
val = view['switch1'].value
if val==True:
motion.start_updates()
y=0
nx = np.empty(1)
ny = np.empty(1)
nz = np.empty(1)
view['mag'].text = ''
view['accel'].text = ''
view['gyro'].text = ''
view['gravity'].text = ''
while (y<=100):
time.sleep(.05)
x = motion.get_attitude()
view['gyro'].text = str(x) + '\n' + view['gyro'].text
x = motion.get_gravity()
view['gravity'].text = str(x) + '\n' + view['gravity'].text
x = motion.get_user_acceleration()
nx = np.append(nx,x[0])
ny = np.append(ny,x[1])
nz = np.append(nz,x[2])
view['accel'].text = str(x) + '\n' + view['accel'].text
x = motion.get_magnetic_field()
view['mag'].text = str(x) + '\n' + view['mag'].text
y +=1
view['y'].text = str(y) + 'measurements'
motion.stop_updates()
plt.plot(nx)
plt.show()
plt.savefig('x.tif')
plt.close()
plt.plot(ny)
plt.show()
plt.savefig('y.tif')
plt.close()
plt.plot(nz)
plt.show()
plt.savefig('z.tif')
plt.close()
medianx = np.median(nz)
stdx = np.std(nz)
apex = np.amax(np.absolute(nz))
print (apex)
print (stdx)
if apex >= stdx*2:
if apex > stdx*5:
view['fell'].text = 'Fall'
else:
view['fell'].text = 'Trip'
fname = 'motion' + str(datetime.datetime.now()).split('.')[1] + '.txt'
with open(fname, 'w') as fo:
fo.write('gyro\n')
fo.write(view['gyro'].text)
fo.write('gravity\n')
fo.write(view['gravity'].text)
fo.write('accel\n')
fo.write(view['accel'].text)
fo.write('mag\n')
fo.write(view['mag'].text)
else:
view['mag'].text = ''
view['accel'].text = ''
view['gyro'].text = ''
view['gravity'].text = ''
view['y'].text = str(0)
view = ui.load_view('Fall')
view.name = "fall"
view.present('sheet')
``` |
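The fall/trip decision above is a peak-over-standard-deviation test on the z acceleration; the 2x and 5x factors come from the script, while the function name and sample data below are illustrative.
```python
import numpy as np

def classify_motion(z_accel):
    """Return 'Fall', 'Trip' or 'Normal' for a 1-D array of z accelerations."""
    std = np.std(z_accel)
    apex = np.amax(np.absolute(z_accel))
    if apex >= 2 * std:
        return 'Fall' if apex > 5 * std else 'Trip'
    return 'Normal'

print(classify_motion(np.array([0.01, -0.02, 0.03, 1.5, -0.9, 0.02])))
```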
{
"source": "jp5000/LandBOSSE",
"score": 3
} |
#### File: landbosse/excelio/XlsxFileOperations.py
```python
import os
import sys
from datetime import datetime
from shutil import copy2
from shutil import copytree
from .XlsxOperationException import XlsxOperationException
class XlsxFileOperations:
"""
This class is made to handle file naming and copying.
"""
def __init__(self):
"""
The __init__() method just makes a timestamp that will be used throughout
the lifetime of this instance.
"""
dt = datetime.now()
self.timestamp = f'{dt.year}-{dt.month}-{dt.day}-{dt.hour}-{dt.minute}-{dt.second}'
def get_input_output_paths_from_argv_or_env(self):
"""
This uses the sys.argv object to inspect the command line to find input
and output paths as specified on the command line. It expects the
command line to have options in the following form:
-i [input file] -o [output file]
If one or both of these is missing, then it is filled with the defaults
from the environment variables LANDBOSSE_INPUT_DIR or LANDBOSSE_OUTPUT_DIR
        If all of them are missing, the method defaults to 'input' and 'output'
There is a third option this method looks for:
--validate or -v
If that parameter is present, then --output or -o is optional. If enabled,
validation mode will accept an input directory with --input that must also have a
landbosse-output.xlsx file. The model will then run on the inputs. Instead
of writing a file of the output, the output will stay in memory. The
landbosse-output.xlsx will then be loaded and compared against the in
memory output. If the outputs are the same, the validation passes
because the results were reproduced. Otherwise, the validation failed
because something broke in the model.
Parameters
----------
This function takes no parameters.
Returns
-------
str, str, bool
The first two strings are paths to the input and output files
respectively. The third bool is True if validation mode is
enabled, False for normal operation.
"""
# Get the fallback paths from the environment variables and set their
        # defaults. These defaults are used if the environment variables are not set.
input_path_from_env = os.environ.get('LANDBOSSE_INPUT_DIR', 'input')
output_path_from_env = os.environ.get('LANDBOSSE_OUTPUT_DIR', 'output')
# input and output paths from command line are initially set to None
# to indicate they have not been found yet.
input_path_from_arg = None
output_path_from_arg = None
# This is for validation option detection
validation_enabled = '--validate' in sys.argv or '-v' in sys.argv
# Look for the input path on command line
if '--input' in sys.argv and sys.argv.index('--input') + 1 < len(sys.argv):
input_idx = sys.argv.index('--input') + 1
input_path_from_arg = sys.argv[input_idx]
if '-i' in sys.argv and sys.argv.index('-i') + 1 < len(sys.argv):
input_idx = sys.argv.index('-i') + 1
input_path_from_arg = sys.argv[input_idx]
# Look for the output path on command line
if '--output' in sys.argv and sys.argv.index('--output') + 1 < len(sys.argv):
output_idx = sys.argv.index('--output') + 1
output_path_from_arg = sys.argv[output_idx]
if '-o' in sys.argv and sys.argv.index('-o') + 1 < len(sys.argv):
output_idx = sys.argv.index('-o') + 1
output_path_from_arg = sys.argv[output_idx]
# Find the final input and output paths. If a command line argument was
# found for input and/or output, that is used. If it wasn't found,
# the value from the environment variable search is returned, which includes
# the default if the environment variable itself wasn't found.
input_path = input_path_from_arg if input_path_from_arg is not None else input_path_from_env
output_path = output_path_from_arg if output_path_from_arg is not None else output_path_from_env
        # Return the state of the command line arguments.
return input_path, output_path, validation_enabled
def landbosse_input_dir(self):
"""
See the get_input_output_paths_from_argv_or_env() function above. This
function is simply a wrapper around that function to get the input
path.
Returns
-------
str
The input directory.
"""
input_path, _, _ = self.get_input_output_paths_from_argv_or_env()
return input_path
def landbosse_output_dir(self):
"""
See the get_input_output_paths_from_argv_or_env() function above. This
method gets the base path from there. Then, it checks for a timestamped
directory that matches the timestamp in this instance. If it finds that
directory, it simply returns the path to that directory. If it does
not find that directory, it creates the directory and returns the path
to the newly created directory.
Returns
-------
str
The output directory.
"""
_, output_base_path, _ = self.get_input_output_paths_from_argv_or_env()
output_path = os.path.join(output_base_path, f'landbosse-{self.timestamp}')
if os.path.exists(output_path) and not os.path.isdir(output_path):
raise FileExistsError(f'Cannot overwrite {output_path} with LandBOSSE data.')
elif not os.path.exists(output_path):
os.mkdir(output_path)
return output_path
else:
return output_path
def parametric_project_data_output_path(self):
"""
        Returns the path to the project output data folder.
        This folder holds the parameterized project data sheets generated
        during model runs.
If the directory does not exist, it is created.
Returns
-------
str
Path to project data output folder.
"""
path = os.path.join(self.landbosse_output_dir(), 'calculated_parametric_inputs', 'parametric_project_data')
if os.path.exists(path) and not os.path.isdir(path):
raise XlsxOperationException(f'Attempt to write project data to {path} failed. File exists and is not a directory.')
os.makedirs(path, exist_ok=True)
return path
def extended_project_list_path(self):
"""
This returns the path to which the extended project list, which has all
the parametric values, should be copied.
If the folder does not exist yet, this method creates it.
Returns
-------
str
            The absolute path to the destination of the extended project
list.
"""
path = os.path.join(self.landbosse_output_dir(), 'calculated_parametric_inputs')
if os.path.exists(path) and not os.path.isdir(path):
raise XlsxOperationException(f'Attempt to write project data to {path} failed. File exists and is not a directory.')
os.makedirs(path, exist_ok=True)
return path
def copy_input_data(self):
"""
This copies all input data to the outputs folder. The input data it copies
are all the data BEFORE they have been modified for parametric runs.
"""
dst_inputs_copy_path = os.path.join(self.landbosse_output_dir(), 'inputs')
os.makedirs(dst_inputs_copy_path, exist_ok=True)
src_project_list_xlsx = os.path.join(self.landbosse_input_dir(), 'project_list.xlsx')
dst_project_list_xlsx = os.path.join(dst_inputs_copy_path, 'project_list.xlsx')
src_project_data_dir = os.path.join(self.landbosse_input_dir(), 'project_data')
dst_project_data_dir = os.path.join(dst_inputs_copy_path, 'project_data')
copy2(src_project_list_xlsx, dst_project_list_xlsx)
copytree(src_project_data_dir, dst_project_data_dir)
src_expected_validation_data = os.path.join(self.landbosse_input_dir(),
'landbosse-expected-validation-data.xlsx')
dst_expected_validation_data = os.path.join(self.landbosse_output_dir(),
'landbosse-expected-validation-data.xlsx')
if os.path.isfile(src_expected_validation_data):
copy2(src_expected_validation_data, dst_expected_validation_data)
def timestamp_filename(self, directory, basename, extension):
"""
This function creates a timestamped filename. It uses a filename in the
format of:
basename-timestamp.extension
And joins it to the directory specified by directory. It uses os.path.join()
so it's OS independent.
Parameters
----------
directory : str
The directory for this filename
basename : str
The filename without the timestamp or extension
extension : str
            The last part of the filename, without the "."
Returns
-------
str
The path for the host operating system.
"""
filename = '{}-{}.{}'.format(basename, self.timestamp, extension)
result = os.path.join(directory, filename)
return result
```
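A short usage sketch of the class above; the import path is an assumption based on the module location.
```python
from landbosse.excelio import XlsxFileOperations  # assumed import path

file_ops = XlsxFileOperations()
# Resolve directories from argv, the environment variables, or the defaults.
input_dir, output_dir, validate = file_ops.get_input_output_paths_from_argv_or_env()
# Timestamped output path such as <output>/landbosse-<ts>/landbosse-output-<ts>.xlsx
output_xlsx = file_ops.timestamp_filename(
    file_ops.landbosse_output_dir(), 'landbosse-output', 'xlsx')
```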
#### File: landbosse/model/Manager.py
```python
import traceback
import math
from .ManagementCost import ManagementCost
from .FoundationCost import FoundationCost
from .SubstationCost import SubstationCost
from .GridConnectionCost import GridConnectionCost
from .SitePreparationCost import SitePreparationCost
from .CollectionCost import Cable, Array, ArraySystem
from .ErectionCost import ErectionCost
from .DevelopmentCost import DevelopmentCost
class Manager:
"""
The Manager class distributes input and output dictionaries among
the various modules. It maintains the hierarchical dictionary
structure.
"""
def __init__(self, input_dict, output_dict):
"""
This initializer sets up the instance variables of:
self.cost_modules: A list of cost module instances. Each of the
instances must implement the method input_output.
self.input_dict: A placeholder for the inputs dictionary
self.output_dict: A placeholder for the output dictionary
"""
self.input_dict = input_dict
self.output_dict = output_dict
def execute_landbosse(self, project_name):
try:
# Create weather window that will be used for all tasks (window for entire project; selected to restrict to seasons and hours specified)
weather_data_user_input = self.input_dict['weather_window']
season_construct = self.input_dict['season_construct']
time_construct = self.input_dict['time_construct']
daily_operational_hours = self.input_dict['hour_day'][time_construct]
# Filtered window. Restrict to the seasons and hours specified.
filtered_weather_window = weather_data_user_input.loc[(weather_data_user_input['Season'].isin(season_construct)) & (weather_data_user_input['Time window'] == time_construct)]
filtered_weather_window = filtered_weather_window[0:(math.ceil(self.input_dict['construct_duration'] * 30 * daily_operational_hours))]
# Rename weather data to specify types
self.input_dict['weather_window'] = filtered_weather_window
self.input_dict['weather_data_user_input'] = weather_data_user_input
foundation_cost = FoundationCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
foundation_cost.run_module()
roads_cost = SitePreparationCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
roads_cost.run_module()
substation_cost = SubstationCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
substation_cost.run_module()
transdist_cost = GridConnectionCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
transdist_cost.run_module()
collection_cost = ArraySystem(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
collection_cost.run_module()
development_cost = DevelopmentCost(input_dict=self.input_dict, output_dict=self.output_dict,
project_name=project_name)
development_cost.run_module()
erection_cost_output_dict = dict()
erection_cost = ErectionCost(
input_dict=self.input_dict,
output_dict=self.output_dict,
project_name=project_name
)
erection_cost.run_module()
self.output_dict['erection_cost'] = erection_cost_output_dict
total_costs = self.output_dict['total_collection_cost']
total_costs = total_costs.append(self.output_dict['total_road_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_transdist_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_substation_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_foundation_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_erection_cost'], sort=False)
total_costs = total_costs.append(self.output_dict['total_development_cost'],sort=False)
self.input_dict['project_value_usd'] = total_costs.sum(numeric_only=True)[0]
self.input_dict['foundation_cost_usd'] = self.output_dict['total_foundation_cost'].sum(numeric_only=True)[0]
management_cost = ManagementCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=project_name)
management_cost.run_module()
return 0
except Exception:
traceback.print_exc()
return 1 # module did not run successfully
``` |
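A minimal sketch of driving `Manager` for one project, assuming `input_dict` has already been filled with the inputs the cost modules read (weather_window, season_construct, time_construct, hour_day, construct_duration, and so on); the import path and helper name are assumptions.
```python
from landbosse.model import Manager  # assumed import path

def run_single_project(input_dict, project_name='project_1'):
    """Run every LandBOSSE cost module once and return the populated outputs."""
    output_dict = {}
    status = Manager(input_dict=input_dict, output_dict=output_dict).execute_landbosse(project_name)
    if status != 0:
        raise RuntimeError('A LandBOSSE module failed; see the printed traceback.')
    return output_dict
```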
{
"source": "jp7492code/classification-and-regression",
"score": 2
} |
#### File: Ch02/02_04/get_mean_function.py
```python
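# Assumes a pandas DataFrame named df is already defined in the enclosing scope.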
def get_mean(column):
return df[column].mean()
```
#### File: Ch03/helper_funcs/line_helpers.py
```python
def monthly_avg_calc(mo,col):
return df[df['date'].str.contains('201[2345]_[0]?'+ str(mo))][col].mean()
```
#### File: Ch03/helper_funcs/table_helpers.py
```python
def max_temp(s):
return s[s['Air_Temp']==s['Air_Temp'].max()]
def min_temp(s):
return s[s['Air_Temp']==s['Air_Temp'].min()]
def min_max_temps(yr):
return [(max_temp(s)['Air_Temp'].values[0],
max_temp(s)['date'].values[0],
min_temp(s)['Air_Temp'].values[0],
min_temp(s)['date'].values[0]
) for s in get_seasons(yr)]
``` |
{
"source": "jp7492code/multiple-linear-regression",
"score": 4
} |
#### File: multiple-linear-regression/Accidents/4_calculate_median.py
```python
import sqlite3 # provides python with a library for sqlite
SQLITE_FILE = "UKRoadData.sqlite"
conn = sqlite3.connect(SQLITE_FILE)
myCursor = conn.cursor()
# SQLite does not have a "median" function. Use create_function
# first, create a function to calculate median
def calcMedian(theListofValues):
quotient, remainder = divmod(len(theListofValues),2)
if remainder:
return sorted(theListofValues)[quotient]
return sum(sorted(theListofValues)[quotient - 1:quotient + 1]) / 2
# then install the function in sqlite
# create_function(name, # of parameters, func)
conn.create_function("median",1,calcMedian)
do_this_sqlite = """
SELECT median(Accident_Severity),avg(Accident_Severity) as Severity,Label
FROM Accidents_2015
LEFT JOIN Vehicles_2015 ON Accidents_2015.Accident_Index = Vehicles_2015.Accident_Index
LEFT JOIN vehicle_type ON Vehicle_Type LIKE vehicle_type.Code
WHERE Label LIKE "%otorcycle%"
GROUP BY Label
ORDER BY Severity
"""
print '{:>40} {:>13} {:>13}'.format("Motorcycle","Med Severity","Avg Severity")
print "=" * (40+13+13+3)
for aRow in myCursor.execute(do_this_sqlite):
print '{2:>40} {0:^13} {1:^13.2f}'.format(*aRow)
``` |
{
"source": "jp8042/oppia",
"score": 2
} |
#### File: core/domain/stats_services.py
```python
import collections
import copy
import itertools
from core.domain import interaction_registry
from core.domain import stats_domain
from core.platform import models
import feconf
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
transaction_services = models.Registry.import_transaction_services()
# Counts contributions from all versions.
VERSION_ALL = 'all'
def _migrate_to_latest_issue_schema(exp_issue_dict):
"""Holds the responsibility of performing a step-by-step sequential update
of an exploration issue dict based on its schema version. If the current
issue schema version changes (stats_models.CURRENT_ISSUE_SCHEMA_VERSION), a
new conversion function must be added and some code appended to this
function to account for that new version.
Args:
exp_issue_dict: dict. Dict representing the exploration issue.
Raises:
Exception. The issue_schema_version is invalid.
"""
issue_schema_version = exp_issue_dict['schema_version']
if issue_schema_version is None or issue_schema_version < 1:
issue_schema_version = 0
if not (0 <= issue_schema_version
<= stats_models.CURRENT_ISSUE_SCHEMA_VERSION):
raise Exception(
            'Sorry, we can only process v1-v%d and unversioned issue schemas '
            'at present.' %
stats_models.CURRENT_ISSUE_SCHEMA_VERSION)
while issue_schema_version < stats_models.CURRENT_ISSUE_SCHEMA_VERSION:
stats_domain.ExplorationIssue.update_exp_issue_from_model(
exp_issue_dict)
issue_schema_version += 1
def _migrate_to_latest_action_schema(learner_action_dict):
"""Holds the responsibility of performing a step-by-step sequential update
    of a learner action dict based on its schema version. If the current action
schema version changes (stats_models.CURRENT_ACTION_SCHEMA_VERSION), a new
conversion function must be added and some code appended to this function to
account for that new version.
Args:
learner_action_dict: dict. Dict representing the learner action.
Raises:
Exception. The action_schema_version is invalid.
"""
action_schema_version = learner_action_dict['schema_version']
if action_schema_version is None or action_schema_version < 1:
action_schema_version = 0
if not (0 <= action_schema_version
<= stats_models.CURRENT_ACTION_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1-v%d and unversioned action schemas '
'at present.' %
stats_models.CURRENT_ACTION_SCHEMA_VERSION)
while action_schema_version < stats_models.CURRENT_ACTION_SCHEMA_VERSION:
stats_domain.LearnerAction.update_learner_action_from_model(
learner_action_dict)
action_schema_version += 1
def get_exploration_stats(exp_id, exp_version):
"""Retrieves the ExplorationStats domain instance.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
Returns:
ExplorationStats. The exploration stats domain object.
"""
exploration_stats = get_exploration_stats_by_id(exp_id, exp_version)
if exploration_stats is None:
exploration_stats = stats_domain.ExplorationStats.create_default(
exp_id, exp_version, {})
return exploration_stats
def update_stats(exp_id, exp_version, aggregated_stats):
"""Updates ExplorationStatsModel according to the dict containing aggregated
stats.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
aggregated_stats: dict. Dict representing an ExplorationStatsModel
instance with stats aggregated in the frontend.
"""
exploration_stats = get_exploration_stats_by_id(
exp_id, exp_version)
exploration_stats.num_starts_v2 += aggregated_stats['num_starts']
exploration_stats.num_completions_v2 += aggregated_stats['num_completions']
exploration_stats.num_actual_starts_v2 += aggregated_stats[
'num_actual_starts']
for state_name in aggregated_stats['state_stats_mapping']:
exploration_stats.state_stats_mapping[
state_name].total_answers_count_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['total_answers_count']
exploration_stats.state_stats_mapping[
state_name].useful_feedback_count_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['useful_feedback_count']
exploration_stats.state_stats_mapping[
state_name].total_hit_count_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['total_hit_count']
exploration_stats.state_stats_mapping[
state_name].first_hit_count_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['first_hit_count']
exploration_stats.state_stats_mapping[
state_name].num_times_solution_viewed_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['num_times_solution_viewed']
exploration_stats.state_stats_mapping[
state_name].num_completions_v2 += aggregated_stats[
'state_stats_mapping'][state_name]['num_completions']
save_stats_model_transactional(exploration_stats)
def handle_stats_creation_for_new_exploration(exp_id, exp_version, state_names):
"""Creates ExplorationStatsModel for the freshly created exploration and
sets all initial values to zero.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
state_names: list(str). State names of the exploration.
"""
state_stats_mapping = {
state_name: stats_domain.StateStats.create_default()
for state_name in state_names
}
exploration_stats = stats_domain.ExplorationStats.create_default(
exp_id, exp_version, state_stats_mapping)
create_stats_model(exploration_stats)
def handle_stats_creation_for_new_exp_version(
exp_id, exp_version, state_names, exp_versions_diff, revert_to_version):
"""Retrieves the ExplorationStatsModel for the old exp_version and makes
any required changes to the structure of the model. Then, a new
ExplorationStatsModel is created for the new exp_version.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
state_names: list(str). State names of the exploration.
exp_versions_diff: ExplorationVersionsDiff|None. The domain object for
the exploration versions difference, None if it is a revert.
revert_to_version: int|None. If the change is a revert, the version.
Otherwise, None.
"""
old_exp_version = exp_version - 1
new_exp_version = exp_version
exploration_stats = get_exploration_stats_by_id(
exp_id, old_exp_version)
if exploration_stats is None:
handle_stats_creation_for_new_exploration(
exp_id, new_exp_version, state_names)
return
# Handling reverts.
if revert_to_version:
old_exp_stats = get_exploration_stats_by_id(exp_id, revert_to_version)
# If the old exploration issues model doesn't exist, the current model
# is carried over (this is a fallback case for some tests, and can
# never happen in production.)
if old_exp_stats:
exploration_stats.num_starts_v2 = old_exp_stats.num_starts_v2
exploration_stats.num_actual_starts_v2 = (
old_exp_stats.num_actual_starts_v2)
exploration_stats.num_completions_v2 = (
old_exp_stats.num_completions_v2)
exploration_stats.state_stats_mapping = (
old_exp_stats.state_stats_mapping)
exploration_stats.exp_version = new_exp_version
create_stats_model(exploration_stats)
return
# Handling state deletions.
for state_name in exp_versions_diff.deleted_state_names:
exploration_stats.state_stats_mapping.pop(state_name)
# Handling state additions.
for state_name in exp_versions_diff.added_state_names:
exploration_stats.state_stats_mapping[state_name] = (
stats_domain.StateStats.create_default())
# Handling state renames.
for new_state_name in exp_versions_diff.new_to_old_state_names:
exploration_stats.state_stats_mapping[new_state_name] = (
exploration_stats.state_stats_mapping.pop(
exp_versions_diff.new_to_old_state_names[new_state_name]))
exploration_stats.exp_version = new_exp_version
# Create new statistics model.
create_stats_model(exploration_stats)
def create_exp_issues_for_new_exploration(exp_id, exp_version):
"""Creates the ExplorationIssuesModel instance for the exploration.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
"""
stats_models.ExplorationIssuesModel.create(exp_id, exp_version, [])
def _handle_exp_issues_after_state_deletion(
state_name, exp_issue, deleted_state_names):
"""Checks if the exploration issue's concerned state is a deleted state and
    invalidates the exploration issue accordingly.
Args:
        state_name: str. The issue's concerned state name.
exp_issue: ExplorationIssue. The exploration issue domain object.
deleted_state_names: list(str). The list of deleted state names in this
commit.
Returns:
ExplorationIssue. The exploration issue domain object.
"""
if state_name in deleted_state_names:
exp_issue.is_valid = False
return exp_issue
def _handle_exp_issues_after_state_rename(
state_name, exp_issue, old_to_new_state_names,
playthrough_ids_by_state_name):
"""Checks if the exploration issue's concerned state is a renamed state and
    modifies the exploration issue accordingly.
Args:
        state_name: str. The issue's concerned state name.
exp_issue: ExplorationIssue. The exploration issue domain object.
        old_to_new_state_names: dict. The dict mapping state names to their
            renamed versions. This mapping only contains the state names that
            were actually renamed.
        playthrough_ids_by_state_name: dict. The dict mapping old state names
            to the lists of playthrough IDs collected for them.
Returns:
        tuple(ExplorationIssue, dict). The updated exploration issue domain
            object and the updated playthrough_ids_by_state_name dict.
"""
if state_name not in old_to_new_state_names:
return exp_issue, playthrough_ids_by_state_name
old_state_name = state_name
new_state_name = old_to_new_state_names[old_state_name]
if stats_models.ISSUE_TYPE_KEYNAME_MAPPING[
exp_issue.issue_type] == 'state_names':
state_names = exp_issue.issue_customization_args['state_names'][
'value']
exp_issue.issue_customization_args['state_names']['value'] = [
new_state_name
if state_name == old_state_name else state_name
for state_name in state_names]
else:
exp_issue.issue_customization_args['state_name']['value'] = (
new_state_name)
playthrough_ids_by_state_name[old_state_name].extend(
exp_issue.playthrough_ids)
return exp_issue, playthrough_ids_by_state_name
def update_exp_issues_for_new_exp_version(
exploration, exp_versions_diff, revert_to_version):
"""Retrieves the ExplorationIssuesModel for the old exp_version and makes
any required changes to the structure of the model.
Args:
exploration: Exploration. Domain object for the exploration.
exp_versions_diff: ExplorationVersionsDiff|None. The domain object for
the exploration versions difference, None if it is a revert.
revert_to_version: int|None. If the change is a revert, the version.
Otherwise, None.
"""
exp_issues = get_exp_issues(exploration.id, exploration.version - 1)
if exp_issues is None:
create_exp_issues_for_new_exploration(
exploration.id, exploration.version - 1)
return
# Handling reverts.
if revert_to_version:
old_exp_issues = get_exp_issues(exploration.id, revert_to_version)
# If the old exploration issues model doesn't exist, the current model
# is carried over (this is a fallback case for some tests, and can
# never happen in production.)
if old_exp_issues:
exp_issues.unresolved_issues = old_exp_issues.unresolved_issues
exp_issues.exp_version = exploration.version + 1
create_exp_issues_model(exp_issues)
return
playthrough_ids_by_state_name = collections.defaultdict(list)
for i_idx, exp_issue in enumerate(exp_issues.unresolved_issues):
keyname = stats_models.ISSUE_TYPE_KEYNAME_MAPPING[exp_issue.issue_type]
if keyname == 'state_names':
state_names = exp_issue.issue_customization_args[keyname]['value']
for state_name in state_names:
# Handle exp issues changes for deleted states.
exp_issues.unresolved_issues[i_idx] = (
_handle_exp_issues_after_state_deletion(
state_name, exp_issue,
exp_versions_diff.deleted_state_names))
# Handle exp issues changes for renamed states.
exp_issues.unresolved_issues[
i_idx], playthrough_ids_by_state_name = (
_handle_exp_issues_after_state_rename(
state_name, exp_issue,
exp_versions_diff.old_to_new_state_names,
playthrough_ids_by_state_name))
else:
state_name = exp_issue.issue_customization_args[keyname]['value']
# Handle exp issues changes for deleted states.
exp_issues.unresolved_issues[i_idx] = (
_handle_exp_issues_after_state_deletion(
state_name, exp_issue,
exp_versions_diff.deleted_state_names))
# Handle exp issues changes for renamed states.
exp_issues.unresolved_issues[
i_idx], playthrough_ids_by_state_name = (
_handle_exp_issues_after_state_rename(
state_name, exp_issue,
exp_versions_diff.old_to_new_state_names,
playthrough_ids_by_state_name))
# Handling changes to playthrough instances.
all_playthrough_ids = []
all_playthroughs = []
for old_state_name in playthrough_ids_by_state_name:
new_state_name = exp_versions_diff.old_to_new_state_names[
old_state_name]
playthrough_ids = playthrough_ids_by_state_name[old_state_name]
playthroughs = get_playthroughs_multi(playthrough_ids)
for p_idx, playthrough in enumerate(playthroughs):
if stats_models.ISSUE_TYPE_KEYNAME_MAPPING[
playthrough.issue_type] == 'state_names':
state_names = playthrough.issue_customization_args[
'state_names']['value']
playthrough.issue_customization_args['state_names']['value'] = [
new_state_name
if state_name == old_state_name else state_name
for state_name in state_names]
else:
playthrough.issue_customization_args['state_name']['value'] = (
new_state_name)
for a_idx, action in enumerate(playthrough.actions):
if action.action_customization_args['state_name']['value'] == (
old_state_name):
playthroughs[p_idx].actions[
a_idx].action_customization_args['state_name'][
'value'] = new_state_name
all_playthrough_ids.extend(playthrough_ids)
all_playthroughs.extend(playthroughs)
update_playthroughs_multi(all_playthrough_ids, all_playthroughs)
exp_issues.exp_version += 1
create_exp_issues_model(exp_issues)
def get_exp_issues(exp_id, exp_version):
"""Retrieves the ExplorationIssues domain object.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
Returns:
ExplorationIssues|None: The domain object for exploration issues or
None if the exp_id is invalid.
"""
exp_issues = None
exp_issues_model = stats_models.ExplorationIssuesModel.get_model(
exp_id, exp_version)
if exp_issues_model is not None:
exp_issues = get_exp_issues_from_model(exp_issues_model)
return exp_issues
def get_playthrough_by_id(playthrough_id):
"""Retrieves the Playthrough domain object.
Args:
playthrough_id: str. ID of the playthrough.
Returns:
Playthrough|None: The domain object for the playthrough or None if the
playthrough_id is invalid.
"""
playthrough = None
playthrough_model = stats_models.PlaythroughModel.get(
playthrough_id, strict=False)
if playthrough_model is not None:
playthrough = get_playthrough_from_model(playthrough_model)
return playthrough
def get_playthroughs_multi(playthrough_ids):
"""Retrieves multiple Playthrough domain objects.
Args:
playthrough_ids: list(str). List of playthrough IDs.
Returns:
list(Playthrough). List of playthrough domain objects.
"""
playthrough_instances = stats_models.PlaythroughModel.get_multi(
playthrough_ids)
playthroughs = [
get_playthrough_from_model(playthrough_instance)
for playthrough_instance in playthrough_instances]
return playthroughs
def update_playthroughs_multi(playthrough_ids, playthroughs):
"""Updates the playthrough instances.
Args:
playthrough_ids: list(str). List of playthrough IDs.
playthroughs: list(Playthrough). List of playthrough domain objects.
"""
playthrough_instances = stats_models.PlaythroughModel.get_multi(
playthrough_ids)
updated_instances = []
for idx, playthrough_instance in enumerate(playthrough_instances):
playthrough_dict = playthroughs[idx].to_dict()
playthrough_instance.issue_type = playthrough_dict['issue_type']
playthrough_instance.issue_customization_args = (
playthrough_dict['issue_customization_args'])
playthrough_instance.actions = playthrough_dict['actions']
updated_instances.append(playthrough_instance)
stats_models.PlaythroughModel.put_multi(updated_instances)
def get_exploration_stats_by_id(exp_id, exp_version):
"""Retrieves the ExplorationStats domain object.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
Returns:
ExplorationStats. The domain object for exploration statistics.
Raises:
Exception: Entity for class ExplorationStatsModel with id not found.
"""
exploration_stats = None
exploration_stats_model = stats_models.ExplorationStatsModel.get_model(
exp_id, exp_version)
if exploration_stats_model is not None:
exploration_stats = get_exploration_stats_from_model(
exploration_stats_model)
return exploration_stats
def get_multiple_exploration_stats_by_version(exp_id, version_numbers):
"""Returns a list of ExplorationStats domain objects corresponding to the
specified versions.
Args:
exp_id: str. ID of the exploration.
version_numbers: list(int). List of version numbers.
Returns:
list(ExplorationStats|None). List of ExplorationStats domain class
instances.
"""
exploration_stats = []
exploration_stats_models = (
stats_models.ExplorationStatsModel.get_multi_versions(
exp_id, version_numbers))
for exploration_stats_model in exploration_stats_models:
if exploration_stats_model is None:
exploration_stats.append(None)
else:
exploration_stats.append(get_exploration_stats_from_model(
exploration_stats_model))
return exploration_stats
def get_exp_issues_from_model(exp_issues_model):
"""Gets an ExplorationIssues domain object from an ExplorationIssuesModel
instance.
Args:
exp_issues_model: ExplorationIssuesModel. Exploration issues model in
datastore.
Returns:
ExplorationIssues. The domain object for exploration issues.
"""
unresolved_issues = []
for unresolved_issue_dict in exp_issues_model.unresolved_issues:
_migrate_to_latest_issue_schema(copy.deepcopy(unresolved_issue_dict))
unresolved_issues.append(
stats_domain.ExplorationIssue.from_dict(unresolved_issue_dict))
return stats_domain.ExplorationIssues(
exp_issues_model.exp_id, exp_issues_model.exp_version,
unresolved_issues)
def get_exploration_stats_from_model(exploration_stats_model):
"""Gets an ExplorationStats domain object from an ExplorationStatsModel
instance.
Args:
exploration_stats_model: ExplorationStatsModel. Exploration statistics
model in datastore.
Returns:
ExplorationStats. The domain object for exploration statistics.
"""
new_state_stats_mapping = {
state_name: stats_domain.StateStats.from_dict(
exploration_stats_model.state_stats_mapping[state_name])
for state_name in exploration_stats_model.state_stats_mapping
}
return stats_domain.ExplorationStats(
exploration_stats_model.exp_id,
exploration_stats_model.exp_version,
exploration_stats_model.num_starts_v1,
exploration_stats_model.num_starts_v2,
exploration_stats_model.num_actual_starts_v1,
exploration_stats_model.num_actual_starts_v2,
exploration_stats_model.num_completions_v1,
exploration_stats_model.num_completions_v2,
new_state_stats_mapping)
def get_playthrough_from_model(playthrough_model):
"""Gets a PlaythroughModel domain object from a PlaythroughModel instance.
Args:
playthrough_model: PlaythroughModel. Playthrough model in datastore.
Returns:
Playthrough. The domain object for a playthrough.
"""
actions = []
for action_dict in playthrough_model.actions:
_migrate_to_latest_action_schema(action_dict)
actions.append(stats_domain.LearnerAction.from_dict(action_dict))
return stats_domain.Playthrough(
playthrough_model.exp_id, playthrough_model.exp_version,
playthrough_model.issue_type,
playthrough_model.issue_customization_args, actions)
def create_stats_model(exploration_stats):
"""Creates an ExplorationStatsModel in datastore given an ExplorationStats
domain object.
Args:
exploration_stats: ExplorationStats. The domain object for exploration
statistics.
Returns:
str. ID of the datastore instance for ExplorationStatsModel.
"""
new_state_stats_mapping = {
state_name: exploration_stats.state_stats_mapping[state_name].to_dict()
for state_name in exploration_stats.state_stats_mapping
}
instance_id = stats_models.ExplorationStatsModel.create(
exploration_stats.exp_id,
exploration_stats.exp_version,
exploration_stats.num_starts_v1,
exploration_stats.num_starts_v2,
exploration_stats.num_actual_starts_v1,
exploration_stats.num_actual_starts_v2,
exploration_stats.num_completions_v1,
exploration_stats.num_completions_v2,
new_state_stats_mapping
)
return instance_id
def _save_stats_model(exploration_stats):
"""Updates the ExplorationStatsModel datastore instance with the passed
ExplorationStats domain object.
Args:
exploration_stats. ExplorationStats. The exploration statistics domain
object.
"""
new_state_stats_mapping = {
state_name: exploration_stats.state_stats_mapping[state_name].to_dict()
for state_name in exploration_stats.state_stats_mapping
}
exploration_stats_model = stats_models.ExplorationStatsModel.get_model(
exploration_stats.exp_id, exploration_stats.exp_version)
exploration_stats_model.num_starts_v1 = exploration_stats.num_starts_v1
exploration_stats_model.num_starts_v2 = exploration_stats.num_starts_v2
exploration_stats_model.num_actual_starts_v1 = (
exploration_stats.num_actual_starts_v1)
exploration_stats_model.num_actual_starts_v2 = (
exploration_stats.num_actual_starts_v2)
exploration_stats_model.num_completions_v1 = (
exploration_stats.num_completions_v1)
exploration_stats_model.num_completions_v2 = (
exploration_stats.num_completions_v2)
exploration_stats_model.state_stats_mapping = new_state_stats_mapping
exploration_stats_model.put()
def save_stats_model_transactional(exploration_stats):
"""Updates the ExplorationStatsModel datastore instance with the passed
ExplorationStats domain object in a transaction.
Args:
exploration_stats. ExplorationStats. The exploration statistics domain
object.
"""
transaction_services.run_in_transaction(
_save_stats_model, exploration_stats)
def create_exp_issues_model(exp_issues):
"""Creates a new ExplorationIssuesModel in the datastore.
Args:
exp_issues: ExplorationIssues. The exploration issues domain object.
"""
unresolved_issues_dicts = [
unresolved_issue.to_dict()
for unresolved_issue in exp_issues.unresolved_issues]
stats_models.ExplorationIssuesModel.create(
exp_issues.exp_id, exp_issues.exp_version, unresolved_issues_dicts)
def _save_exp_issues_model(exp_issues):
"""Updates the ExplorationIssuesModel datastore instance with the passed
ExplorationIssues domain object.
Args:
exp_issues: ExplorationIssues. The exploration issues domain
object.
"""
unresolved_issues_dicts = [
unresolved_issue.to_dict()
for unresolved_issue in exp_issues.unresolved_issues]
exp_issues_model = stats_models.ExplorationIssuesModel.get_model(
exp_issues.exp_id, exp_issues.exp_version)
exp_issues_model.exp_version = exp_issues.exp_version
exp_issues_model.unresolved_issues = unresolved_issues_dicts
exp_issues_model.put()
def save_exp_issues_model_transactional(exp_issues):
"""Updates the ExplorationIssuesModel datastore instance with the passed
ExplorationIssues domain object in a transaction.
Args:
exp_issues: ExplorationIssues. The exploration issues domain
object.
"""
transaction_services.run_in_transaction(
_save_exp_issues_model, exp_issues)
def get_exploration_stats_multi(exp_version_references):
"""Retrieves the exploration stats for the given explorations.
Args:
exp_version_references: list(ExpVersionReference). List of exploration
version reference domain objects.
Returns:
list(ExplorationStats). The list of exploration stats domain objects.
"""
exploration_stats_models = (
stats_models.ExplorationStatsModel.get_multi_stats_models(
exp_version_references))
exploration_stats_list = []
for index, exploration_stats_model in enumerate(exploration_stats_models):
if exploration_stats_model is None:
exploration_stats_list.append(
stats_domain.ExplorationStats.create_default(
exp_version_references[index].exp_id,
exp_version_references[index].version,
{}))
else:
exploration_stats_list.append(
get_exploration_stats_from_model(exploration_stats_model))
return exploration_stats_list
def delete_playthroughs_multi(playthrough_ids):
"""Deletes multiple playthrough instances.
Args:
playthrough_ids: list(str). List of playthrough IDs to be deleted.
"""
stats_models.PlaythroughModel.delete_playthroughs_multi(playthrough_ids)
def get_visualizations_info(exp_id, state_name, interaction_id):
"""Returns a list of visualization info. Each item in the list is a dict
with keys 'data' and 'options'.
Args:
exp_id: str. The ID of the exploration.
state_name: str. Name of the state.
interaction_id: str. The interaction type.
Returns:
list(dict). Each item in the list is a dict with keys representing
- 'id': str. The visualization ID.
- 'data': list(dict). A list of answer/frequency dicts.
- 'options': dict. The visualization options.
An example of the returned value may be:
[{'options': {'y_axis_label': 'Count', 'x_axis_label': 'Answer'},
'id': 'BarChart',
'data': [{u'frequency': 1, u'answer': 0}]}]
"""
if interaction_id is None:
return []
visualizations = interaction_registry.Registry.get_interaction_by_id(
interaction_id).answer_visualizations
calculation_ids = set([
visualization.calculation_id for visualization in visualizations])
calculation_ids_to_outputs = {}
for calculation_id in calculation_ids:
# Don't show the top unresolved answers calculation output in the
# exploration's statistics.
if calculation_id == 'TopNUnresolvedAnswersByFrequency':
continue
# This is None if the calculation job has not yet been run for this
# state.
calc_output_domain_object = _get_calc_output(
exp_id, state_name, calculation_id)
# If the calculation job has not yet been run for this state, we simply
# exclude the corresponding visualization results.
if calc_output_domain_object is None:
continue
# If the output was associated with a different interaction ID, skip the
# results. This filtering step is needed since the same calculation_id
# can be shared across multiple interaction types.
if calc_output_domain_object.interaction_id != interaction_id:
continue
calculation_ids_to_outputs[calculation_id] = (
calc_output_domain_object.calculation_output.to_raw_type())
return [{
'id': visualization.id,
'data': calculation_ids_to_outputs[visualization.calculation_id],
'options': visualization.options,
'addressed_info_is_supported': (
visualization.addressed_info_is_supported),
} for visualization in visualizations
if visualization.calculation_id in calculation_ids_to_outputs]
def record_answer(
exploration_id, exploration_version, state_name, interaction_id,
submitted_answer):
"""Record an answer by storing it to the corresponding StateAnswers entity.
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration.
state_name: str. The name of the state.
interaction_id: str. The ID of the interaction.
submitted_answer: SubmittedAnswer. The submitted answer.
"""
record_answers(
exploration_id, exploration_version, state_name, interaction_id,
[submitted_answer])
def record_answers(
exploration_id, exploration_version, state_name, interaction_id,
submitted_answer_list):
"""Optimally record a group of answers using an already loaded exploration..
The submitted_answer_list is a list of SubmittedAnswer domain objects.
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration.
state_name: str. The name of the state.
interaction_id: str. The ID of the interaction.
submitted_answer_list: list(SubmittedAnswer). The list of answers to be
recorded.
"""
state_answers = stats_domain.StateAnswers(
exploration_id, exploration_version, state_name, interaction_id,
submitted_answer_list)
for submitted_answer in submitted_answer_list:
submitted_answer.validate()
stats_models.StateAnswersModel.insert_submitted_answers(
state_answers.exploration_id, state_answers.exploration_version,
state_answers.state_name, state_answers.interaction_id,
state_answers.get_submitted_answer_dict_list())
def get_state_answers(exploration_id, exploration_version, state_name):
"""Returns a StateAnswers object containing all answers associated with the
specified exploration state, or None if no such answers have yet been
submitted.
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration to fetch
answers for.
state_name: str. The name of the state to fetch answers for.
Returns:
StateAnswers or None. A StateAnswers object containing all answers
associated with the state, or None if no such answers exist.
"""
state_answers_models = stats_models.StateAnswersModel.get_all_models(
exploration_id, exploration_version, state_name)
if state_answers_models:
main_state_answers_model = state_answers_models[0]
submitted_answer_dict_list = itertools.chain.from_iterable([
state_answers_model.submitted_answer_list
for state_answers_model in state_answers_models])
return stats_domain.StateAnswers(
exploration_id, exploration_version, state_name,
main_state_answers_model.interaction_id,
[stats_domain.SubmittedAnswer.from_dict(submitted_answer_dict)
for submitted_answer_dict in submitted_answer_dict_list],
schema_version=main_state_answers_model.schema_version)
else:
return None
def get_sample_answers(exploration_id, exploration_version, state_name):
"""Fetches a list of sample answers that were submitted to the specified
exploration state (at the given version of the exploration).
Args:
exploration_id: str. The exploration ID.
exploration_version: int. The version of the exploration to fetch
answers for.
state_name: str. The name of the state to fetch answers for.
Returns:
list(*). A list of some sample raw answers. At most 100 answers are
returned.
"""
answers_model = stats_models.StateAnswersModel.get_master_model(
exploration_id, exploration_version, state_name)
if answers_model is None:
return []
# Return at most 100 answers, and only answers from the initial shard (If
# we needed to use subsequent shards then the answers are probably too big
# anyway).
sample_answers = answers_model.submitted_answer_list[:100]
return [
stats_domain.SubmittedAnswer.from_dict(submitted_answer_dict).answer
for submitted_answer_dict in sample_answers]
def get_top_state_answer_stats(exploration_id, state_name):
"""Fetches the top (at most) 10 answers from the given state_name in the
corresponding exploration. Only answers that occur with frequency >=
STATE_ANSWER_STATS_MIN_FREQUENCY are returned.
Args:
exploration_id: str. The exploration ID.
state_name: str. The name of the state to fetch answers for.
Returns:
list(*). A list of the top 10 answers, sorted by decreasing frequency.
"""
calc_output = (
_get_calc_output(exploration_id, state_name, 'Top10AnswerFrequencies'))
raw_calc_output = (
[] if calc_output is None else
calc_output.calculation_output.to_raw_type())
return [
{'answer': output['answer'], 'frequency': output['frequency']}
for output in raw_calc_output
if output['frequency'] >= feconf.STATE_ANSWER_STATS_MIN_FREQUENCY
]
def get_top_state_unresolved_answers(exploration_id, state_name):
"""Fetches the top unresolved answers for the given state_name in the
corresponding exploration. Only answers that occur with frequency >=
STATE_ANSWER_STATS_MIN_FREQUENCY are returned.
Args:
exploration_id: str. The exploration ID.
state_name: str. The name of the state to fetch answers for.
Returns:
list(*). A list of the top unresolved answers, sorted by decreasing frequency.
"""
calc_output_model = _get_calc_output(
exploration_id, state_name, 'TopNUnresolvedAnswersByFrequency')
if not calc_output_model:
return []
calculation_output = calc_output_model.calculation_output.to_raw_type()
return [
{'answer': output['answer'], 'frequency': output['frequency']}
for output in calculation_output
if output['frequency'] >= feconf.STATE_ANSWER_STATS_MIN_FREQUENCY
]
def get_top_state_answer_stats_multi(exploration_id, state_names):
"""Fetches the top (at most) 10 answers from each given state_name in the
corresponding exploration. Only answers that occur with frequency >=
STATE_ANSWER_STATS_MIN_FREQUENCY are returned.
Args:
exploration_id: str. The exploration ID.
state_names: list(str). The name of the state to fetch answers for.
Returns:
dict(str: list(*)). Dict mapping each state name to the list of its top
(at most) 10 answers, sorted by decreasing frequency.
"""
return {
state_name: get_top_state_answer_stats(exploration_id, state_name)
for state_name in state_names
}
def _get_calc_output(exploration_id, state_name, calculation_id):
"""Get state answers calculation output domain object obtained from
StateAnswersCalcOutputModel instance stored in the data store. The
calculation ID comes from the name of the calculation class used to compute
aggregate data from submitted user answers. This returns aggregated output
for all versions of the specified state and exploration.
Args:
exploration_id: str. ID of the exploration.
state_name: str. Name of the state.
calculation_id: str. Name of the calculation class.
Returns:
StateAnswersCalcOutput|None. The state answers calculation output
domain object or None.
"""
calc_output_model = stats_models.StateAnswersCalcOutputModel.get_model(
exploration_id, VERSION_ALL, state_name, calculation_id)
if calc_output_model:
calculation_output = None
if (calc_output_model.calculation_output_type ==
stats_domain.CALC_OUTPUT_TYPE_ANSWER_FREQUENCY_LIST):
calculation_output = (
stats_domain.AnswerFrequencyList.from_raw_type(
calc_output_model.calculation_output))
elif (calc_output_model.calculation_output_type ==
stats_domain.CALC_OUTPUT_TYPE_CATEGORIZED_ANSWER_FREQUENCY_LISTS):
calculation_output = (
stats_domain.CategorizedAnswerFrequencyLists.from_raw_type(
calc_output_model.calculation_output))
return stats_domain.StateAnswersCalcOutput(
exploration_id, VERSION_ALL, state_name,
calc_output_model.interaction_id, calculation_id,
calculation_output)
else:
return None
```
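A rough usage sketch of the read-side helpers defined above. The exploration ID, version, and state name are made up, and the module is assumed to be importable as `stats_services`:
```python
from core.domain import stats_services  # import path assumed

# Hypothetical identifiers, for illustration only.
exp_id, exp_version, state_name = 'exp_123', 4, 'Introduction'

# Aggregate start/completion counters for one exploration version.
stats = stats_services.get_exploration_stats_by_id(exp_id, exp_version)
if stats is not None:
    print(stats.num_starts_v2, stats.num_completions_v2)

# Unresolved issues recorded for this version, if the model exists.
exp_issues = stats_services.get_exp_issues(exp_id, exp_version)
if exp_issues is not None:
    print(len(exp_issues.unresolved_issues))

# Frequency-filtered top answers for a single state.
for answer in stats_services.get_top_state_answer_stats(exp_id, state_name):
    print(answer['answer'], answer['frequency'])
```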
#### File: extensions/answer_summarizers/models.py
```python
import collections
import itertools
import operator
from core.domain import exp_domain
from core.domain import stats_domain
import feconf
import utils
CLASSIFICATION_CATEGORIES = frozenset([
exp_domain.EXPLICIT_CLASSIFICATION,
exp_domain.TRAINING_DATA_CLASSIFICATION,
exp_domain.STATISTICAL_CLASSIFICATION,
exp_domain.DEFAULT_OUTCOME_CLASSIFICATION,
])
UNRESOLVED_ANSWER_CLASSIFICATION_CATEGORIES = frozenset([
exp_domain.STATISTICAL_CLASSIFICATION,
exp_domain.DEFAULT_OUTCOME_CLASSIFICATION,
])
class _HashableAnswer(object):
"""Wraps answer with object that can be placed into sets and dicts."""
def __init__(self, answer):
self.answer = answer
self.hashable_answer = utils.get_hashable_value(answer)
def __hash__(self):
return hash(self.hashable_answer)
def __eq__(self, other):
if isinstance(other, _HashableAnswer):
return self.hashable_answer == other.hashable_answer
return False
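# Illustrative note: raw answers may be unhashable (e.g. lists), so they are
# wrapped before being counted. Assuming utils.get_hashable_value maps equal
# answers to equal hashable values, two wrappers around the same answer compare
# and hash identically:
#   _HashableAnswer([1, 2]) == _HashableAnswer([1, 2])  -> True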
def _get_top_answers_by_frequency(answers, limit=None):
"""Computes the number of occurrences of each answer, keeping only the top
limit answers, and returns an AnswerFrequencyList.
This method is run from within the context of a MapReduce job.
Args:
answers: iterable(*). The collection of answers to be tallied.
limit: int or None. The maximum number of answers to return. When None,
all answers are returned.
Returns:
stats_domain.AnswerFrequencyList. A list of the top "limit" answers.
"""
answer_counter = utils.OrderedCounter(_HashableAnswer(a) for a in answers)
return stats_domain.AnswerFrequencyList([
stats_domain.AnswerOccurrence(hashable_answer.answer, frequency)
for hashable_answer, frequency in answer_counter.most_common(n=limit)
])
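# Illustrative example: _get_top_answers_by_frequency(['a', 'b', 'a'], limit=1)
# returns an AnswerFrequencyList whose raw form (via to_raw_type) would be
# [{'answer': 'a', 'frequency': 2}].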
def _get_top_unresolved_answers_by_frequency(
answers_with_classification, limit=None):
"""Computes the list of unresolved answers by keeping track of their latest
classification categorization and then computes the occurrences of each
unresolved answer, keeping only limit answers, and returns an
AnswerFrequencyList.
This method is run from within the context of a MapReduce job.
Args:
answers_with_classification: iterable(*). The collection of answers
with their corresponding classification categorization.
limit: int or None. The maximum number of answers to return. When None,
all answers are returned.
Returns:
stats_domain.AnswerFrequencyList. A list of the top "limit"
unresolved answers.
"""
classification_results_dict = {}
# The list of answers is sorted according to the time of answer submission.
# Thus following loop goes through the list and aggregates the most recent
# classification categorization of each answer.
for ans in answers_with_classification:
frequency = 0
if _HashableAnswer(ans['answer']) in classification_results_dict:
frequency = classification_results_dict[_HashableAnswer(
ans['answer'])]['frequency']
classification_results_dict[_HashableAnswer(ans['answer'])] = {
'classification_categorization': (
ans['classification_categorization']),
'frequency': frequency + 1
}
unresolved_answers_with_frequency_list = [{
'answer': ans.answer,
'frequency': val['frequency']
} for ans, val in classification_results_dict.iteritems() if val[
'classification_categorization'] in (
UNRESOLVED_ANSWER_CLASSIFICATION_CATEGORIES)]
unresolved_answers_with_frequency_list.sort(
key=lambda x: x['frequency'], reverse=True)
return stats_domain.AnswerFrequencyList([
stats_domain.AnswerOccurrence(item['answer'], item['frequency'])
for item in unresolved_answers_with_frequency_list[:limit]
])
class BaseCalculation(object):
"""Base calculation class.
This is the superclass for all calculations used to generate interaction
answer views.
"""
@property
def id(self):
return self.__class__.__name__
def calculate_from_state_answers_dict(self, state_answers_dict):
"""Perform calculation on a single StateAnswers entity. This is run in
the context of a batch MapReduce job.
This method must be overwritten in subclasses.
"""
raise NotImplementedError(
'Subclasses of BaseCalculation should implement the '
'calculate_from_state_answers_dict(state_answers_dict) method.')
class AnswerFrequencies(BaseCalculation):
"""Calculation for answers' frequencies (how often each answer was
submitted).
"""
def calculate_from_state_answers_dict(self, state_answers_dict):
"""Computes the number of occurrences of each answer, and returns a list
of dicts; each dict has keys 'answer' and 'frequency'.
This method is run from within the context of a MapReduce job.
"""
answer_dicts = state_answers_dict['submitted_answer_list']
answer_frequency_list = (
_get_top_answers_by_frequency(d['answer'] for d in answer_dicts))
return stats_domain.StateAnswersCalcOutput(
state_answers_dict['exploration_id'],
state_answers_dict['exploration_version'],
state_answers_dict['state_name'],
state_answers_dict['interaction_id'],
self.id,
answer_frequency_list)
class Top5AnswerFrequencies(BaseCalculation):
"""Calculation for the top 5 answers, by frequency."""
def calculate_from_state_answers_dict(self, state_answers_dict):
"""Computes the number of occurrences of each answer, keeping only the
top 5 answers, and returns a list of dicts; each dict has keys 'answer'
and 'frequency'.
This method is run from within the context of a MapReduce job.
"""
answer_dicts = state_answers_dict['submitted_answer_list']
answer_frequency_list = _get_top_answers_by_frequency(
(d['answer'] for d in answer_dicts), limit=5)
return stats_domain.StateAnswersCalcOutput(
state_answers_dict['exploration_id'],
state_answers_dict['exploration_version'],
state_answers_dict['state_name'],
state_answers_dict['interaction_id'],
self.id,
answer_frequency_list)
class Top10AnswerFrequencies(BaseCalculation):
"""Calculation for the top 10 answers, by frequency."""
def calculate_from_state_answers_dict(self, state_answers_dict):
"""Computes the number of occurrences of each answer, keeping only the
top 10 answers, and returns a list of dicts; each dict has keys 'answer'
and 'frequency'.
This method is run from within the context of a MapReduce job.
"""
answer_dicts = state_answers_dict['submitted_answer_list']
answer_frequency_list = _get_top_answers_by_frequency(
(d['answer'] for d in answer_dicts), limit=10)
return stats_domain.StateAnswersCalcOutput(
state_answers_dict['exploration_id'],
state_answers_dict['exploration_version'],
state_answers_dict['state_name'],
state_answers_dict['interaction_id'],
self.id,
answer_frequency_list)
class FrequencyCommonlySubmittedElements(BaseCalculation):
"""Calculation for determining the frequency of commonly submitted
individual answers among multiple set answers (such as of type
SetOfUnicodeString).
"""
def calculate_from_state_answers_dict(self, state_answers_dict):
"""Computes the number of occurrences of each individual answer across
all given answer sets, keeping only the top 10. Returns a list of dicts;
each dict has keys 'answer' and 'frequency'.
This method is run from within the context of a MapReduce job.
"""
answer_dicts = state_answers_dict['submitted_answer_list']
answer_frequency_list = _get_top_answers_by_frequency(
itertools.chain.from_iterable(d['answer'] for d in answer_dicts),
limit=10)
return stats_domain.StateAnswersCalcOutput(
state_answers_dict['exploration_id'],
state_answers_dict['exploration_version'],
state_answers_dict['state_name'],
state_answers_dict['interaction_id'],
self.id,
answer_frequency_list)
class TopAnswersByCategorization(BaseCalculation):
"""Calculation for the top answers by both frequency and respective
categorizations. The output from this calculation is one list for each
classification category, where each list is a ranked list of answers, by
frequency.
"""
def calculate_from_state_answers_dict(self, state_answers_dict):
"""Computes the number of occurrences of each answer, split into groups
based on the number of classification categories.
This method is run from within the context of a MapReduce job.
"""
grouped_submitted_answer_dicts = itertools.groupby(
state_answers_dict['submitted_answer_list'],
operator.itemgetter('classification_categorization'))
submitted_answers_by_categorization = collections.defaultdict(list)
for category, answer_dicts in grouped_submitted_answer_dicts:
if category in CLASSIFICATION_CATEGORIES:
submitted_answers_by_categorization[category].extend(
d['answer'] for d in answer_dicts)
categorized_answer_frequency_lists = (
stats_domain.CategorizedAnswerFrequencyLists({
category: _get_top_answers_by_frequency(categorized_answers)
for category, categorized_answers in
submitted_answers_by_categorization.iteritems()}))
return stats_domain.StateAnswersCalcOutput(
state_answers_dict['exploration_id'],
state_answers_dict['exploration_version'],
state_answers_dict['state_name'],
state_answers_dict['interaction_id'],
self.id,
categorized_answer_frequency_lists)
class TopNUnresolvedAnswersByFrequency(BaseCalculation):
"""Calculation for the top unresolved answers by frequency
The output from this calculation is a ranked list of unresolved answers,
in descending order of frequency.
"""
def calculate_from_state_answers_dict(self, state_answers_dict):
"""Filters unresolved answers and then computes the number of
occurrences of each unresolved answer.
This method is run within the context of a MapReduce job.
Args:
state_answers_dict: dict. A dict containing state answers and
exploration information such as:
* exploration_id: id of the exploration.
* exploration_version: Specific version of the exploration or
VERSION_ALL is used if answers are aggregated across
multiple versions.
* state_name: Name of the state.
* interaction_id: id of the interaction.
* submitted_answer_list: A list of submitted answers.
NOTE: The answers in this list must be sorted in
chronological order of their submission.
Returns:
stats_domain.StateAnswersCalcOutput. A calculation output object
containing the list of top unresolved answers, in descending
order of frequency (up to at most limit answers).
"""
answers_with_classification = [{
'answer': ans['answer'],
'classification_categorization': (
ans['classification_categorization'])
} for ans in state_answers_dict['submitted_answer_list']]
unresolved_answers = _get_top_unresolved_answers_by_frequency(
answers_with_classification,
limit=feconf.TOP_UNRESOLVED_ANSWERS_LIMIT)
return stats_domain.StateAnswersCalcOutput(
state_answers_dict['exploration_id'],
state_answers_dict['exploration_version'],
state_answers_dict['state_name'],
state_answers_dict['interaction_id'],
self.id,
unresolved_answers)
```
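A minimal sketch of driving one of the calculations above directly. The input dict mirrors the keys read by calculate_from_state_answers_dict, and every value in it is invented:
```python
state_answers_dict = {
    'exploration_id': 'exp_123',      # made-up identifiers
    'exploration_version': 4,
    'state_name': 'Introduction',
    'interaction_id': 'TextInput',
    'submitted_answer_list': [
        {'answer': 'cat'}, {'answer': 'dog'}, {'answer': 'cat'},
    ],
}

calc_output = Top10AnswerFrequencies().calculate_from_state_answers_dict(
    state_answers_dict)
# 'cat' (frequency 2) should be ranked ahead of 'dog' (frequency 1).
print(calc_output.calculation_output.to_raw_type())
```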
#### File: oppia/scripts/docstrings_checker.py
```python
import os
import re
import sys
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.8.4')
sys.path.insert(0, _PYLINT_PATH)
# pylint: disable=wrong-import-position
import astroid # isort:skip
from pylint.checkers import utils # isort:skip
from pylint.extensions import _check_docs_utils # isort:skip
# pylint: enable=wrong-import-position
def space_indentation(s):
"""The number of leading spaces in a string
Args:
s: str. The input string.
Returns:
int. The number of leading spaces.
"""
return len(s) - len(s.lstrip(' '))
def get_setters_property_name(node):
"""Get the name of the property that the given node is a setter for.
Args:
node: astroid.FunctionDef. The node to get the property name for.
Returns:
str|None. The name of the property that the node is a setter for,
or None if one could not be found.
"""
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
if (isinstance(decorator, astroid.Attribute) and
decorator.attrname == "setter" and
isinstance(decorator.expr, astroid.Name)):
return decorator.expr.name
return None
def get_setters_property(node):
"""Get the property node for the given setter node.
Args:
node: astroid.FunctionDef. The node to get the property for.
Returns:
astroid.FunctionDef|None. The node relating to the property of
the given setter node, or None if one could not be found.
"""
property_ = None
property_name = get_setters_property_name(node)
class_node = utils.node_frame_class(node)
if property_name and class_node:
class_attrs = class_node.getattr(node.name)
for attr in class_attrs:
if utils.decorated_with_property(attr):
property_ = attr
break
return property_
def returns_something(return_node):
"""Check if a return node returns a value other than None.
Args:
return_node: astroid.Return. The return node to check.
Returns:
bool. True if the return node returns a value
other than None, False otherwise.
"""
returns = return_node.value
if returns is None:
return False
return not (isinstance(returns, astroid.Const) and returns.value is None)
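# Illustrative behaviour: a bare `return` or an explicit `return None` makes
# returns_something() report False, while `return 0` or `return ''` reports
# True (a constant is returned and its value is not None).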
def possible_exc_types(node):
"""Gets all of the possible raised exception types for the given raise node.
Caught exception types are ignored.
Args:
node: astroid.node_classes.NodeNG. The raise node to find exception
types for.
Returns:
set(str). A set of exception types.
"""
excs = []
if isinstance(node.exc, astroid.Name):
inferred = utils.safe_infer(node.exc)
if inferred:
excs = [inferred.name]
elif (isinstance(node.exc, astroid.Call) and
isinstance(node.exc.func, astroid.Name)):
target = utils.safe_infer(node.exc.func)
if isinstance(target, astroid.ClassDef):
excs = [target.name]
elif isinstance(target, astroid.FunctionDef):
for ret in target.nodes_of_class(astroid.Return):
if ret.frame() != target:
continue
val = utils.safe_infer(ret.value)
if (val and isinstance(val, (
astroid.Instance, astroid.ClassDef)) and
utils.inherit_from_std_ex(val)):
excs.append(val.name)
elif node.exc is None:
handler = node.parent
while handler and not isinstance(handler, astroid.ExceptHandler):
handler = handler.parent
if handler and handler.type:
inferred_excs = astroid.unpack_infer(handler.type)
excs = (exc.name for exc in inferred_excs
if exc is not astroid.Uninferable)
try:
return set(
exc for exc in excs if not utils.node_ignores_exception(
node, exc))
except astroid.InferenceError:
return set()
def docstringify(docstring):
for docstring_type in [GoogleDocstring]:
instance = docstring_type(docstring)
if instance.is_valid():
return instance
return _check_docs_utils.Docstring(docstring)
class GoogleDocstring(_check_docs_utils.GoogleDocstring):
re_multiple_type = _check_docs_utils.GoogleDocstring.re_multiple_type
re_param_line = re.compile(r'''
\s* \*{{0,2}}(\w+) # identifier potentially with asterisks
\s* ( [:]
\s*
({type}|\S*)
(?:,\s+optional)?
[.] )? \s* # optional type declaration
\s* (.*) # beginning of optional description
'''.format(
type=re_multiple_type,
), flags=re.X | re.S | re.M)
re_returns_line = re.compile(r'''
\s* (({type}|\S*).)? # identifier
\s* (.*) # beginning of description
'''.format(
type=re_multiple_type,
), flags=re.X | re.S | re.M)
re_yields_line = re_returns_line
``` |
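A rough sketch of exercising one of the helpers above with astroid's node-extraction utility. The class and property names are invented, and the snippet assumes the pinned pylint/astroid versions expose the same API used in the module:
```python
import astroid

setter_node = astroid.extract_node('''
class Circle(object):
    @property
    def radius(self):
        return self._radius

    @radius.setter
    def radius(self, value):  #@
        self._radius = value
''')

# The decorator `@radius.setter` is an astroid.Attribute whose attrname is
# "setter", so the helper should recover the property name.
print(get_setters_property_name(setter_node))  # expected: 'radius'
```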
{
"source": "jp-96/esp32-micropython-ble-example",
"score": 2
} |
#### File: jp-96/esp32-micropython-ble-example/ble_heartratemonitor_central.py
```python
import bluetooth
import struct
import time
from ble_advertising import decode_services, decode_name
from micropython import const
# _IRQ_CENTRAL_CONNECT = const(1)
# _IRQ_CENTRAL_DISCONNECT = const(2)
# _IRQ_GATTS_WRITE = const(3)
# _IRQ_GATTS_READ_REQUEST = const(4)
_IRQ_SCAN_RESULT = const(5)
_IRQ_SCAN_DONE = const(6)
_IRQ_PERIPHERAL_CONNECT = const(7)
_IRQ_PERIPHERAL_DISCONNECT = const(8)
_IRQ_GATTC_SERVICE_RESULT = const(9)
_IRQ_GATTC_SERVICE_DONE = const(10)
_IRQ_GATTC_CHARACTERISTIC_RESULT = const(11)
_IRQ_GATTC_CHARACTERISTIC_DONE = const(12)
_IRQ_GATTC_DESCRIPTOR_RESULT = const(13)
_IRQ_GATTC_DESCRIPTOR_DONE = const(14)
_IRQ_GATTC_READ_RESULT = const(15)
_IRQ_GATTC_READ_DONE = const(16)
_IRQ_GATTC_WRITE_DONE = const(17)
_IRQ_GATTC_NOTIFY = const(18)
# _IRQ_GATTC_INDICATE = const(19)
# _IRQ_GATTS_INDICATE_DONE = const(20)
# _IRQ_MTU_EXCHANGED = const(21)
# _IRQ_L2CAP_ACCEPT = const(22)
# _IRQ_L2CAP_CONNECT = const(23)
# _IRQ_L2CAP_DISCONNECT = const(24)
# _IRQ_L2CAP_RECV = const(25)
# _IRQ_L2CAP_SEND_READY = const(26)
_IRQ_CONNECTION_UPDATE = const(27)
# _IRQ_ENCRYPTION_UPDATE = const(28)
# _IRQ_GET_SECRET = const(29)
# _IRQ_SET_SECRET = const(30)
_ADV_IND = const(0x00)
_ADV_DIRECT_IND = const(0x01)
# _ADV_SCAN_IND = const(0x02)
# _ADV_NONCONN_IND = const(0x03)
_SERV_HRM_UUID = bluetooth.UUID(0x180D) # org.bluetooth.service.heart_rate.xml
_CHAR_HRM_UUID = bluetooth.UUID(0x2A37) # org.bluetooth.characteristic.heart_rate_measurement.xml
_CHAR_BSL_UUID = bluetooth.UUID(0x2A38) # org.bluetooth.characteristic.body_sensor_location.xml
_CHAR_HRC_UUID = bluetooth.UUID(0x2A39) # org.bluetooth.characteristic.heart_rate_control_point.xml
_SERV_BATT_UUID = bluetooth.UUID(0x180F) # org.bluetooth.service.battery_service.xml
_CHAR_BATT_UUID = bluetooth.UUID(0x2A19) # org.bluetooth.characteristic.battery_level.xml
_DESC_CCC_UUID = bluetooth.UUID(0x2902) # org.bluetooth.descriptor.gatt.client_characteristic_configuration.xml
# Selector values for the `t` argument of decode_heart_rate_measurement()
_HRM_HRV = const(1) # Heart Rate Measurement Value
_HRM_SCS = const(2) # Sensor Contact Status
_HRM_EES = const(3) # Energy Expended
_HRM_RRI = const(4) # RR-Interval
def decode_heart_rate_measurement(b, t):
# org.bluetooth.characteristic.heart_rate_measurement.xml
# Flags Field
flags = b[0]
# bit:0 Heart Rate Value Format bit
hrv_flag = flags & 1
if t == _HRM_HRV:
# Heart Rate Measurement Value - org.bluetooth.unit.period.beats_per_minute
if hrv_flag == 0:
# Heart Rate Measurement Value (uint8)
hrv = b[1]
else:
# Heart Rate Measurement Value (uint16)
hrv = b[1] | (b[2] << 8)
return hrv
# bit:2-1 Sensor Contact Status bits
scs_flag = (flags >> 1) & 3
if t == _HRM_SCS:
return scs_flag
# bit:3 Energy Expended Status bit
ees_flag = (flags >> 3) & 1
if t == _HRM_EES:
# Energy Expended - org.bluetooth.unit.energy.joule
eev = None
if ees_flag == 1:
idx = 2 + hrv_flag
eev = b[idx] | (b[idx + 1] << 8)
return eev
# bit:4 RR-Interval bit
rri_flag = (flags >> 4) & 1
if t == _HRM_RRI:
# RR-Interval - Resolution of 1/1024 second
rr = []
if rri_flag == 1:
idx = 2 + hrv_flag + ees_flag * 2
while idx < len(b):
rr.append(b[idx] | (b[idx + 1] << 8))
idx += 2
return rr
def decode_heart_rate_value(b):
# Heart Rate Measurement Value
return decode_heart_rate_measurement(b, _HRM_HRV)
def decode_sensor_contact_status(b):
# Sensor Contact Status
return decode_heart_rate_measurement(b, _HRM_SCS)
def decode_sensor_contact_status_str(b):
scs_flag = decode_sensor_contact_status(b)
# Sensor Contact Status
if scs_flag == 0:
#scs = "0 - Sensor Contact feature is not supported in the current connection"
scs = "0:Not supported"
elif scs_flag == 1:
#scs = "1 - Sensor Contact feature is not supported in the current connection"
scs = "1:Not supported"
elif scs_flag == 2:
#scs = "2 - Sensor Contact feature is supported, but contact is not detected"
scs = "2:Not detected"
else:
#scs = "3 - Sensor Contact feature is supported and contact is detected"
scs = "3:Detected"
return scs
def decode_energy_expended(b):
# Energy Expended
return decode_heart_rate_measurement(b, _HRM_EES)
def decode_rr_interval(b):
# RR-Interval
return decode_heart_rate_measurement(b, _HRM_RRI)
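# Worked example with a synthetic payload: flags 0x14 means a uint8 heart rate
# value, sensor contact supported but not detected, no Energy Expended field,
# and RR-Intervals present.
#   b = bytes([0x14, 72, 0x10, 0x03])
#   decode_heart_rate_value(b)       -> 72 (bpm)
#   decode_sensor_contact_status(b)  -> 2
#   decode_energy_expended(b)        -> None
#   decode_rr_interval(b)            -> [784]  (784/1024 s ~= 0.77 s)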
class BLEHeartRateMonitorCentral:
def __init__(self, ble):
self._ble = ble
self._ble.active(True)
self._ble.irq(self._irq)
self._reset()
def _reset(self):
# Cached name and address from a successful scan.
self._name = None
self._addr_type = None
self._addr = None
# Callbacks for completion of various operations.
# These reset back to None after being invoked.
self._scan_callback = None
self._conn_callback = None
self._read_callback = None
self._write_callback = None
# Persistent callback for when new data is notified from the device.
self._notify_callback = None
self._batt_notify_callback = None
# Connected device.
self._conn_handle = None
self._start_handle = None
self._end_handle = None
self._batt_start_handle = None
self._batt_end_handle = None
# GATTC_CHARACTERISTIC
self._heartrate_handle = None
self._config_handle = None
self._location_handle = None
self._control_handle = None
self._batt_level_handle = None
self._batt_config_handle = None
self._connected = False
def _irq(self, event, data):
print("_irq() event=", event)
if event == _IRQ_SCAN_RESULT:
# A single scan result, gap_scan().
addr_type, addr, adv_type, rssi, adv_data = data
if adv_type in (_ADV_IND, _ADV_DIRECT_IND) and _SERV_HRM_UUID in decode_services(adv_data): # _SERV_HRM_UUID found.
# Found a potential device, remember it and stop scanning.
self._addr_type = addr_type
self._addr = bytes(
addr
) # Note: addr buffer is owned by caller so need to copy it.
self._name = decode_name(adv_data) or "?"
self._ble.gap_scan(None) # manually stop.
elif event == _IRQ_SCAN_DONE:
# Scan duration finished or manually stopped, gap_scan().
if self._scan_callback:
if self._addr:
# Found a device during the scan (and the scan was explicitly stopped).
self._scan_callback(self._addr_type, self._addr, self._name)
self._scan_callback = None
else:
# Scan timed out.
self._scan_callback(None, None, None)
elif event == _IRQ_PERIPHERAL_CONNECT:
# A successful gap_connect().
conn_handle, addr_type, addr = data
if addr_type == self._addr_type and addr == self._addr:
self._conn_handle = conn_handle
self._ble.gattc_discover_services(self._conn_handle)
elif event == _IRQ_PERIPHERAL_DISCONNECT:
# Connected peripheral has disconnected.
conn_handle, addr_type, addr = data
if conn_handle == self._conn_handle:
# If it was initiated by us, it'll already be reset.
self._reset()
elif event == _IRQ_GATTC_SERVICE_RESULT:
# Called for each service found by gattc_discover_services().
conn_handle, start_handle, end_handle, uuid = data
if conn_handle == self._conn_handle:
if uuid == _SERV_HRM_UUID: # _SERV_HRM_UUID found.
self._start_handle, self._end_handle = start_handle, end_handle
if uuid == _SERV_BATT_UUID: # _SERV_BATT_UUID found.
self._batt_start_handle, self._batt_end_handle = start_handle, end_handle
elif event == _IRQ_GATTC_SERVICE_DONE:
# Called once service discovery is complete.
# Note: Status will be zero on success, implementation-specific value otherwise.
conn_handle, status = data
if self._start_handle and self._end_handle and self._batt_start_handle and self._batt_end_handle:
self._ble.gattc_discover_characteristics(
self._conn_handle, min(self._start_handle, self._batt_start_handle), max(self._end_handle, self._batt_end_handle)
)
elif self._start_handle and self._end_handle:
self._ble.gattc_discover_characteristics(
self._conn_handle, self._start_handle, self._end_handle
)
else:
print("Failed to find service.")
elif event == _IRQ_GATTC_CHARACTERISTIC_RESULT:
# Called for each characteristic found by gattc_discover_characteristics().
conn_handle, def_handle, value_handle, properties, uuid = data
if conn_handle == self._conn_handle:
if uuid == _CHAR_HRM_UUID: # _CHAR_HRM_UUID found.
self._heartrate_handle = value_handle
if uuid == _CHAR_BSL_UUID: # _CHAR_BSL_UUID found.
self._location_handle = value_handle
if uuid == _CHAR_HRC_UUID: # _CHAR_HRC_UUID found.
self._control_handle = value_handle
if uuid == _CHAR_BATT_UUID: # _CHAR_BATT_UUID found.
self._batt_level_handle = value_handle
elif event == _IRQ_GATTC_CHARACTERISTIC_DONE:
# Called once characteristic discovery is complete.
# Note: Status will be zero on success, implementation-specific value otherwise.
conn_handle, status = data
if self._heartrate_handle:
self._ble.gattc_discover_descriptors(self._conn_handle, self._heartrate_handle, self._end_handle)
else:
print("Failed to find characteristic.")
elif event == _IRQ_GATTC_DESCRIPTOR_RESULT:
# Called for each descriptor found by gattc_discover_descriptors().
conn_handle, dsc_handle, uuid = data
if conn_handle == self._conn_handle and uuid == _DESC_CCC_UUID: # _DESC_CCC_UUID found.
if not self._config_handle:
self._config_handle = dsc_handle
elif event == _IRQ_GATTC_DESCRIPTOR_DONE:
# Called once descriptor discovery is complete.
# Note: Status will be zero on success, implementation-specific value otherwise.
conn_handle, status = data
if self._config_handle:
# We've finished connecting and discovering device, fire the connect callback.
self._connected = True
if self._conn_callback:
self._conn_callback()
else:
print("Failed to find descriptor.")
elif event == _IRQ_GATTC_WRITE_DONE:
# A gattc_write() has completed.
# Note: The value_handle will be zero on btstack (but present on NimBLE).
# Note: Status will be zero on success, implementation-specific value otherwise.
conn_handle, value_handle, status = data
if self._write_callback:
self._write_callback(status)
self._write_callback = None
elif event == _IRQ_GATTC_READ_RESULT:
# A read completed successfully.
conn_handle, value_handle, char_data = data
if conn_handle == self._conn_handle: # and value_handle in [self._location_handle, self._batt_level_handle]:
if self._read_callback:
self._read_callback(bytes(char_data))
self._read_callback = None
elif event == _IRQ_GATTC_READ_DONE:
# Read completed (no-op).
conn_handle, value_handle, status = data
elif event == _IRQ_GATTC_NOTIFY:
# A server has sent a notify request.
conn_handle, value_handle, notify_data = data
if conn_handle == self._conn_handle:
if value_handle == self._heartrate_handle and self._notify_callback:
self._notify_callback(value_handle, bytes(notify_data))
elif value_handle == self._batt_level_handle and self._batt_notify_callback:
self._batt_notify_callback(value_handle, bytes(notify_data))
elif event == _IRQ_CONNECTION_UPDATE:
# The remote device has updated connection parameters.
conn_handle, conn_interval, conn_latency, supervision_timeout, status = data
print("_IRQ_CONNECTION_UPDATE")
else:
# Unhandled
print("************ Unhandled ************ event=", event)
# Returns true if we've successfully connected and discovered characteristics.
def is_connected(self):
return self._connected
# Find a device advertising the heart rate service.
def scan(self, callback=None):
self._addr_type = None
self._addr = None
self._scan_callback = callback
self._ble.gap_scan(2000, 30000, 30000)
# Connect to the specified device (otherwise use cached address from a scan).
def connect(self, addr_type=None, addr=None, callback=None):
self._addr_type = addr_type or self._addr_type
self._addr = addr or self._addr
self._conn_callback = callback
if self._addr_type is None or self._addr is None:
return False
self._ble.gap_connect(self._addr_type, self._addr)
return True
# Disconnect from current device.
def disconnect(self):
if not self._conn_handle:
return
self._ble.gap_disconnect(self._conn_handle)
self._reset()
def _gattc_read_sync(self, conn_handle, value_handle):
result = None
def on_read(v):
nonlocal result
result = v[0]
self._read_callback = on_read
# Issues an (asynchronous) read, will invoke callback with data.
self._ble.gattc_read(conn_handle, value_handle)
counter = 0
while self._read_callback and counter < 10:
time.sleep_ms(50)
counter += 1
return result
def _gattc_write_sync(self, conn_handle, value_handle, data, mode=0):
result = None
if mode == 1:
def on_write(v):
nonlocal result
result = v
self._write_callback = on_write
self._ble.gattc_write(conn_handle, value_handle, data, mode)
if mode == 1:
counter = 0
while self._write_callback and counter < 10:
time.sleep_ms(50)
counter += 1
return result
# Enable device notifications, and sets a callback to be invoked when the device notifies us.
def enable_notify(self, callback):
self._notify_callback = callback
if self.is_connected():
result = self._gattc_write_sync(self._conn_handle, self._config_handle, struct.pack('<h', 1), 1)
print("enable_notify:", result)
# Body sensor location
def read_body_sensor_location(self):
if not self.is_connected():
return
if self._location_handle:
return self._gattc_read_sync(self._conn_handle, self._location_handle)
# Energy Expended (not tested)
def enable_energy(self):
if not self.is_connected():
return
if self._control_handle:
result = self._gattc_write_sync(self._conn_handle, self._control_handle, struct.pack('b', 1), 1)
print("enable_energy:", result)
# Battery level
def read_battery_level(self):
if not self.is_connected():
return
if self._batt_level_handle:
return self._gattc_read_sync(self._conn_handle, self._batt_level_handle)
def demo():
ble = bluetooth.BLE()
central = BLEHeartRateMonitorCentral(ble)
def execute():
not_found = False
def on_scan(addr_type, addr, name):
def b2s(b):
if b:
s=[]
for v in b:
s.append("{:02x}".format(v).upper())
return ":".join(s)
else:
return ""
if addr_type is not None:
print("Found device:", addr_type, b2s(addr), "'" + name + "'")
central.connect()
else:
nonlocal not_found
not_found = True
print("No device found.")
central.scan(callback=on_scan)
# Wait for connection...
retry_count = 50
while not central.is_connected():
time.sleep_ms(100)
if not_found:
return
retry_count = retry_count - 1
if retry_count < 0:
print("No connection.")
return
print("Connected")
# Battery level
level = central.read_battery_level()
print("-----------------------------------------")
print(" Battery level:", level)
print("-----------------------------------------")
def on_notify(value_handle, notify_data):
# print("on_notify()", value_handle, notify_data)
print(" 1)", decode_heart_rate_value(notify_data), "bpm")
print(" 2)", decode_sensor_contact_status_str(notify_data))
print(" 3)", decode_energy_expended(notify_data), "joule")
print(" 4)", decode_rr_interval(notify_data), "RR (1/1024 sec)")
# org.bluetooth.characteristic.heart_rate_measurement.xml
idx = 0
# Flags Field
flags = notify_data[idx]
idx += 1
# bit:0 Heart Rate Value Format bit
hrv_flag = (flags ) & 1
# bit:2-1 Sensor Contact Status bits
scs_flag = (flags >> 1) & 3
# bit:3 Energy Expended Status bit
ees_flag = (flags >> 3) & 1
# bit:4 RR-Interval bit
rri_flag = (flags >> 4) & 1
# Heart Rate Measurement Value - org.bluetooth.unit.period.beats_per_minute
if hrv_flag == 0:
# Heart Rate Measurement Value (uint8)
hrv = notify_data[idx]
idx += 1
else:
# Heart Rate Measurement Value (uint16)
hrv = notify_data[idx] | (notify_data[idx + 1] << 8)
idx += 2
# Sensor Contact Status
scs = None
if scs_flag == 0:
#scs = "0 - Sensor Contact feature is not supported in the current connection"
scs = "0:Not supported"
elif scs_flag == 1:
#scs = "1 - Sensor Contact feature is not supported in the current connection"
scs = "1:Not supported"
elif scs_flag == 2:
#scs = "2 - Sensor Contact feature is supported, but contact is not detected"
scs = "2:Not detected"
else:
#scs = "3 - Sensor Contact feature is supported and contact is detected"
scs = "3:Detected"
# Energy Expended - org.bluetooth.unit.energy.joule
eev = None
if ees_flag == 1:
eev = notify_data[idx] | (notify_data[idx + 1] << 8)
idx += 2
# RR-Interval - Resolution of 1/1024 second
rr = []
if rri_flag == 1:
while idx < len(notify_data):
rr.append(notify_data[idx] | (notify_data[idx + 1] << 8))
idx += 2
print("Heart Rate Monitor:", hrv, "bpm ,", scs, ",", eev, ",", rr)
# enable notify
central.enable_notify(callback=on_notify)
# Body sensor location
loc = central.read_body_sensor_location()
if loc >= 0 and loc <= 6:
loc += 1
else:
loc = 0
loc_name = ["(None)", "0-Other", "1-Chest", "2-Wrist", "3-Finger", "4-Hand", "5-Ear Lobe", "6-Foot"]
print("-----------------------------------------")
print(" body sensor location:", loc_name[loc])
print("-----------------------------------------")
# Enable Energy Expended
central.enable_energy()
# connection loop
while central.is_connected():
time.sleep_ms(100)
print("Disconnected")
try:
while True:
execute()
except:
ble.active(False)
if __name__ == "__main__":
demo()
``` |
{
"source": "jp-96/micropython",
"score": 4
} |
#### File: tests/basics/class_contains.py
```python
class A:
def __contains__(self, key):
return True
a = A()
print(True in a)
print(1 in a)
print(() in a)
# B contains given things
class B:
def __init__(self, items):
self.items = items
def __contains__(self, key):
return key in self.items
b = B([])
print(1 in b)
b = B([1, 2])
print(1 in b)
print(2 in b)
print(3 in b)
class C:
def __contains__(self, arg):
return arg
print(C().__contains__(0))
print(C().__contains__(1))
print(C().__contains__(''))
print(C().__contains__('foo'))
print(C().__contains__(None))
print(0 in C())
print(1 in C())
print('' in C())
print('foo' in C())
print(None in C())
print(0 not in C())
print(1 not in C())
print('' not in C())
print('foo' not in C())
print(None not in C())
```
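One detail the last block of checks exercises: the `in` operator coerces whatever `__contains__` returns into a bool, so a falsy return value such as `''` makes the containment test print False even though the raw method call passes the value through. A standalone illustration mirroring the C class above:
```python
class C:
    def __contains__(self, arg):
        return arg

print(C().__contains__(''))  # '' - the raw return value is passed through
print('' in C())             # False - the operator applies bool() to it
print('foo' in C())          # True
```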
#### File: tests/basics/fun_calldblstar4.py
```python
def f(a, b=None, c=None):
print(a, b, c)
f(**{"a": 1}, **{"b": 2})
f(**{"a": 1}, **{"b": 2}, c=3)
f(**{"a": 1}, b=2, **{"c": 3})
try:
f(1, **{"b": 2}, **{"b": 3})
except TypeError:
print("TypeError")
# test calling a method with multiple **args
class A:
def f(self, a, b=None, c=None):
print(a, b, c)
a = A()
a.f(**{"a": 1}, **{"b": 2})
a.f(**{"a": 1}, **{"b": 2}, c=3)
a.f(**{"a": 1}, b=2, **{"c": 3})
try:
a.f(1, **{"b": 2}, **{"b": 3})
except TypeError:
print("TypeError")
``` |
{
"source": "jp-96/upy_bleperipheral",
"score": 2
} |
#### File: upy_bleperipheral/bleperipheral/util.py
```python
import bluetooth
import micropython
import uasyncio
from micropython import const
def _f():
pass
async def _g():
pass
class _B():
def _b(self):
pass
_type_function = type(_f)
_type_generator = type(_g)
_type_bound_method = type(_B()._b)
def isFunction(obj):
return type(obj) == _type_function
def isGenerator(obj):
return type(obj) == _type_generator
def isBoundMethod(obj):
return type(obj) == _type_bound_method
```
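A quick sketch of how these type probes behave. The import path is assumed from the repository layout, and the checks rely only on bound methods having a distinct type from plain functions (true on both CPython and MicroPython):
```python
from bleperipheral.util import isFunction, isBoundMethod  # path assumed

def plain():
    pass

class Obj:
    def method(self):
        pass

print(isFunction(plain))            # True
print(isFunction(Obj().method))     # False - bound methods have their own type
print(isBoundMethod(Obj().method))  # True
```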
#### File: upy_bleperipheral/examples/ble_uart_peripheral.py
```python
import bluetooth
#from ble_advertising import advertising_payload
from bleperipheral import BLEPeripheral
from micropython import const
_IRQ_CENTRAL_CONNECT = const(1 << 0)
_IRQ_CENTRAL_DISCONNECT = const(1 << 1)
_IRQ_GATTS_WRITE = const(1 << 2)
_UART_UUID = bluetooth.UUID("6E400001-B5A3-F393-E0A9-E50E24DCCA9E")
_UART_TX = (
bluetooth.UUID("6E400003-B5A3-F393-E0A9-E50E24DCCA9E"),
bluetooth.FLAG_NOTIFY,
)
_UART_RX = (
bluetooth.UUID("6E400002-B5A3-F393-E0A9-E50E24DCCA9E"),
bluetooth.FLAG_WRITE,
)
_UART_SERVICE = (
_UART_UUID,
(_UART_TX, _UART_RX,),
)
# org.bluetooth.characteristic.gap.appearance.xml
_ADV_APPEARANCE_GENERIC_COMPUTER = const(128)
class BLEUART:
def __init__(self, name="upy-uart", rxbuf=100):
self._bleperipheral = BLEPeripheral()
# Optionally add services=[_UART_UUID], but this is likely to make the payload too large.
((self._tx_handle, self._rx_handle,),) = self._bleperipheral.build(
(_UART_SERVICE,),
adv_name=name,
adv_appearance=_ADV_APPEARANCE_GENERIC_COMPUTER
)
# Increase the size of the rx buffer and enable append mode.
self._bleperipheral.setBuffer(self._rx_handle, rxbuf, True)
self._rx_buffer = bytearray()
self._bleperipheral.irq(handlerGattsWrite=self._gattsWrite)
self._bleperipheral.advertise()
def irq(self, handler):
self._handler = handler
def _gattsWrite(self, handle, value_handle, data):
if value_handle == self._rx_handle:
self._rx_buffer += data
if self._handler:
self._handler()
def any(self):
return len(self._rx_buffer)
def read(self, sz=None):
if not sz:
sz = len(self._rx_buffer)
result = self._rx_buffer[0:sz]
self._rx_buffer = self._rx_buffer[sz:]
return result
def write(self, data):
self._bleperipheral.notify(self._tx_handle, data)
def close(self):
self._bleperipheral.close()
def demo():
import time
uart = BLEUART()
def on_rx():
print("rx: ", uart.read().decode().strip())
uart.irq(on_rx)
nums = [4, 8, 15, 16, 23, 42]
i = 0
try:
while True:
uart.write(str(nums[i]) + "\n")
i = (i + 1) % len(nums)
time.sleep_ms(1000)
except KeyboardInterrupt:
pass
uart.close()
if __name__ == "__main__":
demo()
``` |
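
The `read()` method above consumes bytes from the front of an internal buffer. Its slicing semantics can be reproduced with a plain bytearray, independent of any BLE hardware (the buffer contents below are made up):
```python
rx_buffer = bytearray(b"hello world")  # hypothetical received data

def read(sz=None):
    global rx_buffer
    if not sz:
        sz = len(rx_buffer)
    result = rx_buffer[0:sz]
    rx_buffer = rx_buffer[sz:]   # drop the bytes that were just read
    return result

print(read(5))          # bytearray(b'hello') -- first five bytes are consumed
print(read())           # bytearray(b' world') -- the remainder
print(len(rx_buffer))   # 0
```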
{
"source": "jpa38/pytheme_material_design",
"score": 2
} |
#### File: pytheme_material_design/pytheme-color/version.py
```python
def get_version():
return __version__
def get_auteur():
return __author__
def get_copyright():
return __copyright__
def get_license():
return __license__
def get_name():
return __name__
def get_last_version():
versionning = []
versionning.append(["0.0.1", "Création du script"])
versionning.append(["0.1.1", "Insertion de la GUI"])
versionning.append(["0.2.0", "Ajout du versionning"])
return versionning[-1][0]
__author__ = "<NAME>"
__owner__ = "BOUYGUES ENERGIES ET SERVICES"
__year__ = "2019"
__copyright__ = "©" +" " + __owner__ + " " + __year__
__credits__ = []
__license__ = "Creative Commons Attribution Share Alike 4.0"
__license_keyword__ = "cc-by-sa-4.0"
__license_link__ = "https://creativecommons.org/licenses/by-nc-sa/4.0/"
__version__ = get_last_version()
__maintainer__ = "<NAME>"
__email__ = ""
__status__ = "Production"
__name__ = "Export SEE TO SQLite"
``` |
{
"source": "jpa38/Thumbs_doll",
"score": 3
} |
#### File: Thumbs_doll/thumbs_doll/script.py
```python
import random
import urllib.request
from urllib.parse import urlparse
from PIL import Image
import glob, os
from pathlib import Path
import shutil
global img_travail
img_travail = 'image_doll_working_img'
global img_name
img_name = ''
def set_image_name(url):
    # TODO: verify whether this is still needed
global img_name
img_name = get_img_name(url)+get_url_extension(url)
def get_full_image_name(url):
"""Get the name + extension
Args:
        url (str): the URL or path to parse
    Returns:
        str: the file name and extension extracted from the URL / path
"""
return str(get_img_name(url) + get_url_extension(url))
def set_path_destination(path):
    # TODO: verify whether this is still needed
global path_destination
# path_destination = Path(path)
# path = "'''r" + path + "'''"
path_destination = path
print(path_destination)
def get_path_destination():
    # TODO: verify whether this is still needed
return path_output
def get_path_input():
return path_input
def download_image(url,name):
# fullname = get_img_name(url)+get_url_extension(url)
try:
urllib.request.urlretrieve(url,name)
except:
reset_preview()
print("Probleme URL")
# global img_name
# # img_name = fullname
def get_url_extension(url):
path = urlparse(url).path
ext = os.path.splitext(path)[1]
return str(ext)
def get_img_name(url):
path = urlparse(url).path
filename_w_ext = os.path.basename(path)
filename, file_extension = os.path.splitext(filename_w_ext)
return str(filename)
def get_full_destination():
pass
def is_url(input):
if "http" in input :
return True
else :
return False
def is_file(input):
if os.path.isfile(input) and os.access(input, os.R_OK):
return True
else:
return False
def get_image_size(img):
im = Image.open(img)
# width, height = im.size
return max(im.size)
def resize(img,size,destination, name):
thumb_size = size, size
file, ext = os.path.splitext(img)
im = Image.open(img)
im.thumbnail(thumb_size)
new_destination = os.path.join(destination, (str(name) + "_"+ str(size)+"px"+ext))
print(new_destination)
im.save(new_destination, format=None)
def conditions_initiales():
# set_path_destination(r'''.\output''')
global path_initial
path_initial = os.getcwd()
global path_input
path_input = os.path.join(path_initial,"input\\")
global path_output
path_output = os.path.join(path_initial,"output\\")
# shutil.rmtree(path_input)
reset_folder(path_input)
reset_preview()
    # TODO: check that the folders exist
print("conditions initiales Done")
def reset_preview():
source = os.path.join(path_initial,'img','icon.png')
destination = os.path.join(path_initial,'preview.jpg')
shutil.copyfile(source, destination)
def reset_folder(folder):
try :
shutil.rmtree(folder)
except:
pass
if not os.path.exists(folder):
os.makedirs(folder)
# Keep presets
# set_path_destination(r'''C:\Users\J.PALANCA\Desktop''')
conditions_initiales()
# TODO: handle paths when running from the command line
if __name__ == '__main__':
if True:
pass
else:
url = "http://www.planetrock.fr/wp-content/uploads/2019/01/chaton.png"
rename = "test_img"
download_image(url, str(rename))
for px in [512, 256, 128, 64, 32, 24, 16]:
manip(rename + get_url_extension(url), px, url)
``` |
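
`resize()` above relies on PIL's `Image.thumbnail`, which shrinks an image in place while preserving its aspect ratio and never upscales. A self-contained sketch of the same idea; the file names and output directory are hypothetical:
```python
import os
from PIL import Image

def make_thumbnails(src_path, out_dir, sizes=(512, 256, 128, 64, 32)):
    """Write one aspect-preserving thumbnail per requested size."""
    name, ext = os.path.splitext(os.path.basename(src_path))
    os.makedirs(out_dir, exist_ok=True)
    for size in sizes:
        im = Image.open(src_path)      # reopen each time: thumbnail() modifies in place
        im.thumbnail((size, size))     # shrinks only, keeps aspect ratio
        im.save(os.path.join(out_dir, f"{name}_{size}px{ext}"))

# make_thumbnails("chaton.png", "output")  # hypothetical input file and output folder
```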
{
"source": "jpa99/dependency-analyzer",
"score": 3
} |
#### File: dependency-analyzer/src/cli.py
```python
import sys
import utils
import argparse
from analyzer import DependencyAnalyzer, Config
## Parse command line arguments
def parse_args():
logging_levels = ["notset", "debug", "info", "warning", "error", "critical"]
parser = argparse.ArgumentParser(description="Analyze dependencies for input Python file.")
parser.add_argument("dirpath", type=str,
help="directory path to analyze")
parser.add_argument("filepath", type=str,
help="python file path to analyze")
parser.add_argument("-l", "--logging_level", type=str, default="error",
choices=set(logging_levels),
help="logging level")
parser.add_argument("-s", "--search_imports", action='store_true',
help="flag to search local machine and check if all dependencies are installed")
parser.add_argument("-g", "--render_graph", action='store_true',
help="flag to render dependency graph")
parser.add_argument("-u", "--mark_unused", action='store_true',
help="flag to mark unused dependencies")
args = parser.parse_args()
render_graph = args.render_graph
if not utils.is_valid_dir(args.dirpath):
print("\n[Command Line Error] Invalid directory \"{dirpath}\".".format(dirpath=args.dirpath), file=sys.stderr)
elif not utils.is_valid_file(args.filepath):
print("\n[Command Line Error] Invalid file \"{filepath}\".".format(filepath=args.filepath), file=sys.stderr)
else:
logging_level = logging_levels.index(args.logging_level)*10
config = Config(logging_level=logging_level, resolve_all_imports=not args.search_imports, render_graph=args.render_graph, mark_unused = args.mark_unused)
dependency_analyzer = DependencyAnalyzer(config)
dependency_analyzer.run(args.dirpath, args.filepath)
``` |
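
The `--logging_level` handling above converts the chosen name into a numeric level by multiplying its index in `logging_levels` by 10, which lines up with the constants in Python's standard logging module. A quick standalone check of that mapping (not part of the CLI):
```python
import logging

logging_levels = ["notset", "debug", "info", "warning", "error", "critical"]
for name in logging_levels:
    numeric = logging_levels.index(name) * 10
    # NOTSET=0, DEBUG=10, INFO=20, WARNING=30, ERROR=40, CRITICAL=50
    assert numeric == getattr(logging, name.upper())
    print(name, numeric)
```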
{
"source": "jpaadre/Whole30Recipes",
"score": 3
} |
#### File: jpaadre/Whole30Recipes/email_script.py
```python
import requests
import json
import yagmail
from datetime import datetime
def pull_recipes():
with open(".//credentials//recipe_api.json", "r") as handler:
recipes = json.load(handler)
headers = {}
headers['x-rapidapi-key'] = recipes['x-rapidapi-key']
headers['x-rapidapi-host'] = recipes['x-rapidapi-host']
url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/recipes/random"
querystring = {"number":"5","tags":"whole30"}
response = requests.request("GET", url, headers=headers, params=querystring)
json_recipes = json.loads(response.text)
return json_recipes
def format_recipe_string(key):
recipe_string = f"""<h3><strong>{key['title']}</strong></h3>
<p><strong><a href="{key['sourceUrl']}">Recipe</a></strong></p>
<p>{key['summary']}</p>
<p> </p>"""
return recipe_string
def format_recipe_email_body(json_recipes):
recipe_html = []
for i in json_recipes['recipes']:
recipe_string = format_recipe_string(i)
recipe_html.append(recipe_string)
recipe_body = "<p> </p>".join(recipe_html)
email_contents = "<h2><strong>Whole30 Recipes for the Week</strong></h2>" + recipe_body
return email_contents
def send_email(email_contents):
today = datetime.now()
today_form = today.strftime("%B %d")
subject = "Whole30 Suggestions: " + today_form
with open(".//credentials//gmail_api.json", "r") as handler:
gmail = json.load(handler)
yag = yagmail.SMTP(user=gmail['user'], password=gmail['password'])
yag.send(to='<EMAIL>', subject=subject, contents=email_contents)
return
def email_pipeline():
json_recipes = pull_recipes()
email_contents = format_recipe_email_body(json_recipes)
send_email(email_contents)
return
if __name__ == "__main__":
email_pipeline()
``` |
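
`format_recipe_string()` only needs a dict with `title`, `sourceUrl` and `summary` keys, so it can be tried without calling the Spoonacular API. A sketch with a made-up recipe dict, assuming the module above is importable as `email_script`:
```python
# assumes the module above is on the import path as email_script
from email_script import format_recipe_string

sample_recipe = {  # hypothetical stand-in for one entry of json_recipes['recipes']
    "title": "Whole30 Chicken Skillet",
    "sourceUrl": "https://example.com/chicken-skillet",
    "summary": "A quick one-pan dinner.",
}
print(format_recipe_string(sample_recipe))
```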
{
"source": "jpaalasm/sleep-musicalization-web",
"score": 3
} |
#### File: sleep-musicalization-web/webapp/models.py
```python
import tempfile
import os
import random
from django.core.files.base import File
from django.db import models
from django.conf import settings
class SongManager(models.Manager):
def make_song(self, username, date):
"""Creates a new song by fetching users data from Beddit and
storing it to mp3 file.
"""
key = unicode(''.join([random.choice("<KEY>") for i in range(12)]))
song = self.create(user_nickname=username,
beddit_username=username,
key=key,
title=username + " " + date.strftime("%x"))
return song
def get_latest_public_songs(self):
return self.filter(public=True, state="finished").order_by("-created")
def get_number_of_public_songs(self):
return self.filter(public=True, state="finished").count()
def get_my_songs(self, beddit_username):
return self.filter(public=True, state="finished", beddit_username=beddit_username)
class Song(models.Model):
STATE_CHOICES = (("new", "Waiting for processing"),
("processing", "Processing"),
("finished", "Finished"),
("error", "Error"),
)
key = models.CharField(max_length=20, db_index=True)
state = models.CharField(max_length=20, choices=STATE_CHOICES, default="new")
# Automatically generated metadata, user may not change
created = models.DateTimeField(auto_now_add=True)
beddit_username = models.CharField(max_length=40)
times_listened = models.IntegerField(default=0)
length_seconds = models.IntegerField(default=0)
# User editable fields
user_nickname = models.CharField(max_length=40)
public = models.BooleanField(default=False, help_text="Include this song in public list of songs?")
title = models.CharField(max_length=100, help_text="You can make up a nice title for your song")
description = models.TextField(blank=True, help_text="If you like, you can write something about this night and song")
song_file = models.FileField(upload_to="songs", blank=True)
sleep_data = models.TextField(blank=True)
objects = SongManager()
def set_state(self, new_state):
self.state = new_state
self.save()
def __unicode__(self):
return self.title
```
#### File: sleep-musicalization-web/webapp/tasks.py
```python
import os
import tempfile
import logging
from django.core.files.base import File
from celery.task import Task
from celery.registry import tasks
from models import Song
import musicalization
class GenerateSongTask(Task):
def run(self, song_id, date, access_token, **kwargs):
song = Song.objects.get(id=song_id)
logging.debug("Fetched song " + song.key)
try:
song.set_state("processing")
logging.debug("Processing song")
sleep_data_json_string = musicalization.make_beddit_api_request("sleep", song.beddit_username, date, access_token)
output_file_name = tempfile.mktemp(suffix=".mp3")
musicalization.kunquat_musicalization(song.beddit_username, date, access_token, output_file_name)
song.sleep_data = sleep_data_json_string
with open(output_file_name) as outfile:
logging.debug("Saving song file")
song.song_file.save(song.key + ".mp3", File(outfile))
os.unlink(output_file_name)
song.set_state("finished")
logging.debug("Finished processing song")
except:
logging.exception("Exception while processing song")
song.set_state("error")
tasks.register(GenerateSongTask)
``` |
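
With the old-style Celery API used here (a `Task` subclass registered via `tasks.register`), the web app would normally enqueue the work instead of calling `run()` directly. A hedged sketch of what such a call site might look like; the import path and view helper are assumptions:
```python
# hypothetical call site (e.g. in a Django view); assumes Celery and a broker are configured
from webapp.tasks import GenerateSongTask

def start_processing(song, date, access_token):
    # .delay() pushes the task onto the queue; a worker later invokes run()
    GenerateSongTask.delay(song.id, date, access_token)
```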
{
"source": "JPaalman/M5-Project",
"score": 3
} |
#### File: game/map/map.py
```python
import os
from game import settings
import game.tiles
from game.map.colorMap import air_tiles
from game.resources import resourceManager
from game.resources.textures import textureManager as tM
class Map:
PADDING_CHAR = 32
MAPBORDER_CHAR = 66
"""
    This class retrieves the contents of a map file, puts all values into the proper variables and
    makes them available for retrieval.
"""
def __init__(self, mname):
self.mapHeight = None
self.mapWidth = None
self.mapLayout = None
self.mapName = None
self.bgImage = None
self.tileData = None
self.MAP_STYLE = None
self.PLAYER_ACC = None
self.PLAYER_ACC = None
self.PLAYER_FRICTION = None
self.PLAYER_GRAV = None
self.PLAYER_JUMP = None
self.BACKGROUND_IMAGE = None
self.ENEMY_SPEED = None
self.PLATFORM_SPEED = None
self.BACKGROUND_MUSIC = None
self.FFT_LOW = None
self.FFT_HIGH = None
self.rawMapLines = resourceManager.getMap(mname)
self.initMap(self.rawMapLines)
def getTiles(self):
"""
Converts all data stored in the map file to Tile objects
:return: Tile[] of tiles representing the map
"""
res = []
rownr = 0
tmp = []
arr = bytearray([])
i = 0
while i < len(self.mapLayout[0]):
arr.append(self.PADDING_CHAR)
i += 1
tmp.append(arr)
for x in self.mapLayout:
tmp.append(x)
self.mapLayout = tmp
# print(len(self.mapLayout))
# for x in self.mapLayout:
# print(str(x))
while rownr < len(self.mapLayout):
colnr = 0
while colnr < len(self.mapLayout[rownr]) - 1:
if self.mapLayout[rownr][colnr] != 32 and self.mapLayout[rownr][colnr] != 66:
data = self.findTileData(self.mapLayout[rownr][colnr])
res.append(game.tiles.Tile(self.getX(colnr), self.getY(rownr), self.mapLayout[rownr][colnr], data))
colnr += 1
rownr += 1
return res
def initMap(self, lines):
"""
Initializes all the instance variables by reading the file contents and
converting the contents to their proper representation
:param lines: Array of raw lines retrieved from the map file
"""
lines = self.cleanLines(lines)
        index = 0
# Read map name
self.mapName = self.getParamValue(lines[index])
index += 1
# Read map height
self.mapHeight = int(float(self.getParamValue(lines[index])))
index += 1
# Read map width
param = self.getParamValue(lines[index])
if param != "AUTO":
self.mapWidth = int(float(param))
index += 1
# Read background stuff
self.BACKGROUND_IMAGE = str(self.getParamValue(lines[index]))
index += 1
self.BACKGROUND_MUSIC = str(self.getParamValue(lines[index]))
index += 1
self.bgImage = tM.getImage(self.BACKGROUND_IMAGE, False)
# Read player properties
self.PLAYER_ACC = float(self.getParamValue(lines[index]))
index += 1
self.PLAYER_FRICTION = float(self.getParamValue(lines[index]))
index += 1
self.PLAYER_GRAV = float(self.getParamValue(lines[index]))
index += 1
self.PLAYER_JUMP = float(self.getParamValue(lines[index]))
index += 1
# Read enemy properties
self.ENEMY_SPEED = float(self.getParamValue(lines[index]))
index += 1
# Read platform properties
self.PLATFORM_SPEED = float(self.getParamValue(lines[index]))
index += 1
self.MAP_STYLE = self.getParamValue(lines[index])
index += 1
try:
self.FFT_LOW = self.getParamValue(lines[index])
index += 1
except:
print("No FFT_LOW found")
try:
self.FFT_HIGH = self.getParamValue(lines[index])
index += 1
except:
print("No FFT_HIGH found")
# Load maplayout
self.mapLayout = self.getMapLayout(lines[index:])
self.mapLayout = self.fillMapBottom(self.mapLayout)
index += len(self.mapLayout) + 1
# Load tile data
self.tileData = self.getTileData(lines[index:])
# print("Map initiated:")
# print("Map name: " + self.mapName)
# print("Width: " + str(self.mapWidth))
# print("Height: " + str(self.mapHeight))
# print("\nMapdata:")
# for x in self.mapLayout:
# print(x)
def cleanLines(self, ls):
"""
Cleans the input lines by removing newline characters and ignoring empty lines or comment
lines that start with '#'
:param ls: The list that you want to clean
:return: The cleaned list
"""
tempList = []
for x in ls:
tempList.append(x[:-1])
ls = tempList
out = []
i = self.nextLine(ls, -1)
while i < len(ls):
out.append(ls[i])
i = self.nextLine(ls, i)
return out
def nextLine(self, ls, index):
"""
Returns the index of the next line that contains information.
        Empty lines or comment lines starting with '#' are skipped.
:param ls: List with possibly relevant values.
:param index: The last index read by the program
:return: The index of the next line that is relevant
"""
for x in range(index + 1, len(ls) - 1):
if (len(ls[x]) > 0) and (ls[x][0] != "#") and (ls[x][0] != 10):
return x
return len(ls)
def getParamValue(self, input):
"""
Retrieves the string value of a parameter stored in the map file
:param input: [PARAMNAME]=[VALUE]
:return: VALUE
"""
return input.split("=")[1]
def getMapLayout(self, data):
"""
Processes the raw lines that represent the map to a complete map. This includes
adding padding to lines if they are shorter than the MAPWIDTH param in the map file specifies,
and truncates lines if they are longer than the MAPWIDTH specifies. The same padding and truncation is applied
to the map vertically. The map is padded with "space" characters, and border characters at the borders.
:param data: The raw lines that represent the map layout itself.
:return: A matrix of bytes that contains the byte for every tile on the map.
"""
if self.mapWidth is None:
self.mapWidth = len(data[0]) + 1
print("Auto detected mapwidth set to: " + str(self.mapWidth))
res = []
padding = [self.PADDING_CHAR] * self.mapWidth
padding[0] = self.MAPBORDER_CHAR
padding[len(padding) - 1] = self.MAPBORDER_CHAR
bottom_padding = [self.MAPBORDER_CHAR] * self.mapWidth
i = 0
while (i < len(data)) and (data[i] != "!MAPEND") and len(res) != self.mapHeight + 1:
if len(res) == self.mapHeight:
return res
add = bytearray(data[i][:self.mapWidth], "ascii")
if len(add) < self.mapWidth:
while len(add) < self.mapWidth - 1:
if i == 0:
add.append(self.MAPBORDER_CHAR)
else:
add.append(self.PADDING_CHAR)
add.append(self.MAPBORDER_CHAR)
res.append(add)
i += 1
if len(res) != self.mapHeight:
while len(res) < self.mapHeight - 1:
res.append(padding)
res.append(bottom_padding)
return res
def getTileData(self, lines):
values = []
for x in lines:
values.append(int(x))
return values
def findTileData(self, tid):
temp = 0
if len(self.tileData) > 0:
# moving platform
if tid == 77:
temp = self.tileData[0]
self.tileData = self.tileData[1:]
return temp
def getX(self, x):
return settings.TILESIZE * x
def getY(self, y):
return settings.TILESIZE * y
def fillMapBottom(self, data):
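        # First pass (bottom-up): any empty cell (32) whose neighbour directly
        # below is a map border (66) or an existing fill tile (70) becomes a
        # fill tile, so fill propagates upward from the bottom border row.
        # Second pass: fill tiles whose four neighbours are all non-air tiles
        # are replaced with tile 120 (fully enclosed fill).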
rownr = len(data) - 2
while rownr > 0:
colnr = 0
while colnr < len(data[rownr]):
if data[rownr][colnr] == 32:
if (data[rownr + 1][colnr] == 66) or (data[rownr + 1][colnr] == 70):
data[rownr][colnr] = 70
colnr += 1
rownr -= 1
rownr = 0
while rownr < len(data):
colnr = 0
while colnr < len(data[rownr]):
if data[rownr][colnr] == 70:
if (data[rownr][colnr - 1] not in air_tiles) \
and (data[rownr][colnr + 1] not in air_tiles
and (data[rownr - 1][colnr] not in air_tiles
and (data[rownr + 1][colnr] not in air_tiles))):
data[rownr][colnr] = 120
colnr += 1
rownr += 1
return data
``` |
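
The padding logic in `getMapLayout()` above (truncate long rows, pad short rows with spaces and close them with a border byte) can be illustrated with a small standalone helper; this is a simplified sketch that ignores the special handling of the first row:
```python
PADDING_CHAR = 32     # ASCII space
MAPBORDER_CHAR = 66   # ASCII 'B'

def pad_row(row, map_width):
    """Truncate a raw map line to map_width, pad with spaces, close with a border byte."""
    add = bytearray(row[:map_width], "ascii")
    if len(add) < map_width:
        while len(add) < map_width - 1:
            add.append(PADDING_CHAR)
        add.append(MAPBORDER_CHAR)
    return add

print(pad_row("P  E", 8))  # bytearray(b'P  E   B')
```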
{
"source": "jpaav/comm",
"score": 2
} |
#### File: comm/accounts/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Custom Profile linked to a User
from rooms.models import Org
class Profile(models.Model):
def __str__(self):
return 'Profile: ' + self.user.get_full_name()
user = models.OneToOneField(User, on_delete=models.CASCADE)
# The manager to get Profile objects
objects = models.Manager()
# The primary key
# uid = models.IntegerField(primary_key=True)
timezone = models.CharField(max_length=50, default='EST', blank=True)
# The user variable to allow authentication to work
username = models.CharField(max_length=200, default="")
bio = models.CharField(max_length=1000, default="", blank=True)
    permission = {}  # maps a permission (a URL or a keyword) to the list of organizations for which the user may perform that action; note this is a plain class attribute, not a persisted model field
# TODO: remove the below var orgs it seems unnecessary.
orgs = models.ForeignKey(
Org,
models.CASCADE,
null=True,
blank=True
)
# add permission to the profile for the PERM and the ORG
def addPermission(self, perm, org):
if self.permission.get(perm, None) is None:
self.permission[perm] = [org]
else:
self.permission[perm].extend(org)
# checks to see if permission contains PERM for ORG
def hasPerm(self, perm, org):
        for i in self.permission.get(perm, []):  # .get avoids a KeyError when the permission was never granted
if i == org:
return True
return False
    # checks to see if anyone has the permission and returns the authorized organizations
def allWithPerm(self, perm):
return self.permission.get(perm, None)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
# TODO: should be able to delete all the except: stuff after all the users have run this
try:
instance.profile.save()
except:
print("stuff")
print(sender.username, sender.id)
print(instance.username, instance.id)
p = Profile.objects.filter(username=instance.get_username()).first()
if p is None:
print("cool")
instance.profile = Profile(username=instance.get_username())
else:
print("cool a")
instance.profile = p
instance.profile.user = instance
instance.save()
```
#### File: comm/orgs/models.py
```python
from django.contrib.auth.models import User
from django.db import models
class Org(models.Model):
def __str__(self):
return 'Org: ' + self.name
objects = models.Manager()
name = models.CharField(max_length=50, null=True)
description = models.CharField(max_length=1000, null=True, blank=True)
location = models.CharField(max_length=100, null=True, blank=True)
owner = models.ForeignKey(
User,
null=True,
related_name='owner',
)
# TODO Check if this should be "CASCADE"
members = models.ManyToManyField(
User,
related_name='members',
blank=True
)
unapproved = models.ManyToManyField(
User,
related_name='unapproved',
blank=True
)
```
#### File: patientlog/templatetags/linebreakless.py
```python
from django.utils import six
from django import template
from django.template.base import Node
from django.utils.functional import allow_lazy
register = template.Library()
@register.tag
def linebreakless(parser, token):
nodelist = parser.parse(('endlinebreakless',))
parser.delete_first_token()
return LinebreaklessNode(nodelist)
class LinebreaklessNode(Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
strip_line_breaks = allow_lazy(lambda x: x.replace('\n', ''), six.text_type)
return strip_line_breaks(self.nodelist.render(context).strip())
```
#### File: comm/rooms/views.py
```python
from django.shortcuts import render
from rooms.models import Room
def room(request, room_id):
    room = Room.objects.filter(pk=room_id).first()
    return render(request, 'rooms/room.html', {'room': room})
``` |
{
"source": "jpabb7/p2pScrapper",
"score": 2
} |
#### File: BitTorrent-5.2.2/BitTorrent/Choker.py
```python
from __future__ import division
import math
import random
from BTL.obsoletepythonsupport import set
class Choker(object):
def __init__(self, config, schedule):
self.config = config
self.schedule = schedule
self.connections = []
self.count = 0
self.unchokes_since_last = 0
self.interval = 5
self.shutting_down = False
#self.magic_number = 6 # magic 6 : (30 / self.interval)
self.magic_number = (30 / self.interval)
schedule(self.interval, self._round_robin)
def _round_robin(self):
self.schedule(self.interval, self._round_robin)
self.count += 1
# don't do more work than you have to
if not self.connections:
return
# rotation for round-robin
if self.count % self.magic_number == 0:
for i, c in enumerate(self.connections):
u = c.upload
if u.choked and u.interested:
self.connections = self.connections[i:] + self.connections[:i]
break
self._rechoke()
## new
############################################################################
def _rechoke(self):
# step 1:
# get sorted in order of preference lists of peers
# one for downloading torrents, and one for seeding torrents
down_pref = []
seed_pref = []
for i, c in enumerate(self.connections):
u = c.upload
if c.download.have.numfalse == 0 or not u.interested:
continue
# I cry.
if c.download.multidownload.storage.have.numfalse != 0:
## heuristic for downloading torrents
if not c.download.is_snubbed():
## simple download rate based
down_pref.append((-c.download.get_rate(), i))
## ratio based
#dr = c.download.get_rate()
#ur = max(1, u.get_rate())
#ratio = dr / ur
#down_pref.append((-ratio, i))
else:
## heuristic for seeding torrents
## Uoti special
## if c._decrypt is not None:
## seed_pref.append((self.count, u.get_rate(), i))
## elif (u.unchoke_time > self.count - self.magic_number or
## u.buffer and c.connection.is_flushed()):
## seed_pref.append((u.unchoke_time, u.get_rate(), i))
## else:
## seed_pref.append((1, u.get_rate(), i))
## sliding, first pass (see below)
r = u.get_rate()
if c._decrypt is not None:
seed_pref.append((2, r, i))
else:
seed_pref.append((1, r, i))
down_pref.sort()
seed_pref.sort()
#pprint(down_pref)
#pprint(seed_pref)
down_pref = [ self.connections[i] for junk, i in down_pref ]
seed_pref = [ self.connections[i] for junk, junk, i in seed_pref ]
max_uploads = self._max_uploads()
## sliding, second pass
## # up-side-down sum for an idea of capacity
## uprate_sum = sum(rates[-max_uploads:])
## if max_uploads == 0:
## avg_uprate = 0
## else:
## avg_uprate = uprate_sum / max_uploads
## #print 'avg_uprate', avg_uprate, 'of', max_uploads
## self.extra_slots = max(self.extra_slots - 1, 0)
## if avg_uprate > self.arbitrary_min:
## for r in rates:
## if r < (avg_uprate * 0.80): # magic 80%
## self.extra_slots += 2
## break
## self.extra_slots = min(len(seed_pref), self.extra_slots)
## max_uploads += self.extra_slots
## #print 'plus', self.extra_slots
# step 2:
# split the peer lists by a ratio to fill the available upload slots
d_uploads = max(1, int(round(max_uploads * 0.70)))
s_uploads = max(1, int(round(max_uploads * 0.30)))
#print 'original', 'ds', d_uploads, 'us', s_uploads
extra = max(0, d_uploads - len(down_pref))
if extra > 0:
s_uploads += extra
d_uploads -= extra
extra = max(0, s_uploads - len(seed_pref))
if extra > 0:
s_uploads -= extra
d_uploads = min(d_uploads + extra, len(down_pref))
#print 'ds', d_uploads, 'us', s_uploads
down_pref = down_pref[:d_uploads]
seed_pref = seed_pref[:s_uploads]
preferred = set(down_pref)
preferred.update(seed_pref)
# step 3:
# enforce unchoke states
count = 0
to_choke = []
for i, c in enumerate(self.connections):
u = c.upload
if c in preferred:
u.unchoke(self.count)
count += 1
else:
to_choke.append(c)
# step 4:
# enforce choke states and handle optimistics
count = 0
optimistics = max(self.config['min_uploads'],
max_uploads - len(preferred))
#print 'optimistics', optimistics
for c in to_choke:
u = c.upload
if c.download.have.numfalse == 0:
u.choke()
elif count >= optimistics:
u.choke()
else:
# this one's optimistic
u.unchoke(self.count)
if u.interested:
count += 1
############################################################################
def shutdown(self):
self.shutting_down = True
def connection_made(self, connection):
p = random.randrange(len(self.connections) + 1)
self.connections.insert(p, connection)
def connection_lost(self, connection):
self.connections.remove(connection)
if (not self.shutting_down and
connection.upload.interested and not connection.upload.choked):
self._rechoke()
def interested(self, connection):
if not connection.upload.choked:
self._rechoke()
def not_interested(self, connection):
if not connection.upload.choked:
self._rechoke()
def _max_uploads(self):
uploads = self.config['max_uploads']
rate = self.config['max_upload_rate'] / 1024
if uploads > 0:
pass
elif rate <= 0:
uploads = 7 # unlimited, just guess something here...
elif rate < 9:
uploads = 2
elif rate < 15:
uploads = 3
elif rate < 42:
uploads = 4
else:
uploads = int(math.sqrt(rate * .6))
return uploads
```
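
The 70/30 unchoke-slot split in step 2 of `_rechoke()` above, including the rebalancing when one of the peer lists is too short, can be pulled out into a small standalone function for illustration (a sketch, not part of the original module):
```python
def split_slots(max_uploads, num_downloading, num_seeding):
    """Mirrors the 70/30 unchoke-slot split of Choker._rechoke, step 2."""
    d = max(1, int(round(max_uploads * 0.70)))
    s = max(1, int(round(max_uploads * 0.30)))
    extra = max(0, d - num_downloading)   # hand unused download slots to seeds
    if extra > 0:
        s += extra
        d -= extra
    extra = max(0, s - num_seeding)       # and unused seed slots back to downloads
    if extra > 0:
        s -= extra
        d = min(d + extra, num_downloading)
    return d, s

print(split_slots(8, 10, 10))  # (6, 2)
print(split_slots(8, 1, 10))   # (1, 7)
```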
#### File: BitTorrent-5.2.2/BitTorrent/configfile.py
```python
import os
import sys
import traceback
from BitTorrent.translation import _
from ConfigParser import RawConfigParser
from ConfigParser import MissingSectionHeaderError, ParsingError
from BitTorrent import parseargs, version, BTFailure
from BTL.platform import app_name
from BTL.platform import encode_for_filesystem, decode_from_filesystem
from BitTorrent.platform import get_save_dir, locale_root, is_frozen_exe
from BitTorrent.platform import get_dot_dir, get_incomplete_data_dir
from BitTorrent.platform import enforce_shortcut, enforce_association
from BitTorrent.platform import smart_gettext_and_install
from BitTorrent.platform import get_old_incomplete_data_dir
from BitTorrent.platform import get_temp_subdir
from BitTorrent.platform import old_broken_config_subencoding
from BitTorrent.zurllib import bind_tracker_connection
from BTL.exceptions import str_exc
from BitTorrent.shortargs import convert_from_shortforms
downloader_save_options = [
# General
'confirm_quit' ,
# Appearance
'progressbar_style' ,
'toolbar_text' ,
'toolbar_size' ,
# Bandwidth
'max_upload_rate' ,
'max_download_rate' ,
# Saving
'save_in' ,
'save_incomplete_in' ,
'ask_for_save' ,
# Network
'minport' ,
'maxport' ,
'upnp' ,
'ip' ,
'resolve_hostnames' ,
# Misc
'open_from' ,
'geometry' ,
'start_maximized' ,
'column_order' ,
'enabled_columns' ,
'column_widths' ,
'sort_column' ,
'sort_ascending' ,
'show_details' ,
'settings_tab' ,
'details_tab' ,
'splitter_height' ,
'theme' ,
'donated' ,
'notified' ,
]
if os.name == 'nt':
downloader_save_options.extend([
# General
'enforce_association' ,
'launch_on_startup' ,
'minimize_to_tray' ,
'start_minimized' ,
'close_to_tray' ,
# Bandwidth
'bandwidth_management',
'show_variance_line' ,
])
MAIN_CONFIG_FILE = 'ui_config'
TORRENT_CONFIG_FILE = 'torrent_config'
alt_uiname = {'bittorrent':'btdownloadgui',
'maketorrent':'btmaketorrentgui',}
def _read_config(filename):
"""Returns a RawConfigParser that has parsed the config file specified by
the passed filename."""
# check for bad config files
p = RawConfigParser()
fp = None
try:
fp = open(filename)
except IOError:
pass
if fp is not None:
try:
p.readfp(fp, filename=filename)
except MissingSectionHeaderError:
fp.close()
del fp
bad_config(filename)
except ParsingError:
fp.close()
del fp
bad_config(filename)
else:
fp.close()
return p
def _write_config(error_callback, filename, p):
if not p.has_section('format'):
p.add_section('format')
p.set('format', 'encoding', 'utf-8')
try:
f = file(filename, 'wb')
p.write(f)
f.close()
except Exception, e:
try:
f.close()
except:
pass
error_callback(_("Could not permanently save options: ")+str_exc(e))
def bad_config(filename):
base_bad_filename = filename + '.broken'
bad_filename = base_bad_filename
i = 0
while os.access(bad_filename, os.F_OK):
bad_filename = base_bad_filename + str(i)
i+=1
os.rename(filename, bad_filename)
sys.stderr.write(_("Error reading config file. "
"Old config file stored in \"%s\"") % bad_filename)
def get_config(defaults, section):
"""This reads the key-value pairs from the specified section in the
config file and from the common section. It then places those
appearing in the defaults into a dict, which is then returned.
@type defaults: dict
@param defaults: dict of name-value pairs derived from the
defaults list for this application (see defaultargs.py).
Only the names in the name-value pairs are used. get_config
only reads variables from the config file with matching names.
@type section: str
@param section: in the configuration from which to read options.
So far, the sections have been named after applications, e.g.,
bittorrent, bittorrent-console, etc.
@return: a dict containing option-value pairs.
"""
assert type(defaults)==dict
assert type(section)==str
configdir = get_dot_dir()
if configdir is None:
return {}
if not os.path.isdir(configdir):
try:
os.mkdir(configdir, 0700)
except:
pass
p = _read_config(os.path.join(configdir, 'config')) # returns parser.
if p.has_section('format'):
encoding = p.get('format', 'encoding')
else:
encoding = old_broken_config_subencoding
values = {}
if p.has_section(section):
for name, value in p.items(section):
if name in defaults:
values[name] = value
if p.has_section('common'):
for name, value in p.items('common'):
if name in defaults and name not in values:
values[name] = value
if defaults.get('data_dir') == '' and \
'data_dir' not in values and os.path.isdir(configdir):
datadir = os.path.join(configdir, 'data')
values['data_dir'] = decode_from_filesystem(datadir)
parseargs.parse_options(defaults, values, encoding)
return values
def save_global_config(defaults, section, error_callback,
save_options=downloader_save_options):
filename = os.path.join(defaults['data_dir'], MAIN_CONFIG_FILE)
p = _read_config(filename)
p.remove_section(section)
if p.has_section(alt_uiname[section]):
p.remove_section(alt_uiname[section])
p.add_section(section)
for name in save_options:
name.decode('ascii').encode('utf-8') # just to make sure we can
if defaults.has_key(name):
value = defaults[name]
if isinstance(value, str):
value = value.decode('ascii').encode('utf-8')
elif isinstance(value, unicode):
value = value.encode('utf-8')
p.set(section, name, value)
else:
err_str = _("Configuration option mismatch: '%s'") % name
if is_frozen_exe:
err_str = _("You must quit %s and reinstall it. (%s)") % (app_name, err_str)
error_callback(err_str)
_write_config(error_callback, filename, p)
def save_torrent_config(path, infohash, config, error_callback):
section = infohash.encode('hex')
filename = os.path.join(path, TORRENT_CONFIG_FILE)
p = _read_config(filename)
p.remove_section(section)
p.add_section(section)
for key, value in config.items():
p.set(section, key, value)
_write_config(error_callback, filename, p)
def read_torrent_config(global_config, path, infohash, error_callback):
section = infohash.encode('hex')
filename = os.path.join(path, TORRENT_CONFIG_FILE)
p = _read_config(filename)
if not p.has_section(section):
return {}
else:
c = {}
for name, value in p.items(section):
if global_config.has_key(name):
t = type(global_config[name])
if t == bool:
c[name] = value in ('1', 'True', True)
else:
try:
c[name] = type(global_config[name])(value)
except ValueError, e:
error_callback('%s (name:%s value:%s type:%s global:%s)' %
(str_exc(e), name, repr(value),
type(global_config[name]), global_config[name]))
# is this reasonable?
c[name] = global_config[name]
elif name == 'save_as':
# Backwards compatibility for BitTorrent 4.4 torrent_config file
c[name] = value
try:
c[name] = c[name].decode('utf-8')
except:
pass
return c
def remove_torrent_config(path, infohash, error_callback):
section = infohash.encode('hex')
filename = os.path.join(path, TORRENT_CONFIG_FILE)
p = _read_config(filename)
if p.has_section(section):
p.remove_section(section)
_write_config(error_callback, filename, p)
def parse_configuration_and_args(defaults, uiname, arglist=[], minargs=None,
maxargs=None):
"""Given the default option settings and overrides these defaults
from values read from the config file, and again overrides the
config file with the arguments that appear in the arglist.
'defaults' is a list of tuples of the form (optname, value,
desc) where 'optname' is a string containing the option's name,
value is the option's default value, and desc is the option's
description.
'uiname' is a string specifying the user interface that has been
created by the caller. Ex: bittorrent, maketorrent.
arglist is usually argv[1:], i.e., excluding the name used to
execute the program.
minargs specifies the minimum number of arguments that must appear in
arglist. If the number of arguments is less than the minimum then
a BTFailure exception is raised.
maxargs specifies the maximum number of arguments that can appear
in arglist. If the number of arguments exceeds the maximum then
a BTFailure exception is raised.
This returns the tuple (config,args) where config is
a dictionary of (option, value) pairs, and args is the list
of arguments in arglist after the command-line arguments have
been removed.
For example:
bittorrent-curses.py --save_as lx-2.6.rpm lx-2.6.rpm.torrent --max_upload_rate 0
returns a (config,args) pair where the
config dictionary contains many defaults plus
the mappings
'save_as': 'linux-2.6.15.tar.gz'
and
'max_upload_rate': 0
The args in the returned pair is
args= ['linux-2.6.15.tar.gz.torrent']
"""
assert type(defaults)==list
assert type(uiname)==str
assert type(arglist)==list
assert minargs is None or type(minargs) in (int,long) and minargs>=0
assert maxargs is None or type(maxargs) in (int,long) and maxargs>=minargs
# remap shortform arguments to their long-forms.
arglist = convert_from_shortforms(arglist)
defconfig = dict([(name, value) for (name, value, doc) in defaults])
if arglist[0:] == ['--version']:
print version
sys.exit(0)
if arglist[0:] == '--help':
parseargs.printHelp(uiname, defaults)
sys.exit(0)
if "--use_factory_defaults" not in arglist:
presets = get_config(defconfig, uiname) # read from .bittorrent dir.
# run as if fresh install using temporary directories.
else:
presets = {}
temp_dir = get_temp_subdir()
#set_config_dir(temp_dir) # is already set in platform.py.
save_in = encode_for_filesystem( u"save_in" )[0]
presets["save_in"] = \
decode_from_filesystem(os.path.join(temp_dir,save_in))
data = encode_for_filesystem( u"data" )[0]
presets["data_dir"] = \
decode_from_filesystem(os.path.join(temp_dir,data))
incomplete = encode_for_filesystem( u"incomplete" )[0]
presets["save_incomplete_in"] = \
decode_from_filesystem(os.path.join(temp_dir,incomplete))
presets["one_connection_per_ip"] = False
config = args = None
try:
config, args = parseargs.parseargs(arglist, defaults, minargs, maxargs,
presets)
except parseargs.UsageException, e:
print e
parseargs.printHelp(uiname, defaults)
sys.exit(0)
datadir = config.get('data_dir')
found_4x_config = False
if datadir:
datadir,bad = encode_for_filesystem(datadir)
if bad:
raise BTFailure(_("Invalid path encoding."))
if not os.path.exists(datadir):
os.mkdir(datadir)
if uiname in ('bittorrent', 'maketorrent'):
values = {}
p = _read_config(os.path.join(datadir, MAIN_CONFIG_FILE))
if p.has_section('format'):
encoding = p.get('format', 'encoding')
else:
encoding = old_broken_config_subencoding
if not p.has_section(uiname) and p.has_section(alt_uiname[uiname]):
uiname = alt_uiname[uiname]
if p.has_section(uiname):
for name, value in p.items(uiname):
if name in defconfig:
values[name] = value
elif not found_4x_config:
# identify 4.x version config file
if name in ('start_torrent_behavior',
'seed_forever',
'progressbar_hack',
'seed_last_forever',
'next_torrent_ratio',
'next_torrent_time',
'last_torrent_ratio',
):
found_4x_config = True
parseargs.parse_options(defconfig, values, encoding)
presets.update(values)
config, args = parseargs.parseargs(arglist, defaults, minargs,
maxargs, presets)
for d in ('', 'resume', 'metainfo', 'torrents'):
ddir = os.path.join(datadir, d)
if not os.path.exists(ddir):
os.mkdir(ddir, 0700)
else:
assert(os.path.isdir(ddir))
if found_4x_config:
# version 4.x stored KB/s, < version 4.x stores B/s
config['max_upload_rate'] *= 1024
if config.get('language'):
# this is non-blocking if the language does not exist
smart_gettext_and_install('bittorrent', locale_root,
languages=[config['language']])
if config.has_key('bind') and config['bind'] != '':
bind_tracker_connection(config['bind'])
if config.has_key('launch_on_startup'):
enforce_shortcut(config, log_func=sys.stderr.write)
if os.name == 'nt' and config.has_key('enforce_association'):
enforce_association()
if config.has_key('save_in') and config['save_in'] == '' and \
(not config.has_key("save_as") or config['save_as'] == '' ) \
and uiname != 'bittorrent':
config['save_in'] = decode_from_filesystem(get_save_dir())
incomplete = decode_from_filesystem(get_incomplete_data_dir())
if config.get('save_incomplete_in') == '':
config['save_incomplete_in'] = incomplete
if config.get('save_incomplete_in') == get_old_incomplete_data_dir():
config['save_incomplete_in'] = incomplete
if uiname == "test-client" or (uiname.startswith("bittorrent")
and uiname != 'bittorrent-tracker'):
if not config.get('ask_for_save'):
# we check for existance, so things like "D:\" don't trip us up.
if (config['save_in'] and
not os.path.exists(config['save_in'])):
try:
os.makedirs(config['save_in'])
except OSError, e:
if (e.errno == 2 or # no such file or directory
e.errno == 13): # permission denied
traceback.print_exc()
print >> sys.stderr, "save_in could not be created. Falling back to prompting."
config['ask_for_save'] = True
elif e.errno != 17: # path already exists
raise
if (config['save_incomplete_in'] and
not os.path.exists(config['save_incomplete_in'])):
try:
os.makedirs(config['save_incomplete_in'])
except OSError, e:
if e.errno != 17: # path already exists
traceback.print_exc()
print >> sys.stderr, "save_incomplete_in could not be created. Falling back to default incomplete path."
config['save_incomplete_in'] = incomplete
return config, args
```
#### File: BitTorrent-5.2.2/BitTorrent/ConnectionManager.py
```python
from __future__ import division
import sys
from BTL.platform import app_name
from BTL.translation import _
from BitTorrent import BTFailure
from BTL.obsoletepythonsupport import *
from BTL.hash import sha
from BitTorrent.RawServer_twisted import Handler
from BitTorrent.Connector import Connector
from BitTorrent.HTTPConnector import HTTPConnector
from BitTorrent.LocalDiscovery import LocalDiscovery
from BitTorrent.InternetWatcher import InternetSubscriber
from BTL.DictWithLists import DictWithInts, OrderedDict
from BTL.platform import bttime
from BTL.rand_tools import iter_rand_pos
import random
import logging
import urlparse
ONLY_LOCAL = False
GLOBAL_FILTER = None
def GLOBAL_FILTER(ip, port, direction=""):
#print ip, direction
return False
GLOBAL_FILTER = None
# header, reserved, download id, my id, [length, message]
LOWER_BOUND = 1
UPPER_BOUND = 120
BUFFER = 1.2
use_timeout_order = False
timeout_order = [3, 15, 30]
debug = False
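# set_timeout_metrics keeps a rolling estimate of how long outgoing connections
# take to complete: each observed connect time is averaged with the current
# estimate, padded by BUFFER (20%) and clamped to [LOWER_BOUND, UPPER_BOUND].
# timeout_order then holds three escalating timeouts derived from that estimate
# (roughly base, 15x and 30x), used when retrying connections that timed out.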
def set_timeout_metrics(delta):
delta = max(delta, 0.0001)
avg = ((timeout_order[0] / BUFFER) + delta) / 2
avg *= BUFFER
avg = max(LOWER_BOUND, avg)
avg = min(UPPER_BOUND, avg)
timeout_order[0] = avg
timeout_order[2] = timeout_order[0] * 30
timeout_order[1] = timeout_order[2] / 2
class GaurdedInitialConnection(Handler):
def __init__(self, parent, id, encrypt=False, log_prefix="", lan=False,
urgent=False, timeout=None ):
self.t = None
self.id = id
self.lan = lan
self.parent = parent
self.urgent = urgent
self.timeout = timeout
self.encrypt = encrypt
self.connector = None
self.log_prefix = log_prefix
def _make_connector(self, s):
addr = (s.ip, s.port)
self.parent.cache_complete_peer(addr, self.id, type(self),
encrypt=self.encrypt,
urgent=self.urgent,
lan=self.lan)
return Connector(self.parent, s, self.id, True,
obfuscate_outgoing=self.encrypt,
log_prefix=self.log_prefix,
lan=self.lan)
def connection_starting(self, addr):
self.start = bttime()
self.t = self.parent.add_task(self.timeout,
self.parent._cancel_connection, addr)
def _abort_timeout(self):
if self.t and self.t.active():
self.t.cancel()
self.t = None
def connection_made(self, s):
t = bttime() - self.start
set_timeout_metrics(t)
addr = (s.ip, s.port)
if debug:
self.parent.logger.warning('connection made: %s %s' %
(addr, t))
del self.parent.pending_connections[addr]
self._abort_timeout()
con = self._make_connector(s)
self.parent._add_connection(con)
# if the pending queue filled and put the remaining connections
# into the spare list, this will push more connections in to pending
self.parent.replace_connection()
def connection_failed(self, s, exception):
addr = (s.ip, s.port)
if debug:
self.parent.logger.warning('connection failed: %s %s' %
(addr, exception.getErrorMessage()))
if s.connector.wasPreempted():
self.parent._resubmit_connection(addr)
del self.parent.pending_connections[addr]
self._abort_timeout()
# only holepunch if this connection timed out entirely
if self.timeout >= timeout_order[-1]:
c = self.parent.find_connection_in_common(addr)
if c:
c.send_holepunch_request(addr)
self.parent.replace_connection()
class HTTPInitialConnection(GaurdedInitialConnection):
def _make_connector(self, s):
addr = (s.ip, s.port)
self.parent.cache_complete_peer(addr, self.id, type(self),
urgent=self.urgent)
# ow!
piece_size = self.parent.downloader.storage.piece_size
urlage = self.parent.downloader.urlage
return HTTPConnector(self.parent, piece_size, urlage, s, self.id, True,
log_prefix=self.log_prefix)
class ConnectionManager(InternetSubscriber):
def __init__(self, make_upload, downloader, choker,
numpieces, ratelimiter,
rawserver, config, private, my_id, add_task, infohash, context,
addcontactfunc, reported_port, tracker_ips, log_prefix ):
"""
@param downloader: MultiDownload for this torrent.
@param my_id: my peer id.
@param tracker_ips: list of tracker ip addresses.
ConnectionManager does not drop connections from the tracker.
This allows trackers to perform NAT checks even when there
are max_allow_in connections.
@param log_prefix: string used as the prefix for all
log entries generated by the ConnectionManager and its
created Connectors.
"""
self.make_upload = make_upload
self.downloader = downloader
self.choker = choker
# aaargh
self.piece_size = downloader.storage.piece_size
self.numpieces = numpieces
self.ratelimiter = ratelimiter
self.rawserver = rawserver
self.my_id = my_id
self.private = private
self.config = config
self.add_task = add_task
self.infohash = infohash
self.context = context
self.addcontact = addcontactfunc
self.reported_port = reported_port
self.everinc = False
self.tracker_ips = tracker_ips
self.log_prefix = log_prefix
self.logger = logging.getLogger(self.log_prefix)
self.closed = False
# submitted
self.pending_connections = {}
# transport connected
self.connectors = set()
# protocol active
# we do a lot of itterating and few mutations, so use a list
self.complete_connectors = [] # set()
# use a dict for a little semi-randomness
self.spares = {} # OrderedDict()
self.cached_peers = OrderedDict()
self.cache_limit = 300
self.connector_ips = DictWithInts()
self.connector_ids = DictWithInts()
self.banned = set()
self._ka_task = self.add_task(config['keepalive_interval'],
self.send_keepalives)
self._pex_task = None
if not self.private:
self._pex_task = self.add_task(config['pex_interval'],
self.send_pex)
self.reopen(reported_port)
def cleanup(self):
if not self.closed:
self.close_connections()
del self.context
self.cached_peers.clear()
if self._ka_task.active():
self._ka_task.cancel()
if self._pex_task and self._pex_task.active():
self._pex_task.cancel()
def reopen(self, port):
self.closed = False
self.reported_port = port
self.unthrottle_connections()
for addr in self.cached_peers:
self._fire_cached_connection(addr)
self.rawserver.internet_watcher.add_subscriber(self)
def internet_active(self):
for addr in self.cached_peers.iterkeys():
self._fire_cached_connection(addr)
def remove_addr_from_cache(self, addr):
# could have been an incoming connection
# or could have been dropped by the cache limit
if addr in self.cached_peers:
del self.cached_peers[addr]
def try_one_connection(self):
keys = self.cached_peers.keys()
if not keys:
return False
addr = random.choice(keys)
self._fire_cached_connection(addr)
return True
def _fire_cached_connection(self, addr):
v = self.cached_peers[addr]
complete, (id, handler, a, kw) = v
return self._start_connection(addr, id, handler, *a, **kw)
def cache_complete_peer(self, addr, pid, handler, *a, **kw):
self.cache_peer(addr, pid, handler, 1, *a, **kw)
def cache_incomplete_peer(self, addr, pid, handler, *a, **kw):
self.cache_peer(addr, pid, handler, 0, *a, **kw)
def cache_peer(self, addr, pid, handler, complete, *a, **kw):
# obey the cache size limit
if (addr not in self.cached_peers and
len(self.cached_peers) >= self.cache_limit):
for k, v in self.cached_peers.iteritems():
if not v[0]:
del self.cached_peers[k]
break
else:
# cache full of completes, delete a random peer.
# yes, this can cache an incomplete when the cache is full of
# completes, but only 1 because of the filter above.
oldaddr = self.cached_peers.keys()[0]
del self.cached_peers[oldaddr]
elif not complete:
if addr in self.cached_peers and self.cached_peers[addr][0]:
# don't overwrite a complete with an incomplete.
return
self.cached_peers[addr] = (complete, (pid, handler, a, kw))
def send_keepalives(self):
self._ka_task = self.add_task(self.config['keepalive_interval'],
self.send_keepalives)
for c in self.complete_connectors:
c.send_keepalive()
def send_pex(self):
self._pex_task = self.add_task(self.config['pex_interval'],
self.send_pex)
pex_set = set()
for c in self.complete_connectors:
if c.listening_port:
pex_set.add((c.ip, c.listening_port))
for c in self.complete_connectors:
c.send_pex(pex_set)
def hashcheck_succeeded(self, i):
for c in self.complete_connectors:
# should we send a have message if peer already has the piece?
# yes! it is low bandwidth and useful for that peer.
c.send_have(i)
def find_connection_in_common(self, addr):
for c in self.complete_connectors:
if addr in c.remote_pex_set:
return c
# returns False if the connection info has been pushed on to self.spares
# other filters and a successful connection return True
def start_connection(self, addr, id=None, encrypt=False, lan=False):
"""@param addr: domain name/ip address and port pair.
@param id: peer id.
"""
return self._start_connection(addr, id, GaurdedInitialConnection,
encrypt=encrypt,
lan=lan)
def start_http_connection(self, url):
r = urlparse.urlparse(url)
host = r[1]
if ':' in host:
host, port = host.split(':')
port = int(port)
else:
port = 80
df = self.rawserver.gethostbyname(host)
df.addCallback(self._connect_http, port, url)
df.addLogback(self.logger.warning, "Resolve failed")
def _connect_http(self, ip, port, url):
self._start_connection((ip, port), url,
HTTPInitialConnection, urgent=True)
def _start_connection(self, addr, pid, handler, *a, **kw):
"""@param addr: domain name/ip address and port pair.
@param pid: peer id.
"""
if self.closed:
return True
if addr[0] in self.banned:
return True
if pid == self.my_id:
return True
for v in self.connectors:
if pid and v.id == pid:
return True
if self.config['one_connection_per_ip'] and v.ip == addr[0]:
return True
total_outstanding = len(self.connectors)
# it's possible the pending connections could eventually complete,
# so we have to account for those when enforcing max_initiate
total_outstanding += len(self.pending_connections)
if total_outstanding >= self.config['max_initiate']:
self.spares[(addr, pid)] = (handler, a, kw)
return False
# if these fail, I'm getting a very weird addr object
assert isinstance(addr, tuple)
assert isinstance(addr[0], str)
assert isinstance(addr[1], int)
if ONLY_LOCAL and addr[0] != "127.0.0.1" and not addr[0].startswith("192.168") and addr[1] != 80:
return True
if GLOBAL_FILTER and not GLOBAL_FILTER(addr[0], addr[1], "out"):
return True
if addr not in self.cached_peers:
self.cache_incomplete_peer(addr, pid, handler, *a, **kw)
# sometimes we try to connect to a peer we're already trying to
# connect to
#assert addr not in self.pending_connections
if addr in self.pending_connections:
return True
kw['log_prefix'] = self.log_prefix
timeout = 30
if use_timeout_order:
timeout = timeout_order[0]
kw.setdefault('timeout', timeout)
h = handler(self, pid, *a, **kw)
self.pending_connections[addr] = (h, (addr, pid, handler, a, kw))
urgent = kw.pop('urgent', False)
connector = self.rawserver.start_connection(addr, h, self.context,
# we'll handle timeouts.
# not so fond of this.
timeout=None,
urgent=urgent)
h.connector = connector
return True
def _resubmit_connection(self, addr):
# we leave it on pending_connections.
# so the standard connection_failed handling occurs.
h, info = self.pending_connections[addr]
addr, pid, handler, a, kw = info
self.spares[(addr, pid)] = (handler, a, kw)
def _cancel_connection(self, addr):
if addr not in self.pending_connections:
# already made
return
# we leave it on pending_connections.
# so the standard connection_failed handling occurs.
h, info = self.pending_connections[addr]
addr, pid, handler, a, kw = info
if use_timeout_order and h.timeout < timeout_order[-1]:
for t in timeout_order:
if t > h.timeout:
h.timeout = t
break
else:
h.timeout = timeout_order[-1]
# this feels odd
kw['timeout'] = h.timeout
self.spares[(addr, pid)] = (handler, a, kw)
# do this last, since twisted might fire the event handler from inside
# the function
# HMM:
# should be stopConnecting, but I've seen this fail.
# close does the same thing, but disconnects in the case where the
# connection was made. Not sure how that occurs without add being in
# self.pending_connections
# Maybe this was fixed recently in CRLR.
#h.connector.stopConnecting()
h.connector.close()
def connection_handshake_completed(self, connector):
self.connector_ips.add(connector.ip)
self.connector_ids.add(connector.id)
self.complete_connectors.append(connector)
connector.upload = self.make_upload(connector)
connector.download = self.downloader.make_download(connector)
self.choker.connection_made(connector)
if connector.uses_dht:
connector.send_port(self.reported_port)
if self.config['resolve_hostnames']:
df = self.rawserver.gethostbyaddr(connector.ip)
def save_hostname(hostname_tuple):
hostname, aliases, ips = hostname_tuple
connector.hostname = hostname
df.addCallback(save_hostname)
df.addErrback(lambda fuckoff : None)
def got_port(self, connector):
if self.addcontact and connector.uses_dht and \
connector.dht_port != None:
self.addcontact(connector.connection.ip, connector.dht_port)
def ever_got_incoming(self):
return self.everinc
def how_many_connections(self):
return len(self.complete_connectors)
def replace_connection(self):
if self.closed:
return
while self.spares:
k, v = self.spares.popitem()
addr, id = k
handler, a, kw = v
started = self._start_connection(addr, id, handler, *a, **kw)
if not started:
# start_connection decided to push this connection back on to
# self.spares because a limit was hit. break now or loop
# forever
break
def throttle_connections(self):
self.throttled = True
for c in iter_rand_pos(self.connectors):
c.connection.pause_reading()
def unthrottle_connections(self):
self.throttled = False
for c in iter_rand_pos(self.connectors):
c.connection.resume_reading()
# arg. resume actually flushes the buffers in iocpreactor, so
# we have to check the state constantly
if self.throttled:
break
def close_connection(self, id):
for c in self.connectors:
if c.id == id and not c.closed:
c.connection.close()
c.closed = True
def close_connections(self):
self.rawserver.internet_watcher.remove_subscriber(self)
self.closed = True
pending = self.pending_connections.values()
# drop connections which could be made after we're not interested
for h, info in pending:
h.connector.close()
for c in self.connectors:
if not c.closed:
c.connection.close()
c.closed = True
def singleport_connection(self, connector):
"""hand-off from SingleportListener once the infohash is known and
thus we can map a connection on to a particular Torrent."""
if connector.ip in self.banned:
return False
m = self.config['max_allow_in']
if (m and len(self.connectors) >= m and
connector.ip not in self.tracker_ips):
return False
self._add_connection(connector)
if self.closed:
return False
connector.set_parent(self)
connector.connection.context = self.context
return True
def _add_connection(self, connector):
self.connectors.add(connector)
if self.closed:
connector.connection.close()
elif self.throttled:
connector.connection.pause_reading()
def ban(self, ip):
self.banned.add(ip)
def connection_lost(self, connector):
assert isinstance(connector, Connector)
self.connectors.remove(connector)
if self.ratelimiter:
self.ratelimiter.dequeue(connector)
if connector.complete:
self.connector_ips.remove(connector.ip)
self.connector_ids.remove(connector.id)
self.complete_connectors.remove(connector)
self.choker.connection_lost(connector)
class AnyportListener(Handler):
def __init__(self, port, singleport):
self.port = port
self.singleport = singleport
rawserver = singleport.rawserver
s = rawserver.create_serversocket(port, config['bind'])
rawserver.start_listening(s, self)
def __getattr__(self, attr):
return getattr(self.singleport, attr)
class SingleportListener(Handler):
"""Manages a server socket common to all torrents. When a remote
peer opens a connection to the local peer, the SingleportListener
maps that peer on to the appropriate torrent's connection manager
(see SingleportListener.select_torrent).
See Connector which upcalls to select_torrent after the infohash is
received in the opening handshake."""
def __init__(self, rawserver, nattraverser, log_prefix,
use_local_discovery):
self.rawserver = rawserver
self.nattraverser = nattraverser
self.port = 0
self.ports = {}
self.port_change_notification = None
self.torrents = {}
self.connectors = set()
self.infohash = None
self.obfuscated_torrents = {}
self.local_discovery = None
self.ld_services = {}
self.use_local_discovery = use_local_discovery
self._creating_local_discovery = False
self.log_prefix = log_prefix
self.logger = logging.getLogger(self.log_prefix)
def _close(self, port):
serversocket = self.ports[port][0]
if self.nattraverser:
try:
self.nattraverser.unregister_port(port, "TCP")
except:
                # blanket, just in case - we don't want to interrupt things
self.logger.warning("UPnP deregistration error",
exc_info=sys.exc_info())
self.rawserver.stop_listening(serversocket)
serversocket.close()
if self.local_discovery:
self.local_discovery.stop()
self.local_discovery = None
def _check_close(self, port):
if not port or self.port == port or len(self.ports[port][1]) > 0:
return
self._close(port)
del self.ports[port]
def open_port(self, port, config):
"""Starts BitTorrent running as a server on the specified port."""
if port in self.ports:
self.port = port
return
s = self.rawserver.create_serversocket(port, config['bind'])
if self.nattraverser:
try:
d = self.nattraverser.register_port(port, port, "TCP",
config['bind'],
app_name)
def change(*a):
self.rawserver.external_add_task(0, self._change_port, *a)
d.addCallback(change)
def silent(*e):
pass
d.addErrback(silent)
except:
                # blanket, just in case - we don't want to interrupt things
self.logger.warning("UPnP registration error",
exc_info=sys.exc_info())
self.rawserver.start_listening(s, self)
oldport = self.port
self.port = port
self.ports[port] = [s, {}]
self._check_close(oldport)
if self.local_discovery:
self.local_discovery.stop()
if self.use_local_discovery:
self._create_local_discovery()
def _create_local_discovery(self):
assert self.use_local_discovery
self._creating_local_discovery = True
try:
self.local_discovery = LocalDiscovery(self.rawserver, self.port,
self._start_connection)
self._creating_local_discovery = False
except:
self.rawserver.add_task(5, self._create_local_discovery)
def _start_connection(self, addr, infohash):
infohash = infohash.decode('hex')
if infohash not in self.torrents:
return
connection_manager = self.torrents[infohash]
# TODO: peer id?
connection_manager.start_connection(addr, None)
def _change_port(self, port):
if self.port == port:
return
[serversocket, callbacks] = self.ports[self.port]
self.ports[port] = [serversocket, callbacks]
del self.ports[self.port]
self.port = port
for callback in callbacks:
if callback:
callback(port)
def get_port(self, callback = None):
if self.port:
callbacks = self.ports[self.port][1]
callbacks.setdefault(callback, 0)
callbacks[callback] += 1
return self.port
def release_port(self, port, callback = None):
callbacks = self.ports[port][1]
callbacks[callback] -= 1
if callbacks[callback] == 0:
del callbacks[callback]
self._check_close(port)
def close_sockets(self):
for port in self.ports.iterkeys():
self._close(port)
def add_torrent(self, infohash, connection_manager):
if infohash in self.torrents:
raise BTFailure(_("Can't start two separate instances of the same "
"torrent"))
self.torrents[infohash] = connection_manager
key = sha('req2' + infohash).digest()
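        # 'req2' + infohash hashed with SHA1 is the stream identifier used by
        # obfuscated (encrypted) handshakes, so a peer can select the torrent
        # without sending the raw infohash; see select_torrent_obfuscated below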
self.obfuscated_torrents[key] = connection_manager
if self.local_discovery:
service = self.local_discovery.announce(infohash.encode('hex'),
connection_manager.my_id.encode('hex'))
self.ld_services[infohash] = service
def remove_torrent(self, infohash):
del self.torrents[infohash]
del self.obfuscated_torrents[sha('req2' + infohash).digest()]
if infohash in self.ld_services:
service = self.ld_services.pop(infohash)
if self.local_discovery:
self.local_discovery.unannounce(service)
def connection_made(self, connection):
"""Called when TCP connection has finished opening, but before
BitTorrent protocol has begun."""
if ONLY_LOCAL and connection.ip != '127.0.0.1' and not connection.ip.startswith("192.168") :
return
if GLOBAL_FILTER and not GLOBAL_FILTER(connection.ip, connection.port, "in"):
return
connector = Connector(self, connection, None, False,
log_prefix=self.log_prefix)
self.connectors.add(connector)
def select_torrent(self, connector, infohash):
"""Called when infohash has been received allowing us to map
the connection on to a given Torrent's ConnectionManager."""
# call-up from Connector.
if infohash in self.torrents:
accepted = self.torrents[infohash].singleport_connection(connector)
if not accepted:
# the connection manager may refuse the connection, in which
# case keep the connection in our list until it is dropped
connector.close()
else:
# otherwise remove it
self.connectors.remove(connector)
def select_torrent_obfuscated(self, connector, streamid):
if ONLY_LOCAL and connector.connection.ip != '127.0.0.1':
return
if streamid not in self.obfuscated_torrents:
return
self.obfuscated_torrents[streamid].singleport_connection(connector)
def connection_lost(self, connector):
if (ONLY_LOCAL or GLOBAL_FILTER) and connector not in self.connectors:
return
assert isinstance(connector, Connector)
self.connectors.remove(connector)
def remove_addr_from_cache(self, addr):
# since this was incoming, we don't cache the peer anyway
pass
```
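
The ConnectionManager and SingleportListener above resolve incoming handshakes against two dictionaries: one keyed by the raw infohash, and one keyed by `sha('req2' + infohash)` for obfuscated handshakes. The snippet below is a minimal, self-contained sketch of that dispatch idea only — it is not the project's API, and it uses `hashlib.sha1` as a stand-in for the `sha(...)` helper used above:

```python
# Minimal sketch (not the real BitTorrent classes) of the single-port
# dispatch idea: one listening socket serves many torrents by keying
# their connection managers on infohash.
import hashlib

class TinySingleport(object):
    def __init__(self):
        self.torrents = {}             # infohash -> connection manager
        self.obfuscated_torrents = {}  # sha1('req2' + infohash) -> manager

    def add_torrent(self, infohash, manager):
        if infohash in self.torrents:
            raise ValueError("torrent already registered")
        self.torrents[infohash] = manager
        self.obfuscated_torrents[hashlib.sha1(b'req2' + infohash).digest()] = manager

    def select_torrent(self, infohash):
        # called once the opening handshake reveals which torrent the peer wants
        return self.torrents.get(infohash)

    def remove_torrent(self, infohash):
        del self.torrents[infohash]
        del self.obfuscated_torrents[hashlib.sha1(b'req2' + infohash).digest()]

if __name__ == '__main__':
    sp = TinySingleport()
    ih = b'\x01' * 20                  # a made-up 20-byte infohash
    sp.add_torrent(ih, object())
    assert sp.select_torrent(ih) is not None
    assert sp.select_torrent(b'\x02' * 20) is None
```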
#### File: BitTorrent/GUI_wx/CustomWidgets.py
```python
import os
import sys
from BTL.platform import bttime
from BTL.DictWithLists import DictWithLists
from BTL.obsoletepythonsupport import set
from BTL.sparse_set import SparseSet
from BTL.Lists import collapse
import wx
if os.name == 'nt':
import win32gui
import win32con
def _ScaleBlit(bmp, dc, dst_rect):
sX = float(dst_rect.width) / float(bmp.GetWidth())
sY = float(dst_rect.height) / float(bmp.GetHeight())
dc.SetUserScale(sX, sY)
old_mode = None
if os.name == 'nt':
h_dst = dc.GetHDC()
try:
old_mode = win32gui.SetStretchBltMode(h_dst, win32con.HALFTONE)
except:
pass
if sX == 0:
x = 0
else:
x = dst_rect.x/sX
if sY == 0:
y = 0
else:
y = dst_rect.y/sY
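    # x and y are the destination origin expressed in the scaled (logical)
    # coordinate system, since SetUserScale above applies to later coordinates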
if sys.platform == "darwin":
# magic!
y = round(y)
x += 0.2
dc.SetDeviceOrigin(x, y)
dc.DrawBitmap(bmp, 0, 0, True)
dc.SetDeviceOrigin(0, 0)
else:
dc.DrawBitmap(bmp, x, y, True)
if os.name == 'nt':
try:
win32gui.SetStretchBltMode(h_dst, old_mode)
except:
pass
dc.SetUserScale(1, 1)
class DoubleBufferedMixin(object):
def __init__(self):
self.bind_events()
self.buffer_size = wx.Size(-1, -1)
self.last_size = self._calc_size()
self.init_buffer()
def _calc_size(self):
return self.GetClientSize()
def init_buffer(self):
size = self._calc_size()
if ((self.buffer_size.width < size.width) or
(self.buffer_size.height < size.height)):
self.buffer = wx.EmptyBitmap(size.width, size.height)
dc = wx.MemoryDC()
dc.SelectObject(self.buffer)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
dc.SelectObject(wx.NullBitmap)
self.buffer_size = size
return True
return False
def bind_events(self):
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda e : None)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_PAINT, self.OnPaint)
def redraw(self):
dc = wx.MemoryDC()
dc.SelectObject(self.buffer)
size = self._calc_size()
self.last_size = size
self.draw(dc, size=size)
dc.SelectObject(wx.NullBitmap)
self.Refresh()
def OnSize(self, event):
reallocated = self.init_buffer()
if reallocated or self.last_size != self._calc_size():
self.redraw()
else:
self.Refresh()
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self, self.buffer)
class ScaledBufferMixin(DoubleBufferedMixin):
def __init__(self, w_step=200, h_step=15):
self.w_step = w_step
self.h_step = h_step
# don't go crazy
self.w_max = 1000
self.h_max = 100
DoubleBufferedMixin.__init__(self)
def _round(self, d, step):
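        # snap d down to a multiple of step, but never below one full step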
return max(int(d / step), 1) * step
def _calc_size(self):
size = self.GetClientSize()
        # * 2 for higher quality
w = self._round(size.width*2, self.w_step)
h = self._round(size.height, self.h_step)
w = max(w, self.buffer_size.width)
w = min(w, self.w_max)
h = max(h, self.buffer_size.height)
h = min(h, self.h_max)
return wx.Size(w, h)
def OnPaint(self, event):
dc = wx.PaintDC(self)
        _ScaleBlit(self.buffer, dc, self.GetClientRect())
class ListCtrlPassThrough(object):
def __init__(self, listctrl):
# I'll be nice and ignore you.
if not isinstance(listctrl, wx.ListCtrl):
return
self.listctrl = listctrl
self.Bind(wx.EVT_LEFT_DOWN, self.LeftDown)
self.Bind(wx.EVT_LEFT_DCLICK, self.LeftDClick)
self.Bind(wx.EVT_CONTEXT_MENU, self.ContextMenu)
def _resolve_position(self):
p = self.GetPosition()
p -= self.listctrl._get_origin_offset()
return p
def _resolve_index(self):
p = self._resolve_position()
try:
i, flags = self.listctrl.HitTest(p)
except TypeError:
# unpack non-sequence
return
return i
def ContextMenu(self, event):
self.listctrl.DoPopup(self._resolve_position())
def LeftDClick(self, event):
i = self._resolve_index()
if i is None:
return
e = wx.ListEvent(wx.wxEVT_COMMAND_LIST_ITEM_ACTIVATED)
e.m_itemIndex = i
self.listctrl.ProcessEvent(e)
def LeftDown(self, event):
i = self._resolve_index()
if i is None:
return
if not event.ControlDown():
if event.ShiftDown():
if '__WXMSW__' in wx.PlatformInfo:
self.listctrl.DeselectAll()
f = self.listctrl.GetFocusedItem()
if f > -1:
for j in xrange(min(i,f), max(i,f)):
self.listctrl.Select(j)
self.listctrl.Select(f)
else:
self.listctrl.DeselectAll()
self.listctrl.Select(i)
self.listctrl.SetFocus()
self.listctrl.Focus(i)
class NullGauge(object):
def NullMethod(*a):
pass
__init__ = NullMethod
def __getattr__(self, attr):
return self.NullMethod
class SimpleDownloadGauge(ListCtrlPassThrough, ScaledBufferMixin, wx.Window):
def __init__(self, parent,
completed_color=None,
remaining_color=None,
border_color=None,
border=True,
size=(0,0),
top_line=True,
**k):
original = {
"smooth": True,
"border color": wx.NamedColour("light gray"),
"completed color": wx.Colour(0, 230, 50),
"line color" : wx.Colour(0, 178, 39),
"remaining color": wx.NamedColour("white"),
"transferring color": wx.NamedColour("yellow"),
"missing color": wx.NamedColour("red"),
"rare colors": [wx.Colour(235, 235, 255),
wx.Colour(215, 215, 255),
wx.Colour(195, 195, 255),
wx.Colour(175, 175, 255),
wx.Colour(155, 155, 255),
wx.Colour(135, 135, 255),
wx.Colour(115, 115, 255),
wx.Colour(95, 95, 255),
wx.Colour(75, 75, 255),
wx.Colour(55, 55, 255),
wx.Colour(50, 50, 255)]
}
new_green = {
"smooth": True,
"border color": wx.Colour(111, 111, 111),
"completed color": wx.Colour(14, 183, 19),
"line color" : wx.Colour(255, 255, 0),
"remaining color": wx.NamedColour("white"),
"transferring color": wx.Colour(94, 243, 99),
"missing color": wx.Colour(255, 0, 0),
"rare colors": [wx.Colour(185, 185, 185),
wx.Colour(195, 195, 195),
wx.Colour(205, 205, 205),
wx.Colour(215, 215, 215),
wx.Colour(225, 225, 225),
wx.Colour(235, 235, 235),
wx.Colour(245, 245, 245),
wx.Colour(255, 255, 255)]
}
new_blue = {
"smooth": True,
"border color": wx.NamedColour("light gray"),
"completed color": wx.NamedColour("blue"),
"line color" : wx.NamedColour("blue"),
"remaining color": wx.NamedColour("white"),
"transferring color": wx.NamedColour("yellow"),
"missing color": wx.Colour(255, 0, 0),
"rare colors": [wx.Colour(185, 185, 185),
wx.Colour(195, 195, 195),
wx.Colour(205, 205, 205),
wx.Colour(215, 215, 215),
wx.Colour(225, 225, 225),
wx.Colour(235, 235, 235),
wx.Colour(245, 245, 245),
wx.Colour(255, 255, 255)]
}
self.gauge_theme = new_green
wx.Window.__init__(self, parent, size=size, **k)
#wx.Gauge.__init__(self, parent, 0, 10000, style=wx.GA_SMOOTH)
ListCtrlPassThrough.__init__(self, parent)
if border_color == None:
border_color = self.gauge_theme["border color"]
if completed_color == None:
completed_color = self.gauge_theme["completed color"]
if remaining_color == None:
remaining_color = self.gauge_theme["remaining color"]
self.completed_color = completed_color
self.remaining_color = remaining_color
self.border_color = border_color
self.border = border
self.line_color = self.gauge_theme["line color"]
self.top_line = top_line
self.smoother = wx.BitmapFromImage(
wx.GetApp().theme_library.get(("progressbar",)))
self.percent = None
ScaledBufferMixin.__init__(self)
def invalidate(self):
pass
def SetValue(self, value, state=None, data=None, redraw=True):
#wx.Gauge.SetValue(self, value * 10000)
if value != self.percent:
self.percent = value
self.redraw()
def OnPaint(self, event):
dc = wx.PaintDC(self)
rect = self.GetClientRect()
if self.border:
dc.SetPen(wx.Pen(self.border_color))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(0, 0, rect.width, rect.height)
rect = wx.Rect(rect.x + 1, rect.y + 1,
rect.width - 2, rect.height - 2)
_ScaleBlit(self.buffer, dc, rect)
def draw(self, dc, size):
srect = wx.Rect(0, 0, size.width, size.height)
self.draw_bar(dc, srect)
# dear god, I hope it's smooth
if self.gauge_theme["smooth"]:
dc.SetClippingRegion(srect.x, srect.y, srect.width, srect.height)
_ScaleBlit(self.smoother, dc,
wx.Rect(0, 0, srect.width, srect.height))
# top-line
if self.top_line and self.percent is not None:
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.SetPen(wx.Pen(self.line_color))
line_width = 1
# top:
line_position = 0
# middle:
#line_position = (srect.height) // 2
# bottom:
#line_position = srect.height - line_width
dc.DrawRectangle(srect.x, line_position,
srect.width * self.percent, line_width)
dc.SetPen(wx.Pen(self.border_color))
dc.DrawRectangle(srect.x + srect.width * self.percent, line_position,
srect.width, line_width)
def draw_bar(self, dc, rect):
if self.percent == None:
return 0
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(wx.Brush(self.remaining_color))
dc.DrawRectangle(rect.x, rect.y,
rect.width, rect.height)
dc.SetBrush(wx.Brush(self.completed_color))
dc.DrawRectangle(rect.x, rect.y,
rect.width * self.percent, rect.height)
return 0
REFRESH_MAX_SEC = 3
class FancyDownloadGauge(SimpleDownloadGauge):
def __init__(self, *args, **kwargs):
self.resolution = 1000
self.grouped = DictWithLists()
self.missing_known = False
self.last_time = bttime()
self.last_update = -1
SimpleDownloadGauge.__init__(self, *args, **kwargs)
self.transfering_color = self.gauge_theme["transferring color"]
self.missing_color = self.gauge_theme["missing color"]
self.SetValue(None, redraw=False)
def gradient(self, v):
if v == 0:
if self.missing_known:
c = self.missing_color
else:
c = self.gauge_theme["rare colors"][0]
else:
v = min(v, len(self.gauge_theme["rare colors"]))
c = self.gauge_theme["rare colors"][v - 1]
return c
def invalidate(self):
self.last_time = 0
def SetValue(self, percent, state = None, data = None, redraw=True):
# only draw if progress moved .01% or it's been REFRESH_MAX_SEC seconds
if self.percent != None:
if (percent < (self.percent + 0.0001) and
bttime() < (self.last_time + REFRESH_MAX_SEC)):
return
self.last_time = bttime()
if not redraw:
return
p_dirty = False
if self.percent != percent:
p_dirty = True
self.percent = percent
missing_known = state == "running"
if self.missing_known != missing_known:
p_dirty = True
self.missing_known = missing_known
if not data:
# no data. allow future SetValues to continue passing
# until we get something
self.last_time = 0 - REFRESH_MAX_SEC
# draw an empty bar
data = (0, -1, {})
length, update, piece_states = data
self.resolution = length
if p_dirty or update != self.last_update:
self.grouped = piece_states
self.redraw()
self.last_update = update
def draw_bar(self, dc, rect):
# size events can catch this
if self.percent is None:
return
y1 = rect.y
w = rect.width
h = rect.height
if self.resolution <= 0:
return
# sort, so we get 0...N, h, t
keys = self.grouped.keys()
keys.sort()
for k in keys:
v = self.grouped[k]
if k == 'h':
c = self.completed_color
elif k == 't':
c = self.transfering_color
else:
c = self.gradient(k)
dc.SetPen(wx.TRANSPARENT_PEN)
dc.SetBrush(wx.Brush(c))
def draw(b, e):
b = float(b)
e = float(e)
r = float(self.resolution)
x1 = (b / r) * w
x2 = (e / r) * w
# stupid floats
x1 = int(rect.x + x1)
x2 = int(rect.x + x2)
dc.DrawRectangle(x1, y1,
x2 - x1, h)
if isinstance(v, SparseSet):
for (b, e) in v.iterrange():
draw(b, e)
elif isinstance(v, dict):
for b in v.iterkeys():
draw(b, b + 1)
elif isinstance(v, set):
#for b in v:
# draw(b, b + 1)
# maybe this is better? (fewer rectangles)
l = list(v)
l.sort()
for (b, e) in collapse(l):
draw(b, e)
else:
# assumes sorted!
for (b, e) in collapse(v):
draw(b, e)
class ModerateDownloadGauge(FancyDownloadGauge):
def __init__(self, parent,
completed_color=None,
remaining_color=None,
border_color=None,
border=True,
size=(0,0),
top_line=False,
*args, **kwargs):
FancyDownloadGauge.__init__(self, parent,
completed_color=completed_color,
remaining_color=remaining_color,
border_color=border_color,
border=border,
size=size,
top_line=top_line,
*args, **kwargs)
self.resolution = 1000
def sort(a,b):
if isinstance(a, str) and isinstance(b, str) : return cmp(a,b)
elif isinstance(a, int) and isinstance(b, int) : return cmp(b,a)
elif isinstance(a, str): return -1
elif isinstance(b, str): return 1
sort = staticmethod(sort)
def SetValue(self, value, state=None, data=None, redraw=True):
if data is not None:
sorted_data = {}
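            # Repack the positional piece states into contiguous runs: count the
            # pieces in each availability bucket (skipping ones already in the
            # have/transferring sets) and lay the buckets out side by side, so the
            # bar shows proportions rather than piece positions.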
length, update, piece_states = data
self.resolution = length
keys = piece_states.keys()
keys.sort(self.sort)
pos = 0
h = piece_states.get('h', SparseSet())
t = piece_states.get('t', set())
t = list(t)
t.sort()
have_trans_sparse_set = h + t
for k in keys:
p = piece_states[k]
if k in ('h', 't'):
count = len(p)
else:
count = 0
# OW
for i in p:
if i not in have_trans_sparse_set:
count += 1
if not count:
continue
newpos = pos+count
s = SparseSet()
s.add(pos, newpos)
sorted_data[k] = s
pos = newpos
data = (length, update, sorted_data)
FancyDownloadGauge.SetValue(self, value, state, data, redraw)
```
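
The `draw_bar()` methods above rely on `BTL.Lists.collapse` to fold sorted piece indices into contiguous `(begin, end)` runs so the gauge draws one rectangle per run instead of one per piece. As a rough, standalone sketch of that assumed behaviour (`collapse` itself is defined in BTL, not here):

```python
# Stand-in sketch of the run-length folding the gauges rely on; the real
# helper is BTL.Lists.collapse, which is assumed to behave like this.
def collapse_runs(sorted_indices):
    runs = []
    for i in sorted_indices:
        if runs and runs[-1][1] == i:
            runs[-1] = (runs[-1][0], i + 1)   # extend the current run
        else:
            runs.append((i, i + 1))           # start a new half-open run
    return runs

if __name__ == '__main__':
    print(collapse_runs([0, 1, 2, 5, 6, 9]))  # [(0, 3), (5, 7), (9, 10)]
```

Each run then maps to pixels exactly as the nested `draw()` helper does: `x1 = begin / resolution * width`, `x2 = end / resolution * width`.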
#### File: BitTorrent/GUI_wx/__init__.py
```python
from __future__ import division
import os
import sys
try:
import wxversion
except:
pass
else:
# doesn't work in py2exe
try:
wxversion.select('2.6')
except:
pass
import wx
import wx.grid
import wxPython
from BTL.translation import _
from BitTorrent.platform import image_root
import BTL.stackthreading as threading
from BTL.defer import ThreadedDeferred
import bisect
vs = wxPython.__version__
min_wxpython = "2.6"
assert vs >= min_wxpython, _("wxPython version %s or newer required") % min_wxpython
assert 'unicode' in wx.PlatformInfo, _("The Unicode versions of wx and wxPython are required")
text_wrappable = wx.__version__[4] >= '2'
profile = False
if profile:
from BTL.profile import Profiler, Stats
prof_file_name = 'ui.mainloop.prof'
def gui_wrap(_f, *args, **kwargs):
wx.the_app.CallAfter(_f, *args, **kwargs)
SPACING = 8 # default pixels between widgets
PORT_RANGE = 5 # how many ports to try
WILDCARD = _("Torrent files (*.torrent)|*.torrent|"\
"All files (*.*)|*.*")
def get_theme_root(theme_name):
for t in (theme_name, 'default'):
td = os.path.join(image_root, 'themes', t)
if os.path.exists(td):
return td
def list_themes():
def _lt():
themes = []
tr = os.path.join(image_root, 'themes')
ld = os.listdir(tr)
for d in ld:
if os.path.isdir(os.path.join(tr, d)):
themes.append(d)
return themes
df = ThreadedDeferred(None, _lt, daemon=True)
df.start()
return df
class ImageLibrary(object):
def __init__(self, image_root):
self.image_root = image_root
self._data = {}
def resolve_filename(self, key, size=None, base=None, ext='.png'):
if base is None:
base = self.image_root
name = os.path.join(base, *key)
name = os.path.abspath(name)
if size is not None:
sized_name = name + '_%d' % size + ext
if os.path.exists(sized_name):
name = sized_name
else:
name += ext
else:
name += ext
name = os.path.abspath(name)
if not os.path.exists(name):
raise IOError(2, "No such file or directory: %r" % name)
return name
def get(self, key, size=None, base=None, ext='.png'):
if self._data.has_key((key, size)):
return self._data[(key, size)]
name = self.resolve_filename(key, size, base, ext)
i = wx.Image(name, wx.BITMAP_TYPE_PNG)
if not i.Ok():
raise Exception("The image is not valid: %r" % name)
self._data[(key, size)] = i
return i
class ThemeLibrary(ImageLibrary):
def __init__(self, themes_root, theme_name):
self.themes_root = themes_root
for t in (theme_name, 'default'):
image_root = os.path.join(themes_root, 'themes', t)
if os.path.exists(image_root):
self.theme_name = t
ImageLibrary.__init__(self, image_root)
return
raise IOError("default theme path must exist: %r" % image_root)
def resolve_filename(self, key, size=None, base=None, ext='.png'):
try:
return ImageLibrary.resolve_filename(self, key, size, base, ext)
except Exception, e:
default_base = os.path.join(self.themes_root, 'themes', 'default')
return ImageLibrary.resolve_filename(self, key, size,
base=default_base,
ext=ext)
class XSizer(wx.BoxSizer):
notfirst = wx.ALL
direction = wx.HORIZONTAL
def __init__(self, **k):
wx.BoxSizer.__init__(self, self.direction)
def Add(self, widget, proportion=0, flag=0, border=SPACING):
flag = flag | self.notfirst
wx.BoxSizer.Add(self, widget, proportion=proportion, flag=flag, border=border)
def AddFirst(self, widget, proportion=0, flag=0, border=SPACING):
flag = flag | wx.ALL
self.Add(widget, proportion=proportion, flag=flag, border=border)
class VSizer(XSizer):
notfirst = wx.BOTTOM|wx.LEFT|wx.RIGHT
direction = wx.VERTICAL
class HSizer(XSizer):
notfirst = wx.BOTTOM|wx.RIGHT|wx.TOP
direction = wx.HORIZONTAL
class LabelValueFlexGridSizer(wx.FlexGridSizer):
def __init__(self, parent_widget, *a, **k):
wx.FlexGridSizer.__init__(self, *a, **k)
self.parent_widget = parent_widget
def add_label(self, label):
h = ElectroStaticText(self.parent_widget, label=label)
f = h.GetFont()
f.SetWeight(wx.FONTWEIGHT_BOLD)
h.SetFont(f)
self.Add(h)
def add_value(self, value, dotify=False):
t = ElectroStaticText(self.parent_widget, id=wx.ID_ANY, label="",
dotify=dotify)
self.Add(t, flag=wx.FIXED_MINSIZE|wx.GROW)
t.SetLabel(value)
return t
def add_pair(self, label, value, dotify_value=False):
self.add_label(label)
t = self.add_value(value, dotify=dotify_value)
return t
class ElectroStaticText(wx.StaticText):
def __init__(self, parent, id=wx.ID_ANY, label='', dotify=False):
wx.StaticText.__init__(self, parent, id, label)
self.label = label
self._string = self.label
if dotify:
self.Bind(wx.EVT_PAINT, self.DotifyOnPaint)
def SetLabel(self, label):
if label != self.label:
self.label = label
self._string = self.label
wx.StaticText.SetLabel(self, self.label)
def dotdotdot(self, label, width, max_width):
label_reverse = label[::-1]
beginning_values = self.dc.GetPartialTextExtents(label)
ending_values = self.dc.GetPartialTextExtents(label_reverse)
halfwidth = (width - self.dc.GetTextExtent("...")[0]) / 2
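        # keep roughly half of the available width from the start of the label
        # and half from the end, with "..." spliced into the middle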
beginning = bisect.bisect_left(beginning_values, halfwidth)
ending = bisect.bisect_left(ending_values, halfwidth)
if ending > 0:
string = label[:beginning] + "..." + label[(0 - ending):]
else:
string = label[:beginning] + "..."
return string
def DotifyOnPaint(self, event):
self.dc = wx.PaintDC(self)
self.dc.SetFont(self.GetFont())
width = self.GetSize().width
str_width = self.dc.GetTextExtent(self._string)[0]
max_width = self.dc.GetTextExtent(self.label)[0]
if width >= max_width:
self._string = self.label
elif width != str_width:
string = self.dotdotdot(self.label, width, max_width)
self._string = string
wx.StaticText.SetLabel(self, self._string)
event.Skip()
class ElectroStaticBitmap(wx.Window):
def __init__(self, parent, bitmap=None, *a, **k):
wx.Window.__init__(self, parent, *a, **k)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.bitmap = None
if bitmap:
self.SetBitmap(bitmap)
else:
self.SetSize((0, 0))
def SetBitmap(self, bitmap):
self.bitmap = bitmap
w, h = self.bitmap.GetWidth(), self.bitmap.GetHeight()
self.SetSize((w, h))
self.SetMinSize((w, h))
def OnPaint(self, event):
dc = wx.PaintDC(self)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
if self.bitmap:
dc.DrawBitmap(self.bitmap, 0, 0, True)
def GetSize(self):
if self.bitmap:
return wx.Size(self.bitmap.GetWidth(), self.bitmap.GetHeight())
else:
return 0, 0
class Validator(wx.TextCtrl):
valid_chars = '1234567890'
minimum = None
maximum = None
cast = int
def __init__(self, parent, option_name, config, setfunc):
wx.TextCtrl.__init__(self, parent)
self.option_name = option_name
self.config = config
self.setfunc = setfunc
self.SetValue(str(config[option_name]))
self.SetBestFittingSize((self.width,-1))
self.Bind(wx.EVT_CHAR, self.text_inserted)
self.Bind(wx.EVT_KILL_FOCUS, self.focus_out)
def get_value(self):
value = None
try:
value = self.cast(self.GetValue())
except ValueError:
pass
return value
def set_value(self, value):
self.SetValue(str(value))
self.setfunc(self.option_name, value)
def focus_out(self, event):
        # guard against the final focus-lost event on wxMac
if self.IsBeingDeleted():
return
value = self.get_value()
if value is None:
self.SetValue(str(self.config[self.option_name]))
if (self.minimum is not None) and (value < self.minimum):
value = self.minimum
if (self.maximum is not None) and (value > self.maximum):
value = self.maximum
self.set_value(value)
def text_inserted(self, event):
key = event.KeyCode()
if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255:
event.Skip()
return
if (self.valid_chars is not None) and (chr(key) not in self.valid_chars):
return
event.Skip()
class IPValidator(Validator):
valid_chars = '1234567890.'
width = 128
cast = str
class PortValidator(Validator):
width = 64
minimum = 1024
maximum = 65535
def add_end(self, end_name):
self.end_option_name = end_name
def set_value(self, value):
self.SetValue(str(value))
self.setfunc(self.option_name, value)
self.setfunc(self.end_option_name, value+PORT_RANGE)
class RatioValidator(Validator):
width = 48
minimum = 0
class MinutesValidator(Validator):
width = 48
minimum = 1
class PathDialogButton(wx.Button):
def __init__(self, parent, gen_dialog, setfunc=None,
label=_("&Browse...")):
wx.Button.__init__(self, parent, label=label)
self.gen_dialog = gen_dialog
self.setfunc = setfunc
self.Bind(wx.EVT_BUTTON, self.choose)
def choose(self, event):
"""Pop up a choose dialog and set the result if the user clicks OK."""
dialog = self.gen_dialog()
result = dialog.ShowModal()
if result == wx.ID_OK:
path = dialog.GetPath()
if self.setfunc:
self.setfunc(path)
class ChooseDirectorySizer(wx.BoxSizer):
def __init__(self, parent, path='', setfunc=None,
editable=True,
dialog_title=_("Choose a folder..."),
button_label=_("&Browse...")):
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
self.parent = parent
self.setfunc = setfunc
self.dialog_title = dialog_title
self.button_label = button_label
self.pathbox = wx.TextCtrl(self.parent, size=(250, -1))
self.pathbox.SetEditable(editable)
self.Add(self.pathbox, proportion=1, flag=wx.RIGHT, border=SPACING)
self.pathbox.SetValue(path)
self.button = PathDialogButton(parent,
gen_dialog=self.dialog,
setfunc=self.set_choice,
label=self.button_label)
self.Add(self.button)
def set_choice(self, path):
self.pathbox.SetValue(path)
if self.setfunc:
self.setfunc(path)
def get_choice(self):
return self.pathbox.GetValue()
def dialog(self):
dialog = wx.DirDialog(self.parent,
message=self.dialog_title,
style=wx.DD_DEFAULT_STYLE|wx.DD_NEW_DIR_BUTTON)
dialog.SetPath(self.get_choice())
return dialog
class ChooseFileSizer(ChooseDirectorySizer):
def __init__(self, parent, path='', setfunc=None,
editable=True,
dialog_title=_("Choose a file..."),
button_label=_("&Browse..."),
wildcard=_("All files (*.*)|*.*"),
dialog_style=wx.OPEN):
ChooseDirectorySizer.__init__(self, parent, path=path, setfunc=setfunc,
editable=editable,
dialog_title=dialog_title,
button_label=button_label)
self.wildcard = wildcard
self.dialog_style = dialog_style
def dialog(self):
directory, file = os.path.split(self.get_choice())
dialog = wx.FileDialog(self.parent,
defaultDir=directory,
defaultFile=file,
message=self.dialog_title,
wildcard=self.wildcard,
style=self.dialog_style)
#dialog.SetPath(self.get_choice())
return dialog
class ChooseFileOrDirectorySizer(wx.BoxSizer):
def __init__(self, parent, path='', setfunc=None,
editable=True,
file_dialog_title=_("Choose a file..."),
directory_dialog_title=_("Choose a folder..."),
file_button_label=_("Choose &file..."),
directory_button_label=_("Choose f&older..."),
wildcard=_("All files (*.*)|*.*"),
file_dialog_style=wx.OPEN):
wx.BoxSizer.__init__(self, wx.VERTICAL)
self.parent = parent
self.setfunc = setfunc
self.file_dialog_title = file_dialog_title
self.directory_dialog_title = directory_dialog_title
self.file_button_label = file_button_label
self.directory_button_label = directory_button_label
self.wildcard = wildcard
self.file_dialog_style = file_dialog_style
self.pathbox = wx.TextCtrl(self.parent, size=(250, -1))
self.pathbox.SetEditable(editable)
self.Add(self.pathbox, flag=wx.EXPAND|wx.BOTTOM, border=SPACING)
self.pathbox.SetValue(path)
self.subsizer = wx.BoxSizer(wx.HORIZONTAL)
self.Add(self.subsizer, flag=wx.ALIGN_RIGHT, border=0)
self.fbutton = PathDialogButton(parent,
gen_dialog=self.file_dialog,
setfunc=self.set_choice,
label=self.file_button_label)
self.subsizer.Add(self.fbutton, flag=wx.LEFT, border=SPACING)
self.dbutton = PathDialogButton(parent,
gen_dialog=self.directory_dialog,
setfunc=self.set_choice,
label=self.directory_button_label)
self.subsizer.Add(self.dbutton, flag=wx.LEFT, border=SPACING)
def set_choice(self, path):
self.pathbox.SetValue(path)
if self.setfunc:
self.setfunc(path)
def get_choice(self):
return self.pathbox.GetValue()
def directory_dialog(self):
dialog = wx.DirDialog(self.parent,
message=self.directory_dialog_title,
style=wx.DD_DEFAULT_STYLE|wx.DD_NEW_DIR_BUTTON)
dialog.SetPath(self.get_choice())
return dialog
def file_dialog(self):
dialog = wx.FileDialog(self.parent,
message=self.file_dialog_title,
defaultDir=self.get_choice(),
wildcard=self.wildcard,
style=self.file_dialog_style)
dialog.SetPath(self.get_choice())
return dialog
class Grid(wx.grid.Grid):
def SetColRenderer(self, col, renderer):
table = self.GetTable()
attr = table.GetAttr(-1, col, wx.grid.GridCellAttr.Col)
if (not attr):
attr = wx.grid.GridCellAttr()
attr.SetRenderer(renderer)
self.SetColAttr(col, attr)
def SetColEditor(self, col, editor):
table = self.GetTable()
attr = table.GetAttr(-1, col, wx.grid.GridCellAttr.Col)
if (not attr):
attr = wx.grid.GridCellAttr()
attr.SetEditor(editor)
self.SetColAttr(col, attr)
class BTMenu(wx.Menu):
"""Base class for menus"""
def __init__(self, *a, **k):
wx.Menu.__init__(self, *a, **k)
def add_item(self, label):
iid = wx.NewId()
self.Append(iid, label)
return iid
def add_check_item(self, label, value=False):
iid = wx.NewId()
self.AppendCheckItem(iid, label)
self.Check(id=iid, check=value)
return iid
class CheckButton(wx.CheckBox):
"""Base class for check boxes"""
def __init__(self, parent, label, main, option_name, initial_value,
extra_callback=None):
wx.CheckBox.__init__(self, parent, label=label)
self.main = main
self.option_name = option_name
self.option_type = type(initial_value)
self.SetValue(bool(initial_value))
self.extra_callback = extra_callback
self.Bind(wx.EVT_CHECKBOX, self.callback)
def callback(self, *args):
if self.option_type is not type(None):
self.main.config[self.option_name] = self.option_type(
not self.main.config[self.option_name])
self.main.setfunc(self.option_name, self.main.config[self.option_name])
if self.extra_callback is not None:
self.extra_callback()
class BTPanel(wx.Panel):
sizer_class = wx.BoxSizer
sizer_args = (wx.VERTICAL,)
def __init__(self, *a, **k):
k['style'] = k.get('style', 0) | wx.CLIP_CHILDREN
wx.Panel.__init__(self, *a, **k)
self.sizer = self.sizer_class(*self.sizer_args)
self.SetSizer(self.sizer)
def Add(self, widget, *a, **k):
self.sizer.Add(widget, *a, **k)
def AddFirst(self, widget, *a, **k):
if hasattr(self.sizer, 'AddFirst'):
self.sizer.AddFirst(widget, *a, **k)
else:
self.sizer.Add(widget, *a, **k)
# Handles quirks in the design of wx. For example, wx.LogWindow is not
# really a window, but this makes it respond to Show() as if it were.
def MagicShow_func(win, show=True):
win.Show(show)
if show:
win.Raise()
class MagicShow:
"""You know, like with a guy pulling rabbits out of a hat"""
def MagicShow(self, show=True):
if hasattr(self, 'magic_window'):
# hackery in case we aren't actually a window
win = self.magic_window
else:
win = self
MagicShow_func(win, show)
class BTDialog(wx.Dialog, MagicShow):
"""Base class for all BitTorrent window dialogs"""
def __init__(self, *a, **k):
wx.Dialog.__init__(self, *a, **k)
if sys.platform == 'darwin':
self.CenterOnParent()
self.SetIcon(wx.the_app.icon)
self.Bind(wx.EVT_KEY_DOWN, self.key)
def key(self, event):
c = event.GetKeyCode()
if c == wx.WXK_ESCAPE:
self.EndModal(wx.ID_CANCEL)
event.Skip()
class BTFrame(wx.Frame, MagicShow):
"""Base class for all BitTorrent window frames"""
def __init__(self, *a, **k):
metal = k.pop('metal', False)
wx.Frame.__init__(self, *a, **k)
if sys.platform == 'darwin' and metal:
self.SetExtraStyle(wx.FRAME_EX_METAL)
self.SetIcon(wx.the_app.icon)
def load_geometry(self, geometry, default_size=None):
if '+' in geometry:
s, x, y = geometry.split('+')
x, y = int(x), int(y)
else:
x, y = -1, -1
s = geometry
if 'x' in s:
w, h = s.split('x')
w, h = int(w), int(h)
else:
w, h = -1, -1
i = 0
if '__WXMSW__' in wx.PlatformInfo:
i = wx.Display.GetFromWindow(self)
d = wx.Display(i)
(x1, y1, x2, y2) = d.GetGeometry()
x = min(x, x2-64)
y = min(y, y2-64)
if (w, h) <= (0, 0) and default_size is not None:
w = default_size.width
h = default_size.height
self.SetDimensions(x, y, w, h, sizeFlags=wx.SIZE_USE_EXISTING)
if (x, y) == (-1, -1):
self.CenterOnScreen()
def _geometry_string(self):
pos = self.GetPositionTuple()
size = self.GetSizeTuple()
g = ''
g += 'x'.join(map(str, size))
if pos > (0,0):
g += '+' + '+'.join(map(str, pos))
return g
def SetTitle(self, title):
if title != self.GetTitle():
wx.Frame.SetTitle(self, title)
class BTFrameWithSizer(BTFrame):
"""BitTorrent window frames with sizers, which are less flexible than normal windows"""
panel_class = BTPanel
sizer_class = wx.BoxSizer
sizer_args = (wx.VERTICAL,)
def __init__(self, *a, **k):
BTFrame.__init__(self, *a, **k)
try:
self.SetIcon(wx.the_app.icon)
self.panel = self.panel_class(self)
self.sizer = self.sizer_class(*self.sizer_args)
self.Add(self.panel, flag=wx.GROW, proportion=1)
self.SetSizer(self.sizer)
except:
self.Destroy()
raise
def Add(self, widget, *a, **k):
self.sizer.Add(widget, *a, **k)
class TaskSingleton(object):
def __init__(self):
self.handle = None
def start(self, t, _f, *a, **kw):
if self.handle:
self.handle.Stop()
self.handle = wx.the_app.FutureCall(t, _f, *a, **kw)
def stop(self):
if self.handle:
self.handle.Stop()
self.handle = None
class BTApp(wx.App):
"""Base class for all wx-based BitTorrent applications"""
def __init__(self, *a, **k):
self.doneflag = threading.Event()
wx.App.__init__(self, *a, **k)
def OnInit(self):
self.running = True
if profile:
try:
os.unlink(prof_file_name)
except:
pass
self.prof = Profiler()
self.prof.enable()
wx.the_app = self
self._DoIterationId = wx.NewEventType()
self.Connect(-1, -1, self._DoIterationId, self._doIteration)
self.evt = wx.PyEvent()
self.evt.SetEventType(self._DoIterationId)
self.event_queue = []
# this breaks TreeListCtrl, and I'm too lazy to figure out why
#wx.IdleEvent_SetMode(wx.IDLE_PROCESS_SPECIFIED)
# this fixes 24bit-color toolbar buttons
wx.SystemOptions_SetOptionInt("msw.remap", 0)
icon_path = os.path.join(image_root, 'bittorrent.ico')
self.icon = wx.Icon(icon_path, wx.BITMAP_TYPE_ICO)
return True
def OnExit(self):
self.running = False
if profile:
self.prof.disable()
st = Stats(self.prof.getstats())
st.sort()
f = open(prof_file_name, 'wb')
st.dump(file=f)
def who(self, _f, a):
if _f.__name__ == "_recall":
if not hasattr(a[0], 'gen'):
return str(a[0])
return a[0].gen.gi_frame.f_code.co_name
return _f.__name__
def _doIteration(self, event):
if self.doneflag.isSet():
# the app is dying
return
_f, a, kw = self.event_queue.pop(0)
## t = bttime()
## print self.who(_f, a)
_f(*a, **kw)
## print self.who(_f, a), 'done in', bttime() - t
## if bttime() - t > 1.0:
## print 'TOO SLOW!'
## assert False
def CallAfter(self, callable, *args, **kw):
"""
Call the specified function after the current and pending event
handlers have been completed. This is also good for making GUI
method calls from non-GUI threads. Any extra positional or
keyword args are passed on to the callable when it is called.
"""
# append (right) and pop (left) are atomic
self.event_queue.append((callable, args, kw))
wx.PostEvent(self, self.evt)
def FutureCall(self, _delay_time, callable, *a, **kw):
return wx.FutureCall(_delay_time, self.CallAfter, callable, *a, **kw)
```
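
`BTFrame.load_geometry` and `_geometry_string` above exchange window geometry as `'WxH+X+Y'` strings, with missing parts treated as -1. The function below is an illustrative, standalone re-statement of that parsing (a hypothetical helper, not something the project exports):

```python
# Illustrative parser for the 'WxH+X+Y' geometry strings used by
# BTFrame.load_geometry; absent fields come back as -1, as in the frame code.
def parse_geometry(geometry):
    x = y = w = h = -1
    if '+' in geometry:
        s, xs, ys = geometry.split('+')
        x, y = int(xs), int(ys)
    else:
        s = geometry
    if 'x' in s:
        ws, hs = s.split('x')
        w, h = int(ws), int(hs)
    return w, h, x, y

if __name__ == '__main__':
    assert parse_geometry("800x600+20+40") == (800, 600, 20, 40)
    assert parse_geometry("800x600") == (800, 600, -1, -1)
    assert parse_geometry("+20+40") == (-1, -1, 20, 40)
```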
#### File: BitTorrent-5.2.2/BitTorrent/platform.py
```python
import os
import sys
import locale
import shutil
import socket
import gettext
import tarfile
import traceback
from BTL.platform import efs, efs2, get_filesystem_encoding
# NOTE: intentionally appears in the file before importing anything
# from BitTorrent because it is called when setting --use_factory_defaults.
def get_temp_dir():
shellvars = ['${TMP}', '${TEMP}']
dir_root = get_dir_root(shellvars, default_to_home=False)
    # this method is preferred over the environment variables
if os.name == 'nt':
try_dir_root = win32api.GetTempPath()
if try_dir_root is not None:
dir_root = try_dir_root
if dir_root is None:
try_dir_root = None
if os.name == 'nt':
# this should basically never happen. GetTempPath always returns something
try_dir_root = r'C:\WINDOWS\Temp'
elif os.name == 'posix':
try_dir_root = '/tmp'
if (try_dir_root is not None and
os.path.isdir(try_dir_root) and
os.access(try_dir_root, os.R_OK|os.W_OK)):
dir_root = try_dir_root
return dir_root
MAX_DIR = 5
_tmp_subdir = None
# NOTE: intentionally appears in the file before importing anything
# from BitTorrent because it is called when setting --use_factory_defaults.
def get_temp_subdir():
"""Creates a unique subdirectory of the platform temp directory.
    This rotates between MAX_DIR directory names, deleting the oldest
    whenever MAX_DIR directories exist. Upon return, the number of temporary
    subdirectories should never exceed MAX_DIR-1. If one has already
    been created for this execution, this returns that subdirectory.
@return the absolute path of the created temporary directory.
"""
global _tmp_subdir
if _tmp_subdir is not None:
return _tmp_subdir
tmp = get_temp_dir()
target = None # holds the name of the directory that will be made.
for i in xrange(MAX_DIR):
subdir = efs2(u"BitTorrentTemp%d" % i)
path = os.path.join(tmp, subdir)
if not os.path.exists(path):
target = path
break
    # target should not normally be None. It can be None if something
    # prevented a directory from being removed on a previous call or if
    # MAX_DIR has changed.
if target is None:
subdir = efs2(u"BitTorrentTemp0")
path = os.path.join(tmp, subdir)
shutil.rmtree( path, ignore_errors = True )
target = path
i = 0
# create the temp dir.
os.mkdir(target)
# delete the oldest directory.
oldest_i = ( i + 1 ) % MAX_DIR
oldest_subdir = efs2(u"BitTorrentTemp%d" % oldest_i)
oldest_path = os.path.join(tmp, oldest_subdir)
if os.path.exists( oldest_path ):
shutil.rmtree( oldest_path, ignore_errors = True )
_tmp_subdir = target
return target
_config_dir = None
def set_config_dir(dir):
"""Set the root directory for configuration information."""
global _config_dir
# Normally we won't set it this way. This is called if the
# --use_factory_defaults command-line option is specfied. By changing
# the config directory early in the initialization we can guarantee
# that the system acts like a fresh install.
_config_dir = dir
# Set configuration directory before importing any other BitTorrent modules.
if "--use_factory_defaults" in sys.argv or "-u" in sys.argv:
temp_dir = get_temp_subdir()
set_config_dir(temp_dir)
import BitTorrent.zurllib as urllib
from BTL import language
from BTL.sparse_set import SparseSet
from BTL.defer import ThreadedDeferred
from BTL.platform import app_name, get_module_filename, is_frozen_exe, get_shell_dir
from BitTorrent import version
if os.name == 'nt':
import pywintypes
import winerror
import _winreg
#import BTL.likewin32api as win32api
import win32api
import win32file
from win32com.shell import shellcon
import win32con
from twisted.python.shortcut import Shortcut
import ctypes
import struct
FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200
FILE_SUPPORTS_SPARSE_FILES = 0x00000040
FSCTL_QUERY_ALLOCATED_RANGES = 0x000940CF
elif os.name == 'posix' and os.uname()[0] == 'Darwin':
has_pyobjc = False
try:
from Foundation import NSBundle
has_pyobjc = True
except ImportError:
pass
try:
import statvfs
except ImportError:
statvfs = None
def get_dir_root(shellvars, default_to_home=True):
def check_sysvars(x):
y = os.path.expandvars(x)
if y != x and os.path.isdir(y):
return y
return None
dir_root = None
for d in shellvars:
dir_root = check_sysvars(d)
if dir_root is not None:
break
else:
if default_to_home:
dir_root = os.path.expanduser('~')
if dir_root == '~' or not os.path.isdir(dir_root):
dir_root = None
if get_filesystem_encoding() == None:
try:
dir_root = dir_root.decode(sys.getfilesystemencoding())
except:
try:
dir_root = dir_root.decode('utf8')
except:
pass
return dir_root
def get_old_dot_dir():
return os.path.join(get_config_dir(), efs2(u'.bittorrent'))
def get_dot_dir():
# So called because on Unix platforms (but not OS X) this returns ~/.bittorrent.
dot_dir = get_old_dot_dir()
new_dot_dir = None
if sys.platform == 'darwin':
new_dot_dir = os.path.join(get_config_dir(), 'Library', 'Application Support', app_name)
elif os.name == 'nt':
new_dot_dir = os.path.join(get_config_dir(), app_name)
if new_dot_dir:
if os.path.exists(dot_dir):
if os.path.exists(new_dot_dir):
count = 0
for root, dirs, files in os.walk(new_dot_dir):
count = len(dirs) + len(files)
break
if count == 0:
shutil.rmtree(new_dot_dir)
shutil.move(dot_dir, new_dot_dir)
else:
shutil.move(dot_dir, new_dot_dir)
dot_dir = new_dot_dir
return dot_dir
old_broken_config_subencoding = 'utf8'
try:
old_broken_config_subencoding = sys.getfilesystemencoding()
except:
pass
os_name = os.name
os_version = None
if os_name == 'nt':
wh = {(1, 4, 0): "95",
(1, 4, 10): "98",
(1, 4, 90): "ME",
(2, 4, 0): "NT",
(2, 5, 0): "2000",
(2, 5, 1): "XP" ,
(2, 5, 2): "2003",
(2, 6, 0): "Vista",
}
class OSVERSIONINFOEX(ctypes.Structure):
_fields_ = [("dwOSVersionInfoSize", ctypes.c_ulong),
("dwMajorVersion", ctypes.c_ulong),
("dwMinorVersion", ctypes.c_ulong),
("dwBuildNumber", ctypes.c_ulong),
("dwPlatformId", ctypes.c_ulong),
("szCSDVersion", ctypes.c_char * 128),
("wServicePackMajor", ctypes.c_ushort),
("wServicePackMinor", ctypes.c_ushort),
("wSuiteMask", ctypes.c_ushort),
("wProductType", ctypes.c_byte),
("wReserved", ctypes.c_byte),
]
class OSVERSIONINFO(ctypes.Structure):
_fields_ = [("dwOSVersionInfoSize", ctypes.c_ulong),
("dwMajorVersion", ctypes.c_ulong),
("dwMinorVersion", ctypes.c_ulong),
("dwBuildNumber", ctypes.c_ulong),
("dwPlatformId", ctypes.c_ulong),
("szCSDVersion", ctypes.c_char * 128),
]
o = OSVERSIONINFOEX()
o.dwOSVersionInfoSize = 156 # sizeof(OSVERSIONINFOEX)
r = ctypes.windll.kernel32.GetVersionExA(ctypes.byref(o))
if r:
win_version_num = (o.dwPlatformId, o.dwMajorVersion, o.dwMinorVersion,
o.wServicePackMajor, o.wServicePackMinor, o.dwBuildNumber)
else:
o = OSVERSIONINFOEX()
o.dwOSVersionInfoSize = 148 # sizeof(OSVERSIONINFO)
r = ctypes.windll.kernel32.GetVersionExA(ctypes.byref(o))
win_version_num = (o.dwPlatformId, o.dwMajorVersion, o.dwMinorVersion,
0, 0, o.dwBuildNumber)
wk = (o.dwPlatformId, o.dwMajorVersion, o.dwMinorVersion)
if wh.has_key(wk):
os_version = wh[wk]
else:
os_version = wh[max(wh.keys())]
sys.stderr.write("Couldn't identify windows version: wk:%s, %s, "
"assuming '%s'\n" % (str(wk),
str(win_version_num),
os_version))
del wh, wk
elif os_name == 'posix':
os_version = os.uname()[0]
app_root = os.path.split(get_module_filename())[0]
doc_root = app_root
osx = False
if os.name == 'posix':
if os.uname()[0] == "Darwin":
doc_root = app_root = app_root.encode('utf8')
if has_pyobjc:
doc_root = NSBundle.mainBundle().resourcePath()
osx = True
def calc_unix_dirs():
appdir = '%s-%s' % (app_name, version)
ip = os.path.join(efs2(u'share'), efs2(u'pixmaps'), appdir)
dp = os.path.join(efs2(u'share'), efs2(u'doc'), appdir)
lp = os.path.join(efs2(u'share'), efs2(u'locale'))
return ip, dp, lp
def no_really_makedirs(path):
# the deal here is, directories like "C:\" exist but can not be created
# (access denied). We check for the exception anyway because of the race
# condition.
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != 17: # already exists
raise
def get_config_dir():
"""a cross-platform way to get user's config directory.
"""
if _config_dir is not None:
return _config_dir
shellvars = ['${APPDATA}', '${HOME}', '${USERPROFILE}']
dir_root = get_dir_root(shellvars)
if dir_root is None and os.name == 'nt':
app_dir = get_shell_dir(shellcon.CSIDL_APPDATA)
if app_dir is not None:
dir_root = app_dir
if dir_root is None and os.name == 'nt':
tmp_dir_root = os.path.split(sys.executable)[0]
if os.access(tmp_dir_root, os.R_OK|os.W_OK):
dir_root = tmp_dir_root
return dir_root
# For string literal subdirectories, starting with unicode and then
# converting to filesystem encoding may not always be necessary, but it seems
# safer to do so. --Dave
image_root = os.path.join(app_root, efs2(u'images'))
locale_root = os.path.join(get_dot_dir(), efs2(u'locale'))
no_really_makedirs(locale_root)
plugin_path = []
internal_plugin = os.path.join(app_root, efs2(u'BitTorrent'),
efs2(u'Plugins'))
local_plugin = os.path.join(get_dot_dir(), efs2(u'Plugins'))
if os.access(local_plugin, os.F_OK):
plugin_path.append(local_plugin)
if os.access(internal_plugin, os.F_OK):
plugin_path.append(internal_plugin)
if not os.access(image_root, os.F_OK) or not os.access(locale_root, os.F_OK):
# we guess that probably we are installed on *nix in this case
# (I have no idea whether this is right or not -- matt)
if app_root[-4:] == '/bin':
# yep, installed on *nix
installed_prefix = app_root[:-4]
image_root, doc_root, locale_root = map(
lambda p: os.path.join(installed_prefix, p), calc_unix_dirs()
)
systemwide_plugin = os.path.join(installed_prefix, efs2(u'lib'),
efs2(u'BitTorrent'))
if os.access(systemwide_plugin, os.F_OK):
plugin_path.append(systemwide_plugin)
if os.name == 'nt':
def GetDiskFreeSpaceEx(s):
if isinstance(s, unicode):
GDFS = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
GDFS = ctypes.windll.kernel32.GetDiskFreeSpaceExA
FreeBytesAvailable = ctypes.c_ulonglong(0)
TotalNumberOfBytes = ctypes.c_ulonglong(0)
TotalNumberOfFreeBytes = ctypes.c_ulonglong(0)
r = GDFS(s,
ctypes.pointer(FreeBytesAvailable),
ctypes.pointer(TotalNumberOfBytes),
ctypes.pointer(TotalNumberOfFreeBytes))
return FreeBytesAvailable.value, TotalNumberOfBytes.value, TotalNumberOfFreeBytes.value
def get_free_space(path):
# optimistic if we can't tell
free_to_user = 2**64
path, file = os.path.split(path)
if os.name == 'nt':
while not os.path.exists(path):
path, top = os.path.split(path)
free_to_user, total, total_free = GetDiskFreeSpaceEx(path)
elif hasattr(os, "statvfs") and statvfs:
s = os.statvfs(path)
free_to_user = s[statvfs.F_BAVAIL] * long(s[statvfs.F_BSIZE])
return free_to_user
def get_sparse_files_support(path):
supported = False
if os.name == 'nt':
drive, path = os.path.splitdrive(os.path.abspath(path))
if len(drive) > 0: # might be a network path
if drive[-1] != '\\':
drive += '\\'
volumename, serialnumber, maxpath, fsflags, fs_name = win32api.GetVolumeInformation(drive)
if fsflags & FILE_SUPPORTS_SPARSE_FILES:
supported = True
return supported
# is there a linux max path?
def is_path_too_long(path):
if os.name == 'nt':
if len(path) > win32con.MAX_PATH:
return True
return False
def is_sparse(path):
supported = get_sparse_files_support(path)
if not supported:
return False
if os.name == 'nt':
return bool(win32file.GetFileAttributes(path) & FILE_ATTRIBUTE_SPARSE_FILE)
return False
def get_allocated_regions(path, f=None, begin=0, length=None):
supported = get_sparse_files_support(path)
if not supported:
return
if os.name == 'nt':
if not os.path.exists(path):
return False
if f is None:
f = file(path, 'r')
handle = win32file._get_osfhandle(f.fileno())
if length is None:
length = os.path.getsize(path) - begin
a = SparseSet()
run = 128
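        # 'run' is how many 16-byte allocated-range entries the output buffer can
        # hold; it is doubled whenever DeviceIoControl reports ERROR_MORE_DATA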
i = begin
end = begin + length
while i < end:
d = struct.pack("<QQ", i, length)
try:
r = win32file.DeviceIoControl(handle, FSCTL_QUERY_ALLOCATED_RANGES,
d, struct.calcsize("<QQ")*run, None)
except pywintypes.error, e:
if e.args[0] == winerror.ERROR_MORE_DATA:
run *= 2
continue
# I've also seen:
# error: (1784, 'DeviceIoControl', 'The supplied user buffer is not valid for the requested operation.')
return
if not r:
break
for c in xrange(0, len(r), 16):
qq = struct.unpack("<QQ", r[c:c+16])
b = qq[0]
e = b + qq[1]
a.add(b, e)
i = max(i, e)
return a
return
def get_max_filesize(path):
fs_name = None
# optimistic if we can't tell
max_filesize = 2**64
if os.name == 'nt':
drive, path = os.path.splitdrive(os.path.abspath(path))
if len(drive) > 0: # might be a network path
if drive[-1] != '\\':
drive += '\\'
volumename, serialnumber, maxpath, fsflags, fs_name = win32api.GetVolumeInformation(drive)
if fs_name == "FAT32":
max_filesize = 2**32 - 1
elif (fs_name == "FAT" or
fs_name == "FAT16"):
# information on this varies, so I chose the description from
# MS: http://support.microsoft.com/kb/q118335/
# which happens to also be the most conservative.
max_clusters = 2**16 - 11
max_cluster_size = 2**15
max_filesize = max_clusters * max_cluster_size
else:
path = os.path.realpath(path)
# not implemented yet
#fsname = crawl_path_for_mount_entry(path)
return fs_name, max_filesize
def get_torrents_dir():
return os.path.join(get_dot_dir(), efs2(u'torrents'))
def get_nebula_file():
return os.path.join(get_dot_dir(), efs2(u'nebula'))
def get_home_dir():
shellvars = ['${HOME}', '${USERPROFILE}']
dir_root = get_dir_root(shellvars)
if (dir_root is None) and (os.name == 'nt'):
dir = get_shell_dir(shellcon.CSIDL_PROFILE)
if dir is None:
            # there's no clear best fallback here.
            # MS discourages you from writing directly in the home dir,
            # and sometimes (e.g. win98) there isn't one
dir = get_shell_dir(shellcon.CSIDL_DESKTOPDIRECTORY)
dir_root = dir
return dir_root
def get_local_data_dir():
if os.name == 'nt':
# this results in paths that are too long
# 86 characters: 'C:\Documents and Settings\Some Guy\Local Settings\Application Data\BitTorrent\incoming'
#return os.path.join(get_shell_dir(shellcon.CSIDL_LOCAL_APPDATA), app_name)
# I'm even a little nervous about this one
return get_dot_dir()
else:
# BUG: there might be a better place to save incomplete files in under OSX
return get_dot_dir()
def get_old_incomplete_data_dir():
incomplete = efs2(u'incomplete')
return os.path.join(get_old_dot_dir(), incomplete)
def get_incomplete_data_dir():
# 'incomplete' is a directory name and should not be localized
incomplete = efs2(u'incomplete')
return os.path.join(get_local_data_dir(), incomplete)
def get_save_dir():
dirname = u'%s Downloads' % unicode(app_name)
dirname = efs2(dirname)
if os.name == 'nt':
d = get_shell_dir(shellcon.CSIDL_PERSONAL)
if d is None:
d = desktop
else:
d = desktop
return os.path.join(d, dirname)
def get_startup_dir():
"""get directory where symlinks/shortcuts to be run at startup belong"""
dir = None
if os.name == 'nt':
dir = get_shell_dir(shellcon.CSIDL_STARTUP)
return dir
def create_shortcut(source, dest, *args):
if os.name == 'nt':
if len(args) == 0:
args = None
path, file = os.path.split(source)
sc = Shortcut(source,
arguments=args,
workingdir=path)
sc.save(dest)
else:
# some other os may not support this, but throwing an error is good since
# the function couldn't do what was requested
os.symlink(source, dest)
# linux also can't do args... maybe we should spit out a shell script?
assert not args
def resolve_shortcut(path):
if os.name == 'nt':
sc = Shortcut()
sc.load(path)
return sc.GetPath(0)[0]
else:
# boy, I don't know
return path
def remove_shortcut(dest):
if os.name == 'nt':
dest += ".lnk"
os.unlink(dest)
def enforce_shortcut(config, log_func):
if os.name != 'nt':
return
path = win32api.GetModuleFileName(0)
if 'python' in path.lower():
        # oops, running the .py; too lazy to make that work
path = r"C:\Program Files\BitTorrent\bittorrent.exe"
root_key = _winreg.HKEY_CURRENT_USER
subkey = r'Software\Microsoft\Windows\CurrentVersion\run'
key = _winreg.CreateKey(root_key, subkey)
if config['launch_on_startup']:
_winreg.SetValueEx(key, app_name, 0, _winreg.REG_SZ,
'"%s" --force_start_minimized' % path)
else:
try:
_winreg.DeleteValue(key, app_name)
except WindowsError, e:
# value doesn't exist
pass
def enforce_association():
if os.name != 'nt':
return
try:
_enforce_association()
except WindowsError:
# access denied. not much we can do.
traceback.print_exc()
def _enforce_association():
INSTDIR, EXENAME = os.path.split(win32api.GetModuleFileName(0))
if 'python' in EXENAME.lower():
        # oops, running the .py; too lazy to make that work
INSTDIR = r"C:\Program Files\BitTorrent"
EXENAME = "bittorrent.exe"
# owie
edit_flags = chr(0x00) + chr(0x00) + chr(0x10) + chr(0x00)
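    # four raw bytes written below as a REG_BINARY EditFlags value
    # (a little-endian DWORD, 0x00100000)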
# lots of wrappers for more direct NSIS mapping
HKCR = _winreg.HKEY_CLASSES_ROOT
HKCU = _winreg.HKEY_CURRENT_USER
def filter_vars(s):
s = s.replace("$INSTDIR", INSTDIR)
s = s.replace("${EXENAME}", EXENAME)
return s
def WriteReg(root_key, subkey, key_name, type, value):
subkey = filter_vars(subkey)
key_name = filter_vars(key_name)
value = filter_vars(value)
# CreateKey opens the key for us and creates it if it does not exist
#key = _winreg.OpenKey(root_key, subkey, 0, _winreg.KEY_ALL_ACCESS)
key = _winreg.CreateKey(root_key, subkey)
_winreg.SetValueEx(key, key_name, 0, type, value)
def WriteRegStr(root_key, subkey, key_name, value):
WriteReg(root_key, subkey, key_name, _winreg.REG_SZ, value)
def WriteRegBin(root_key, subkey, key_name, value):
WriteReg(root_key, subkey, key_name, _winreg.REG_BINARY, value)
def DeleteRegKey(root_key, subkey):
try:
_winreg.DeleteKey(root_key, subkey)
except WindowsError:
# key doesn't exist
pass
## Begin NSIS copy/paste/translate
WriteRegStr(HKCR, '.torrent', "", "bittorrent")
DeleteRegKey(HKCR, r".torrent\Content Type")
    # This line makes it so that BT sticks around as an option
# after installing some other default handler for torrent files
WriteRegStr(HKCR, r".torrent\OpenWithProgids", "bittorrent", "")
# this prevents user-preference from generating "Invalid Menu Handle" by looking for an app
# that no longer exists, and instead points it at us.
WriteRegStr(HKCU, r"Software\Microsoft\Windows\CurrentVersion\Explorer\FileExts\.torrent", "Application", EXENAME)
WriteRegStr(HKCR, r"Applications\${EXENAME}\shell", "", "open")
WriteRegStr(HKCR, r"Applications\${EXENAME}\shell\open\command", "", r'"$INSTDIR\${EXENAME}" "%1"')
# Add a mime type
WriteRegStr(HKCR, r"MIME\Database\Content Type\application/x-bittorrent", "Extension", ".torrent")
# Add a shell command to match the 'bittorrent' handler described above
WriteRegStr(HKCR, "bittorrent", "", "TORRENT File")
WriteRegBin(HKCR, "bittorrent", "EditFlags", edit_flags)
# make us the default handler for bittorrent://
WriteRegBin(HKCR, "bittorrent", "URL Protocol", chr(0x0))
WriteRegStr(HKCR, r"bittorrent\Content Type", "", "application/x-bittorrent")
WriteRegStr(HKCR, r"bittorrent\DefaultIcon", "", r"$INSTDIR\${EXENAME},0")
WriteRegStr(HKCR, r"bittorrent\shell", "", "open")
## ReadRegStr $R1 HKCR "bittorrent\shell\open\command" ""
## StrCmp $R1 "" continue
##
## WriteRegStr HKCR "bittorrent\shell\open\command" "backup" $R1
##
## continue:
WriteRegStr(HKCR, r"bittorrent\shell\open\command", "", r'"$INSTDIR\${EXENAME}" "%1"')
# Add a shell command to handle torrent:// stuff
WriteRegStr(HKCR, "torrent", "", "TORRENT File")
WriteRegBin(HKCR, "torrent", "EditFlags", edit_flags)
# make us the default handler for torrent://
WriteRegBin(HKCR, "torrent", "URL Protocol", chr(0x0))
WriteRegStr(HKCR, r"torrent\Content Type", "", "application/x-bittorrent")
WriteRegStr(HKCR, r"torrent\DefaultIcon", "", "$INSTDIR\${EXENAME},0")
WriteRegStr(HKCR, r"torrent\shell", "", "open")
## ReadRegStr $R1 HKCR "torrent\shell\open\command" ""
## WriteRegStr HKCR "torrent\shell\open\command" "backup" $R1
WriteRegStr(HKCR, r"torrent\shell\open\command", "", r'"$INSTDIR\${EXENAME}" "%1"')
def btspawn(cmd, *args):
ext = ''
if is_frozen_exe:
ext = '.exe'
path = os.path.join(app_root, cmd+ext)
if not os.access(path, os.F_OK):
if os.access(path+'.py', os.F_OK):
path = path+'.py'
args = [path] + list(args) # $0
spawn(*args)
def spawn(*args):
if os.name == 'nt':
# do proper argument quoting since exec/spawn on Windows doesn't
bargs = args
args = []
for a in bargs:
            if not a.startswith("/"):
                # escape embedded quotes before wrapping the argument in quotes
                a = a.replace('"', '\\"')
                a = '"%s"' % a
args.append(a)
argstr = ' '.join(args[1:])
# use ShellExecute instead of spawn*() because we don't want
# handles (like the controlsocket) to be duplicated
win32api.ShellExecute(0, "open", args[0], argstr, None, 1) # 1 == SW_SHOW
else:
if os.access(args[0], os.X_OK):
forkback = os.fork()
if forkback == 0:
# BUG: stop IPC!
print "execl ", args[0], args
os.execl(args[0], *args)
else:
#BUG: what should we do here?
pass
def language_path():
dot_dir = get_dot_dir()
lang_file_name = os.path.join(dot_dir, efs(u'data')[0],
efs(u'language')[0])
return lang_file_name
def get_language(name):
from BTL import LOCALE_URL
url = LOCALE_URL + name + ".tar.gz"
socket.setdefaulttimeout(5)
r = urllib.urlopen(url)
# urllib seems to ungzip for us
tarname = os.path.join(locale_root, name + ".tar")
f = file(tarname, 'wb')
f.write(r.read())
f.close()
tar = tarfile.open(tarname, "r")
for tarinfo in tar:
tar.extract(tarinfo, path=locale_root)
tar.close()
##def smart_gettext_translation(domain, localedir, languages, fallback=False):
## try:
## t = gettext.translation(domain, localedir, languages=languages)
## except Exception, e:
## for lang in languages:
## try:
## get_language(lang)
## except Exception, e:
## #print "Failed on", lang, e
## pass
## t = gettext.translation(domain, localedir, languages=languages,
## fallback=fallback)
## return t
def blocking_smart_gettext_and_install(domain, localedir, languages,
fallback=False, unicode=False):
try:
t = gettext.translation(domain, localedir, languages=languages,
fallback=fallback)
except Exception, e:
# if we failed to find the language, fetch it from the web
running_count = 0
running_deferred = {}
# Get some reasonable defaults for arguments that were not supplied
if languages is None:
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
if 'C' not in languages:
languages.append('C')
# now normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in gettext._expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
languages = nelangs
for lang in languages:
# HACK
if lang.startswith('en'):
continue
if lang.startswith('C'):
continue
try:
get_language(lang)
except: #urllib.HTTPError:
pass
t = gettext.translation(domain, localedir,
languages=languages,
fallback=True)
t.install(unicode)
def smart_gettext_and_install(domain, localedir, languages,
fallback=False, unicode=False):
try:
t = gettext.translation(domain, localedir, languages=languages,
fallback=fallback)
except Exception, e:
# if we failed to find the language, fetch it from the web async-style
running_count = 0
running_deferred = {}
# Get some reasonable defaults for arguments that were not supplied
if languages is None:
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
if 'C' not in languages:
languages.append('C')
# now normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in gettext._expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
languages = nelangs
for lang in languages:
d = ThreadedDeferred(None, get_language, lang)
def translate_and_install(r, td=d):
running_deferred.pop(td)
# only let the last one try to install
if len(running_deferred) == 0:
t = gettext.translation(domain, localedir,
languages=languages,
fallback=True)
t.install(unicode)
def failed(e, tlang=lang, td=d):
if td in running_deferred:
running_deferred.pop(td)
# don't raise an error, just continue untranslated
sys.stderr.write('Could not find translation for language "%s"\n' %
tlang)
#traceback.print_exc(e)
d.addCallback(translate_and_install)
d.addErrback(failed)
# accumulate all the deferreds first
running_deferred[d] = 1
# start them all, the last one finished will install the language
for d in running_deferred:
d.start()
return
# install it if we got it the first time
t.install(unicode)
def _gettext_install(domain, localedir=None, languages=None, unicode=False):
# gettext on win32 does not use locale.getdefaultlocale() by default
# other os's will fall through and gettext.find() will do this task
if os_name == 'nt':
# this code is straight out of gettext.find()
if languages is None:
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
# this is the important addition - since win32 does not typically
# have any environment variable set, append the default locale before 'C'
languages.append(locale.getdefaultlocale()[0])
if 'C' not in languages:
languages.append('C')
# we call the smart version, because anyone calling this needs it
# before they can continue. yes, we do block on network IO. there is no
# alternative (installing post-startup causes already loaded strings not
# to be re-loaded)
blocking_smart_gettext_and_install(domain, localedir,
languages=languages,
unicode=unicode)
def read_language_file():
"""Reads the language file. The language file contains the
name of the selected language, not any translations."""
lang = None
if os.name == 'nt':
# this pulls user-preference language from the installer location
try:
regko = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\BitTorrent")
lang_num = _winreg.QueryValueEx(regko, "Language")[0]
lang_num = int(lang_num)
lang = language.locale_sucks[lang_num]
except:
pass
else:
lang_file_name = language_path()
if os.access(lang_file_name, os.F_OK|os.R_OK):
mode = 'r'
if sys.version_info >= (2, 3):
mode = 'U'
lang_file = open(lang_file_name, mode)
lang_line = lang_file.readline()
lang_file.close()
if lang_line:
lang = ''
for i in lang_line[:5]:
if not i.isalpha() and i != '_':
break
lang += i
if lang == '':
lang = None
return lang
def write_language_file(lang):
"""Writes the language file. The language file contains the
name of the selected language, not any translations."""
if lang != '': # system default
get_language(lang)
if os.name == 'nt':
regko = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, "Software\\BitTorrent")
if lang == '':
_winreg.DeleteValue(regko, "Language")
else:
lcid = None
# I want two-way dicts
for id, code in language.locale_sucks.iteritems():
if code.lower() == lang.lower():
lcid = id
break
if not lcid:
raise KeyError(lang)
_winreg.SetValueEx(regko, "Language", 0, _winreg.REG_SZ, str(lcid))
else:
lang_file_name = language_path()
lang_file = open(lang_file_name, 'w')
lang_file.write(lang)
lang_file.close()
def install_translation(unicode=False):
languages = None
try:
lang = read_language_file()
if lang is not None:
languages = [lang, ]
except:
#pass
traceback.print_exc()
_gettext_install('bittorrent', locale_root, languages=languages, unicode=unicode)
def write_pid_file(fname, errorfunc = None):
"""Creates a pid file on platforms that typically create such files;
otherwise, this returns without doing anything. The fname should
not include a path. The file will be placed in the appropriate
platform-specific directory (/var/run in linux).
"""
assert type(fname) == str
assert errorfunc == None or callable(errorfunc)
if os.name == 'nt': return
try:
pid_fname = os.path.join(efs2(u'/var/run'),fname)
file(pid_fname, 'w').write(str(os.getpid()))
except:
try:
pid_fname = os.path.join(efs2(u'/etc/tmp'),fname)
file(pid_fname, 'w').write(str(os.getpid()))
except:
if errorfunc:
errorfunc("Couldn't open pid file. Continuing without one.")
else:
pass # just continue without reporting warning.
desktop = None
if os.name == 'nt':
desktop = get_shell_dir(shellcon.CSIDL_DESKTOPDIRECTORY)
else:
homedir = get_home_dir()
if homedir == None :
desktop = '/tmp/'
else:
desktop = homedir
if os.name in ('mac', 'posix'):
tmp_desktop = os.path.join(homedir, efs2(u'Desktop'))
if os.access(tmp_desktop, os.R_OK|os.W_OK):
desktop = tmp_desktop + os.sep
```
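Both gettext installers above resolve the language list the same way: scan the standard locale environment variables, split on ':', guarantee a 'C' entry, then expand each code through gettext's internal `_expand_lang`. A minimal standalone sketch of that resolution step (the sample environment dict is made up for illustration):

```python
import gettext
import os

def resolve_languages(environ=os.environ):
    # mirror the env-var scan used by blocking_smart_gettext_and_install above
    languages = []
    for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
        val = environ.get(envar)
        if val:
            languages = val.split(':')
            break
    if 'C' not in languages:
        languages.append('C')
    # normalize and expand each code into its fallback variants
    nelangs = []
    for lang in languages:
        for nelang in gettext._expand_lang(lang):
            if nelang not in nelangs:
                nelangs.append(nelang)
    return nelangs

if __name__ == '__main__':
    # hypothetical environment, for illustration only
    print resolve_languages({'LANGUAGE': 'pt_BR:en'})
```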
#### File: BitTorrent-5.2.2/BitTorrent/RTTMonitor2.py
```python
debug = False
#debug = True
import os
import Queue
import socket
import itertools
import random
from pprint import pprint
from BTL.platform import bttime
import BTL.stackthreading as threading
from BTL.HostIP import get_host_ip, get_host_ips
from BitTorrent.platform import spawn, app_root
#from twisted.web.xmlrpc import Proxy
if os.name == 'nt':
import win32icmp
IP_TTL_EXPIRED_TRANSIT = 11013
IP_SUCCESS = 0
def daemon_thread(target, args=()):
t = threading.Thread(target=target, args=args)
t.setDaemon(True)
return t
def in_common(routes):
"""routes is a list of lists, each containing a route to a peer."""
# Greg: This is a little weird. Nodes appear in the queue in
# increasing order of TTL among nodes in the path to
# a given IP. However, there is no guarantee of ordering
# between IP's. As a result, a closer branching
# may be missed if the traceroute following that branch
# is delayed longer than the traceroutes to other IPs.
# --Dave
r = []
branch = False
for n in itertools.izip(*routes):
# strip dead nodes
f = [i for i in n if i !='*']
# ignore all dead nodes
if len(f) == 0:
continue
if len(set(f)) == 1:
r.append(f[0])
else:
# more than one unique node, the tree has branched
branch = True
break
return (branch, r)
class RTTMonitorBase(object):
def __init__(self, new_rtt=None):
self.instantanious_rtt = None
def f(rtt):
pass
if new_rtt:
self.new_rtt = new_rtt
else:
self.new_rtt = f
def set_nodes_restart(self, nodes):
pass
def get_current_rtt(self):
return self.instantanious_rtt
# someday I'll write this using twisted. --<NAME>
#class RTTMonitorUnix(RTTMonitorBase):
import xmlrpclib # blech. Better with twisted, but RTTMonitorWin32
# was already written to handle blocking Icmp calls.
class Icmp(object):
"""Implements ICMP."""
def create_file(self):
return 0
def ping(self, fid, addr, ttl, timeout):
# returns addr, status, rtt.
pass
def close(self,fid):
pass
class UnixIcmp(Icmp):
def __init__(self, external_add_task, port):
assert callable(external_add_task) # rawserver's
assert type(port) in (int,long) and port > 0 and port <= 65535
#pid = os.spawnl(os.P_NOWAIT, "xicmp", str(port))
print "Spawning xicmp on port ", port
xicmp = os.path.join( app_root, "icmp", "xicmp" )
spawn( xicmp, str(port) )
def _start_proxy(port):
self.proxy = xmlrpclib.ServerProxy('http://localhost:%d' % port)
external_add_task(4.0, _start_proxy, port) # allow time to spawn.
def create_file(self):
return self.proxy.IcmpCreateFile()
def ping(self, fid, addr, ttl, timeout):
try:
return self.proxy.IcmpSendEcho( fid, addr, ttl, timeout )
except xmlrpclib.Fault:
return None
def close(self,fid):
#print "UnixIcmp: close: fid=", fid
return self.proxy.IcmpCloseHandle( fid )
class Options(object):
pass
class Win32Icmp(Icmp):
def __init__(self):
self.o = Options()
def create_file(self):
return win32icmp.IcmpCreateFile()
def ping(self, fid, addr, ttl, timeout):
self.o.Ttl = ttl
return win32icmp.IcmpSendEcho(fid, addr, None, self.o, timeout)
def close(self,fid):
win32icmp.IcmpCloseHandle(fid)
def _set_min(x, y):
if x is None:
return y
if y is None:
return x
return min(x, y)
_set_max = max
#class RTTMonitorWin32(RTTMonitorBase):
class RTTMonitorBlocking(RTTMonitorBase):
def __init__(self, new_rtt, icmp, interval = 0.5, timeout = 6.0 ):
"""
@param new_rtt called every time a ping arrives.
@param icmp is the ICMP implementation.
@param timeout (currently uses a static timeout threshold)
"""
assert callable(new_rtt)
assert isinstance(icmp, Icmp)
self.icmp = icmp
self.nodes = []
self.timeout = int(1000 * timeout) # in ms.
self.interval = interval
self.stop_event = threading.Event()
self.abort_traceroute = threading.Event()
self.finished_event = threading.Event()
# the thread is finished because it hasn't started
self.finished_event.set()
RTTMonitorBase.__init__(self, new_rtt)
def set_nodes_restart(self, nodes):
if debug:
pprint( "set_nodes_restart: nodes=%s" % str(nodes))
self.nodes = []
for node in nodes:
self.add_node(node)
t = threading.Thread(target=self.run, args=(list(self.nodes),))
t.setDaemon(True)
t.start()
def add_node(self, node):
self.nodes.append(node)
def get_route(self, q, dst):
try:
dst = socket.gethostbyname(dst)
self.traceroute(dst, self.timeout, lambda n : q.put((dst, n)))
except socket.gaierror:
# if hostbyname lookup fails, it's not a node we can use
# maybe this should be a warning or something, but a downed
# internet connection will cause a lot of these
pass
def run(self, nodes):
q = Queue.Queue()
dst = None
# handy for hard-coding common node
#dst = '192.168.127.12'
if not dst:
threads = []
for i in nodes:
t = daemon_thread(target=self.get_route, args=(q, i, ))
threads.append(t)
t.start()
waiter_done_event = threading.Event()
def waiter(threads, waiter_done_event):
try:
for thread in threads:
thread.join() # blocks until thread terminates.
except Exception, e:
print "waiter hiccupped", e
waiter_done_event.set()
waiting_thread = daemon_thread(target=waiter,
args=(threads, waiter_done_event, ))
waiting_thread.start()
common = []
routes = {}
#print "tracerouting..."
hop_check = 0 # distance (in hops) being checked for branch.
hop_cnt = {} # number responses received at the given distance.
farthest_possible = 1000 # farthest distance possible as
# determined by the shortest number of
# hops to a node in the passed nodes.
branch = False
while not waiter_done_event.isSet():
try:
msg = q.get(True, 1.0)
except Queue.Empty:
pass
else:
dst = msg[0]
# nodes appear in the queue in
# increasing order of TTL.
new_node = msg[1]
if dst not in routes:
l = []
routes[dst] = l
else:
l = routes[dst]
l.append(new_node)
#print "calling in_common with ", routes.values()
# BEGIN replaces in_common
#hops_so_far = len(routes[dst])
## It is not possible for the common path to be longer
## than the closest node.
#if dst == new_node and hops_so_far < farthest_possible:
# farthest_possible = hops_so_far
#if hop_cnt.has_key(hops_so_far):
# hop_cnt[hops_so_far] += 1
#else:
# hop_cnt[hops_so_far] = 1
#
#if hops_so_far == hop_check:
# # if got all pings for a given distance then see if
# # there is a branch.
# while hop_cnt[hop_check] == len(nodes) and \
# hop_check <= farthest_possible:
# n = None
# for r in routes:
# if n is not None and n != routes[d]:
# branch = True
# break
# if routes[hop_check] != '*':
# n = routes[hop_check]
# else:
# common.append(n)
# hop_check += 1
# if hop_check > farthest_possible:
# branch = True
## END
branch, common = in_common(routes.values())
if branch:
break
#print "done tracerouting..."
self.abort_traceroute.set()
waiter_done_event.wait()
self.abort_traceroute.clear()
local_ips = get_host_ips()
new_common = []
for c in common:
if c not in local_ips:
new_common.append(c)
common = new_common
if debug:
pprint(common)
if len(common) == 0:
# this should be inspected, it's not a simple debug message
if debug:
print "No common node", routes
return
del routes
dst = common[-1]
# kill the old thread
self.stop_event.set()
# wait for it to finish
self.finished_event.wait()
# clear to indicate we're running
self.finished_event.clear()
self.stop_event.clear()
if debug:
print "Farthest common hop [%s]" % dst
# Ping a representative peer but set the ttl to the length of the
# common path so that the farthest common hop responds with
# ICMP time exceeded. (Some routers will send time exceeded
# messages, but they won't respond to ICMP pings directly)
representative = nodes[random.randrange(0, len(nodes))]
if debug:
print "pinging representative %s ttl=%d" % (
representative,len(common))
try:
while not self.stop_event.isSet():
start = bttime()
rtt = self.ping(representative, 5000, ttl=len(common))
self.instantanious_rtt = rtt
delta = bttime() - start
self.stop_event.wait(self.interval - delta)
if debug: print "RTTMonitor.py: new_rtt %s" % rtt
self.new_rtt(self.instantanious_rtt)
except Exception, e:
import traceback
traceback.print_exc()
print "ABORTING", e
self.finished_event.set()
def traceroute(self, dst, timeout, report=None):
"""If report is None then this returns the route as a list of IP
addresses. If report is not None then this calls report as each
node is discovered in the path (e.g., if there are 6 hops in the
path then report gets called 6 times)."""
if debug:
print "Tracing route to [%s]" % dst
i = self.icmp.create_file()
o = Options()
route = None
if report == None:
route = []
def add_node(node):
route.append(node)
report = add_node
for ttl in xrange(64):
try:
if ttl == 0:
addr = get_host_ip()
status = -1
rtt = 0
else:
addr, status, rtt = self.icmp.ping(i,dst,ttl,timeout)
if debug:
print "ttl", ttl, "\t", rtt, "ms\t", addr
report(addr)
if status == IP_SUCCESS:
if debug:
print "Traceroute complete in", ttl, "hops"
break
except Exception, e:
report('*')
if debug:
print "Hop", ttl, "failed:", str(e)
if self.abort_traceroute.isSet():
break
self.icmp.close(i)
if route:
return route
def ping(self, dst, timeout, ttl = None):
"""Returns ICMP echo round-trip time to dst or returns None if a
timeout occurs. timeout is measured in milliseconds.
The TTL is useful if the caller wants to ping the router that
is a number of hops in the direction of the dst, e.g., when a
router will not respond to pings directly but will generate
ICMP Time Exceeded messages."""
i = self.icmp.create_file()
rtt = None
try:
addr, status, rtt = self.icmp.ping(i, dst, ttl, timeout)
if debug:
if status == IP_SUCCESS:
print "Reply from", addr + ":", "time=" + str(rtt)
elif status == IP_TTL_EXPIRED_TRANSIT:
print "Ping ttl expired %d: from %s time=%s" %(
status, str(addr), str(rtt))
else:
print "Ping failed", status
except Exception, e:
if debug:
print "Ping failed:", str(e)
self.icmp.close(i)
return rtt
#if os.name == 'nt':
# RTTMonitor = RTTMonitorWin32
#else:
# RTTMonitor = RTTMonitorUnix
RTTMonitor = RTTMonitorBlocking
```
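To make the `in_common` logic above concrete, here is a self-contained sketch that reproduces its zip-and-compare behaviour on hypothetical traceroute output; the common prefix is kept until two live probes disagree:

```python
import itertools

def in_common(routes):
    # walk all routes in lock-step, one hop distance at a time
    common = []
    branch = False
    for hop in itertools.izip(*routes):
        live = [ip for ip in hop if ip != '*']
        if not live:
            continue                    # every probe timed out at this distance
        if len(set(live)) == 1:
            common.append(live[0])      # all live probes still agree
        else:
            branch = True               # paths diverge here
            break
    return branch, common

if __name__ == '__main__':
    # hypothetical routes to two peers that share the first two hops
    routes = [
        ['192.168.0.1', '10.0.0.1', '198.51.100.7', '198.51.100.9'],
        ['192.168.0.1', '*',        '203.0.113.5',  '203.0.113.8'],
    ]
    print in_common(routes)   # -> (True, ['192.168.0.1', '10.0.0.1'])
```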
#### File: BitTorrent-5.2.2/BitTorrent/uplatform.py
```python
from BitTorrent import platform
from BTL.platform import efs2
#get_filesystem_encoding = platform.get_filesystem_encoding
#encode_for_filesystem = platform.encode_for_filesystem
decode_from_filesystem = platform.decode_from_filesystem # used by the path helpers below
#set_config_dir = platform.set_config_dir
calc_unix_dirs = platform.calc_unix_dirs
get_free_space = platform.get_free_space
get_sparse_files_support = platform.get_sparse_files_support
is_path_too_long = platform.is_path_too_long
is_sparse = platform.is_sparse
get_allocated_regions = platform.get_allocated_regions
get_max_filesize = platform.get_max_filesize
create_shortcut = platform.create_shortcut
remove_shortcut = platform.remove_shortcut
enforce_shortcut = platform.enforce_shortcut
enforce_association = platform.enforce_association
btspawn = platform.btspawn
spawn = platform.spawn
#get_language = platform.get_language
smart_gettext_and_install = platform.smart_gettext_and_install
#read_language_file = platform.read_language_file
#write_language_file = platform.write_language_file
#install_translation = platform.install_translation
write_pid_file = platform.write_pid_file
#old_open = open
#def open(name, mode='r'):
# return old_open(efs2(name), mode)
#
#
#def language_path():
# return decode_from_filesystem(platform.language_path())
#
#def get_dir_root(shellvars, default_to_home=True):
# return decode_from_filesystem(
# platform.get_dir_root(shellvars, default_to_home))
def get_temp_dir():
return decode_from_filesystem(platform.get_temp_dir())
def get_temp_subdir():
return decode_from_filesystem(platform.get_temp_subdir())
#def get_config_dir():
# return decode_from_filesystem(platform.get_config_dir())
#
#def get_old_dot_dir():
# return decode_from_filesystem(platform.get_old_dot_dir())
#
#def get_dot_dir():
# return decode_from_filesystem(platform.get_dot_dir())
#
#def get_cache_dir():
# return decode_from_filesystem(platform.get_cache_dir())
def get_home_dir():
return decode_from_filesystem(platform.get_home_dir())
def get_local_data_dir():
return decode_from_filesystem(platform.get_local_data_dir())
def get_old_incomplete_data_dir():
return decode_from_filesystem(platform.get_old_incomplete_data_dir())
def get_incomplete_data_dir():
return decode_from_filesystem(platform.get_incomplete_data_dir())
def get_save_dir():
return decode_from_filesystem(platform.get_save_dir())
def get_shell_dir(value):
return decode_from_filesystem(platform.get_shell_dir(value))
def get_startup_dir():
return decode_from_filesystem(platform.get_startup_dir())
```
#### File: BitTorrent-5.2.2/BTL/asyncexecutor.py
```python
from twisted.python.threadpool import ThreadPool
class AsyncExecutor(object):
""" defaults to minthreads=5, maxthreads=20 """
pool = ThreadPool( name = 'AsyncExecutorPool')
def _execute(self, func, *args, **kwargs):
if not self.pool.started:
self.pool.start()
self.pool.dispatch(None, func, *args, **kwargs)
execute = classmethod(_execute)
stop = pool.stop
def test():
import random
import time
def test(digit):
print 'Testing %d' % digit
time.sleep(random.randint(1, 5000)/1000)
print ' finished with test %d' % digit
for i in xrange(10):
AsyncExecutor.execute(test, i)
AsyncExecutor.stop()
if __name__ == '__main__':
test()
```
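Typical use of the executor above is fire-and-forget dispatch onto the shared pool. A short sketch under the assumption that the module is importable as `BTL.asyncexecutor`; the worker function and URLs are made up:

```python
from BTL.asyncexecutor import AsyncExecutor

def fetch(url):
    # placeholder worker; a real caller would do blocking network IO here
    print 'fetching', url

# execute() lazily starts the shared twisted ThreadPool, then queues the call
for u in ('http://example.com/a', 'http://example.com/b'):
    AsyncExecutor.execute(fetch, u)

# stop() shuts the shared pool down
AsyncExecutor.stop()
```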
#### File: BitTorrent-5.2.2/BTL/bdistutils.py
```python
import sys, os, shutil
from distutils import core
if sys.platform == "win32":
setup = core.setup
# no need to import anything further. None of the remaining
# functionality is available in windows.
else:
import pwd
from distutils.sysconfig import get_python_lib
import distutils.sysconfig
from stat import S_IMODE, S_IRUSR, S_IXUSR, S_IRGRP, S_IXGRP, S_IROTH, S_IXOTH
from daemon import getuid_from_username, getgid_from_username
from daemon import getgid_from_groupname
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class SetupException(Exception):
pass
def getuid_for_path(path):
return os.stat(path).st_uid
def seteugid_to_login():
"""set effective user id and effective group id to the user and group ids
of the user logged into this terminal."""
uid = pwd.getpwnam(os.getlogin())[2] # search /etc/passwd for uid and
gid = pwd.getpwnam(os.getlogin())[3] # gid of user logged into this
# terminal.
os.setegid(gid)
os.seteuid(uid) # Is there a better way? --Dave
def get_svn_change_code():
"""Returns the svn repository's date and revision number for the current
working directory. The returned string has the format 'YYYY_MM_DD_revXXXX'
where XXXX is the revision number."""
def to_dict(lines):
# FRAGILE XXX
splitted = [l.split(':') for l in lines]
pairs = [(s[0].strip(), ':'.join(s[1:]).strip()) for s in splitted]
d = dict(pairs)
return d
# returns date and revision number
d = to_dict(os.popen("svn info").readlines())
url = d["URL"]
revision = int(d["Last Changed Rev"])
date = d["Last Changed Date"]
date = date.split(' ')[0] # keep only "YYYY-MM-DD"
date = "_".join(date.split('-')) # replace dash with underscore
date_rev = "%s_rev%.4d" % (date,revision)
return date_rev
def get_cdv_change_code():
# cdv won't run on the dev machines as root. nfs does not allow
# root access to mounted drives. --Dave
if os.getuid() == 0 and getuid_for_path(".") != 0:
seteugid_to_login()
# fragile. XXXX
l = os.popen("cdv history -c 1").readlines()[0].split(" ")
if os.getuid() == 0:
os.seteuid(0)
#os.setegid(oldgid)
l = [x.strip() for x in l if x.strip() != ''] # remove empty strings.
x,code,x,x,x,x,dow,mo,dom,t,y = l
month = "%.2d" % (months.index(mo)+1)
dom = "%.2d" % int(dom) # single digit day of month like 3 becomes 03
t = "_".join(t.split(':')) # convert ':' to underscores in time.
return y+"_"+month+"_"+dom+"_"+t+"_"+code
def get_install_prefix( appname ):
"""Generates directory name /opt/appname_YYYY_MM_DD_revXXXX"""
# fragile. XXXX
#change = get_cdv_change_code()
change = get_svn_change_code()
path = os.path.join("/opt", appname+"_"+change)
return os.path.normpath(path)
def get_unique_install_prefix( appname ):
"""Generates a directory name /opt/appname_YYYY_MM_DD_revXX or
/opt/appname_YYYY_MM_DD_revXX_vVVV if the prior exists.
VVV is a counter that is incremented with each install of
the distribution with the same svn change code.
Unlike get_install_prefix, this does not assume that cdv exists
on the system, but instead assumes there is a version.txt
file in the distribution root directory containing the cdv change
date and code information. This file is created in the install
directory whenever bdistutils is run with the installdev option."""
vfile = os.path.join(sys.path[0], "version.txt")
if not os.path.exists(vfile):
raise SetupException( "Cannot derive install prefix from cdv change date "
"code, because there is no version.txt file in the "
"root of the distribution tree." )
cfp = open(vfile, 'r')
change_str = cfp.readline().strip()
prefix = os.path.join("/opt", appname+"_"+change_str)
while os.path.exists(prefix):
path, name = os.path.split(prefix)
code_or_cnt = prefix.split("_")[-1]
if code_or_cnt[0] == 'v':
cnt = int(code_or_cnt[1:])
cnt += 1
prefix = "_".join(prefix.split("_")[:-1])
else:
cnt = 1
prefix = "%s_v%03.f" % (prefix, cnt)
return os.path.normpath(prefix)
def setup( **kwargs ):
"""site-specific setup.
If sys.argv[1] is not installdev then this behaves
as python's distutils.core.setup.
If sys.argv[1] is installdev then this installs into a
directory like:
/opt/Mitte_2006_10_16_14_39_51_78a5
The date and time is the commit time for this version in the svn repository
and 78a5 is the code for the version in svn.
Also creates a symbolic link like /opt/mitte pointing to
/opt/Mitte_2006_10_16_14_39_51_78a5.
"""
name = kwargs['name']
# setup doesn't like kwargs it doesn't know.
destname = kwargs.get('destname', name)
if kwargs.has_key('destname'): del kwargs['destname']
username = kwargs.get('username',None)
if kwargs.has_key('username'): del kwargs['username']
groupname = kwargs.get('groupname',None)
if kwargs.has_key('groupname'): del kwargs['groupname']
symlinks = kwargs.get('symlinks',None)
if kwargs.has_key('symlinks'): del kwargs['symlinks']
installdev=False
installprod = False
old_prefix = None
if len(sys.argv)>1 and sys.argv[1] == "force-installdev":
# force install simply installs in a new directory.
sys.prefix = get_unique_install_prefix(destname)
distutils.sysconfig.PREFIX=sys.prefix
print "get_unique_install_prefix returned sys.prefix=", sys.prefix
installdev = True
sys.argv[1] = "install"
# determine old install directory.
if os.path.exists( os.path.join("/opt/",destname) ):
old_prefix = os.path.realpath(os.path.join("/opt/", destname))
old_prefix = os.path.split(old_prefix)[0]
elif len(sys.argv)>1 and sys.argv[1] == "installdev":
installdev=True
sys.argv[1] = "install"
# create change code file.
code = get_svn_change_code()
if code:
# may fail if root and destination is nfs mounted.
try:
cfp = open(os.path.join(sys.path[0],"version.txt"), 'w')
cfp.write( code )
cfp.close()
except IOError:
# try again as login username.
old_uid = os.geteuid()
seteugid_to_login()
cfp = open(os.path.join(sys.path[0],"version.txt"), 'w')
cfp.write( code )
cfp.close()
os.seteuid(old_uid) # require root access to install into /opt or python site-packages.
# determine install directory
sys.prefix = get_install_prefix(destname)
distutils.sysconfig.PREFIX=sys.prefix
if os.path.exists(sys.prefix):
raise SetupException( "This code revision has already been installed %s."
" If you want to install it again then move the "
"existing directory or use force-installdev." % sys.prefix )
# determine old install directory.
if os.path.exists( os.path.join("/opt/",destname) ):
old_prefix = os.path.realpath(os.path.join("/opt/", destname))
old_prefix = os.path.split(old_prefix)[0]
if len(sys.argv)>1 and sys.argv[1] == "install":
# building with root privilege can fail if the destination of the
# build is nfs mounted.
sys.argv[1] = "build"
try:
# try as root if I am root.
core.setup(**kwargs)
except:
# try using login username
old_uid = os.geteuid()
seteugid_to_login()
core.setup(**kwargs)
os.seteuid(old_uid)
sys.argv[1] = "install"
try:
core.setup(**kwargs)
except OSError:
# perms error on nfs mount, retry using login username
old_uid = os.geteuid()
seteugid_to_login()
core.setup(**kwargs)
os.seteuid(old_uid)
if installdev:
print "installdev is True."
# shortened the directory path.
#long_path = os.path.join(sys.path[0], "build", "lib", name)
long_path = os.path.join(sys.prefix, "lib", "python2.4", "site-packages", name)
print "long_path=",long_path
dest = os.path.join(sys.prefix,name)
print "dest=", dest
if os.path.exists(long_path):
print "copytree from ", long_path, " to ", dest
shutil.copytree(long_path,dest)
#shutil.rmtree(os.path.join(sys.prefix, "lib" ))
# copy all files not in packages into /opt.
for f in os.listdir('.'):
if f == "build": continue
if f == ".cdv": continue
if f == ".svn": continue
if f == "lib": continue
if not os.path.exists( os.path.join(sys.prefix,f)):
if os.path.isdir(f):
shutil.copytree(f,os.path.join(sys.prefix,f),False)
else:
shutil.copyfile(f,os.path.join(sys.prefix,f))
# create symlink from /opt/blah to /opt/blah_YYYY_MM_DD_HH:MM:SS_code
link_to = sys.prefix
symlnk = os.path.join( '/opt', destname )
print "removing symlink from", symlnk
if os.path.islink(symlnk):
print "removing", symlnk
os.remove(symlnk)
print "creating symlink", symlnk, "to", link_to
os.symlink(link_to, symlnk)
if username:
uid = getuid_from_username(username)
else:
uid = -1
if groupname:
gid = getgid_from_groupname(groupname)
elif username:
gid = getgid_from_username(username)
else:
gid = -1
# recursively change owner and group name of install directory.
## Turns out that this is a bad idea. The account in which the
## service runs should not own its own install directory, because
## it could modify its own code.
#if uid != -1 or gid != -1:
# os.chown(sys.prefix,uid,gid)
# dirs = os.walk(sys.prefix)
# for path, dirnames, filenames in dirs:
# for dir in dirnames:
# os.chown(os.path.join(path, dir),uid,gid)
# for fname in filenames:
# os.chown(os.path.join(path, fname),uid,gid)
# make world readable and make directories world cd'able (i.e., world executable)
dirs = os.walk(sys.prefix)
for path, dirnames, filenames in dirs:
for dir in dirnames:
dir = os.path.join(path,dir)
mode = os.stat(dir).st_mode
mode = S_IMODE(mode)
mode |= S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH
os.chmod(dir,mode)
for fname in filenames:
fname = os.path.join(path, fname)
mode = os.stat(fname).st_mode
mode |= S_IRUSR | S_IRGRP | S_IROTH
os.chmod(fname, mode)
# create pid dir.
pid_dir = os.path.join("/var/run/", name )
if not os.path.exists(pid_dir):
os.mkdir(pid_dir)
os.chown(pid_dir,uid,gid)
```
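The `YYYY_MM_DD_revXXXX` change code that drives the install prefixes above comes from parsing `svn info` output; a standalone sketch of that parsing step, run against a made-up snippet of output:

```python
def change_code_from_svn_info(lines):
    # same colon-split trick as the to_dict helper in get_svn_change_code
    d = {}
    for l in lines:
        parts = l.split(':')
        d[parts[0].strip()] = ':'.join(parts[1:]).strip()
    revision = int(d["Last Changed Rev"])
    date = d["Last Changed Date"].split(' ')[0]   # keep only "YYYY-MM-DD"
    date = "_".join(date.split('-'))              # dashes -> underscores
    return "%s_rev%.4d" % (date, revision)

if __name__ == '__main__':
    # hypothetical `svn info` output, for illustration only
    sample = [
        "URL: http://svn.example.com/repos/project/trunk",
        "Last Changed Rev: 1234",
        "Last Changed Date: 2007-05-02 14:39:51 -0700 (Wed, 02 May 2007)",
    ]
    print change_code_from_svn_info(sample)   # -> 2007_05_02_rev1234
```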
#### File: BitTorrent-5.2.2/BTL/brpclib.py
```python
import xmlrpclib
from xmlrpclib2 import *
from BTL import brpc
old_PyCurlTransport = PyCurlTransport
class PyCurlTransport(old_PyCurlTransport):
def set_connection_params(self, h):
h.add_header('User-Agent', "brpclib.py/1.0")
h.add_header('Connection', "Keep-Alive")
h.add_header('Content-Type', "application/octet-stream")
def _parse_response(self, response):
# read response from input file/socket, and parse it
return brpc.loads(response.getvalue())[0]
# --------------------------------------------------------------------
# request dispatcher
class _Method:
# some magic to bind an B-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args, **kwargs):
args = (args, kwargs)
return self.__send(self.__name, args)
# ARG! prevent repr(_Method()) from submitting an RPC call!
def __repr__(self):
return "<%s instance at 0x%08X>" % (self.__class__, id(self))
# Double underscore is BAD!
class BRPC_ServerProxy(xmlrpclib.ServerProxy):
"""uri [,options] -> a logical connection to an B-RPC server
uri is the connection point on the server, given as
scheme://host/target.
The standard implementation always supports the "http" scheme. If
SSL socket support is available (Python 2.0), it also supports
"https".
If the target part and the slash preceding it are both omitted,
"/RPC2" is assumed.
The following options can be given as keyword arguments:
transport: a transport factory
encoding: the request encoding (default is UTF-8)
All 8-bit strings passed to the server proxy are assumed to use
the given encoding.
"""
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=0):
# establish a "logical" server connection
# get the url
import urllib
type, uri = urllib.splittype(uri)
if type not in ("http", "https"):
raise IOError, "unsupported B-RPC protocol"
self.__host, self.__handler = urllib.splithost(uri)
if not self.__handler:
self.__handler = "/RPC2"
if transport is None:
if type == "https":
transport = xmlrpclib.SafeTransport()
else:
transport = xmlrpclib.Transport()
self.__transport = transport
self.__encoding = encoding
self.__verbose = verbose
self.__allow_none = allow_none
def __request(self, methodname, params):
# call a method on the remote server
request = brpc.dumps(params, methodname, encoding=self.__encoding,
allow_none=self.__allow_none)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
if len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<ServerProxy for %s%s>" %
(self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return _Method(self.__request, name)
def new_server_proxy(url):
c = cache_set.get_cache(PyCURL_Cache, url)
t = PyCurlTransport(c)
return BRPC_ServerProxy(url, transport=t)
ServerProxy = new_server_proxy
if __name__ == '__main__':
s = ServerProxy('https://greg.mitte.bittorrent.com:7080/')
def ping(*a, **kw):
(a2, kw2) = s.ping(*a, **kw)
assert a2 == list(a), '%s list is not %s' % (a2, list(a))
assert kw2 == dict(kw), '%s dict is not %s' % (kw2, dict(kw))
ping(0, 1, 1, name="potato")
ping(0, 1, 1, name="anime")
ping("phish", 0, 1, 1)
ping("games", 0, 1, 1)
```
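The `_Method` dispatcher above is what turns `proxy.examples.getStateName(...)` into a single request: attribute access accumulates the dotted method name, and the call packs positional and keyword arguments into one `(args, kwargs)` tuple. A tiny standalone sketch with a fake send function in place of the real transport:

```python
class _Method:
    # same shape as the dispatcher above: build dotted names, pack args
    def __init__(self, send, name):
        self.__send = send
        self.__name = name
    def __getattr__(self, name):
        return _Method(self.__send, "%s.%s" % (self.__name, name))
    def __call__(self, *args, **kwargs):
        return self.__send(self.__name, (args, kwargs))

def fake_send(methodname, params):
    # a real transport would serialize params with brpc.dumps and POST them
    return methodname, params

if __name__ == '__main__':
    root = _Method(fake_send, 'examples')
    print root.getStateName(41, verbose=True)
    # -> ('examples.getStateName', ((41,), {'verbose': True}))
```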
#### File: BitTorrent-5.2.2/BTL/circular_list.py
```python
import random
class Link(object):
__slots__ = ['prev', 'data', 'next']
def __init__(self, data):
self.prev = self
self.data = data
self.next = self
def __str__(self):
p = id(self.prev)
n = id(self.next)
return 'link:(%s, (%s, %s), %s)' % (p, id(self), self.data, n)
class CircularList(object):
def __init__(self):
self.iter = None
self.link_refs = {} # data: link
def prepend(self, data):
link = Link(data)
assert data not in self.link_refs
self.link_refs[data] = link
if not self.iter:
self.iter = link
else:
self._insert_before(self.iter, link)
def append(self, data):
link = Link(data)
assert data not in self.link_refs
self.link_refs[data] = link
if not self.iter:
self.iter = link
else:
self._insert_after(self.iter, link)
def remove(self, data):
link = self.link_refs.pop(data)
if len(self.link_refs) == 0:
self.iter = None
return
prev = link.prev
next = link.next
assert next is not None and prev is not None
prev.next = next
next.prev = prev
if link == self.iter:
self.iter = next
## stuff I consider to be link-related
########
def _double_link(self, link1, link2):
# was a single item loop, move to a double
assert link1.prev == link1 and link1.next == link1
link1.prev = link2
link1.next = link2
link2.next = link1
link2.prev = link1
def _insert_after(self, link1, link2):
assert link1 != link2
if link1.next == link1:
self._double_link(link1, link2)
else:
link2.next = link1.next
link2.prev = link1
link1.next.prev = link2
link1.next = link2
def _insert_before(self, link1, link2):
assert link1 != link2
if link1.prev == link1:
self._double_link(link1, link2)
else:
link2.prev = link1.prev
link2.next = link1
link1.prev.next = link2
link1.prev = link2
########
def iterator(self):
for i in iter(self):
yield i
def __iter__(self):
if not self.iter:
return
while True:
yield self.iter.data
# someone could remove an item during iteration
if not self.iter:
return
self.iter = self.iter.next
def __len__(self):
return len(self.link_refs)
def __str__(self):
n = len(self.link_refs)
a = []
# don't interrupt iteration for a print
first = self.iter
next = first
while next:
a.append(str(next))
next = next.next
if next.data == first.data:
break
items = '\n'.join(a)
return "iter: %s \n[\n%s\n]" % (self.iter, items)
if __name__ == '__main__':
import time
length = 80000
class ltype(list):
def prepend(self, i):
self.insert(0, i)
from BTL.Lists import QList
class qtype(QList):
def prepend(self, i):
self.append(i)
def iterator(self):
if len(self) == 0:
return
while True:
yield self[0]
if len(self) == 0:
return
self.append(self.popleft())
#CircularList = ltype
#CircularList = qtype
print CircularList
s = time.clock()
l = CircularList()
for i in xrange(length):
l.append(i)
#print l
print 'append ', time.clock() - s
s = time.clock()
l = CircularList()
for i in xrange(length):
l.prepend(i)
#print l
print 'prepend', time.clock() - s
s = time.clock()
l = CircularList()
for i in xrange(length):
if i % 2 == 0:
l.prepend(i)
else:
l.append(i)
#print l
print 'sort ', time.clock() - s
#fair = {}
s = time.clock()
l = CircularList()
it = l.iterator()
for i in xrange(length):
l.prepend(i)
#fair[i] = 0
x = it.next()
#print x, i
#fair[x] += 1
#assert x == i, '%s %s' % (x, i)
#print l
print 'iter ', time.clock() - s
#for k in fair:
# print k, fair[k]
l = CircularList()
print l
l.prepend(0)
print l
l.prepend(1)
print l
l.remove(1)
print l
l.remove(0)
print l
```
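A short usage sketch of the circular list above, assuming the module is importable as `BTL.circular_list`; the iterator keeps cycling over whatever is currently linked in, which is what makes it usable as a simple round-robin scheduler:

```python
from BTL.circular_list import CircularList

peers = CircularList()
for name in ('alice', 'bob', 'carol'):
    peers.prepend(name)      # prepend keeps the cursor on 'alice'

it = peers.iterator()
print it.next()     # alice
print it.next()     # bob
print it.next()     # carol
print it.next()     # alice  -- wraps around

peers.remove('bob') # removal by value; ring is now alice <-> carol
print it.next()     # carol
```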
#### File: BitTorrent-5.2.2/BTL/CMap.py
```python
if __name__ == '__main__':
import sys
sys.path = ['.','..'] + sys.path # HACK to simplify unit testing.
from BTL.translation import _
class BEGIN: # represents special BEGIN location before first next.
pass
from UserDict import DictMixin
from cmap_swig import *
import sys
from weakref import WeakKeyDictionary
LEAK_TEST = False
class CMap(object,DictMixin):
"""In-order mapping. Provides same operations and behavior as a dict,
but provides in-order iteration. Additionally provides operations to
find the nearest key <= or >= a given key.
This provides a significantly wider set of operations than
berkeley db BTrees, but it provides no means for persistence.
LIMITATION: The key must be a python numeric type, e.g., an integer
or a float. The value can be any python object.
Operation: Time Applicable
Complexity: Methods:
---------------------------------------------------
Item insertion: O(log n) append, __setitem__
Item deletion: O(log n + k) __delitem__, erase
Key search: O(log n) __getitem__, get, find,
__contains__
Value search: n/a
Iteration step: amortized O(1), next, prev
worst-case O(log n)
Memory: O(n)
n = number of elements in map. k = number of iterators pointing
into map. CMap assumes there are few iterators in existence at
any given time.
Iterators are not invalidated by insertions. Iterators are
invalidated by deletions only when the key-value pair
referenced is deleted. Deletion has a '+k' because the
__delitem__ searches linearly through the set of iterators
pointing into this map to find any iterator pointing at the
deleted item and then invalidates the iterator.
This class is backed by the C++ STL map class, but conforms
to the Python container interface."""
class _AbstractIterator:
"""Iterates over elements in the map in order."""
def __init__(self, m, si = BEGIN ): # "s.." implies swig object.
"""Creates an iterator pointing to element si in map m.
Do not instantiate directly. Use iterkeys, itervalues, or
iteritems.
The _AbstractIterator takes ownership of any C++ iterator
(i.e., the swig object 'si') and will deallocate it when
the iterator is deallocated.
Examples of typical behavior:
>>> from CMap import *
>>> m = CMap()
>>> m[12] = 6
>>> m[9] = 4
>>> for k in m:
... print int(k)
...
9
12
>>>
Example edge cases (empty map):
>>> from CMap import *
>>> m = CMap()
>>> try:
... i = m.__iter__()
... i.value()
... except IndexError:
... print 'IndexError.'
...
IndexError.
>>> try:
... i.next()
... except StopIteration:
... print 'stopped'
...
stopped
@param map: CMap.
@param node: Node that this iterator will point at. If None
then the iterator points to end(). If BEGIN
then the iterator points to one before the beginning.
"""
assert isinstance(m, CMap)
assert not isinstance(si, CMap._AbstractIterator)
if si == None:
self._si = map_end(m._smap)
else:
self._si = si # C++ iterator wrapped by swig.
self._map = m
m._iterators[self] = 1 # using map as set of weak references.
def __hash__(self):
return id(self)
def __cmp__(self, other):
if not self._si or not other._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN and other._si == BEGIN: return 0
if self._si == BEGIN and other._si != BEGIN: return -1
elif self._si != BEGIN and other._si == BEGIN: return 1
return iter_cmp(self._map._smap, self._si, other._si )
def at_begin(self):
"""equivalent to self == m.begin() where m is a CMap.
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.begin()
>>> i == m.begin()
True
>>> i.at_begin()
True
>>> i == m.end() # no elements so begin()==end()
True
>>> i.at_end()
True
>>> m[6] = 'foo' # insertion does not invalidate iterators.
>>> i = m.begin()
>>> i == m.end()
False
>>> i.value()
'foo'
>>> try: # test at_begin when not at beginning.
... i.next()
... except StopIteration:
... print 'ok'
ok
>>> i.at_begin()
False
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN: # BEGIN is one before begin(). Yuck!!
return False
return map_iter_at_begin(self._map._smap, self._si)
def at_end(self):
"""equivalent to self == m.end() where m is a CMap, but
at_end is faster because it avoids the dynamic memory
allocation in m.end().
>>> from CMap import CMap
>>> m = CMap()
>>> m[6] = 'foo'
>>> i = m.end() # test when at end.
>>> i == m.end()
True
>>> i.at_end()
True
>>> int(i.prev())
6
>>> i.at_end() # testing when not at end.
False
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
return False
return map_iter_at_end(self._map._smap, self._si)
def key(self):
"""@return: the key of the key-value pair referenced by this
iterator.
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
raise IndexError(_("Cannot dereference iterator until after "
"first call to .next."))
elif map_iter_at_end(self._map._smap, self._si):
raise IndexError()
return iter_key(self._si)
def value(self):
"""@return: the value of the key-value pair currently referenced
by this iterator.
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
raise IndexError(_("Cannot dereference iterator until after "
"first call to next."))
elif map_iter_at_end(self._map._smap, self._si):
raise IndexError()
return iter_value(self._si)
def item(self):
"""@return the key-value pair referenced by this iterator.
"""
if not self._si:
raise RuntimeError( _("invalid iterator") )
return self.key(), self.value()
def _next(self):
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
self._si = map_begin(self._map._smap)
if map_iter_at_end(self._map._smap,self._si):
raise StopIteration
return
if map_iter_at_end(self._map._smap,self._si):
raise StopIteration
iter_incr(self._si)
if map_iter_at_end(self._map._smap,self._si):
raise StopIteration
def _prev(self):
if not self._si:
raise RuntimeError( _("invalid iterator") )
if self._si == BEGIN:
raise StopIteration()
elif map_iter_at_begin(self._map._smap, self._si):
self._si = BEGIN
raise StopIteration
iter_decr(self._si)
def __del__(self):
# Python note: if a reference to x is intentionally
# eliminated using "del x" and there are other references
# to x then __del__ does not get called at this time.
# Only when the last reference is deleted by an intentional
# "del" or when the reference goes out of scope does
# the __del__ method get called.
self._invalidate()
def _invalidate(self):
if self._si == None:
return
try:
del self._map._iterators[self]
except KeyError:
pass # could've been removed because weak reference,
# and because _invalidate is called from __del__.
if self._si != BEGIN:
iter_delete(self._si)
self._si = None
def __iter__(self):
"""If the iterator is itself iteratable then we do things like:
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[11] = 'bar'
>>> for x in m.itervalues():
... print x
...
foo
bar
"""
return self
def __len__(self):
return len(self._map)
class KeyIterator(_AbstractIterator):
def next(self):
"""Returns the next key in the map.
Insertion does not invalidate iterators. Deletion only
invalidates an iterator if the iterator pointed at the
key-value pair being deleted.
This is implemented by moving the iterator and then
dereferencing it. If we dereferenced and then moved
then we would get the odd behavior:
Ex: I have keys [1,2,3]. The iterator i points at 1.
print i.next() # prints 1
print i.next() # prints 2
print i.prev() # prints 3
print i.prev() # prints 2
However, because we move and then dereference, when an
iterator is first created it points to nowhere
so that the first next moves to the first element.
Ex:
>>> from CMap import *
>>> m = CMap()
>>> m[5] = 1
>>> m[8] = 4
>>> i = m.__iter__()
>>> print int(i.next())
5
>>> print int(i.next())
8
>>> print int(i.prev())
5
We are still left with the odd behavior that an
iterator cannot be dereferenced until after the first next().
Ex edge cases:
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.__iter__()
>>> try:
... i.prev()
... except StopIteration:
... print 'StopIteration'
...
StopIteration
>>> m[5]='a'
>>> i = m.iterkeys()
>>> int(i.next())
5
>>> try: i.next()
... except StopIteration: print 'StopIteration'
...
StopIteration
>>> int(i.prev())
5
>>> try: int(i.prev())
... except StopIteration: print 'StopIteration'
...
StopIteration
>>> int(i.next())
5
"""
self._next()
return self.key()
def prev(self):
"""Returns the previous key in the map.
See next() for more detail and examples.
"""
self._prev()
return self.key()
class ValueIterator(_AbstractIterator):
def next(self):
"""@return: next value in the map.
>>> from CMap import *
>>> m = CMap()
>>> m[5] = 10
>>> m[6] = 3
>>> i = m.itervalues()
>>> int(i.next())
10
>>> int(i.next())
3
"""
self._next()
return self.value()
def prev(self):
self._prev()
return self.value()
class ItemIterator(_AbstractIterator):
def next(self):
"""@return: next item in the map's key ordering.
>>> from CMap import CMap
>>> m = CMap()
>>> m[5] = 10
>>> m[6] = 3
>>> i = m.iteritems()
>>> k,v = i.next()
>>> int(k)
5
>>> int(v)
10
>>> k,v = i.next()
>>> int(k)
6
>>> int(v)
3
"""
self._next()
return self.key(), self.value()
def prev(self):
self._prev()
return self.key(), self.value()
def __init__(self, d={} ):
"""Instantiate RBTree containing values from passed dict and
ordered based on cmp.
>>> m = CMap()
>>> len(m)
0
>>> m[5]=2
>>> len(m)
1
>>> print m[5]
2
"""
#self._index = {} # to speed up searches.
self._smap = map_constructor() # C++ map wrapped by swig.
for key, value in d.items():
self[key]=value
self._iterators = WeakKeyDictionary()
# whenever node is deleted. search iterators
# for any iterator that becomes invalid.
def __contains__(self,x):
return self.get(x) != None
def __iter__(self):
"""@return: KeyIterator positioned one before the beginning of the
key ordering so that the first next() returns the first key."""
return CMap.KeyIterator(self)
def begin(self):
"""Returns an iterator pointing at first key-value pair. This
differs from iterkeys, itervalues, and iteritems which return an
iterator pointing one before the first key-value pair.
@return: key iterator to first key-value.
>>> from CMap import *
>>> m = CMap()
>>> m[5.0] = 'a'
>>> i = m.begin()
>>> int(i.key()) # raises no IndexError.
5
>>> i = m.iterkeys()
>>> try:
... i.key()
... except IndexError:
... print 'IndexError raised'
...
IndexError raised
"""
i = CMap.KeyIterator(self, map_begin(self._smap) )
return i
def end(self):
"""Returns an iterator pointing after end of key ordering.
The iterator's prev method will move to the last
key-value pair in the ordering. This in keeping with
the notion that a range is specified as [i,j) where
j is not in the range, and the range [i,j) where i==j
is an empty range.
This operation takes O(1) time.
@return: key iterator one after end.
"""
i = CMap.KeyIterator(self,None) # None means one after last node.
return i
def iterkeys(self):
return CMap.KeyIterator(self)
def itervalues(self):
return CMap.ValueIterator(self)
def iteritems(self):
return CMap.ItemIterator(self)
def __len__(self):
return map_size(self._smap)
def __str__(self):
s = "{"
first = True
for k,v in self.items():
if first:
first = False
else:
s += ", "
if type(v) == str:
s += "%s: '%s'" % (k,v)
else:
s += "%s: %s" % (k,v)
s += "}"
return s
def __repr__(self):
return self.__str__()
def __getitem__(self, key):
# IMPL 1: without _index
return map_find(self._smap,key) # raises KeyError if key not found
# IMPL 2: with _index.
#return iter_value(self._index[key])
def __setitem__(self, key, value):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[6] = 'bar'
>>> m[6]
'bar'
>>>
"""
assert type(key) == int or type(key) == float
# IMPL 1. without _index.
map_set(self._smap,key,value)
## IMPL 2. with _index
## If using indices following allows us to perform only one search.
#i = map_insert_iter(self._smap,key,value)
#if iter_value(i) != value:
# iter_set(i,value)
#else: self._index[key] = i
## END IMPL2
def __delitem__(self, key):
"""Deletes the item with matching key from the map.
This takes O(log n + k) where n is the number of elements
in the map and k is the number of iterators pointing into the map.
Before deleting the item it linearly searches through
all iterators pointing into the map and invalidates any that
are pointing at the item about to be deleted.
>>> from CMap import CMap
>>> m = CMap()
>>> m[12] = 'foo'
>>> m[13] = 'bar'
>>> m[14] = 'boo'
>>> del m[12]
>>> try:
... m[12]
... except KeyError:
... print 'ok'
...
ok
>>> j = m.begin()
>>> int(j.next())
14
>>> i = m.begin()
>>> i.value()
'bar'
>>> del m[13] # delete object referenced by an iterator
>>> try:
... i.value()
... except RuntimeError:
... print 'ok'
ok
>>> j.value() # deletion should not invalidate other iterators.
'boo'
"""
#map_erase( self._smap, key ) # map_erase is dangerous. It could
# delete the node causing an iterator
# to become invalid. --Dave
si = map_find_iter( self._smap, key ) # si = swig'd iterator.
if map_iter_at_end(self._smap, si):
iter_delete(si)
raise KeyError(key)
for i in list(self._iterators):
if iter_cmp( self._smap, i._si, si ) == 0:
i._invalidate()
map_iter_erase( self._smap, si )
iter_delete(si)
#iter_delete( self._index[key] ) # IMPL 2. with _index.
#del self._index[key] # IMPL 2. with _index.
def erase(self, iter):
"""Remove item pointed to by the iterator. All iterators that
point at the erased item including the passed iterator
are immediately invalidated after the deletion completes.
>>> from CMap import CMap
>>> m = CMap()
>>> m[12] = 'foo'
>>> i = m.find(12)
>>> m.erase(i)
>>> len(m) == 0
True
"""
if not iter._si:
raise RuntimeError( _("invalid iterator") )
if iter._si == BEGIN:
raise IndexError(_("Iterator does not point at key-value pair" ))
if self is not iter._map:
raise IndexError(_("Iterator points into a different CMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot erase end() iterator.") )
# invalidate iterators.
for i in list(self._iterators):
if iter._si is not i._si and iter_cmp( self._smap, iter._si, i._si ) == 0:
i._invalidate()
# remove item from the map.
map_iter_erase( self._smap, iter._si )
# invalidate last iterator pointing to the deleted location in the map.
iter._invalidate()
def __del__(self):
# invalidate all iterators.
for i in list(self._iterators):
i._invalidate()
map_delete(self._smap)
def get(self, key, default=None):
"""@return value corresponding to specified key or return 'default'
if the key is not found.
"""
try:
return map_find(self._smap,key) # IMPL 1. without _index.
#return iter_value(self._index[key]) # IMPL 2. with _index.
except KeyError:
return default
def keys(self):
"""
>>> from CMap import *
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> [int(x) for x in m.keys()] # m.keys() but guaranteed integers.
[4, 6]
"""
k = []
for key in self:
k.append(key)
return k
def values(self):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> m.values()
[7, 3]
"""
i = self.itervalues()
v = []
try:
while True:
v.append(i.next())
except StopIteration:
pass
return v
def items(self):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> m[6.0] = 3
>>> [(int(x[0]),int(x[1])) for x in m.items()]
[(4, 7), (6, 3)]
"""
i = self.iteritems()
itms = []
try:
while True:
itms.append(i.next())
except StopIteration:
pass
return itms
def has_key(self, key):
"""
>>> from CMap import CMap
>>> m = CMap()
>>> m[4.0] = 7
>>> if m.has_key(4): print 'ok'
...
ok
>>> if not m.has_key(7): print 'ok'
...
ok
"""
try:
self[key]
except KeyError:
return False
return True
def clear(self):
"""delete all entries
>>> from CMap import CMap
>>> m = CMap()
>>> m[4] = 7
>>> m.clear()
>>> print len(m)
0
"""
self.__del__()
self._smap = map_constructor()
def copy(self):
"""return shallow copy"""
return CMap(self)
def lower_bound(self,key):
"""
Finds smallest key equal to or above the lower bound.
Takes O(log n) time.
@param x: Key of (key, value) pair to be located.
@return: Key Iterator pointing to first item equal to or greater
than key, or end() if no such item exists.
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[15] = 'bar'
>>> i = m.lower_bound(11) # iterator.
>>> int(i.key())
15
>>> i.value()
'bar'
Edge cases:
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.lower_bound(11)
>>> if i == m.end(): print 'ok'
...
ok
>>> m[10] = 'foo'
>>> i = m.lower_bound(11)
>>> if i == m.end(): print 'ok'
...
ok
>>> i = m.lower_bound(9)
>>> if i == m.begin(): print 'ok'
...
ok
"""
return CMap.KeyIterator(self, map_lower_bound( self._smap, key ))
def upper_bound(self, key):
"""
Finds largest key equal to or below the upper bound. In keeping
with the [begin,end) convention, the returned iterator
actually points to the key one above the upper bound.
Takes O(log n) time.
@param x: Key of (key, value) pair to be located.
@return: Iterator pointing to first element equal to or greater than
key, or end() if no such item exists.
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[15] = 'bar'
>>> m[17] = 'choo'
>>> i = m.upper_bound(11) # iterator.
>>> i.value()
'bar'
Edge cases:
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.upper_bound(11)
>>> if i == m.end(): print 'ok'
...
ok
>>> m[10] = 'foo'
>>> i = m.upper_bound(9)
>>> i.value()
'foo'
>>> i = m.upper_bound(11)
>>> if i == m.end(): print 'ok'
...
ok
"""
return CMap.KeyIterator(self, map_upper_bound( self._smap, key ))
def find(self,key):
"""
Finds the item with matching key and returns a KeyIterator
pointing at the item. If no match is found then returns end().
Takes O(log n) time.
>>> from CMap import CMap
>>> m = CMap()
>>> i = m.find(10)
>>> if i == m.end(): print 'ok'
...
ok
>>> m[10] = 'foo'
>>> i = m.find(10)
>>> int(i.key())
10
>>> i.value()
'foo'
"""
return CMap.KeyIterator(self, map_find_iter( self._smap, key ))
def update_key( self, iter, key ):
"""
Modifies the key of the item referenced by iter. If the
key change is small enough that no reordering occurs then
this takes amortized O(1) time. If a reordering occurs then
this takes O(log n).
WARNING!!! The passed iterator MUST be assumed to be invalid
upon return and should be deallocated.
Typical use:
>>> from CMap import CMap
>>> m = CMap()
>>> m[10] = 'foo'
>>> m[8] = 'bar'
>>> i = m.find(10)
>>> m.update_key(i,7) # i is assumed to be invalid upon return.
>>> del i
>>> [(int(x[0]),x[1]) for x in m.items()] # reordering occurred.
[(7, 'foo'), (8, 'bar')]
>>> i = m.find(8)
>>> m.update_key(i,9) # no reordering.
>>> del i
>>> [(int(x[0]),x[1]) for x in m.items()]
[(7, 'foo'), (9, 'bar')]
Edge cases:
>>> i = m.find(7)
>>> i.value()
'foo'
>>> try: # update to key already in the map.
... m.update_key(i,9)
... except KeyError:
... print 'ok'
...
ok
>>> m[7]
'foo'
>>> i = m.iterkeys()
>>> try: # updating an iter pointing at BEGIN.
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
>>> i = m.end()
>>> try: # updating an iter pointing at end().
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
"""
assert isinstance(iter,CMap._AbstractIterator)
if iter._si == BEGIN:
raise IndexError( _("Iterator does not point at key-value pair") )
if self is not iter._map:
raise IndexError(_("Iterator points into a different CIndexedMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot update end() iterator.") )
map_iter_update_key(self._smap, iter._si, key)
def append(self, key, value):
"""Performs an insertion with the hint that it probably should
go at the end.
Raises KeyError if the key is already in the map.
>>> from CMap import CMap
>>> m = CMap()
>>> m.append(5.0,'foo') # append to empty map.
>>> len(m)
1
>>> [int(x) for x in m.keys()] # see note (1)
[5]
>>> m.append(10.0, 'bar') # append in-order
>>> [(int(x[0]),x[1]) for x in m.items()]
[(5, 'foo'), (10, 'bar')]
>>> m.append(3.0, 'coo') # out-of-order.
>>> [(int(x[0]),x[1]) for x in m.items()]
[(3, 'coo'), (5, 'foo'), (10, 'bar')]
>>> try:
... m.append(10.0, 'blah') # append key already in map.
... except KeyError:
... print 'ok'
...
ok
>>> [(int(x[0]),x[1]) for x in m.items()]
[(3, 'coo'), (5, 'foo'), (10, 'bar')]
>>>
note (1): int(x[0]) is used because 5.0 can appear as either 5
or 5.0 depending on the version of python.
"""
map_append(self._smap,key,value)
class CIndexedMap(CMap):
"""This is an ordered mapping, exactly like CMap except that it
provides a cross-index allowing average O(1) searches based on value.
This adds the constraint that values must be unique.
Operation: Time Applicable
Complexity: Methods:
---------------------------------------------------
Item insertion: O(log n) append, __setitem__
Item deletion: O(log n + k) __delitem__, erase
Key search: O(log n) __getitem__, get, find,
__contains__
Value search: average O(1) as per dict
Iteration step: amortized O(1), next, prev
worst-case O(log n)
Memory: O(n)
n = number of elements in map. k = number of iterators pointing
into map. CIndexedMap assumes there are few iterators in existence
at any given time.
The hash table increases the factor in the
O(n) memory cost of the Map by a constant
"""
def __init__(self, dict={} ):
CMap.__init__(self,dict)
self._value_index = {} # cross-index. maps value->iterator.
def __setitem__(self, key, value):
"""
>>> from CMap import *
>>> m = CIndexedMap()
>>> m[6] = 'bar'
>>> m[6]
'bar'
>>> int(m.get_key_by_value('bar'))
6
>>> try:
... m[7] = 'bar'
... except ValueError:
... print 'value error'
value error
>>> m[6] = 'foo'
>>> m[6]
'foo'
>>> m[7] = 'bar'
>>> m[7]
'bar'
>>> m[7] = 'bar' # should not raise exception
>>> m[7] = 'goo'
>>> m.get_key_by_value('bar') # should return None.
>>>
"""
assert type(key) == int or type(key) == float
if self._value_index.has_key(value) and \
iter_key(self._value_index[value]) != key:
raise ValueError( _("Value %s already exists. Values must be "
"unique.") % str(value) )
si = map_insert_iter(self._smap,key,value) # si points where insert
# should occur whether
# insert succeeded or not.
# si == "swig iterator"
sival = iter_value(si)
if sival != value: # if insert failed because k already exists
iter_set(si,value) # then force set.
self._value_index[value] = si
viter = self._value_index[sival]
iter_delete(viter) # remove old value from index
del self._value_index[sival]
else: # else insert succeeded so update index.
self._value_index[value] = si
#self._index[key] = si # IMPL 2. with _index.
def __delitem__(self, key):
"""
>>> from CMap import CIndexedMap
>>> m = CIndexedMap()
>>> m[6] = 'bar'
>>> m[6]
'bar'
>>> int(m.get_key_by_value('bar'))
6
>>> del m[6]
>>> if m.get_key_by_value('bar'):
... print 'found'
... else:
... print 'not found.'
not found.
"""
        si = map_find_iter( self._smap, key )
        if map_iter_at_end( self._smap, si ):
            iter_delete(si)
            raise KeyError(key)
        else:
            value = iter_value(si)
            # invalidate any python-level iterators pointing at this item.
            for it in list(self._iterators):
                if iter_cmp( self._smap, it._si, si ) == 0:
                    it._invalidate()
            map_iter_erase( self._smap, si )
            viter = self._value_index[value]
            iter_delete(si)
            iter_delete( viter )
            del self._value_index[value]
#del self._index[key] # IMPL 2. with _index.
assert map_size(self._smap) == len(self._value_index)
def has_value(self, value):
return self._value_index.has_key(value)
def get_key_by_value(self, value):
"""Returns the key cross-indexed from the passed unique value, or
returns None if the value is not in the map."""
si = self._value_index.get(value) # si == "swig iterator"
if si == None: return None
return iter_key(si)
def append( self, key, value ):
"""See CMap.append
>>> from CMap import CIndexedMap
>>> m = CIndexedMap()
>>> m.append(5,'foo')
>>> [(int(x[0]),x[1]) for x in m.items()]
[(5, 'foo')]
>>> m.append(10, 'bar')
>>> [(int(x[0]),x[1]) for x in m.items()]
[(5, 'foo'), (10, 'bar')]
>>> m.append(3, 'coo') # out-of-order.
>>> [(int(x[0]),x[1]) for x in m.items()]
[(3, 'coo'), (5, 'foo'), (10, 'bar')]
>>> int(m.get_key_by_value( 'bar' ))
10
>>> try:
... m.append(10, 'blah') # append key already in map.
... except KeyError:
... print 'ok'
...
ok
>>> [(int(x[0]),x[1]) for x in m.items()]
[(3, 'coo'), (5, 'foo'), (10, 'bar')]
>>> try:
... m.append(10, 'coo') # append value already in map.
... except ValueError:
... print 'ok'
...
ok
"""
if self._value_index.has_key(value) and \
iter_key(self._value_index[value]) != key:
raise ValueError(_("Value %s already exists and value must be "
"unique.") % str(value) )
si = map_append_iter(self._smap,key,value)
if iter_value(si) != value:
iter_delete(si)
raise KeyError(key)
self._value_index[value] = si
def find_key_by_value(self, value):
"""Returns a key iterator cross-indexed from the passed unique value
or end() if no value found.
        >>> from CMap import *
>>> m = CIndexedMap()
>>> m[6] = 'abc'
>>> i = m.find_key_by_value('abc')
>>> int(i.key())
6
>>> i = m.find_key_by_value('xyz')
>>> if i == m.end(): print 'i points at end()'
i points at end()
"""
si = self._value_index.get(value) # si == "swig iterator."
if si != None:
si = iter_copy(si); # copy else operations like increment on the
# KeyIterator would modify the value index.
return CMap.KeyIterator(self,si)
def copy(self):
"""return shallow copy"""
return CIndexedMap(self)
def update_key( self, iter, key ):
"""
see CMap.update_key.
WARNING!! You MUST assume that the passed iterator is invalidated
upon return.
Typical use:
>>> from CMap import CIndexedMap
>>> m = CIndexedMap()
>>> m[10] = 'foo'
>>> m[8] = 'bar'
>>> i = m.find(10)
>>> m.update_key(i,7) # i is assumed to be invalid upon return.
>>> del i
>>> int(m.get_key_by_value('foo'))
7
>>> [(int(x[0]),x[1]) for x in m.items()] # reordering occurred.
[(7, 'foo'), (8, 'bar')]
>>> i = m.find(8)
>>> m.update_key(i,9) # no reordering.
>>> del i
>>> [(int(x[0]),x[1]) for x in m.items()]
[(7, 'foo'), (9, 'bar')]
Edge cases:
>>> i = m.find(7)
>>> i.value()
'foo'
>>> try:
... m.update_key(i,9)
... except KeyError:
... print 'ok'
...
ok
>>> m[7]
'foo'
>>> int(m.get_key_by_value('foo'))
7
>>> i = m.iterkeys()
>>> try: # updating an iter pointing at BEGIN.
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
>>> i = m.end()
>>> try: # updating an iter pointing at end().
... m.update_key(i,10)
... except IndexError:
... print 'ok'
...
ok
"""
if not iter._si:
raise RuntimeError( _("invalid iterator") )
if iter._si == BEGIN:
raise IndexError(_("Iterator does not point at key-value pair" ))
if self is not iter._map:
raise IndexError(_("Iterator points into a different "
"CIndexedMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot update end() iterator.") )
si = map_iter_update_key_iter(self._smap, iter._si, key)
# raises KeyError if key already in map.
if si != iter._si: # if map is reordered...
value = iter.value();
val_si = self._value_index[value]
iter_delete(val_si)
self._value_index[value] = si
def erase(self, iter):
"""Remove item pointed to by the iterator. Iterator is immediately
invalidated after the deletion completes."""
if not iter._si:
raise RuntimeError( _("invalid iterator") )
if iter._si == BEGIN:
raise IndexError(_("Iterator does not point at key-value pair." ))
if self is not iter._map:
raise IndexError(_("Iterator points into a different "
"CIndexedMap."))
if map_iter_at_end(self._smap, iter._si):
raise IndexError( _("Cannot update end() iterator.") )
value = iter.value()
CMap.erase(self,iter)
del self._value_index[value]
if __name__ == "__main__":
import doctest
import random
##############################################
# UNIT TESTS
print "Testing module"
doctest.testmod(sys.modules[__name__])
print "doctest complete."
##############################################
# MEMORY LEAK TESTS
if LEAK_TEST:
i = 0
import gc
class X:
x = range(1000) # something moderately big.
# TEST 1. This does not cause memory to grow.
#m = CMap()
#map_insert(m._smap,10,X())
#while True:
# i += 1
# it = map_find_iter( m._smap, 10 )
# iter_delete(it)
# del it
# if i % 100 == 0:
# gc.collect()
        # TEST 2: This does not cause a memory leak.
#m = map_constructor_double()
#while True:
# i += 1
# map_insert_double(m,10,5) # here
# it = map_find_iter_double( m, 10 )
# map_iter_erase_double( m, it ) # or here is the problem.
# iter_delete_double(it)
# del it
# #assert len(m) == 0
# assert map_size_double(m) == 0
# if i % 100 == 0:
# gc.collect()
# TEST 3. No memory leak
#m = CMap()
#while True:
# i += 1
# map_insert(m._smap,10,X()) # here
# it = map_find_iter( m._smap, 10 )
# map_iter_erase( m._smap, it ) # or here is the problem.
# iter_delete(it)
# del it
# assert len(m) == 0
# assert map_size(m._smap) == 0
# if i % 100 == 0:
# gc.collect()
# TEST 4: map creation and deletion.
#while True:
# m = map_constructor()
# map_delete(m);
# TEST 5: test iteration.
#m = map_constructor()
#for i in xrange(10):
# map_insert(m,i,X())
#while True:
# i = map_begin(m)
# while not map_iter_at_begin(m,i):
# iter_incr(i)
# iter_delete(i)
# TEST 6:
#m = map_constructor()
#for i in xrange(10):
# map_insert(m,i,X())
#while True:
# map_find( m, random.randint(0,9) )
# TEST 7:
#m = map_constructor()
#for i in xrange(50):
# map_insert( m, i, X() )
#while True:
# for i in xrange(50):
# map_set( m, i, X() )
# TEST 8
# aha! Another leak! Fixed.
#m = map_constructor()
#while True:
# i += 1
# map_insert(m,10,X())
# map_erase(m,10)
# assert map_size(m) == 0
# TEST 9
m = map_constructor()
for i in xrange(50):
map_insert( m, i, X() )
while True:
it = map_find_iter( m, 5 )
map_iter_update_key( m, it, 1000 )
iter_delete(it)
it = map_find_iter( m, 1000 )
map_iter_update_key( m, it, 5)
iter_delete(it)
```
#### File: BitTorrent-5.2.2/BTL/connection_cache.py
```python
import logging
import pycurllib
from LIFOQueue import LIFOQueue
import Queue
MAX_WAIT = 5
MAX_PER_CACHE_DEFAULT = 15
INF_WAIT_MAX_CONNECTIONS = 1000
logger = logging.getLogger('BTL.connection_cache')
class ConnectionCache(object):
def __init__(self, max_per_cache=None):
if None == max_per_cache:
max_per_cache = MAX_PER_CACHE_DEFAULT
self.size = 0
self.max_per_cache = max_per_cache
self.cache = LIFOQueue(maxsize = self.max_per_cache)
def get_connection(self):
try:
return self.cache.get_nowait()
except Queue.Empty:
logger.warn("ConnectionCache queue empty, size=%d, qsize=%d" % (self.size, self.cache.qsize()))
pass
        # I chose not to lock here. Max is advisory; if two threads
        # eagerly await a connection near max, allow them both
        # to make one.
if self.size < self.max_per_cache:
self.size += 1
return self._make_connection()
else:
logger.warn("ConnectionCache queue over, size=%d, qsize=%d" % (self.size, self.cache.qsize()))
try:
return self.cache.get(True, MAX_WAIT)
except Queue.Empty:
# ERROR: Should log this!
logger.error("ConnectionCache waited more than %d seconds, size=%d, qsize=%d" % (MAX_WAIT, self.size, self.cache.qsize()))
pass
if self.size > INF_WAIT_MAX_CONNECTIONS:
logger.warn("ConnectionCache wait forever, size=%d, max_connections=%d, qsize=%d" % (self.size, INF_WAIT_MAX_CONNECTIONS, self.cache.qsize()))
return self.cache.get()
self.size += 1
logger.warn("ConnectionCache wait inf, size=%d, max_connections=%d, qsize=%d" % (self.size, INF_WAIT_MAX_CONNECTIONS, self.cache.qsize()))
return self._make_connection()
def destroy_connection(self, c):
c.c.close()
self.size -= 1
def put_connection(self, c):
self.cache.put(c)
class PyCURL_Cache(ConnectionCache):
def __init__(self, uri, max_per_cache=None):
if None == max_per_cache:
max_per_cache = MAX_PER_CACHE_DEFAULT
self.uri = uri
ConnectionCache.__init__(self, max_per_cache=max_per_cache)
def _make_connection(self):
r = pycurllib.Request(self.uri)
return r
class CacheSet(object):
def __init__(self):
self.cache = {}
def get_cache(self, cachetype, url, max_per_cache=None):
if None == max_per_cache:
max_per_cache = MAX_PER_CACHE_DEFAULT
if url not in self.cache:
self.cache[url] = cachetype(url, max_per_cache=max_per_cache)
return self.cache[url]
cache_set = CacheSet()
```
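A brief usage sketch of the cache above. The tracker URL is hypothetical and the import path is assumed from the file location; the returned object is a `pycurllib.Request`, so the actual request handling (elided here) would go through pycurllib.

```python
from BTL.connection_cache import PyCURL_Cache, cache_set

cache = cache_set.get_cache(PyCURL_Cache, 'http://tracker.example.com/announce')
conn = cache.get_connection()          # reuses an idle connection when one exists
try:
    pass                               # issue the request with pycurllib here
finally:
    cache.put_connection(conn)         # hand the connection back for reuse
    # use cache.destroy_connection(conn) instead if the connection went bad
```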
#### File: BitTorrent-5.2.2/BTL/decorate.py
```python
def decorate_func(new, old):
def runner(*a, **kw):
new(*a, **kw)
return old(*a, **kw)
return runner
```
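A minimal sketch of `decorate_func`: the first callable runs for its side effect, the second supplies the return value. The names below are illustrative.

```python
from BTL.decorate import decorate_func

def announce(*a, **kw):
    print 'calling with', a, kw

def add(x, y):
    return x + y

add = decorate_func(announce, add)
print add(2, 3)        # prints the announce line, then 5
```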
#### File: BitTorrent-5.2.2/BTL/dlock.py
```python
import os
import sys
import socket
from time import asctime, gmtime, time, sleep
from twisted.internet import reactor, task
class dlock(object):
def __init__(self, deadlockfile, update_period=300, myhost=None, debug=None):
if myhost == None: myhost = socket.gethostname()
self.host = myhost
self.pid = os.getpid()
self.deadlockfile = deadlockfile
self.refresher = task.LoopingCall(self.refresh)
self.update_period = update_period
self.debug = debug
# Block until lock is acquired, then refresh the lock file every
# update_period seconds.
#
# Nota Bene: while blocked on acquiring the lock, this sleeps the
# whole process; once the lock is acquired, an event-driven model
# (twisted reactor) is presumed. The intended use (see test at
# bottom) is to block on acquire before running the Twisted
# reactor.
#
def acquire(self):
while True:
while self.islocked():
if self.debug:
lock = self._readlock()
print '%s locked by %s' % (self.deadlockfile, self._lockdict2string(lock))
sleep(self.update_period)
try:
# Use link count hack to work around NFS's broken
# file locking.
tempfile = '.' + str(self.pid) + self.host + str(time()) + '.tmp'
lockfile = self.deadlockfile + '.lock'
# Create temp lock file
fh = open(tempfile, "w")
fh.close()
# Atomicallly create lockfile as a hard link
try:
os.link(tempfile, lockfile)
except:
if self.debug:
print "tempfile: " + tempfile
print "lockfile: " + lockfile
raise
# Check the number of links
if os.stat(tempfile)[3] == os.stat(lockfile)[3]:
# Hooray, I have the write lock on the deadlock file!
self._timestamp_deadlockfile(time())
if self.debug:
lock = self._readlock()
print '%s acquired by %s' % (self.deadlockfile, self._lockdict2string(lock))
self.refresher.start(self.update_period)
# Release the lock
os.unlink(tempfile)
os.unlink(lockfile)
return self
else:
# Failed to grab write lock on deadlock file, keep looping
if self.debug:
print '%d failed to grab write lock on deadlock file: %s (will retry)' % (self.pid, self.deadlockfile)
except:
if self.debug:
print 'File Lock Error: %s@%s could not acquire %s' % (self.pid, self.host, self.deadlockfile)
raise
def refresh(self):
assert self.ownlock()
# No need to grab a write lock on the deadlock file, since it's not stale
self._timestamp_deadlockfile(time())
def _timestamp_deadlockfile(self, ts):
try:
fh = open(self.deadlockfile, 'w')
fh.write(self._lockstr(ts))
fh.close()
os.chmod(self.deadlockfile, 0644)
except:
if self.debug:
print 'File Lock Error: %s@%s could not write %s' % (self.pid, self.host, self.deadlockfile)
raise
def release(self):
if self.ownlock():
try:
self.refresher.stop()
self._timestamp_deadlockfile(0)
if self.debug:
print '%s@%s released lock %s' % (self.pid, self.host, self.deadlockfile)
except:
if self.debug:
print 'File Lock Error: %s@%s could not release %s' % (self.pid, self.host, self.deadlockfile)
raise
return self
def islocked(self):
try:
if self._isstale():
# Lock seems stale, wait for one more update period and check again
sleep(self.update_period)
return not self._isstale()
else:
return True
except:
if self.debug:
print "islocked exception"
return False
def _isstale(self):
lock = self._readlock()
if time() - lock['timestamp'] > self.update_period:
return True
else:
return False
def _readlock(self):
try:
lock = {}
fh = open(self.deadlockfile)
data = fh.read().split()
fh.close()
assert len(data) == 3
lock['pid'] = int(data[0])
lock['host'] = data[1]
lock['timestamp'] = float(data[2])
return lock
except:
if self.debug:
print 'File Lock Error: %s@%s reading %s' % (self.pid, self.host, self.deadlockfile)
raise
# Public method to read a lockfile.
@classmethod
def readlock(cls, lockfile):
lock = cls(deadlockfile=lockfile, myhost='dummy')
return lock._readlock()
def _lockdict2string(self, lock):
return '%s@%s at %s' % (lock['pid'], lock['host'], asctime(gmtime(lock['timestamp'])))
def _lockstr(self, ts):
return '%d %s %f'%(self.pid, self.host, ts)
def ownlock(self):
lock = self._readlock()
return (self.host == lock['host'] and
self.pid == lock['pid'])
def __del__(self):
self.release()
# Tests
#
# Run several in parallel on multiple machines, but have at most one
# whack the deadlock file on initialization.
#
def run_tests(argv=None):
if argv is None:
argv = sys.argv
deadlockfile = './dlock_test'
l = dlock(deadlockfile, 5, debug=True)
# Stupid argv handling; just grab first arg and run that test
if len(argv) > 1:
if argv[1] == 'none':
print "Removing deadlock file."
os.unlink(deadlockfile)
elif argv[1] == 'old':
print "Creating stale deadlock file owned by no one."
fh = open(l.deadlockfile, 'w')
fh.write('%d %s %f'%(0, 0, 0))
fh.close()
elif argv[1] == 'new':
print "Creating fresh deadlock file owned by no one."
fh = open(l.deadlockfile, 'w')
fh.write('%d %s %f'%(0, 0, time()))
fh.close()
else:
print "Un-known arg--starting with old deadlock file."
else:
print "Starting with old deadlock file."
# Tease for a while, then release the lock
def tease(l, n):
if n > 0:
assert l.ownlock()
print 'I (%d) have the lock--ha, ha ha!'%os.getpid()
reactor.callLater(1, tease, l, n - 1)
else:
l.release()
# Start teasing once reactor is run
reactor.callLater(1, tease, l, 20)
# But first, grab the lock (this blocks)
l.acquire()
reactor.run()
if __name__ == "__main__":
sys.exit(run_tests())
```
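A usage sketch mirroring `run_tests()` above: block on `acquire()` before starting the reactor, which then keeps the lock file refreshed every `update_period` seconds. The lock file path and the one-hour release are hypothetical.

```python
from twisted.internet import reactor
from BTL.dlock import dlock

lock = dlock('/tmp/myservice.dlock', update_period=60)
lock.acquire()                          # blocks until we own the deadlock file
reactor.callLater(3600, lock.release)   # give it up after an hour
reactor.run()
```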
#### File: BitTorrent-5.2.2/BTL/epollreactor.py
```python
import epoll as select
########################################################
## http://twistedmatrix.com/trac/ticket/1953#comment:20
from twisted.python import log, failure
from twisted.internet.tcp import BaseClient
def failIfNotConnected(self, err):
if (self.connected or self.disconnected or
not hasattr(self, "connector")):
return
self.connector.connectionFailed(failure.Failure(err))
if hasattr(self, "reactor"):
# this doesn't happen if we failed in __init__
self.stopReading()
self.stopWriting()
del self.connector
try:
self._closeSocket()
except AttributeError:
pass
else:
del self.socket, self.fileno
BaseClient.failIfNotConnected = failIfNotConnected
########################################################
import errno, sys
from zope.interface import implements
# Twisted imports
from twisted.python import log, threadable, failure
from twisted.internet import main, posixbase, error
from twisted.internet.interfaces import IReactorFDSet
# globals
reads = {}
writes = {}
selectables = {}
poller = select.poll()
POLL_DISCONNECTED = (select.POLLHUP | select.POLLERR | select.POLLNVAL)
class PollReactor(posixbase.PosixReactorBase):
"""A reactor that uses poll(2)."""
implements(IReactorFDSet)
def _updateRegistration(self, fd):
"""Register/unregister an fd with the poller."""
try:
poller.unregister(fd)
except KeyError:
pass
mask = 0
if reads.has_key(fd): mask = mask | select.POLLIN
if writes.has_key(fd): mask = mask | select.POLLOUT
if mask != 0:
poller.register(fd, mask)
else:
if selectables.has_key(fd): del selectables[fd]
def _dictRemove(self, selectable, mdict):
try:
# the easy way
fd = selectable.fileno()
# make sure the fd is actually real. In some situations we can get
# -1 here.
mdict[fd]
except:
# the hard way: necessary because fileno() may disappear at any
# moment, thanks to python's underlying sockets impl
for fd, fdes in selectables.items():
if selectable is fdes:
break
else:
# Hmm, maybe not the right course of action? This method can't
# fail, because it happens inside error detection...
return
if mdict.has_key(fd):
del mdict[fd]
self._updateRegistration(fd)
def addReader(self, reader):
"""Add a FileDescriptor for notification of data available to read.
"""
fd = reader.fileno()
if not reads.has_key(fd):
selectables[fd] = reader
reads[fd] = 1
self._updateRegistration(fd)
def addWriter(self, writer, writes=writes, selectables=selectables):
"""Add a FileDescriptor for notification of data available to write.
"""
fd = writer.fileno()
if not writes.has_key(fd):
selectables[fd] = writer
writes[fd] = 1
self._updateRegistration(fd)
def removeReader(self, reader, reads=reads):
"""Remove a Selectable for notification of data available to read.
"""
return self._dictRemove(reader, reads)
def removeWriter(self, writer, writes=writes):
"""Remove a Selectable for notification of data available to write.
"""
return self._dictRemove(writer, writes)
def removeAll(self, reads=reads, writes=writes, selectables=selectables):
"""Remove all selectables, and return a list of them."""
if self.waker is not None:
self.removeReader(self.waker)
result = selectables.values()
fds = selectables.keys()
reads.clear()
writes.clear()
selectables.clear()
for fd in fds:
poller.unregister(fd)
if self.waker is not None:
self.addReader(self.waker)
return result
def doPoll(self, timeout,
reads=reads,
writes=writes,
selectables=selectables,
select=select,
log=log,
POLLIN=select.POLLIN,
POLLOUT=select.POLLOUT):
"""Poll the poller for new events."""
if timeout is not None:
timeout = int(timeout * 1000) # convert seconds to milliseconds
try:
l = poller.poll(timeout)
except select.error, e:
if e[0] == errno.EINTR:
return
else:
raise
_drdw = self._doReadOrWrite
for fd, event in l:
try:
selectable = selectables[fd]
except KeyError:
# Handles the infrequent case where one selectable's
# handler disconnects another.
continue
log.callWithLogger(selectable, _drdw, selectable, fd, event, POLLIN, POLLOUT, log)
doIteration = doPoll
def _doReadOrWrite(self, selectable, fd, event, POLLIN, POLLOUT, log,
faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost())
}):
why = None
inRead = False
if event & POLL_DISCONNECTED and not (event & POLLIN):
why = main.CONNECTION_LOST
else:
try:
if event & POLLIN:
why = selectable.doRead()
inRead = True
if not why and event & POLLOUT:
why = selectable.doWrite()
inRead = False
if not selectable.fileno() == fd:
why = error.ConnectionFdescWentAway('Filedescriptor went away')
inRead = False
except:
log.deferr()
why = sys.exc_info()[1]
if why:
self._disconnectSelectable(selectable, why, inRead)
def install():
"""Install the poll() reactor."""
p = PollReactor()
from twisted.internet import main
main.installReactor(p)
__all__ = ["PollReactor", "install"]
```
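An installation sketch: as with any alternate Twisted reactor, `install()` must run before anything imports `twisted.internet.reactor`. The import path is assumed from the file location.

```python
from BTL import epollreactor
epollreactor.install()

from twisted.internet import reactor    # now backed by the epoll PollReactor
reactor.run()
```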
#### File: BitTorrent-5.2.2/BTL/iphelp.py
```python
import ctypes
from ctypes.wintypes import DWORD, ULONG
from BTL.iptypes import inet_addr, IPAddr
Iphlpapi = ctypes.windll.Iphlpapi
class MIB_IPADDRROW(ctypes.Structure):
_fields_ = [("dwAddr", IPAddr),
("dwIndex", DWORD),
("dwMask", DWORD),
("dwBCastAddr", IPAddr),
("dwReasmSize", DWORD),
("unused1", ctypes.c_ushort),
("wType", ctypes.c_ushort),
]
MAX_INTERFACES = 10
class MIB_IPADDRTABLE(ctypes.Structure):
_fields_ = [("dwNumEntries", DWORD),
("table", MIB_IPADDRROW * MAX_INTERFACES)]
def get_interface_by_index(index):
table = MIB_IPADDRTABLE()
size = ULONG(ctypes.sizeof(table))
table.dwNumEntries = 0
Iphlpapi.GetIpAddrTable(ctypes.byref(table), ctypes.byref(size), 0)
for n in xrange(table.dwNumEntries):
row = table.table[n]
if row.dwIndex == index:
return str(row.dwAddr)
raise IndexError("interface index out of range")
def get_route_ip(ip=None):
#ip = socket.gethostbyname('bittorrent.com')
# doesn't really matter if this is out of date, we're just trying to find
# the interface to get to the internet.
ip = ip or '172.16.17.32'
ip = inet_addr(ip)
index = ctypes.c_ulong()
Iphlpapi.GetBestInterface(ip, ctypes.byref(index))
index = long(index.value)
try:
interface_ip = get_interface_by_index(index)
except:
interface_ip = None
return interface_ip
```
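A usage sketch (Windows only, since the module drives Iphlpapi through ctypes): look up the address of the local interface that routes toward the internet.

```python
from BTL.iphelp import get_route_ip

interface_ip = get_route_ip()           # None if the lookup fails
if interface_ip is not None:
    print 'outbound interface address:', interface_ip
```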
#### File: BitTorrent-5.2.2/BTL/IPTools.py
```python
from struct import pack, unpack
from socket import inet_aton, inet_ntoa
def compact(ip, port):
return pack("!4sH", inet_aton(ip), port) # ! == "network order"
# 4s == "4-byte string."
# H == "unsigned short"
def uncompact(x):
ip, port = unpack("!4sH", x)
return inet_ntoa(ip), port
def uncompact_sequence(b):
for x in xrange(0, len(b), 6):
ip, port = uncompact(b[x:x+6])
port = int(port)
yield (ip, port)
def compact_sequence(s):
b = []
for addr in s:
c = compact(addr[0], addr[1])
b.append(c)
return ''.join(b)
##import ctypes
##class CompactAddr(ctypes.Structure):
## _fields_ = [('ip', ctypes.c_int32),
## ('port', ctypes.c_int16)]
##
##def compact_sequence_c(s):
## b = ctypes.create_string_buffer(6 * len(s))
## a = ctypes.addressof(b)
## for i, addr in enumerate(s):
## c = compact(addr[0], addr[1])
## ctypes.cast(
## offset = i*6
## b[offset:offset + 6] = c
## return b
```
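A round-trip sketch of the 6-byte compact peer format handled above; the addresses are illustrative.

```python
from BTL.IPTools import compact, uncompact, compact_sequence, uncompact_sequence

packed = compact('10.0.0.1', 6881)
assert len(packed) == 6
assert uncompact(packed) == ('10.0.0.1', 6881)

peers = [('10.0.0.1', 6881), ('10.0.0.2', 51413)]
assert list(uncompact_sequence(compact_sequence(peers))) == peers
```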
#### File: BitTorrent-5.2.2/BTL/language.py
```python
import gettext
class LanguageDict(dict):
    def __getitem__(self, key):
        # fall back through gettext's expansion, e.g. 'en_US' -> 'en'.
        for k in gettext._expand_lang(key):
            if self.has_key(k):
                return dict.__getitem__(self, k)
        raise KeyError(key)
language_names = LanguageDict()
language_names.update( {
'af' :u'Afrikaans' , 'bg' :u'Български' ,
'da' :u'Dansk' , 'ca' :u'Català' ,
'cs' :u'Čeština' , 'de' :u'Deutsch' ,
'en' :u'English' , 'es' :u'Español' ,
'es_MX':u'Español de Mexico ' , 'fr' :u'Français' ,
'el' :u'Ελληνικά' , 'he' :u'עברית' ,
'hu' :u'Magyar' , 'it' :u'Italiano' ,
'is' :u'Íslenska' , 'ja' :u'日本語' ,
'ko' :u'한국어' ,'nl' :u'Nederlands' ,
'nb_NO':u'Norsk bokmål' , 'pl' :u'Polski' ,
'pt' :u'Português' , 'pt_BR':u'Português do Brasil' ,
'ro' :u'Română' , 'ru' :u'Русский' ,
'sk' :u'Slovenský' , 'sl' :u'Slovensko' ,
'sv' :u'Svenska' , 'tr' :u'Türkçe' ,
    'vi'   :u'Tiếng Việt' ,
'zh_CN':u'简体中文' , # Simplified
'zh_TW':u'繁體中文' , # Traditional
} )
unfinished_language_names = {
'ar' :u'العربية' , 'bs' :u'Bosanski' ,
'eo' :u'Esperanto' , 'eu' :u'Euskara' ,
'et' :u'Eesti' , 'fi' :u'Suomi' ,
'fa' :u'فارسی' , 'ga' :u'Gaeilge' ,
'gl' :u'Galego' , 'hr' :u'Hrvatski' ,
'hy' :u'Հայերեն' , 'in' :u'Bahasa indonesia' ,
'ka' :u'ქართული ენა', 'lt' :u'Lietuvių' ,
'ms' :u'Bahasa melayu' , 'ml' :u'Malayalam' ,
'sq' :u'Shqipe' , 'th' :u'ภาษาไทย' ,
'tlh' :u'tlhIngan-Hol' , 'uk' :u'Українська' ,
'hi' :u'हिंदी' , 'cy' :u'Cymraeg' ,
    'nn_NO':u'Norsk nynorsk' , 'te'   :u' తెలుగు' ,
}
#language_names.update(unfinished_language_names)
class LanguageCodeList(list):
def index(self, value):
for v in gettext._expand_lang(value):
try:
i = list.index(self, v)
return i
except ValueError:
pass
        raise ValueError('%s not in list' % value)
languages = LanguageCodeList()
languages.extend(language_names.keys())
languages.sort()
# windows codepage to locale mapping
locale_sucks = {
0x0436: "af", # Afrikaans
0x3801: "ar_AE", # Arabic - United Arab Emirates
0x3C01: "ar_BH", # Arabic - Bahrain
0x1401: "ar_DZ", # Arabic - Algeria
0x0C01: "ar_EG", # Arabic - Egypt
0x0801: "ar_IQ", # Arabic - Iraq
0x2C01: "ar_JO", # Arabic - Jordan
0x3401: "ar_KW", # Arabic - Kuwait
0x3001: "ar_LB", # Arabic - Lebanon
0x1001: "ar_LY", # Arabic - Libya
0x1801: "ar_MA", # Arabic - Morocco
0x2001: "ar_OM", # Arabic - Oman
0x4001: "ar_QA", # Arabic - Qatar
0x0401: "ar_SA", # Arabic - Saudi Arabia
0x2801: "ar_SY", # Arabic - Syria
0x1C01: "ar_TN", # Arabic - Tunisia
0x2401: "ar_YE", # Arabic - Yemen
0x082C: "az_AZ", # Azeri - Cyrillic
0x0423: "be", # Belarusian
0x0402: "bg", # Bulgarian
0x0403: "ca", # Catalan
0x0405: "cs", # Czech
0x0406: "da", # Danish
0x0007: "de", # German
0x0C07: "de_AT", # German - Austria
0x0807: "de_CH", # German - Switzerland
0x0407: "de_DE", # German - Germany
0x1407: "de_LI", # German - Liechtenstein
0x1007: "de_LU", # German - Luxembourg
0x0408: "el", # Greek
0x0C09: "en_AU", # English - Australia
0x2809: "en_BZ", # English - Belize
0x1009: "en_CA", # English - Canada
0x2409: "en_CB", # English - Carribbean
0x0809: "en_GB", # English - United Kingdom
0x1809: "en_IE", # English - Ireland
0x2009: "en_JM", # English - Jamaica
0x1409: "en_NZ", # English - New Zealand
0x3409: "en_PH", # English - Phillippines
0x2C09: "en_TT", # English - Trinidad
0x0409: "en_US", # English - United States
0x1C09: "en_ZA", # English - South Africa
0x000A: "es", # Spanish (added)
0x2C0A: "es_AR", # Spanish - Argentina
0x400A: "es_BO", # Spanish - Bolivia
0x340A: "es_CL", # Spanish - Chile
0x240A: "es_CO", # Spanish - Colombia
0x140A: "es_CR", # Spanish - Costa Rica
0x1C0A: "es_DO", # Spanish - Dominican Republic
0x300A: "es_EC", # Spanish - Ecuador
0x040a: "es_ES", # Spanish - Spain
0x100A: "es_GT", # Spanish - Guatemala
0x480A: "es_HN", # Spanish - Honduras
0x080A: "es_MX", # Spanish - Mexico
0x4C0A: "es_NI", # Spanish - Nicaragua
0x180A: "es_PA", # Spanish - Panama
0x280A: "es_PE", # Spanish - Peru
0x500A: "es_PR", # Spanish - Puerto Rico
0x3C0A: "es_PY", # Spanish - Paraguay
0x440A: "es_SV", # Spanish - El Salvador
0x380A: "es_UY", # Spanish - Uruguay
0x200A: "es_VE", # Spanish - Venezuela
0x0425: "et", # Estonian
0x0009: "en", # English (added)
0x042D: "eu", # Basque
0x0429: "fa", # Farsi
0x040B: "fi", # Finnish
0x0438: "fo", # Faroese
0x000C: "fr", # French (added)
0x080C: "fr_BE", # French - Belgium
0x0C0C: "fr_CA", # French - Canada
0x100C: "fr_CH", # French - Switzerland
0x040C: "fr_FR", # French - France
0x140C: "fr_LU", # French - Luxembourg
0x043C: "gd", # Gaelic - Scotland
0x083C: "gd_IE", # Gaelic - Ireland
0x040D: "he", # Hebrew
0x0439: "hi", # Hindi
0x041A: "hr", # Croatian
0x040E: "hu", # Hungarian
0x042B: "hy", # Armenian
0x0421: "id", # Indonesian
0x040F: "is", # Icelandic
0x0010: "it", # Italian (added)
0x0810: "it_CH", # Italian - Switzerland
0x0410: "it_IT", # Italian - Italy
0x0411: "ja", # Japanese
0x0412: "ko", # Korean
0x0427: "lt", # Lithuanian
0x0426: "lv", # Latvian
0x042F: "mk", # FYRO Macedonian
0x044E: "mr", # Marathi
0x083E: "ms_BN", # Malay - Brunei
0x043E: "ms_MY", # Malay - Malaysia
0x043A: "mt", # Maltese
0x0013: "nl", # Dutch (added)
0x0813: "nl_BE", # Dutch - Belgium
0x0413: "nl_NL", # Dutch - The Netherlands
0x0814: "no_NO", # Norwegian - Nynorsk
0x0414: "nb_NO", # Norwegian - Bokmal (?)
0x0415: "pl", # Polish
0x0016: "pt", # Portuguese (added)
0x0416: "pt_BR", # Portuguese - Brazil
0x0816: "pt_PT", # Portuguese - Portugal
0x0417: "rm", # Raeto-Romance
0x0418: "ro", # Romanian - Romania
0x0818: "ro_MO", # Romanian - Moldova
0x0419: "ru", # Russian
0x0819: "ru_MO", # Russian - Moldova
0x044F: "sa", # Sanskrit
0x042E: "sb", # Sorbian
0x041B: "sk", # Slovak
0x0424: "sl", # Slovenian
0x041C: "sq", # Albanian
0x081A: "sr_SP", # Serbian - Latin
0x001D: "sv", # Swedish (added)
0x081D: "sv_FI", # Swedish - Finland
0x041D: "sv_SE", # Swedish - Sweden
0x0441: "sw", # Swahili
0x0430: "sx", # Sutu
0x0449: "ta", # Tamil
0x041E: "th", # Thai
0x0432: "tn", # Setsuana
0x041F: "tr", # Turkish
0x0431: "ts", # Tsonga
0X0444: "tt", # Tatar
0x0422: "uk", # Ukrainian
0x0420: "ur", # Urdu
0x0443: "uz_UZ", # Uzbek - Latin
0x042A: "vi", # Vietnamese
0x0434: "xh", # Xhosa
0x043D: "yi", # Yiddish
0x0804: "zh_CN", # Chinese - China
0x0C04: "zh_HK", # Chinese - Hong Kong S.A.R.
0x1404: "zh_MO", # Chinese - Macau S.A.R
0x1004: "zh_SG", # Chinese - Singapore
0x0404: "zh_TW", # Chinese - Taiwan
0x0435: "zu", # Zulu
}
if __name__ == '__main__':
from BTL.obsoletepythonsupport import set
internal = set([x.lower() for x in languages])
windows = set(locale_sucks.values())
if not windows.issuperset(internal):
diff = list(internal.difference(windows))
diff.sort()
print diff
```
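A lookup sketch of the fallback behaviour: `gettext._expand_lang` turns a regional locale such as `'en_US'` into a list of candidates ending with the bare language code, so regional lookups fall back to the base entry.

```python
from BTL.language import language_names, languages

print language_names['en']       # u'English'
print language_names['en_US']    # same entry, found via the 'en' fallback
print 'en' in languages          # True
```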
#### File: BitTorrent-5.2.2/BTL/log.py
```python
from logging import *
import time, sys
import socket
import datetime
import logging
import logging.handlers
from BTL.reactor_magic import reactor
from BTL.defer import Deferred
from BTL.btl_string import printable
from BTL import twisted_logger
# convenience re-export so that they can be used without import logging.
DEBUG = DEBUG
INFO = INFO
WARNING = WARNING
ERROR = ERROR
CRITICAL = CRITICAL
getLogger = getLogger
# Not used at the moment but can be changed later
SYSLOG_HOST = 'localhost'
SYSLOG_PORT = 514
class BTLFormatter(logging.Formatter):
def __init__(self, *a, **k):
self.use_localtime = False
if k.has_key('use_localtime'):
self.use_localtime = k['use_localtime']
del k['use_localtime']
logging.Formatter.__init__(self, *a, **k)
def formatTime(self, record, datefmt=None):
ct = self.converter(record.created)
try:
if self.use_localtime:
dt = datetime.datetime.fromtimestamp(record.created)
else:
dt = datetime.datetime.utcfromtimestamp(record.created)
if datefmt:
s = dt.strftime(datefmt)
else:
s = dt.isoformat()
except:
s = "Interpretter Shutdown"
return s
class RateLimitedLogger:
"""Logger that tosses log entries whenever the logged
entries per second exceeds the specified rate limit by
max_burst log entries."""
def __init__(self, logger, rate_limit, max_burst, log_all_level = CRITICAL ):
"""@param logger: logging.Logger object that this class wraps.
@param rate_limit: maximum number of log entries per second.
@param max_burst: maximum number of log entries that can be printed
in a burst. max_burst is the sigma in a (sigma,rho) token bucket.
@param log_all_level: log all entries with level >= log_all_level.
Such entries are still counted against the rate limit.
"""
self.logger = logger
self.rate_limit = rate_limit
self.max_burst = max_burst
self.logged_discard = False # logged that we are dropping entries?
self.tokens = self.max_burst
self.log_all_above_level = log_all_level
reactor.callLater(1,self._increment_tokens)
reactor.callLater(5,self._log_clear)
def _increment_tokens(self):
self.tokens += self.rate_limit
if self.tokens >= self.max_burst:
self.tokens = self.max_burst
reactor.callLater(1, self._increment_tokens)
def _log_clear(self):
self.logged_discard = False
def setLevel(self, level):
return self.logger.setLevel(level)
def _discarded(self, level):
self.tokens -= 1
if self.tokens < 0:
self.tokens = 0
if level >= self.log_all_above_level:
return False
elif not self.logged_discard:
self.logger.error( "Discarding '%s' logger entries because they are arriving "
"too fast. Will not log this error again for 5 "
"seconds." % self.logger.name )
self.logged_discard = True
return True # true = discarded
return False # false = not discarded
def debug(self, msg, *args, **kwargs):
if not self._discarded(level = DEBUG):
self.logger.debug(msg,*args, **kwargs)
def info(self, msg, *args, **kwargs):
if not self._discarded(level = INFO):
self.logger.info(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
if not self._discarded(level = WARNING):
self.logger.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
if not self._discarded(level = ERROR):
self.logger.error(msg, *args, **kwargs)
    def exception(self, msg, *args):
        # logger.exception logs at ERROR level, so rate-limit it as ERROR.
        if not self._discarded(level = ERROR):
            self.logger.exception(msg, *args)
def critical(self, msg, *args, **kwargs):
if not self._discarded(level = CRITICAL):
self.logger.critical(msg, *args, **kwargs)
    def log(self, level, msg, *args, **kwargs):
        if not self._discarded(level):
            self.logger.log(level, msg, *args, **kwargs)
def findCaller(self):
return self.logger.findCaller()
def makeRecord(self, name, level, fn, lno, msg, args, exc_info):
        return self.logger.makeRecord(name, level, fn, lno, msg, args, exc_info)
def addHandler(self, hdlr):
self.logger.addHandler(hdlr)
def removeHandler(self, hdlr):
self.logger.removeHandler(hdlr)
def callHandlers(self, record):
self.logger.callHandlers(record)
def getEffectiveLevel(self):
return self.logger.getEffectiveLevel()
class StdioPretender:
"""Pretends to be stdout or stderr."""
# modified from twisted.python.log.StdioOnnaStick
closed = 0
softspace = 0
mode = 'wb'
name = '<stdio (log)>'
def __init__(self, capture_name, level ):
self.level = level
self.logger = logging.getLogger( capture_name )
self.buf = ''
def close(self):
pass
def flush(self):
pass
def fileno(self):
return -1
def read(self):
raise IOError("can't read from the log!")
readline = read
readlines = read
seek = read
tell = read
def write(self, data):
d = (self.buf + data).split('\n')
self.buf = d[-1]
messages = d[0:-1]
for message in messages:
self.logger.log( self.level, message )
def writelines(self, lines):
for line in lines:
            self.logger.log( self.level, line )
class SysLogHandler(logging.handlers.SysLogHandler):
# This is a hack to get around log entry size limits imposed by syslog.
def __init__(self, address=('localhost', logging.handlers.SYSLOG_UDP_PORT),
facility=logging.handlers.SysLogHandler.LOG_USER,
max_msg_len = 4096, fragment_len = 900, make_printable = True ):
"""@param max_msg_len: maximum message length before truncation.
@param fragment_len: when message length exceeds 900 it is truncated
and broken into multiple consecutive log entries.
@param make_printable: runs each message through
BTL.btl_string.printable in emit. For example,
this is useful if the messages are being sent
through a UNIX socket to syslogd and the
message might contain non-ascii characters.
"""
logging.handlers.SysLogHandler.__init__( self, address, facility )
self.max_msg_len = max_msg_len
self.fragment_len = fragment_len
self.make_printable = make_printable
def emit(self, record):
"""Differs from the override emit in that it
fragments the message to allow for much longer
syslog messages."""
msg = self.format(record)
if self.make_printable:
msg = printable(msg)
msg = msg[:self.max_msg_len]
i = 0
while msg:
remaining = msg[self.fragment_len:]
if i > 0:
msg = "(cont.) " + msg[:self.fragment_len]
else:
msg = msg[:self.fragment_len]
            msg = self.log_format_string % (self.encodePriority(self.facility,
                                            record.levelname.lower()), msg)
try:
if self.unixsocket:
try:
self.socket.send(msg)
except socket.error:
self._connect_unixsocket(self.address)
self.socket.send(msg)
else:
self.socket.sendto(msg, self.address)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
msg = remaining
i += 1
def injectLogger(use_syslog = True, log_file = None, verbose = False,
capture_output = True,
twisted_error_log_level = ERROR,
twisted_info_log_level = INFO,
capture_stderr_log_level = ERROR,
capture_stdout_log_level = INFO,
capture_stderr_name = 'stderr',
capture_stdout_name = 'stdout',
log_level = DEBUG,
log_twisted = True,
use_localtime = False ):
"""
Installs logging.
@param use_syslog: log to syslog. use_syslog, log_file, and verbose are not
mutually exclusive.
@param log_file: log to a file.
@param verbose: output logs to stdout. Setting verbose and capture_output
to this function does NOT result in an infinite loop.
@param capture_output: redirects stdout and stderr to the logger. Be careful. This can
create infinite loops with loggers that
output to stdout or stderr.
@param twisted_error_log_level: log level for errors reported
by twisted.
@param twisted_info_log_level: log level for non-errors reported by twisted.
If capture_output is set then this is also the log
level for anything output to stdout or stderr.
@param log_level: only log events that have level >= passed level
are logged. This is achieved by setting the log level in
each of the installed handlers.
@param capture_stderr_log_level: log level for output captured from stdout.
@param capture_stdout_log_level: log level for output captured from stderr.
@param capture_stderr_name: log name used for stderr. 'name'
refers to the name arg passed to logging.getLogger(name).
@param capture_stdout_name: log name used for stdout. Analogous to capture_stderr_name.
"""
logger = logging.getLogger('')
logger.setLevel(DEBUG) # we use log handler levels to control output level.
formatter = BTLFormatter("%(asctime)s - %(name)s - %(process)d - "
"%(levelname)s - %(message)s", use_localtime=use_localtime)
if log_file is not None:
lf_handler = logging.handlers.RotatingFileHandler(filename=log_file,
mode='a',
maxBytes=2**27,
backupCount=10)
lf_handler.setFormatter(formatter)
lf_handler.setLevel(log_level)
logger.addHandler(lf_handler)
if use_syslog:
sl_handler = SysLogHandler('/dev/log',
facility=SysLogHandler.LOG_LOCAL0)
#address = (SYSLOG_HOST, SYSLOG_PORT))
# namespace - pid - level - message
sl_handler.setFormatter(BTLFormatter("%(name)s - %(process)d - "
"%(levelname)s - %(message)s"))
sl_handler.setLevel(log_level)
logger.addHandler(sl_handler)
if verbose:
# StreamHandler does not capture stdout, it directs output from
# loggers to stdout.
so_handler = logging.StreamHandler(sys.stdout)
so_handler.setFormatter(formatter)
so_handler.setLevel(log_level)
logger.addHandler(so_handler)
if capture_output:
sys.stdout = StdioPretender( capture_stdout_name, capture_stdout_log_level )
sys.stderr = StdioPretender( capture_stderr_name, capture_stderr_log_level )
if log_twisted:
twisted_logger.start(error_log_level = twisted_error_log_level,
info_log_level = twisted_info_log_level)
if __name__ == '__main__':
from BTL.greenlet_yielddefer import coroutine, like_yield
@coroutine
def test_rate_limited_logger():
injectLogger(verbose = True)
log = RateLimitedLogger(logging.getLogger("myapp"), 1,1)
log.info( "should be printed." )
log.info( "should not be printed" ) # but should log "discard" message.
log.info( "also should not be printed" ) # should not logging of discard message.
df = Deferred()
reactor.callLater(3, df.callback, True)
like_yield(df)
log.info( "should also be printed" )
reactor.stop()
def test_injectLogger():
injectLogger(log_file = "your.log", use_syslog=False, verbose=True)
logger = logging.getLogger("myapp")
logger.warning("You are awesome")
print 'stdout!'
print >>sys.stderr, 'stderr!'
from twisted.internet import reactor
from twisted.python import failure
def foo():
reactor.stop()
zuul = dana
reactor.callLater(0, foo)
def test_injectLogger2():
injectLogger(log_file = "your.log", verbose=False, capture_output=True)
print "hello world"
def foo():
reactor.stop()
zuul = dana
reactor.callLater(0, foo)
#test_injectLogger()
test_injectLogger2()
#reactor.callLater(0, test_rate_limited_logger)
reactor.run()
```
#### File: BitTorrent-5.2.2/BTL/opt.py
```python
from ConfigParser import RawConfigParser
from optparse import OptionParser
from BTL.translation import _
class ConfigOptionParser(RawConfigParser, OptionParser):
def __init__(self, usage, default_section, config_file = None):
"""This is an option parser that reads defaults from a config file.
It also allows specification of types for each option (unlike our mess
that is mainline BitTorrent), and is only a slight extension on the
classes provided in the Python standard libraries (unlike the
wheel reinvention in mainline).
@param usage: usage string for this application.
@param default_section: section in the config file containing configuration
for this service. This is a default that can be overriden for
individual options by passing section as a kwarg to add_option.
"""
self._default_section = default_section
OptionParser.__init__(self,usage)
RawConfigParser.__init__(self)
if config_file:
self.read(config_file)
def add_option(self, *args,**kwargs):
if 'section' in kwargs:
section = kwargs['section']
del kwargs['section']
else:
section = self._default_section
if "dest" in kwargs:
if not self.has_option(section, kwargs["dest"]):
if not kwargs.has_key("default"):
raise Exception(
_("Your .conf file is invalid. It does not specify "
"a value for %s.\n %s:\t%s\n") %
(kwargs["dest"],kwargs["dest"],kwargs["help"]))
else:
if not kwargs.has_key("value_type"):
kwargs["default"]=self.get(section, kwargs["dest"])
else:
if kwargs["value_type"] == "float":
kwargs["default"] = float(self.get(section, kwargs["dest"] ))
elif kwargs["value_type"] == "int":
kwargs["default"] = int(self.get(section, kwargs["dest"] ))
elif kwargs["value_type"] == "bool":
v = self.get(section, kwargs["dest"])
if v == "True":
kwargs["default"] = True
elif v == "False":
kwargs["default"] = False
else:
raise Exception( "Boolean value must be either 'True' or 'False'.")
elif kwargs["value_type"] == "str":
# canonicalize strings.
v = self.get(section, kwargs["dest"])
v = v.strip('"').strip()
kwargs["default"] = v
elif kwargs["value_type"] == "list":
v = self.get(section, kwargs["dest"])
kwargs["default"] = v.split(",")
else:
raise Exception( "Option has unrecognized type: %s" % kwargs["value_type"] )
if kwargs.has_key("value_type"):
del kwargs["value_type"]
OptionParser.add_option(self,*args,**kwargs)
```
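A hedged usage sketch; the section name, option, and config file below are hypothetical. `section` and `value_type` are the keyword extensions this class layers on top of optparse.

```python
from BTL.opt import ConfigOptionParser

parser = ConfigOptionParser("usage: %prog [options]", "myservice",
                            config_file="myservice.conf")
parser.add_option("--max-peers", dest="max_peers", value_type="int",
                  default=50, help="maximum number of peers")
options, args = parser.parse_args()
print options.max_peers
```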
#### File: BitTorrent-5.2.2/BTL/protocol.py
```python
from twisted.internet import protocol
from BTL.decorate import decorate_func
## someday twisted might do this for me
class SmartReconnectingClientFactory(protocol.ReconnectingClientFactory):
def buildProtocol(self, addr):
prot = protocol.ReconnectingClientFactory.buildProtocol(self, addr)
# decorate the protocol with a delay reset
prot.connectionMade = decorate_func(self.resetDelay,
prot.connectionMade)
return prot
```
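A usage sketch: the factory behaves like any `ReconnectingClientFactory`, except that every successful `connectionMade` also resets the reconnect delay. The protocol class, host, and port are hypothetical.

```python
from twisted.internet import protocol, reactor
from BTL.protocol import SmartReconnectingClientFactory

class PingProtocol(protocol.Protocol):
    def connectionMade(self):
        self.transport.write('ping\r\n')

factory = SmartReconnectingClientFactory()
factory.protocol = PingProtocol
reactor.connectTCP('localhost', 6881, factory)
# reactor.run()
```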
#### File: BitTorrent-5.2.2/BTL/ThreadProxy.py
```python
from BTL.defer import Deferred, defer_to_thread
class ThreadProxy(object):
__slots__ = ('obj', 'local_queue_task', 'thread_queue_task')
def __init__(self, obj, local_queue_task, thread_queue_task):
self.obj = obj
self.local_queue_task = local_queue_task
self.thread_queue_task = thread_queue_task
def __gen_call_wrapper__(self, f):
def call_wrapper(*a, **kw):
return defer_to_thread(self.local_queue_task, self.thread_queue_task,
f, *a, **kw)
return call_wrapper
def __getattr__(self, attr):
a = getattr(self.obj, attr)
if callable(a):
return self.__gen_call_wrapper__(a)
return a
def call_with_obj(self, _f, *a, **k):
w = self.__gen_call_wrapper__(_f)
return w(self.obj, *a, **k)
```
#### File: BitTorrent-5.2.2/BTL/TimeLeftEstimator.py
```python
from BTL.platform import bttime
class TimeLeftEstimator(object):
def __init__(self, left):
self.start = None
self.last = None
self.rate = 0
self.remaining = None
self.left = left
self.broke = False
self.got_anything = False
self.when_next_expected = bttime() + 5
def add_amount(self, amount):
""" add number of bytes received """
if not self.got_anything:
self.got_anything = True
self.start = bttime() - 2
self.last = self.start
self.left -= amount
return
self.update(bttime(), amount)
def remove_amount(self, amount):
self.left += amount
def get_time_left(self):
""" returns seconds """
if not self.got_anything:
return None
t = bttime()
if t - self.last > 15:
self.update(t, 0)
return self.remaining
def get_size_left(self):
return self.left
def update(self, t, amount):
self.left -= amount
if t < self.when_next_expected and amount == 0:
return
try:
self.rate = ((self.rate * (self.last - self.start)) + amount) / (t - self.start)
self.last = t
self.remaining = self.left / self.rate
if self.start < self.last - self.remaining:
self.start = self.last - self.remaining
except ZeroDivisionError:
self.remaining = None
if self.broke and self.last - self.start < 20:
self.start = self.last - 20
if self.last - self.start > 20:
self.broke = True
self.when_next_expected = t + min((amount / max(self.rate, 0.0001)), 5)
```
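A usage sketch: seed the estimator with the number of bytes outstanding and feed it each received block; `get_time_left()` returns `None` until enough traffic has been seen to form a rate.

```python
from BTL.TimeLeftEstimator import TimeLeftEstimator

est = TimeLeftEstimator(left=4 * 2**20)   # 4 MiB still to download
est.add_amount(65536)                     # a 64 KiB block arrived
est.add_amount(65536)
print est.get_size_left()                 # bytes remaining
print est.get_time_left()                 # estimated seconds, or None
```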
#### File: BitTorrent-5.2.2/BTL/twisted_ebrpc.py
```python
from __future__ import nested_scopes
__version__ = "$Revision: 1.32 $"[11:-2]
# System Imports
import ebrpc
import urlparse
from cStringIO import StringIO
from gzip import GzipFile
pipeline_debug = False
version = "1.0"
from BTL.platform import app_name
from BTL.reactor_magic import reactor
from BTL.exceptions import str_exc
from BTL.protocol import SmartReconnectingClientFactory
from BTL.ebrpclib import ServerProxy
import twisted.web
if twisted.web.__version__ < '0.6.0':
raise ImportError("BTL.twisted_ebrpc requires twisted.web 0.6.0 or greater,"
" from Twisted 2.4.0.\nYou appear to have twisted.web "
"version %s installed at:\n%s" % (twisted.web.__version__,
twisted.web.__file__))
from twisted.web import resource, server
from twisted.internet import protocol
from twisted.python import log, reflect, failure
from twisted.web import http
from twisted.internet import defer
# Useful so people don't need to import ebrpc directly
Fault = ebrpc.Fault
class NoSuchFunction(Fault):
"""There is no function by the given name."""
pass
class Handler:
"""Handle a EBRPC request and store the state for a request in progress.
Override the run() method and return result using self.result,
a Deferred.
We require this class since we're not using threads, so we can't
encapsulate state in a running function if we're going to have
to wait for results.
For example, lets say we want to authenticate against twisted.cred,
run a LDAP query and then pass its result to a database query, all
as a result of a single EBRPC command. We'd use a Handler instance
to store the state of the running command.
"""
def __init__(self, resource, *args):
self.resource = resource # the EBRPC resource we are connected to
self.result = defer.Deferred()
self.run(*args)
def run(self, *args):
# event driven equivalent of 'raise UnimplementedError'
try:
raise NotImplementedError("Implement run() in subclasses")
except:
self.result.errback(failure.Failure())
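# Illustrative sketch only (not part of the original module): a Handler
# subclass does its work in run() and fires self.result when done.  The
# class below is hypothetical.
class _ExampleEchoHandler(Handler):
    def run(self, value):
        # resolve the deferred on the next reactor iteration with the input
        reactor.callLater(0, self.result.callback, value)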
def parse_accept_encoding(header):
a = header.split(',')
l = []
for i in a:
i = i.strip()
if ';' not in i:
type = i
# hmmm
l.append(('1', type))
else:
type, q = i.split(';')
type = type.strip()
q = q.strip()
junk, q = q.split('=')
q = q.strip()
if q != '0':
l.append((q, type))
l.sort()
l.reverse()
l = [ t for q, t in l ]
return l
class EBRPC(resource.Resource):
"""A resource that implements EBRPC.
You probably want to connect this to '/RPC2'.
Methods published can return EBRPC serializable results, Faults,
Binary, Boolean, DateTime, Deferreds, or Handler instances.
By default methods beginning with 'ebrpc_' are published.
Sub-handlers for prefixed methods (e.g., system.listMethods)
can be added with putSubHandler. By default, prefixes are
separated with a '.'. Override self.separator to change this.
"""
# Error codes for Twisted, if they conflict with yours then
# modify them at runtime.
NOT_FOUND = 8001
FAILURE = 8002
isLeaf = 1
separator = '.'
def __init__(self):
resource.Resource.__init__(self)
self.subHandlers = {}
def putSubHandler(self, prefix, handler):
self.subHandlers[prefix] = handler
def getSubHandler(self, prefix):
return self.subHandlers.get(prefix, None)
def getSubHandlerPrefixes(self):
return self.subHandlers.keys()
def _err(self, *a, **kw):
log.err(*a, **kw)
def render(self, request):
request.setHeader('server', "%s/%s" % (app_name, version))
request.content.seek(0, 0)
args, functionPath = ebrpc.loads(request.content.read())
args, kwargs = args
request.functionPath = functionPath
try:
function = self._getFunction(functionPath)
except Fault, f:
self._cbRender(f, request)
else:
request.setHeader("content-type", "application/octet-stream")
defer.maybeDeferred(function, *args, **kwargs).addErrback(
self._ebRender
).addCallback(
self._cbRender, request
)
return server.NOT_DONE_YET
def _cbRender(self, result, request):
if isinstance(result, Handler):
result = result.result
if not isinstance(result, Fault):
result = (result,)
try:
s = ebrpc.dumps(result, methodresponse=1)
except Exception, e:
f = Fault(self.FAILURE,
"function:%s can't serialize output: %s" %
(request.functionPath, str_exc(e)))
self._err(f)
s = ebrpc.dumps(f, methodresponse=1)
encoding = request.getHeader("accept-encoding")
if encoding:
encodings = parse_accept_encoding(encoding)
if 'gzip' in encodings or '*' in encodings:
sio = StringIO()
g = GzipFile(fileobj=sio, mode='wb', compresslevel=9)
g.write(s)
g.close()
s = sio.getvalue()
request.setHeader("Content-Encoding", "gzip")
request.setHeader("content-length", str(len(s)))
request.write(s)
request.finish()
def _ebRender(self, failure):
self._err(failure)
if isinstance(failure.value, Fault):
return failure.value
return Fault(self.FAILURE, "An unhandled exception occurred: %s" %
failure.getErrorMessage())
def _getFunction(self, functionPath):
"""Given a string, return a function, or raise NoSuchFunction.
This returned function will be called, and should return the result
of the call, a Deferred, or a Fault instance.
Override in subclasses if you want your own policy. The default
policy is that given functionPath 'foo', return the method at
self.ebrpc_foo, i.e. getattr(self, "ebrpc_" + functionPath).
If functionPath contains self.separator, the sub-handler for
the initial prefix is used to search for the remaining path.
"""
if functionPath.find(self.separator) != -1:
prefix, functionPath = functionPath.split(self.separator, 1)
handler = self.getSubHandler(prefix)
if handler is None: raise NoSuchFunction(self.NOT_FOUND, "no such subHandler %s" % prefix)
return handler._getFunction(functionPath)
f = getattr(self, "ebrpc_%s" % functionPath, None)
if not f:
raise NoSuchFunction(self.NOT_FOUND, "function %s not found" % functionPath)
elif not callable(f):
raise NoSuchFunction(self.NOT_FOUND, "function %s not callable" % functionPath)
else:
return f
def _listFunctions(self):
"""Return a list of the names of all ebrpc methods."""
return reflect.prefixedMethodNames(self.__class__, 'ebrpc_')
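# Illustrative sketch only (not part of the original module): methods are
# published by subclassing EBRPC and prefixing their names with 'ebrpc_'.
# The class and method below are hypothetical.
class _ExampleEBRPC(EBRPC):
    def ebrpc_echo(self, value):
        # published under the name 'echo'; returning a Deferred or a
        # Handler instance works just as well.
        return value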
class EBRPCIntrospection(EBRPC):
"""Implement the EBRPC Introspection API.
By default, the methodHelp method returns the 'help' method attribute,
if it exists, otherwise the __doc__ method attribute, if it exists,
otherwise the empty string.
To enable the methodSignature method, add a 'signature' method attribute
containing a list of lists. See methodSignature's documentation for the
format. Note the type strings should be EBRPC types, not Python types.
"""
def __init__(self, parent):
"""Implement Introspection support for an EBRPC server.
@param parent: the EBRPC server to add Introspection support to.
"""
EBRPC.__init__(self)
self._ebrpc_parent = parent
def ebrpc_listMethods(self):
"""Return a list of the method names implemented by this server."""
functions = []
todo = [(self._ebrpc_parent, '')]
while todo:
obj, prefix = todo.pop(0)
functions.extend([ prefix + name for name in obj._listFunctions() ])
todo.extend([ (obj.getSubHandler(name),
prefix + name + obj.separator)
for name in obj.getSubHandlerPrefixes() ])
return functions
ebrpc_listMethods.signature = [['array']]
def ebrpc_methodHelp(self, method):
"""Return a documentation string describing the use of the given method.
"""
method = self._ebrpc_parent._getFunction(method)
return (getattr(method, 'help', None)
or getattr(method, '__doc__', None) or '')
ebrpc_methodHelp.signature = [['string', 'string']]
def ebrpc_methodSignature(self, method):
"""Return a list of type signatures.
Each type signature is a list of the form [rtype, type1, type2, ...]
where rtype is the return type and typeN is the type of the Nth
argument. If no signature information is available, the empty
string is returned.
"""
method = self._ebrpc_parent._getFunction(method)
return getattr(method, 'signature', None) or ''
ebrpc_methodSignature.signature = [['array', 'string'],
['string', 'string']]
def addIntrospection(ebrpc):
"""Add Introspection support to an EBRPC server.
@param ebrpc: The ebrpc server to add Introspection support to.
"""
ebrpc.putSubHandler('system', EBRPCIntrospection(ebrpc))
class Query(object):
    def __init__(self, path, host, method, user=None, password=None, *args):
self.path = path
self.host = host
self.user = user
self.password = password
self.method = method
self.payload = ebrpc.dumps(args, method)
self.deferred = defer.Deferred()
self.decode = False
class QueryProtocol(http.HTTPClient):
# All current queries are pipelined over the connection at
# once. When the connection is made, or as queries are made
# while a connection exists, queries are all sent to the
# server. Pipelining limits can be controlled by the caller.
# When a query completes (see parseResponse), if there are no
# more queries then an idle timeout gets sets.
# The QueryFactory reopens the connection if another query occurs.
#
    # twisted_ebrpc does not currently provide a mechanism for
# per-query timeouts. This could be added with another
# timeout_call mechanism that calls loseConnection and pops the
# current query with an errback.
timeout = 300 # idle timeout.
def log(self, msg, *a):
print "%s: %s: %r" % (self.peer, msg, a)
def connectionMade(self):
http.HTTPClient.connectionMade(self)
self.current_queries = []
self.timeout_call = None
if pipeline_debug:
p = self.transport.getPeer()
p = "%s:%d" % (p.host, p.port)
self.peer = (id(self.transport), p)
self.factory.connectionMade(self)
def _cancelTimeout(self):
if self.timeout_call and self.timeout_call.active():
self.timeout_call.cancel()
self.timeout_call = None
def connectionLost(self, reason):
http.HTTPClient.connectionLost(self, reason)
if pipeline_debug: self.log('connectionLost', reason.getErrorMessage())
self._cancelTimeout()
if self.current_queries:
# queries failed, put them back
if pipeline_debug: self.log('putting back', [q.method for q in self.current_queries])
self.factory.prependQueries(self.current_queries)
self.factory.connectionLost(self)
def sendCommand(self, command, path):
self.transport.write('%s %s HTTP/1.1\r\n' % (command, path))
def setLineMode(self, rest):
# twisted is stupid.
self.firstLine = 1
return http.HTTPClient.setLineMode(self, rest)
def sendQuery(self):
self._cancelTimeout()
query = self.factory.popQuery()
if pipeline_debug: self.log('sending', query.method)
self.current_queries.append(query)
self.sendCommand('POST', query.path)
self.sendHeader('User-Agent', 'BTL/EBRPC 1.0')
self.sendHeader('Host', query.host)
self.sendHeader('Accept-encoding', 'gzip')
self.sendHeader('Connection', 'Keep-Alive')
self.sendHeader('Content-type', 'application/octet-stream')
self.sendHeader('Content-length', str(len(query.payload)))
#if query.user:
# auth = '%s:%s' % (query.user, query.password)
# auth = auth.encode('base64').strip()
# self.sendHeader('Authorization', 'Basic %s' % (auth,))
self.endHeaders()
self.transport.write(query.payload)
def parseResponse(self, contents):
query = self.current_queries.pop(0)
if pipeline_debug: self.log('responded', query.method)
if not self.current_queries:
assert not self.factory.anyQueries()
assert not self.timeout_call
self.timeout_call = reactor.callLater(self.timeout,
self.transport.loseConnection)
try:
response = ebrpc.loads(contents)
except Exception, e:
query.deferred.errback(failure.Failure())
del query.deferred
else:
query.deferred.callback(response[0][0])
del query.deferred
def badStatus(self, status, message):
query = self.current_queries.pop(0)
if pipeline_debug: self.log('failed', query.method)
try:
raise ValueError(status, message)
except:
query.deferred.errback(failure.Failure())
del query.deferred
self.transport.loseConnection()
def handleStatus(self, version, status, message):
if status != '200':
self.badStatus(status, message)
def handleHeader(self, key, val):
if not self.current_queries[0].decode:
if key.lower() == 'content-encoding' and val.lower() == 'gzip':
self.current_queries[0].decode = True
def handleResponse(self, contents):
if self.current_queries[0].decode:
s = StringIO()
s.write(contents)
s.seek(-1)
g = GzipFile(fileobj=s, mode='rb')
contents = g.read()
g.close()
self.parseResponse(contents)
class QueryFactory(object):
def __init__(self):
self.queries = []
self.instance = None
def connectionMade(self, instance):
self.instance = instance
if pipeline_debug: print 'connection made %s' % str(instance.peer)
while self.anyQueries():
self.instance.sendQuery()
def connectionLost(self, instance):
assert self.instance == instance
if pipeline_debug: print 'connection lost %s' % str(instance.peer)
self.instance = None
def prependQueries(self, queries):
self.queries = queries + self.queries
def popQuery(self):
return self.queries.pop(0)
def anyQueries(self):
return bool(self.queries)
def addQuery(self, query):
self.queries.append(query)
if pipeline_debug: print 'addQuery: %s %s' % (self.instance, self.queries)
if self.instance:
self.instance.sendQuery()
def disconnect(self):
if not self.instance:
return
if not hasattr(self.instance, 'transport'):
return
self.instance.transport.loseConnection()
class PersistantSingletonFactory(QueryFactory, SmartReconnectingClientFactory):
def clientConnectionFailed(self, connector, reason):
if pipeline_debug: print 'clientConnectionFailed %s' % str(connector)
return SmartReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
def clientConnectionLost(self, connector, unused_reason):
self.started = False
if not self.anyQueries():
self.continueTrying = False
return SmartReconnectingClientFactory.clientConnectionLost(self, connector, unused_reason)
class SingletonFactory(QueryFactory, protocol.ClientFactory):
def clientConnectionFailed(self, connector, reason):
if pipeline_debug: print 'clientConnectionFailed %s' % str(connector)
queries = list(self.queries)
del self.queries[:]
for query in queries:
query.deferred.errback(reason)
self.started = False
class Proxy:
"""A Proxy for making remote EBRPC calls.
Pass the URL of the remote EBRPC server to the constructor.
Use proxy.callRemote('foobar', *args) to call remote method
'foobar' with *args.
"""
def __init__(self, url, user=None, password=None, retry_forever = True):
"""
@type url: C{str}
@param url: The URL to which to post method calls. Calls will be made
over SSL if the scheme is HTTPS. If netloc contains username or
password information, these will be used to authenticate, as long as
the C{user} and C{password} arguments are not specified.
@type user: C{str} or None
@param user: The username with which to authenticate with the server
when making calls. If specified, overrides any username information
embedded in C{url}. If not specified, a value may be taken from C{url}
if present.
@type password: C{str} or None
@param password: The password with which to authenticate with the
server when making calls. If specified, overrides any password
information embedded in C{url}. If not specified, a value may be taken
from C{url} if present.
"""
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
netlocParts = netloc.split('@')
if len(netlocParts) == 2:
userpass = netlocParts.pop(0).split(':')
self.user = userpass.pop(0)
try:
self.password = userpass.pop(0)
except:
self.password = None
else:
self.user = self.password = None
hostport = netlocParts[0].split(':')
self.host = hostport.pop(0)
try:
self.port = int(hostport.pop(0))
except:
self.port = None
self.path = path
if self.path in ['', None]:
self.path = '/'
self.secure = (scheme == 'https')
if user is not None:
self.user = user
if password is not None:
self.password = password
if not retry_forever:
_Factory = SingletonFactory
else:
_Factory = PersistantSingletonFactory
self.factory = _Factory()
self.factory.started = False
self.factory.protocol = QueryProtocol
def callRemote(self, method, *args, **kwargs):
if pipeline_debug: print 'callRemote to %s : %s' % (self.host, method)
args = (args, kwargs)
query = Query(self.path, self.host, method, self.user,
self.password, *args)
self.factory.addQuery(query)
if pipeline_debug: print 'factory started: %s' % self.factory.started
if not self.factory.started:
self.factory.started = True
def connect(host):
if self.secure:
if pipeline_debug: print 'connecting to %s' % str((host, self.port or 443))
from twisted.internet import ssl
reactor.connectSSL(host, self.port or 443,
self.factory, ssl.ClientContextFactory(),
timeout=60)
else:
if pipeline_debug: print 'connecting to %s' % str((host, self.port or 80))
reactor.connectTCP(host, self.port or 80, self.factory,
timeout=60)
df = reactor.resolve(self.host)
df.addCallback(connect)
df.addErrback(query.deferred.errback)
return query.deferred
class AsyncServerProxy(object):
def __init__(self, base_url, username=None, password=<PASSWORD>, debug=False,
retry_forever = True):
self.base_url = base_url
self.username = username
self.password = password
self.proxy = Proxy(self.base_url, self.username, self.password, retry_forever)
self.debug = debug
def __getattr__(self, attr):
return self._make_call(attr)
def _make_call(self, methodname):
return lambda *a, **kw : self._method(methodname, *a, **kw)
def _method(self, methodname, *a, **kw):
# in case they have changed
self.proxy.user = self.username
self.proxy.password = <PASSWORD>.password
if self.debug:
print ('callRemote:', self.__class__.__name__,
self.base_url, methodname, a, kw)
df = self.proxy.callRemote(methodname, *a, **kw)
return df
class EitherServerProxy(object):
SYNC = 0
ASYNC = 1
SYNC_DEFERRED = 2 # BE CAREFUL to call getResult() on the returned Deferred!
"""Server Proxy that supports both asynchronous and synchronous calls."""
def __init__(self, base_url, username = None, password = <PASSWORD>, debug = False,
async = ASYNC, retry_forever = True ):
"""
The EitherServerProxy can make either synchronous or asynchronous calls.
The default is specified by the async parameter to __init__, but each
individual call can override the default behavior by passing 'async' as
a boolean keyword argument to any method call. The async keyword
argument can also be set to None. However, passing async as
None means simply 'use default behavior'. When calling with async=SYNC,
you should not be in the same thread as the reactor or you risk
blocking the reactor.
@param async: determines whether the default is asynchronous or blocking calls."""
assert async in [SYNC, ASYNC, SYNC_DEFERRED]
self.async = async
self.async_proxy = AsyncServerProxy( base_url, username, password, debug,
retry_forever = retry_forever )
# HERE HACK. retry_forever is not supported by ServerProxy.
self.sync_proxy = ServerProxy( base_url )
def __getattr__(self, attr):
return self._make_call(attr)
def _make_call(self, methodname):
return lambda *a, **kw : self._method(methodname, *a, **kw)
def _method(self, methodname, *a, **kw ):
async = kw.pop('async', self.async)
if async is None:
async = self.async
if async == ASYNC:
df = self.async_proxy._method(methodname, *a, **kw)
elif async == SYNC_DEFERRED:
df = defer.execute(getattr(self.sync_proxy, methodname), *a, **kw)
else:
return self.sync_proxy.__getattr__(methodname)(*a, **kw)
return df
SYNC = EitherServerProxy.SYNC
ASYNC = EitherServerProxy.ASYNC
SYNC_DEFERRED = EitherServerProxy.SYNC_DEFERRED
__all__ = ["EBRPC", "Handler", "NoSuchFunction", "Fault", "Proxy", "AsyncServerProxy", "EitherServerProxy"]
```
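A minimal usage sketch for the `Proxy` class above, assuming the module is importable as `BTL.twisted_ebrpc` inside a Twisted environment; the tracker URL and method name are placeholders. `callRemote` returns a Deferred, and queries are pipelined by the `QueryFactory`.
```python
# Hedged sketch: URL and method name are hypothetical; requires Twisted and the BTL package.
from twisted.internet import reactor
from BTL.twisted_ebrpc import Proxy

proxy = Proxy('http://tracker.example.com:8080/RPC2')

def on_result(result):
    print('echo returned: %r' % (result,))
    reactor.stop()

def on_error(failure):
    print('call failed: %s' % failure.getErrorMessage())
    reactor.stop()

# callRemote returns the query's Deferred; the factory reconnects and resends as configured.
df = proxy.callRemote('echo', 'hello')
df.addCallback(on_result)
df.addErrback(on_error)
reactor.run()
```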
#### File: BitTorrent-5.2.2/BTL/xmlrpclib2.py
```python
import xml
import xmlrpclib
from connection_cache import PyCURL_Cache, cache_set
import pycurllib
pycurllib.set_use_compression(True)
class PyCurlTransport(xmlrpclib.Transport):
def __init__(self, cache, max_connects=None, timeout=None):
self.host = None
self.cache = cache
self.max_connects = max_connects
self.timeout = timeout
def request(self, host, handler, request_body, verbose=0):
for i in xrange(0):
try:
return self._request(host, handler, request_body, verbose)
except:
pass
return self._request(host, handler, request_body, verbose)
def _set_connection_params(self, h):
h.add_header('User-Agent', "xmlrpclib2.py/2.0")
h.add_header('Connection', "Keep-Alive")
h.add_header('Content-Type', "application/octet-stream")
# this timeout is intended to save us from tomcat not responding
# and locking the site
if None != self.timeout:
h.set_timeout(self.timeout)
else:
h.set_timeout(2000) # for backwards compatibility, keep old default
if None != self.max_connects:
h.set_max_connects(self.max_connects)
def _request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.cache.get_connection()
try:
self._set_connection_params(h)
h.add_data(request_body)
response = pycurllib.urlopen(h, close=False)
except:
# connection may no longer be valid
self.cache.destroy_connection(h)
raise
self.cache.put_connection(h)
if response.code != 200:
raise xmlrpclib.ProtocolError(
host + handler,
response.code, response.msg,
'N/A',
)
self.verbose = verbose
return self._parse_response(response)
def _parse_response(self, response):
# read response from input file/socket, and parse it
p, u = self.getparser()
d = response.getvalue()
try:
p.feed(d)
except xml.parsers.expat.ExpatError, e:
n = xml.parsers.expat.ExpatError("%s : %s" % (e, d))
try:
n.code = e.code
n.lineno = e.lineno
n.offset = e.offset
except:
pass
raise n
p.close()
return u.close()
def new_server_proxy(url, max_connects=None, timeout=None):
c = cache_set.get_cache(PyCURL_Cache, url, max_per_cache=max_connects)
t = PyCurlTransport(c, max_connects=max_connects, timeout=timeout)
return xmlrpclib.ServerProxy(url, transport=t)
ServerProxy = new_server_proxy
```
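A short usage sketch for `new_server_proxy` above, assuming the `BTL` helpers (`connection_cache`, `pycurllib`) are importable; the endpoint and method name are placeholders.
```python
# Hedged sketch: endpoint and method are hypothetical.
from BTL.xmlrpclib2 import new_server_proxy

# Connections are pooled per URL via PyCURL_Cache; timeout falls back to 2000 when omitted.
server = new_server_proxy('http://tracker.example.com/RPC2', max_connects=4, timeout=30)
print(server.some_method('arg'))
```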
#### File: BitTorrent-5.2.2/khashmir/utkhashmir.py
```python
import khashmir, knode
from actions import *
from khash import newID
from krpc import KRPCProtocolError, KRPCFailSilently
from BTL.cache import Cache
from sha import sha
from util import *
from BTL.stackthreading import Thread
from socket import gethostbyname
from const import *
from kstore import sample
TOKEN_UPDATE_INTERVAL = 5 * 60 # five minutes
NUM_PEERS = 50 # number of peers to return
class UTNode(knode.KNodeBase):
def announcePeer(self, info_hash, port, khashmir_id):
assert type(port) == type(1)
assert type(info_hash) == type('')
assert type(khashmir_id) == type('')
assert len(info_hash) == 20
assert len(khashmir_id) == 20
try:
token = self.table.tcache[self.id]
except:
token = None
if token:
assert type(token) == type(""), repr(token)
# not true
#assert len(token) == 20, repr(token)
df = self.conn().sendRequest('announce_peer', {'info_hash':info_hash,
'port':port,
'id':khashmir_id,
'token':token})
else:
raise KRPCProtocolError("no write token for node")
df.addErrback(self.errBack)
df.addCallback(self.checkSender)
return df
def getPeers(self, info_hash, khashmir_id):
df = self.conn().sendRequest('get_peers', {'info_hash':info_hash, 'id':khashmir_id})
df.addErrback(self.errBack)
df.addCallback(self.checkSender)
return df
def checkSender(self, dict):
d = knode.KNodeBase.checkSender(self, dict)
try:
token = d['rsp']['token']
assert type(token) == type(""), repr(token)
# not true
#assert len(token) == 20, repr(token)
self.table.tcache[d['rsp']['id']] = token
except KeyError:
pass
return d
class UTStoreValue(StoreValue):
def callNode(self, node, f):
return f(self.target, self.value, node.token, self.table.node.id)
class UTKhashmir(khashmir.KhashmirBase):
_Node = UTNode
def setup(self, host, port, data_dir, rlcount, checkpoint=True):
khashmir.KhashmirBase.setup(self, host, port,data_dir, rlcount, checkpoint)
self.cur_token = self.last_token = sha('')
self.tcache = Cache()
self.gen_token(loop=True)
self.expire_cached_tokens(loop=True)
def expire_cached_tokens(self, loop=False):
self.tcache.expire(time() - TOKEN_UPDATE_INTERVAL)
if loop:
self.rawserver.external_add_task(TOKEN_UPDATE_INTERVAL,
self.expire_cached_tokens, True)
def gen_token(self, loop=False):
self.last_token = self.cur_token
self.cur_token = sha(newID())
if loop:
self.rawserver.external_add_task(TOKEN_UPDATE_INTERVAL,
self.gen_token, True)
def get_token(self, host, port):
x = self.cur_token.copy()
x.update("%s%s" % (host, port))
h = x.digest()
return h
def val_token(self, token, host, port):
x = self.cur_token.copy()
x.update("%s%s" % (host, port))
a = x.digest()
if token == a:
return True
x = self.last_token.copy()
x.update("%s%s" % (host, port))
b = x.digest()
if token == b:
return True
return False
def addContact(self, host, port, callback=None):
# use dns on host, then call khashmir.addContact
Thread(target=self._get_host, args=[host, port, callback]).start()
def _get_host(self, host, port, callback):
# this exception catch can go away once we actually fix the bug
try:
ip = gethostbyname(host)
except TypeError, e:
raise TypeError(str(e) + (": host(%s) port(%s)" % (repr(host), repr(port))))
self.rawserver.external_add_task(0, self._got_host, ip, port, callback)
def _got_host(self, host, port, callback):
khashmir.KhashmirBase.addContact(self, host, port, callback)
def announcePeer(self, info_hash, port, callback=None):
""" stores the value for key in the global table, returns immediately, no status
in this implementation, peers respond but don't indicate status to storing values
a key can have many values
"""
def _storeValueForKey(nodes, key=info_hash, value=port, response=callback , table=self.table):
if not response:
# default callback
def _storedValueHandler(sender):
pass
response=_storedValueHandler
action = UTStoreValue(self, key, value, response, self.rawserver.add_task, "announcePeer")
self.rawserver.external_add_task(0, action.goWithNodes, nodes)
# this call is asynch
self.findNode(info_hash, _storeValueForKey)
def krpc_announce_peer(self, info_hash, port, id, token, _krpc_sender):
sender = {'id' : id}
sender['host'] = _krpc_sender[0]
sender['port'] = _krpc_sender[1]
if not self.val_token(token, sender['host'], sender['port']):
raise KRPCProtocolError("Invalid Write Token")
value = compact_peer_info(_krpc_sender[0], port)
self.store[info_hash] = value
n = self.Node().initWithDict(sender)
self.insertNode(n, contacted=0)
return {"id" : self.node.id}
def retrieveValues(self, key):
try:
l = self.store.sample(key, NUM_PEERS)
except KeyError:
l = []
return l
def getPeers(self, info_hash, callback, searchlocal = 1):
""" returns the values found for key in global table
callback will be called with a list of values for each peer that returns unique values
final callback will be an empty list - probably should change to 'more coming' arg
"""
nodes = self.table.findNodes(info_hash, invalid=True)
l = [x for x in nodes if x.invalid]
if len(l) > 4:
nodes = sample(l , 4) + self.table.findNodes(info_hash, invalid=False)[:4]
# get locals
if searchlocal:
l = self.retrieveValues(info_hash)
if len(l) > 0:
self.rawserver.external_add_task(0, callback, [reducePeers(l)])
else:
l = []
# create our search state
state = GetValue(self, info_hash, callback, self.rawserver.add_task, 'getPeers')
self.rawserver.external_add_task(0, state.goWithNodes, nodes, l)
def getPeersAndAnnounce(self, info_hash, port, callback, searchlocal = 1):
""" returns the values found for key in global table
callback will be called with a list of values for each peer that returns unique values
final callback will be an empty list - probably should change to 'more coming' arg
"""
nodes = self.table.findNodes(info_hash, invalid=False)
nodes += self.table.findNodes(info_hash, invalid=True)
# get locals
if searchlocal:
l = self.retrieveValues(info_hash)
if len(l) > 0:
self.rawserver.external_add_task(0, callback, [reducePeers(l)])
else:
l = []
# create our search state
x = lambda a: a
state = GetAndStore(self, info_hash, port, callback, x, self.rawserver.add_task, 'getPeers', "announcePeer")
self.rawserver.external_add_task(0, state.goWithNodes, nodes, l)
def krpc_get_peers(self, info_hash, id, _krpc_sender):
sender = {'id' : id}
sender['host'] = _krpc_sender[0]
sender['port'] = _krpc_sender[1]
n = self.Node().initWithDict(sender)
self.insertNode(n, contacted=0)
l = self.retrieveValues(info_hash)
if len(l) > 0:
return {'values' : [reducePeers(l)],
"id": self.node.id,
"token" : self.get_token(sender['host'], sender['port'])}
else:
nodes = self.table.findNodes(info_hash, invalid=False)
nodes = [node.senderDict() for node in nodes]
return {'nodes' : packNodes(nodes),
"id": self.node.id,
"token" : self.get_token(sender['host'], sender['port'])}
```
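The `gen_token`/`get_token`/`val_token` trio above implements a rotating write-token scheme: each token hashes a periodically regenerated secret together with the requester's host and port, and the previous secret is kept so tokens issued just before a rotation remain valid. A self-contained sketch of the same idea using `hashlib` directly (names are illustrative, not part of the module above):
```python
import hashlib
import os

class TokenIssuer(object):
    def __init__(self):
        self.cur = hashlib.sha1(os.urandom(20))
        self.last = hashlib.sha1(b'')

    def rotate(self):
        # Corresponds to gen_token() running on its TOKEN_UPDATE_INTERVAL timer.
        self.last = self.cur
        self.cur = hashlib.sha1(os.urandom(20))

    def get_token(self, host, port):
        x = self.cur.copy()
        x.update(('%s%s' % (host, port)).encode())
        return x.digest()

    def val_token(self, token, host, port):
        # Accept tokens derived from either the current or the previous secret.
        for secret in (self.cur, self.last):
            x = secret.copy()
            x.update(('%s%s' % (host, port)).encode())
            if token == x.digest():
                return True
        return False

issuer = TokenIssuer()
t = issuer.get_token('198.51.100.7', 6881)
issuer.rotate()
assert issuer.val_token(t, '198.51.100.7', 6881)      # survives one rotation
issuer.rotate()
assert not issuer.val_token(t, '198.51.100.7', 6881)  # rejected after two rotations
```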
#### File: p2pScrapper/BitTorrent-5.2.2/launchmany-curses.py
```python
from __future__ import division
app_name = "BitTorrent"
from BTL.translation import _
DOWNLOAD_SCROLL_RATE = 1
import sys, os
from threading import Event
from time import time, localtime, strftime
from BTL.obsoletepythonsupport import *
from BTL.platform import encode_for_filesystem, decode_from_filesystem
from BitTorrent import platform
from BitTorrent.launchmanycore import LaunchMany
from BitTorrent.defaultargs import get_defaults
from BitTorrent.parseargs import parseargs, printHelp
from BitTorrent.prefs import Preferences
from BitTorrent import configfile
from BitTorrent import version
from BitTorrent import BTFailure
from BitTorrent import bt_log_fmt
import logging
import traceback
from logging import ERROR, WARNING, INFO
from BitTorrent import console, STDERR, inject_main_logfile
try:
curses = import_curses()
import curses.panel
from curses.wrapper import wrapper as curses_wrapper
from signal import signal, SIGWINCH
except:
print _("Textmode UI initialization failed, cannot proceed.")
print
print _("This download interface requires the standard Python module "
"\"curses\", which is unfortunately not available for the native "
"Windows port of Python. It is however available for the Cygwin "
"port of Python, running on all Win32 systems (www.cygwin.com).")
print
print _("You may still use \"launchmany-console.py\" to download.")
sys.exit(1)
exceptions = []
def fmttime(n):
if n <= 0:
return None
n = int(n)
m, s = divmod(n, 60)
h, m = divmod(m, 60)
if h > 1000000:
return _("connecting to peers")
return _("ETA in %d:%02d:%02d") % (h, m, s)
def fmtsize(n):
n = long(n)
unit = [' B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
i = 0
if (n > 999):
i = 1
while i + 1 < len(unit) and (n >> 10) >= 999:
i += 1
n >>= 10
n /= 1024
if i > 0:
size = '%.1f' % n + '%s' % unit[i]
else:
size = '%.0f' % n + '%s' % unit[i]
return size
def ljust(s, size):
s = s[:size]
return s + (' '*(size-len(s)))
def rjust(s, size):
s = s[:size]
return (' '*(size-len(s)))+s
class CursesDisplayer(object):
def __init__(self, scrwin):
self.messages = []
self.scroll_pos = 0
self.scroll_time = 0
self.scrwin = scrwin
signal(SIGWINCH, self.winch_handler)
self.changeflag = Event()
self._remake_window()
curses.use_default_colors()
def winch_handler(self, signum, stackframe):
self.changeflag.set()
curses.endwin()
self.scrwin.noutrefresh()
self.scrwin = curses.newwin(0, 0, 0, 0)
self._remake_window()
self._display_messages()
def _remake_window(self):
self.scrh, self.scrw = self.scrwin.getmaxyx()
self.scrpan = curses.panel.new_panel(self.scrwin)
self.mainwinh = (2*self.scrh)//3
self.mainwinw = self.scrw - 4 # - 2 (bars) - 2 (spaces)
self.mainwiny = 2 # + 1 (bar) + 1 (titles)
self.mainwinx = 2 # + 1 (bar) + 1 (space)
# + 1 to all windows so we can write at mainwinw
self.mainwin = curses.newwin(self.mainwinh, self.mainwinw+1,
self.mainwiny, self.mainwinx)
self.mainpan = curses.panel.new_panel(self.mainwin)
self.mainwin.scrollok(0)
self.mainwin.nodelay(1)
self.mainwin.clearok(1)
self.headerwin = curses.newwin(1, self.mainwinw+1,
1, self.mainwinx)
self.headerpan = curses.panel.new_panel(self.headerwin)
self.headerwin.scrollok(0)
self.headerwin.clearok(0)
self.totalwin = curses.newwin(1, self.mainwinw+1,
self.mainwinh+1, self.mainwinx)
self.totalpan = curses.panel.new_panel(self.totalwin)
self.totalwin.scrollok(0)
self.totalwin.clearok(0)
self.statuswinh = self.scrh-4-self.mainwinh
self.statuswin = curses.newwin(self.statuswinh, self.mainwinw+1,
self.mainwinh+3, self.mainwinx)
self.statuspan = curses.panel.new_panel(self.statuswin)
self.statuswin.scrollok(0)
self.statuswin.clearok(1)
try:
self.scrwin.border(ord('|'),ord('|'),ord('-'),ord('-'),ord(' '),ord(' '),ord(' '),ord(' '))
except:
pass
rcols = (_("Size"),_("Download"),_("Upload"))
rwids = (9, 11, 11)
rwid = sum(rwids)
start = self.mainwinw - rwid
self.headerwin.addnstr(0, 2, '#', start, curses.A_BOLD)
self.headerwin.addnstr(0, 4, _("Filename"), start, curses.A_BOLD)
for s,w in zip(rcols, rwids):
st = start + max(w - len(s), 0)
self.headerwin.addnstr(0, st, s[:w], len(s[:w]), curses.A_BOLD)
start += w
self.totalwin.addnstr(0, self.mainwinw - 29, _("Totals:"), 7, curses.A_BOLD)
self._display_messages()
curses.panel.update_panels()
curses.doupdate()
self.changeflag.clear()
def _display_line(self, s, bold = False):
if self.disp_end:
return True
line = self.disp_line
self.disp_line += 1
if line < 0:
return False
if bold:
self.mainwin.addnstr(line, 0, s, self.mainwinw, curses.A_BOLD)
else:
self.mainwin.addnstr(line, 0, s, self.mainwinw)
if self.disp_line >= self.mainwinh:
self.disp_end = True
return self.disp_end
def _display_data(self, data):
if 3*len(data) <= self.mainwinh:
self.scroll_pos = 0
self.scrolling = False
elif self.scroll_time + DOWNLOAD_SCROLL_RATE < time():
self.scroll_time = time()
self.scroll_pos += 1
self.scrolling = True
if self.scroll_pos >= 3*len(data)+2:
self.scroll_pos = 0
i = self.scroll_pos//3
self.disp_line = (3*i)-self.scroll_pos
self.disp_end = False
while not self.disp_end:
ii = i % len(data)
if i and not ii:
if not self.scrolling:
break
self._display_line('')
if self._display_line(''):
break
( name, status, progress, peers, seeds, seedsmsg, #dist,
uprate, dnrate, upamt, dnamt, size, t, msg ) = data[ii]
t = fmttime(t)
if t:
status = t
name = ljust(name,self.mainwinw-35)
size = rjust(fmtsize(size), 9)
dnrate = rjust('%s/s' % fmtsize(dnrate), 11)
uprate = rjust('%s/s' % fmtsize(uprate), 11)
line = "%3d %s%s%s%s" % (ii+1, name, size, dnrate, uprate)
self._display_line(line, True)
if peers + seeds:
datastr = _(" (%s) %s - %s peers %s seeds - %s dn %s up") % (
progress, status, peers, seeds, #dist,
fmtsize(dnamt), fmtsize(upamt) )
else:
datastr = ' '+status+' ('+progress+')'
self._display_line(datastr)
self._display_line(' '+ljust(msg,self.mainwinw-4))
i += 1
def display(self, data):
try:
if self.changeflag.isSet():
return
inchar = self.mainwin.getch()
if inchar == 12: # ^L
self._remake_window()
self.mainwin.erase()
if data:
self._display_data(data)
else:
self.mainwin.addnstr( 1, self.mainwinw//2-5,
_("no torrents"), 12, curses.A_BOLD )
totalup = 0
totaldn = 0
for ( name, status, progress, peers, seeds, seedsmsg, #dist,
uprate, dnrate, upamt, dnamt, size, t, msg ) in data:
totalup += uprate
totaldn += dnrate
totalup = '%s/s' % fmtsize(totalup)
totaldn = '%s/s' % fmtsize(totaldn)
self.totalwin.erase()
self.totalwin.addnstr(0, self.mainwinw-29, _("Totals:"), 7, curses.A_BOLD)
self.totalwin.addnstr(0, self.mainwinw-22 + (11-len(totaldn)),
totaldn, 11, curses.A_BOLD)
self.totalwin.addnstr(0, self.mainwinw-11 + (11-len(totalup)),
totalup, 11, curses.A_BOLD)
curses.panel.update_panels()
curses.doupdate()
except:
pass
return inchar in (ord('q'),ord('Q'))
def message(self, s):
try:
self.messages.append(strftime('%x %X - ',localtime(time()))+s)
self._display_messages()
except:
pass
def _display_messages(self):
self.statuswin.erase()
winpos = 0
for s in self.messages[-self.statuswinh:]:
self.statuswin.addnstr(winpos, 0, s, self.mainwinw)
winpos += 1
curses.panel.update_panels()
curses.doupdate()
def exception(self, s):
exceptions.append(s)
self.message(_("SYSTEM ERROR - EXCEPTION GENERATED"))
def modify_default( defaults_tuplelist, key, newvalue ):
name,value,doc = [(n,v,d) for (n,v,d) in defaults_tuplelist if n == key][0]
defaults_tuplelist = [(n,v,d) for (n,v,d) in defaults_tuplelist
if not n == key]
defaults_tuplelist.append( (key,newvalue,doc) )
return defaults_tuplelist
if __name__ == '__main__':
uiname = 'launchmany-curses'
defaults = get_defaults(uiname)
try:
if len(sys.argv) < 2:
printHelp(uiname, defaults)
sys.exit(1)
# Modifying default values from get_defaults is annoying...
# Implementing specific default values for each uiname in
# defaultargs.py is even more annoying. --Dave
ddir = os.path.join( platform.get_dot_dir(), "launchmany-curses" )
ddir = decode_from_filesystem(ddir)
        defaults = modify_default(defaults, 'data_dir', ddir)
config, args = configfile.parse_configuration_and_args(defaults,
uiname, sys.argv[1:], 0, 1)
if args:
torrent_dir = args[0]
config['torrent_dir'] = decode_from_filesystem(torrent_dir)
else:
torrent_dir = config['torrent_dir']
torrent_dir,bad = encode_for_filesystem(torrent_dir)
if bad:
raise BTFailure(_("Warning: ")+config['torrent_dir']+
_(" is not a directory"))
if not os.path.isdir(torrent_dir):
raise BTFailure(_("Warning: ")+torrent_dir+
_(" is not a directory"))
# the default behavior is to save_in files to the platform
# get_save_dir. For launchmany, if no command-line argument
# changed the save directory then use the torrent directory.
if config['save_in'] == platform.get_save_dir():
config['save_in'] = config['torrent_dir']
except BTFailure, e:
print _("error: ") + unicode(e.args[0]) + \
_("\nrun with no args for parameter explanations")
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
inject_main_logfile()
class LaunchManyApp(object):
class LogHandler(logging.Handler):
def __init__(self, level, displayer):
logging.Handler.__init__(self,level)
self.displayer = displayer
def emit(self, record):
if len(record.getMessage()) > 0:
self.displayer.message(record.getMessage() )
if record.exc_info is not None:
self.displayer.message(
"Traceback (most recent call last):" )
tb = record.exc_info[2]
stack = traceback.extract_tb(tb)
l = traceback.format_list(stack)
for s in l:
self.displayer.message( " %s" % s )
self.displayer.message( " %s: %s" %
( str(record.exc_info[0]),str(record.exc_info[1])))
def __init__(self):
pass
def run(self,scrwin, config):
self.displayer = CursesDisplayer(scrwin)
log_handler = LaunchManyApp.LogHandler(STDERR, self.displayer)
log_handler.setFormatter(bt_log_fmt)
logging.getLogger('').addHandler(log_handler)
logging.getLogger().setLevel(STDERR)
logging.getLogger('').removeHandler(console)
# more liberal with logging launchmany-curses specific output.
lmany_logger = logging.getLogger('launchmany-curses')
lmany_handler = LaunchManyApp.LogHandler(INFO, self.displayer)
lmany_handler.setFormatter(bt_log_fmt)
lmany_logger.setLevel(INFO)
lmany_logger.addHandler(lmany_handler)
config = Preferences().initWithDict(config)
LaunchMany(config, self.displayer.display, 'launchmany-curses')
app = LaunchManyApp()
curses_wrapper(app.run, config)
if exceptions:
print _("\nEXCEPTION:")
print exceptions[0]
```
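`modify_default` above swaps one `(name, value, doc)` entry in the defaults list and returns the rebuilt list rather than mutating in place, which is why the caller must capture the return value. A standalone illustration with made-up entries:
```python
def modify_default(defaults_tuplelist, key, newvalue):
    # Same pattern as above: keep the doc string, drop the old entry, append the new one.
    name, value, doc = [(n, v, d) for (n, v, d) in defaults_tuplelist if n == key][0]
    defaults_tuplelist = [(n, v, d) for (n, v, d) in defaults_tuplelist if not n == key]
    defaults_tuplelist.append((key, newvalue, doc))
    return defaults_tuplelist

defaults = [('data_dir', '/tmp', 'where to keep state'),
            ('max_uploads', 4, 'number of upload slots')]
defaults = modify_default(defaults, 'data_dir', '/home/user/.launchmany')
print(defaults)
# [('max_uploads', 4, 'number of upload slots'),
#  ('data_dir', '/home/user/.launchmany', 'where to keep state')]
```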
#### File: BitTorrent-5.2.2/test/test_Feeds.py
```python
import sys
sys.path = ['.',] + sys.path #HACK
import os
from BTL.platform import plugin_path, app_root
plugin_path.append(os.path.join(app_root[:-5], 'BitTorrent', 'Plugins')) #HACK
from BitTorrent.FeedManager import FeedManager
def gui_wrap(f, *a):
f(*a)
feedmanager = FeedManager({}, gui_wrap)
# Test RSS 2 feed:
feed = 'http://www.prodigem.com/torrents/rss/pep_delicious.xml'
feedmanager.new_channel(feed)
# Test RAW feed:
feed = 'http://search.bittorrent.com/search.jsp?query=Ubuntu&Submit2=Search'
feedmanager.new_channel(feed)
import time
time.sleep(10)
``` |
{
"source": "jpablo128/simplystatic",
"score": 3
} |
#### File: simplystatic/bin/addrandompages.py
```python
import argparse
import sys
import time
import os.path
import os
import random
# ONLY FOR DEVELOPMENT TESTING add one directory up to the sys.path
# so the imports work
#sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from simplystatic import s2site
from simplystatic import s2page
def setup_parser():
'''Set up the command-line options.'''
parser = argparse.ArgumentParser(description='Add random pages to existing site.')
parser.add_argument('-d','--directory', action='store', default= os.getcwd(),
help='Site directory (must be a valid s2 structure).')
parser.add_argument('-n','--number', action='store', type=int, default = 20,
help='Number of pages to generate.')
return parser
def make_site_obj(argdict):
'''Instantiate and return the site. This will be used for all commands'''
d = os.getcwd() #'.'
if 'directory' in argdict:
d = argdict['directory']
try:
s = s2site.Site(d)
except:
print "Could not instantiate site object."
sys.exit()
return s
all_tags = ['tag1','tag2','tag3','tag4']
if __name__ == "__main__":
parser = setup_parser()
args = parser.parse_args()
argdict = vars(args)
site = make_site_obj(argdict)
if site.tree_ready:
for i in range(1,argdict['number']+1):
ptags = random.sample(all_tags,random.randint(1,len(all_tags)))
p = site.random_page(tags=ptags)
p.set_published()
p.write()
print "added page ",p.slug
``` |
{
"source": "jpablocardona/platzi-django-advance",
"score": 3
} |
#### File: users/permissions/users.py
```python
from rest_framework.permissions import BasePermission
# Models
from cride.users.models import User
class IsAccountOwner(BasePermission):
"""Allow access only to objects owned by the requesting user"""
def has_object_permission(self, request, view, obj):
"""Check object user"""
return request.user == obj
```
#### File: users/serializers/users.py
```python
import jwt
from django.conf import settings
from django.db import models
from django.contrib.auth import authenticate, password_validation
from django.core.validators import RegexValidator
from django.contrib.auth.hashers import make_password
# Djangto rest framework
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from rest_framework.validators import UniqueValidator
# Task
from cride.taskapp.tasks import send_confirmation_email
# User
from cride.users.models import User, Profile
# Serializers
from .profiles import ProfileModelSerializer
class UserModelSerializer(serializers.ModelSerializer):
"""User model serializer"""
profile = ProfileModelSerializer(read_only=True)
class Meta:
model = User
fields = (
'username',
'first_name',
'last_name',
'email',
'phone_number',
'profile'
)
        # This pulls in the related model's information
# depth = 1
class UserLoginSerializer(serializers.Serializer):
""" User login serializers """
email = serializers.EmailField()
password = serializers.CharField(min_length=8)
def validate(self, data):
"""Check credencitals"""
user = authenticate(username=data['email'], password=data['password'])
if not user:
raise serializers.ValidationError('Invalid credentials')
if not user.is_verified:
            raise serializers.ValidationError('Account is not active yet :(')
self.context['user'] = user
return data
def create(self, data):
"""Generate token auth"""
token, created = Token.objects.get_or_create(user=self.context['user'])
return self.context['user'], token.key
class UserSingUpSerializer(serializers.Serializer):
""" User sing up serializers """
username = serializers.CharField(
min_length=4,
max_length=20,
validators=[UniqueValidator(queryset=User.objects.all())]
)
email = serializers.EmailField(validators=[UniqueValidator(queryset=User.objects.all())])
first_name = serializers.CharField(min_length=2, max_length=30)
last_name = serializers.CharField(min_length=2, max_length=30)
phone_regex = RegexValidator(
        regex=r'\+?1?\d{9,15}$',
message='phone number must be entered in the format: +9999999. Up to 15 digits allowed'
)
    phone_number = serializers.CharField(validators=[phone_regex], max_length=17, required=False, allow_blank=True)
password = serializers.CharField(min_length=8, max_length=64)
password_confirmation = serializers.CharField(min_length=8, max_length=64)
def validate(self, data):
""" Handle validate info user"""
passwd = data['password']
passwd_conf = data['password_confirmation']
if passwd != passwd_conf:
raise serializers.ValidationError('Passwords does not match')
password_validation.validate_password(passwd)
data['password'] = <PASSWORD>)
return data
def create(self, data):
""" Handle user create"""
data.pop('password_confirmation')
user = User.objects.create(**data, is_verified=False)
Profile.objects.create(user=user)
send_confirmation_email.delay(user_pk=user.id)
return user
class UserVerifiedSerializer(serializers.Serializer):
"""Account verification serialized"""
token = serializers.CharField()
def validate(self, data):
"""Handle validate account token"""
try:
            payload = jwt.decode(data['token'], settings.SECRET_KEY, algorithms=['HS256'])
except jwt.ExpiredSignatureError:
raise serializers.ValidationError('Verification token has expired')
except jwt.PyJWTError:
raise serializers.ValidationError('Invalid token')
if (payload.get('type', '') != 'email_confirmation'):
raise serializers.ValidationError('Incorrect token')
self.context['payload'] = payload
return data
def save(self):
"""Update users verified account"""
payload = self.context['payload']
user = User.objects.get(username=payload['user'])
if (user.is_verified):
raise serializers.ValidationError('User already verified')
user.is_verified = True
user.save()
``` |
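A hedged sketch of how these serializers are typically driven from a view or a Django shell, assuming a configured project with the `cride` apps installed and migrated; the import path and credentials are placeholders inferred from the file layout above.
```python
# Hedged sketch: assumes a configured Django/DRF project with the cride apps installed.
from cride.users.serializers.users import UserLoginSerializer

serializer = UserLoginSerializer(data={'email': 'user@example.com', 'password': 'supersecret1'})
serializer.is_valid(raise_exception=True)  # runs validate(): credentials and is_verified check
user, token = serializer.save()            # save() calls create(), which returns (user, token.key)
print(user.username, token)
```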
{
"source": "jpablortiz96/Inventario-python",
"score": 2
} |
#### File: jpablortiz96/Inventario-python/app.py
```python
from flask import Flask, render_template, request, flash
import utils
import os
import yagmail
import inventory
app = Flask(__name__)
app.secret_key = os.urandom(24)
@app.route('/')
def login():
return render_template('index.html')
@app.route('/recuperar')
def recuperar():
return render_template('recuperar.html')
@app.route('/main')
def principal():
IDs=[1,2,3,4,5,6,7,8,9]
inventory1=[inventory.Inventory(i, "Producto "+str(i), 2000) for i in IDs]
return render_template('principal.html', inventory=inventory1)
@app.route('/agregar')
def agregar():
return render_template('agregar.html')
@app.route('/editar')
def editar():
return render_template('editar.html')
if __name__ == '__main__':
app.run(debug=True)
```
#### File: jpablortiz96/Inventario-python/inventory.py
```python
class Inventory:
def __init__(self, ID, name, numInStock):
self.ID = ID
self.name = name
self.numInStock = numInStock
``` |
{
"source": "jpacanowski/django-blog",
"score": 2
} |
#### File: blog/templatetags/blog_tags.py
```python
from django.db.models import Count
from django import template
from ..models import Post
register = template.Library()
@register.simple_tag
def total_posts():
return Post.published.count()
@register.inclusion_tag('blog/post/most_commented_posts.html')
def show_most_commented_posts(count=5):
most_commented_posts = Post.published.annotate(
total_comments=Count('comments')
).order_by('-total_comments')[:count]
return {'most_commented_posts': most_commented_posts}
@register.inclusion_tag('blog/post/latest_posts.html')
def show_latest_posts(count=5):
latest_posts = Post.published.order_by('-published_at')[:count]
return {'latest_posts': latest_posts}
``` |
{
"source": "jpace121/dev_contain",
"score": 2
} |
#### File: dev_contain/dev_contain/start.py
```python
import argparse
import os
import sys
import subprocess
import pathlib
import dev_contain.common as common
def start(in_args):
parser = argparse.ArgumentParser(prog=sys.argv[0]+' start', description='Start a provided container.')
parser.add_argument('--image', '-i', help='Name of image to launch.')
parser.add_argument('--container', '-c', help='Name of the new container.')
parser.add_argument('--volume', '-v', action='append', help='Volume on local machine to mount at same location in container.')
parser.add_argument('--workdir', '-d', help='Directory to start in.')
parser.add_argument('--user', '-u', help='Username to login into container as.')
parser.add_argument('--graphics', '-X', action='store_true', help='Forward graphics.')
parser.add_argument('--no-ssh', '-S', action='store_true', help='Do not forward ssh keys.')
parser.add_argument('--args', '-a', help='Extra args to provide to the runtime. i.e. --args="--gpu"')
args = parser.parse_args(in_args)
manager = common.get_manager()
username = args.user
if not args.user:
username = os.environ['USER']
image = args.image
if not args.image:
image = 'jwp-build-latest'
volumes = args.volume
if not args.volume:
volumes = ['/home/{}/Develop'.format(username)]
container = args.container
if not args.container:
container = 'dev'
workdir = args.workdir
if not workdir:
workdir = '/home/{}'.format(username)
graphics_text = ''
if args.graphics:
graphics_text = set_up_graphics_forwards()
args_text = ''
if args.args:
args_text = args.args
# podman needs userns set to keep-id for volumes to work.
userns_text = ''
if manager == 'podman':
userns_text = '--userns=keep-id'
# Include volume for ssh keys.
ssh_text = ''
if not args.no_ssh:
if 'SSH_AUTH_SOCK' in os.environ.keys():
ssh_text = ('-v {ssh_auth_sock}:/.ssh_auth_sock '
'-e SSH_AUTH_SOCK=/.ssh_auth_sock').format(
ssh_auth_sock=os.environ['SSH_AUTH_SOCK'])
else:
print('SSH_AUTH_SOCK does not exist not adding ssh keys.')
volume_text = ''
for volume in volumes:
new_text = parse_volume(volume)
volume_text = volume_text + new_text
command = ('{manager} run -d'
' --user {username}'
' --name {container}'
' {userns_text}'
' --workdir {workdir}'
' --ipc=host'
' --net=host'
' -e DEV_CONTAIN_CONTAINER_NAME={container}'
' {volume_text}'
' {ssh_text} {graphics_text} {args_text}'
' {image}').format(
manager=manager,
username=username,
image=image,
volume_text=volume_text,
ssh_text=ssh_text,
graphics_text=graphics_text,
args_text=args_text,
container=container,
workdir=workdir,
userns_text=userns_text)
print('Running: {}'.format(command))
subprocess.run(command, shell=True)
def parse_volume(volume):
if not ':' in volume:
volume = str(pathlib.Path(volume).expanduser().resolve())
if os.path.exists(volume):
return ' --volume {volume}:{volume}:Z'.format(volume=volume)
else:
print('Requested volume ({}) not present on host. Exiting.'.format(volume),
file=sys.stderr)
sys.exit(1)
else:
volume = volume.split(':')
volume[0] = str(pathlib.Path(volume[0]).expanduser().resolve())
# Note the second path may not resolve on the host.
volume[1] = str(pathlib.Path(volume[1]).expanduser())
if os.path.exists(volume[0]):
return ' --volume {volume0}:{volume1}:Z'.format(volume0=volume[0], volume1=volume[1])
else:
print('Requested volume ({}) not present on host. Exiting.'.format(volume[0]),
file=sys.stderr)
sys.exit(1)
def set_up_graphics_forwards():
# XOrg
xorg = ''
if os.path.exists('/tmp/.X11-unix'):
xorg = (' -e DISPLAY=$DISPLAY'
' -v /tmp/.X11-unix:/tmp/.X11-unix:rw')
# Wayland
wayland = ''
if os.environ.get('WAYLAND_DISPLAY'):
wayland = (' -e XDG_RUNTIME_DIR=/tmp'
' -e WAYLAND_DISPLAY=$WAYLAND_DISPLAY'
' -v $XDG_RUNTIME_DIR/$WAYLAND_DISPLAY:/tmp/$WAYLAND_DISPLAY')
# (User) dbus socket.
dbus = ''
# What kind of socket is it?
dbus_address = os.environ.get('DBUS_SESSION_BUS_ADDRESS')
# If a real path, need to mount it. Otherwise --net=host takes care of it.
    # Guard against the env var being unset (dbus_address is None in that case).
    if dbus_address and 'unix:path' in dbus_address:
dbus_path = dbus_address.split('=')[1]
dbus = dbus + '--volume {path}:{path}'.format(path=dbus_path)
# Regardless need to know where to look.
dbus = dbus + ' --env DBUS_SESSION_BUS_ADDRESS="$DBUS_SESSION_BUS_ADDRESS"'
return xorg + ' ' + wayland + ' ' + dbus
if __name__ == '__main__':
start(sys.argv[1:])
``` |
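A hedged example of driving `start()` programmatically rather than from a CLI wrapper, assuming the `dev_contain` package is importable and `common.get_manager()` resolves to podman or docker; the image, container, and volume names are placeholders.
```python
# Hedged sketch: image, container and volume names are placeholders.
from dev_contain.start import start

start(['--image', 'dev-image:latest',
       '--container', 'dev',
       '--volume', '/home/user/Develop',
       '--graphics'])
```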
{
"source": "jpace121/development_tools",
"score": 2
} |
#### File: development_tools/dev_contain/clean.py
```python
import argparse
import sys
import subprocess
import shlex
import dev_contain.common as common
def clean(in_args):
parser = argparse.ArgumentParser(prog=sys.argv[0]+' clean', description='Prune system of uneeded containers and images.')
args = parser.parse_args(in_args)
builder = common.get_builder()
manager = common.get_manager()
if builder == 'buildah':
run_and_log('Removing buildah containers.', 'buildah rm --all')
run_and_log('Pruning buildah images.', 'buildah rmi --prune')
if builder == 'podman':
run_and_log('Removing podman images.', 'podman image prune')
if builder == 'docker':
run_and_log('Pruning dangling images.', 'docker image prune')
def run_and_log(comment, command):
print(comment)
print('Running: {}'.format(command))
subprocess.run(command, shell=True)
if __name__ == '__main__':
clean(sys.argv[1:])
``` |
{
"source": "jpace121/j7s-ros2",
"score": 2
} |
#### File: j7s/launch/j7s-sub.launch.py
```python
from launch import LaunchDescription
import launch_ros.actions
def generate_launch_description():
return LaunchDescription([
launch_ros.actions.Node(
package='j7s',
executable='j7s-sub',
output='log',
parameters = [{}],
remappings = [
('led_state', 'j7s_led_state')
]
)
])
``` |
{
"source": "jpace121/j7s-router",
"score": 2
} |
#### File: netem/netem/netem.py
```python
import subprocess
import psutil
class netem():
def __init__(self, interface, debug=False):
self._interface = interface
self._debug = debug
def _run(self, command):
if self._debug:
print("Command: " + command)
else:
            # Split the command string; subprocess.run expects an argument list when shell=True is not used.
            subprocess.run(command.split())
def clear(self):
command = f"tc qdisc del dev {self._interface} root"
self._run(command)
def delay(self, time, std_dev):
command = (f"tc qdisc change dev {self._interface} root netem delay "
f"{time}ms {std_dev}ms distribution normal")
self._run(command)
def packet_loss(self, percent):
command = (f"tc qdisc change dev {self._interface} root netem loss "
f"{percent}%")
self._run(command)
def limit(self, bandwidth_kbit):
command = (f"tc qdisc add dev {self._interface} root tbf rate "
f"{bandwidth_kbit}kbit burst 1600 limit 3000")
self._run(command)
``` |
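A short usage sketch for the `netem` wrapper above; with `debug=True` it only prints the generated `tc` commands, so it can be tried without root. The interface name is a placeholder, and the import path depends on how the package is installed.
```python
# Hedged sketch: 'eth0' is a placeholder interface; import path assumed from the repo layout above.
from netem.netem import netem

shaper = netem('eth0', debug=True)
shaper.limit(5000)      # tbf rate limit at 5000 kbit
shaper.delay(100, 10)   # 100 ms delay, 10 ms std dev, normal distribution
shaper.packet_loss(1)   # 1% random loss
shaper.clear()          # remove the root qdisc again
```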
{
"source": "jpace121/ros2cli",
"score": 2
} |
#### File: ros2action/verb/send_goal.py
```python
import importlib
from action_msgs.msg import GoalStatus
import rclpy
from rclpy.action import ActionClient
from ros2action.api import action_name_completer
from ros2action.api import ActionTypeCompleter
from ros2action.verb import VerbExtension
from ros2cli.node import NODE_NAME_PREFIX
from rosidl_runtime_py import message_to_yaml
from rosidl_runtime_py import set_message_fields
import yaml
class SendGoalVerb(VerbExtension):
"""Send an action goal."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument(
'action_name',
help="Name of the ROS action (e.g. '/fibonacci')")
arg.completer = action_name_completer
arg = parser.add_argument(
'action_type',
help="Type of the ROS action (e.g. 'example_interfaces/action/Fibonacci')")
arg.completer = ActionTypeCompleter(action_name_key='action_name')
parser.add_argument(
'goal',
help="Goal request values in YAML format (e.g. '{order: 10}')")
parser.add_argument(
'-f', '--feedback', action='store_true',
help='Echo feedback messages for the goal')
def main(self, *, args):
feedback_callback = None
if args.feedback:
feedback_callback = _feedback_callback
return send_goal(args.action_name, args.action_type, args.goal, feedback_callback)
def _goal_status_to_string(status):
if GoalStatus.STATUS_ACCEPTED == status:
return 'ACCEPTED'
elif GoalStatus.STATUS_EXECUTING == status:
return 'EXECUTING'
elif GoalStatus.STATUS_CANCELING == status:
return 'CANCELING'
elif GoalStatus.STATUS_SUCCEEDED == status:
return 'SUCCEEDED'
elif GoalStatus.STATUS_CANCELED == status:
return 'CANCELED'
elif GoalStatus.STATUS_ABORTED == status:
return 'ABORTED'
else:
return 'UNKNOWN'
def _feedback_callback(feedback):
print('Feedback:\n {}'.format(message_to_yaml(feedback.feedback, None)))
def send_goal(action_name, action_type, goal_values, feedback_callback):
goal_handle = None
node = None
action_client = None
try:
try:
# TODO(jacobperron): This logic should come from a rosidl related package
parts = action_type.split('/')
if len(parts) == 1:
raise ValueError()
if len(parts) == 2:
parts = [parts[0], 'action', parts[1]]
package_name = parts[0]
action_type = parts[-1]
if not all(parts):
raise ValueError()
except ValueError:
raise RuntimeError('The passed action type is invalid')
module = importlib.import_module('.'.join(parts[:-1]))
action_module = getattr(module, action_type)
goal_dict = yaml.safe_load(goal_values)
rclpy.init()
node_name = NODE_NAME_PREFIX + '_send_goal_{}_{}'.format(package_name, action_type)
node = rclpy.create_node(node_name)
action_client = ActionClient(node, action_module, action_name)
goal = action_module.Goal()
try:
set_message_fields(goal, goal_dict)
except Exception as ex:
return 'Failed to populate message fields: {!r}'.format(ex)
print('Waiting for an action server to become available...')
action_client.wait_for_server()
print('Sending goal:\n {}'.format(message_to_yaml(goal, None)))
goal_future = action_client.send_goal_async(goal, feedback_callback)
rclpy.spin_until_future_complete(node, goal_future)
goal_handle = goal_future.result()
if goal_handle is None:
raise RuntimeError(
'Exception while sending goal: {!r}'.format(goal_future.exception()))
if not goal_handle.accepted:
print('Goal was rejected.')
# no need to potentially cancel the goal anymore
goal_handle = None
return
print('Goal accepted with ID: {}\n'.format(bytes(goal_handle.goal_id.uuid).hex()))
result_future = goal_handle.get_result_async()
rclpy.spin_until_future_complete(node, result_future)
result = result_future.result()
if result is None:
raise RuntimeError(
'Exception while getting result: {!r}'.format(result_future.exception()))
# no need to potentially cancel the goal anymore
goal_handle = None
print('Result:\n {}'.format(message_to_yaml(result.result, None)))
print('Goal finished with status: {}'.format(_goal_status_to_string(result.status)))
finally:
# Cancel the goal if it's still active
if (goal_handle is not None and
(GoalStatus.STATUS_ACCEPTED == goal_handle.status or
GoalStatus.STATUS_EXECUTING == goal_handle.status)):
print('Canceling goal...')
cancel_future = goal_handle.cancel_goal_async()
rclpy.spin_until_future_complete(node, cancel_future)
cancel_response = cancel_future.result()
if cancel_response is None:
raise RuntimeError(
'Exception while canceling goal: {!r}'.format(cancel_future.exception()))
if len(cancel_response.goals_canceling) == 0:
raise RuntimeError('Failed to cancel goal')
if len(cancel_response.goals_canceling) > 1:
raise RuntimeError('More than one goal canceled')
if cancel_response.goals_canceling[0].goal_id != goal_handle.goal_id:
raise RuntimeError('Canceled goal with incorrect goal ID')
print('Goal canceled.')
if action_client is not None:
action_client.destroy()
if node is not None:
node.destroy_node()
if rclpy.ok():
rclpy.shutdown()
```
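A hedged example of calling `send_goal` directly (outside the `ros2 action` CLI), assuming a sourced ROS 2 environment with `example_interfaces` installed and a Fibonacci action server running:
```python
# Hedged sketch: requires a running action server on /fibonacci.
from ros2action.verb.send_goal import send_goal, _feedback_callback

send_goal('/fibonacci',
          'example_interfaces/action/Fibonacci',
          '{order: 5}',
          _feedback_callback)
```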
#### File: ros2interface/verb/show.py
```python
from ros2interface.api import get_interface_path
from ros2interface.api import type_completer
from ros2interface.verb import VerbExtension
class ShowVerb(VerbExtension):
"""Output the interface definition."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument(
'type',
help='Show an interface definition (e.g. "std_msgs/msg/String")')
arg.completer = type_completer
def main(self, *, args):
# TODO(kucheria) this logic should come from a rosidl related package
try:
parts = args.type.split('/')
if len(parts) < 2:
raise ValueError()
if not all(parts):
raise ValueError()
file_path = get_interface_path(parts)
except ValueError:
raise RuntimeError('The passed interface type is invalid')
except LookupError as e:
return str(e)
with open(file_path, 'r') as h:
print(h.read(), end='')
``` |
{
"source": "jpacerqueira/project_lost_saturn",
"score": 2
} |
#### File: CloudComposer-AirFlow/composer_https_post_example/dag_trigger.py
```python
import argparse
from datetime import datetime
import json
from tzlocal import get_localzone
import make_iap_request as iap
def main():
"""This main function calls the make_iap_request function which is defined
at
https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/iap/make_iap_request.py
and then prints the output of the function. The make_iap_request function
demonstrates how to authenticate to Identity-Aware Proxy using a service
account.
Returns:
A string containing the page body, or raises an exception if the page couldn't be retrieved.
"""
_LOCAL_TZ = get_localzone()
parser = argparse.ArgumentParser()
parser.add_argument("--url", dest='url', required=True,
help="The url of a resource sitting behind identity-aware proxy.")
parser.add_argument("--iapClientId", dest='iapClientId', required=True,
help="The Client ID of the IAP OAuth Client.")
parser.add_argument("--raw_path", dest='raw_path', required=True, help="GCS path to raw files.")
args = parser.parse_args()
    # Force trailing slash because logic in average-speed DAG expects it this way.
raw_path = args.raw_path if args.raw_path.endswith('/') else args.raw_path + '/'
bucket = raw_path.lstrip('gs://').split('/')[0]
# This transformed path is relative to the bucket Variable in the Airflow environment.
# Note, the gs://<bucket> prefix is stripped because the GoogleCloudStorageToBigQueryOperator
# expects the source_objects as relative to the bucket param
transformed_path = raw_path.replace('/raw-', '/transformed-').replace('gs://'
+ bucket + '/', '')
failed_path = raw_path.replace('/raw-', '/failed-').replace('gs://' + bucket + '/', '')
# Note, we need to remove the trailing slash because of how the the spark saveAsTextFile
# method works.
transformed_path = transformed_path.rstrip('/')
# Place parameters to be passed as part of the dag_run triggered by this POST here.
# In this example we will pass the path where the raw files are and the path where we should
# place the transformed files.
conf = {
'raw_path': raw_path,
'transformed_path': transformed_path,
'failed_path': failed_path,
}
# The api signature requires a unique run_id
payload = {
'run_id': 'post-triggered-run-%s' % datetime.now(_LOCAL_TZ).strftime('%Y%m%d%H%M%s%Z'),
'conf': json.dumps(conf),
}
return iap.make_iap_request(args.url, args.iapClientId, method='POST',
data=json.dumps(payload))
if __name__ == "__main__":
main()
``` |
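The path bookkeeping above derives the bucket plus the transformed and failed prefixes from the raw GCS path; a standalone illustration of that string manipulation with a made-up bucket, mirroring the logic above:
```python
raw_path = 'gs://my-bucket/raw-files/'

# Note: lstrip('gs://') strips a *set* of characters; it works for this bucket name but
# would over-strip a bucket whose name starts with 'g', 's', ':' or '/'.
bucket = raw_path.lstrip('gs://').split('/')[0]                   # 'my-bucket'
transformed_path = raw_path.replace('/raw-', '/transformed-') \
                           .replace('gs://' + bucket + '/', '')   # 'transformed-files/'
failed_path = raw_path.replace('/raw-', '/failed-') \
                      .replace('gs://' + bucket + '/', '')        # 'failed-files/'
transformed_path = transformed_path.rstrip('/')                   # 'transformed-files'

print(bucket, transformed_path, failed_path)
```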
{
"source": "jpachebat/2021-assignment-pandas",
"score": 4
} |
#### File: jpachebat/2021-assignment-pandas/pandas_questions.py
```python
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
def load_data():
"""Load data from the CSV files referundum/regions/departments."""
referendum = pd.DataFrame(pd.read_csv('data/referendum.csv'))
regions = pd.DataFrame(pd.read_csv("data/regions.csv"))
departments = pd.DataFrame(pd.read_csv("data/departments.csv"))
return referendum, regions, departments
def merge_regions_and_departments(regions, departments):
"""Merge regions and departments in one DataFrame.
The columns in the final DataFrame should be:
['code_reg', 'name_reg', 'code_dep', 'name_dep']
"""
regions = regions.rename(columns={
"code": "code_reg", "name": "name_reg", "slug": "slug_reg"
}
)
departments = departments.rename(columns={
"code": "code_dep", "name": "name_dep", "slug": "slug_dep",
"region_code": "code_reg"
}
)
regions_and_departments = pd.merge(
departments,
regions,
how='left', on="code_reg")
regions_and_departments = regions_and_departments[
['code_dep', 'name_dep', 'code_reg', 'name_reg']
]
print(regions)
print(departments)
print(regions_and_departments)
return regions_and_departments
def merge_referendum_and_areas(referendum, regions_and_departments):
"""Merge referendum and regions_and_departments in one DataFrame.
You can drop the lines relative to DOM-TOM-COM departments, and the
french living abroad.
"""
referendum = referendum.rename(columns={
"Department code": "code_dep"
}
)
mask = [str(i) for i in range(1, 96)]
referendum = referendum[referendum["code_dep"].isin(mask)]
referendum['code_dep'] = referendum[
'code_dep'].apply(lambda x: "%02d" % int(x))
referendum_and_areas = pd.merge(
referendum, regions_and_departments,
how='left', on='code_dep'
)
return referendum_and_areas
def compute_referendum_result_by_regions(referendum_and_areas):
"""Return a table with the absolute count for each region.
The return DataFrame should be indexed by `code_reg` and have columns:
['name_reg', 'Registered', 'Abstentions', 'Null', 'Choice A', 'Choice B']
"""
referendum_result_by_regions = referendum_and_areas[
[
'code_reg',
'name_reg',
'Registered',
'Abstentions',
'Null',
'Choice A',
'Choice B'
]
]
referendum_result_by_regions.set_index('code_reg')
referendum_result_by_regions = referendum_result_by_regions.groupby(
'code_reg').sum()
return referendum_result_by_regions
def plot_referendum_map(referendum_result_by_regions):
"""Plot a map with the results from the referendum.
* Load the geographic data with geopandas from `regions.geojson`.
* Merge these info into `referendum_result_by_regions`.
* Use the method `GeoDataFrame.plot` to display the result map. The results
should display the rate of 'Choice A' over all expressed ballots.
* Return a gpd.GeoDataFrame with a column 'ratio' containing the results.
"""
geo_region = gpd.read_file('data/regions.geojson')
geo_region = geo_region.rename(columns={'code': 'code_reg'})
referendum_result_by_regions = pd.merge(referendum_result_by_regions,
geo_region, on='code_reg',
how='left')
referendum_result_by_regions = gpd.GeoDataFrame(
referendum_result_by_regions
)
referendum_result_by_regions.plot(column="Choice A")
    referendum_result_by_regions['ratio'] = (
        referendum_result_by_regions['Choice A']
        / (referendum_result_by_regions['Choice A']
           + referendum_result_by_regions['Choice B'])
    )
plt.show()
return referendum_result_by_regions
if __name__ == "__main__":
referendum, df_reg, df_dep = load_data()
regions_and_departments = merge_regions_and_departments(
df_reg, df_dep
)
referendum_and_areas = merge_referendum_and_areas(
referendum, regions_and_departments
)
referendum_results = compute_referendum_result_by_regions(
referendum_and_areas
)
print(referendum_results)
plot_referendum_map(referendum_results)
plt.show()
``` |
{
"source": "jpackagebot/safersympify",
"score": 4
} |
#### File: safersympify/safersympify/safersympify.py
```python
r"""
Convert user input into SymPy expressions.
RECIPES:
Create a SymPy expression from user input (pure Python syntax with whitelisted oprators and functions only):
>>> expr = SaferSympify().str2sympy('-sqrt(1 + a**b*b)/((a**b)*b+1)')
>>> expr
-1/sqrt(a**b*b + 1)
Get free symbols:
>>> sorted(expr.free_symbols, key=lambda x: str(x))
[a, b]
Evaluate expression:
>>> expr.evalf(subs={'a': 1, 'b': 3, 'c': 5}) # Note extra values can be passed too
-0.500000000000000
Simplify expression:
>>> expr.simplify()
-1/sqrt(a**b*b + 1)
Pretty-print expression as Latex (could be displayed in browser with MathJax)
>>> sympy.latex(expr)
'- \\frac{1}{\\sqrt{a^{b} b + 1}}'
Pretty-print in terminal
>>> sympy.pprint(expr, use_unicode_sqrt_char=True)
-1
─────────────
__________
╱ b
╲╱ a ⋅b + 1
"""
import ast
import operator
import sympy
class SaferSympify:
""" Handles unsanitized user input instead of SymPy, which does not do that yet.
See SymPy PR12524 for details: https://github.com/sympy/sympy/pull/12524
"""
def __init__(self):
self.node_types_allowed = self._get_node_types_allowed()
self.binary_operator_types_allowed = self._get_binary_operator_types_allowed()
self.unary_operator_types_allowed = self._get_unary_operator_types_allowed()
self.functions_allowed = self._get_functions_allowed()
def str2sympy(self, string):
ast_expr = ast.parse(string, mode='eval')
root_node = ast_expr.body
sympy_expr = self.safer_eval(root_node)
return sympy_expr
def safer_eval(self, node):
node_type = type(node)
try:
node_handler = self.node_types_allowed[node_type]
except KeyError:
raise ValueError("Node type %s is not allowed." % node_type)
return node_handler(node)
def _get_node_types_allowed(self):
return {
ast.Name: self._symbol,
ast.Num: self._number,
ast.UnaryOp: self._unary_op,
ast.BinOp: self._binary_op,
ast.Call: self._function
}
def _get_unary_operator_types_allowed(self):
return {
ast.USub: operator.neg,
}
def _get_binary_operator_types_allowed(self):
return {
ast.Add: sympy.Add,
ast.Sub: operator.sub,
ast.Mult: sympy.Mul,
ast.Div: operator.truediv,
ast.Pow: operator.pow,
ast.BitXor: operator.xor,
}
def _get_functions_allowed(self):
return {
'sin': sympy.sin,
'cos': sympy.cos,
'sqrt': sympy.sqrt
}
def _symbol(self, node):
return sympy.Symbol(node.id)
def _number(self, node):
return sympy.Number(node.n)
def _unary_op(self, node):
operator_type = type(node.op)
o = self.unary_operator_types_allowed[operator_type]
operand = self.safer_eval(node.operand)
return o(operand)
def _binary_op(self, node):
operator_type = type(node.op)
o = self.binary_operator_types_allowed[operator_type]
left = self.safer_eval(node.left)
right = self.safer_eval(node.right)
return o(left, right)
def _function(self, node):
function_name = node.func.id
arg_list = []
for node_arg in node.args:
arg_list.append(self.safer_eval(node_arg))
try:
f = self.functions_allowed[function_name]
except KeyError:
raise ValueError("Function %s is not allowed" % function_name)
return f(*arg_list)
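# --- Illustrative extension ---
# The whitelist hooks above make it straightforward to allow extra functions
# by overriding a single method. A minimal sketch (sympy.tan is a standard
# SymPy function; this subclass is only an example, not part of the API):
class SaferSympifyWithTan(SaferSympify):
    def _get_functions_allowed(self):
        functions = super()._get_functions_allowed()
        functions['tan'] = sympy.tan
        return functions
# e.g. SaferSympifyWithTan().str2sympy('tan(x) + 1') parses, while the base
# SaferSympify raises ValueError because 'tan' is not whitelisted.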
``` |
{
"source": "JPacoch/post-glacial-ds",
"score": 3
} |
#### File: JPacoch/post-glacial-ds/data_processing.py
```python
import rasterio
import numpy as np
import pandas as pd
import geopandas as gpd
import tensorflow as tf
import matplotlib.pyplot as plt
import sklearn.model_selection
from re import X
from PIL import Image
from cProfile import label
from pyproj import transform
#Loading prepared point dataset
points = gpd.read_file("C:/Users/pacoc/Desktop/Warsztat/Studia/mag/data/punkty/punkty1.gpkg", layer = 'punkty1')
#DEM / Hillshade for Poland
src = rasterio.open('C:/Users/pacoc/Desktop/Warsztat/Studia/mag/data/hillshade.tif')
windows_list = []
def createImgDataset(data, class_name, train_test_split=False):
print(class_name)
#Get window values
for point in data['geometry']:
x = point.xy[0][0]
y = point.xy[1][0]
row, col = src.index(x,y)
rst = src.read(1, window=rasterio.windows.Window(col_off=col, row_off=row,
width=128, height=128))
windows_list.append(rst)
# plt.imshow(rst, cmap='binary')
# plt.show()
# print(windows_list[0])
# print(len(windows_list[0]))
# print(type(windows_list[0]))
#Labelling
labels_list=[]
for elem in range(0, len(windows_list)):
elem = 0
labels_list.append(elem)
labels_list = np.asarray(labels_list).astype('float32')
print(labels_list)
print(type(labels_list))
# labels_list = tf.keras.utils.to_categorical(labels_list)
# print(labels_list)
# print(type(labels_list))
#List of arrays to list of tensors
#Tensor of tensors
def arrayListToTensor(list):
tensor_list = tf.convert_to_tensor(list, dtype=tf.float32)
print(type(tensor_list))
# print(tensor_list.shape)
# print(tensor_list.ndim)
# print(tensor_list.dtype)
return tensor_list
#List of tensors
# def arrayListToTensor(list):
# tensor_list=[]
# for arr in list:
# arr = tf.convert_to_tensor(arr, dtype=tf.float32)
# tensor_list.append(arr)
# print(tensor_list)
# # print(tensor_list[0].ndim)
# return tensor_list
tensor = arrayListToTensor(windows_list)
# plt.imshow(tensor[1])
# plt.show()
if train_test_split == True:
X_train, X_val, y_train, y_val = sklearn.model_selection.train_test_split(windows_list,
labels_list,
test_size=0.20,
random_state=42)
i = 0
for arr in range(1,len(X_train)):
arr = Image.fromarray(X_train[i])
arr.save(f'data/train/{class_name}/{i}.png', format='PNG')
i = i + 1
j = 0
for arr in range(1,len(X_val)):
arr = Image.fromarray(X_val[j])
arr.save(f'data/val/{class_name}/{j}.png', format='PNG')
j = j + 1
else:
i = 0
for arr in range(1,len(windows_list)):
arr = Image.fromarray(windows_list[i])
arr.save(f'data/manual/{class_name}/{i}.png', format='PNG')
i = i + 1
createImgDataset(points, 'denuded')
``` |
{
"source": "jpaDeveloper/estrutura-de-dados",
"score": 4
} |
#### File: estrutura-de-dados-master/aula2/hanoi.py
```python
cont=0
def hanoi(n, ori='A', dest='B', aux='C'):
global cont
cont += 1
if n == 1:
print('%s -> %s' % (ori, dest))
else:
hanoi(n - 1, ori, aux, dest)
print('%s -> %s' % (ori, dest))
hanoi(n - 1, aux, dest, ori)
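# Example output for hanoi(2) (moving 2 disks from A to B via C):
#   A -> C
#   A -> B
#   C -> B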
for i in range(1, 5):
print('###### solução %s' % i)
cont=0
hanoi(i)
print('Tempo de execução %s'% cont)
```
#### File: estrutura-de-dados-master/aula2/sequencias.py
```python
lst = [1, 2, 3]
tpl = (1, 2, 3)
s = '123'
primeiro, *t = tpl
print(t)
print(primeiro)
lst[0], lst[1] = lst[1], lst[0]
print(lst[0])
print(tpl[0])
print(s[0])
def f(a, b):
return a * 2, b * 2
tpl = (1, 2)
a, b = f(*tpl)
print(a, b)
print(type(tpl))
```
#### File: estrutura-de-dados-master/aula3/lista_infinita.py
```python
def gerar_lista_infinita(inicio):
print('Inicio')
while True:
yield inicio
inicio += 1
for i in gerar_lista_infinita(1):
print(i)
```
#### File: estrutura-de-dados-master/aula5/expressaoAritimetica.py
```python
from collections import deque
# Complexity: Time O(n), Space O(n)
from aula5.fila import Fila
from aula4.pilha import Pilha
class FilaVaziaErro(Exception):
pass
class ErroLexico(Exception):
pass
class ErroSintatico(Exception):
pass
def analise_lexica(expressao):
fila = Fila()
especiais = '{[()]}+-*/.'
quant = ''
alfabeto = 'abcdefghijklmnopqrstuvwxyz'
sequenciaNumerica = '0123456789'
for caracter in expressao:
if caracter in especiais:
if len(quant):
fila.enfileirar(quant)
quant = ''
fila.enfileirar(caracter)
if caracter in set(sequenciaNumerica):
quant = quant + caracter
if caracter in set(alfabeto) or caracter == '':
raise ErroLexico('')
if len(quant):
fila.enfileirar(quant)
return fila
def analise_sintatica(fila):
especiais = '+-*/(){}[]'
filaObjeto = Fila()
caracter = ''
if not(len(fila)): raise ErroSintatico('')
while len(fila):
atual = fila.desenfileirar()
if atual in especiais:
            if len(caracter):  # Check whether the accumulated token is an int or a float
if '.' not in caracter:
filaObjeto.enfileirar(int(caracter))
else: filaObjeto.enfileirar(float(caracter))
caracter = ''
filaObjeto.enfileirar(atual)
else: caracter += atual
if len(caracter):
if '.' not in caracter: caracter = int(caracter)
else: caracter = float(caracter)
filaObjeto.enfileirar(caracter)
return filaObjeto
def avaliar(expressao):
pilha = Pilha()
analise = analise_sintatica(analise_lexica(expressao))
especiais = '{[()]}'
matematicos = '+-*/'
for atual in analise:
pilha.empilhar(atual)
if len(pilha) >= 3:
p1, p2, p3 = pilha.desempilhar(), pilha.desempilhar(), pilha.desempilhar()
if str(p1) not in especiais and str(p3) not in especiais and str(p2) in matematicos:
if p2 == '+': controle = p3 + p1
elif p2 == '-': controle = p3 - p1
elif p2 == '*': controle = p3 * p1
else: controle = p3 / p1
pilha.empilhar(controle)
else:
pilha.empilhar(p3)
pilha.empilhar(p2)
pilha.empilhar(p1)
if str(atual) in ')]}':
pilha.desempilhar()
controle = pilha.desempilhar()
pilha.desempilhar()
pilha.empilhar(controle)
if len(pilha) >= 3:
p1, p2, p3 = pilha.desempilhar(), pilha.desempilhar(), pilha.desempilhar()
if str(p1) not in especiais and str(p3) not in especiais and str(p2) in matematicos:
if p2 == '+': controle = p3 + p1
elif p2 == '-': controle = p3 - p1
elif p2 == '*': controle = p3 * p1
else: controle = p3 / p1
pilha.empilhar(controle)
else:
pilha.empilhar(p3)
pilha.empilhar(p2)
pilha.empilhar(p1)
return pilha.desempilhar()
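# Worked example of the pipeline: analise_lexica('1+2.0') yields the tokens
# '1', '+', '2', '.', '0'; analise_sintatica turns them into 1, '+', 2.0;
# avaliar('1+2.0') then evaluates the stack and returns 3.0.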
import unittest
class AnaliseLexicaTestes(unittest.TestCase):
def test_expressao_vazia(self):
fila = analise_lexica('')
self.assertTrue(fila.vazia())
def test_caracter_estranho(self):
self.assertRaises(ErroLexico, analise_lexica, 'a')
self.assertRaises(ErroLexico, analise_lexica, 'ab')
def test_inteiro_com_um_algarismo(self):
fila = analise_lexica('1')
self.assertEqual('1', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_inteiro_com_vários_algarismos(self):
fila = analise_lexica('1234567890')
self.assertEqual('1234567890', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_float(self):
fila = analise_lexica('1234567890.34')
self.assertEqual('1234567890', fila.desenfileirar())
self.assertEqual('.', fila.desenfileirar())
self.assertEqual('34', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_parenteses(self):
fila = analise_lexica('(1)')
self.assertEqual('(', fila.desenfileirar())
self.assertEqual('1', fila.desenfileirar())
self.assertEqual(')', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_chaves(self):
fila = analise_lexica('{(1)}')
self.assertEqual('{', fila.desenfileirar())
self.assertEqual('(', fila.desenfileirar())
self.assertEqual('1', fila.desenfileirar())
self.assertEqual(')', fila.desenfileirar())
self.assertEqual('}', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_colchetes(self):
fila = analise_lexica('[{(1.0)}]')
self.assertEqual('[', fila.desenfileirar())
self.assertEqual('{', fila.desenfileirar())
self.assertEqual('(', fila.desenfileirar())
self.assertEqual('1', fila.desenfileirar())
self.assertEqual('.', fila.desenfileirar())
self.assertEqual('0', fila.desenfileirar())
self.assertEqual(')', fila.desenfileirar())
self.assertEqual('}', fila.desenfileirar())
self.assertEqual(']', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_adicao(self):
fila = analise_lexica('1+2.0')
self.assertEqual('1', fila.desenfileirar())
self.assertEqual('+', fila.desenfileirar())
self.assertEqual('2', fila.desenfileirar())
self.assertEqual('.', fila.desenfileirar())
self.assertEqual('0', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_subtracao(self):
fila = analise_lexica('1-2.0')
self.assertEqual('1', fila.desenfileirar())
self.assertEqual('-', fila.desenfileirar())
self.assertEqual('2', fila.desenfileirar())
self.assertEqual('.', fila.desenfileirar())
self.assertEqual('0', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_multiplicacao(self):
fila = analise_lexica('1*2.0')
self.assertEqual('1', fila.desenfileirar())
self.assertEqual('*', fila.desenfileirar())
self.assertEqual('2', fila.desenfileirar())
self.assertEqual('.', fila.desenfileirar())
self.assertEqual('0', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_divisao(self):
fila = analise_lexica('1/2.0')
self.assertEqual('1', fila.desenfileirar())
self.assertEqual('/', fila.desenfileirar())
self.assertEqual('2', fila.desenfileirar())
self.assertEqual('.', fila.desenfileirar())
self.assertEqual('0', fila.desenfileirar())
self.assertTrue(fila.vazia())
def test_expresao_com_todos_simbolos(self):
expressao = '1/{2.0+3*[7-(5-3)]}'
fila = analise_lexica(expressao)
self.assertListEqual(list(expressao), [e for e in fila])
self.assertTrue(fila.vazia())
class AnaliseSintaticaTestes(unittest.TestCase):
def test_fila_vazia(self):
fila = Fila()
self.assertRaises(ErroSintatico, analise_sintatica, fila)
def test_int(self):
fila = Fila()
fila.enfileirar('1234567890')
fila_sintatica = analise_sintatica(fila)
self.assertEqual(1234567890, fila_sintatica.desenfileirar())
self.assertTrue(fila_sintatica.vazia())
def test_float(self):
fila = Fila()
fila.enfileirar('1234567890')
fila.enfileirar('.')
fila.enfileirar('4')
fila_sintatica = analise_sintatica(fila)
self.assertEqual(1234567890.4, fila_sintatica.desenfileirar())
self.assertTrue(fila_sintatica.vazia())
def test_expressao_com_todos_elementos(self):
fila = analise_lexica('1000/{222.125+3*[7-(5-3)]}')
fila_sintatica = analise_sintatica(fila)
self.assertListEqual([1000, '/', '{', 222.125, '+', 3, '*', '[', 7, '-', '(', 5, '-', 3, ')', ']', '}'],
[e for e in fila_sintatica])
class AvaliacaoTestes(unittest.TestCase):
def test_expressao_vazia(self):
self.assertRaises(ErroSintatico, avaliar, '')
def test_inteiro(self):
self.assert_avaliacao('1')
def test_float(self):
self.assert_avaliacao('2.1')
def test_soma(self):
self.assert_avaliacao('2+1')
def test_subtracao_e_parenteses(self):
self.assert_avaliacao('(2-1)')
def test_expressao_com_todos_elementos(self):
self.assertEqual(1.0, avaliar('2.0/[4*3+1-{15-(1+3)}]'))
def assert_avaliacao(self, expressao):
self.assertEqual(eval(expressao), avaliar(expressao))
if __name__ == '__main__':
unittest.main()
```
#### File: estrutura-de-dados-master/aula6/insertion_sort.py
```python
import unittest
# Time: O(n^2)
# Memory: O(1)
def insertion_sort(seq):
for i in range(1, len(seq)):
corrente = seq[i]
atul = i
while atul > 0 and seq[atul - 1] > corrente:
seq[atul] = seq[atul - 1]
atul = atul - 1
seq[atul] = corrente
return seq
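# Illustrative trace: insertion_sort([3, 1, 2])
#   i=1: corrente=1, 3 is shifted right -> [1, 3, 2]
#   i=2: corrente=2, 3 is shifted right -> [1, 2, 3]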
class OrdenacaoTestes(unittest.TestCase):
def teste_lista_vazia(self):
self.assertListEqual([], insertion_sort([]))
def teste_lista_unitaria(self):
self.assertListEqual([1], insertion_sort([1]))
def teste_lista_binaria(self):
self.assertListEqual([1, 2], insertion_sort([2, 1]))
    def teste_lista_com_varios_elementos(self):
self.assertListEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], insertion_sort([9, 7, 1, 8, 5, 3, 6, 4, 2, 0]))
if __name__ == '__main__':
unittest.main()
```
#### File: estrutura-de-dados/estrutura-de-dados-master/min_max.py
```python
import unittest
import time
G = False
contN = None
M = None
m = None
def min_max(seq, contador = contN, maior = M, menor = m ):
    global G  # Global flag controlling the decision structure
    '''
    :param seq: a sequence
    :return: (min, max)
    Returns a tuple whose first value (min) is the minimum value of the
    sequence seq. The second value is the maximum (max) of the sequence.
    min_max is O(n) because its processing time is tied directly to its input:
    the number of elements in the list determines how many times the recursion
    is invoked. Timing each test case with the time module confirmed the
    approximate running times and their differences.
    Time for a 500-element list, in milliseconds: 0.0010001659393310547
    '''
if G == True:
max = maior
min = menor
if contador == len(seq):#Se a variavel contador for igual ao numero de elementos da lista
G = False
return min, max
if max < seq[contador]:
max = seq[contador]
if min > seq[contador]:
min = seq[contador]
return min_max(seq, contador + 1, max, min)
else:
if len(seq) == 0:
return None, None
elif len(seq) == 1:
return seq[0], seq[0]
if len(seq) > 1:
contN = 1
M = seq[0]
m = seq[0]
G = True
return min_max(seq, contN, M, m)
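# Worked example: min_max([3, 1, 2]) starts the recursion with max = min = 3,
# scans the remaining elements, and returns (1, 3).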
class MinMaxTestes(unittest.TestCase):
def test_lista_vazia(self):
self.assertTupleEqual((None, None), min_max([]))
def test_lista_len_1(self):
self.assertTupleEqual((1, 1), min_max([1]))
def test_lista_consecutivos(self):
self.assertTupleEqual((0, 500), min_max(list(range(501))))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpadilla/apistar",
"score": 2
} |
#### File: apistar/apistar/http.py
```python
from typing import Any, Callable, Dict, List, Tuple, TypeVar, Union # noqa
from urllib.parse import quote
from werkzeug.datastructures import Headers as WerkzeugHeaders
from werkzeug.datastructures import (
EnvironHeaders, ImmutableDict, ImmutableHeadersMixin, ImmutableMultiDict
)
from werkzeug.urls import url_decode
from apistar.compat import json
from apistar.pipelines import ArgName
from apistar.schema import validate
class WSGIEnviron(ImmutableDict):
pass
class Method(str):
@classmethod
def build(cls, environ: WSGIEnviron):
return cls(environ['REQUEST_METHOD'])
class Scheme(str):
@classmethod
def build(cls, environ: WSGIEnviron):
return cls(environ['wsgi.url_scheme'])
class Host(str):
@classmethod
def build(cls, environ: WSGIEnviron):
return cls(environ.get('HTTP_HOST') or environ['SERVER_NAME'])
class Port(int):
@classmethod
def build(cls, environ: WSGIEnviron):
if environ['wsgi.url_scheme'] == 'https':
return cls(environ.get('SERVER_PORT') or 443)
return cls(environ.get('SERVER_PORT') or 80)
class MountPath(str):
@classmethod
def build(cls, environ: WSGIEnviron):
return cls(quote(environ.get('SCRIPT_NAME', '')))
class RelativePath(str):
@classmethod
def build(cls, environ: WSGIEnviron):
return cls(quote(environ.get('PATH_INFO', '')))
class Path(str):
@classmethod
def build(cls, environ: WSGIEnviron):
path = environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', '')
return cls(quote(path))
class QueryString(str):
@classmethod
def build(cls, environ: WSGIEnviron):
query_string = environ.get('QUERY_STRING', '')
return cls(query_string)
class URL(str):
@classmethod
def build(cls, environ: WSGIEnviron):
# https://www.python.org/dev/peps/pep-0333/#url-reconstruction
url = environ['wsgi.url_scheme'] + '://'
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
url += quote(environ.get('SCRIPT_NAME', ''))
url += quote(environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
url += '?' + environ['QUERY_STRING']
return cls(url)
class Body(bytes):
@classmethod
def build(cls, environ: WSGIEnviron):
content_length = int(environ.get('CONTENT_LENGTH', 0))
return environ['wsgi.input'].read(content_length)
class Headers(ImmutableHeadersMixin, WerkzeugHeaders):
def __init__(self, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], dict):
args = [list(args[0].items())]
super().__init__(*args, **kwargs)
@classmethod
def build(cls, environ: WSGIEnviron):
return cls(EnvironHeaders(environ))
class Header(str):
@classmethod
def build(cls, headers: Headers, arg_name: ArgName):
return headers.get(arg_name.replace('_', '-'))
class QueryParams(ImmutableMultiDict):
@classmethod
def build(cls, environ: WSGIEnviron):
query_string = environ.get('QUERY_STRING', '')
return cls(url_decode(query_string))
class QueryParam(str):
schema = None # type: type
@classmethod
def build(cls, params: QueryParams, arg_name: ArgName):
value = params.get(arg_name)
if value is None or cls.schema is None:
return value
if not isinstance(value, cls.schema):
value = validate(cls.schema, value)
return value
HeadersType = Union[
List[Tuple[str, str]],
Dict[str, str],
Headers
]
ResponseData = TypeVar('ResponseData')
class Request(object):
__slots__ = ('method', 'url', 'headers')
def __init__(self, method: str, url: str, headers: HeadersType=None) -> None:
self.method = method
self.url = url
self.headers = Headers(headers)
@classmethod
def build(cls, method: Method, url: URL, headers: Headers):
return cls(method=method, url=url, headers=headers)
class Response(object):
__slots__ = ('data', 'content', 'status', 'headers')
def __init__(self, data: Any, status: int=200, headers: HeadersType=None) -> None:
if headers is None:
headers_dict = {} # type: Union[Dict[str, str], Headers]
headers_list = [] # type: List[Tuple[str, str]]
elif isinstance(headers, dict):
headers_dict = headers
headers_list = list(headers.items())
elif isinstance(headers, list):
headers_dict = dict(headers)
headers_list = headers
else:
headers_dict = headers
headers_list = headers.to_list()
if isinstance(data, str):
content = data.encode('utf-8')
content_type = 'text/html; charset=utf-8'
elif isinstance(data, bytes):
content = data
content_type = 'text/html; charset=utf-8'
else:
content = json.dumps(data).encode('utf-8')
content_type = 'application/json'
if 'Content-Length' not in headers_dict:
headers_list += [('Content-Length', str(len(content)))]
if 'Content-Type' not in headers_dict:
headers_list += [('Content-Type', content_type)]
self.data = data
self.content = content
self.status = status
self.headers = Headers(headers_list)
@classmethod
def build(cls, data: ResponseData):
return cls(data=data)
```
#### File: apistar/apistar/templating.py
```python
import os
import jinja2
from apistar.exceptions import ConfigurationError
from apistar.pipelines import ArgName
from apistar.settings import Settings
class Templates(jinja2.Environment):
@classmethod
def build(cls, settings: Settings):
template_dirs = settings.get(['TEMPLATES', 'DIRS'])
loader = None # type: jinja2.BaseLoader
if len(template_dirs) == 1:
loader = jinja2.FileSystemLoader(template_dirs[0])
else:
loader = jinja2.ChoiceLoader([
jinja2.FileSystemLoader(template_dir)
for template_dir in template_dirs
])
return Templates(loader=loader)
class Template(jinja2.Template):
prefix = ''
suffixes = ['.html', '.txt']
path_delimiter = '__'
@classmethod
def build(cls, arg_name: ArgName, templates: Templates):
paths = arg_name.split(cls.path_delimiter)
path = os.path.join(cls.prefix, *paths)
for suffix in cls.suffixes:
try:
return templates.get_template(path + suffix)
except jinja2.TemplateNotFound:
pass
raise ConfigurationError('No template found for "%s".' % arg_name)
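# Example lookup: an argument named 'emails__welcome' is split on '__' and
# resolved to 'emails/welcome.html', falling back to 'emails/welcome.txt'.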
```
#### File: apistar/tests/test_commands.py
```python
import os
import click
from apistar import __version__, exceptions
from apistar.app import App
from apistar.main import setup_pythonpath
from apistar.test import CommandLineRunner
app = App()
runner = CommandLineRunner(app)
def test_help_flag():
result = runner.invoke([])
assert '--help' in result.output
assert '--version' in result.output
assert 'new' in result.output
assert 'run' in result.output
assert 'test' in result.output
assert result.exit_code == 0
def test_version_flag():
result = runner.invoke(['--version'])
assert __version__ in result.output
assert result.exit_code == 0
def test_custom_command():
def custom(var):
click.echo(var)
app = App(commands=[custom])
runner = CommandLineRunner(app)
result = runner.invoke([])
assert 'custom' in result.output
result = runner.invoke(['custom', '123'])
assert result.output == '123\n'
assert result.exit_code == 0
def test_custom_command_with_int_arguments():
def add(a: int, b: int):
click.echo(str(a + b))
app = App(commands=[add])
runner = CommandLineRunner(app)
result = runner.invoke([])
assert 'add' in result.output
result = runner.invoke(['add', '1', '2'])
assert result.output == '3\n'
assert result.exit_code == 0
def test_new():
with runner.isolated_filesystem():
runner.invoke(['new', 'myproject', '--layout', 'minimal'])
assert os.path.exists('myproject')
assert os.path.exists(os.path.join('myproject', 'app.py'))
assert os.path.exists(os.path.join('myproject', 'tests.py'))
def test_do_not_overwrite_existing_project():
with runner.isolated_filesystem():
result = runner.invoke(['new', 'myproject', '--layout', 'minimal'])
assert result.exit_code == 0
result = runner.invoke(['new', 'myproject', '--layout', 'minimal'])
assert result.exit_code != 0
def test_testsuite_minimal():
with runner.isolated_filesystem():
runner.invoke(['new', 'myproject', '--layout', 'minimal'])
os.chdir('myproject')
setup_pythonpath()
result = runner.invoke(['test'])
assert '2 passed' in result.output
assert result.exit_code == 0
def test_testsuite_standard():
with runner.isolated_filesystem():
runner.invoke(['new', 'myproject'])
os.chdir('myproject')
setup_pythonpath()
result = runner.invoke(['test'])
assert '2 passed' in result.output
assert result.exit_code == 0
# Add a failing test case.
failing_test_module = os.path.join('tests', 'test_failure.py')
with open(failing_test_module, 'w') as test_module:
test_module.write('def test_failure():\n raise Exception()\n')
result = runner.invoke(['test'])
assert '1 failed, 2 passed' in result.output
assert result.exit_code != 0
def test_testsuite_missing_tests_module():
with runner.isolated_filesystem():
runner.invoke(['new', 'myproject', '--layout', 'minimal'])
os.chdir('myproject')
setup_pythonpath()
os.remove('tests.py')
result = runner.invoke(['test'])
assert isinstance(result.exception, exceptions.ConfigurationError)
assert result.exit_code != 0
def test_missing_app_module():
with runner.isolated_filesystem():
runner.invoke(['new', 'myproject', '--layout', 'minimal'])
os.chdir('myproject')
setup_pythonpath()
os.remove('app.py')
result = runner.invoke(['run'])
assert isinstance(result.exception, exceptions.ConfigurationError)
assert result.exit_code != 0
def test_misconfigured_app_module():
with runner.isolated_filesystem():
runner.invoke(['new', 'myproject', '--layout', 'minimal'])
os.chdir('myproject')
setup_pythonpath()
with open('app.py', 'w') as app_module:
app_module.write('123\n')
result = runner.invoke(['run'])
assert isinstance(result.exception, exceptions.ConfigurationError)
assert result.exit_code != 0
```
#### File: apistar/tests/test_settings.py
```python
from apistar import App, Route, TestClient
from apistar.settings import Setting, Settings
def get_settings(settings: Settings):
return settings
def get_setting(ABC: Setting):
return {'ABC': ABC}
routes = [
Route('/settings/', 'GET', get_settings),
Route('/setting/', 'GET', get_setting),
]
settings = {
'ABC': 123,
'XYZ': 456
}
app = App(routes=routes, settings=settings)
client = TestClient(app)
def test_settings():
response = client.get('/settings/')
assert response.status_code == 200
assert response.json() == {
'ABC': 123,
'XYZ': 456
}
def test_setting():
response = client.get('/setting/')
assert response.status_code == 200
assert response.json() == {
'ABC': 123,
}
def test_use_setting_as_argument():
abc = Setting(789)
assert get_setting(abc) == {'ABC': 789}
def test_settings_lookup():
settings = Settings(
ABC=123,
DEF={'XYZ': 456}
)
assert settings.get('ABC') == 123
assert settings.get(['DEF']) == {'XYZ': 456}
assert settings.get(['DEF', 'XYZ']) == 456
assert settings.get('missing') is None
assert settings.get(['ABC', 'missing']) is None
assert settings.get(['DEF', 'missing']) is None
assert settings.get(['DEF', 'missing'], '') == ''
``` |
{
"source": "jpadilla/black-online",
"score": 2
} |
#### File: black-online/api/app.py
```python
import os
import json
import base64
import lzma
import black
import urllib
import tempfile
from flask import Flask, render_template, request, jsonify
from flask_cors import cross_origin
TEMP_DIR = tempfile.gettempdir()
BASE_URL = "https://black.vercel.app"
BLACK_VERSION = os.getenv("BLACK_VERSION")
def get_black_version():
lockfile = json.load(open("./Pipfile.lock"))
package = lockfile["default"]["black"]
version = package.get("version")
ref = package.get("ref")
if version:
return version.lstrip("==")
if ref:
return ref[0:6]
app = Flask(__name__)
black_version = get_black_version()
def compress_state(data):
compressed = lzma.compress(json.dumps(data).encode("utf-8"))
return base64.urlsafe_b64encode(compressed).decode("utf-8")
def decompress_state(state):
compressed = base64.urlsafe_b64decode(state)
return json.loads(lzma.decompress(compressed))
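# Round-trip property (illustrative): for any JSON-serializable dict d,
# decompress_state(compress_state(d)) == d, since each step above
# (json -> lzma -> urlsafe base64) is undone in reverse order.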
def normalize_exception(exc):
exception_str = f"{exc}"
# Try to load contents dumped to tmp file.
if "helpful: " in exception_str:
try:
_, file_path = exception_str.split("helpful: ")
if file_path.startswith(TEMP_DIR):
with open(file_path) as f:
contents = f.read()
exception_str = f"{exception_str}\n\n{contents}"
except Exception:
pass
return exception_str
def format_code(source, fast, configuration):
try:
mode = black.FileMode(**configuration)
formatted = black.format_file_contents(source, fast=fast, mode=mode)
except black.NothingChanged:
formatted = source
except Exception as exc:
formatted = normalize_exception(exc)
return formatted
@app.route("/", methods=["POST", "GET"])
@cross_origin()
def index():
if request.method == "POST":
data = request.get_json()
source = data.get("source")
options = data.get("options", {})
line_length = int(options.get("line_length", 88))
skip_string_normalization = bool(
options.get("skip_string_normalization", False)
)
py36 = bool(options.get("py36", False))
pyi = bool(options.get("pyi", False))
fast = bool(options.get("fast", False))
else:
state = request.args.get("state")
if state:
state = decompress_state(state)
source = state.get("sc")
line_length = state.get("ll")
skip_string_normalization = state.get("ssn")
py36 = state.get("py36")
pyi = state.get("pyi")
fast = state.get("fast")
else:
source = render_template("source.py")
line_length = 88
skip_string_normalization = False
py36 = False
pyi = False
fast = False
formatted = format_code(
source,
fast=fast,
configuration={
"target_versions": black.PY36_VERSIONS if py36 else set(),
"line_length": line_length,
"is_pyi": pyi,
"string_normalization": not skip_string_normalization,
},
)
state = compress_state(
{
"sc": source,
"ll": line_length,
"ssn": skip_string_normalization,
"py36": py36,
"pyi": pyi,
"fast": fast,
}
)
options = [f"`--line-length={line_length}`"]
if skip_string_normalization:
options.append("`--skip-string-normalization`")
if py36:
options.append("`--py36`")
if pyi:
options.append("`--pyi`")
if fast:
options.append("`--fast`")
else:
options.append("`--safe`")
if BLACK_VERSION == "stable":
version = f"v{black_version}"
else:
version = f"https://github.com/psf/black/commit/{black_version}"
issue_data = {
"source_code": source,
"formatted_code": formatted,
"options": "\n".join(options),
"version": version,
"playground_link": f"{BASE_URL}/?version={BLACK_VERSION}&state={state}",
}
issue_body = urllib.parse.quote_plus(render_template("issue.md", **issue_data))
issue_link = f"https://github.com/psf/black/issues/new?body={issue_body}"
return jsonify(
{
"source_code": source,
"formatted_code": formatted,
"options": {
"line_length": line_length,
"skip_string_normalization": skip_string_normalization,
"py36": py36,
"pyi": pyi,
"fast": fast,
},
"state": state,
"issue_link": issue_link,
"version": black_version,
}
)
@app.route("/version", methods=["GET"])
@cross_origin()
def version():
return jsonify({"version": black_version})
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "jpadilladev/the-guardpian",
"score": 2
} |
#### File: jpadilladev/the-guardpian/main.py
```python
from config.Config import Config
import logging as log
def main():
log.info('Starting The Guardpian...')
config = Config()
config.guardpian_service.start()
if __name__ == '__main__':
main()
```
#### File: the-guardpian/util/Gpio.py
```python
import logging
import RPi.GPIO as GPIO
log = logging.getLogger(__name__)
class Gpio:
def __init__(self, debug, pin=4):
self.debug = debug
self.pin = pin
if not self.debug:
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pin, GPIO.IN)
def add_event_detect(self, callback):
if not self.debug:
GPIO.add_event_detect(self.pin, GPIO.BOTH, callback=callback, bouncetime=200)
log.info("Added event detect to GPIO BOTH on pin " + str(self.pin))
def remove_event_detect(self):
if not self.debug:
GPIO.remove_event_detect(self.pin)
log.info("Removed event detect to GPIO pin " + str(self.pin))
def input(self):
if not self.debug:
return GPIO.input(self.pin)
else:
return True
def cleanup(self):
if not self.debug:
GPIO.cleanup()
``` |
{
"source": "jpadilla/notaso",
"score": 2
} |
#### File: notaso/home/views.py
```python
from django.db.models import Count
from django.views.generic import TemplateView
from ..comments.models import Comment
from ..professors.models import Professor
from ..universities.models import University
class HomeView(TemplateView):
template_name = "home.html"
def get_context_data(self, **kwargs):
if "view" not in kwargs:
kwargs["view"] = self
universities = University.objects.all()
universities = list(universities)
universities.sort(key=lambda x: x.get_grade())
professors = (
Professor.objects.annotate(num_comments=Count("comment"))
.filter(score__gt=0, num_comments__gte=10)
.order_by("-score")
)
comments = (
Comment.objects.all().exclude(body__exact="").order_by("-created_at", "-id")
)
kwargs["professors"] = professors[:5]
kwargs["universities"] = universities[:5]
kwargs["recent_comments"] = comments[:5]
kwargs["navbarSearchShow"] = True
return kwargs
```
#### File: notaso/restapiv2/views.py
```python
from django.shortcuts import get_object_or_404
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet
from ..departments.models import Department
from ..professors.models import Professor
from ..universities.models import University
from .filters import ProfessorFilter, UniversityFilter
from .serializers import (
DepartmentSerializer,
ProfessorListSerializer,
ProfessorRetrieveSerializer,
UniversityListSerializer,
UniversityRetrieveSerializer,
)
class UniversityViewSet(ReadOnlyModelViewSet):
"""
### 1. Search Values
> Search university by name keyword
- ####Example:
* #####Search by University Name: [?search=Universidad de Puerto Rico](?search=Universidad de Puerto Rico)
---
### 2. Ordering Values
> Order universities by id, name or city
- ####Examples:
* #####Ordering by id: [?ordering=id](?ordering=id)
* #####Ordering by name: [?ordering=name](?ordering=name)
* #####Ordering by city: [?ordering=city](?ordering=city)
The API may also specify reverse orderings by prefixing the field name
with '-', like so:
* #####Reverse ordering: [?ordering=-name](?ordering=id)
Multiple orderings may also be specified:
* #####Multiple ordering: [?ordering=name,city](?ordering=name,city)
---
### 3. Filters Values
> Filters university by name or city.
- ####Examples:
* #####Filter by name: [?name=Universidad de Puerto Rico](?name=Universidad de Puerto Rico)
* #####Filter by city: [?city=San Juan](?city=San Juan)
---
##Retrieve professors by university departments
---
###1. Professors list by university department
> Search professors from an university by departments
**Only works retrieving one university, doesn't work for university list.**
- ####Example:
* #####Department slug: [?department=ciencias-de-computadora](?department=ciencias-de-computadora)
---
"""
queryset = University.objects.all()
serializer_class = UniversityListSerializer
filter_backends = (
filters.OrderingFilter,
filters.SearchFilter,
DjangoFilterBackend,
)
ordering_fields = ("id", "name", "city")
search_fields = ("name",)
filterset_class = UniversityFilter
lookup_field = "slug"
def retrieve(self, request, slug=None):
university = get_object_or_404(University, slug=slug)
serializer = UniversityRetrieveSerializer(
university, context={"request": request}
)
return Response(serializer.data)
class ProfessorViewSet(ReadOnlyModelViewSet):
"""
### 1. Search Values
> Search professors by name keywords
- ####Example:
* #####Search by Name: [?search=Jose](?search=Jose)
---
### 2. Ordering Values
> Order professors by id, first_name, last_name, score,
university name, university city and department name
- ####Examples:
* #####Ordering by id: [?ordering=id](?ordering=id)
* #####Ordering by first name: [?ordering=first_name](?ordering=first_name)
* #####Ordering by last name: [?ordering=last_name](?ordering=last_name)
* #####Ordering by score: [?ordering=score](?ordering=score)
* #####Ordering by university name: [?ordering=university__name](?ordering=university__name)
* #####Ordering by university city: [?ordering=university__city](?ordering=university__city)
* #####Ordering by department name: [?ordering=department__name](?ordering=department__name)
The API may also specify reverse orderings by prefixing the field name
with '-', like so:
* #####Reverse ordering: [?ordering=-first_name](?ordering=-first_name)
Multiple orderings may also be specified:
* #####Multiple ordering: [?ordering=first_name,last_name](?ordering=first_name,last_name)
---
###3. Filters Values
> Filters professors by name, university name,
universitycity, department name, gender or score.
- ####Examples:
* #####Filter by university name: [?university_name=Universidad de Puerto Rico](?university_name=Universidad de Puerto Rico)
* #####Filter by university_city: [?university_city=San Juan](?university_city=San Juan)
* #####Filter by department name: [?department=Ciencias de Computadora](?department=Ciencias de Computadora)
* #####Filter by gender: [?gender=M](?gender=M)
* #####Filter by score: [?score=90](?score=90)
---
"""
queryset = Professor.objects.all()
serializer_class = ProfessorListSerializer
filter_backends = (
filters.OrderingFilter,
filters.SearchFilter,
DjangoFilterBackend,
)
ordering_fields = (
"id",
"first_name",
"last_name",
"score",
"university__name",
"university__city",
"department__name",
)
search_fields = ("first_name", "last_name")
filterset_class = ProfessorFilter
lookup_field = "slug"
def retrieve(self, request, slug=None):
professor = get_object_or_404(self.queryset, slug=slug)
serializer = ProfessorRetrieveSerializer(professor)
return Response(serializer.data)
class DepartmentViewSet(ReadOnlyModelViewSet):
"""
### 1. Search Values
> Search departments by name keywords
- ####Example:
* #####Search by University Name: [?search=Ciencias de Computadora](?search=Ciencias de Computadora)
---
### 2. Ordering Values
> Order departments by id or name
- ####Examples:
* #####Ordering by id: [?ordering=id](?ordering=id)
* #####Ordering by name: [?ordering=name](?ordering=name)
The API may also specify reverse orderings by prefixing the field name
with '-', like so:
* #####Reverse ordering: [?ordering=-name](?ordering=-name)
Multiple orderings may also be specified:
* #####Multiple ordering: [?ordering=id,name](?ordering=id,name)
---
"""
queryset = Department.objects.all()
serializer_class = DepartmentSerializer
filter_backends = (
filters.OrderingFilter,
filters.SearchFilter,
DjangoFilterBackend,
)
ordering_fields = ("id", "name")
search_fields = ("name",)
lookup_field = "slug"
```
#### File: notaso/search/views.py
```python
from django.views.generic import ListView
from ..professors.models import Professor
class SearchView(ListView):
queryset = Professor.objects.all().order_by("-first_name", "last_name")
template_name = "search.html"
def get_context_data(self):
context = super(SearchView, self).get_context_data()
context.update(
{"search_term": self.request.GET.get("q", ""), "navbarSearchShow": True}
)
return context
def get_queryset(self):
queryset = super(SearchView, self).get_queryset()
search_term = self.request.GET.get("q")
if search_term:
return Professor.objects.filter(search_index=search_term)
return queryset[:10]
``` |
{
"source": "jpadilla/py-backwards-online",
"score": 3
} |
#### File: jpadilla/py-backwards-online/app.py
```python
import textwrap
from flask import Flask, render_template, request
from py_backwards.compiler import _transform
from py_backwards.const import TARGETS
app = Flask(__name__)
SOURCE_CODE = textwrap.dedent(
"""
def returning_range(x: int):
yield from range(x)
return x
def x_printer(x):
val: int
val = yield from returning_range(x)
print(f'val {val}')
def formatter(x: int) -> dict:
items: list = [*x_printer(x), x]
print(*items, *items)
return {'items': items}
result = {'x': 10, **formatter(10)}
print(result)
class NumberManager:
def ten(self):
return 10
@classmethod
def eleven(cls):
return 11
class ImportantNumberManager(NumberManager):
def ten(self):
return super().ten()
@classmethod
def eleven(cls):
return super().eleven()
print(ImportantNumberManager().ten())
print(ImportantNumberManager.eleven())
"""
)
@app.route('/', methods=['POST', 'GET'])
def index():
source = SOURCE_CODE
target = request.args.get('target', '2.7')
if request.method == 'POST':
source = request.form['source']
target = request.form['target']
try:
path = '/tmp/file.py'
transformed, _ = _transform(path, source, TARGETS[target])
error = None
except Exception as exc:
transformed = ''
error = exc
data = {
'source': source,
'transformed': transformed.strip(),
'error': error,
'targets': TARGETS.keys(),
'selected_target': target
}
return render_template('index.html', **data)
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "JPadley18/dominos",
"score": 3
} |
#### File: dominos/dominos/api.py
```python
from ratelimit import limits, RateLimitException
from backoff import on_exception, expo
from dominos.exception import ApiError
from dominos.models import Stores, Menu, Basket, IngredientList
from dominos.utils import enum, update_session_headers
import requests
VARIANT = enum(PERSONAL=0, SMALL=1, MEDIUM=2, LARGE=3)
PAYMENT_METHOD = enum(CASH_ON_DELIVERY=0, CARD=1, PAYPAL=2, VISA_CHECKOUT=4)
FULFILMENT_METHOD = enum(COLLECTION=0, DELIVERY=1)
class Client(object):
'''
API class for the UK version of Dominos pizza website.
'''
BASE_URL = 'https://www.dominos.co.uk'
def __init__(self, session=requests.session()):
self.session = update_session_headers(session)
self.reset_store()
def new_session(self, session):
'''
Clear out the current session on the remote and setup a new one.
:return: A response from having expired the current session.
:rtype: requests.Response
'''
response = self.__get('/Home/SessionExpire')
self.session = update_session_headers(session)
return response
def reset_store(self):
'''
Clears out the current store and gets a cookie. Set the cross site
request forgery token for each subsequent request.
:return: A response having cleared the current store.
:rtype: requests.Response
'''
response = self.__get('/Store/Reset')
token = self.session.cookies['XSRF-TOKEN']
self.session.headers.update({'X-XSRF-TOKEN': token})
return response
def get_stores(self, search_term):
'''
Search for dominos pizza stores using a search term.
:param string search: Search term.
:return: A list of nearby stores matching the search term.
:rtype: list
'''
params = {'SearchText': search_term}
response = self.__get('/storefindermap/storesearch', params=params)
return Stores(response.json())
def get_nearest_store(self, postcode):
'''
Search for domino pizza stores using a postcode. This will only search
for local stores indicating delivery status and payment details.
:param string postcode: A postcode.
:return: A response containing stores matching the postcode.
:rtype: requests.Response
'''
return self.get_stores(postcode).local_store
def set_delivery_system(self, store, postcode, fulfilment_method=FULFILMENT_METHOD.DELIVERY):
'''
Set local cookies by initialising the delivery system on the remote.
Requires a store ID and a delivery postcode.
:param Store store: Store id.
:param string postcode: A postcode.
:return: A response having initialised the delivery system.
:rtype: requests.Response
'''
method = 'delivery' if fulfilment_method == FULFILMENT_METHOD.DELIVERY else 'collection'
params = {
'fulfilmentMethod': method,
'postcode': postcode,
'storeid': store.store_id
}
return self.__post('/Journey/Initialize', json=params)
def get_menu(self, store):
'''
Retrieve the menu from the selected store.
:param Store store: A store.
:return: The store menu.
:rtype: Menu
'''
params = {
'collectionOnly': not store.delivery_available,
'menuVersion': store.menu_version,
'storeId': store.store_id,
}
response = self.__get('/ProductCatalog/GetStoreCatalog', params=params)
return Menu(response.json())
def get_basket(self):
'''
Retrieve the basket for the current session.
:return: A response containing the basket for the current session.
:rtype: requests.Response
'''
response = self.__get('/CheckoutBasket/GetBasket')
return Basket(response.json())
def get_available_ingredients(self, item, size, store):
"""
Retrieves an IngredientList of ingredients that can be added/removed from the pizza
by name.
:param item: The item to find ingredients for
:param dominos.VARIANT size: The size of the pizza to be ordered
:param store: The store which the order will be placed at
:return: IngredientList: A list of available ingredients
"""
params = {
'isoCode': "en-GB",
'sizeId': size,
'id': item.item_id,
'storeId': store.store_id
}
response = self.__get("/PizzaCustomisation/PizzaViewModelBySize", params=params)
return IngredientList(response.json())
def add_item_to_basket(self, item, variant=VARIANT.MEDIUM, options={'quantity': 1}):
'''
Add an item to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Ignored if the item is a side.
:param dict options: Dictionary of options like quantity and an ingredients list
:return: A response having added an item to the current basket.
:rtype: requests.Response
'''
item_type = item.type
if item_type == 'Pizza':
return self.add_pizza_to_basket(item, variant, options)
elif item_type == 'Side':
return self.add_side_to_basket(item, options['quantity'])
return None
def add_pizza_to_basket(self, item, variant=VARIANT.MEDIUM, options={}):
'''
Add a pizza to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Some defaults are defined in the VARIANT enum.
:param dict options: Dictionary of options like quantity and an ingredients list. If nothing is
specified then a default quantity of 1 and the default ingredients for the pizza will be used.
:return: A response having added a pizza to the current basket.
:rtype: requests.Response
'''
ingredients = item.ingredients
params = {
'stepId': 0,
            'quantity': options.get('quantity', 1),
'sizeId': variant,
'productId': item.item_id,
'ingredients': ingredients,
'productIdHalfTwo': 0,
'ingredientsHalfTwo': [],
'recipeReferrer': 0
}
return self.__post('/Basket/AddPizza', json=params)
def add_side_to_basket(self, item, quantity=1):
'''
Add a side to the current basket.
:param Item item: Item from menu.
:param int quantity: The quantity of side to be added.
:return: A response having added a side to the current basket.
:rtype: requests.Response
'''
item_variant = item[VARIANT.PERSONAL]
params = {
'productSkuId': item_variant['productSkuId'],
'quantity': quantity,
'ComplimentaryItems': []
}
return self.__post('/Basket/AddProduct', json=params)
def remove_item_from_basket(self, idx):
'''
Remove an item from the current basket.
:param int idx: Basket item id.
:return: A response having removed an item from the current basket.
:rtype: requests.Response
'''
params = {
'basketItemId': idx,
'wizardItemDelete': False
}
return self.__post('/Basket/RemoveBasketItem', json=params)
def set_payment_method(self, method=PAYMENT_METHOD.CASH_ON_DELIVERY):
'''
Select the payment method going to be used to make a purchase.
:param int method: Payment method id.
:return: A response having set the payment option.
:rtype: requests.Response
'''
params = {'paymentMethod': method}
return self.__post('/PaymentOptions/SetPaymentMethod', json=params)
def set_delivery_address(self):
'''
Set the delivery address for the order.
'''
pass
def process_payment(self):
'''
Proceed with payment using the payment method selected earlier.
:return: A response having processes the payment.
:rtype: requests.Response
'''
params = {
'__RequestVerificationToken': self.session.cookies,
'method': 'submit'
}
return self.__post('/PaymentOptions/Proceed', json=params)
def __get(self, path, **kargs):
'''
Make a HTTP GET request to the Dominos UK API with the given parameters
for the current session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
return self.__call_api(self.session.get, path, **kargs)
def __post(self, path, **kargs):
'''
Make a HTTP POST request to the Dominos UK API with the given
parameters for the current session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
return self.__call_api(self.session.post, path, **kargs)
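    # The decorators below throttle __call_api to 5 calls per second and retry
    # with exponential backoff (up to 10 attempts) whenever an ApiError or a
    # RateLimitException is raised.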
@on_exception(expo, (ApiError, RateLimitException), max_tries=10)
@limits(calls=5, period=1)
def __call_api(self, verb, path, **kargs):
'''
Make a HTTP request to the Dominos UK API with the given parameters for
the current session.
:param verb func: HTTP method on the session.
:param string path: The API endpoint path.
:params list kargs: A list of arguments.
:return: A response from the Dominos UK API.
:rtype: response.Response
'''
response = verb(self.__url(path), **kargs)
if response.status_code != 200:
raise ApiError('{}: {}'.format(response.status_code, response))
return response
def __url(self, path):
'''
Helper method to generate fully qualified URIs pertaining to specific
API actions.
:param string path: Relative API path to resource.
:return: Fully qualified URI to API resource.
:rtype: string
'''
return self.BASE_URL + path
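# A minimal ordering sketch using only the methods defined above (the postcode
# is a placeholder, and how an Item is pulled out of the Menu object depends on
# dominos.models, which is not shown in this file):
#     client = Client()
#     store = client.get_nearest_store('EC4M 7RF')
#     client.set_delivery_system(store, 'EC4M 7RF')
#     menu = client.get_menu(store)
#     item = ...  # an Item taken from `menu`
#     client.add_item_to_basket(item, variant=VARIANT.LARGE, options={'quantity': 1})
#     basket = client.get_basket()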
``` |
{
"source": "JPadley18/pydrake",
"score": 2
} |
#### File: pydrake/pydrake/ddragon.py
```python
from .errors import APIError
import requests
ddragon_base = "http://ddragon.leagueoflegends.com/cdn/9.15.1/data/en_US/"
def get_static_data(extension):
"""
Retrieves a JSON file from DataDragon
:param extension: The filename to access
:return: The JSON data of the file
"""
# TODO: Smart Caching
r = requests.get(ddragon_base + extension)
if r.status_code != 200:
if r.status_code == 404:
raise APIError("404 Not Found - The file '{}' doesn't seem to exist".format(extension))
elif r.status_code == 403:
raise APIError("403 Forbidden - The file '{}' may not exist or may have been moved".format(extension))
else:
return r.json()
class Champion:
"""Encapsulates information about a Champion from the Data Dragon API
:ivar id: The Champion's ID
:ivar name: The Champion's name (e.g. `Kai'Sa`)
:ivar title: The Champion's title (e.g. `Daughter of the Void`)
:ivar blurb: The Champion's description
.. warning:: This should only be created through
:meth:`pydrake.PyDrake.get_champion_by_id`
"""
def __init__(self, data):
"""
Encapsulates information about a Champion from Data Dragon
:param data: the raw JSON data associated with the champion
"""
self.id = data['key']
self.name = data['name']
self.title = data['title']
self.blurb = data['blurb']
self.info = data['info']
self.tags = data['tags']
self.image = data['image']
self.stats = data['stats']
def get_champion_by_id(id):
"""Retrieves a Champion object from the Riot database with the given ID
:param id: the ID of the champion.
:return: a :class:`pydrake.ddragon.Champion` object with the given ID
.. warning:: This should only be called through
:meth:`pydrake.PyDrake.get_champion_by_id`
"""
raw = get_static_data("champion.json")
champion_raw = next((x for x in raw['data'].values() if x['key'] == str(id)), None)
if champion_raw is None:
raise ValueError("No champion found with ID: {}".format(id))
return Champion(champion_raw)
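# Usage sketch (requires network access to Data Dragon; 145 is assumed to be
# Kai'Sa's champion key in this data version):
#     champ = get_champion_by_id(145)
#     print(champ.name, "-", champ.title)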
```
#### File: pydrake/pydrake/summonerv4.py
```python
class Summoner:
def __init__(self, data, region):
"""
Contains information received from the summoner/v4 endpoints
:param data: The data received from the endpoint as a dict
:param region: The region code that this summoner is associated with
"""
# Parse response data
self.name = data['name']
self.id = data['id']
self.account_id = data['accountId']
self.puuid = data['puuid']
self.iconId = data['profileIconId']
self.level = data['summonerLevel']
self.region = region
# This is only for later creating a Ranked Summoner object
self._raw = data
```
#### File: JPadley18/pydrake/tests.py
```python
from pydrake.summonerv4 import Summoner
from pydrake.leaguev4 import RankedSummoner
from pydrake.matchv4 import MatchList, Match
from pydrake.errors import APIError
from pydrake.ddragon import *
from pydrake import PyDrake
from os import path
import unittest
import json
here = path.join(path.abspath(path.dirname(__file__)), "tests/")
def get_attrs(obj):
"""
Utility function to return all non-function variables in an object
:param obj: The object to retrieve vars from
:return: The vars found, excluding all methods
"""
return [x for x in dir(obj) if not x.startswith('__') and not callable(getattr(obj, x))]
def has_null_attrs(obj):
"""
Returns a boolean value based on whether any of this object's attributes is
null or 'None'
:param obj: The object to check
:return: True if None attributes are found, else False
"""
attrs = get_attrs(obj)
null = [x for x in attrs if getattr(obj, x) is None]
return len(null) > 0
class TestClasses(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(path.join(here, "summoner-v4-summoners-by-name.json")) as raw:
cls.summonerv4byname = json.loads(raw.read())
with open(path.join(here, "league-v4-entries-by-summoner.json")) as raw:
cls.leaguev4bysummoner = json.loads(raw.read())
with open(path.join(here, "ddragon-champion.json")) as raw:
cls.ddragonchampions = json.loads(raw.read())
with open(path.join(here, "match-v4-matches.json")) as raw:
cls.matchv4matches = json.loads(raw.read())
with open(path.join(here, "match-v4-matchlists-by-account.json")) as raw:
cls.matchv4matchlists = json.loads(raw.read())
def test_summoner_v4_summoners_by_name(self):
summoner = Summoner(self.summonerv4byname, "euw1")
self.assertFalse(has_null_attrs(summoner))
self.assertEqual(summoner.name, "Janoccoli")
self.assertEqual(summoner.region, "euw1")
self.assertEqual(summoner.level, 103)
def test_league_v4_entries_by_summoner(self):
summoner = Summoner(self.summonerv4byname, "euw1")
ranked = RankedSummoner(self.leaguev4bysummoner, summoner)
self.assertFalse(has_null_attrs(ranked))
solo = ranked.get_ranked_queue("RANKED_SOLO_5x5")
self.assertFalse(has_null_attrs(solo))
self.assertEqual(len(ranked._ranks), 2)
self.assertEqual(solo.rank, 3)
self.assertEqual(solo.tier, "BRONZE")
def test_ddragon_champion(self):
champions = self.ddragonchampions['data'].values()
champion_objs = [Champion(x) for x in champions]
self.assertEqual(len(champion_objs), 145)
for x in champion_objs:
self.assertFalse(has_null_attrs(x))
def test_match_v4_matches(self):
match = Match(self.matchv4matches)
self.assertFalse(has_null_attrs(match))
self.assertEqual(len(match.participants), 10)
self.assertEqual(len(match.teams), 2)
def test_match_v4_matchlists_by_account(self):
matchlist = MatchList(self.matchv4matchlists)
self.assertEqual(len(matchlist._matches), 100)
self.assertFalse(has_null_attrs(matchlist))
for x in matchlist._matches:
self.assertFalse(has_null_attrs(x))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jpadmin/qlikdemo",
"score": 3
} |
#### File: qlikdemo/api/api.py
```python
from flask import Flask
from flask import request, jsonify
import time
def getRequestType(headers):
requestHeaders = dict(headers)
return requestHeaders['Content-Type']
#defines an object for the application
app = Flask(__name__)
#Creates in-memory data store for the messages
messageStore = [
{
'id' : 1111111111,
'message': "Welcome to Qlik message store"
}
]
def generateResponse(status, action, info=None, error=None, response=None):
responsedata = {
'status' : status,
'action' : action,
'info' : info,
'error' : error,
'response' : response
}
return jsonify(responsedata)
#Allows the user to post messages to the store using the HTTP POST method
@app.route('/api/message', methods=['POST'])
def addMessage():
if getRequestType(request.headers) == 'application/x-www-form-urlencoded':
requestdata = request.form
elif getRequestType(request.headers) == 'application/json':
requestdata = request.json
else:
return generateResponse(451, "Add Message", info="Failed", error="Invalid request: Only x-www-form-urlencoded or json")
if requestdata['message'] is None or requestdata['message'] == "":
return generateResponse(200, "Add Message", info="Failed", error="No or Blank Message data")
messageData = {
'id' : int(time.time()),
'message' : requestdata['message']
}
messageStore.append(messageData)
return generateResponse(200, "Add Message", info="Success", response=messageData)
#Allows the user to list messages from the store using the HTTP GET method
@app.route('/api/message', methods=['GET'])
def ListMessage():
return generateResponse(200, "List All Messages", info="Success", response=messageStore)
#Allows the user to retrieve specific message from the store using the HTTP GET method and the message ID
#Palindrome check is implemented
@app.route('/api/message/<msg_id>', methods=['GET'])
def ListSpecificMessage(msg_id):
my_message = None
for sMessage in messageStore:
if sMessage['id'] == int(msg_id):
my_message = sMessage
break
if my_message is not None:
if my_message['message'] == my_message['message'][::-1]:
palindrome = "Palindrome Check - Passed"
else:
palindrome = "Palindrome Check - Failed"
return generateResponse(200, "List Specific Message", info="Success::"+palindrome, response=my_message)
else:
return generateResponse(404, "List Specific Message", info="Failed", error="Your message id does not exist")
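# Example request against the running app (it listens on port 80, see the
# app.run call at the bottom; 1111111111 is the seeded message's id):
#     curl http://localhost/api/message/1111111111
# The JSON response reports info "Success::Palindrome Check - Failed", since
# the seeded welcome message is not a palindrome.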
#Allows the user to delete specific message from the store using the HTTP POST method and the message ID
@app.route('/api/message/<msg_id>', methods=['POST'])
def DeleteSpecificMessage(msg_id):
my_message = None
for sMessage in messageStore:
if sMessage['id'] == int(msg_id):
my_message = sMessage
break
if my_message is not None:
messageStore.remove(my_message)
return generateResponse(200, "Delete Specific Message", info="Success", response=my_message)
else:
return generateResponse(404, "Delete Specific Message", info="Failed", error="Your message id does not exist")
if __name__ == '__main__':
app.run(host='0.0.0.0',port=80)
``` |
{
"source": "jpaganini/plascad",
"score": 2
} |
#### File: plascad/plas_cad/__main__.py
```python
from collections import OrderedDict
import shutil
import os
import glob
import argparse
import sys
import subprocess
sys.path.append("..")
import plas_cad
def main():
usage = ("usage: Plascad -i your.plasmid.seqs.fasta")
#version = 'Plascad {v}'.format(v=plas_cad.__version__)
###################################### checking dependencies ########################################
list_cmd = ['prodigal', 'blastp', 'hmmsearch']
for cmd in list_cmd:
exist = subprocess.call('command -v '+ cmd + '>> /dev/null', shell=True)
if exist == 0:
pass
else:
print(cmd + " not exist in path!")
sys.exit()
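    # A shorter equivalent using shutil.which (shutil is imported above); kept
    # here only as an illustrative sketch of the same dependency check:
    #     missing = [c for c in list_cmd if shutil.which(c) is None]
    #     if missing:
    #         sys.exit('missing dependencies: ' + ', '.join(missing))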
###################################### Arguments and declarations ########################################
parser = argparse.ArgumentParser()
parser.add_argument("-i",
help="input plasmids file for classification", type=str,
default='example/example.fasta')
parser.add_argument("-n", action='store_true',
help="prodigal normal mode")
parser.add_argument("-cMOBB",
help="alignment coverage for MOBB HMM profile",
default=75)
parser.add_argument("-cMOBC",
help="alignment coverage for MOBC HMM profile",
default=75)
parser.add_argument("-cMOBF",
help="alignment coverage for MOBF HMM profile",
default=75)
parser.add_argument("-cMOBT",
help="alignment coverage for MOBT HMM profile",
default=75)
parser.add_argument("-cMOBPB",
help="alignment coverage for MOBPB HMM profile",
default=75)
parser.add_argument("-cMOBH",
help="alignment coverage for MOBH HMM profile",
default=70)
parser.add_argument("-cMOBP",
help="alignment coverage for MOBP HMM profile",
default=65)
parser.add_argument("-cMOBV",
help="alignment coverage for MOBV HMM profile",
default=60)
parser.add_argument("-cMOBQ",
help="alignment coverage for MOBQ HMM profile",
default=55)
args = parser.parse_args()
directory = os.path.abspath(os.path.dirname(__file__))
file_input = os.path.basename(args.i)
file_name, file_ext = os.path.splitext(file_input)
dirname = os.path.dirname(os.path.abspath(args.i))
os.chdir(dirname)
###################################### Prodigal ###########################################################
cmdprodigal_meta = "prodigal" + " -i " + str(file_input) + \
" -a " + str(file_name) + ".faa " + " -p meta -q -o temp.txt"
cmdprodigal_normal = "prodigal" + " -i " + str(file_input) + \
" -a " + str(file_name) + ".faa " + " -q -o temp.txt"
if args.n:
os.system(cmdprodigal_normal)
else:
os.system(cmdprodigal_meta)
os.remove("temp.txt")
###################################### MOB hmmer ###############################################################
mob_hmm = os.path.join(directory, "database/hmm_module/MOB_hmm")
for root, dirnames, filenames in os.walk(mob_hmm):
for i in filenames:
cmdhmmsearch = "hmmsearch" + " --domtblout " + str(file_name) + "_" + str(i) + \
"_domtblout " + str(os.path.join(mob_hmm, i)) + " " + str(file_name) + ".faa "
os.system(cmdhmmsearch)
###################################### hmmer parsing #########################################################
cmdmobparsing = 'python ' + os.path.join(directory, "scripts/MOB_parser.py") + ' -i ' + str(file_name) + ' -cMOBB ' + str(args.cMOBB) + ' -cMOBC ' + str(args.cMOBC) + \
' -cMOBF ' + str(args.cMOBF)+ ' -cMOBT ' + str(args.cMOBT) + ' -cMOBPB ' + str(args.cMOBPB) + ' -cMOBH ' + str(args.cMOBH) \
+ ' -cMOBP ' + str(args.cMOBP) + ' -cMOBV ' + str(args.cMOBV) + ' -cMOBQ ' + str(args.cMOBQ) + "\n"
os.system(cmdmobparsing)
os.system('rm -rf *domtblout')
###################################### MPF system hmmer #######################################################
MPF_hmm = os.path.join(directory, "database/hmm_module/MPF_system_hmm")
for root, dirnames, filenames in os.walk(MPF_hmm):
for i in filenames:
cmdMPF = "hmmsearch" + " --domtblout " + str(file_name) + "_mob_" + str(i) + \
"_domtblout " + str(os.path.join(MPF_hmm, i)) + " " + str(file_name) + "_MOB_temp_mob.faa"
os.system(cmdMPF)
###################################### MPF parsing ##########################################################
cmdATPase_T4CP_parsing = 'python ' + os.path.join(directory, 'scripts/MPF_hmm_parser.py') + ' -i ' + str(file_name) + "\n"
os.system(cmdATPase_T4CP_parsing)
###################################### MPFF summary ##########################################################
cmdcatmpff = 'cat ' + str(file_name) + '_ATPase_temp_parsed_result_out ' + str(file_name) + '_T4CP_temp_parsed_result_out ' \
+ str(file_name) + "*MPFF*_result_out > " + str(file_name) + "_MPFF_summary_out"
cmdcatmpfg = 'cat ' + str(file_name) + '_ATPase_temp_parsed_result_out ' + str(file_name) + '_T4CP_temp_parsed_result_out ' \
+ str(file_name) + "*MPFG*_result_out > " + str(file_name) + "_MPFG_summary_out"
cmdcatmpfi = 'cat ' + str(file_name) + '_ATPase_temp_parsed_result_out ' + str(file_name) + '_T4CP_temp_parsed_result_out ' \
+ str(file_name) + "*MPFI*_result_out > " + str(file_name) + "_MPFI_summary_out"
cmdcatmpft = 'cat ' + str(file_name) + '_ATPase_temp_parsed_result_out ' + str(file_name) + '_T4CP_temp_parsed_result_out ' \
+ str(file_name) + "*MPFT*_result_out > " + str(file_name) + "_MPFT_summary_out"
os.system(cmdcatmpff)
os.system(cmdcatmpfg)
os.system(cmdcatmpfi)
os.system(cmdcatmpft)
###################################### MPFF classification ##########################################################
cmdConjparsing = 'python ' + os.path.join(directory, 'scripts/Conj_parser.py') + ' -i ' + str(file_name) + "\n"
os.system(cmdConjparsing)
cmdcatconj = 'cat ' + str(file_name) + "*Conj_summary_out > " + str(file_name) + "_Conj_out"
os.system(cmdcatconj)
###################################### Plasmids classification ######################################################
cmdPlasparsing = 'python ' + os.path.join(directory, 'scripts/Conj_mob_classification.py') + ' -i ' + str(file_name) + "\n"
os.system(cmdPlasparsing)
###################################### Plasmids ARGs identification ######################################################
cmdblastp = "blastp" + " -query " + str(file_name) + ".faa" + " -db "\
+ os.path.join(directory, 'database/ARGsdb/ARGsDB') + " -outfmt 6 -evalue 1e-5 -num_threads 20 " +\
"| sort -k1,1 -k12,12nr -k11,11n | sort -u -k1,1 --merge > " + str(file_name) + "_ARGs_blasp_result_out"
os.system(cmdblastp)
###################################### Plasmids ARGs parsing ######################################################
cmdARGsparsing = 'python ' + os.path.join(directory, 'scripts/plasmids_ARGs_parser.py') + ' -i ' + str(file_name) + \
' -db ' + os.path.join(directory, 'database/ARGsdb/ARGsDB.fasta') + ' -db_structure ' + os.path.join(directory, 'database/ARGsdb/ARGsDB_des.txt')
os.system(cmdARGsparsing)
###################################### Plasmids ARGs summary ######################################################
cmdARGssummary = 'python ' + os.path.join(directory, 'scripts/plasmids_result_summary.py') + ' -i ' + str(file_name)
os.system(cmdARGssummary)
###################################### summary result ######################################################
cmdcatsummary = 'cat ' + str(file_name) + '_Conj_plasmids_ids_result_out ' + str(file_name) + '_mob_unconj_plasmids_ids_result_out ' +\
str(file_name) + '_unmob_plasmids_ids_result_out > ' + str(file_name) + "_plasmids_classification_sum.txt"
os.system(cmdcatsummary)
###################################### plasmids map plotting ######################################################
cmdplot = 'python ' + os.path.join(directory, 'scripts/Plasmids_plot.py') + ' -i ' + str(file_name)
os.system(cmdplot)
###################################### remove temp ######################################################
os.system('rm -rf *out')
os.system('rm -rf *faa')
###################################### function ######################################################
if __name__ == '__main__':
main()
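# Example invocation (sketch; assumes the package exposes the console command
# named in the usage string above):
#   Plascad -i your.plasmid.seqs.fasta -cMOBB 75 -cMOBH 70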
```
#### File: plas_cad/scripts/MPF_hmm_parser.py
```python
from collections import OrderedDict
import re
from Bio import SeqIO
import os
import fnmatch
import argparse
###################################### Arguments and declarations ########################################
parser = argparse.ArgumentParser()
parser.add_argument("-i",
help="input plasmids file prefix for classification",type=str)
args = parser.parse_args()
###################################### parsing MPF module #############################################
def MPF_parsing(in_domtblout, coverage):
dic = OrderedDict()
MPF_result = open(str(in_domtblout) + "_" + "parsed_out", "w")
wanted1 = set()
wanted2 = set()
for line in open(in_domtblout, "r"):
if not line.startswith("#"):
KEY = "\t".join(str(line).strip().split()[0:6])
c_value = str(line).strip().split()[11]
domain_start = str(line).strip().split()[17]
domain_end = str(line).strip().split()[18]
wanted1.add(KEY + "\t" + c_value + "\t" + domain_start + "\t" + domain_end)
for line in wanted1:
try:
items = re.split("\t", line.strip())
key = '\t'.join(items[:6])
value1 = items[6]
value2 = items[7]
value3 = items[8]
if key not in dic:
dic[key] = [[item] for item in items[6:]]
else:
if float(value1) < float(dic[key][0][0]):
dic[key][0][0] = value1
if float(value2) < float(dic[key][1][0]):
dic[key][1][0] = value2
if float(value3) > float(dic[key][2][0]):
dic[key][2][0] = value3
except:
pass
for k, v in dic.items():
wanted2.add('{}\t{}\t{}\t{}\n'.format(k, *map(''.join, (v))))
###################################### parsing based on coverage ############################
MPF_wanted = OrderedDict()
for line in wanted2:
id = str(line).strip().split("\t")[0]
lis = str(line).strip().split("\t")
if ((float(lis[8]) - float(lis[7])) / float(lis[5])) * 100 >= int(coverage) or \
((float(lis[8]) - float(lis[7])) / float(lis[2])) * 100 >= int(coverage) and float(lis[6]) <= 0.01:
MPF_wanted[id] = line
for v in MPF_wanted.values():
MPF_result.write(v)
MPF_result.close()
##################################### MPF classification ####################################
for root, folders, files in os.walk(os.getcwd()):
for i in fnmatch.filter(files, '*hmm_domtblout'):
MPF_parsing(i, 50)
##################################### ATPase and T4CP summary ########################################
catATPase = 'cat ' + '*ATPase.hmm_domtblout_parsed_out> ' + str(args.i) + '_ATPase_temp_out'
catT4CP = 'cat ' + '*T4CP*_domtblout_parsed_out> ' + str(args.i) + '_T4CP_temp_out'
os.system(catATPase)
os.system(catT4CP)
#################################### ATPase and T4CP parsing result #######################################
def MPFF_parsed_result(summary, faa):
result = open(str(summary).rsplit("_", 1)[0] + "_parsed_result_out", "w")
diclocation = {}
location_wanted = set()
for record in SeqIO.parse(open(faa, "r"), "fasta"):
diclocation[record.id] = record.description.split("#")[1].strip() + "-" + record.description.split("#")[2].strip() + "\t" + record.description.split("#")[3].strip()
dicATPase_T4CP = {}
for line in open(summary, "r"):
key = str(line).strip().split("\t")[0]
e_value = float(str(line).strip().split("\t")[6])
if key not in dicATPase_T4CP.keys():
dicATPase_T4CP[key] = str(line).strip().split("\t")[3] + "\t" + \
str(line).strip().split("\t")[6] + "\t" + str(diclocation[key]) + "\n"
else:
if float(e_value) < float(str(dicATPase_T4CP[key]).split("\t")[1]):
dicATPase_T4CP[key] = str(line).strip().split("\t")[3] + "\t" + \
str(line).strip().split("\t")[6] + "\t" + str(diclocation[key]) + "\n"
for key, value in dicATPase_T4CP.items():
location_wanted.add(str(key) + "\t" + str(value).strip())
dic_wanted = {}
for line in location_wanted:
key = str(line).split("\t")[0].rsplit("_", 1)[0]
e_value = float(str(line).strip().split("\t")[2])
if key not in dic_wanted.keys():
dic_wanted[key] = "\t".join(str(line).split("\t")[1:])
else:
if float(e_value) < float(str(dic_wanted[key]).split("\t")[1]):
dic_wanted[key] = "\t".join(str(line).split("\t")[1:])
for k, v in dic_wanted.items():
result.write(str(k) + "\t" + str(v).strip() + "\n")
result.close()
MPFF_parsed_result(str(args.i) + '_ATPase_temp_out', str(args.i) + "_MOB_temp_mob.faa")
MPFF_parsed_result(str(args.i) + '_T4CP_temp_out', str(args.i) + "_MOB_temp_mob.faa")
################################################################################################
for root, folders, files in os.walk(os.getcwd()):
for i in fnmatch.filter(files, '*hmm_domtblout_parsed_out'):
MPFF_parsed_result(i, str(args.i) + "_MOB_temp_mob.faa")
################################################################################################
```
#### File: plas_cad/scripts/Plasmids_plot.py
```python
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
from ElementTree_pretty import prettify
from Bio import SeqIO
import argparse
import os
###################################### Arguments and declarations ########################################
parser = argparse.ArgumentParser()
parser.add_argument("-i",
help="input plasmids file prefix for classification",type=str)
args = parser.parse_args()
####################################################### plasmids plot process #####################################
curdir = os.getcwd()
directory = os.path.abspath(os.path.dirname(__file__))
result_dir = os.path.join(curdir, str(args.i) +'_Conjugative_plasmids_map')
if not os.path.exists(result_dir):
os.makedirs(result_dir)
cmdcpjs = 'cp -r ' + os.path.join(directory, 'js ') + result_dir
os.system(cmdcpjs)
####################################################### main_program #####################################
def html_prepare(lis_in):
m = 33
n = 33
length = lis_in[0].split("\t")[5]
ID = lis_in[0].split("\t")[0]
tick = str(round(float(length)/50))
tick2 = str(round(float(length)/12))
result = open(os.path.join(result_dir, ID + ".html"), "w")
root = Element("html")
root.set('version', '1.0')
head = SubElement(root, 'head')
title = SubElement(head, 'script', attrib= {"src":"js/angularplasmid.complete.min.js"})
title.text = " "
# title.text = " src='js/angularplasmid.complete.min.js'"
body = SubElement(root, 'body')
style = SubElement(body, 'style')
ti_style = SubElement(style, "setting")
ti_style.text = " body {font-family: 'Lato';font-weight:400;}" \
".boundary {stroke-dasharray:2,2;stroke-width:2px}" \
".mdlabel {font-size:14px}" \
".smlabel {font-size:8px}" \
".white {fill:#fff}" \
".red {fill:rgb(192,64,64)}" \
".purple {fill:rgb(192,64,192)}" \
".blue {fill:rgb(64,192,192)}" \
".green {fill:rgb(64,192,64)}" \
".labelline {stroke:#333;stroke-dasharray:2,2;stroke-width:2px;}" \
".gold {fill:rgb(192,128,64)}" \
""
plasmid = SubElement(body, 'plasmid', attrib={"sequencelength": length, "plasmidheight": '700', "plasmidwidth":'700'})
plasmidtrack = SubElement(plasmid, 'plasmidtrack', attrib={"trackstyle":"fill:#ccc", "width":"5", "radius":"150"})
plasmidtrack.text = " "
plasmidtrack2 = SubElement(plasmid, 'plasmidtrack', attrib={"trackstyle":"fill:rgba(225,225,225,0.5)","radius":"140"})
tracklabel = SubElement(plasmidtrack2, "tracklabel", attrib={"text":ID, "labelstyle":"font-size:20px;font-weight:400"})
tracklabel.text = " "
tracklabel = SubElement(plasmidtrack2, "tracklabel", attrib={"text":length + " bp", "labelstyle":"ffont-size:10px", "vadjust":"20"})
tracklabel.text = " "
trackscale = SubElement(plasmidtrack2, "trackscale", attrib={"interval":tick, "style":"stroke:#999", "ticksize":"3"})
trackscale.text = " "
trackscale = SubElement(plasmidtrack2, "trackscale", attrib={"interval":tick, "style":"stroke:#999","direction": "in", "ticksize":"3"})
trackscale.text = " "
trackscale = SubElement(plasmidtrack2, "trackscale", attrib={"interval":tick2, "style":"stroke:#f00", "direction":"in", "showlabels":"1", "labelstyle":"fill:#999;stroke:none;text-anchor:middle;alignment-baseline:middle;font-size:10px"})
trackscale.text = " "
for i in lis_in:
if "MOB" in i:
mob_type = str(i).split("\t")[1]
start = str(i).split("\t")[3].split("-")[0]
end = str(i).split("\t")[3].split("-")[1]
if str(i).split("\t")[4] == str(1):
arrow_s = str(-2)
arrow_e = str(2)
else:
arrow_s = str(2)
arrow_e = str(-2)
trackmarker = SubElement(plasmidtrack2, "trackmarker", attrib={"start":start, "end":end,"markerstyle":"fill:rgba(85,0,170,0.9)", "arrowendlength":arrow_e, "arrowstartlength":arrow_s})
markerlabel = SubElement(trackmarker, "markerlabel", attrib={"type":"path", "class":"mdlabel purple","valign":"outer","vadjust":"23","text":mob_type})
markerlabel.text = " "
trackmarker = SubElement(plasmidtrack2, "trackmarker", attrib={"start":start, "end":end,"markerstyle":"fill:rgba(238,221,255,0.6)", "wadjust":"-5", "vadjust":"25"})
trackmarker.text = " "
if "ATPase" in i:
start = str(i).split("\t")[3].split("-")[0]
end = str(i).split("\t")[3].split("-")[1]
if str(i).split("\t")[4] == str(1):
arrow_s = str(-2)
arrow_e = str(2)
else:
arrow_s = str(2)
arrow_e = str(-2)
trackmarker = SubElement(plasmidtrack2, "trackmarker", attrib={"start":start, "end":end,"markerstyle":"fill:rgba(0,85,170,0.9)", "arrowendlength":arrow_e, "arrowstartlength":arrow_s})
markerlabel = SubElement(trackmarker, "markerlabel", attrib={"type":"path", "class":"mdlabel blue","valign":"outer","vadjust":"23","text":"ATPase"})
markerlabel.text = " "
trackmarker = SubElement(plasmidtrack2, "trackmarker", attrib={"start":start, "end":end,"markerstyle":"fill:rgba(221,238,255,0.6)", "wadjust":"-5", "vadjust":"25"})
trackmarker.text = " "
if "T4CP" in i:
start = str(i).split("\t")[3].split("-")[0]
end = str(i).split("\t")[3].split("-")[1]
if str(i).split("\t")[4] == str(1):
arrow_s = str(-2)
arrow_e = str(2)
else:
arrow_s = str(2)
arrow_e = str(-2)
trackmarker = SubElement(plasmidtrack2, "trackmarker", attrib={"start":start, "end":end,"markerstyle":"fill:rgba(170,85,0,0.9)", "arrowendlength":arrow_e, "arrowstartlength":arrow_s})
markerlabel = SubElement(trackmarker, "markerlabel", attrib={"type":"path", "class":"mdlabel gold", "valign":"outer","vadjust":"23", "text":"T4CP"})
markerlabel.text = " "
trackmarker = SubElement(plasmidtrack2, "trackmarker", attrib={"start":start, "end":end,"markerstyle":"fill:rgba(255,238,221,0.6)", "wadjust":"-5", "vadjust":"25"})
trackmarker.text = " "
if "MPFG" not in i and "MPFI" not in i and "MPFT" not in i and "MPFF" not in i and "MOB" not in i:
i = i + "\t" + str(n)
n +=15
start = str(i).split("\t")[3].split("-")[0]
end = str(i).split("\t")[3].split("-")[1]
des = str(i).split("\t")[1].split("__")[1]
adjARG = str(i).split("\t")[6]
if str(i).split("\t")[4] == str(1):
arrow_s = str(-2)
arrow_e = str(2)
else:
arrow_s = str(2)
arrow_e = str(-2)
trackmarker = SubElement(plasmidtrack2, "trackmarker", attrib={"start":start, "end":end,"markerstyle":"fill:rgba(170,0,85,0.9)", "arrowendlength":arrow_e, "arrowstartlength":arrow_s})
markerlabel = SubElement(trackmarker, "markerlabel", attrib={"type":"path", "class":"mdlabel red", "valign":"outer","vadjust":adjARG,"text":des,"showline":"1","lineclass":"labelline"})
markerlabel.text = " "
trackmarker = SubElement(plasmidtrack2, "trackmarker", attrib={"start":start, "end":end,"markerstyle":"fill:rgba(255,221,238,0.6)", "wadjust":"-5", "vadjust":"25"})
trackmarker.text = " "
if "virB" in i or "traC" in i or "traE" in i or "traH" in i or "traK" in i or "MPFG_41" in i or "MPFG_44" in i or "MPFG_51" in i or "MPFG_52" in i\
or "traL" in i or "traN" in i or "traU" in i or "traV" in i or "traW" in i or "traI" in i or "traQ" in i or "traM" in i or "traP" in i\
or "traR" in i or "traY" in i:
i = i + "\t" + str(m)
m +=15
start = str(i).split("\t")[3].split("-")[0]
end = str(i).split("\t")[3].split("-")[1]
des = str(i).split("\t")[1].rsplit("_",1)[1]
adj = str(i).split("\t")[6]
if str(i).split("\t")[4] == str(1):
arrow_s = str(-2)
arrow_e = str(2)
else:
arrow_s = str(2)
arrow_e = str(-2)
trackmarker = SubElement(plasmidtrack2, "trackmarker", attrib={"start":start, "end":end,"markerstyle":"fill:rgba(85,170,0,0.9)", "arrowendlength":arrow_e, "arrowstartlength":arrow_s})
markerlabel = SubElement(trackmarker, "markerlabel", attrib={"type":"path", "class":"mdlabel green","valign":"outer","vadjust":adj, "text":des, "showline":"1","lineclass":"labelline"})
markerlabel.text = " "
trackmarker = SubElement(plasmidtrack2, "trackmarker",attrib={"start": start, "end": end, "markerstyle": "fill:rgba(238,255,221,0.6)","wadjust": "-5", "vadjust": "25"})
trackmarker.text = " "
result.write(prettify(root))
# ####################################################### prepare the plotting plasmids #####################################
def plasmids_prepare(plas_fasta,conj_sum):
plas_length = {}
for record in SeqIO.parse(open(plas_fasta, "r"), "fasta"):
plas_length[str(record.id).strip()] = len(str(record.seq))
plot = []
dic = {}
for line in open(conj_sum, "r"):
key = str(line).strip().split("\t")[0]
if key in plas_length:
line = str(line).strip() + "\t" + str(plas_length[key]).strip()
if key not in dic.keys():
dic[key] = [str(line).strip()]
else:
dic[key].append(str(line).strip())
for k, v in dic.items():
plot.append(v)
for i in plot:
html_prepare(i)
if __name__ == "__main__":
plasmids_prepare(str(args.i) + ".fasta",str(args.i) + "_Conj_plasmids_loc_sum.txt")
``` |
{
"source": "jpages/twopy",
"score": 3
} |
#### File: twopy/interpreter/simple_interpreter.py
```python
from types import *
import dis
import importlib
import frontend
from frontend import model
# The singleton of the Interpreter
simple_interpreter_instance = None
def get_interpreter(maincode, subdirectory, args):
global simple_interpreter_instance
if simple_interpreter_instance is None:
simple_interpreter_instance = SimpleInterpreter(maincode, subdirectory, args)
return simple_interpreter_instance
class SimpleInterpreter:
def __init__(self, maincode, subdirectory, args):
# The main CodeObject
self.maincode = maincode
self.mainmodule = model.MModule(maincode)
# All loaded modules
self.modules = []
self.modules.append(self.mainmodule)
# The directory of the executed file
self.subdirectory = subdirectory
# Command-line arguments for the vm
self.args = args
# A list indexed by function identifiers, the first one has index 0
self.functions = []
# The global environment
self.global_environment = {}
# List of all environments, the last one is the current call
self.environments = []
self.functions_called = []
# The Jit compiler instance, will be set by the launcher
self.jitcompiler = None
# Association between a Code object and a Function object to avoid duplication
self.code_to_function = dict()
# Iterate over opcodes and execute the code
def execute(self):
# Precompile the code by generating proper instructions and basic blocks
self.precompile()
# Start the execution
self.start()
def precompile(self):
# Generate the main function and recursively other functions in module
self.generate_function(self.maincode, "main", self.mainmodule, True)
# TODO: use identifiers instead of names to call functions
# code : the CodeObject of this function
# name : Function name
# module : the Module instance
# is_main : true if the function is top-level of a module
def generate_function(self, code, name, module, is_main):
if code in self.code_to_function:
return self.code_to_function[code]
function = model.Function(code.co_filename, code.co_argcount,
code.co_kwonlyargcount, code.co_nlocals, code.co_stacksize, code.co_consts,
code.co_names, code.co_varnames, code.co_freevars, code.co_cellvars,
name, dis.get_instructions(code), self, module, is_main)
self.code_to_function[code] = function
self.functions.append(function)
if self.args.verbose:
print(dis.dis(code))
return function
# Start the execution after the compilation of functions
def start(self):
# Start from the main (toplevel) function
# The execution stack
self.stack = []
self.functions_called.append(self.functions[0])
env = {}
self.current_function().environments.append(env)
self.environments.append(env)
# Initialize primitive functions
for key, value in primitives.items():
self.global_environment[key] = value
self.execute_function(self.functions[0])
# Return the function currently called, the last one on the stack
def current_function(self):
return self.functions_called[-1]
''' Generate a new class and return it
func = main function of the class
name = class name
bases = direct superclasses
metaclass = The metaclass
'''
def make_class(self, func, name, *bases, metaclass=None, **kwds):
return model.MClass(self, func, name, bases, metaclass, kwds)
# Print the current stack from bottom to top
def print_stack(self):
i = len(self.stack) -1
for el in reversed(self.stack):
print("\t " + str(i) + " " + str(el))
i -= 1
# Push a value onto the stack
def push(self, value):
if self.args.execution:
print("PUSH " + str(value) + str(value.__class__))
self.stack.append(value)
# Pop a value from the stack
def pop(self):
if self.args.execution:
res = self.stack.pop()
print("POP " + str(res) + str(res.__class__))
return res
else:
return self.stack.pop()
# Execute the current function
def execute_function(self, mfunction):
self.functions_called.append(mfunction)
# Entry block
self.execute_block(mfunction.start_basic_block)
def execute_block(self, block):
for instruction in block.instructions:
self.execute_instruction(instruction)
# Dispatch of instructions
def execute_instruction(self, instruction):
if self.args.execution:
print("Execution of " + str(instruction))
if isinstance(instruction, model.POP_TOP):
self.POP_TOP(instruction)
elif isinstance(instruction, model.ROT_TWO):
self.ROT_TWO(instruction)
elif isinstance(instruction, model.ROT_THREE):
self.ROT_THREE(instruction)
elif isinstance(instruction, model.DUP_TOP):
self.DUP_TOP(instruction)
elif isinstance(instruction, model.DUP_TOP_TWO):
self.DUP_TOP_TWO(instruction)
elif isinstance(instruction, model.NOP):
self.NOP(instruction)
elif isinstance(instruction, model.UNARY_POSITIVE):
self.UNARY_POSITIVE(instruction)
elif isinstance(instruction, model.UNARY_NEGATIVE):
self.UNARY_NEGATIVE(instruction)
elif isinstance(instruction, model.UNARY_NOT):
self.UNARY_NOT(instruction)
elif isinstance(instruction, model.UNARY_INVERT):
self.UNARY_INVERT(instruction)
elif isinstance(instruction, model.BINARY_MATRIX_MULTIPLY):
self.BINARY_MATRIX_MULTIPLY(instruction)
elif isinstance(instruction, model.INPLACE_MATRIX_MULTIPLY):
self.INPLACE_MATRIX_MULTIPLY(instruction)
elif isinstance(instruction, model.BINARY_POWER):
self.BINARY_POWER(instruction)
elif isinstance(instruction, model.BINARY_MULTIPLY):
self.BINARY_MULTIPLY(instruction)
elif isinstance(instruction, model.BINARY_MODULO):
self.BINARY_MODULO(instruction)
elif isinstance(instruction, model.BINARY_ADD):
self.BINARY_ADD(instruction)
elif isinstance(instruction, model.BINARY_SUBTRACT):
self.BINARY_SUBTRACT(instruction)
elif isinstance(instruction, model.BINARY_SUBSCR):
self.BINARY_SUBSCR(instruction)
elif isinstance(instruction, model.BINARY_FLOOR_DIVIDE):
self.BINARY_FLOOR_DIVIDE(instruction)
elif isinstance(instruction, model.BINARY_TRUE_DIVIDE):
self.BINARY_TRUE_DIVIDE(instruction)
elif isinstance(instruction, model.INPLACE_FLOOR_DIVIDE):
self.INPLACE_FLOOR_DIVIDE(instruction)
elif isinstance(instruction, model.INPLACE_TRUE_DIVIDE):
self.INPLACE_TRUE_DIVIDE(instruction)
elif isinstance(instruction, model.GET_AITER):
self.GET_AITER(instruction)
elif isinstance(instruction, model.GET_ANEXT):
self.GET_ANEXT(instruction)
elif isinstance(instruction, model.BEFORE_ASYNC_WITH):
self.BEFORE_ASYNC_WITH(instruction)
elif isinstance(instruction, model.INPLACE_ADD):
self.INPLACE_ADD(instruction)
elif isinstance(instruction, model.INPLACE_SUBTRACT):
self.INPLACE_SUBTRACT(instruction)
elif isinstance(instruction, model.INPLACE_MULTIPLY):
self.INPLACE_MULTIPLY(instruction)
elif isinstance(instruction, model.INPLACE_MODULO):
self.INPLACE_MODULO(instruction)
elif isinstance(instruction, model.STORE_SUBSCR):
self.STORE_SUBSCR(instruction)
elif isinstance(instruction, model.DELETE_SUBSCR):
self.DELETE_SUBSCR(instruction)
elif isinstance(instruction, model.BINARY_LSHIFT):
self.BINARY_LSHIFT(instruction)
elif isinstance(instruction, model.BINARY_RSHIFT):
self.BINARY_RSHIFT(instruction)
elif isinstance(instruction, model.BINARY_AND):
self.BINARY_AND(instruction)
elif isinstance(instruction, model.BINARY_XOR):
self.BINARY_XOR(instruction)
elif isinstance(instruction, model.BINARY_OR):
self.BINARY_OR(instruction)
elif isinstance(instruction, model.INPLACE_POWER):
self.INPLACE_POWER(instruction)
elif isinstance(instruction, model.GET_ITER):
self.GET_ITER(instruction)
elif isinstance(instruction, model.GET_YIELD_FROM_ITER):
self.GET_YIELD_FROM_ITER(instruction)
elif isinstance(instruction, model.PRINT_EXPR):
self.PRINT_EXPR(instruction)
elif isinstance(instruction, model.LOAD_BUILD_CLASS):
self.LOAD_BUILD_CLASS(instruction)
elif isinstance(instruction, model.YIELD_FROM):
self.YIELD_FROM(instruction)
elif isinstance(instruction, model.GET_AWAITABLE):
self.GET_AWAITABLE(instruction)
elif isinstance(instruction, model.INPLACE_LSHIFT):
self.INPLACE_LSHIFT(instruction)
elif isinstance(instruction, model.INPLACE_RSHIFT):
self.INPLACE_RSHIFT(instruction)
elif isinstance(instruction, model.INPLACE_AND):
self.INPLACE_AND(instruction)
elif isinstance(instruction, model.INPLACE_XOR):
self.INPLACE_XOR(instruction)
elif isinstance(instruction, model.INPLACE_OR):
self.INPLACE_OR(instruction)
elif isinstance(instruction, model.BREAK_LOOP):
self.BREAK_LOOP(instruction)
elif isinstance(instruction, model.WITH_CLEANUP_START):
self.WITH_CLEANUP_START(instruction)
elif isinstance(instruction, model.WITH_CLEANUP_FINISH):
self.WITH_CLEANUP_FINISH(instruction)
elif isinstance(instruction, model.RETURN_VALUE):
self.RETURN_VALUE(instruction)
elif isinstance(instruction, model.IMPORT_STAR):
self.IMPORT_STAR(instruction)
elif isinstance(instruction, model.SETUP_ANNOTATIONS):
self.SETUP_ANNOTATIONS(instruction)
elif isinstance(instruction, model.YIELD_VALUE):
self.YIELD_VALUE(instruction)
elif isinstance(instruction, model.POP_BLOCK):
self.POP_BLOCK(instruction)
elif isinstance(instruction, model.END_FINALLY):
self.END_FINALLY(instruction)
elif isinstance(instruction, model.POP_EXCEPT):
self.POP_EXCEPT(instruction)
elif isinstance(instruction, model.HAVE_ARGUMENT):
self.HAVE_ARGUMENT(instruction)
elif isinstance(instruction, model.STORE_NAME):
self.STORE_NAME(instruction)
elif isinstance(instruction, model.DELETE_NAME):
self.DELETE_NAME(instruction)
elif isinstance(instruction, model.UNPACK_SEQUENCE):
self.UNPACK_SEQUENCE(instruction)
elif isinstance(instruction, model.FOR_ITER):
self.FOR_ITER(instruction)
elif isinstance(instruction, model.UNPACK_EX):
self.UNPACK_EX(instruction)
elif isinstance(instruction, model.STORE_ATTR):
self.STORE_ATTR(instruction)
elif isinstance(instruction, model.DELETE_ATTR):
self.DELETE_ATTR(instruction)
elif isinstance(instruction, model.STORE_GLOBAL):
self.STORE_GLOBAL(instruction)
elif isinstance(instruction, model.DELETE_GLOBAL):
self.DELETE_GLOBAL(instruction)
elif isinstance(instruction, model.LOAD_CONST):
self.LOAD_CONST(instruction)
elif isinstance(instruction, model.LOAD_NAME):
self.LOAD_NAME(instruction)
elif isinstance(instruction, model.BUILD_TUPLE):
self.BUILD_TUPLE(instruction)
elif isinstance(instruction, model.BUILD_LIST):
self.BUILD_LIST(instruction)
elif isinstance(instruction, model.BUILD_SET):
self.BUILD_SET(instruction)
elif isinstance(instruction, model.BUILD_MAP):
self.BUILD_MAP(instruction)
elif isinstance(instruction, model.LOAD_ATTR):
self.LOAD_ATTR(instruction)
elif isinstance(instruction, model.COMPARE_OP):
self.COMPARE_OP(instruction)
elif isinstance(instruction, model.IMPORT_NAME):
self.IMPORT_NAME(instruction)
elif isinstance(instruction, model.IMPORT_FROM):
self.IMPORT_FROM(instruction)
elif isinstance(instruction, model.JUMP_FORWARD):
self.JUMP_FORWARD(instruction)
elif isinstance(instruction, model.JUMP_IF_FALSE_OR_POP):
self.JUMP_IF_FALSE_OR_POP(instruction)
elif isinstance(instruction, model.JUMP_IF_TRUE_OR_POP):
self.JUMP_IF_TRUE_OR_POP(instruction)
elif isinstance(instruction, model.JUMP_ABSOLUTE):
self.JUMP_ABSOLUTE(instruction)
elif isinstance(instruction, model.POP_JUMP_IF_FALSE):
self.POP_JUMP_IF_FALSE(instruction)
elif isinstance(instruction, model.POP_JUMP_IF_TRUE):
self.POP_JUMP_IF_TRUE(instruction)
elif isinstance(instruction, model.LOAD_GLOBAL):
self.LOAD_GLOBAL(instruction)
elif isinstance(instruction, model.CONTINUE_LOOP):
self.CONTINUE_LOOP(instruction)
elif isinstance(instruction, model.SETUP_LOOP):
self.SETUP_LOOP(instruction)
elif isinstance(instruction, model.SETUP_EXCEPT):
self.SETUP_EXCEPT(instruction)
elif isinstance(instruction, model.SETUP_FINALLY):
self.SETUP_FINALLY(instruction)
elif isinstance(instruction, model.LOAD_FAST):
self.LOAD_FAST(instruction)
elif isinstance(instruction, model.STORE_FAST):
self.STORE_FAST(instruction)
elif isinstance(instruction, model.DELETE_FAST):
self.DELETE_FAST(instruction)
elif isinstance(instruction, model.STORE_ANNOTATION):
self.STORE_ANNOTATION(instruction)
elif isinstance(instruction, model.RAISE_VARARGS):
self.RAISE_VARARGS(instruction)
elif isinstance(instruction, model.CALL_FUNCTION):
self.CALL_FUNCTION(instruction)
elif isinstance(instruction, model.MAKE_FUNCTION):
self.MAKE_FUNCTION(instruction)
elif isinstance(instruction, model.BUILD_SLICE):
self.BUILD_SLICE(instruction)
elif isinstance(instruction, model.LOAD_CLOSURE):
self.LOAD_CLOSURE(instruction)
elif isinstance(instruction, model.LOAD_DEREF):
self.LOAD_DEREF(instruction)
elif isinstance(instruction, model.STORE_DEREF):
self.STORE_DEREF(instruction)
elif isinstance(instruction, model.DELETE_DEREF):
self.DELETE_DEREF(instruction)
elif isinstance(instruction, model.CALL_FUNCTION_KW):
self.CALL_FUNCTION_KW(instruction)
elif isinstance(instruction, model.CALL_FUNCTION_EX):
self.CALL_FUNCTION_EX(instruction)
elif isinstance(instruction, model.SETUP_WITH):
self.SETUP_WITH(instruction)
elif isinstance(instruction, model.EXTENDED_ARG):
self.EXTENDED_ARG(instruction)
elif isinstance(instruction, model.LIST_APPEND):
self.LIST_APPEND(instruction)
elif isinstance(instruction, model.SET_ADD):
self.SET_ADD(instruction)
elif isinstance(instruction, model.MAP_ADD):
self.MAP_ADD(instruction)
elif isinstance(instruction, model.LOAD_CLASSDEREF):
self.LOAD_CLASSDEREF(instruction)
elif isinstance(instruction, model.BUILD_LIST_UNPACK):
self.BUILD_LIST_UNPACK(instruction)
elif isinstance(instruction, model.BUILD_MAP_UNPACK):
self.BUILD_MAP_UNPACK(instruction)
elif isinstance(instruction, model.BUILD_MAP_UNPACK_WITH_CALL):
self.BUILD_MAP_UNPACK_WITH_CALL(instruction)
elif isinstance(instruction, model.BUILD_TUPLE_UNPACK):
self.BUILD_TUPLE_UNPACK(instruction)
elif isinstance(instruction, model.BUILD_SET_UNPACK):
self.BUILD_SET_UNPACK(instruction)
elif isinstance(instruction, model.SETUP_ASYNC_WITH):
self.SETUP_ASYNC_WITH(instruction)
elif isinstance(instruction, model.FORMAT_VALUE):
self.FORMAT_VALUE(instruction)
elif isinstance(instruction, model.BUILD_CONST_KEY_MAP):
self.BUILD_CONST_KEY_MAP(instruction)
elif isinstance(instruction, model.BUILD_STRING):
self.BUILD_STRING(instruction)
elif isinstance(instruction, model.BUILD_TUPLE_UNPACK_WITH_CALL):
self.BUILD_TUPLE_UNPACK_WITH_CALL(instruction)
def POP_TOP(self, instruction):
self.pop()
def ROT_TWO(self, instruction):
first = self.pop()
second = self.pop()
self.push(first)
self.push(second)
def ROT_THREE(self, instruction): print("NYI " + str(self))
def DUP_TOP(self, instruction): print("NYI " + str(self))
def DUP_TOP_TWO(self, instruction): print("NYI " + str(self))
def NOP(self, instruction): print("NYI " + str(self))
def UNARY_POSITIVE(self, instruction): print("NYI " + str(self))
def UNARY_NEGATIVE(self, instruction): print("NYI " + str(self))
def UNARY_NOT(self, instruction): print("NYI " + str(self))
def UNARY_INVERT(self, instruction): print("NYI " + str(self))
def BINARY_MATRIX_MULTIPLY(self, instruction): print("NYI " + str(self))
def INPLACE_MATRIX_MULTIPLY(self, instruction): print("NYI " + str(self))
def BINARY_POWER(self, instruction):
tos = self.pop()
tos1 = self.pop()
val = pow(tos1, tos)
self.push(val)
def BINARY_MULTIPLY(self, instruction):
tos = self.pop()
tos1 = self.pop()
val = tos1 * tos
self.push(val)
def BINARY_MODULO(self, instruction):
tos = self.pop()
tos1 = self.pop()
val = tos1 % tos
self.push(val)
def BINARY_ADD(self, instruction):
tos = self.pop()
tos1 = self.pop()
val = tos1 + tos
self.push(val)
def BINARY_SUBTRACT(self, instruction):
tos = self.pop()
tos1 = self.pop()
val = tos1 - tos
self.push(val)
def BINARY_SUBSCR(self, instruction):
tos = self.pop()
tos1 = self.pop()
val = tos1[tos]
self.push(val)
def BINARY_FLOOR_DIVIDE(self, instruction):
tos = self.pop()
tos1 = self.pop()
val = tos1 // tos
self.push(val)
def BINARY_TRUE_DIVIDE(self, instruction):
tos = self.pop()
tos1 = self.pop()
val = tos1 / tos
self.push(val)
def INPLACE_FLOOR_DIVIDE(self, instruction): print("NYI " + str(self))
def INPLACE_TRUE_DIVIDE(self, instruction): print("NYI " + str(self))
def GET_AITER(self, instruction): print("NYI " + str(self))
def GET_ANEXT(self, instruction): print("NYI " + str(self))
def BEFORE_ASYNC_WITH(self, instruction): print("NYI " + str(self))
def INPLACE_ADD(self, instruction):
second = self.pop()
first = self.pop()
self.push(first + second)
def INPLACE_SUBTRACT(self, instruction): print("NYI " + str(self))
def INPLACE_MULTIPLY(self, instruction):
second = self.pop()
first = self.pop()
self.push(first * second)
def INPLACE_MODULO(self, instruction): print("NYI " + str(self))
def STORE_SUBSCR(self, instruction): print("NYI " + str(self))
def DELETE_SUBSCR(self, instruction): print("NYI " + str(self))
def BINARY_LSHIFT(self, instruction): print("NYI " + str(self))
def BINARY_RSHIFT(self, instruction): print("NYI " + str(self))
def BINARY_AND(self, instruction): print("NYI " + str(self))
def BINARY_XOR(self, instruction): print("NYI " + str(self))
def BINARY_OR(self, instruction): print("NYI " + str(self))
def INPLACE_POWER(self, instruction): print("NYI " + str(self))
def GET_ITER(self, instruction):
# Create an iterator from the TOS object and push it on the stack
tos = self.pop()
self.push(iter(tos))
def GET_YIELD_FROM_ITER(self, instruction): print("NYI " + str(self))
def PRINT_EXPR(self, instruction): print("NYI " + str(self))
def LOAD_BUILD_CLASS(self, instruction):
# Push the function which will make the class
self.push(self.make_class)
def YIELD_FROM(self, instruction): print("NYI " + str(self))
def GET_AWAITABLE(self, instruction): print("NYI " + str(self))
def INPLACE_LSHIFT(self, instruction): print("NYI " + str(self))
def INPLACE_RSHIFT(self, instruction): print("NYI " + str(self))
def INPLACE_AND(self, instruction): print("NYI " + str(self))
def INPLACE_XOR(self, instruction): print("NYI " + str(self))
def INPLACE_OR(self, instruction): print("NYI " + str(self))
def BREAK_LOOP(self, instruction): print("NYI " + str(self))
def WITH_CLEANUP_START(self, instruction): print("NYI " + str(self))
def WITH_CLEANUP_FINISH(self, instruction): print("NYI " + str(self))
def RETURN_VALUE(self, instruction):
tos = self.pop()
# Reset the environment of the call
self.current_function().environments.pop()
self.environments.pop()
self.functions_called.pop()
# Push again the result
self.push(tos)
def IMPORT_STAR(self, instruction): print("NYI " + str(self))
def SETUP_ANNOTATIONS(self, instruction): print("NYI " + str(self))
def YIELD_VALUE(self, instruction):
#TODO
self.print_stack()
tos = self.pop()
print("TOS of a YIELD " + str(tos))
print("Class of TOS " + str(tos.__class__))
print("Instructions in block " + str(instruction.block.instructions))
self.current_function().environments.pop()
self.environments.pop()
self.functions_called.pop()
self.push(tos)
def POP_BLOCK(self, instruction):
# In the current model, this instruction is already handled
pass
def END_FINALLY(self, instruction): print("NYI " + str(self))
def POP_EXCEPT(self, instruction): print("NYI " + str(self))
def HAVE_ARGUMENT(self, instruction): print("NYI " + str(self))
def STORE_NAME(self, instruction):
tos = self.pop()
name = self.current_function().names[instruction.argument]
# If tos is the main function of a class, we are in fact
# adding a property to this class here, special treatment
if self.current_function().is_class:
self.current_function().mclass.add_attribute(name, tos)
# If we are at the top level of the program
if instruction.function.is_main and instruction.function.name == "main":
# also make a global store
self.global_environment[name] = tos
self.current_function().environments[-1][name] = tos
def DELETE_NAME(self, instruction): print("NYI " + str(self))
def UNPACK_SEQUENCE(self, instruction):
# Unpack tuple items and push them on the stack right to left
tos = self.pop()
for item in reversed(tos):
self.push(item)
def FOR_ITER(self, instruction):
# TOS is an iterator
tos = self.pop()
need_jump = False
# Try to get a value from the iterator
try:
value = tos.__next__()
# Push back the iterator and the yield value
self.push(tos)
self.push(value)
except StopIteration:
# If it is exhausted, make a jump
need_jump = True
# Find the next block depending on the state of the iterator
for block in instruction.block.next:
if block.instructions[0].offset == instruction.absolute_target:
# Make the jump
jump_block = block
else:
# Continue
notjump_block = block
if need_jump:
self.execute_block(jump_block)
else:
self.execute_block(notjump_block)
def UNPACK_EX(self, instruction): print("NYI " + str(self))
def STORE_ATTR(self, instruction):
# Get the attribute and the value and set it
obj = self.pop()
value = self.pop()
name = self.current_function().names[instruction.argument]
obj.set_attribute(name, value)
self.push(value)
def DELETE_ATTR(self, instruction): print("NYI " + str(self))
def STORE_GLOBAL(self, instruction): print("NYI " + str(self))
def DELETE_GLOBAL(self, instruction): print("NYI " + str(self))
def LOAD_CONST(self, instruction):
loaded_value = self.current_function().consts[instruction.argument]
self.push(loaded_value)
# If we load a Code Object, disassemble it
if isinstance(loaded_value, CodeType):
if self.args.verbose:
dis.dis(loaded_value)
def LOAD_NAME(self, instruction):
name = str(self.current_function().names[instruction.argument])
# try to find the name in local environments
if name in self.current_function().environments[-1]:
self.push(self.current_function().environments[-1][name])
else:
# Lookup in the global environment
self.push(self.global_environment[name])
def BUILD_TUPLE(self, instruction):
res = []
for i in range(0, instruction.argument):
res.append(self.pop())
res.reverse()
self.push(tuple(res))
def BUILD_LIST(self, instruction):
res = []
for i in range(0, instruction.argument):
res.append(self.pop())
res.reverse()
self.push(res)
def BUILD_SET(self, instruction):
res = set()
for i in range(0, instruction.argument):
res.add(self.pop())
self.push(res)
def BUILD_MAP(self, instruction): print("NYI " + str(self))
def LOAD_ATTR(self, instruction):
tos = self.pop()
name = self.current_function().names[instruction.argument]
# Lookup a name in a python module object
if isinstance(tos, model.MModule):
# Special case for a Module
fun = tos.lookup(name, False)
self.push(fun)
elif isinstance(tos, model.MObject):
# Access to an attribute of the model
res = tos.get_property(name)
# Two cases here: we accessed either a method or an attribute value
if isinstance(res, model.Function):
# If it's a function, a method call will be made later
# Set the object as the receiver of the method for later use
res.receiver = tos
self.push(res)
else:
# Access to an attribute
attr = getattr(tos, name)
self.push(attr)
def COMPARE_OP(self, instruction):
second = self.pop()
first = self.pop()
# Perform the test and push the result on the stack
res = compare_functions[instruction.argument](first, second)
self.push(res)
def IMPORT_NAME(self, instruction):
module_name = self.current_function().names[instruction.argument]
from_list = self.pop()
level = self.pop()
# Add the subdirectory to the path to import
module_name = self.subdirectory + "." + module_name
# Find the module file
spec = importlib.util.find_spec(module_name)
# Create a module without executing it
pythonmodule = importlib.util.module_from_spec(spec)
# Now we need to execute this module, start by compiling it
co = frontend.compiler.compile_import(pythonmodule.__file__, self.args)
module = model.MModule(co)
self.modules.append(module)
# Generate a function for the module
fun = self.generate_function(co, self.current_function().names[instruction.argument], module, True)
env = {}
self.environments.append(env)
fun.environments.append(env)
self.execute_function(fun)
self.push(module)
def IMPORT_FROM(self, instruction): print("NYI " + str(self))
def JUMP_FORWARD(self, instruction):
for block in instruction.block.next:
if block.instructions[0].offset == instruction.absolute_target:
self.execute_block(block)
return
def JUMP_IF_FALSE_OR_POP(self, instruction):
value = self.pop()
jump_block = None
notjump_block = None
# Locate the target of the jump in next basic blocks
for block in instruction.block.next:
# If we need to make the jump
if block.instructions[0].offset == instruction.argument:
jump_block = block
else:
# Continue the execution in the second block
notjump_block = block
if not value:
self.push(value)
self.execute_block(jump_block)
else:
self.execute_block(notjump_block)
def JUMP_IF_TRUE_OR_POP(self, instruction):
value = self.pop()
jump_block = None
notjump_block = None
# Locate the target of the jump in next basic blocks
for block in instruction.block.next:
# If we need to make the jump
if block.instructions[0].offset == instruction.argument:
jump_block = block
else:
# Continue the execution in the second block
notjump_block = block
if value:
self.push(value)
self.execute_block(jump_block)
else:
self.execute_block(notjump_block)
def JUMP_ABSOLUTE(self, instruction):
for block in instruction.block.next:
# Make the jump
if block.instructions[0].offset == instruction.argument:
self.execute_block(block)
return
# TODO: the jump target should always be found above; add an assertion here
def POP_JUMP_IF_FALSE(self, instruction):
value = self.pop()
jump_block = None
notjump_block = None
# Locate the target of the jump in next basic blocks
for block in instruction.block.next:
# If we need to make the jump
if block.instructions[0].offset == instruction.argument:
jump_block = block
else:
# Continue the execution in the second block
notjump_block = block
if not value:
self.execute_block(jump_block)
else:
self.execute_block(notjump_block)
def POP_JUMP_IF_TRUE(self, instruction):
value = self.pop()
jump_block = None
notjump_block = None
# Locate the target of the jump in next basic blocks
for block in instruction.block.next:
# If we need to make the jump
if block.instructions[0].offset == instruction.argument:
jump_block = block
else:
# Continue the execution in the second block
notjump_block = block
if value:
self.execute_block(jump_block)
else:
self.execute_block(notjump_block)
def LOAD_GLOBAL(self, instruction):
name = self.current_function().names[instruction.argument]
# Lookup in the global environment
if name in self.global_environment:
self.push(self.global_environment[name])
else:
# Lookup in its module to find a name
self.push(self.current_function().module.lookup(name, False))
def CONTINUE_LOOP(self, instruction): print("NYI " + str(self))
def SETUP_LOOP(self, instruction):
# For now, do nothing, the end of the loop will discard the block
pass
def SETUP_EXCEPT(self, instruction): print("NYI " + str(self))
def SETUP_FINALLY(self, instruction): print("NYI " + str(self))
def LOAD_FAST(self, instruction):
varname = self.current_function().varnames[instruction.argument]
for env in reversed(self.current_function().environments):
if varname in env:
self.push(env[varname])
return
def STORE_FAST(self, instruction):
value = self.pop()
varname = self.current_function().varnames[instruction.argument]
self.current_function().environments[-1][varname] = value
def DELETE_FAST(self, instruction): print("NYI " + str(self))
def STORE_ANNOTATION(self, instruction): print("NYI " + str(self))
def RAISE_VARARGS(self, instruction): print("NYI " + str(self))
#TODO: factorize with other call functions
def CALL_FUNCTION(self, instruction):
# Default arguments
args = []
for i in range(0, instruction.argument):
# Pop all arguments of the call and put them in environment
args.append(self.pop())
# Put arguments in the right order
args.reverse()
# Creating an empty environment
env = {}
# TOS is now the function to call
function = self.pop()
if isinstance(function, model.MClass):
# We have to make a new instance of a class
self.push(function.new_instance_interpreter(args))
return
elif isinstance(function, model.Function):
args_call = len(args)
args_function = function.argcount
if args_call < args_function:
# We are doing a method call here, add the self parameter
# (the receiver must have been set beforehand by LOAD_ATTR)
args.insert(0, function.receiver)
else:
# Special case of a call to a primitive function
self.push(function(*args))
return
function.environments.append(env)
self.environments.append(env)
# Initialize the environment for the function call
for i in range(0, len(args)):
if not len(function.varnames) == 0:
env[function.varnames[i]] = args[i]
# Make the call
self.execute_function(function)
def MAKE_FUNCTION(self, instruction):
function_name = self.pop()
code = self.pop()
#TODO
free_variables = None
if (instruction.argument & 8) == 8:
# Making a closure, tuple of free variables
free_variables = self.pop()
if (instruction.argument & 4) == 4:
# Annotation dictionary
annotations = self.pop()
if (instruction.argument & 2) == 2:
# keyword only default arguments
keyword_only = self.pop()
if (instruction.argument & 1) == 1:
# default arguments
default = self.pop()
# Generate a new Function Object
# TODO: check the module of the function
fun = self.generate_function(code, function_name, self.modules[-1], False)
# Fill the closure
if free_variables is not None:
for value in free_variables:
for env in reversed(self.current_function().environments):
if value in env:
fun.closure[value] = env[value]
# Push the Function object on the stack
self.push(fun)
def BUILD_SLICE(self, instruction): print("NYI " + str(self))
def LOAD_CLOSURE(self, instruction):
# Search the name of the variable
varname = None
if instruction.argument < len(self.current_function().cellvars):
varname = self.current_function().cellvars[instruction.argument]
else:
i = instruction.argument - len(self.current_function().cellvars)
varname = self.current_function().freevars[i]
self.push(varname)
def LOAD_DEREF(self, instruction):
# TODO: Flat representation of closures
varname = None
if instruction.argument < len(self.current_function().cellvars):
varname = self.current_function().cellvars[instruction.argument]
else:
varname = self.current_function().freevars[instruction.argument - len(self.current_function().cellvars)]
if varname not in self.current_function().closure:
# Lookup in environment
for env in reversed(self.current_function().environments):
if varname in env:
self.push(env[varname])
return
else:
# Get the value in the closure
self.push(self.current_function().closure[varname])
def STORE_DEREF(self, instruction): print("NYI " + str(self))
def DELETE_DEREF(self, instruction): print("NYI " + str(self))
def CALL_FUNCTION_KW(self, instruction):
# TOS is a tuple for keywords
keywords_tuple = self.pop()
print("keywords tuple " + str(keywords_tuple))
print(len(keywords_tuple))
# Creating an empty environment
env = {}
for element in keywords_tuple:
env[element] = self.pop()
print("env with keywords " + str(env))
# Default arguments
args = []
for i in range(0, instruction.argument - len(keywords_tuple)):
# Pop all arguments of the call and put them in environment
args.append(self.pop())
# Put positional arguments in the right order
args.reverse()
# TOS is now the function to call
function = self.pop()
if not isinstance(function, model.Function):
# Special case of a call to a primitive function
self.push(function(*args))
return
# Initialize the environment for the function call
for i in range(0, len(args)):
env[function.varnames[i]] = args[i]
function.environments.append(env)
self.environments.append(env)
# Make the call
function.execute(self)
def CALL_FUNCTION_EX(self, instruction): print("NYI " + str(self))
def SETUP_WITH(self, instruction): print("NYI " + str(self))
def EXTENDED_ARG(self, instruction): print("NYI " + str(self))
def LIST_APPEND(self, instruction):
tos = self.pop()
list.append(self.stack[-instruction.argument], tos)
def SET_ADD(self, instruction): print("NYI " + str(self))
def MAP_ADD(self, instruction): print("NYI " + str(self))
def LOAD_CLASSDEREF(self, instruction): print("NYI " + str(self))
def BUILD_LIST_UNPACK(self, instruction): print("NYI " + str(self))
def BUILD_MAP_UNPACK(self, instruction): print("NYI " + str(self))
def BUILD_MAP_UNPACK_WITH_CALL(self, instruction): print("NYI " + str(self))
def BUILD_TUPLE_UNPACK(self, instruction): print("NYI " + str(self))
def BUILD_SET_UNPACK(self, instruction): print("NYI " + str(self))
def SETUP_ASYNC_WITH(self, instruction): print("NYI " + str(self))
def FORMAT_VALUE(self, instruction): print("NYI " + str(self))
def BUILD_CONST_KEY_MAP(self, instruction): print("NYI " + str(self))
def BUILD_STRING(self, instruction): print("NYI " + str(self))
def BUILD_TUPLE_UNPACK_WITH_CALL(self, instruction): print("NYI " + str(self))
def LOAD_METHOD(self, instruction): print("NYI " + str(self))
def CALL_METHOD(self, instruction): print("NYI " + str(self))
def op_lesser(first, second):
return first < second
def op_lesser_eq(first, second):
return first <= second
def op_eq(first, second):
return first == second
def op_noteq(first, second):
return first != second
def op_greater(first, second):
return first > second
def op_greater_eq(first, second):
return first >= second
def op_in(first, second):
return first in second
def op_notin(first, second):
return first not in second
def op_is(first, second):
return first is second
def op_notis(first, second):
return not (first is second)
# TODO
def op_exception_match(first, second):
print("NYI")
quit()
# TODO
def op_bad(first, second):
print("NYI")
quit()
compare_functions = (op_lesser, op_lesser_eq, op_eq, op_noteq, op_greater,
op_greater_eq, op_in, op_notin, op_is, op_notis, op_exception_match, op_bad)
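# Hedged sketch (not part of the original interpreter): the tuple above is meant to
# follow CPython's dis.cmp_op ordering ('<', '<=', '==', '!=', '>', '>=', 'in',
# 'not in', 'is', 'is not', 'exception match', 'BAD') as it stood in Python 3.6/3.7;
# later versions shrink cmp_op, so this pairing is illustrative only. The helper
# below (never called) just pairs each opcode name with its handler.
def _compare_op_table():
    return list(zip(dis.cmp_op, compare_functions))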
# Dictionary between names and primitive functions
primitives = {
"abs" : abs,
"dict" : dict,
"help" : help,
"min" : min,
"setattr" : setattr,
"all" : all,
"dir" : dir,
"hex" : hex,
"next" : next,
"slice" : slice,
"any" : any,
"divmod" : divmod,
"id" : id,
"object" : object,
"sorted" : sorted,
"ascii" : ascii,
"enumerate" : enumerate,
"input" : input,
"oct" : oct,
"staticmethod" : staticmethod,
"bin" : bin,
"eval" : eval,
"int" : int,
"open" : open,
"str" : str,
"bool" : bool,
"exec" : exec,
"isinstance" : isinstance,
"ord" : ord,
"sum" : sum,
"bytearray" : bytearray,
"filter" : filter,
"issubclass" : issubclass,
"pow" : pow,
"super" : super,
"bytes" : bytes,
"float" : float,
"iter" : iter,
"print" : print,
"tuple" : tuple,
"callable" : callable,
"format" : format,
"len" : len,
"property" : property,
"type" : type,
"chr" : chr,
"frozenset" : frozenset,
"list" : list,
"range" : range,
"vars" : vars,
"classmethod" : classmethod,
"getattr" : getattr,
"locals" : locals,
"repr" : repr,
"zip" : zip,
"globals" : globals,
"map" : map,
"reversed" : reversed,
"__import__" : __import__,
"complex" : complex,
"hasattr" : hasattr,
"max" : max,
"round" : round,
"hash" : hash,
"delattr" : delattr,
"memoryview" : memoryview,
"set" : set,
}
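# Hedged sketch (not used by the interpreter above): because every handler method
# shares its name with the corresponding bytecode class in `model`, the long
# isinstance chain in execute_instruction could be collapsed into name-based
# dispatch along these lines.
def dispatch_by_name(interpreter, instruction):
    handler = getattr(interpreter, type(instruction).__name__, None)
    if handler is None:
        print("NYI " + str(instruction))
    else:
        handler(instruction)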
```
#### File: jpages/twopy/main_script.py
```python
import argparse
import subprocess
import os.path
import glob
# Execute the compiler with the given arguments
# cmd: the command to execute
# python_file: the python file to execute with Twopy
def run_cmd(cmd, python_file):
command = cmd + " " + python_file
subprocess.run(command, shell=True)
def main():
# Argument parser
parser = argparse.ArgumentParser(description="Twopy Python compiler")
# TODO: option usage
# TODO: all additional options must be passed to twopy
parser.add_argument('--gdb',
help='Enable gdb debugging of Twopy',
action='store_true')
parser.add_argument("python_file", help="Python file to execute")
parser.add_argument("--time",
help="Print the time of the process",
action="store_true")
# Run benchmarks and exit
parser.add_argument("--benchs", "--benchmarks",
help="Run all benchmarks in benchmarks/ directory and exit",
action="store_true")
# This option is mandatory each time the FFI with C is modified
parser.add_argument("--compile_ffi", action="store_true",
help="Compile the C-FFI used by Twopy")
args = parser.parse_args()
# Contains arguments for running gdb
debug_string = ""
# Used to give env variables to CPython
env_vars = "PYTHONMALLOC=malloc "
if args.gdb:
debug_string = "gdb -ex run --args "
twopy_entry_point = "twopy.py"
if args.compile_ffi:
twopy_entry_point += " --compile_ffi"
# Current path
this_path = os.path.dirname(os.path.realpath(__file__))
# Make sure to have the correct absolute path, if the project was cloned as expected
this_path += "/../cpython/python"
cmd = env_vars + debug_string + " " + this_path + " " + twopy_entry_point
if args.time:
cmd = "time " + cmd
# This option launches twopy on every benchmark then exits
if args.benchs:
bench_dir = os.path.dirname(os.path.realpath(__file__)) + "/benchmarks/"
bench_list = glob.glob(bench_dir + "*.py")
# Automatically add the time
if not args.time:
cmd = "time " + cmd
# Execute each file
for file in bench_list:
print(file)
run_cmd(cmd, file)
else:
# run Twopy
run_cmd(cmd, args.python_file)
if __name__ == '__main__':
main()
```
#### File: twopy/tests/binary_search.py
```python
def binary_search(alist, item):
first = 0
last = len(alist) - 1
found = False
while first <= last and not found:
middle = (first + last) // 2
if alist[middle] == item:
found = True
else:
if item < alist[middle]:
# Search in the left part
last = middle - 1
else:
first = middle + 1
return found
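# Hedged alternative (not part of the original test): the standard library's
# bisect module performs the same membership test on a sorted list.
def binary_search_bisect(alist, item):
    from bisect import bisect_left
    i = bisect_left(alist, item)
    return i < len(alist) and alist[i] == item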
alist = [1, 4, 6, 7, 9, 15, 33, 45, 68, 90]
print(binary_search(alist, 68))
```
#### File: twopy/tests/decorator.py
```python
def mon_decorateur(fonction):
print("Notre décorateur est appelé avec en paramètre la fonction {0}".format(fonction))
return fonction
@mon_decorateur
def salut():
print("Salut !")
salut()
```
#### File: twopy/tests/objects1.py
```python
class Animal:
pass
class Dog(Animal):
def __init__(self, name):
self.name = name
self.tricks = [] # creates a new empty list for each dog
def add_trick(self, trick):
self.tricks.append(trick)
def foo(self):
print(self)
print("test")
d = Dog('Fido')
e = Dog('Buddy')
d.add_trick('roll over')
d.add_trick('another trick')
e.add_trick('play dead')
e.foo()
print(d.tricks)
print(e.tricks)
```
#### File: twopy/tests/prime.py
```python
def primes(value):
for num in range(value):
res = True
for k in range(2, num):
if k != 0:
if num % k == 0:
res = False
if res == True:
print(num)
primes(100)
```
#### File: twopy/tests/pypy_example.py
```python
def f(a, b):
if b % 46 == 41:
return a - b
else:
return a + b
def strange_sum(n):
result = 0
while n >= 0:
result = f(result, n)
n -= 1
return result
print(strange_sum(10000000))
```
#### File: twopy/tests/quick_sort.py
```python
import random
# Very inefficient bubble sort
def bubble_sort(array):
for i in range(len(array)):
for j in range(i, len(array)):
if array[i] > array[j]:
# Swap these elements
temp = array[i]
array[i] = array[j]
array[j] = temp
return array
# More efficient quick sort (with a pivot)
def quick_sort(array):
less = []
equal = []
greater = []
if len(array) > 1:
        # Choose a pivot
pivot = array[0]
for x in array:
if x < pivot:
less.append(x)
if x == pivot:
equal.append(x)
if x > pivot:
greater.append(x)
return quick_sort(less) + equal + quick_sort(greater)
else:
return array
random.seed()
array = [12, 4, 5, 6, 7, 3, 1, 15]
for i in range(10000):
array.append(int(random.random()*100000))
print(quick_sort(array))
print(bubble_sort(array))
```
#### File: twopy/tests/sum.py
```python
def foo(n):
res = 0
for i in range(n):
res = res + i
print(res)
return res
print(foo(10000))
```
#### File: twopy/tests/while_loops.py
```python
def test(n):
i = 0
res = 0
while i < n:
i = i + 1
print(res)
test(100)
```
#### File: jpages/twopy/twopy.py
```python
import frontend
import interpreter
import jit
import argparse
import os.path
def main():
# Argument parser
parser = argparse.ArgumentParser(description="Twopy Virtual Machine")
parser.add_argument("file", help="path to a python file")
parser.add_argument("--verbose", "-v", action="store_true",
help="enable verbose output")
parser.add_argument("--execution", action="store_true",
help="Print each variation of the stack during execution")
parser.add_argument("--inter", action="store_true",
help="Interpretation of the code")
parser.add_argument("--asm", action="store_true",
help="Print generated assembly code")
parser.add_argument("--maxvers", type=int,
help="Maximum number of generated versions for BBV.\n0 means infinite versions, default is 5.")
parser.add_argument("--no_std_lib", action="store_true",
help="Do not compile the standard library of Twopy. Not much will be executable.")
parser.add_argument("--stats", action="store_true",
help="Collect statistics on execution")
parser.add_argument("--compile_ffi", action="store_true",
help="Compile the C-FFI used by Twopy")
args = parser.parse_args()
# Compile to bytecode and get the main CodeObject
maincode = frontend.compiler.compile_source(args.file, args)
# Get the subdirectory of the executed file
head, tail = os.path.split(args.file)
inter = interpreter.simple_interpreter.get_interpreter(maincode, head, args)
if args.inter:
inter.execute()
else:
jitcompiler = jit.compiler.JITCompiler(inter, maincode)
inter.jitcompiler = jitcompiler
jitcompiler.execute()
main()
``` |
{
"source": "jpaggi/findbps",
"score": 3
} |
#### File: jpaggi/findbps/findbps.py
```python
from subprocess import Popen, PIPE
from pickle import dumps
from os import path
def findbps(reads, output, bowtie_options, motif, length, threshold, strand):
"""
Input:
reads: str of name of file where single-end, stranded
RNA-seq reads in fastq format are located
output:str of desired basename of output files
bowtie_options: str of bowtie options you wish to
be used for alignment of reads after splitting.
See the bowtie manual.
Recommend "-y -p 2 -v 0 -X 5000 -m 1 <index>"
motif: list of dictionaries representing 5'ss motif
position weight matrix. Each dictionary has a
key for each nucleotide, with a float of the
               probability as its value.
length:int of the lowest acceptable number of bases
used to align a fragment of a read.
threshold: float of the lowest acceptable probability
that a sequence would be sampled from the
                   given matrix in order to attempt mapping.
Recommend 0.0 unless many false positives
strand:str either 'first' if reads are first-stranded
or 'second' if reads are second-stranded
Output:
output + '.bed':
A file in paired-end bed format with
information about the reads with a valid
alignment.
output + '_no_alignment.fastq':
Reads with no valid alignment in the
paired-end tab-delimited format
described in the bowtie manual split
as they were attempted to be aligned.
"""
#gets the name of the directory of this file
directory = path.dirname(path.realpath(__file__))
#make these arguments into strings so they can be passed to fp_checker.py
motif = '"' + dumps(motif) + '"'
length = str(length)
threshold = str(threshold)
#this process splits each read at the most likely 5'SS based on the
# given weight matrix and sends them to bowtie to be mapped
# see fp_checker.py for further details
fp_checker = Popen('python ' + directory + '/fp_checker.py ' +
motif +' '+ length +' '+ threshold +' '+ strand,
stdin = open(reads,'r'), stdout = PIPE, shell = True)
#this process maps each split read to the given genome
bowtie = Popen('bowtie --ff ' + bowtie_options + ' --12 - --un ' +
output+'_no_alignment.fastq',
stdin = fp_checker.stdout, stdout = PIPE, shell = True)
fp_checker.stdout.close()
#this process converts the bowtie output into a bed file
# see make_bed.py for further details
make_bed = Popen('python ' + directory + '/make_bed.py',
stdin = bowtie.stdout,
stdout = open(output + ".bed",'w'), shell = True)
bowtie.stdout.close()
make_bed.wait()
return 0
if __name__ == '__main__':
from sys import argv
reads = argv[1]
output = argv[2]
bowtie_options = argv[3]
motif = eval(argv[4])
length = int(argv[5])
threshold = float(argv[6])
strand = argv[7]
findbps(reads, output, bowtie_options, motif, length, threshold, strand)
```
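For reference, here is a hedged sketch of the `motif` argument described in the docstring above: a list of per-position dictionaries mapping nucleotides to probabilities, together with the kind of product score `fp_checker.py` presumably computes against it. The numbers and the helper function are illustrative assumptions, not part of the original pipeline.
```python
# A minimal sketch of the position weight matrix format findbps expects for
# `motif`. The probabilities below are hypothetical, not real 5'ss frequencies.
motif = [
    {'A': 0.3, 'C': 0.4, 'G': 0.2, 'T': 0.1},  # position -2
    {'A': 0.6, 'C': 0.1, 'G': 0.2, 'T': 0.1},  # position -1
    {'A': 0.0, 'C': 0.0, 'G': 1.0, 'T': 0.0},  # position +1
    {'A': 0.0, 'C': 0.0, 'G': 0.0, 'T': 1.0},  # position +2
]

def motif_probability(seq, motif):
    """Probability that `seq` was sampled from the weight matrix."""
    p = 1.0
    for base, column in zip(seq, motif):
        p *= column.get(base, 0.0)
    return p

print(motif_probability('CAGT', motif))  # 0.4 * 0.6 * 1.0 * 1.0 = 0.24
```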
#### File: jpaggi/findbps/make_bed.py
```python
def get_first_data(first):
bp_nucleotide = first[0]
bp_quality = first[1]
first = first[2:]
(ID, strand, chromosome, first_position, first_seq, first_quality,
ceiling, first_mismatches) = first.split('\t')
return (bp_nucleotide, bp_quality, ID, strand, chromosome,
int(first_position), first_seq, first_quality, first_mismatches)
def get_second_data(second):
(ID, strand, chromosome, second_position,second_seq, second_quality,
ceiling, second_mismatches) = second.split('\t')
return int(second_position), second_seq, second_quality, second_mismatches
def complement(n):
if n == 'A':
out = 'T'
elif n == 'T':
out = 'A'
elif n == 'G':
out = 'C'
elif n == 'C':
out = 'G'
else:
out = n
return out
def read_pair(inp):
#reads two lines and breaks up the first one to get information as described
first=inp.readline()
second=inp.readline()
if first == second:
return '','','','','','','','','',''
(bp_nucleotide, bp_quality, ID, strand, chromosome, first_position, first_seq,
first_quality, first_mismatches) = get_first_data(first)
second_position, second_seq, second_quality, second_mismatches = get_second_data(second)
#when strand is plus first corresponds to the 5'SS part
if strand=='+':
fp_start = first_position
fp_end = first_position + len(first_quality)
fp_seq = first_seq
bp_start = second_position
bp_end = second_position + len(second_quality)
bp_seq = second_seq + bp_nucleotide
quality = second_quality + bp_quality + first_quality
#when strand is minus second corresponds to the 5'SS part
else:
fp_start = second_position - 1
fp_end = second_position + len(second_quality) - 1
fp_seq = second_seq
bp_start = first_position - 1
bp_end = first_position + len(first_quality)
bp_seq = complement(bp_nucleotide) + first_seq
quality = first_quality + bp_quality + second_quality
return (ID,strand,chromosome,quality,
fp_start, fp_end, fp_seq,
bp_start, bp_end, bp_seq)
def make_bed(reads, out):
(ID,strand,chromosome,quality,
fp_start, fp_end, fp_seq,
bp_start, bp_end, bp_seq) = read_pair(reads)
total = 0
while ID:
total += 1
fp_start = str(fp_start)
fp_end = str(fp_end)
bp_start = str(bp_start)
bp_end = str(bp_end)
if strand == '+':
start = chromosome + '\t' + fp_start + '\t' + fp_end + '\t'
stop = chromosome + '\t' + bp_start + '\t' + bp_end + '\t'
seq = fp_seq + '\t' + bp_seq + '\n'
else:
start = chromosome + '\t' + bp_start + '\t' + bp_end + '\t'
stop = chromosome + '\t' + fp_start + '\t' + fp_end + '\t'
seq = bp_seq + '\t' + fp_seq + '\n'
line = start+stop+ID+'\t'+quality+'\t'+strand+'\t'+strand+'\t'+seq
out.write(line)
(ID,strand,chromosome,quality,
fp_start, fp_end, fp_seq,
bp_start, bp_end, bp_seq) = read_pair(reads)
return total
from sys import stdin, stdout
make_bed(stdin, stdout)
``` |
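As a quick illustration of the paired-end bed layout that `make_bed()` writes, the snippet below rebuilds one output line with the same column order used in the concatenation above. The coordinates, read ID and sequences are made-up values, not real data.
```python
# Illustrative only: the column order mirrors the string concatenation in make_bed().
chromosome = 'chr1'
fp_start, fp_end = '1000', '1050'   # 5'SS fragment coordinates
bp_start, bp_end = '2000', '2051'   # branch-point fragment coordinates
ID, quality, strand = 'read_0001', 'IIIIIIIIII', '+'
fp_seq, bp_seq = 'ACGT', 'TTAG'

start = chromosome + '\t' + fp_start + '\t' + fp_end + '\t'
stop = chromosome + '\t' + bp_start + '\t' + bp_end + '\t'
seq = fp_seq + '\t' + bp_seq + '\n'
line = start + stop + ID + '\t' + quality + '\t' + strand + '\t' + strand + '\t' + seq
print(line, end='')
```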
{
"source": "jpaggi/getcontacts",
"score": 2
} |
#### File: getcontacts/contact_calc/stratify_hbonds.py
```python
from vmd import *
import itertools
from .contact_utils import *
__all__ = ["stratify_hbond_subtypes"]
##############################################################################
# Functions
##############################################################################
def residue_vs_water_hbonds(hbonds, solvent_resn):
"""
Split hbonds into those involving residues only and those mediated by water.
"""
residue_hbonds, water_hbonds = [], []
for hbond in hbonds:
frame_idx, atom1_label, atom2_label, itype = hbond
if solvent_resn in atom1_label or solvent_resn in atom2_label:
water_hbonds.append(hbond)
else:
residue_hbonds.append(hbond)
return residue_hbonds, water_hbonds
def stratify_residue_hbonds(residue_hbonds):
"""
Stratify residue to residue hbonds into those between sidechain-sidechain,
sidechain-backbone, and backbone-backbone
"""
backbone_atoms = ['N', 'O']
hbss, hbsb, hbbb = [], [], []
# Iterate through each residue hbond and bin into appropriate subtype
for frame_idx, atom1_label, atom2_label, itype in residue_hbonds:
atom1 = atom1_label.split(":")[3]
atom2 = atom2_label.split(":")[3]
if atom1 not in backbone_atoms and atom2 not in backbone_atoms:
hbss.append([frame_idx, "hbss", atom1_label, atom2_label])
if (atom1 not in backbone_atoms and atom2 in backbone_atoms) or \
(atom1 in backbone_atoms and atom2 not in backbone_atoms):
hbsb.append([frame_idx, "hbsb", atom1_label, atom2_label])
if atom1 in backbone_atoms and atom2 in backbone_atoms:
hbbb.append([frame_idx, "hbbb", atom1_label, atom2_label])
return hbss, hbsb, hbbb
def stratify_water_bridge(water_hbonds, solvent_resn):
"""
Infer direct water bridges between residues that both have hbond
with the same water (ie res1 -- water -- res2)
"""
frame_idx, water_to_residues, _ = calc_water_to_residues_map(water_hbonds, solvent_resn)
water_bridges = set()
# Infer direct water bridges
for water in water_to_residues:
protein_atoms = sorted(list(water_to_residues[water]))
for res_atom_pair in itertools.combinations(protein_atoms, 2):
res_atom1, res_atom2 = res_atom_pair
if res_atom1 != res_atom2:
water_bridges.add((frame_idx, "wb", res_atom1, res_atom2, water))
wb = sorted([list(entry) for entry in water_bridges])
return wb
def stratify_extended_water_bridge(water_hbonds, solvent_resn):
"""
Infer extended water bridges between residues that form hbond with
water molecules that also have hbond between them.
(ie res1 -- water1 -- water2 -- res2)
"""
frame_idx, water_to_residues, solvent_bridges = calc_water_to_residues_map(water_hbonds, solvent_resn)
extended_water_bridges = set()
for water1, water2 in solvent_bridges:
if water1 not in water_to_residues or water2 not in water_to_residues:
continue
res_atom1_list, res_atom2_list = water_to_residues[water1], water_to_residues[water2]
for atom1 in res_atom1_list:
for atom2 in res_atom2_list:
extended_water_bridges.add((frame_idx, "wb2", atom1, atom2, water1, water2))
    extended_water_bridges = sorted(list(extended_water_bridges))
    # Entries are already (frame_idx, "wb2", atom1, atom2, water1, water2); just convert tuples to lists.
    wb2 = [list(entry) for entry in extended_water_bridges]
    return wb2
def stratify_hbond_subtypes(hbonds, solvent_resn):
"""
Stratify the full hbonds list into the following subtypes: sidechain-sidechain,
sidechain-backbone, backbone-backbone, water-bridge, and extended water-bridge
Parameters
----------
hbonds: list, [[frame_idx, atom1_label, atom2_label, itype], ...]
List of all hydrogen bond contacts in a single frame. itype = "hb"
solvent_resn: string, default = TIP3
Denotes the resname of solvent in simulation
Returns
-------
hbond_subtypes: list, [[frame_idx, atom1_label, atom2_label, itype], ...]
List of all hydrogen contacts with itype = "hbss", "hbsb", "hbbb", "wb", or "wb2"
corresponding to sidechain-sidechain, sidechain-backbone, backbone-backbone,
water bridge and extended water bridge respectively.
"""
residue_hbonds, water_hbonds = residue_vs_water_hbonds(hbonds, solvent_resn)
hbss, hbsb, hbbb = stratify_residue_hbonds(residue_hbonds)
wb = stratify_water_bridge(water_hbonds, solvent_resn)
wb2 = stratify_extended_water_bridge(water_hbonds, solvent_resn)
hbonds = hbss + hbsb + hbbb + wb + wb2
return hbonds
```
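A hedged sketch of the record format these functions consume: each hbond is `[frame_idx, atom1_label, atom2_label, itype]` with colon-separated `chain:resname:resid:atomname` labels. The labels below are invented for illustration, and the binning logic is restated inline rather than imported, since the real module depends on VMD being available.
```python
# Hypothetical single-frame hbond records in the format used above.
hbonds = [
    [0, "A:SER:65:OG",  "A:ASP:30:OD1", "hb"],   # sidechain-sidechain
    [0, "A:GLY:12:N",   "A:LEU:8:O",    "hb"],   # backbone-backbone
    [0, "A:TIP3:401:O", "A:ARG:55:NH1", "hb"],   # water-mediated
]
solvent_resn = "TIP3"
backbone_atoms = ["N", "O"]

for frame_idx, a1, a2, itype in hbonds:
    if solvent_resn in a1 or solvent_resn in a2:
        subtype = "water-mediated (handled by the water-bridge inference)"
    else:
        atom1, atom2 = a1.split(":")[3], a2.split(":")[3]
        bb1, bb2 = atom1 in backbone_atoms, atom2 in backbone_atoms
        subtype = "hbbb" if bb1 and bb2 else "hbss" if not (bb1 or bb2) else "hbsb"
    print(a1, a2, "->", subtype)
```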
#### File: jpaggi/getcontacts/get_contact_fingerprints.py
```python
from __future__ import division
import sys
import argparse
import numpy as np
from contact_calc.flare import compose_frequencytable, write_json
def parse_frequencyfiles(freq_files, freq_cutoff):
columns = len(freq_files)
ret = {}
for fidx, freq_file in enumerate(freq_files):
for line in freq_file:
line = line.strip()
if len(line) == 0 or line[0] == "#":
continue
tokens = line.split("\t")
res1 = tokens[0]
res2 = tokens[1]
freq = float(tokens[2])
if not (res1, res2) in ret:
ret[(res1, res2)] = np.zeros(columns)
ret[(res1, res2)][fidx] = freq
    # Drop entries where no column's frequency exceeds the cutoff
ret = {key: val for key, val in ret.items() if np.amax(val) > freq_cutoff}
return ret
def write_frequencytable(freq_table, col_labels, fname):
with open(fname, "w") as out_file:
out_file.write(",".join(["", ""] + col_labels) + "\n")
for (res1, res2) in freq_table:
freq_strings = [str(freq) for freq in freq_table[(res1, res2)]]
out_file.write(",".join([res1, res2] + freq_strings) + "\n")
def plot_frequencies(freq_table, col_labels, out_file, cluster_columns):
import pandas as pd
import matplotlib
import os
if "DISPLAY" not in os.environ:
matplotlib.use('agg')
import seaborn as sns;
sns.set(color_codes=True)
freq_matrix = np.array([freq_table[(r1, r2)] for (r1, r2) in freq_table])
row_labels = [r1 + " - " + r2 for (r1, r2) in freq_table]
pdframe = pd.DataFrame(freq_matrix, index=row_labels, columns=col_labels)
# Scale down figsize if too large
figsize = [pdframe.shape[1], pdframe.shape[0]]
if figsize[1] > 320:
figsize[0] *= 320 / figsize[1]
figsize[1] *= 320 / figsize[1]
# Create clustermap
fingerprints = sns.clustermap(pdframe,
figsize=figsize,
annot=True,
col_cluster=cluster_columns,
cmap='Blues')
# Remove color bar
fingerprints.cax.set_visible(False)
import matplotlib.pyplot as plt
plt.setp(fingerprints.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.setp(fingerprints.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
fingerprints.savefig(out_file)
def main():
# Parse command line arguments
class MyParser(argparse.ArgumentParser):
def error(self, message):
# Prints full program help when error occurs
self.print_help(sys.stderr)
sys.stderr.write('\nError: %s\n' % message)
sys.exit(2)
parser = MyParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--input_frequencies',
type=argparse.FileType('r'),
required=True,
nargs='+',
help="Paths to one or more residue frequency files")
parser.add_argument('--frequency_cutoff',
type=float,
required=False,
default=0.6,
help="Only interactions occurring at least this frequently will be plotted (default: 0.6)")
parser.add_argument('--column_headers',
type=str,
required=False,
nargs='+',
help="Header column labels. If nothing is specified, the input_frequencies filenames are used")
parser.add_argument('--cluster_columns',
type=bool,
required=False,
default=False,
help="Perform hierarchical clustering on the columns (default: False)")
parser.add_argument('--table_output',
type=str,
required=False,
default=None,
help="If specified, the tab-separated frequency table will be written to this file")
parser.add_argument('--plot_output',
type=str,
required=False,
default=None,
help="If specified, the heatmap will be written to this file (supports svg and png formats)")
parser.add_argument('--flare_output',
type=str,
required=False,
default=None,
help="If specified, a compare-flare will be written to this json-file")
args = parser.parse_args()
freq_table = parse_frequencyfiles(args.input_frequencies, args.frequency_cutoff)
# Determine column headers and exit on error
column_headers = [f.name for f in args.input_frequencies] if args.column_headers is None else args.column_headers
if len(column_headers) != len(args.input_frequencies):
parser.print_help(sys.stderr)
sys.stderr.write("\nError: --column_header arguments must match length of --input_frequencies\n")
sys.exit(2)
# Check output format and call corresponding function(s)
if args.table_output is None and args.flare_output is None and args.plot_output is None:
parser.print_help(sys.stderr)
sys.stderr.write("\nError: Either --table_output, --flare_output, or --plot_output must be specified\n")
sys.exit(2)
if args.table_output is not None:
write_frequencytable(freq_table, column_headers, args.table_output)
print("Wrote frequency table to "+args.table_output)
if args.flare_output is not None:
compare_flare = compose_frequencytable(freq_table, column_headers, args.frequency_cutoff)
write_json(compare_flare, args.flare_output)
print("Wrote multi flare to "+args.flare_output)
if args.plot_output is not None:
plot_frequencies(freq_table, column_headers, args.plot_output, args.cluster_columns)
print("Wrote fingerprint heatmap to "+args.plot_output)
if __name__ == '__main__':
main()
``` |
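For context, `parse_frequencyfiles()` above expects tab-separated residue-frequency files with `res1`, `res2`, `frequency` columns and `#` comment lines. The snippet below shows that assumed format with hypothetical residue names and parses it the same way.
```python
# Hypothetical residue-frequency file contents, parsed like parse_frequencyfiles().
import io

example = io.StringIO(
    "# res1\tres2\tfrequency\n"
    "A:ARG:131\tA:GLU:268\t0.92\n"
    "A:TYR:219\tA:SER:207\t0.41\n"
)
for line in example:
    line = line.strip()
    if not line or line.startswith("#"):
        continue
    res1, res2, freq = line.split("\t")
    print(res1, res2, float(freq))
```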
{
"source": "JPags/mcoc-cogs",
"score": 2
} |
#### File: mcoc-cogs/mcocTools/mcocTools.py
```python
import discord
import re
import csv
import random
import os
import datetime
from operator import itemgetter, attrgetter
from .utils import chat_formatting as chat
from .utils.dataIO import dataIO
from cogs.utils import checks
from discord.ext import commands
from . import hook as hook
class MCOCTools:
'''Tools for Marvel Contest of Champions'''
lookup_links = {
'event': (
'<http://simians.tk/MCOC-Sched>',
'[Tiny MCoC Schedule](https://docs.google.com/spreadsheets/d/e/2PACX-1vT5A1MOwm3CvOGjn7fMvYaiTKDuIdvKMnH5XHRcgzi3eqLikm9SdwfkrSuilnZ1VQt8aSfAFJzZ02zM/pubhtml?gid=390226786)',
'<NAME> Schedule',
'https://d2jixqqjqj5d23.cloudfront.net/assets/developer/imgs/icons/google-spreadsheet-icon.png'),
'rttl':(
'<https://drive.google.com/file/d/0B4ozoShtX2kFcDV4R3lQb1hnVnc/view>',
'[Road to the Labyrinth Opponent List](https://drive.google.com/file/d/0B4ozoShtX2kFcDV4R3lQb1hnVnc/view)',
'by Regal Empire {OG Wolvz}',
'http://svgur.com/s/48'),
'hook': (
'<http://hook.github.io/champions>',
'[hook/Champions by gabriel](http://hook.github.io/champions)',
'hook/champions for Collector',
'https://assets-cdn.github.com/favicon.ico'),
'spotlight': (
'<http://simians.tk/MCoCspotlight>',
'[MCOC Spotlight Dataset](http://simians.tk/MCoCspotlight)\nIf you would like to donate prestige, signatures or stats, join us at \n[CollectorDevTeam](https://discord.gg/BwhgZxk)'),
# 'marvelsynergy': (
# '<http://www.marvelsynergy.com/team-builder>',
# '[Marvel Synergy Team Builder](http://www.marvelsynergy.com/team-builder)',
# 'Marvel Synergy',
# 'http://www.marvelsynergy.com/images/marvelsynergy.png'),
'alsciende':(
'<https://alsciende.github.io/masteries/v10.0.1/#>',
'[Alsciende Mastery Tool](https://alsciende.github.io/masteries/v17.0.2/#)',
'by u/alsciende',
'https://images-ext-2.discordapp.net/external/ymdMNrkhO9L5tUDupbFSEmu-JK0X2bpV0ZE-VYTBICc/%3Fsize%3D1024/https/cdn.discordapp.com/avatars/268829380262756357/b55ae7fc51d9b741450f949accd15fbe.webp?width=80&height=80'),
'simulator': (
'<http://simians.tk/msimSDF>',
'[-SDF- Mastery Simulator](http://simians.tk/msimSDF)'),
# 'streak': (
# '<http://simians.tk/-sdf-streak>'
# '[Infinite Streak](http://simians.tk/-sdf-streak)'),
# #'http://simians.tk/SDFstreak')
}
mcolor = discord.Color.red()
COLLECTOR_ICON='https://raw.githubusercontent.com/JasonJW/mcoc-cogs/master/mcoc/data/cdt_icon.png'
icon_sdf = 'https://raw.githubusercontent.com/JasonJW/mcoc-cogs/master/mcoc/data/sdf_icon.png'
dataset = 'data/mcoc/masteries.csv'
def __init__(self, bot):
self.bot = bot
def present(self, lookup):
em=discord.Embed(color=self.mcolor,title='',description=lookup[1])
print(len(lookup))
if len(lookup) > 2:
em.set_footer(text=lookup[2],icon_url=lookup[3])
else:
em.set_footer(text='CollectorDevTeam',icon_url=self.COLLECTOR_ICON)
return em
@commands.command(pass_context=True,aliases={'collector','infocollector','about'})
async def aboutcollector(self,ctx):
"""Shows info about Collector"""
author_repo = "https://github.com/Twentysix26"
red_repo = author_repo + "/Red-DiscordBot"
server_url = "https://discord.gg/wJqpYGS"
dpy_repo = "https://github.com/Rapptz/discord.py"
python_url = "https://www.python.org/"
collectorpatreon = 'https://patreon.com/collectorbot'
since = datetime.datetime(2016, 1, 2, 0, 0)
days_since = (datetime.datetime.utcnow() - since).days
dpy_version = "[{}]({})".format(discord.__version__, dpy_repo)
py_version = "[{}.{}.{}]({})".format(*os.sys.version_info[:3],
python_url)
owner_set = self.bot.settings.owner is not None
owner = self.bot.settings.owner if owner_set else None
if owner:
owner = discord.utils.get(self.bot.get_all_members(), id=owner)
if not owner:
try:
owner = await self.bot.get_user_info(self.bot.settings.owner)
except:
owner = None
if not owner:
owner = "Unknown"
about = (
"Collector is an instance of [Red, an open source Discord bot]({0}) "
"created by [Twentysix]({1}) and improved by many.\n\n"
"The Collector Dev Team is backed by a passionate community who contributes and "
"creates content for everyone to enjoy. [Join us today]({2}) "
"and help us improve!\n\n"
"★ If you would like to support the Collector, please visit {3}.\n"
"★ Patrons and Collaborators recieve priority support and secrety stuff.\n\n~ JJW"
"".format(red_repo, author_repo, server_url, collectorpatreon))
devteam = ( "DeltaSigma#8530\n"
"JJW#8071\n"
)
supportteam=('phil_wo#3733\nSpiderSebas#9910\nsuprmatt#2753\ntaoness#5565\nOtriux#9964')
embed = discord.Embed(colour=discord.Colour.red(), title="Collector", url=collectorpatreon)
embed.add_field(name="Instance owned by", value=str(owner))
embed.add_field(name="Python", value=py_version)
embed.add_field(name="discord.py", value=dpy_version)
embed.add_field(name="About", value=about, inline=False)
embed.add_field(name="PrestigePartner",value='mutamatt#4704',inline=True)
embed.add_field(name='DuelsPartners',value='2OO2RC51#4587',inline=True)
embed.add_field(name='MapsPartners',value='jpags#5202\nBlooregarde#5848 ',inline=True)
embed.add_field(name='LabyrinthTeam',value='Kiryu#5755\nre-1#7595',inline=True)
embed.add_field(name='CollectorSupportTeam', value=supportteam,inline=True)
embed.add_field(name="CollectorDevTeam",value=devteam,inline=True)
embed.set_footer(text="Bringing joy since 02 Jan 2016 (over "
"{} days ago!)".format(days_since))
try:
await self.bot.say(embed=embed)
except discord.HTTPException:
await self.bot.say("I need the `Embed links` permission "
"to send this")
# @checks.admin_or_permissions(manage_server=True)
# @commands.command()
# async def tickets(self):
# ticketsjson = 'data/tickets/tickets.json'
# tickets = dataIO.load_json(ticketsjson)
# em = discord.Embed(title='Tickets')
# cnt = 0
# ids = tickets.keys()
#
# for ticket in :
# em.add_field(name='{} - filed by {}'.format(cnt, ticket['name'],value='{}\n id: {}'.format(ticket['message'],ticket)))
# await self.bot.say(embed=em)
@commands.command(help=lookup_links['event'][0], aliases=['events','schedule',])
async def event(self):
x = 'event'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['spotlight'][0],)
async def spotlight(self):
x = 'spotlight'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['rttl'][0],)
async def rttl(self):
x = 'rttl'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['simulator'][0],aliases=['msim'])
async def simulator(self):
x = 'simulator'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['alsciende'][0], aliases=('mrig',))
async def alsciende(self):
x = 'alsciende'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
@commands.command(help=lookup_links['hook'][0])
async def hook(self):
x = 'hook'
lookup = self.lookup_links[x]
await self.bot.say(embed=self.present(lookup))
# await self.bot.say('iOS dumblink:\n{}'.format(lookup[0]))
# @commands.command()
# async def keygen(self, prefix='SDCC17'):
# '''SDCC Code Generator
# No warranty :)'''
# letters='ABCDEFGHIJKLMNOPQURSTUVWXYZ'
# numbers='0123456789'
# package = []
# for i in range(0,9):
# lets='{}{}{}{}{}{}'.format(random.choice(letters),random.choice(letters),random.choice(numbers),random.choice(numbers),random.choice(letters),random.choice(letters))
# package.append(prefix+lets)
# em=discord.Embed(color=discord.Color.gold(),title='Email Code Generator',description='\n'.join(package))
# await self.bot.say(embed=em)
    def _get_text(self, mastery, rank):
        rows = get_csv_rows(self.dataset, 'Mastery', mastery)
        text = []
        for row in rows:
            text.append(row['Text'].format(row[str(rank)]))
        return text
@checks.admin_or_permissions(manage_server=True, manage_roles=True)
@commands.command(name='gaps', pass_context=True, hidden=True)
async def _alliance_popup(self, ctx, *args):
'''Guild | Alliance Popup System'''
        warning_msg = ('The G.A.P.S. System will configure your server for basic Alliance Operations. '
                       'Roles will be added for summoners, alliance, officers, bg1, bg2, bg3. '
                       'Channels will be added for announcements, alliance, & battlegroups. '
                       'Channel permissions will be configured. '
                       'After the G.A.P.S. system prepares your server, there will be additional instructions. '
                       'If you consent, press OK')
em = discord.Embed(color=ctx.message.author.color, title='G.A.P.S. Warning Message', description=warning_msg)
message = await self.bot.say(embed=em)
await self.bot.add_reaction(message, '❌')
await self.bot.add_reaction(message, '🆗')
react = await self.bot.wait_for_reaction(message=message, user=ctx.message.author, timeout=30, emoji=['❌', '🆗'])
if react is not None:
if react.reaction.emoji == '❌':
await self.bot.say('G.A.P.S. canceled.')
return
elif react.reaction.emoji == '🆗':
                message2 = await self.bot.say('G.A.P.S. in progress.')
else:
await self.bot.say('Ambiguous response. G.A.P.S. canceled')
return
server = ctx.message.server
adminpermissions = discord.PermissionOverwrite(administrator=True)
moderatorpermissions = discord.PermissionOverwrite(manage_roles=True)
moderatorpermissions.manage_server=True
moderatorpermissions.kick_members=True
moderatorpermissions.ban_members=True
moderatorpermissions.manage_channels=True
moderatorpermissions.manage_server=True
moderatorpermissions.manage_messages=True
moderatorpermissions.view_audit_logs=True
moderatorpermissions.read_messages=True
moderatorpermissions.create_instant_invite=True
roles = server.roles
rolenames = []
for r in roles:
rolenames.append('{}'.format(r.name))
aroles = ['officers', 'bg1', 'bg2', 'bg3', 'alliance', 'summoners']
# message = await self.bot.say('Stage 1: Creating roles')
if 'admin' not in rolenames:
admin = await self.bot.create_role(server=server, name='admin', color=discord.Color.gold(), hoist=False, mentionable=False)
if 'officers' not in rolenames:
officers = await self.bot.create_role(server=server, name='officers', color=discord.Color.light_grey(), hoist=False, mentionable=True)
if 'bg1' not in rolenames:
bg1 = await self.bot.create_role(server=server, name='bg1', color=discord.Color.blue(), hoist=False, mentionable=True)
if 'bg2' not in rolenames:
bg2 = await self.bot.create_role(server=server, name='bg2', color=discord.Color.purple(), hoist=False, mentionable=True)
if 'bg3' not in rolenames:
bg3 = await self.bot.create_role(server=server, name='bg3', color=discord.Color.orange(), hoist=False, mentionable=True)
if 'alliance' not in rolenames:
alliance = await self.bot.create_role(server=server, name='alliance', color=discord.Color.teal(), hoist=True, mentionable=True)
if 'summoners' not in rolenames:
summoners = await self.bot.create_role(server=server, name='summoners', color=discord.Color.lighter_grey(), hoist=True, mentionable=True)
roles = sorted(server.roles, key=lambda roles:roles.position, reverse=True)
em = discord.Embed(color=discord.Color.red(), title='Guild Alliance Popup System', description='')
positions = []
for r in roles:
positions.append('{} = {}'.format(r.position, r.mention))
if r.name == 'officers':
officers = r
elif r.name == 'bg1':
bg1 = r
elif r.name == 'bg2':
bg2 = r
elif r.name == 'bg3':
bg3 = r
elif r.name == 'alliance':
alliance = r
elif r.name == 'summoners':
summoners = r
elif r.name == 'admin':
admin = r
elif r.name=='everyone':
everyone = r
em.add_field(name='Stage 1 Role Creation',value='\n'.join(positions),inline=False)
await self.bot.say(embed=em)
everyone_perms = discord.PermissionOverwrite(read_messages = False)
everyoneperms = discord.ChannelPermissions(target=server.default_role, overwrite=everyone_perms)
readperm = discord.PermissionOverwrite(read_messages = True)
officerperms = discord.ChannelPermissions(target=officers, overwrite=readperm)
allianceperms = discord.ChannelPermissions(target=alliance, overwrite=readperm)
summonerperms = discord.ChannelPermissions(target=summoners, overwrite=readperm)
bg1perms = discord.ChannelPermissions(target=bg1, overwrite=readperm)
bg2perms = discord.ChannelPermissions(target=bg2, overwrite=readperm)
bg3perms = discord.ChannelPermissions(target=bg3, overwrite=readperm)
channellist = []
for c in server.channels:
channellist.append(c.name)
if 'announcements' not in channellist:
await self.bot.create_channel(server, 'announcements', everyoneperms, allianceperms, summonerperms)
# if 'alliance' not in channellist:
# await self.bot.create_channel(server, 'alliance', everyoneperms, allianceperms)
if 'alliance-chatter' not in channellist:
await self.bot.create_channel(server, 'alliance-chatter', everyoneperms, allianceperms)
if 'officers' not in channellist:
await self.bot.create_channel(server, 'officers', everyoneperms, officerperms)
if 'bg1aq' not in channellist:
await self.bot.create_channel(server, 'bg1aq', everyoneperms, officerperms, bg1perms)
if 'bg1aw' not in channellist:
await self.bot.create_channel(server, 'bg1aw', everyoneperms, officerperms, bg1perms)
if 'bg2aq' not in channellist:
await self.bot.create_channel(server, 'bg2aq', everyoneperms, officerperms, bg2perms)
if 'bg2aw' not in channellist:
await self.bot.create_channel(server, 'bg2aw', everyoneperms, officerperms, bg2perms)
if 'bg3aq' not in channellist:
await self.bot.create_channel(server, 'bg3aq', everyoneperms, officerperms, bg3perms)
if 'bg3aw' not in channellist:
await self.bot.create_channel(server, 'bg3aw', everyoneperms, officerperms, bg2perms)
channels= sorted(server.channels, key=lambda channels:channels.position, reverse=False)
channelnames=[]
for c in channels:
channelnames.append('{} = {} '.format(c.position, c.mention))
em = discord.Embed(color=discord.Color.red(), title='Guild Alliance Popup System', description='')
em.add_field(name='Stage 2 Create Channels',value='\n'.join(channelnames),inline=False)
await self.bot.say(embed=em)
        em = discord.Embed(color=discord.Color.red(), title='Guild Alliance Popup System', description='')
# fixNotifcations = await self.bot.say('Stage 3: Attempting to set Default Notification to Direct Message Only')
try:
# mentions only
await self.bot.http.request(discord.http.Route('PATCH', '/guilds/{guild_id}', guild_id=server.id), json={'default_message_notifications': 1})
em.add_field(name='Stage 3: Notification Settings', value='I have modified the servers to use better notification settings.')
except Exception as e:
            em.add_field(name='Stage 3: Notification Settings', value='An exception occurred while updating notification settings. Check your log.')
await self.bot.say(embed=em)
        em = discord.Embed(color=ctx.message.author.color, title='Guild Alliance Popup System', description='Server Owner Instructions')
em.add_field(name='Enroll for Collector announcements', value='Enroll a channel for Collector announcements\n```/addchan #announcements```\n', inline=False)
em.add_field(name='Set up Autorole', value='Default Role should be {}\n```/autorole role summoners```\n```/autorole toggle``` '.format(summoners.mention), inline=False)
await self.bot.say(embed=em)
await self.bot.delete_message(message2)
# @checks.is_owner()
# @commands.group(pass_context=True, hidden=True)
# async def inspect(self, ctx):
# @checks.is_owner()
@commands.command(pass_context=True, hidden=True, name='inspectroles', aliases=['inspectrole', 'ir',])
async def _inspect_roles(self, ctx):
server = ctx.message.server
roles = sorted(server.roles, key=lambda roles:roles.position, reverse=True)
positions = []
for r in roles:
positions.append('{} = {}'.format(r.position, r.name))
desc = '\n'.join(positions)
em = discord.Embed(color=discord.Color.red(), title='Collector Inspector: ROLES', description=desc)
await self.bot.say(embed=em)
@checks.admin_or_permissions(manage_roles=True)
@commands.command(name='norole',pass_context=True,hidden=True)
async def _no_role(self, ctx, role : discord.Role):
members = ctx.message.server.members
missing = []
print(str(len(missing)))
for member in members:
if not member.bot:
if role not in member.roles:
missing.append('{0.name} : {0.id}'.format(member))
print(str(len(missing)))
if len(missing) == 0:
await self.bot.say('No users are missing the role: {}'.format(role.name))
else:
pages = chat.pagify('\n'.join(missing))
for page in pages:
await self.bot.say(chat.box(page))
# @checks.admin_or_permissions(manage_server=True, manage_roles=True)
# @commands.command(name='setup', pass_context=True)
# async def collectorsetup(self,ctx,*args):
# '''Server Setup Guide
# Collector Role Requires admin
# '''
# 1) Check Roles present
# 2) Check Role Permissions
# 3) Check Role Order
# Manage Messages required for Cleanup
# Manage Server required for Role Creation / Deletion
# Manage Roles required for Role assignment / removal
# 2 ) Check roles
# 3 ) Check role order
# check1 = await self.setup_phase_one(ctx)
# if check1:
# await self.bot.say(embed=discord.Embed(color=discord.color.red(),
# title='Collector Setup Protocol',
# description='☑ setup_phase_one '))
# async def setup_phase_one(self, ctx):
# '''Check Server ROLES'''
# # repeat_phase = await self.setup_phase_one(ctx)
# # next_phase = await self.setup_phase_two(ctx)
#
# server = ctx.message.server
# roles = server.roles
# rolenames = []
# phase = True
# for r in roles:
# rolenames.append(r.name)
# required_roles={'Collector','officers','bg1','bg2','bg3','LEGEND','100%LOL','LOL','RTL','ROL','100%Act4','Summoner','TestRole1','TestRole2'}
# roles_fields={'officers': {True, discord.Color.lighter_grey(),},
# 'bg1':{True, discord.Color.blue(), },
# 'bg2':{True, discord.Color.purple(), },
# 'bg3':{True, discord.Color.orange(), },
# 'TestRole1':{True, discord.Color.default(), },
# 'TestRole2':{True, discord.Color.light_grey()},
# }
# stageone=['Setup Conditions 1:\nRoles Required for Guild Setup:',]
# for i in required_roles:
# if i in rolenames:
# stageone.append('☑️ {}'.format(i))
# else:
# stageone.append('❌ {}'.format(i))
# phase = False
# desc = '\n'.join(stageone)
# if phase == False:
# em=discord.Embed(color=discord.Color.red(),title='Server Setup Protocol [1]',description=desc)
# em.add_field(name='Corrective Action', value='Roles are missing. Create missing roles and Rerun test.\n🔁 == Rerun test\n❌ == Cancel setup')
# message = await self.bot.send_message(ctx.message.channel, embed=em)
# await self.bot.add_reaction(message,'\N{ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS}')
# await self.bot.add_reaction(message,'\N{CROSS MARK}')
# await self.bot.add_reaction(message, '\N{BLACK RIGHT-POINTING TRIANGLE}')
# react = await self.bot.wait_for_reaction(message=message, user=ctx.message.author, timeout=120, emoji=['\N{CROSS MARK}','\N{ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS}','\N{BLACK RIGHT-POINTING TRIANGLE}'])
# if react is None or react.reaction.emoji == '\N{CROSS MARK}':
# try:
# await self.bot.delete_message(message)
# except:
# pass
# return None
# elif react.reaction.emoji == '\N{ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS}':
# await self.bot.delete_message(message)
# return await self.setup_phase_one(ctx)
# elif react.reaction.emoji == '\N{BLACK RIGHT-POINTING TRIANGLE}':
# await self.bot.delete_message(message)
# return await self.setup_phase_two(ctx)
# elif phase == True:
# await setup_phase_two
#
# async def setup_phase_two(self, ctx):
# '''Check Role ORDER'''
# server = ctx.message.server
# roles = sorted(server.roles, key=lambda roles:roles.position, reverse=True)
# required_roles = ('Collector','officers','bg1','bg2','bg3','LEGEND','100%LOL','LOL','RTL','ROL','100%Act4','Summoner', 'everyone')
# said = []
# em = discord.Embed(color=discord.Color.red(), title='Role Order Prerequisite',description='Role: Collector')
# positions = []
# for r in roles:
# positions.append('{} = {}'.format(r.position, r.name))
# em.add_field(name='Role Position on Server',value=chat.box('\n'.join(positions)),inline=False)
# said.append(await self.bot.say(embed=em))
# order = []
# c=len(required_roles)-1
# for r in required_roles:
# order.append('{} = {}'.format(c, r))
# c-=1
# em = discord.Embed(color=discord.Color.red(), title='',description='')
# em.add_field(name='Correct Role Positions', value =chat.box('\n'.join(order)),inline=False)
# perm_order = []
# phase = True
# for i in range(0,len(required_roles)-2):
# j = i+1
# if required_roles[j] > required_roles[i]:
# phase = False
# # perm_order.append('{} should be above {}'.format(required_roles[i],required_roles[j]))
# if phase == False:
# # em=discord.Embed(color=discord.Color.red(),title='Server Setup Protocol [2]',description=desc)
# em.add_field(name='Corrective Action', value='Roles are out of order. Adjust role order and Rerun test.')
# # em.add_field(name='',value='\n'.join(perm_order))
# message = await self.bot.send_message(ctx.message.channel, embed=em)
# said.append(message)
# await self.bot.add_reaction(message,'\N{BLACK LEFT-POINTING TRIANGLE}')
# await self.bot.add_reaction(message,'\N{ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS}')
# await self.bot.add_reaction(message,'\N{CROSS MARK}')
# await self.bot.add_reaction(message, '\N{BLACK RIGHT-POINTING TRIANGLE}')
# react = await self.bot.wait_for_reaction(message=message, user=ctx.message.author, timeout=120, emoji=['\N{CROSS MARK}','\N{ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS}','\N{BLACK RIGHT-POINTING TRIANGLE}'])
# if react is None or react.reaction.emoji == '\N{CROSS MARK}':
# try:
# for message in said:
# await self.bot.delete_message(message)
# except:
# pass
# return None
# elif react.reaction.emoji == '\N{ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS}':
# for message in said:
# await self.bot.delete_message(message)
# return await self.setup_phase_two(ctx)
# elif react.reaction.emoji == '\N{BLACK RIGHT-POINTING TRIANGLE}':
# for message in said:
# await self.bot.delete_message(message)
# return await self.setup_phase_three(ctx)
# elif react.reaction.emoji == '\N{BLACK LEFT-POINTING TRIANGLE}':
# for message in said:
# await self.bot.delete_message(message)
# return await self.setup_phase_one(ctx)
# elif phase == True:
# await setup_phase_three
#
# async def setup_phase_three(self, ctx):
# '''Check Role Permissions'''
# message = await self.bot.say('initiate phase three')
@commands.command(pass_context=True, hidden=True)
async def awopp_calc(self, ctx, wr:int, gain:int, loss:int):
'''MutaMatt's War Opponent Calculator
https://en.wikipedia.org/wiki/Elo_rating_system
'''
        # Elo expected score: E = 1 / (1 + 10 ** (rating_difference / 400))
        playera = 1 / (1 + 10 ** ((gain - loss) / 400))
await self.bot.say('{}'.format(playera))
def load_csv(filename):
return csv.DictReader(open(filename))
def get_csv_row(filecsv, column, match_val, default=None):
print(match_val)
csvfile = load_csv(filecsv)
for row in csvfile:
if row[column] == match_val:
if default is not None:
for k, v in row.items():
if v == '':
row[k] = default
return row
def get_csv_rows(filecsv, column, match_val, default=None):
print(match_val)
csvfile = load_csv(filecsv)
package =[]
for row in csvfile:
if row[column] == match_val:
if default is not None:
for k, v in row.items():
if v == '':
row[k] = default
package.append(row)
return package
def tabulate(table_data, width, rotate=True, header_sep=True):
rows = []
cells_in_row = None
for i in iter_rows(table_data, rotate):
if cells_in_row is None:
cells_in_row = len(i)
elif cells_in_row != len(i):
raise IndexError("Array is not uniform")
rows.append('|'.join(['{:^{width}}']*len(i)).format(*i, width=width))
if header_sep:
rows.insert(1, '|'.join(['-' * width] * cells_in_row))
return chat.box('\n'.join(rows))
def setup(bot):
bot.add_cog(MCOCTools(bot))
``` |
{
"source": "J-Pai/408DaisyJetson",
"score": 3
} |
#### File: J-Pai/408DaisyJetson/daisy_brain.py
```python
import sys
import os
import face_recognition
import cv2
from daisy_spine import DaisySpine
from daisy_spine import Dir
from daisy_eye import DaisyEye
from multiprocessing import Process, Queue
from multiprocessing.managers import SyncManager
import time
import argparse
class NeuronManager(SyncManager):
pass
NeuronManager.register('get_alexa_neuron')
connected = True
alexa_neuron = None
manager = NeuronManager(address=('', 4081), authkey=b'daisy')
try:
manager.connect()
alexa_neuron = manager.get_alexa_neuron()
print("Brain connected to neuron manager.")
except ConnectionRefusedError:
print("Brain not connected to neuron manager.")
connected = False
faces = {
"Jessie": "../faces/JPai-2.jpg",
"teddy": "../faces/Teddy-1.jpg",
"Vladimir": "../faces/Vlad-1.jpg"
}
name = "JessePai"
data = None
eye = None
X_THRES = 100
Z_CENTER = 1500
Z_THRES = 100
STANDING_THRES = 850
pid = -1
def begin_tracking(name, data_queue, video=True):
print("Begin Tracking")
print("Video: ", video)
eye = DaisyEye(faces, data_queue)
eye.find_and_track_kinect(None, "CSRT", video_out=video)
data_queue.close()
def daisy_action(data_queue, debug=True):
spine = DaisySpine()
print("Getting Data")
print("Debug: ", debug)
print(spine.read_all_lines())
data = None
prev_statement = ""
already_waiting = False
standing = True
prev_standing = True
while True:
state = None
direction = None
currCount = 0
if connected:
currNeuron = alexa_neuron.copy()
if "state" in currNeuron:
state = currNeuron.get("state")
if "count" in currNeuron:
currCount = currNeuron.get("count")
if state == "moving":
direction = currNeuron.get("direction")
if state is None or state == "idle" or state == "moving" or state == "exercise":
statement = ""
if direction is not None:
already_waiting = False
out = None
if direction == "left" or direction == "counterclockwise":
out = spine.turn(Dir.CCW)
elif direction == "right" or direction == "clockwise":
out = spine.turn(Dir.CW)
elif direction == "forward":
out = spine.forward()
elif direction == "backward":
out = spine.backward()
else:
out = spine.halt()
if debug:
statement = ("Moving:", direction, out)
if state == "exercise":
already_waiting = False
if not data_queue.empty():
data = data_queue.get()
if data:
(status, bbox, center, distance, res) = data
if status != "WAITING":
center_y = center[1]
if center_y < STANDING_THRES:
standing = True
if center_y > STANDING_THRES:
standing = False
if standing != prev_standing:
prev_standing = standing
currCount = currCount + 1
alexa_neuron.update([('count', currCount)])
print("Num Squats:", currCount)
if state == "idle" and not already_waiting:
print("Waiting")
alexa_neuron.update([('tracking', False)])
already_waiting = True
out = spine.halt()
statement = ("Idling", out)
if debug and statement != prev_statement:
prev_statement = statement
print(statement)
continue
if not data_queue.empty():
data = data_queue.get()
if data:
(status, bbox, center, distance, res) = data
if not status:
continue
if status == "STOP":
break
if status == "WAITING" and not already_waiting:
print("Waiting")
alexa_neuron.update([('tracking', False)])
already_waiting = True
out = spine.halt()
statement = ("Waiting for TARGET", out)
elif status != "WAITING":
already_waiting = False
center_x = center[0]
center_y = center[1]
res_center_x = int(res[0] / 2)
res_center_y = int(res[1] / 2)
out = None
if center_x < res_center_x - X_THRES:
out = spine.turn(Dir.CW)
elif center_x > res_center_x + X_THRES:
out = spine.turn(Dir.CCW)
elif distance > Z_CENTER + Z_THRES:
out = spine.forward()
elif distance < Z_CENTER - Z_THRES:
out = spine.backward()
else:
out = spine.halt()
if debug:
statement = (center_x, res_center_x, center, distance, res, out)
if debug and statement != prev_statement:
prev_statement = statement
print(statement)
data = None
print("Action Thread Exited")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Start Daisy's Brain")
parser.add_argument("--no-debug", action="store_const", const=True, help="Disable debug output")
parser.add_argument("--no-video", action="store_const", const=True, help="Disable video output")
args = parser.parse_args()
print("Daisy's Brain is Starting ^_^")
if connected:
# Clear alexa neuron.
alexa_neuron.clear()
data = Queue()
action_p = Process(target = daisy_action, args=(data, not args.no_debug, ))
action_p.daemon = True
action_p.start()
pid = action_p.pid
begin_tracking("JessePai", data, not args.no_video)
action_p.terminate()
print("Brain Terminated +_+")
```
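`daisy_brain.py` and the tracking test below only connect as clients to a `SyncManager` on port 4081; the process that actually serves the shared "neuron" dicts is not shown in this excerpt. The sketch below is my assumption of what that serving side might look like, reusing the same address and authkey.
```python
# Assumed companion process (not part of this excerpt) that serves the shared
# "neuron" dicts which daisy_brain and the web/Alexa pieces connect to.
from multiprocessing.managers import SyncManager

alexa_neuron = {}
web_neuron = {}

class NeuronManager(SyncManager):
    pass

NeuronManager.register('get_alexa_neuron', callable=lambda: alexa_neuron)
NeuronManager.register('get_web_neuron', callable=lambda: web_neuron)

if __name__ == '__main__':
    manager = NeuronManager(address=('', 4081), authkey=b'daisy')
    server = manager.get_server()
    print("Serving neurons on port 4081")
    server.serve_forever()
```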
#### File: 408DaisyJetson/tests/pure_face_tracking.py
```python
import numpy as np
import cv2
import face_recognition
import sys
from multiprocessing import Queue
from multiprocessing.managers import SyncManager
from queue import Queue as ImageQueue
from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame
from pylibfreenect2 import setGlobalLogger
setGlobalLogger(None)
print("OpenGL Pipeline")
from pylibfreenect2 import OpenGLPacketPipeline
print("Starting Tracking")
def __draw_bbox(valid, frame, bbox, color, text):
if not valid:
return
cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2, 1)
cv2.putText(frame, text, (bbox[0], bbox[1] - 4), \
cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
def __scale_frame(frame, scale_factor = 1):
if scale_factor == 1:
return frame
return cv2.resize(frame, (0,0), fx=scale_factor, fy=scale_factor)
def face_locations(image):
pass
class NeuronManager(SyncManager):
pass
NeuronManager.register('get_web_neuron')
NeuronManager.register('get_alexa_neuron')
manager = NeuronManager(address=('', 4081), authkey=b'daisy')
manager.connect()
web_neuron = manager.get_web_neuron()
alexa_neuron = manager.get_alexa_neuron()
faces = {
"JessePai": "../faces/JPai-1.jpg",
# "VladMok": "./faces/Vlad.jpg",
# "TeddyMen": "./faces/TMen-1.jpg"
}
known_faces = {}
for person in faces:
image = face_recognition.load_image_file(faces[person])
print(person)
face_encoding_list = face_recognition.face_encodings(image)
if len(face_encoding_list) > 0:
known_faces[person] = face_encoding_list[0]
else:
print("\tCould not find face for person...")
pipeline = OpenGLPacketPipeline()
target = "JessePai"
fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
print("No device connected!")
serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial, pipeline = pipeline)
listener = SyncMultiFrameListener(FrameType.Color | FrameType.Depth)
device.setColorFrameListener(listener)
device.setIrAndDepthFrameListener(listener)
device.start()
registration = Registration(device.getIrCameraParams(),
device.getColorCameraParams())
undistorted = Frame(512, 424, 4)
registered = Frame(512, 424, 4)
bigdepth = Frame(1920, 1082, 4)
trackerObj = None
face_process_frame = True
bbox = None
track_bbox = None
while True:
timer = cv2.getTickCount()
frames = listener.waitForNewFrame()
color = frames["color"]
depth = frames["depth"]
registration.apply(color, depth, undistorted, registered, bigdepth=bigdepth)
bd = np.resize(bigdepth.asarray(np.float32), (1080, 1920))
c = cv2.cvtColor(color.asarray(), cv2.COLOR_RGB2BGR)
face_bbox = None
new_track_bbox = None
face_locations = face_recognition.face_locations(c, number_of_times_to_upsample=0, model="cnn")
face_encodings = face_recognition.face_encodings(c, face_locations)
    for face_location, face_encoding in zip(face_locations, face_encodings):
        matches = face_recognition.compare_faces(
            [known_faces[target]], face_encoding, 0.6)
        if len(matches) > 0 and matches[0]:
            (top, right, bottom, left) = face_location
            face_bbox = (left, top, right, bottom)
mid_w = int((left + right) / 2)
mid_h = int((top + bottom) / 2)
break
__draw_bbox(face_bbox is not None, c, face_bbox, (0, 0, 255), target)
c = __scale_frame(c, scale_factor = 0.5)
fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
cv2.putText(c, "FPS : " + str(int(fps)), (100,50),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 1)
image = cv2.imencode('.jpg', c)[1].tostring()
web_neuron.update([('image', image)])
listener.release(frames)
cv2.destroyAllWindows()
device.stop()
device.close()
```
#### File: 408DaisyJetson/tests/tracker_generic_code.py
```python
import cv2
CAM_NUM = 1
def track_object():
video = cv2.VideoCapture(CAM_NUM) # Setup the input video
video.set(3, 640)
video.set(4, 480)
ok, frame = video.read()
tracker = cv2.TrackerKCF_create() # Create the tracker object
bbox = cv2.selectROI(frame, False) # Select the desired object to track
ok = tracker.init(frame, bbox) # Initialize tracker with bbox and starting frame
while True:
timer = cv2.getTickCount()
_, frame = video.read()
ok, bbox = tracker.update(frame) # Update tracker with new frame to obtain new bbox
if ok:
p1 = (int(bbox[0]), int(bbox[1]))
p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
cv2.putText(frame, "FPS : " + str(int(fps)), (100,50),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,255), 1)
cv2.imshow("Tracking", frame)
k = cv2.waitKey(1) & 0xff
if k == ord('q'):
break
track_object()
``` |
{
"source": "jpaidoussi/lambda-router",
"score": 2
} |
#### File: src/lambda_router/app.py
```python
import logging
import threading
from typing import Any, Callable, Dict, List, Mapping, Optional
import attr
from . import exceptions, routers
from .config import Config
from .events import LambdaEvent
from .interfaces import Event, Router
from .proxies import DictProxy
@attr.s(kw_only=True)
class App:
"""
Provides the central object and entry point for a lambda execution.
:param name: The name of the application.
:param config: The configuration to use for this App. Can be any dict-like object but
        generally is an instance of ``lambda_router.config.Config``.
:param event_class: The class to use for representing lambda events.
:param router: The ``Router`` instance to use for this app.
:param logger: The ``logging.Logger`` compatible logger instance to use for logging.
"""
name: str = attr.ib()
config: Config = attr.ib(factory=Config)
event_class: Event = attr.ib(default=LambdaEvent)
event_params: Optional[Dict[str, Any]] = attr.ib(default=None, repr=False)
router: Router = attr.ib(factory=routers.SingleRoute)
logger: logging.Logger = attr.ib(repr=False)
local_context: threading.local = attr.ib(repr=False, init=False, factory=threading.local)
execution_context: Optional[Any] = attr.ib(repr=False, init=False, default=None)
middleware_chain: Optional[List[Callable]] = attr.ib(repr=False, init=False, default=None)
exception_handlers: List[Callable] = attr.ib(repr=False, init=False, factory=list)
@logger.default
def _create_logger(self):
"""
Default initialiser that creates a stdlib logger.
"""
logger = logging.getLogger(self.name)
return logger
def __attrs_post_init__(self):
"""
        Post-init hook. Used to load the middleware from the config. This requires the
config to already have been initialised before creating the App.
"""
self.load_middleware()
@property
def globals(self):
"""
Provides a proxied dict in the ``local_context``.
"""
if not hasattr(self.local_context, "globals"):
self.local_context.globals = DictProxy()
return self.local_context.globals
def route(self, **options: Mapping[str, Any]) -> Callable:
"""
Provides a decorator for adding a route via the configured router.
"""
def decorator(fn: Callable):
self.router.add_route(fn=fn, **options)
return fn
return decorator
def register_exception_handler(self, fn: Callable) -> Callable:
"""
Provides a decorator that registers a handler for any uncaught exceptions.
"""
self.exception_handlers.append(fn)
def decorator(fn: Callable):
return fn
return decorator
def load_middleware(self):
"""
        Initialises the middleware from the app config.
"""
dispatch = self.router.dispatch
configured_middleware = self.config.get("MIDDLEWARE", [])
for middleware in configured_middleware:
mw_instance = middleware(dispatch)
dispatch = mw_instance
self.middleware_chain = dispatch
def dispatch(self, *, event: Event) -> Any:
"""
Dispatches a request via the configured middleware chain.
:param event: The ``Event`` object pass on to the middleware chain.
"""
return self.middleware_chain(event=event)
def _create_event(self, raw_event: Mapping[str, Any]) -> Event:
"""
Helper to create an event from the configured ``event_class``.
"""
params = {
"raw": raw_event,
"app": self,
}
if self.event_params is not None:
params.update(self.event_params)
return self.event_class.create(**params)
def __call__(self, raw_event: Mapping[str, Any], lambda_context: Any) -> Any:
"""
The main entry point that is invoked by the lambda runtime environment.
:param raw_event: The raw event mapping passed in from the lambda runtime.
        :param lambda_context: The execution context object passed in from the lambda runtime.
"""
event = self._create_event(raw_event)
self.execution_context = lambda_context
try:
response = self.dispatch(event=event)
except Exception as e:
# The AWS Lambda environment catches all unhandled exceptions
# without ever invoking the sys.excepthook handler, so this
# mechanism is provided as a way to pass on those exceptions
# without using sys.excepthook.
if not isinstance(e, exceptions.HandledError):
for fn in self.exception_handlers:
fn(self, event, e)
raise
return response
```
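A minimal usage sketch of the `App` entry point, based on the code above rather than the project's docs: the `EventField` router (defined in `routers.py` below) dispatches on a top-level key of the raw event, and the app instance itself is what the lambda runtime invokes. Names like `example-service` and the `"type"` key are illustrative assumptions.
```python
# Hedged usage sketch: route on the "type" field of the raw lambda event.
from lambda_router.app import App
from lambda_router import routers

app = App(name="example-service", router=routers.EventField(key="type"))

@app.route(key="ping")
def ping(event):
    # event.raw is the raw lambda payload; event.app points back to the App.
    return {"pong": True, "echo": event.raw.get("payload")}

if __name__ == "__main__":
    # Locally simulate how the lambda runtime would invoke the handler.
    print(app({"type": "ping", "payload": "hello"}, lambda_context=None))
```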
#### File: src/lambda_router/events.py
```python
from typing import Any, Dict, Mapping
import attr
from .interfaces import Event
@attr.s(kw_only=True)
class LambdaEvent(Event):
"""
A generic encapsulation of the Lambda event.
    :param raw: The raw event as received from the lambda execution.
:param session: Per session / invocation storage.
:param app: A reference to the App this event was created from.
"""
raw: Mapping[str, Any] = attr.ib(repr=False)
session: Dict[str, Any] = attr.ib(repr=False, factory=dict)
app = attr.ib(repr=False)
@classmethod
def create(cls, *, raw, app):
return cls(raw=raw, app=app)
```
#### File: src/lambda_router/routers.py
```python
import json
from typing import Any, Callable, Dict, Optional
import attr
from .interfaces import Event, Router
@attr.s(kw_only=True)
class SingleRoute(Router):
"""
Routes to a single defined route without any conditions.
:param route: The single defined route. Only set via ``add_route``.
"""
route: Optional[Callable] = attr.ib(init=False, default=None)
def add_route(self, *, fn: Callable) -> None:
"""
Adds the single route.
:param fn: The callable to route to.
:type fn: callable
:raises ValueError: Raised when a single route has already been defined.
"""
if self.route is not None:
raise ValueError("Single route is already defined. SingleRoute can only have a single defined route.")
self.route = fn
def get_route(self, *, event: Optional[Event]) -> Callable:
"""
Returns the defined route
:raises ValueError: Raised if no route is defined.
:rtype: callable
"""
if self.route is None:
raise ValueError("No route defined.")
return self.route
def dispatch(self, *, event: Event) -> Any:
"""
Gets the configured route and invokes the callable.
:param event: The event to pass to the callable route.
"""
route = self.get_route(event=event)
return route(event=event)
@attr.s(kw_only=True)
class EventField(Router):
"""
    Routes on the value of the specified top-level ``key`` in the
given ``Event.raw`` dict.
:param key: The name of the top-level key to look for when routing.
:param routes: The routes mapping. Only set via ``add_route``
"""
key: str = attr.ib(kw_only=True)
routes: Dict[str, Callable] = attr.ib(init=False, factory=dict)
def add_route(self, *, fn: Callable, key: str) -> None:
"""
Adds the route with the given key.
:param fn: The callable to route to.
:type fn: callable
:param key: The key to associate the route with.
:type key: str
"""
self.routes[key] = fn
def get_route(self, *, event: Event) -> Callable:
"""
Returns the matching route for the value of the ``key`` in the
given event.
:raises ValueError: Raised if no route is defined or routing key is
not present in the event.
:rtype: callable
"""
field_value: str = event.raw.get(self.key, None)
if field_value is None:
raise ValueError(f"Routing key ({self.key}) not present in the event.")
try:
return self.routes[field_value]
except KeyError:
raise ValueError(f"No route configured for given field ({field_value}).")
def dispatch(self, *, event: Event) -> Any:
"""
Gets the configured route and invokes the callable.
:param event: The event to pass to the callable route.
"""
route = self.get_route(event=event)
return route(event=event)
@attr.s(kw_only=True)
class SQSMessage:
meta: Dict[str, Any] = attr.ib(factory=dict)
body: Dict[str, Any] = attr.ib(factory=dict)
key: str = attr.ib()
event: Event = attr.ib()
@classmethod
def from_raw_sqs_message(cls, *, raw_message: Dict[str, Any], key_name: str, event: Event):
meta = {}
attributes = raw_message.pop("attributes", None)
if attributes:
meta.update(attributes)
body = raw_message.pop("body", "")
message_attributes = raw_message.pop("messageAttributes", None)
key = None
if message_attributes:
key_attribute = message_attributes.get(key_name, None)
if key_attribute is not None:
key = key_attribute["stringValue"]
for k, value in raw_message.items():
meta[k] = value
# Attempt to decode json body.
body = json.loads(body)
return cls(meta=meta, body=body, key=key, event=event)
@attr.s(kw_only=True)
class SQSMessageField(Router):
"""
Processes all message records in a given ``Event``, routing each based on
the configured key.
:param key: The name of the message-level key to look for when routing.
:param routes: The routes mapping. Only set via ``add_route``
"""
key: str = attr.ib(kw_only=True)
routes: Dict[str, Callable] = attr.ib(init=False, factory=dict)
def _get_message(self, raw_message: Dict[str, Any], event: Event) -> SQSMessage:
return SQSMessage.from_raw_sqs_message(raw_message=raw_message, key_name=self.key, event=event)
def add_route(self, *, fn: Callable, key: str) -> None:
"""
Adds the route with the given key.
:param fn: The callable to route to.
:type fn: callable
:param key: The key to associate the route with.
:type key: str
"""
self.routes[key] = fn
def get_route(self, *, message: SQSMessage) -> Callable:
"""
Returns the matching route for the value of the ``key`` in the
given message.
:raises ValueError: Raised if no route is defined or routing key is
not present in the message.
:rtype: callable
"""
field_value: str = message.key
if field_value is None:
raise ValueError(f"Routing key ({self.key}) not present in the message.")
try:
return self.routes[field_value]
except KeyError:
raise ValueError(f"No route configured for given field ({field_value}).")
def dispatch(self, *, event: Event) -> Any:
"""
Iterates over all the message records in the given Event and executes the
applicable callable as determined by the configured routes.
:param event: The event to parse for messages.
"""
messages = event.raw.get("Records", None)
if messages is None:
raise ValueError("No messages present in Event.")
for raw_message in messages:
message = self._get_message(raw_message, event=event)
route = self.get_route(message=message)
# Process each message now.
route(message=message)
# SQS Lambdas don't return a value.
return None
@attr.s(kw_only=True)
class GenericSQSMessage(Router):
"""
Processes all message records in a given ``Event``, routing every message to the single defined route.
:param route: The single defined route. Only set via ``add_route``.
"""
route: Optional[Callable] = attr.ib(init=False, default=None)
def _get_message(self, raw_message: Dict[str, Any], event: Event) -> SQSMessage:
return SQSMessage.from_raw_sqs_message(raw_message=raw_message, key_name=None, event=event)
def add_route(self, *, fn: Callable) -> None:
"""
Adds the single route.
:param fn: The callable to route to.
:type fn: callable
:raises ValueError: Raised when a single route has already been defined.
"""
if self.route is not None:
raise ValueError("Single route is already defined. SingleRoute can only have a single defined route.")
self.route = fn
def get_route(self, *, message: SQSMessage) -> Callable:
"""
Returns the defined route
:raises ValueError: Raised if no route is defined.
:rtype: callable
"""
if self.route is None:
raise ValueError("No route defined.")
return self.route
def dispatch(self, *, event: Event) -> Any:
"""
Gets the configured route and invokes the callable.
:param event: The event to pass to the callable route.
"""
messages = event.raw.get("Records", None)
if messages is None:
raise ValueError("No messages present in Event.")
for raw_message in messages:
message = self._get_message(raw_message, event=event)
route = self.get_route(message=message)
# Process each message now.
route(message=message)
# SQS Lambdas don't return a value.
return None
```
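A short usage sketch for ``EventField``, routing on a top-level ``"type"`` key; the payload and handler are illustrative:
```python
from lambda_router.events import LambdaEvent
from lambda_router.routers import EventField

router = EventField(key="type")

def on_ping(*, event):
    return {"message": "pong"}

router.add_route(fn=on_ping, key="ping")
event = LambdaEvent.create(raw={"type": "ping"}, app=None)
print(router.dispatch(event=event))  # {'message': 'pong'}
```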
#### File: lambda-router/tests/test_appsync.py
```python
import copy
import pytest # noqa: F401
from lambda_router import appsync
@pytest.fixture(scope="module")
def example_request():
return {
"field": "getAssets",
"details": {
"arguments": {},
"identity": {
"claims": {
"sub": "2067d7de-8976-4790-921a-040892531db7",
"device_key": "eu-",
"event_id": "bab1a4b5-5055-4580-8e5f-8587a53d7e9a",
"token_use": "access",
"scope": "aws.cognito.signin.user.admin",
"auth_time": 1579158955,
"iss": "https: //cognito-idp.eu-west-1.amazonaws.com/eu-west-1_asdasdas",
"exp": 1579162555,
"iat": 1579158955,
"jti": "6e48da7c-7100-48a8-8faf-3d42a506f138",
"client_id": "abcde",
"username": "<PASSWORD>",
},
"defaultAuthStrategy": "ALLOW",
"groups": None,
"issuer": "https://cognito-idp.eu-west-1.amazonaws.com/eu-west-1_asdasdas",
"sourceIp": ["1.1.1.2"],
"sub": "2067d7de-8976-4790-921a-040892531db7",
"username": "<PASSWORD>",
},
"source": None,
"result": None,
"request": {
"headers": {
"x-forwarded-for": "1.1.1.2, 5.5.5.5",
"accept-encoding": "gzip, deflate",
"cloudfront-viewer-country": "ZA",
"cloudfront-is-tablet-viewer": "false",
"via": "1.1 abcde.cloudfront.net (CloudFront)",
"content-type": "application/json",
"cloudfront-forwarded-proto": "https",
"x-amzn-trace-id": "Root=1-",
"x-amz-cf-id": "aaa",
"authorization": "...",
"content-length": "105",
"x-forwarded-proto": "https",
"host": "a.appsync-api.eu-west-1.amazonaws.com",
"user-agent": "python-requests/2.20.1",
"cloudfront-is-desktop-viewer": "true",
"accept": "*/*",
"cloudfront-is-mobile-viewer": "false",
"x-forwarded-port": "443",
"cloudfront-is-smarttv-viewer": "false",
}
},
"info": {"fieldName": "getAssets", "parentTypeName": "Query", "variables": {}},
"error": None,
"prev": None,
"stash": {},
"outErrors": [],
},
}
class TestAppSyncEvent:
def test_create(self, example_request):
template = {"context": "details"}
event = appsync.AppSyncEvent.create(raw=example_request, app={}, template=template)
assert isinstance(event, appsync.AppSyncEvent)
assert "2067d7de-8976-4790-921a-040892531db7" == event.identity.username
assert "getAssets" == event.info.field_name
assert "content-length" in event.request.headers
assert {} == event.arguments
class TestAppSyncField:
def test_add_route(self):
router = appsync.AppSyncField()
def test_route_one(event):
return {"message": "ok"}
def test_route_two(event):
return {"message": "ok"}
router.add_route(fn=test_route_one, field="one")
router.add_route(fn=test_route_two, field="two")
assert router.routes
assert 2 == len(router.routes)
assert "one" in router.routes
assert "two" in router.routes
def test_get_route(self, example_request):
router = appsync.AppSyncField()
router.add_route(fn=lambda event: "ok", field="getAssets")
event = appsync.AppSyncEvent.create(raw=example_request, app={}, template={"context": "details"})
route = router.get_route(event=event)
assert route is not None
assert callable(route)
def test_get_route_with_invalid_field(self, example_request):
router = appsync.AppSyncField()
router.add_route(fn=lambda event: "ok", field="getPeople")
event = appsync.AppSyncEvent.create(raw=example_request, app={}, template={"context": "details"})
with pytest.raises(ValueError) as e:
router.get_route(event=event)
assert "No route configured for given field (field)." in str(e.value)
def test_dispatch(self, example_request):
router = appsync.AppSyncField()
def test_route_one(event):
return {"message": "ok"}
def test_route_two(event):
return {"message": "error"}
router.add_route(fn=test_route_one, field="getAssets")
router.add_route(fn=test_route_two, field="getPeople")
second_request = copy.deepcopy(example_request)
second_request["details"]["info"]["fieldName"] = "getPeople"
event = appsync.AppSyncEvent.create(raw=example_request, app={}, template={"context": "details"})
event2 = appsync.AppSyncEvent.create(raw=second_request, app={}, template={"context": "details"})
response = router.dispatch(event=event)
assert {"message": "ok"} == response
response = router.dispatch(event=event2)
assert {"message": "error"} == response
def test_dispatch_without_route(self, example_request):
router = appsync.AppSyncField()
with pytest.raises(ValueError) as e:
event = appsync.AppSyncEvent.create(raw=example_request, app={}, template={"context": "details"})
router.dispatch(event=event)
assert "No route configured" in str(e.value)
``` |
{
"source": "jpain3/Taming-the-Bull",
"score": 3
} |
#### File: Taming-the-Bull/Model-Free Approaches/BeerGame_Stocastic_Train_DQN.py
```python
import csv
import datetime
import random
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
from gym import Env, spaces
from gym.spaces import Discrete, Box
# https://github.com/jmpf2018/ShipAI
## Creating Environment
class BeerGameEnv(Env):
def __init__(self):
# Define the action and observation spaces
# Definition of the limits on orders the AI agent can make
self.AI_Entity = True
self.AI_Order_Plus = True # If set to true, then the agent order is relative to the incoming order
# and the action_space above should reflect that by spanning both
# positive and negative values.
if self.AI_Order_Plus != True:
MaxOrder = 50
self.action_space = spaces.Discrete(MaxOrder+1) # a discrete array representing the possible entity order choices,
# starting from 0
else:
Relative_Min_Order = -20 # Amount relative to the incoming the order the agent
Relative_Max_Order = +20 # can order itself
#Force sign conventions on Relative_ Max_ or Min_Order just in case
self.Relative_Min_Order = (-1)*np.sign(Relative_Min_Order)*Relative_Min_Order
self.Relative_Max_Order = np.sign(Relative_Max_Order)*Relative_Max_Order
#Determine action space full span (including 0)
Action_Space_Span = (-1)*self.Relative_Min_Order+self.Relative_Max_Order+1
self.action_space = spaces.Discrete(Action_Space_Span)
# Set Global state parameters
self.Random_Teams = True
self.Fixed_Team_Nb = 0 # Team number to place the AI on.
# If Random_Teams flag is True, then it is ignored
self.Random_AI_Position = True
self.AI_Position = 3 # Position in the supply chain to place the AI in, between 0 and 3.
# If Random_AI_Position flag is True, then it is ignored
self.Random_Horizon = True
self.min_horizon = 24 # If the Horizon is random, the lower bound on the horizon length
self.max_horizon = 200 # If the Horizon is random, the upper bound on the horizon length
self.fixed_horizon = 104 # Fixed horizon, only used if above Random_Horizon flag is set to False
self.Integer_Ordering = True
self.Noisey_Ordering = True
# Customer Order String
# Classic Beer Game
Step_Round = 4
self.Orders = ([4] * Step_Round) + ([9] * (1000 - Step_Round))
Second_Step = 150
self.Orders = self.Orders[0:Second_Step] + ([9] * (1000 - Second_Step))
# Finanical and Physical layout of the game
self.Holding_Cost = 0.50
self.Backorder_Cost = 1.00
self.Initial_OrderFlows = self.Orders[0]
self.Initial_Inventory = 12
self.Information_Delay = 2
self.Shipping_Delay = 2
# State space for the problem. Need to decide the scale of this.... what can the AI see and remember?
Agent_Sees = {'Agent_Order_Received': Box(0, np.inf, shape=(1,)),
'Agent_OH_Inventory': Box(0, np.inf, shape=(1,)),
'Agent_Backorder': Box(0, np.inf, shape=(1,)),
'Agent_Recent_Order': Box(0, np.inf, shape=(1,)),
'period': Box(0, 1000, shape=(1,)),
# NOTE: This could be bounded by np.inf but realistically t=1000 is an order of magnitude larger than the expected maximum time horizon
'AI_Entity_Index': Box(0, 3, shape=(1,))
# NOTE: This should be upper bounded by the largest possible entity index (reminder that Python indexes at 0)
}
# self.observation_space = gym.spaces.Dict(Agent_Sees)
# State space coerced into a box shape to better work with Keras syntax, note that the ShippingFlows are two
# different items here. Furthermore, note that this is defined via the Box shape, which is continuous, even
# though some of these observations may always be discrete (AI_Entity_Index) as an example
obs_space = spaces.Box(low=np.array([0, 0, 0, 0, 0, 0]),
high=np.array([np.inf, np.inf, np.inf, np.inf, 1000, 3]))
self.observation_space = obs_space
# Import the parameters from the classic beer game runs for use with Random Team matching
Team_Parameter_Filename = "JS Parameter Table.csv"
with open(Team_Parameter_Filename, newline='') as csvfile:
Team_Parameter_Data = list(csv.reader(csvfile))
All_Team_Parameters = np.asarray(Team_Parameter_Data)
# Remove header row
Team_Parameter_Header = All_Team_Parameters[0, :]
All_Team_Parameters = np.delete(All_Team_Parameters, (0), axis=0)
# Replace all blanks with 0's
All_Team_Parameters = np.asarray([[x or '0' for x in xs] for xs in All_Team_Parameters])
# Extract the team numbers and convert to integers or numbers from strings as appropriate
self.Team_Index = [int(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 1])]
self.Team_Name = np.ndarray.tolist(All_Team_Parameters[:, 0])
self.Entity_Code = np.ndarray.tolist(All_Team_Parameters[:, 2])
self.Entity_Index = [int(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 3])]
self.thetas = [float(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 4])]
self.alphas = [float(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 5])]
self.betas = [float(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 6])]
self.S_primes = [float(item) for item in np.ndarray.tolist(All_Team_Parameters[:, 7])]
if self.Fixed_Team_Nb >= min(self.Team_Index) and self.Fixed_Team_Nb <= max(self.Team_Index):
Match_Team = self.Fixed_Team_Nb
# Create a mask of the rows that correspond to that team number
Match_Team_Mask = np.asarray(self.Team_Index) == Match_Team
# Filter, using the mask, the arrays that have the data for the team that was drawn
Match_Team_Theta = np.asarray(self.thetas)[Match_Team_Mask]
Match_Team_Alpha = np.asarray(self.alphas)[Match_Team_Mask]
Match_Team_Beta = np.asarray(self.betas)[Match_Team_Mask]
Match_Team_S_prime = np.asarray(self.S_primes)[Match_Team_Mask]
# Assemble the team parameters into a named list for later use in the main Beer Game function
Match_Team_Parameter_df = {"theta": np.ndarray.tolist(Match_Team_Theta),
"alpha_s": np.ndarray.tolist(Match_Team_Alpha),
"beta": np.ndarray.tolist(Match_Team_Beta),
"S_prime": np.ndarray.tolist(Match_Team_S_prime)}
self.Parameter_df = Match_Team_Parameter_df
else:
# Set the default ordering parameter values (for use if the random team flag above is False or no team number is provided)
self.Parameter_df = {"theta": [0.36] * 4,
"alpha_s": [0.26] * 4,
"beta": [0.34] * 4,
"S_prime": [17] * 4}
# Main function that runs the beer game for a single step
def PirateBeerGame_funct(self, AI_Entity_Index, AI_Order, Orders, Order_flows, Shipping_flows, OH_Inventory,
Backorder, L_hat, Production_Request, AI_Entity=False, AI_parameter_df=False,
Integer_Ordering=False, Noisey_Ordering=False, Noise_Mean=0, Noise_SD=1,
Parameter_df=False, Shipping_Delay=2, Information_Delay=2) -> object:
Relative_Ordering = self.AI_Order_Plus
#If relative ordering is true, translate the agent action into a relative number
if Relative_Ordering == True:
AI_Relative_Order = AI_Order + self.Relative_Min_Order # + 1
Final_Orders = np.empty(4, dtype=float)
OH_Inventory = np.array(OH_Inventory)
Shipping_flows = np.array(Shipping_flows)
Order_flows = np.array(Order_flows)
# Ensure that the order flow facing the retailer is the actual customer order
Order_flows[0, 0] = Orders
# Read in the ordering paramters
if Parameter_df != False:
theta = Parameter_df['theta']
alpha_s = Parameter_df['alpha_s']
beta = Parameter_df['beta']
S_prime = Parameter_df['S_prime']
else:
theta = [0.36] * 4
alpha_s = [0.26] * 4
beta = [0.34] * 4
S_prime = [17] * 4
TeamName = "Default Average Agents"
# Read in AI Ordering Parameters if present
if AI_parameter_df != False:
theta[AI_Entity_Index] = AI_parameter_df['theta']
alpha_s[AI_Entity_Index] = AI_parameter_df['alpha_s']
beta[AI_Entity_Index] = AI_parameter_df['beta']
S_prime[AI_Entity_Index] = AI_parameter_df['S_prime']
#####Receive Inventory and Advance Shipping Delays#####
# Receive shipments
New_OH_Inventory = OH_Inventory + Shipping_flows[:, 0]
# Advance shipping delays
Shipping_flows[:, 0] = Shipping_flows[:, (Shipping_Delay - 1)]
# Shipping_flows[:, (Shipping_Delay - 1)] = np.nan
#####Fill Orders######
# View Orders
Order_Received = Order_flows[:, 0]
# Calculate net order that needs to be fulfilled
Incoming_Order = Order_flows[:, 0] + Backorder
# Ship what you can
Outbound_shipments = np.maximum(0, np.minimum(New_OH_Inventory, Incoming_Order))
# Put shipments into lefthand shipping slot
Shipping_flows[0:3, 1] = Outbound_shipments[1:]
# Send shipments from retailer to the final customer
Final_Customer_Orders_Filled = Outbound_shipments[0]
# Update the On-Hand Inventory to account for outflows
OH_Inventory = New_OH_Inventory - Outbound_shipments
# Determine Backlog, if any
Inventory_Shortage = Order_flows[:, 0] - New_OH_Inventory
New_Backorder = np.maximum(0, Backorder + Inventory_Shortage)
Backorder = np.copy(New_Backorder)
# Remember observed order but then overwrite processed order flow to NaN for debugging if needed
Observed_Order = np.copy(Order_flows[:, 0])
# Order_flows[:, 0] = np.nan ## ORIGINAL LINE OF CODE!!!! REPLACED TO AVOID NAN ISSUES !!!!!
# Order_flows[:, 0] = 0
#####Advance Order Slips and Brewers Brew######
# Advance order slips
Order_flows[:, 0] = Order_flows[:, (Information_Delay - 1)]
# Order_flows[:, (Information_Delay - 1)] = np.nan
# Brewers Brew
Shipping_flows[3, (Shipping_Delay - 1)] = Production_Request
#####PLACE ORDERS######
for i in range(0, 4):
Entity_Index = i
# Observe the total supply line and the previous demand
SL = sum(Shipping_flows[Entity_Index, :])
L = Observed_Order[Entity_Index]
# L hat is smoothing of observed demand from previous 2 periods
# if t == 0:
# L_hat[Entity_Index] = np.copy(Observed_Order[Entity_Index])
# Update L_hat (expected future orders) based on observed order
L_hat_new = theta[Entity_Index] * L + (1 - theta[Entity_Index]) * L_hat[Entity_Index]
L_hat[Entity_Index] = L_hat_new
# Note stock of current inventory
S = OH_Inventory[Entity_Index]
#! Note stock of current inventory inclusive of backorder position
S = OH_Inventory[Entity_Index] - Backorder[Entity_Index]
# Add noise to the order if needed
if (Noisey_Ordering == True):
eps = np.random.normal(Noise_Mean, Noise_SD)
else:
eps = 0
# AI Decision
if (AI_Entity == True) and (Entity_Index == AI_Entity_Index):
if AI_Order is not False:  # 'is not' so that a legitimate agent action of 0 is not treated as False
#Check if agent decision is absolute or relative to the last order received
if (Relative_Ordering != True):
# here, agent action is absolute
Order_Placed = max(0,AI_Order)
else:
# here, the agent action is relative to the order received
Order_Placed = max(0,Order_Received[AI_Entity_Index] + AI_Relative_Order)
else:
Order_Placed = max(0, L_hat[Entity_Index] + alpha_s[Entity_Index] * (
S_prime[Entity_Index] - S - beta[Entity_Index] * SL) + eps)
else:
Order_Placed = max(0, L_hat[Entity_Index] + alpha_s[Entity_Index] * (
S_prime[Entity_Index] - S - beta[Entity_Index] * SL) + eps)
##TURN ON FOR INTEGER ONLY ORDERING
if Integer_Ordering:
Order_Placed = np.round(Order_Placed, 0)
if Entity_Index == 3:
Production_Request = Order_Placed
else:
Order_flows[Entity_Index + 1, (Information_Delay - 1)] = Order_Placed
# End of loop
# Make orders placed by each entity explict
Final_Orders[0:3] = Order_flows[1:, (Information_Delay - 1)]
Final_Orders[3] = Production_Request
fnt_output = {"Order_flows": Order_flows, "Shipping_flows": Shipping_flows, "OH_Inventory": OH_Inventory,
"Backorder": Backorder, "L_hat": L_hat, "Production_Request": Production_Request,
"Entity_Orders": Final_Orders, "Order_Received": Order_Received}
return fnt_output
# Resets the state space to the initial conditions for another episode run
# Note that the output format for this function MUST match the output for the step function
# Any additional clean up or resetting of helper variables should occur elsewhere
def reset(self):
##################
# Assign and reset random game parameters
##################
#### Randomly Draw new teammates if applicable
if self.Random_Teams:
# Randomly draw a team number
Rand_Team = random.randint(min(self.Team_Index), max(self.Team_Index))
# Create a mask of the rows that correspond to that team number
Rand_Team_Mask = np.asarray(self.Team_Index) == Rand_Team
# Filter, using the mask, the arrays that have the data for the team that was drawn
Rand_Team_Theta = np.asarray(self.thetas)[Rand_Team_Mask]
Rand_Team_Alpha = np.asarray(self.alphas)[Rand_Team_Mask]
Rand_Team_Beta = np.asarray(self.betas)[Rand_Team_Mask]
Rand_Team_S_prime = np.asarray(self.S_primes)[Rand_Team_Mask]
# Assemble the team parameters into a named list for later use in the main Beer Game function
Rand_Team_Parameter_df = {"theta": np.ndarray.tolist(Rand_Team_Theta),
"alpha_s": np.ndarray.tolist(Rand_Team_Alpha),
"beta": np.ndarray.tolist(Rand_Team_Beta),
"S_prime": np.ndarray.tolist(Rand_Team_S_prime)}
self.Parameter_df = Rand_Team_Parameter_df
#### Randomly set game horizon if applicable
if self.Random_Horizon == True:
self.horizon = random.randint(self.min_horizon, self.max_horizon)
else:
self.horizon = self.fixed_horizon
#### Randomly set the agent's position on the team
if self.Random_AI_Position:
self.AI_Entity_Index = random.randint(0, 3)
else:
self.AI_Entity_Index = self.AI_Position
##################
# Resetting the global game parameters
##################
# Reset the time period to t=0 for the beginning of the game
self.period = 0
# Reset the various stocks of material both within and outside each player's position
self.Order_flows = np.full([4, 2], self.Initial_OrderFlows, dtype=float)
self.Shipping_flows = np.full([4, 2], self.Initial_OrderFlows, dtype=float)
self.OH_Inventory = [self.Initial_Inventory] * 4
self.Backorder = [0] * 4
self.Order_Received = [self.Initial_OrderFlows] * 4
self.L_hat = [self.Initial_OrderFlows] * 4
self.Order_History = np.full([4, self.horizon], 0, dtype=float)
self.Service_rate = [0] * self.horizon
self.OH_Inventory_History = np.full([4, self.horizon], 0, dtype=float)
self.Backlog_History = np.full([4, self.horizon], 0, dtype=float)
self.Production_Request = self.Initial_OrderFlows
self.Final_Orders = [0] * 4 # delete?
self.Amp_Vector = [0] * self.horizon # delete?
self.Reward_Vector = [0] * self.horizon # delete?
# Largely for later debugging and for record keeping, assemble the various items to reset at a global level
# together into a single list
# Output = {"AI_Entity_Index": AI_Entity_Index, "Parameter_df": Parameter_df,"horizon": horizon, "period": period,
# "Orders": Orders, "Order_flows": Order_flows, "Shipping_flows": Shipping_flows,
# "OH_Inventory": OH_Inventory, "Backorder": Backorder, "L_hat": L_hat,
# "Order_History": Order_History, "Service_rate": Service_rate,
# "OH_Inventory_History": OH_Inventory_History, "Backlog_History": Backlog_History,
# "Production_Request": Production_Request, "Amp_Vector": Amp_Vector, "Reward_Vector": Reward_Vector}
# Global_State = Output
# globals().update(Global_State)
##################
# Subset the global parameters to just those the agent is able to observe
##################
# Subset the full global state to just the part the agent has access to
Agent_Order_Received = self.Order_Received[self.AI_Entity_Index]
Agent_OH_Inventory = self.OH_Inventory[self.AI_Entity_Index]
Agent_Backorder = self.Backorder[self.AI_Entity_Index]
if self.AI_Entity_Index == 3:
Agent_Recent_Order = self.Production_Request
else:
Agent_Recent_Order = self.Order_flows[self.AI_Entity_Index + 1, (self.Information_Delay - 1)]
AI_Entity_Index = self.AI_Entity_Index
period = self.period
# Note: The observed state outputted by the reset function MUST match the shape as that from the step function
# and must ONLY consist of the parts of the global state the agent can actually observe
Observed_State = np.array([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,
Agent_Recent_Order, period, AI_Entity_Index])
return (Observed_State)
# Takes the action by the agent, along with some simulation specific parameters, and updates the state
# Note that the output format of this function MUST match the output for the reset function
# def step(self, action, Integer_Ordering, Noisey_Ordering, Parameter_df, AI_Entity_Index, CustOrders, horizon, period, OH_Inventory_History, Backlog_History):
def step(self, action):
# import globally assigned environmental variables
global period, AI_Entity_Index
# Check if the current period is the final one (the Horizon) and return dn=True for 'done' state
# Recall that Python indexes starting at 0! So if Horizon is t=52, need to stop at period = 51
if self.period == (self.horizon - 1):
dn = True
else:
dn = False
# Run the beer game function for a single step
BeerGame_output = self.PirateBeerGame_funct(AI_Entity_Index=self.AI_Entity_Index, AI_Order=action,
Orders=self.Orders[self.period], Order_flows=self.Order_flows,
Shipping_flows=self.Shipping_flows, OH_Inventory=self.OH_Inventory,
Backorder=self.Backorder, L_hat=self.L_hat,
Production_Request=self.Production_Request,
AI_Entity=self.AI_Entity,
Noisey_Ordering=self.Noisey_Ordering,
Integer_Ordering=self.Integer_Ordering,
Parameter_df=self.Parameter_df)
# Note on output of above function call:
# fnt_output = {"Order_flows": Order_flows, "Shipping_flows": Shipping_flows, "OH_Inventory": OH_Inventory,
# "Backorder": Backorder, "L_hat": L_hat, "Production_Request": Production_Request,
# "Entity_Orders": Final_Orders, "Order_Received": Order_Received}
self.Order_flows = BeerGame_output['Order_flows']
self.Shipping_flows = BeerGame_output['Shipping_flows']
self.OH_Inventory = BeerGame_output['OH_Inventory']
self.Backorder = BeerGame_output['Backorder']
self.L_hat = BeerGame_output['L_hat']
self.Production_Request = BeerGame_output['Production_Request']
self.Order_Received = BeerGame_output['Order_Received']
# Don't use 'Entity_Orders' output right now
info = dict()
# Reward in any time other than the final time is the cost incurred by the AI that round.
# But in the final state, it's the total cost incurred by the entire team!
# Calculation of the running cost incurred so far for the entire team...
self.OH_Inventory_History[:, self.period] = BeerGame_output['OH_Inventory']
self.Backlog_History[:, self.period] = BeerGame_output['Backorder']
# Calculation of the cost incurred by the AI for just this one period...
Period_OH_Inventory = BeerGame_output['OH_Inventory']
Period_Backorder = BeerGame_output['Backorder']
AI_period_OH_Inventory = Period_OH_Inventory[self.AI_Entity_Index]
AI_period_Backorder = Period_Backorder[self.AI_Entity_Index]
AI_period_cost = AI_period_OH_Inventory * self.Holding_Cost + AI_period_Backorder * self.Backorder_Cost
AI_Reward = -AI_period_cost
reward = AI_Reward
# In the final round, the reward is the total team cost, offset by the costs incurred by the AI so far
# in order to make the entire episode cost the standard team cost
if dn == True:
Costs_Per_Period = self.OH_Inventory_History * self.Holding_Cost + self.Backlog_History * self.Backorder_Cost
Total_Costs_Per_Entity = np.sum(Costs_Per_Period, 1)
Total_Team_Costs = sum(Total_Costs_Per_Entity)
Team_Reward = -Total_Team_Costs
reward = Team_Reward #+ Total_Costs_Per_Entity[self.AI_Entity_Index]
#normalize final reward by the horizon
#if self.Random_Horizon == True:
# reward = reward/self.horizon
reward = reward / self.horizon
# Alt reward calculation
#reward = AI_Reward + Team_Reward / (self.period + 1) # Team_Reward matters more and more as time goes on?
#### Subset the global state to just the parts the agent has access to
Agent_Order_Received = self.Order_Received[self.AI_Entity_Index]
Agent_OH_Inventory = self.OH_Inventory[self.AI_Entity_Index]
Agent_Backorder = self.Backorder[self.AI_Entity_Index]
if self.AI_Entity_Index == 3:
Agent_Recent_Order = self.Production_Request
else:
Agent_Recent_Order = self.Order_flows[self.AI_Entity_Index + 1, (self.Information_Delay - 1)]
AI_Entity_Index = self.AI_Entity_Index
# Add to the period number
self.period += 1
period = self.period
Observed_State = np.array([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,
Agent_Recent_Order, period, AI_Entity_Index])
return Observed_State, reward, dn, info
## Main Code
if __name__ == '__main__':
from gym import Env, spaces
# Import methods to build DQN agent
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy,MaxBoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory
# Get environment and set seed for reproducibility
env = BeerGameEnv()
Set_Random_Seed = True
if Set_Random_Seed:
Random_Seed = 11111111
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(Random_Seed)
random.seed(Random_Seed)
tf.random.set_seed(Random_Seed)
env.action_space.seed(Random_Seed)
# Count number of actions
nb_actions = env.action_space.n
# Build a simple model.
WINDOW_LENGTH = 4
input_shape = env.observation_space.shape
model = Sequential()
model.add(Flatten(input_shape = (WINDOW_LENGTH,) + env.observation_space.shape))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Configure and compile the DQN agent
memory = SequentialMemory(limit=2000, window_length=WINDOW_LENGTH)
#policy = BoltzmannQPolicy()
policy = MaxBoltzmannQPolicy()
#policy = EpsGreedyQPolicy()
#Note: the Boltzmann policy with DQN overestimates Q values, causing action probabilities to explode...
#Double DQN helps mitigate this Q-value overestimation a bit
#Dueling networks appear to allow for a full run
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,
target_model_update=1e-2, policy=policy, enable_dueling_network=True, dueling_type='avg')
dqn.compile(Adam(lr=1e-4), metrics=['mae'])
mode = "Train"
#mode = "Test"
if mode == "Train":
now = datetime.datetime.now()
dt_string = now.strftime("%Y%m%d_%H%M%S")
ENV_NAME = "Beer_Game_Stocastic_DQN"
print('Training model....')
Full_Hist = dqn.fit(env, nb_steps=1e5, visualize=False, verbose=2)
Training_History = Full_Hist.history
#weight_filename = f'dqn_{ENV_NAME}_{dt_string}_weights.h5f'
weight_filename = 'dqn_test_fit.weights'
#model_filename = 'dqn_test_fit_wide'
model_filename = 'dqn_test_fit'
#model_filename = 'Entity3Test'
dqn.save_weights(weight_filename, overwrite=True)
dqn.model.save(model_filename, overwrite=True)
print('Training completed! Testing and printing Average Episodic Rewards')
dqn.test(env, nb_episodes=10, visualize=False)
avg_reward_list = Training_History['episode_reward']
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.title("Avg. Episodic Reward")
plt.show()
if mode == "Test":
### Load a saved and trained model
#wieght_filename = "ddpg_Beer_Game_Stocastic_DQN_20210614_115704_weights.h5f"
wieght_filename = 'dqn_test_fit.weights'
model_filename = 'dqn_test_fit.model'
#Trained_DQN = tf.saved_model.load(model_filename)
#dqn.load_weights(weight_filename)
#test_out = dqn.test(env,nb_episodes=10, visualize=False)
agent = tf.keras.models.load_model(model_filename)
#Get the implied window size used when originally training the loaded model:
model_input_shape = (agent.get_layer(index=0).output_shape)[0] #Get the shape attribute from the input layer
Original_Batch_Size = model_input_shape[0] #First number is the number of items looked at simultaneously
Original_Window_Size = model_input_shape[1] #Second number is the window used for any sequential memory
Original_Observation_Size = model_input_shape[2] #Third number and (and onwards for multi dimensional inputs) is the actual observed space
sub_mode = "Full"
sub_mode = "Single"
if sub_mode == "Single":
###Test for single observation:
#Observed_State = np.array([Agent_Order_Received, Agent_OH_Inventory, Agent_Backorder,
# Agent_Recent_Order, period, AI_Entity_Index])
#Note: need to multiply by window length!
obs = np.array([4, 0, 5,
4, 10, 2])
#Extract the order received (index 0 of the observation) for use in relative ordering
Agent_Order_Received = obs[0]
#Expand initial observation out to fill history or window length
obs = np.tile(obs,(Original_Window_Size,1))
#Coerce the 1-D observation input into a 3-D array that TensorFlow will flatten and accept
resized_obs = obs[np.newaxis, ...]
qmatrix = agent.predict(resized_obs)
flattened_q = np.ndarray.flatten(qmatrix)
BestChoice = np.argmax(flattened_q)
Relative_Order = BestChoice + env.Relative_Min_Order # + 1 double check this plus one here...
Agent_Order = max(0, Agent_Order_Received + Relative_Order)
print("Agent Order:")
print(Agent_Order)
if sub_mode == "Full":
horizon = 120
# Replay a full game with no AI agent by driving the environment's internal simulation directly
env.Random_Horizon = False
env.fixed_horizon = horizon
env.reset()
Order_History = np.full([4, horizon], 0, dtype=float)
OH_Inventory_History = np.full([4, horizon], 0, dtype=float)
Backlog_History = np.full([4, horizon], 0, dtype=float)
Parameter_df = {"theta": [0.36] * 4,
"alpha_s": [0.26] * 4,
"beta": [0.34] * 4,
"S_prime": [17] * 4}
Holding_Cost = 0.50
Backorder_Cost = 1.00
AI_Entity = False
AI_Entity_Index = False
AI_Order = False
Integer_Ordering = True
Noisey_Ordering = False
for t in range(0, (horizon)):
# Run the game for one time step via the environment's helper and collect the new state
BeerGame_output = env.PirateBeerGame_funct(AI_Entity_Index=AI_Entity_Index, AI_Order=AI_Order,
Orders=env.Orders[t],
Order_flows=env.Order_flows,
Shipping_flows=env.Shipping_flows, OH_Inventory=env.OH_Inventory,
Backorder=env.Backorder, L_hat=env.L_hat,
Production_Request=env.Production_Request, AI_Entity=AI_Entity,
Noisey_Ordering=Noisey_Ordering,
Integer_Ordering=Integer_Ordering,
Parameter_df=Parameter_df)
env.Order_flows = BeerGame_output['Order_flows']
env.Shipping_flows = BeerGame_output['Shipping_flows']
env.OH_Inventory = BeerGame_output['OH_Inventory']
env.Backorder = BeerGame_output['Backorder']
env.L_hat = BeerGame_output['L_hat']
env.Production_Request = BeerGame_output['Production_Request']
# Write values for analysis/plotting later
Order_History[:, t] = BeerGame_output['Entity_Orders']
OH_Inventory_History[:, t] = BeerGame_output['OH_Inventory']
Backlog_History[:, t] = BeerGame_output['Backorder']
# (A per-period service rate would need the quantity shipped to the final customer,
# which PirateBeerGame_funct does not currently return.)
# Calculate costs
Net_Inventory = OH_Inventory_History - Backlog_History
Costs_Per_Period = OH_Inventory_History * Holding_Cost + Backlog_History * Backorder_Cost
Total_Costs_Per_Entity = np.sum(Costs_Per_Period, 1)
Total_Team_Costs = sum(Total_Costs_Per_Entity)
print(Order_History)
print(Total_Team_Costs)
###GRAPHS###
import matplotlib.pyplot as plt
x = range(0, horizon)
plt.figure(1)
PlotObj = plt.plot(Order_History.T)
plt.title('Orders per Period')
plt.xlabel('Time')
plt.ylabel('Orders')
# showing legend
plt.legend(iter(PlotObj), ('0: Retailer', '1: Wholesaler', '2: Distributor', '3: Factory'))
plt.figure(2)
PlotObj = plt.plot(Net_Inventory.T)
plt.title('Net Inventory per Period')
plt.xlabel('Time')
plt.ylabel('Net Inventory (On-Hand less Backlog)')
# showing legend
plt.legend(iter(PlotObj), ('0: Retailer', '1: Wholesaler', '2: Distributor', '3: Factory'))
plt.show()
``` |
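A minimal sketch of exercising ``BeerGameEnv`` with random actions, assuming the module above is importable and the ``JS Parameter Table.csv`` file read in ``__init__`` is available in the working directory:
```python
env = BeerGameEnv()
obs = env.reset()
done, total_reward = False, 0.0
while not done:
    action = env.action_space.sample()  # random relative order adjustment
    obs, reward, done, info = env.step(action)
    total_reward += reward
print("episode reward:", total_reward)
```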
{
"source": "JPaiv/rust-movie-serverless-api",
"score": 2
} |
#### File: rust-movie-serverless-api/source-file-download/handler.py
```python
import boto3
import logging
import os
import subprocess
import sys
import tempfile
import zipfile
logging.getLogger().setLevel("INFO")
def handler(event, context):
temp_dir = tempfile.mkdtemp()
process = subprocess.run(['kaggle', 'datasets', 'download', 'shivamb/netflix-shows', '--path', temp_dir],
stdout=subprocess.PIPE,
universal_newlines=True)
logging.info("kaggle download finished with return code %s", process.returncode)
with zipfile.ZipFile(f"{temp_dir}/netflix-shows.zip", 'r') as source_file_zip:
source_file_zip.extractall(temp_dir)
source_file_csv = [file for file in os.listdir(
temp_dir) if ".csv" in file]
logging.info("Extracted source file: %s", source_file_csv[0])
_upload_source_data_to_s3(os.path.join(temp_dir, source_file_csv[0]))
def _upload_source_data_to_s3(source_file_csv: str):
s3 = boto3.resource('s3')
s3.Bucket(os.environ["bucket_id"]).upload_file(
source_file_csv, "netflix.csv")
``` |
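For a quick local check of the handler above, something like the following can be used; it assumes Kaggle credentials are configured and ``my-source-bucket`` is a placeholder for a real, writable bucket:
```python
import os

os.environ["bucket_id"] = "my-source-bucket"  # placeholder bucket name
handler(event={}, context=None)  # the handler ignores both arguments
```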
{
"source": "JPaja/Game-of-Life-Parallel",
"score": 3
} |
#### File: Game-of-Life-Parallel/src/Projekat1_1.py
```python
from threading import Condition, Event, Lock, Semaphore, Thread
import numpy as np
iterations = 3
n = 20
size = n**2
steps = np.zeros((iterations,n,n),dtype=bool)
#table = np.random.rand(size).reshape(n, n) > 0.5
table = np.zeros((n,n),dtype=bool)
table[0][1] = True
table[1][2] = True
table[2][0] = True
table[2][1] = True
table[2][2] = True
steps[0] = table
threads = np.zeros(table.shape,dtype=Thread)
brojaciSuseda = np.zeros(table.shape,dtype=int)
brojaciSusedaLocks = np.zeros(table.shape,dtype=object)
semafori = np.zeros(table.shape,dtype=Semaphore)
kondicijal = Condition()
lock = Lock()
brojac = 0
def printTable(table):
for i in range(0,table.shape[0]):
for j in range(0,table.shape[1]):
print(1 if table[i][j] else 0 ,end='')
print()
def getNeighborIndexes(i, j):
neighbors = []
for ii in range(-1,2):
for jj in range(-1,2):
if(ii == 0 and jj == 0):
continue
neighbors.append(((i+ii)%n, (j+jj)%n))
return neighbors
def executeNode(i,j):
global table
global iterations
global brojaciSuseda
global semafori
global brojaciSusedaLocks
global lock
global brojac
global size
neighbors = getNeighborIndexes(i,j)
iteration = 0
while iteration < iterations:
aliveNeighbors = 0
for (ii,jj) in neighbors:
brojaciSusedaLocks[ii][jj].acquire()
if table[ii][jj]:
aliveNeighbors += 1
brojaciSuseda[ii][jj]+= 1
if(brojaciSuseda[ii][jj] == 8): # in the case of edges we do not count non-existent nodes
brojaciSuseda[ii][jj] = 0
semafori[ii][jj].release()
brojaciSusedaLocks[ii][jj].release()
semafori[i][j].acquire()
state = table[i][j]
table[i][j] = False
if aliveNeighbors == 3 or (aliveNeighbors == 2 and state):
table[i][j] = True
steps[iteration][i][j] = table[i][j]
iteration += 1
lock.acquire()
brojac+= 1
kondicijal.acquire()
if(brojac == size):
brojac = 0
kondicijal.notifyAll()
lock.release()
else:
lock.release()
kondicijal.wait()
kondicijal.release()
for i in range(0,table.shape[0]):
for j in range(0,table.shape[1]):
t = Thread(target=executeNode, args=(i,j))
threads[i][j] = t
brojaciSusedaLocks[i][j] = Lock()
semafori[i][j] = Semaphore(0)
for i in range(0,table.shape[0]):
for j in range(0,table.shape[1]):
threads[i][j].start()
for i in range(0,table.shape[0]):
for j in range(0,table.shape[1]):
threads[i][j].join()
printTable(table)
print("Completed")
```
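The ``brojac`` counter plus the ``kondicijal`` condition above hand-roll a reusable barrier so that every cell finishes an iteration before any cell starts the next one. For comparison, a sketch of the same synchronization point written with ``threading.Barrier`` (a hypothetical refactor, not part of the original script):
```python
from threading import Barrier

n = 20
iterations = 3
barrier = Barrier(n * n)  # one party per cell thread

def execute_node(i, j):
    for _ in range(iterations):
        # ... exchange neighbour states and update cell (i, j) here ...
        barrier.wait()  # block until every cell has finished this iteration
```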
#### File: Game-of-Life-Parallel/src/Projekat1_3.py
```python
from multiprocessing import Process, Queue, Array
import numpy as np
iterations = 5
n = 10
def getNeighborIndexes(i, j):
neighbors = []
for ii in range(-1,2):
for jj in range(-1,2):
if(ii == 0 and jj == 0):
continue
neighbors.append(((i+ii)%n, (j+jj)%n))
return neighbors
def executeNode(i,j,state,serviceQueue,queues):
global iterations
neighbors = getNeighborIndexes(i,j)
serviceQueue.put((0,i,j,state))
for it in range(iterations - 1):
iteration = it + 1
for (ii,jj) in neighbors:
queues[ii][jj].put_nowait((state,iteration))
aliveNeighbors = 0
cachedneighbors = []
for (ii,jj) in neighbors:
while True:
(neighborValue,neighborIteration) = queues[i][j].get()
if(iteration != neighborIteration):
cachedneighbors.append((neighborValue,neighborIteration))
continue
if(neighborValue):
aliveNeighbors+=1
break
for cachedneighbor in cachedneighbors:
queues[i][j].put_nowait(cachedneighbor)
if aliveNeighbors == 3 or (aliveNeighbors == 2 and state):
state = 1
else:
state = 0
serviceQueue.put((iteration,i,j,state))
def executeService(serviceQueue,tableData):
global iterations
global n
for _ in range(iterations * n**2):
(iteration,i,j,state) = serviceQueue.get()
tableData[iteration * n**2 + i * n + j] = state
if __name__ == "__main__":
tableData = Array('i',np.zeros(iterations * n ** 2, dtype=int))
serviceQueue = Queue()
sharedProcess = Process(target=executeService, args=(serviceQueue,tableData))
sharedProcess.start()
table = np.zeros((n,n),dtype=int)
table[0][1] = 1
table[1][2] = 1
table[2][0] = 1
table[2][1] = 1
table[2][2] = 1
queues = np.zeros((n,n),dtype=object)
for i in range(n):
for j in range(n):
queues[i][j] = Queue()
processes = []
for i in range(n):
for j in range(n):
processes.append(Process(target=executeNode, args=(i,j,table[i][j],serviceQueue,queues)))
for process in processes:
process.start()
for process in processes:
process.join()
sharedProcess.join()
steps = np.zeros((iterations,n,n),dtype=int)
for index in range(iterations * n**2):
iteration = int(index / n**2)
i = int((index / n) % n)
j = int(index % n)
steps[iteration][i][j] = tableData[index]
print(steps)
``` |
{
"source": "JPaja/Pascal2C_Transpiler",
"score": 4
} |
#### File: Pascal2C_Transpiler/src/lexer.py
```python
from src.token import Class, Token
class Lexer:
def __init__(self, text):
self.text = text
self.len = len(text)
self.pos = -1
def read_space(self):
while self.pos + 1 < self.len and self.text[self.pos + 1].isspace():
self.next_char()
def read_int(self):
lexeme = self.text[self.pos]
while self.pos + 1 < self.len and self.text[self.pos + 1].isdigit():
lexeme += self.next_char()
return int(lexeme)
def read_char(self):
lexeme = ''
while self.pos + 1 < self.len and self.text[self.pos + 1] != '\'':
lexeme += self.next_char()
self.pos += 1
return lexeme
def read_string(self):
lexeme = ''
while self.pos + 1 < self.len and self.text[self.pos + 1] != '"':
lexeme += self.next_char()
self.pos += 1
return lexeme
def is_keyword(self,c):
return c.isalnum() or c == '_'
def read_keyword(self):
lexeme = self.text[self.pos]
while self.pos + 1 < self.len and self.is_keyword(self.text[self.pos + 1]):
lexeme += self.next_char()
if lexeme == 'div':
return Token(Class.DIV,lexeme)
elif lexeme == 'mod':
return Token(Class.MOD,lexeme)
elif lexeme == 'not':
return Token(Class.NOT,lexeme)
elif lexeme == 'or':
return Token(Class.OR,lexeme)
elif lexeme == 'xor':
return Token(Class.XOR,lexeme)
elif lexeme == 'and':
return Token(Class.AND,lexeme)
elif lexeme == 'begin':
return Token(Class.BEGIN,lexeme)
elif lexeme == 'end':
return Token(Class.END,lexeme)
elif lexeme == 'if':
return Token(Class.IF,lexeme)
elif lexeme == 'else':
return Token(Class.ELSE,lexeme)
elif lexeme == 'then':
return Token(Class.THEN,lexeme)
elif lexeme == 'for':
return Token(Class.FOR,lexeme)
elif lexeme == 'to':
return Token(Class.TO,lexeme)
elif lexeme == 'downto':
return Token(Class.DOWNTO,lexeme)
elif lexeme == 'do':
return Token(Class.DO,lexeme)
elif lexeme == 'while':
return Token(Class.WHILE,lexeme)
elif lexeme == 'break':
return Token(Class.BREAK,lexeme)
elif lexeme == 'continue':
return Token(Class.CONTINUE,lexeme)
elif lexeme == 'repeat':
return Token(Class.REPEAT,lexeme)
elif lexeme == 'until':
return Token(Class.UNTIL,lexeme)
elif lexeme == 'var':
return Token(Class.VAR,lexeme)
elif lexeme == 'of':
return Token(Class.OF,lexeme)
elif lexeme == 'procedure':
return Token(Class.PROCEDURE,lexeme)
elif lexeme == 'function':
return Token(Class.FUNCTION,lexeme)
elif lexeme == 'integer' or lexeme == 'char' or lexeme == 'string' or lexeme == 'real' or lexeme == 'boolean':
return Token(Class.TYPE, lexeme)
elif lexeme == 'array':
return Token(Class.Array, lexeme)
elif lexeme == 'exit':
return Token(Class.Exit, lexeme)
elif lexeme == 'true':
return Token(Class.BOOL, True)
elif lexeme == 'false':
return Token(Class.BOOL, False)
return Token(Class.ID, lexeme)
def next_char(self):
self.pos += 1
if self.pos >= self.len:
return None
return self.text[self.pos]
def next_token(self):
self.read_space()
curr = self.next_char()
if curr is None:
return Token(Class.EOF, curr)
elif curr.isdigit():
value = self.read_int()
curr = self.next_char()
if curr != '.':
self.pos -= 1
return Token(Class.INT, value)
curr = self.next_char()
if not curr.isdigit():
self.pos -= 2
return Token(Class.INT, value)
mantisa = self.read_int()
value = str(value) + '.' + str(mantisa)
value = float(value)
return Token(Class.Float, value)
elif self.is_keyword(curr):
return self.read_keyword()
elif curr == '\'':
text = self.read_char()
if(len(text) > 1):
return Token(Class.STRING, text)
if(len(text) == 1):
return Token(Class.CHAR, text[0])
return Token(Class.CHAR,'')
elif curr == '"':
return Token(Class.STRING, self.read_string())
elif curr == ':':
curr = self.next_char()
if curr == '=':
return Token(Class.ASSIGN, ':=')
self.pos -= 1
return Token(Class.Colon, ':')
elif curr == '+':
return Token(Class.PLUS, curr)
elif curr == '-':
return Token(Class.MINUS, curr)
elif curr == '*':
return Token(Class.STAR, curr)
elif curr == '/':
return Token(Class.FWDSLASH, curr)
elif curr == '=':
return Token(Class.EQ, curr)
elif curr == '<':
curr = self.next_char()
if curr == '>':
return Token(Class.NEQ, '<>')
elif curr == '=':
return Token(Class.LTE, '<=')
self.pos -= 1
return Token(Class.LT, '<')
elif curr == '>':
curr = self.next_char()
if curr == '=':
return Token(Class.GTE, '>=')
self.pos -= 1
return Token(Class.GT, '>')
elif curr == '(':
return Token(Class.LPAREN, curr)
elif curr == ')':
return Token(Class.RPAREN, curr)
elif curr == '[':
return Token(Class.LBRACKET, curr)
elif curr == ']':
return Token(Class.RBRACKET, curr)
elif curr == ';':
return Token(Class.SEMICOLON, curr)
elif curr == ',':
return Token(Class.COMMA, curr)
elif curr == '.':
curr = self.next_char()
if curr == '.':
return Token(Class.DOTDOT, '..')
self.pos -= 1
return Token(Class.DOT, '.')
self.die(curr)
def lex(self):
tokens = []
while True:
curr = self.next_token()
tokens.append(curr)
if curr.class_ == Class.EOF:
break
return tokens
def die(self, char):
raise SystemExit("Unexpected character: {}".format(char))
```
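A small sketch of running the lexer over a Pascal snippet and printing the resulting token stream; the snippet is illustrative:
```python
from src.lexer import Lexer

source = "begin x := x + 1; end."
for token in Lexer(source).lex():
    print(token)
```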
#### File: Pascal2C_Transpiler/src/parser.py
```python
from src.token import Class
from src.nodes import *
from functools import wraps
import pickle
class Parser:
def __init__(self, tokens):
self.tokens = tokens
self.curr = tokens.pop(0)
self.prev = None
def restorable(call):
@wraps(call)
def wrapper(self, *args, **kwargs):
state = pickle.dumps(self.__dict__)
result = call(self, *args, **kwargs)
self.__dict__ = pickle.loads(state)
return result
return wrapper
def eat(self, class_):
if self.curr.class_ == class_:
self.prev = self.curr
self.curr = self.tokens.pop(0)
else:
self.die_type(class_.name, self.curr.class_.name)
def program(self):
nodes = []
while self.curr.class_ in [Class.PROCEDURE, Class.FUNCTION]:
if self.curr.class_ == Class.PROCEDURE:
self.eat(Class.PROCEDURE)
id_ = Id(self.curr.lexeme)
self.eat(Class.ID)
args = self.args_()
self.eat(Class.SEMICOLON)
body = self.body()
self.eat(Class.SEMICOLON)
nodes.append(ProcImpl(id_,args,body))
elif self.curr.class_ == Class.FUNCTION:
self.eat(Class.FUNCTION)
id_ = Id(self.curr.lexeme)
self.eat(Class.ID)
args = self.args_()
self.eat(Class.Colon)
type_ = self.type()
self.eat(Class.SEMICOLON)
body = self.body()
self.eat(Class.SEMICOLON)
nodes.append(FuncImpl(type_,id_,args,body))
main = self.body()
self.eat(Class.DOT)
return Program(nodes,main)
def args_(self):
self.eat(Class.LPAREN)
declarations = []
ids =[]
while(self.curr.class_ != Class.RPAREN):
id_ = Id(self.curr.lexeme)
self.eat(Class.ID)
ids.append(id_)
if self.curr.class_ == Class.COMMA:
self.eat(Class.COMMA)
continue
self.eat(Class.Colon)
type_ = self.type()
declarations.append(Decl(type_, ids.copy(),None))
ids.clear()
if self.curr.class_ == Class.COMMA:
self.eat(Class.COMMA)
self.eat(Class.RPAREN)
return declarations
def array_(self):
self.eat(Class.LPAREN)
nodes =[]
while(self.curr.class_ != Class.RPAREN):
node = self.factor()
nodes.append(node)
if self.curr.class_ == Class.COMMA:
self.eat(Class.COMMA)
self.eat(Class.RPAREN)
return Elems(nodes)
def body(self):
vars_ = []
if(self.curr.class_ == Class.VAR):
vars_ = self.vars()
block = self.block()
return Body(vars_,block)
def vars(self):
self.eat(Class.VAR)
declarations = []
ids =[]
while(True):
id_ = Id(self.curr.lexeme)
self.eat(Class.ID)
ids.append(id_)
if self.curr.class_ == Class.COMMA:
self.eat(Class.COMMA)
continue
self.eat(Class.Colon)
type_ = self.type()
value_ = None
if(self.curr.class_ == Class.EQ):
self.eat(Class.EQ)
if(self.curr.class_ == Class.LPAREN):
value_ = self.array_()
else:
value_ = self.factor()
self.eat(Class.SEMICOLON)
declarations.append(Decl(type_, ids.copy(),value_))
ids.clear()
if self.curr.class_ != Class.ID:
break
return declarations
def type(self):
if self.curr.class_ == Class.TYPE:
type_ = Type(self.curr.lexeme)
self.eat(Class.TYPE)
if self.curr.class_ != Class.LBRACKET:
return type_
self.eat(Class.LBRACKET)
size = self.factor()
self.eat(Class.RBRACKET)
return SzArray(type_,size)
else:
self.eat(Class.Array)
self.eat(Class.LBRACKET)
leftRange = self.factor()
self.eat(Class.DOTDOT)
rightRange = self.factor()
self.eat(Class.RBRACKET)
self.eat(Class.OF)
type_ = Type(self.curr.lexeme)
self.eat(Class.TYPE)
return RangeArray(type_,leftRange,rightRange)
def block(self):
self.eat(Class.BEGIN)
nodes = self.nodes_untill(Class.END)
self.eat(Class.END)
return Block(nodes)
def nodes_untill(self, token):
nodes = []
while self.curr.class_ != token:
if self.curr.class_ == Class.ID:
nodes.append(self.id_())
self.eat(Class.SEMICOLON)
elif (self.curr.class_ == Class.Exit):
self.eat(Class.Exit)
arg = None
if (self.curr.class_ == Class.LPAREN):
self.eat(Class.LPAREN)
arg = self.expr()
self.eat(Class.RPAREN)
self.eat(Class.SEMICOLON)
nodes.append(Exit(arg))
elif (self.curr.class_ == Class.BREAK):
nodes.append(Break())
self.eat(Class.BREAK)
self.eat(Class.SEMICOLON)
elif (self.curr.class_ == Class.CONTINUE):
nodes.append(Continue())
self.eat(Class.CONTINUE)
self.eat(Class.SEMICOLON)
elif (self.curr.class_ == Class.REPEAT):
# nodes.append(Repeat())
self.eat(Class.REPEAT)
nodes2 = self.nodes_untill(Class.UNTIL)
#elif (self.curr.class_ == Class.UNIIL):
self.eat(Class.UNTIL)
cond = self.logic()
nodes.append(Until(cond,nodes2))
self.eat(Class.SEMICOLON)
elif self.curr.class_ == Class.FOR:
nodes.append(self.for_())
self.eat(Class.SEMICOLON)
elif self.curr.class_ == Class.WHILE:
nodes.append(self.while_())
self.eat(Class.SEMICOLON)
elif self.curr.class_ == Class.IF:
nodes.append(self.if_())
self.eat(Class.SEMICOLON)
else:
self.die_deriv(self.block.__name__)
return nodes
def for_(self):
self.eat(Class.FOR)
init = self.id_()
downto = False
if self.curr.class_ == Class.TO:
self.eat(Class.TO)
else:
downto = True
self.eat(Class.DOWNTO)
to = self.expr()
self.eat(Class.DO)
block = self.block()
return For(init, to, block,downto)
def while_(self):
self.eat(Class.WHILE)
cond = self.logic()
self.eat(Class.DO)
block = self.block()
return While(cond, block)
def if_(self):
statements = []
else_block = None
self.eat(Class.IF)
while True:
cond = self.logic()
self.eat(Class.THEN)
block = self.block()
statements.append(IfStatement(cond,block))
if self.curr.class_ != Class.ELSE:
break
self.eat(Class.ELSE)
if self.curr.class_ != Class.IF:
else_block = self.block()
break
self.eat(Class.IF)
return If(statements, else_block)
def id_(self):
id_ = Id(self.curr.lexeme)
self.eat(Class.ID)
if self.curr.class_ == Class.LPAREN:
self.eat(Class.LPAREN)
if id_.value in ['write','writeln','read','readln']:
args = self.formatargs()
else:
args = self.args()
self.eat(Class.RPAREN)
return FuncCall(id_, args)
elif self.curr.class_ == Class.ASSIGN:
self.eat(Class.ASSIGN)
expr = self.expr()
return Assign(id_, expr)
elif self.curr.class_ == Class.LBRACKET:
self.eat(Class.LBRACKET)
index_ = self.expr()
self.eat(Class.RBRACKET)
elem = ArrayElem(id_,index_)
if(self.curr.class_ != Class.ASSIGN):
return elem
self.eat(Class.ASSIGN)
expr = self.expr()
return Assign(elem, expr)
else:
return id_
def args(self):
args = []
while self.curr.class_ != Class.RPAREN:
if len(args) > 0:
self.eat(Class.COMMA)
args.append(self.expr())
return Args(args)
def formatargs(self):
args = []
while self.curr.class_ != Class.RPAREN:
if len(args) > 0:
self.eat(Class.COMMA)
expr = self.expr()
left = None
right = None
if self.curr.class_ == Class.Colon:
self.eat(Class.Colon)
no = self.curr.lexeme
self.eat(Class.INT)
left = Int(no)
if self.curr.class_ == Class.Colon:
self.eat(Class.Colon)
no = self.curr.lexeme
self.eat(Class.INT)
right = Int(no)
args.append(FormatArg(expr,left,right))
return Args(args)
def logic(self):
first = self.compare()
while self.curr.class_ in [Class.AND, Class.OR, Class.XOR]:
if self.curr.class_ == Class.AND:
op = self.curr.lexeme
self.eat(Class.AND)
second = self.compare()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.OR:
op = self.curr.lexeme
self.eat(Class.OR)
second = self.compare()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.XOR:
op = self.curr.lexeme
self.eat(Class.XOR)
second = self.compare()
first = BinOp(op, first, second)
return first
def factor(self):
if self.curr.class_ == Class.INT:
no = self.curr.lexeme
self.eat(Class.INT)
return Int(no)
if self.curr.class_ == Class.BOOL:
no = self.curr.lexeme
self.eat(Class.BOOL)
return Bool(no)
elif self.curr.class_ == Class.Float:
no = self.curr.lexeme
self.eat(Class.Float)
return Float(no)
elif self.curr.class_ == Class.CHAR:
value = Char(self.curr.lexeme)
self.eat(Class.CHAR)
return value
elif self.curr.class_ == Class.STRING:
value = String(self.curr.lexeme)
self.eat(Class.STRING)
return value
elif self.curr.class_ == Class.ID:
return self.id_()
elif self.curr.class_ in [Class.MINUS, Class.NOT]:
op = self.curr
self.eat(self.curr.class_)
first = None
if self.curr.class_ == Class.LPAREN:
self.eat(Class.LPAREN)
first = self.logic()
self.eat(Class.RPAREN)
else:
first = self.factor()
return UnOp(op.lexeme, first)
elif self.curr.class_ == Class.LPAREN:
self.eat(Class.LPAREN)
first = self.logic()
self.eat(Class.RPAREN)
return first
elif self.curr.class_ == Class.SEMICOLON:
return None
else:
self.die_deriv(self.factor.__name__)
def term(self):
if self.curr.class_ == Class.LPAREN:
self.eat(Class.LPAREN)
first = self.expr()
self.eat(Class.RPAREN)
else:
first = self.factor()
while self.curr.class_ in [Class.STAR, Class.FWDSLASH, Class.DIV,Class.MOD]:
if self.curr.class_ == Class.STAR:
op = self.curr.lexeme
self.eat(Class.STAR)
second = self.factor()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.FWDSLASH:
op = self.curr.lexeme
self.eat(Class.FWDSLASH)
second = self.factor()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.DIV:
op = self.curr.lexeme
self.eat(Class.DIV)
second = self.factor()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.MOD:
op = self.curr.lexeme
self.eat(Class.MOD)
second = self.factor()
first = BinOp(op, first, second)
return first
def expr(self):
return self.logic()
def expr2(self):
first = self.term()
while self.curr.class_ in [Class.PLUS, Class.MINUS]:
if self.curr.class_ == Class.PLUS:
op = self.curr.lexeme
self.eat(Class.PLUS)
second = self.term()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.MINUS:
op = self.curr.lexeme
self.eat(Class.MINUS)
second = self.term()
first = BinOp(op, first, second)
return first
def compare(self):
first = self.expr2()
while self.curr.class_ in [Class.EQ, Class.NEQ, Class.LT, Class.GT, Class.LTE, Class.GTE]:
if self.curr.class_ == Class.EQ:
op = self.curr.lexeme
self.eat(Class.EQ)
second = self.expr2()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.NEQ:
op = self.curr.lexeme
self.eat(Class.NEQ)
second = self.expr2()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.LT:
op = self.curr.lexeme
self.eat(Class.LT)
second = self.expr2()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.GT:
op = self.curr.lexeme
self.eat(Class.GT)
second = self.expr2()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.LTE:
op = self.curr.lexeme
self.eat(Class.LTE)
second = self.expr2()
first = BinOp(op, first, second)
elif self.curr.class_ == Class.GTE:
op = self.curr.lexeme
self.eat(Class.GTE)
second = self.expr2()
first = BinOp(op, first, second)
return first
def parse(self):
return self.program()
def die(self, text):
raise SystemExit(text)
def die_deriv(self, fun):
self.die("Derivation error: {}".format(fun))
def die_type(self, expected, found):
self.die("Expected: {}, Found: {}".format(expected, found))
```
#### File: Pascal2C_Transpiler/src/token.py
```python
from enum import Enum, auto
class Class(Enum):
ID = auto()
EOF = auto()
Exit = auto()
Array = auto()
TYPE = auto()
INT = auto()
Float = auto()
BOOL = auto()
CHAR = auto()
STRING = auto()
ASSIGN = auto()
PLUS = auto()
MINUS = auto()
STAR = auto()
FWDSLASH = auto()
DIV = auto()
MOD = auto()
EQ = auto()
NEQ = auto()
LT = auto()
GT = auto()
LTE = auto()
GTE = auto()
OR = auto()
AND = auto()
NOT = auto()
XOR = auto()
LPAREN = auto()
RPAREN = auto()
LBRACKET = auto()
RBRACKET = auto()
SEMICOLON = auto()
COMMA = auto()
DOT = auto()
Colon = auto()
DOTDOT = auto()
BEGIN = auto()
END = auto()
IF = auto()
ELSE = auto()
THEN = auto()
FOR = auto()
TO = auto()
DOWNTO = auto()
WHILE = auto()
DO = auto()
BREAK = auto()
CONTINUE = auto()
REPEAT = auto()
UNIIL = auto()
VAR = auto()
OF = auto()
PROCEDURE = auto()
FUNCTION = auto()
class Token:
def __init__(self, class_, lexeme):
self.class_ = class_
self.lexeme = lexeme
def __str__(self):
return "<{} {}>".format(self.class_, self.lexeme)
``` |
{
"source": "jpak1996/smashtime",
"score": 3
} |
#### File: smashtime/challonge/matches.py
```python
from challonge import api
def index(tournament, **params):
"""Retrieve a tournament's match list."""
return api.fetch_and_parse(
"GET",
"tournaments/%s/matches" % tournament,
**params)
def show(tournament, match_id):
"""Retrieve a single match record for a tournament."""
return api.fetch_and_parse(
"GET",
"tournaments/%s/matches/%s" % (tournament, match_id))
def update(tournament, match_id, **params):
"""Update/submit the score(s) for a match."""
api.fetch(
"PUT",
"tournaments/%s/matches/%s" % (tournament, match_id),
"match",
**params)
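# Example usage (hypothetical tournament slug; challonge.set_credentials must be called first):
#   matches = index("my_tournament")
#   update("my_tournament", matches[0]["id"], scores_csv="2-1")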
```
#### File: jpak1996/smashtime/smashtime.py
```python
from collections import OrderedDict
from twilio.rest import TwilioRestClient
import challonge
import time
import string
jank = raw_input("\nTournament url? [excluding \'http://challonge.com/\']:\n\n\tFor example, if url is \'http://challonge.com/melee_singles_09\',\n\ttype in \'melee_singles_09\'\n")
CHALLONGE_CREDS = open('creds/challonge_creds.txt', 'r').readline()
challonge.set_credentials("canigetapak", CHALLONGE_CREDS)
tournament = challonge.tournaments.show(jank)
participants = challonge.participants.index(tournament["id"])
num_entrants = len(participants)
class PlayerObject:
def __init__(self, players):
self.players = players
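# each participant's "display-name-with-invitation-email-address" looks like "Name <5551234567@carrier>";
# the characters between '<' and '@' are treated as the player's phone number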
for x in range(0,num_entrants):
phone_string = participants[x]["display-name-with-invitation-email-address"]
index1 = phone_string.find('<') + 1
index2 = phone_string.find('@')
phone_substr = phone_string[index1:index2]
phone_len = index2 - index1
if phone_len!=10 and phone_len!=11:
print("Janky phone number for entrant: " + participants[x]['name'])
print("Either that or there is a '<' or '@' character in their tag >:|")
self.players.append({'name': participants[x]['name'], 'id': participants[x]['id'], 'number':phone_substr, 'flag': -1})
def players_return_id(self,x):
return self.players[x]['id']
def players_return_number(self,x):
return self.players[x]['number']
def players_return_name(self,x):
return self.players[x]['name']
def players_return_flag(self,x):
return self.players[x]['flag']
def players_set_flag(self,x,f):
self.players[x]['flag'] = f
twilio_file = open('creds/twilio_creds.txt', 'r')
ACCOUNT_SID = string.strip(twilio_file.readline())
AUTH_TOKEN = string.strip(twilio_file.readline())
FROM_NUM = string.strip(twilio_file.readline())
print ACCOUNT_SID
print AUTH_TOKEN
print FROM_NUM
def main2():
client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
tournament = challonge.tournaments.show(jank)
matches = challonge.matches.index(tournament["id"])
num_matches = len(matches)
players = []
PO = PlayerObject(players)
print tournament["state"]
#tournament state possibilities
#pending
#underway
#awaiting_review
#complete
if tournament["state"] == "pending":
print "Please start the tournament."
exit()
while(tournament["state"]=="underway"):
tournament = challonge.tournaments.show(jank)
print tournament["state"]
matches = challonge.matches.index(tournament["id"])
if tournament["state"]!="underway":
break
#first match_id vs second match_id
#PO could have a match_id field storing the last match they played to be compared with a constantly updated match_id
for x in range(0,num_matches):
if matches[x]["state"] == "open":
player1_id = matches[x]["player1-id"]
player2_id = matches[x]["player2-id"]
counter = -1
for elem in range(0,num_entrants):
if PO.players_return_id(elem) == player1_id:
#for round 1 players
if PO.players_return_flag(elem) == -1:
PO.players_set_flag(elem,matches[x]["id"])
player1_name = PO.players_return_name(elem)
player1_number = PO.players_return_number(elem)
counter+=1
elif PO.players_return_flag(elem) == matches[x]["id"]:
#for when the match has not finished
counter+=0
else:
#for new matches
PO.players_set_flag(elem,matches[x]["id"])
player1_name = PO.players_return_name(elem)
player1_number = PO.players_return_number(elem)
counter+=1
elif PO.players_return_id(elem) == player2_id:
if PO.players_return_flag(elem) == -1:
PO.players_set_flag(elem,matches[x]["id"])
player2_name = PO.players_return_name(elem)
player2_number = PO.players_return_number(elem)
counter+=1
elif PO.players_return_flag(elem) == matches[x]["id"]:
counter+=0
else:
PO.players_set_flag(elem,matches[x]["id"])
player2_name = PO.players_return_name(elem)
player2_number = PO.players_return_number(elem)
counter+=1
if counter==1:
msg = player1_name + ', please report to the TO for your match with ' + player2_name + '.'
client.messages.create(
to = player1_number,
from_ = FROM_NUM,
body = msg,
)
print(msg)
msg = player2_name + ', please report to the TO for your match with ' + player1_name + '.'
client.messages.create(
to = player2_number,
from_ = FROM_NUM,
body = msg,
)
print(msg)
counter = -1
break
time.sleep(10)
print "Props to the winner."
main2()
``` |
{
"source": "jpakkane/batchcompiler",
"score": 2
} |
#### File: jpakkane/batchcompiler/batchtest.py
```python
import sys, os, subprocess, shutil, random
iface_template = '''export module M{};
// Import statements here.
import M{};
import M{};
export int f{}() {{
return f{}() + f{}();
}}
'''
root_case = '''export module M{};
export int f{}() {{
return 1;
}}
'''
class BatchTest:
def __init__(self):
if not shutil.which('cl'):
sys.exit('cl.exe not found, run from the VS tools prompt.')
self.num_files = 100
self.cl_cmd = ['cl', '/nologo', '/c', '/experimental:module']
self.sources_to_compile = set()
self.waiting_for = {} # Key is module ID, value is sources waiting for said module.
def fnames_for(self, i):
return ('src{}.ixx'.format(i),
'M{}.ifc'.format(i),
'src{}.obj'.format(i))
def create_files(self):
if os.path.exists(self.fnames_for(0)[0]):
print('Sources already exist.')
return
for i in range(self.num_files):
first = i + random.randint(1, 5)
second = i + random.randint(1, 5)
first = min(first, self.num_files-1)
second = min(second, self.num_files-1)
fnames = self.fnames_for(i)
if i == self.num_files - 1:
with open(fnames[0], 'w') as ofile:
ofile.write(root_case.format(i, i))
else:
with open(fnames[0], 'w') as ofile:
ofile.write(iface_template.format(i, first, second, i, first, second))
def mark_as_needing(self, trial, missing_mod):
if missing_mod not in self.waiting_for:
self.waiting_for[missing_mod] = [trial]
else:
self.waiting_for[missing_mod].append(trial)
def try_compile(self, trial):
src_name = self.fnames_for(trial)[0]
cp = subprocess.run(self.cl_cmd + [src_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
if cp.returncode == 0:
return None
assert('could not find module' in cp.stdout)
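# e.g. an stdout line containing "could not find module 'M17'" makes this function return 17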
for message_line in cp.stdout.split('\n'):
if 'could not find module' in message_line:
return int(message_line.split("'")[-2][1:])
sys.exit('Could not find module error message!')
def module_created(self, modname):
for new_one in self.waiting_for.pop(modname, []):
self.sources_to_compile.add(new_one)
print('Module', modname, 'finished')
def build(self):
for i in range(self.num_files):
# In the real implementation the compiler would have to check
# here if the input source is up to date. That is, if
# the output object file exists and is newer than all files
# it depends on. It might make sense to delete all
# the output ifc file immediately for all stale files.
self.sources_to_compile.add(i)
while len(self.sources_to_compile) > 0:
trial = self.sources_to_compile.pop()
missing_mod = self.try_compile(trial)
if missing_mod is not None:
self.mark_as_needing(trial, missing_mod)
else:
self.module_created(trial)
if len(self.waiting_for) > 0:
print(self.waiting_for)
sys.exit('Could not compile all sources in this target. Bad module dependencies.')
if __name__ == '__main__':
bt = BatchTest()
bt.create_files()
bt.build()
``` |
{
"source": "jpakkane/crename",
"score": 2
} |
#### File: jpakkane/crename/crename.py
```python
import os, sys, os.path, pathlib, shutil, subprocess, re
def handle_makefiles():
file_finder = re.compile(r'\b[-_a-zA-Z0-9/]+\.C\b')
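# e.g. matches "src/foo.C" and "bar_2.C" in a line like "OBJS = src/foo.C bar_2.C"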
# If you want to process CMake files, then change
# this glob to **/CMakeLists.txt (not tested)
for f in pathlib.Path('.').glob('**/Makefile*'):
outlines = []
def file_matcher(mobj):
# Only change those strings that point to existing
# files on disk. If your build system does something
# fancier like building paths by string concatenation,
# you might need to loosen the requirements here.
fname = mobj.group(0)
fpath = f.parent / fname
if fpath.exists():
return fname[:-2] + '.cpp'
else:
return fname
for line in f.open():
outlines.append(file_finder.sub(file_matcher, line))
f.write_text(''.join(outlines))
def rename_files():
for f in pathlib.Path('.').glob('**/*.C'):
subprocess.check_call(['git', 'mv', f, f.with_suffix('.cpp')])
def process():
handle_makefiles()
rename_files()
if __name__ == '__main__':
if not os.path.exists('.git'):
sys.exit('This script must be run in the root of the git directory.')
if not shutil.which('git'):
sys.exit('Git not available.')
process()
``` |
{
"source": "JPalakapilly/baselines-RAISE",
"score": 3
} |
#### File: baselines/behavioral_sim/agents.py
```python
import pandas as pd
import numpy as np
import cvxpy as cvx
#### file to make the simulation of people that we can work with
class Person():
""" Person (parent?) class -- will define how the person takes in a points signal and puts out an energy signal
baseline_energy = a list or dataframe of values. This is data from SinBerBEST
points_multiplier = an int which describes how sensitive each person is to points
"""
def __init__(self, baseline_energy_df, points_multiplier = 1):
self.baseline_energy_df = baseline_energy_df
self.baseline_energy = np.array(self.baseline_energy_df["net_energy_use"])
self.points_multiplier = points_multiplier
baseline_min = self.baseline_energy.min()
baseline_max = self.baseline_energy.max()
baseline_range = baseline_max - baseline_min
self.min_demand = np.maximum(0, baseline_min + baseline_range * .05)
self.max_demand = np.maximum(0, baseline_min + baseline_range * .95)
def energy_output_simple_linear(self, points):
"""Determines the energy output of the person, based on the formula:
y[n] = -sum_{rolling window of 5} points + baseline_energy + noise
inputs: points - list or dataframe of points values. Assumes that the
list will be in the same time increment that energy_output will be.
For now, that's in 1 hour increments
"""
points_df = pd.DataFrame(points)
points_effect = (
points_df
.rolling(
window = 5,
min_periods = 1)
.mean()
)
time = points_effect.shape[0]
energy_output= []
for t in range(time):
temp_energy = self.baseline_energy[t] - points_effect.iloc[t]*self.points_multiplier + \
np.random.normal(1)
energy_output.append(temp_energy)
return pd.DataFrame(energy_output)
def pure_linear_signal(self, points, baseline_day=0):
"""
A linear person. The more points you give them, the less energy they will use
(within some bounds) for each hour. No rolling effects or anything. The simplest
signal.
"""
# hack here to always grab the first day from the baseline_energy
output = np.array(self.baseline_energy)[baseline_day*24:baseline_day*24+10]
points_effect = np.array(points * self.points_multiplier)
output = output - points_effect
# impose bounds/constraints
output = np.maximum(output, self.min_demand)
output = np.minimum(output, self.max_demand)
return output
def get_min_demand(self):
return self.min_demand
# return np.quantile(self.baseline_energy, .05)
def get_max_demand(self):
return self.max_demand
# return np.quantile(self.baseline_energy, .95)
class Person_with_hysteresis(Person):
""" Wendy -- Determines the energy output of the person, based on the formula:
y[n] = f(points) + baseline_energy + noise
f: super special secret function that Wendy designs with hysteresis
inputs: points - list or dataframe of points values. Assumes that the
list will be in the same time increment that energy_output will be.
For now, that's in 5 minute increments"""
def __init__(self, baseline_energy, points_multiplier = 1):
pass
class FixedDemandPerson(Person):
def __init__(self, baseline_energy_df, points_multiplier = 1):
super().__init__(baseline_energy_df, points_multiplier)
def demand_from_points(self, points, baseline_day=0):
# hack here to always grab the first day from the baseline_energy
output = np.array(self.baseline_energy)[baseline_day*24:baseline_day*24+10]
total_demand = np.sum(output)
points_effect = np.array(points * self.points_multiplier)
output = output - points_effect
# scale to keep total_demand (almost) constant
# almost bc imposing bounds afterwards
output = output * (total_demand/np.sum(output))
# impose bounds/constraints
output = np.maximum(output, self.min_demand)
output = np.minimum(output, self.max_demand)
return output
def adverserial_linear(self, points, baseline_day=0):
# hack here to always grab the first day from the baseline_energy
output = np.array(self.baseline_energy)[baseline_day*24:baseline_day*24+10]
total_demand = np.sum(output)
points_effect = np.array(points * self.points_multiplier)
output = output + points_effect
# scale to keep total_demand (almost) constant
# almost bc imposing bounds afterwards
output = output * (total_demand/np.sum(output))
# impose bounds/constraints
output = np.maximum(output, self.min_demand)
output = np.minimum(output, self.max_demand)
return output
class DeterministicFunctionPerson(Person):
def __init__(self, baseline_energy_df, points_multiplier = 1):
super().__init__(baseline_energy_df, points_multiplier)
def threshold_response_func(self, points):
points = np.array(points) * self.points_multiplier
threshold = np.mean(points)
return [p if p>threshold else 0 for p in points]
def exponential_response_func(self, points):
points = np.array(points) * self.points_multiplier
points_effect = [p**2 for p in points]
return points_effect
def sin_response_func(self,points):
points = np.array(points)
# n = np.max(points)
# points = [np.sin((float(i)/float(n))*np.pi) for i in points]
points = [np.sin(float(i)*np.pi)*self.points_multiplier for i in points]
return points
def routine_output_transform(self, points_effect, baseline_day=0):
output = np.array(self.baseline_energy)[baseline_day*24:baseline_day*24+10]
total_demand = np.sum(output)
# scale to keep total_demand (almost) constant
# almost bc imposing bounds afterwards
output = output - points_effect
output = output * (total_demand/np.sum(output))
# impose bounds/constraints
output = np.maximum(output, self.min_demand)
output = np.minimum(output, self.max_demand)
return output
def threshold_response(self, points):
points_effect = self.threshold_response_func(points)
output = self.routine_output_transform(points_effect)
return output
def sin_response(self, points):
points_effect = self.sin_response_func(points)
output = self.routine_output_transform(points_effect)
return output
def exp_response(self, points):
points_effect = self.exponential_response_func(points)
output = self.routine_output_transform(points_effect)
return output
def threshold_exp_response(self,points):
points_effect = self.exponential_response_func(points)
points_effect = self.threshold_response_func(points_effect)
output = self.routine_output_transform(points_effect)
return output
def linear_response(self, points):
points_effect = points*self.points_multiplier
output = self.routine_output_transform(points_effect)
return output
class MananPerson1(Person):
def __init__(self, baseline_energy_df, points_multiplier=.8):
# ignores baseline_energy_df
# this is just for backwards compatability
self.baseline_energy_hour = 300
self.day_of_week_multiplier = np.array([1.1, 1.15, 1, 0.9, 0.8])
self.hour_multiplier = np.array([0.8, 0.9, 1, 0.9, 0, 0.9, 1.1, 1.1, 1.0, 0.9])
self.AFFINITY_TO_POINTS = points_multiplier
self.ENERGY_STD_DEV = 5
self.baseline_energy_day = np.array(self.baseline_energy_hour * self.hour_multiplier)
self.total_baseline_day = np.sum(self.baseline_energy_day)*self.day_of_week_multiplier
self.min_demand = self.baseline_energy_day.min()*self.day_of_week_multiplier.min()
self.max_demand = self.baseline_energy_day.max()*self.day_of_week_multiplier.max()
self.MAX_DIFFERENTIAL = 20
def redistributed_energy(self, points, day_num):
energy_curve = cvx.Variable(len(points))
objective = cvx.Minimize(energy_curve.T * points)
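# minimizing the dot product of energy and points shifts consumption toward hours with low point values,
# subject to keeping the daily total fixed and bounding hour-to-hour changes (constraints below)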
constraints = [
cvx.sum(energy_curve, axis=0, keepdims=True)
== self.total_baseline_day[day_num]
]
for hour in range(10):
constraints += [energy_curve[hour] >= 0]
for hour in range(1, 10):
constraints += [
cvx.abs(energy_curve[hour] - energy_curve[hour - 1])
<= self.MAX_DIFFERENTIAL
]
problem = cvx.Problem(objective, constraints)
problem.solve()
return energy_curve.value
def predicted_energy_behavior(self, points, day_num):
perfect_energy_use = self.redistributed_energy(points, day_num)
baseline_energy_use = self.baseline_energy_day*self.day_of_week_multiplier[day_num]
means = np.empty(len(perfect_energy_use))
for i in range(len(perfect_energy_use)):
lesser, greater = (
(perfect_energy_use[i], baseline_energy_use[i])
if perfect_energy_use[i] < baseline_energy_use[i]
else (baseline_energy_use[i], perfect_energy_use[i])
)
means[i] = lesser + 0.8 * (greater - lesser)
sample = np.random.normal(means, self.ENERGY_STD_DEV)
return np.maximum(np.zeros(sample.shape), sample)
``` |
{
"source": "jpalanco/edr",
"score": 3
} |
#### File: edr/scripts/repack_deb.py
```python
import argparse
import os
import subprocess
import shutil
import tempfile
parser = argparse.ArgumentParser(
description='Repack the velocigrr package with a new config file.')
parser.add_argument('config_file', type=str,
help="The config file to embed.")
parser.add_argument('deb_package', type=str,
help="The path to the velociraptor deb.")
class TempDirectory(object):
"""A self cleaning temporary directory."""
def __enter__(self):
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.name, True)
def main():
args = parser.parse_args()
if not args.deb_package.endswith("deb"):
raise RuntimeError("Expeting a debian package not %s:" % args.deb_package)
with open(args.config_file) as fd:
config_lines = list(fd.readlines())
with TempDirectory() as temp_dir_name:
deb_package = os.path.abspath(args.deb_package)
subprocess.check_call(
"ar p " + deb_package + " control.tar.xz | tar -xJ",
shell=True, cwd=temp_dir_name)
with open(os.path.join(temp_dir_name, "postinst")) as fd:
postinst_lines = list(fd.readlines())
# Inject the config into the postinst script.
new_postinst = (
[postinst_lines[0],
"cat << EOF > /etc/velociraptor.config.yaml\n"] +
config_lines + ["EOF\n\n"] + postinst_lines[1:])
with open(os.path.join(temp_dir_name, "postinst"), "wt") as fd:
fd.write("".join(new_postinst))
subprocess.check_call("tar cJf control.tar.xz *[!z]",
shell=True, cwd=temp_dir_name)
subprocess.check_call(["cp", deb_package, deb_package + "_repacked.deb"],
cwd=temp_dir_name)
subprocess.check_call(["ar", "r", deb_package + "_repacked.deb", "control.tar.xz"],
cwd=temp_dir_name)
if __name__ == '__main__':
main()
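# Example invocation (hypothetical file names):
#   python repack_deb.py server.config.yaml velociraptor_amd64.deb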
``` |
{
"source": "jpalat/AIS-Vessel-Data-Pipeline",
"score": 3
} |
#### File: AIS-Vessel-Data-Pipeline/src/process_ais_data.py
```python
import yaml
import os
import math
import numpy as np
import pandas as pd
def main():
"""Driver code to run the big steps of pre-processing the data.
First, all the script parameters are loaded by reading the ``.yaml`` ``config_file`` and this config is unpacked.
All the csv files within a specified directory are found and returned as ``csv_files``, along with each file's
year, month, and zone in ``all_files_meta``.
Then, these csv files are read and organized into a large set of trajectories ordered by id (mmsi). Finally, these
trajectories are discretized before being written into an output csv containing only rows of id-state-action-state
transitions.
Another yaml file is written to ``meta_file`` to specify the final grid parameters, output directories, and the
year, month, and zone of all the files read in.
"""
# file containing important options, directories, parameters, etc.
config_file = "config.yml"
# file to write final grid_params and the csv files' respective years, months, and zones
meta_file = "meta_data.yml"
# gets the config dictionary and unpacks it
config = get_config(config_file)
options = config["options"]
directories = config["directories"]
meta_params = config["meta_params"]
grid_params = config["grid_params"]
# gets the csv files available and their metadata
csv_files, all_files_meta = collect_csv_files(
options, directories, meta_params
)
# reads the collected csv files and assembles trajectories
trajectories, grid_params = read_data(csv_files, options, grid_params)
# processes (fits to grid) trajectories and writes generates sequences to output file
write_data(trajectories, options, directories, grid_params)
# writes file metadata, paths, and grid parameters to ``meta_file``
directories_out = {
"in_dir_path": directories["out_dir_path"],
"in_dir_data": directories["out_dir_file"],
}
out_dict = {
"all_files_meta": all_files_meta,
"options": options,
"directories": directories_out,
"grid_params": grid_params,
}
with open(meta_file, "w") as outfile:
yaml.dump(out_dict, outfile, default_flow_style=False)
def get_config(config_file):
"""Helper function to get dictionary of script parameters.
Mostly boilerplate code to read in ``config_file`` as a ``.yaml`` file that specifies important script parameters.
Upon success, this config dictionary is returned to main to be unpacked.
Args:
config_file (str): The name of the ``.yaml`` file containing the script configuration parameters, located in the
directory the script is run.
Returns:
dict: The script configuration parameters.
"""
with open(config_file, "r") as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
def collect_csv_files(options, directories, meta_params):
"""Traverses the directory containing the decompressed AIS data to get the csv names for further processing.
Uses the ``os`` library to find all csv files within the directory defined by ``directories``, populating the
``csv_files`` list for later reading of all valid csv files found and logging file metadata in ``all_files_meta``.
Args:
options (dict): The script options specified in the ``config_file``.
directories (dict): The input and output paths and files specified in the ``config_file``.
meta_params (dict): The time and zone boundaries specified in the ``config_file``.
Returns:
tuple: A list with paths to all valid csv files found and a dictionary with the year, month, and zone
corresponding to each csv file's origin.
"""
# initialize a list that will be filled with all csv file names from root
csv_files = []
all_files_meta = {}
for root, dirs, files in os.walk(
directories["in_dir_path"] + directories["in_dir_data"]
):
for file in files:
if file.endswith(".csv"):
year, month, zone = get_meta_data(
file
) # finds the data year, month, and zone based on the file name
# only considers valid years and months if time is bounded and valid zones if zones are bounded
if (
not options["bound_time"]
or (
meta_params["min_year"]
<= year
<= meta_params["max_year"]
)
) and (
not options["bound_zone"]
or (
meta_params["min_zone"]
<= zone
<= meta_params["max_zone"]
)
):
if (
not options["bound_time"]
or (
not year == meta_params["min_year"]
or month >= meta_params["min_month"]
)
) and (
not options["bound_time"]
or (
not year == meta_params["max_year"]
or month <= meta_params["max_month"]
)
):
# csv_files will contain file locations relative to current directory
csv_files.append(os.path.join(root, file))
# create dictionary to describe file characteristics
file_meta = {
"year": year,
"month": month,
"zone": zone,
}
all_files_meta[file] = file_meta
return csv_files, all_files_meta
def read_data(csv_files, options, grid_params):
"""Iterate through each csv file to segregate each trajectory by its mmsi id.
Reads each csv in ``csv_files`` to obtain coordinates and timestamp series associated with each mmsi id encountered.
Optionally, the boundaries of the grid later specified can be inferred by calculating the minimum and maximum
longitudes and latitudes by setting ``options['bound_lon']`` and ``options['bound_lat']`` to ``False``, respectively
in the ``config_file``.
It can also be specified to only read the first ``options['max_rows']`` of each csv file by setting
``options['limit_rows']`` to True in the ``config_file``.
Args:
csv_files (list): Paths to all valid csv files found.
options (dict): The script options specified in the ``config_file``.
grid_params (dict): The grid parameters specified in the ``config_file``.
Returns:
tuple: A pandas DataFrame of all data entries with the format ``['MMSI', 'LON', 'LAT', 'TIME']`` and a
dictionary that specifies the minimum and maximum latitudes and longitudes in the dataset.
"""
# overwrite hard boundaries on longitude and latitude if not bounded in config.yaml
if not options["bound_lon"]:
grid_params["min_lon"] = 180
grid_params["max_lon"] = -180
if not options["bound_lat"]:
grid_params["min_lat"] = 90
grid_params["max_lat"] = -90
# holds all ais data, one dataframe per csv file
ais_data = []
# get data from all csv files
for csv_file in csv_files:
# reads in the raw data with columns and number of rows specified in config.yaml
nrows = options["max_rows"] if options["limit_rows"] else None
usecols = ["MMSI", "LON", "LAT", "BaseDateTime"]
ais_df = pd.read_csv(csv_file, usecols=usecols, nrows=nrows)
ais_df = ais_df[usecols]
# interprets raw time entries as datetime objects and drops original column
ais_df["TIME"] = pd.to_datetime(
ais_df["BaseDateTime"], format="%Y-%m-%dT%H:%M:%S"
)
ais_df.drop(columns="BaseDateTime", inplace=True)
# keeps only rows in boundaries if specified
if options["bound_lon"]:
ais_df = ais_df.loc[
(ais_df["LON"] >= grid_params["min_lon"])
& (ais_df["LON"] <= grid_params["max_lon"])
]
if options["bound_lat"]:
ais_df = ais_df.loc[
(ais_df["LAT"] >= grid_params["min_lat"])
& (ais_df["LAT"] <= grid_params["max_lat"])
]
# infers grid boundaries if no boundaries are specified
if (
not options["bound_lon"]
and ais_df["LON"].min() < grid_params["min_lon"]
):
grid_params["min_lon"] = ais_df["LON"].min()
if (
not options["bound_lon"]
and ais_df["LON"].max() > grid_params["max_lon"]
):
grid_params["max_lon"] = ais_df["LON"].max()
if (
not options["bound_lat"]
and ais_df["LAT"].min() < grid_params["min_lat"]
):
grid_params["min_lat"] = ais_df["LAT"].min()
if (
not options["bound_lat"]
and ais_df["LAT"].max() > grid_params["max_lat"]
):
grid_params["max_lat"] = ais_df["LAT"].max()
# appends current dataframe to list of all dataframes
ais_data.append(ais_df)
# merges dataframes from all csvs
trajectories = pd.concat(ais_data, axis=0, ignore_index=True)
# rounds inferred grid boundaries to nearest degree to provide some padding to each boundary
if not options["bound_lon"]:
grid_params["min_lon"] = float(math.floor(grid_params["min_lon"]))
grid_params["max_lon"] = float(math.ceil(grid_params["max_lon"]))
if not options["bound_lat"]:
grid_params["min_lat"] = float(math.floor(grid_params["min_lat"]))
grid_params["max_lat"] = float(math.ceil(grid_params["max_lat"]))
# number of columns in the resulting grid
grid_params["num_cols"] = math.ceil(
(grid_params["max_lon"] - grid_params["min_lon"])
/ grid_params["grid_len"]
)
return trajectories, grid_params
def write_data(trajectories, options, directories, grid_params):
"""Writes all trajectories to an output csv file using a discretized state and action grid.
Uses the trajectories variable to look at each id-state-action-state transition to discretize all states and to
interpolate actions if specified. These discretized states with interpolated or arbitrary actions are then written
to the output csv specified by ``options['out_dir'] + options['out_file']``. Each trajectory is also sorted by
its timestamp.
The trajectories have their ids aliased by a counter variable that only increments whenever a trajectory is
going to appear in the final csv. Self-transitions are discarded, and because of the huge grid size, most
trajectories will be discarded since they will never transition between grid squares.
Args:
trajectories (pandas.DataFrame): All data entries with columns ``['MMSI', 'LON', 'LAT', 'TIME']``.
options (dict): The script options specified in the ``config_file``.
directories (dict): The input and output paths and files specified in the ``config_file``.
grid_params (dict): The grid parameters specified in the ``config_file``.
"""
# sorts based on MMSI, then sorts by timestamps within MMSI groups, drops the time column
trajectories.sort_values(["MMSI", "TIME"], inplace=True)
trajectories.drop(columns="TIME", inplace=True)
# creates a new column of discretized states based on coordinate pairs
trajectories["STATE"] = get_state(
trajectories["LON"].values, trajectories["LAT"].values, grid_params
)
# looks at state differences within MMSI trajectories and only keeps the states with nonzero differences
# trajectories with only one state are kept because they will have a first row with 'nan' for diff
non_self_transitions = (
trajectories["STATE"].groupby(trajectories["MMSI"]).diff().ne(0)
)
trajectories = trajectories.loc[non_self_transitions]
# rounds latitude and longitude to specified precision
trajectories = trajectories.round(
{"LON": options["prec_coords"], "LAT": options["prec_coords"]}
)
# drops the trajectories with fewer states than ``options['min_states']``
traj_lengths = trajectories["MMSI"].value_counts()
traj_keep = traj_lengths[
traj_lengths > options["min_states"] - 1
].index.values
trajectories = trajectories.loc[trajectories["MMSI"].isin(traj_keep)]
# aliases the MMSI column to ascending integers to enumerate trajectories and make easier to read
alias = {
mmsi: ind for ind, mmsi in enumerate(trajectories["MMSI"].unique())
}
trajectories["MMSI"] = trajectories["MMSI"].map(alias)
# resets index now that manipulation of this dataframe has finished
trajectories.reset_index(drop=True, inplace=True)
# creates a series of stacked dataframes, each dataframe representing an interpolated state transition
sas = trajectories.groupby("MMSI").apply(
lambda x: get_action(x, options, grid_params)
)
if isinstance(
sas, pd.DataFrame
): # becomes a DataFrame when every trajectory has only one sas triplet
sas = sas[0]
# merge Series of dictionaries
ids = []
prevs = []
acts = []
curs = []
lons = []
lats = []
for traj in sas:
ids += traj["ID"]
prevs += traj["PREV"]
acts += traj["ACT"]
curs += traj["CUR"]
if options["append_coords"]:
lons += traj["LON"]
lats += traj["LAT"]
# prepare final dictionary with built lists and proper heading name
sas_data = {
"sequence_id": ids,
"from_state_id": prevs,
"action_id": acts,
"to_state_id": curs,
}
if options["append_coords"]:
sas_data["lon"] = lons
sas_data["lat"] = lats
# writes new dataframe to final csv
sas = pd.DataFrame(sas_data)
sas.to_csv(
directories["out_dir_path"] + directories["out_dir_file"], index=False
)
def get_bounds(zone):
"""Helper function to get longitude boundaries corresponding to zone.
Calculates the minimum and maximum longitudes corresponding to an integer zone
representing a Universal Transverse Mercator coordinate system zone. Each zone is
6 degrees wide, dividing the Earth into 60 zones, starting with zone 1 at 180 deg W. This function
also wraps the zone with a modulo operator, so zone -1 would map to zone 58.
Args:
zone (int): The Universal Transverse Mercator coordinate system zone.
Returns:
tuple: The minimum and maximum longitudes of the zone passed in.
"""
min_lon = (
6.0 * ((zone - 1) % 60)
) - 180.0 # counts 6 degrees per zone, offset by -180
return min_lon, (min_lon + 6.0)
def get_meta_data(file_name):
"""Helper function to retrieve a given file name's year, month, and zone.
Takes a string file_name formatted as ``'AIS_yyyy_mm_Zone##.csv'`` and returns the numerical
values of ``yyyy, mm, ##`` corresponding to year, month, and zone number as a tuple.
Args:
file_name (str): The file name to be parsed in format ``'AIS_yyyy_mm_Zone##.csv'``.
Returns:
tuple: The year, month, and zone corresponding to the filename passed in.
"""
meta_file_data = file_name.split(
"_"
) # splits csv file on '_' character, which separates relevant file info
year = int(meta_file_data[-3]) # third to last element of file is the year
month = int(
meta_file_data[-2]
) # second to last element of file is the month
# get zone number for csv file being read
ending_raw = meta_file_data[
-1
] # gets last part of the file, with format "ZoneXX.csv"
ending_data = ending_raw.split(
"."
) # splits last part of file on '.' character
zone_raw = ending_data[0] # gets "ZoneXX" string
zone_data = zone_raw[
-2:
] # gets last 2 characters of "ZoneXX" string - will be the zone number
zone = int(zone_data)
return year, month, zone
def get_state(cur_lon, cur_lat, grid_params):
"""Discretizes a coordinate pair into its state space representation in a Euclidean grid.
Takes in a coordinate pair ``cur_lon``, ``cur_lat`` and grid parameters to calculate the integer state representing
the given coordinate pair. This coordinate grid is always row-major. ``(min_lon, min_lat)`` represent the
bottom-left corner of the grid.
Example:
A 3 x 4 grid would have the following state enumeration pattern::
8 9 10 11
4 5 6 7
0 1 2 3
With each grid square's area bounded in the following way::
(min_lon, min_lat + grid_len) (min_lon + grid_len, min_lat + grid_len)
|
(min_lon, min_lat) ---------------------- (min_lon + grid_len, min_lon)
In this example, the bottom left of state 0's boundaries would be the point ``min_lon, min_lat``, and the total
area mapping to state 0 would be the square with ``min_lon, min_lat`` as the bottom left corner and each side of
the square with length ``grid_len``. The inclusive parts of the square's boundaries mapping to zero are solid
lines.
Args:
cur_lon (float): The longitude of the data point.
cur_lat (float): The latitude of the data point.
grid_params (dict): The grid parameters specified in the ``config_file``.
Returns:
int: A state corresponding to the discretized representation of ``cur_lon``, ``cur_lat``.
"""
# normalize lat and lon to the minimum values
norm_lon = cur_lon - grid_params["min_lon"]
norm_lat = cur_lat - grid_params["min_lat"]
# find the row and column position based on grid_len
col = norm_lon // grid_params["grid_len"]
row = norm_lat // grid_params["grid_len"]
# find total state based on num_cols in final grid
return (row * grid_params["num_cols"] + col).astype(int)
def get_action(traj, options, grid_params):
"""Wrapper function for other ``get_action`` functions.
Calls the correct ``get_action`` variant based on the options input and returns the resulting output with
interpolated actions for all entries in the series.
Args:
traj (pandas.DataFrame): A pandas DataFrame with all the states encountered in a trajectory with their
respective coordinates.
options (dict): The script options specified in the ``config_file``.
grid_params (dict): The grid parameters specified in the ``config_file``.
Returns:
dict: The sequence of state-action-state triplets for the passed in trajectory.
"""
# retrieves trajectory data
traj_num = traj.name
states = traj["STATE"]
last_state = states.iloc[-1]
lon = traj["LON"]
lat = traj["LAT"]
# prepares a dictionary of state transitions to be fed row-by-row as a DataFrame to the interpolation functions
data = {
"ID": [traj_num] * (len(states) - 1),
"PREV": states.iloc[:-1].values,
"CUR": states.iloc[1:].values,
}
# if specified, appends the original entry coordinates (not discretized) for each 'PREV' entry
if options["append_coords"]:
data["LON"] = lon.iloc[:-1].values
data["LAT"] = lat.iloc[:-1].values
# formats the final data dictionary as a DataFrame
traj_df = pd.DataFrame(data)
# selects specified interpolation function and applies it row-wise to ``traj_df``
if not options["interp_actions"]:
traj_df = traj_df.apply(
lambda x: get_action_arb(x, options, grid_params), axis=1
)
else:
if options["allow_diag"]:
traj_df = traj_df.apply(
lambda x: get_action_interp_with_diag(x, options, grid_params),
axis=1,
)
else:
traj_df = traj_df.apply(
lambda x: get_action_interp_reg(x, options, grid_params),
axis=1,
)
# merges the dictionary series
states_out = []
acts_out = []
lon_out = []
lat_out = []
for traj in traj_df:
states_out += traj["PREV"]
acts_out += traj["ACT"]
if options["append_coords"]:
lon_out += traj["LON"]
lat_out += traj["LAT"]
states_out.append(last_state)
# appends the final state to each trajectory as its own row to allow for easier plotting of trajectories
if options["append_coords"]:
states_out.append(-1)
acts_out.append(-1)
lon_out.append(lon.iloc[-1])
lat_out.append(lat.iloc[-1])
# instantiates final dataframe-ready dictionary with state-action-state triplets
data_out = {
"ID": [traj_num] * len(acts_out),
"PREV": states_out[:-1],
"ACT": acts_out,
"CUR": states_out[1:],
}
# adds coordinate fields for final output if specified in options
if options["append_coords"]:
data_out["LON"] = lon_out
data_out["LAT"] = lat_out
return data_out
def get_action_arb(row, options, grid_params):
"""Calculates an arbitrary action from the previous state to current state relative to the previous state.
First, the relative offset between the current and previous state in rows and columns is calculated.
The action is then calculated according to a spiral rule beginning with the previous state, so self-transitions
are defined as ``0`` as an initial condition. Spiral inspired by the polar function ``r = theta``.
Example:
For example, if ``prev_state = 5``, ``cur_state = 7``, and ``num_cols = 4``, then our state grid is populated
as follows::
8 9 10 11
4 p 6 c
0 1 2 3
Where p represents the location of the previous state, and c represents the location of the current state.
Then the current state's position relative to the previous state is ``rel_row = 0``, ``rel_col = 2``. Our action
spiral then looks like this::
15 14 13 12 11 15 14 13 12 11
16 4 3 2 10 16 4 3 2 10
17 5 0 1 9 -> 17 5 p 1 c
18 6 7 8 24 18 6 7 8 24
19 20 21 22 23 19 20 21 22 23
Thus, this algorithm will return ``9`` as the action.
Args:
row (pandas.Series): One row of the DataFrame the function is applied to, containing the trajectory number,
previous state, current state, longitude, and latitude.
options (dict): The script options specified in the ``config_file``.
grid_params (dict): The grid parameters specified in the ``config_file``.
Returns:
dict: State-action-state triplets that interpolate between ``prev_state`` and ``cur_state``.
"""
# retrieves transition data
traj_num = row["ID"].astype(int)
prev_state = row["PREV"].astype(int)
cur_state = row["CUR"].astype(int)
num_cols = grid_params["num_cols"]
# gets row, column decomposition for previous and current states
prev_row = prev_state // num_cols
prev_col = prev_state % num_cols
cur_row = cur_state // num_cols
cur_col = cur_state % num_cols
# calculates current state's position relative to previous state
rel_row = cur_row - prev_row
rel_col = cur_col - prev_col
# simple routine to calculate a spiral set of actions
# the sequence defined by layer corresponds to the total number of grid squares in each spiral layer
action_num = x = y = i = 0
layer = (2 * i + 1) ** 2 # sets breakpoint for when to increment i
while not (x == rel_col and y == rel_row):
if action_num == layer - 1:
i += 1 # move to next spiral
x = i
layer = (2 * i + 1) ** 2 # calculate breakpoint for next spiral
elif (
x == i and y < i
): # traverses from beginning of layer to top right corner
y += 1
elif x > -i and y == i: # traverses from top right to top left corner
x -= 1
elif (
x == -i and y > -i
): # traverses from top left to bottom left corner
y -= 1
elif (
x < i and y == -i
): # traverses from bottom left to bottom right corner
x += 1
elif (
x == i and y < 0
): # traverses from bottom left corner to end of layer
y += 1
action_num += 1
# prepares final data dictionary to build DataFrame
out_data = {
"ID": [traj_num],
"PREV": [prev_state],
"ACT": [action_num],
"CUR": [cur_state],
}
# overwrites the coordinates of the first state in interpolated transitions to be original raw values
if options["append_coords"]:
out_data["LON"] = [row["LON"]]
out_data["LAT"] = [row["LAT"]]
return out_data
def get_action_interp_with_diag(row, options, grid_params):
"""Calculates the actions taken from the previous state to reach the current state, interpolating if necessary.
First, the relative offset between the current and previous state in rows and columns is calculated.
Then the sign of ``rel_row`` and ``rel_col`` are then used to iteratively describe a sequence of actions
from the previous state to current state, breaking up state transitions with multiple actions if
the states are not adjacent (including diagonals, resulting in 9 possible actions). This interpolation
assumes a deterministic system.
Example:
For example, if ``prev_state = 5``, ``cur_state = 7``, and ``num_cols = 4``, then our state grid is populated
as follows::
8 9 10 11
4 p 6 c
0 1 2 3
Output snippet::
pd.DataFrame({})
Where p represents the location of the previous state, and c represents the location of the current state.
Then the current state's position relative to the previous state is ``rel_row = 0``, ``rel_col = 2``. Our
action spiral then looks like this::
4 3 2 4 3 2
5 0 1 -> 5 p 1 c
7 8 9 6 7 8
Output snippet::
pd.DataFrame({
'ID': [traj_num, ],
'PREV': [prev_state, ],
'ACT': [1, ],
'CUR': [prev_state + 1, ]
})
Because the current state is not adjacent (including diagonals), we interpolate by taking the action that
brings us closest to the current state: action ``1``, resulting in a new action spiral and a new previous
state::
4 3 2 4 3 2
5 0 1 -> 5 p c
7 8 9 6 7 8
Final output::
pd.DataFrame({
'ID': [traj_num] * 2,
'PREV': [prev_state, prev_state + 1],
'ACT': [1, 1],
'CUR': [prev_state + 1, cur_state]
})
Now, our new previous state is adjacent to the current state, so we can take action ``1``, which updates our
previous state to exactly match the current state, so the algorithm terminates and returns the list of
state-action-state transitions.
Args:
row (pandas.Series): One row of the DataFrame the function is applied to, containing the trajectory number,
previous state, current state, longitude, and latitude.
options (dict): The script options specified in the ``config_file``.
grid_params (dict): The grid parameters specified in the ``config_file``.
Returns:
dict: State-action-state triplets that interpolate between ``prev_state`` and ``cur_state``.
"""
# retrieves transition data
traj_num = row["ID"].astype(int)
prev_state = row["PREV"].astype(int)
cur_state = row["CUR"].astype(int)
num_cols = grid_params["num_cols"]
# instantiate lists to hold column values for final DataFrame output
prevs = []
acts = []
curs = []
lons = []
lats = []
# gets row, column decomposition for previous and current states
prev_row = prev_state // num_cols
prev_col = prev_state % num_cols
cur_row = cur_state // num_cols
cur_col = cur_state % num_cols
# calculates current state's position relative to previous state
rel_row = cur_row - prev_row
rel_col = cur_col - prev_col
# write output rows until rel_row and rel_col are both zero
# out_rows = []
while not (rel_row == 0 and rel_col == 0):
# selects action to minimize rel_row and rel_col
action = -1
if rel_row > 0 and rel_col > 0:
action = 2
elif rel_row > 0 and rel_col == 0:
action = 3
elif rel_row > 0 and rel_col < 0:
action = 4
elif rel_row == 0 and rel_col > 0:
action = 1
elif rel_row == 0 and rel_col < 0:
action = 5
elif rel_row < 0 and rel_col > 0:
action = 8
elif rel_row < 0 and rel_col == 0:
action = 7
elif rel_row < 0 and rel_col < 0:
action = 6
# moves rel_row and rel_col in the opposite directions of their signs
row_diff = -np.sign(rel_row)
col_diff = -np.sign(rel_col)
# updates states and relative row, column based on action selected
rel_row += row_diff
rel_col += col_diff
temp_row = prev_row - row_diff
temp_col = prev_col - col_diff
temp_state = temp_row * num_cols + temp_col
prev_state = prev_row * num_cols + prev_col
# records an interpolated state-action-state transition
prevs.append(prev_state)
acts.append(action)
curs.append(temp_state)
# gets the coordinates of the interpolated state - will be the coordinates of the middle of the state
if options["append_coords"]:
lon, lat = state_to_coord(prev_state, options, grid_params)
lons.append(lon)
lats.append(lat)
prev_row = temp_row
prev_col = temp_col
# prepares final data dictionary to build DataFrame
out_data = {
"ID": [traj_num] * len(prevs),
"PREV": prevs,
"ACT": acts,
"CUR": curs,
}
# overwrites the coordinates of the first state in interpolated transitions to be original raw values
if options["append_coords"]:
lons[0] = row["LON"]
lats[0] = row["LAT"]
out_data["LON"] = lons
out_data["LAT"] = lats
return out_data
def get_action_interp_reg(row, options, grid_params):
"""Calculates the actions taken from the previous state to reach the current state, interpolating if necessary.
First, the relative offset between the current and previous state in rows and columns is calculated.
Then the sign of ``rel_row`` and ``rel_col`` are then used to iteratively describe a sequence of actions
from the previous state to current state, breaking up state transitions with multiple actions if
the states are not adjacent (only actions are right, left, up, down, and none). This interpolation
assumes a deterministic system.
Example:
For example, if ``prev_state = 5``, ``cur_state = 7``, and ``num_cols = 4``, then our state grid is populated
as follows::
8 9 10 11
4 p 6 c
0 1 2 3
Output snippet::
pd.DataFrame({})
Where p represents the location of the previous state, and c represents the location of the current state.
Then the current state's position relative to the previous state is ``rel_row = 0``, ``rel_col = 2``. Our action
spiral then looks like this::
2 2
3 0 1 -> 3 p 1 c
4 4
Output snippet::
output: pd.DataFrame({
'ID': [traj_num, ],
'PREV': [prev_state, ],
'ACT': [1, ],
'CUR': [prev_state + 1, ]
})
Because the current state is not adjacent, we interpolate by taking the action that brings us closest to
the current state: action ``1``, resulting in a new action spiral and a new previous state::
2 1
3 0 1 -> 2 p c
4 4
Final output::
pd.DataFrame({
'ID': [traj_num] * 2,
'PREV': [prev_state, prev_state + 1],
'ACT': [1, 1],
'CUR': [prev_state + 1, cur_state]
})
Now, our new previous state is adjacent to the current state, so we can take action ``1``, which updates our
previous state to exactly match the current state, so the algorithm terminates and returns the list of
state-action-state transitions.
Args:
row (pandas.Series): One row of the DataFrame the function is applied to, containing the trajectory number,
previous state, current state, longitude, and latitude.
options (dict): The script options specified in the ``config_file``.
grid_params (dict): The grid parameters specified in the ``config_file``.
Returns:
dict: State-action-state triplets that interpolate between ``prev_state`` and ``cur_state``.
"""
# retrieves transition data
traj_num = row["ID"].astype(int)
prev_state = row["PREV"].astype(int)
cur_state = row["CUR"].astype(int)
num_cols = grid_params["num_cols"]
# instantiate lists to hold column values for final DataFrame output
prevs = []
acts = []
curs = []
lons = []
lats = []
# gets row, column decomposition for previous and current states
prev_row = prev_state // num_cols
prev_col = prev_state % num_cols
cur_row = cur_state // num_cols
cur_col = cur_state % num_cols
# calculates current state's position relative to previous state
rel_row = cur_row - prev_row
rel_col = cur_col - prev_col
# write output rows until rel_row and rel_col are both zero
while not (rel_row == 0 and rel_col == 0):
# selects action to reduce the largest of rel_row and rel_col
action = -1
if rel_row > 0 and rel_col > 0:
action = 2 if rel_row > rel_col else 1
elif rel_row > 0 and rel_col == 0:
action = 2
elif rel_row > 0 and rel_col < 0:
action = 2 if rel_row > -rel_col else 3
elif rel_row == 0 and rel_col > 0:
action = 1
elif rel_row == 0 and rel_col < 0:
action = 3
elif rel_row < 0 and rel_col > 0:
action = 4 if -rel_row > rel_col else 1
elif rel_row < 0 and rel_col == 0:
action = 4
elif rel_row < 0 and rel_col < 0:
action = 4 if -rel_row > -rel_col else 3
# moves rel_row and rel_col in the opposite directions of their signs
row_diff = -np.sign(rel_row) if (action == 2 or action == 4) else 0
col_diff = -np.sign(rel_col) if (action == 1 or action == 3) else 0
# updates states and relative row, column based on action selected
rel_row += row_diff
rel_col += col_diff
temp_row = prev_row - row_diff
temp_col = prev_col - col_diff
temp_state = temp_row * num_cols + temp_col
prev_state = prev_row * num_cols + prev_col
# records an interpolated state-action-state transition
prevs.append(prev_state)
acts.append(action)
curs.append(temp_state)
# gets the coordinates of the interpolated state - will be the coordinates of the middle of the state
if options["append_coords"]:
lon, lat = state_to_coord(prev_state, options, grid_params)
lons.append(lon)
lats.append(lat)
prev_row = temp_row
prev_col = temp_col
# prepares final data dictionary to build DataFrame
out_data = {
"ID": [traj_num] * len(acts),
"PREV": prevs,
"ACT": acts,
"CUR": curs,
}
# overwrites the coordinates of the first state in interpolated transitions to be original raw values
if options["append_coords"]:
lons[0] = row["LON"]
lats[0] = row["LAT"]
out_data["LON"] = lons
out_data["LAT"] = lats
return out_data
def state_to_coord(state, options, grid_params):
"""Inverse function for ``get_state``.
Calculates the coordinates of the middle of the passed in state in the specified grid passed in.
Args:
state (int): The discretized grid square returned by ``get_state``.
options (dict): The script options specified in the ``config_file``.
grid_params (dict): The grid parameters specified in the ``config_file``.
Returns:
tuple: The longitude and latitude representing the middle of the state passed in.
"""
# calculates the integer state's row and column representation in the grid
state_col = state % grid_params["num_cols"]
state_row = state // grid_params["num_cols"]
# calculates the latitude and longitude corresponding to the middle of the grid square
state_lon = round(
grid_params["min_lon"] + grid_params["grid_len"] * (state_col + 0.5),
options["prec_coords"],
)
state_lat = round(
grid_params["min_lat"] + grid_params["grid_len"] * (state_row + 0.5),
options["prec_coords"],
)
return state_lon, state_lat
if __name__ == "__main__":
main()
``` |
{
"source": "jpalat/psiTurk",
"score": 3
} |
#### File: psiTurk/psiturk/utils.py
```python
import os
import urllib2
import json
def get_my_ip():
"""
Asks an external server what your ip appears to be (useful if
running from behind a NAT/wifi router). Of course, incoming port
to the router must be forwarded correctly.
"""
if 'OPENSHIFT_SECRET_TOKEN' in os.environ:
my_ip = os.environ['OPENSHIFT_APP_DNS']
else:
my_ip = json.load(urllib2.urlopen(
'http://httpbin.org/ip'
))['origin']
return my_ip
def colorize(target, color, use_escape=True):
''' Colorize target string. Set use_escape to false when text will not be
interpreted by readline, such as in intro message.'''
def escape(code):
''' Escape character '''
return '\001%s\002' % code
if color == 'purple':
color_code = '\033[95m'
elif color == 'cyan':
color_code = '\033[96m'
elif color == 'darkcyan':
color_code = '\033[36m'
elif color == 'blue':
color_code = '\033[94m'
elif color == 'green':
color_code = '\033[92m'
elif color == 'yellow':
color_code = '\033[93m'
elif color == 'red':
color_code = '\033[91m'
elif color == 'white':
color_code = '\033[37m'
elif color == 'bold':
color_code = '\033[1m'
elif color == 'underline':
color_code = '\033[4m'
else:
color_code = ''
if use_escape:
return escape(color_code) + target + escape('\033[0m')
else:
return color_code + target + '\033[m'
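# e.g. colorize("[psiTurk server]", "green") wraps the text in ANSI green escape codes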
``` |
{
"source": "jpalczewski/pills",
"score": 3
} |
#### File: hardware/dht/__init__.py
```python
from .DHT22 import sensor
import asyncio
import pigpio
async def poll_once():
pi = pigpio.pi()
s = sensor(pi, 24, LED=None, power=None,DHT11=False)
s.trigger()
await asyncio.sleep(0.2)  # give the read time to complete without blocking the event loop
humidity = s.humidity()
temperature = s.temperature()
s.cancel()
pi.stop()
return (humidity, temperature)
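# Example (assumes a DHT22 data line on GPIO 24 and a running pigpio daemon):
#   humidity, temperature = await poll_once()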
``` |
{
"source": "jpalczewski/probable-broccoli",
"score": 2
} |
#### File: jpalczewski/probable-broccoli/test.py
```python
from dotenv import load_dotenv
from IPython import embed
import os
from fbchat.models import *
load_dotenv()
from fbchat import Client,log
class EchoBot(Client):
def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):
self.markAsDelivered(thread_id, message_object.uid)
self.markAsRead(thread_id)
log.info("{} from {} in {}".format(message_object, thread_id, thread_type.name))
if message_object.text == "polska":
reply = Message(text="Husaria")
self.send(reply, thread_id=thread_id, thread_type=thread_type)
self.changeThreadEmoji("🥵", thread_id=thread_id)
if message_object.text == "raczej":
self.changeThreadColor(thread_id=thread_id, color= ThreadColor.VIKING)
if message_object.text == "debug":
embed()
# If you're not the author, echo
if author_id != self.uid:
self.send(message_object, thread_id=thread_id, thread_type=thread_type)
client = EchoBot(os.getenv("FB_LOGIN"), os.getenv("FB_PASS"))
client.listen()
``` |
{
"source": "jpallister/Sublime-Text-AutoCorrect-Plugin",
"score": 3
} |
#### File: jpallister/Sublime-Text-AutoCorrect-Plugin/AutoCorrect.py
```python
import sublime_plugin
import SubList
import sublime
import functools
# Eventually this should be end user updatable
corrections = {"bitwdith": "bit-width", "wdith": "width"}
class AutoCorrectCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.view.run_command("insert", {"characters": " "})
sublime.set_timeout(self.do_correction, 10)
def do_correction(self):
edit = self.view.begin_edit()
rs = self.view.sel()
regions_to_correct = []
for r in rs:
if r.b - r.a == 0:
word = self.view.word(r.begin() - 1)
sword = self.view.substr(word)
can_local_correct = sword in corrections # locally set should take priority
correct = SubList.match(sword)
if word.end() <= r.begin() - 1 and (can_local_correct or correct != ""):
if can_local_correct:
correct = corrections[sword]
regions_to_correct.append((correct, word))
sublime.status_message("AutoCorrect: \"" + sword + "\" corrected to \"" + correct + "\"")
self.view.end_edit(edit)
edit = self.view.begin_edit()
for i, (correct, word) in enumerate(regions_to_correct[::-1]):
reg = self.view.word(word)
self.view.replace(edit, reg, correct)
self.view.add_regions("correction_outline" + str(i), [self.view.word(reg.begin())], "mark", sublime.DRAW_EMPTY)
sublime.set_timeout(functools.partial(self.remove_regions, len(regions_to_correct)), 500)
self.view.end_edit(edit)
def remove_regions(self, n):
for i in range(n):
self.view.erase_regions("correction_outline" + str(i))
``` |
{
"source": "JPalmaUCT/2FasesCompilador",
"score": 3
} |
#### File: JPalmaUCT/2FasesCompilador/analizador_lexico.py
```python
import json
#The key is the lexeme (what is read from the program) and the value is the token
#example lexeme: = ; token: assignment operator
#here the token could also carry a description instead of just repeating the keyword
#example 'ari': 'ari / if conditional'
#Several are still missing; check the table and add them
reservadas = { 'ari':'if', #if
'chayri':'else', #else
'kawsachiy':'while', #while
'jaykaxpax':'for', #for
'imbabura':'function', #function
'harkay':'break', #break
'apachimuy' : 'import', #import
'kutichipuy' : 'return', #return
'pachan': 'tipo_entero', #int
'killaywaska' : 'tipo_string', #string
'pasaqlla': 'tipo_flotante', #float
'huknin':'tipo_booleano', #boolean
'chiqap':'valor_booleano', #true
'llulla':'valor_booleano', #false
               'chitiripuy' : 'tipo_alerta', #alert. New data type for our context
               'chaa' : 'alerta', #green
               'karwa':'alerta', #yellow
               'antipuka':'alerta', #red
               'anchuna' : 'sen_evacuar', #evacuate
               'kakuy':'sen_no_evacuar', #do not evacuate
#'apachimuy': 'apachimuy / decision', #decision
'rikhuchiy':'imprimir', #print
'puncu': 'input', #input
'tapuyAriq':'funcion_medir_volcan', #medirVolcan
'apu':'operador_or',
'alqa':'operador_and'
}
operadores = {'=': 'operador_asignacion',
'+': 'operador_suma',
'-' : 'operador_resta',
'/' : 'operador_division',
'*': 'operador_multiplicacion',
'++' : 'operador_incremento',
'--' : 'operador_decremento'}
comparadores = {'<':'comparador',
'<=':'comparador',
'>':'comparador',
'>=':'comparador',
'==':'comparador',
'!=':'comparador'}
delimitadores = {'(':'parentesis_apertura',
')':'parentesis_cierre',
'{':'delimitador_apertura',
'}':'delimitador_cierre',
';':'fin_sentencia'}
#get the possible lexemes of each block (an array)
#for example, delimitadores_lexema would be: ['(',')','{','}']
operadores_lexema = operadores.keys()
comparadores_lexema = comparadores.keys()
reservadas_lexema = reservadas.keys()
delimitadores_lexema = delimitadores.keys()
"""#jaja
for i in reservadas:
print (i)
for i in operadores:
print (i)
for i in comparadores:
print (i)
for i in delimitadores:
print (i)"""
#Quechua letters that are allowed
permitidos = ['a','c','h','i','j','k','l','m','n','ntilde','p','q', 'r', 's', 't', 'u','w','y',
'A','C','H','I','J','K','L','M','N','NTILDE','P','Q', 'R', 'S', 'T', 'U','W','Y','_']
numeros = ['0','1','2','3','4','5','6','7','8','9']
#checks whether the lexeme read from the file is an identifier (a letter followed by letters or digits)
def es_identificador(lexema):
esIdentificador = True
    inicial = lexema[0] #first character; it must be a letter
if not inicial in permitidos:
esIdentificador = False
if len(lexema) > 1:
for i in range(1,len(lexema)):
if not lexema[i] in permitidos and not lexema[i] in numeros:
esIdentificador = False
return esIdentificador
#checks whether the lexeme read from the file is a floating point number (it must contain a '.')
def es_flotante(lexema):
comprobacion = False
for dig in lexema:
if dig == ".":
comprobacion = True
if comprobacion:
try:
float(lexema)
except:
comprobacion = False
return comprobacion
#checks whether the lexeme read from the file is an integer
def es_entero(lexema):
return lexema.isdigit()
"""def es_cadena(lexema):
return type(lexema).__name__ == "str"""
#tabla will contain every token detected in the file
estructura = {}
tabla = []
#each token (an element returned by crearToken) will be appended to tabla
def crearToken(token,lexema,linea):
myToken = {}
myToken["token"] = token
myToken["lexema"] = lexema
myToken["linea"] = linea
return myToken
def eliminarEspaciosyComentarios(codigo):
for i in range(len(codigo)):
codigo[i] = codigo[i].strip()
cod_sin_espacio = []
for lex in codigo:
if lex != "":
cod_sin_espacio.append(lex)
indice = len(codigo)
for i in range(len(cod_sin_espacio)):
if len(cod_sin_espacio[i]) >= 2:
if cod_sin_espacio[i][0] =='/' and cod_sin_espacio[i][1] =='/':
print(indice)
indice = i
print("new")
print(indice)
cod_sin_espacio = cod_sin_espacio[:indice]
return cod_sin_espacio
#Open the file in read mode
f=open("programa", "r")
i =f.read()
linea = 0
program = i.split('\n') #split on newlines and stored in an array ['the','whole','program','like','this']
identificado = False
for line in program:
    #split them on spaces
codigo = line.split(' ')
    #Remove whitespace from the code
codigo = eliminarEspaciosyComentarios(codigo)
    #Whitespace is removed
#for i in range(len(codigo)):
#codigo[i] = codigo[i].strip()
linea += 1
for lexema in codigo:
if lexema in operadores_lexema:
myToken = crearToken(operadores[lexema],lexema,linea)
identificado = True
if lexema in reservadas_lexema:
myToken = crearToken(reservadas[lexema],lexema,linea)
identificado = True
if lexema in comparadores_lexema:
myToken = crearToken(comparadores[lexema],lexema,linea)
identificado = True
if lexema in delimitadores_lexema:
myToken = crearToken(delimitadores[lexema],lexema,linea)
identificado = True
if es_entero(lexema):
myToken = crearToken("numero_entero",lexema,linea)
identificado = True
if es_flotante(lexema):
myToken = crearToken("numero_flotante",lexema,linea)
identificado = True
        #If the lexeme was not identified
if not identificado:
            #Check whether it is an identifier
if es_identificador(lexema):
myToken = crearToken("identificador",lexema,linea)
tabla.append(myToken)
            #If it definitely cannot be identified
else:
print ("error, no se que es:", lexema, "en la linea", linea)
        #If the lexeme was identified
else:
            #add it to the token table
tabla.append(myToken)
identificado = False
estructura["Tokens"] = tabla
#Show the token table
#for token in tabla:
# print (token)
with open('tokens.json', 'w') as json_file:
json.dump(estructura, json_file)
#details:
#for now each token in the file must be separated by a space, for example:
#this is fine: a = 20 / 3.8 (because the tokens are separated)
#this is wrong: a = 20/ 3.8 (because 20 is read as 20/ and it does not know what that is)
#that could still be fixed
#in the example it does not know what x is because it is not in the alphabet (as intended)
``` |
{
"source": "jpalvesl/doom-fire-algorithm",
"score": 3
} |
#### File: pythonic-doom-fire/pythonic-doom-fire/doom_fire.py
```python
from abc import ABC, abstractmethod
from color_palette import ColorPalette
from functools import reduce
from random import randint
class DoomFire(ABC):
def __init__(self, width, height, pixel_size = 4, decay_rate = 2, \
windforce = 1, fire_source_inc = (4, 6), \
fire_source_enabled = True, color_palette = ColorPalette()):
self.width = width
self.height = height
self.pixel_size = pixel_size
self.decay_rate = decay_rate
self.windforce = windforce
self.fire_source_inc = fire_source_inc
self.color_palette = color_palette
self.max_intensity = len(self.color_palette.get_colors()) - 1
self.pixels_array = [0] * self.width * self.height
self.fire_source_enabled = fire_source_enabled
if self.fire_source_enabled:
self.create_fire_source()
def create_fire_source(self):
self.pixels_array[-self.width:] = [self.max_intensity] * self.width
self.fire_source_enabled = True
def destroy_fire_source(self):
self.pixels_array[-self.width:] = [0] * self.width
self.fire_source_enabled = False
def has_fire_source(self):
return self.fire_source_enabled
def increase_fire_source(self):
fire_source_row = self.pixels_array[-self.width:]
for i, f in enumerate(fire_source_row):
if f == self.max_intensity:
continue
inc = randint(self.fire_source_inc[0], self.fire_source_inc[1])
fire_source_row[i] += inc if f + inc <= self.max_intensity else \
self.max_intensity - f
self.pixels_array[-self.width:] = fire_source_row
fire_source_row_sum = reduce(lambda x, y: x + y, fire_source_row)
if fire_source_row_sum > 0 and not self.fire_source_enabled:
self.fire_source_enabled = True
def decrease_fire_source(self):
fire_source_row = self.pixels_array[-self.width:]
for i, f in enumerate(fire_source_row):
if f == 0:
continue
dec = randint(self.fire_source_inc[0], self.fire_source_inc[1])
fire_source_row[i] -= dec if f - dec >= 0 else f
self.pixels_array[-self.width:] = fire_source_row
fire_source_row_sum = reduce(lambda x, y: x + y, fire_source_row)
if fire_source_row_sum == 0 and self.fire_source_enabled:
self.fire_source_enabled = False
def update(self):
for j in range(self.width):
for i in range(self.height - 1):
current_pixel_index = i * self.width + j
below_pixel_index = current_pixel_index + self.width
below_pixel_intensity = self.pixels_array[below_pixel_index]
decay = randint(0, self.decay_rate)
new_pixel_intensity = 0
if below_pixel_intensity - decay > 0:
new_pixel_intensity = below_pixel_intensity - decay
wind_direction = randint(-self.windforce, self.windforce)
# Checking if the wind direction exceeds the boundaries of
# pixels array and if it does, reverse it
if current_pixel_index + wind_direction >= \
len(self.pixels_array) or current_pixel_index + \
wind_direction < 0:
wind_direction = -wind_direction
pixel_neighbor_index = current_pixel_index + wind_direction
self.pixels_array[pixel_neighbor_index] = new_pixel_intensity
@abstractmethod
def render(self):
pass
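# --- Illustration only (not from the repo): a minimal concrete renderer ---
# render() is left abstract above; a subclass only has to map each intensity in
# pixels_array (0 .. max_intensity) to some output, e.g. ASCII glyphs.
class AsciiDoomFire(DoomFire):
    GLYPHS = " .:-=+*#%@"

    def render(self):
        scale = len(self.GLYPHS) - 1
        for row in range(self.height):
            line = self.pixels_array[row * self.width:(row + 1) * self.width]
            print("".join(self.GLYPHS[p * scale // max(1, self.max_intensity)] for p in line))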
``` |
{
"source": "jpambrun/neural-structured-learning",
"score": 2
} |
#### File: neural_structured_learning/tools/build_graph_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from neural_structured_learning.tools import build_graph as build_graph_lib
from neural_structured_learning.tools import graph_utils
import tensorflow as tf
from google.protobuf import text_format
class BuildGraphTest(absltest.TestCase):
def _create_embedding_file(self):
return self.create_tempfile('embeddings.tfr').full_path
def _create_graph_file(self):
return self.create_tempfile('graph.tsv').full_path
def _write_embeddings(self, embedding_output_path):
example1 = """
features {
feature {
key: "id"
value: { bytes_list { value: [ "A" ] } }
}
feature {
key: "embedding"
value: { float_list { value: [ 1, 1, 0 ] } }
}
}
"""
example2 = """
features {
feature {
key: "id"
value: { bytes_list { value: [ "B" ] } }
}
feature {
key: "embedding"
value: { float_list { value: [ 1, 0, 1] } }
}
}
"""
example3 = """
features {
feature {
key: "id"
value: { bytes_list { value: [ "C" ] } }
}
feature {
key: "embedding"
value: { float_list { value: [ 0, 1, 1] } }
}
}
"""
# The embedding vectors above are chosen so that the cosine of the angle
# between each pair of them is 0.5.
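    # For example, for A = (1, 1, 0) and B = (1, 0, 1): A.B = 1 and |A| = |B| = sqrt(2),
    # so cos(angle) = 1 / (sqrt(2) * sqrt(2)) = 0.5; the remaining pairs work out the same way.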
with tf.io.TFRecordWriter(embedding_output_path) as writer:
for example_str in [example1, example2, example3]:
example = text_format.Parse(example_str, tf.train.Example())
writer.write(example.SerializeToString())
def testGraphBuildingNoThresholding(self):
"""All edges whose weight is greater than 0 are retained."""
embedding_path = self._create_embedding_file()
self._write_embeddings(embedding_path)
graph_path = self._create_graph_file()
build_graph_lib.build_graph([embedding_path],
graph_path,
similarity_threshold=0)
g_actual = graph_utils.read_tsv_graph(graph_path)
self.assertDictEqual(
g_actual, {
'A': {
'B': 0.5,
'C': 0.5
},
'B': {
'A': 0.5,
'C': 0.5
},
'C': {
'A': 0.5,
'B': 0.5
}
})
def testGraphBuildingWithThresholding(self):
"""Edges below the similarity threshold are not part of the graph."""
embedding_path = self._create_embedding_file()
self._write_embeddings(embedding_path)
graph_path = self._create_graph_file()
build_graph_lib.build_graph([embedding_path],
graph_path,
similarity_threshold=0.51)
g_actual = graph_utils.read_tsv_graph(graph_path)
self.assertDictEqual(g_actual, {})
if __name__ == '__main__':
# Ensure TF 2.0 behavior even if TF 1.X is installed.
tf.compat.v1.enable_v2_behavior()
absltest.main()
``` |
{
"source": "jpambrun/pynndescent",
"score": 2
} |
#### File: pynndescent/pynndescent/sparse_threaded.py
```python
import joblib
import math
import numba
import numpy as np
import pynndescent.sparse as sparse
from pynndescent.utils import heap_push, make_heap, seed, tau_rand_int
from pynndescent.threaded import (
new_rng_state,
per_thread_rng_state,
parallel_calls,
effective_n_jobs_with_context,
chunk_rows,
shuffle_jit,
init_rp_tree_reduce_jit,
new_build_candidates,
nn_decent_reduce_jit,
deheap_sort_map_jit,
)
# NNDescent algorithm
INT32_MIN = np.iinfo(np.int32).min + 1
INT32_MAX = np.iinfo(np.int32).max - 1
# Map Reduce functions to be jitted
@numba.njit(nogil=True)
def sparse_current_graph_map_jit(
heap, rows, n_neighbors, inds, indptr, data, rng_state, seed_per_row, sparse_dist,
):
rng_state_local = rng_state.copy()
for i in rows:
if seed_per_row:
seed(rng_state_local, i)
if heap[0][i, 0] < 0.0:
for j in range(n_neighbors - np.sum(heap[0][i] >= 0.0)):
idx = np.abs(tau_rand_int(rng_state_local)) % data.shape[0]
from_inds = inds[indptr[i] : indptr[i + 1]]
from_data = data[indptr[i] : indptr[i + 1]]
to_inds = inds[indptr[idx] : indptr[idx + 1]]
to_data = data[indptr[idx] : indptr[idx + 1]]
d = sparse_dist(from_inds, from_data, to_inds, to_data)
heap_push(heap, i, d, idx, 1)
return True
def sparse_init_random(
current_graph,
inds,
indptr,
data,
dist,
n_neighbors,
chunk_size,
rng_state,
parallel,
seed_per_row=False,
):
n_vertices = data.shape[0]
n_tasks = int(math.ceil(float(n_vertices) / chunk_size))
# store the updates in an array
max_heap_update_count = chunk_size * n_neighbors * 2
heap_updates = np.zeros((n_tasks, max_heap_update_count, 4), dtype=np.float32)
heap_update_counts = np.zeros((n_tasks,), dtype=np.int64)
rng_state_threads = per_thread_rng_state(n_tasks, rng_state)
def current_graph_map(index):
rows = chunk_rows(chunk_size, index, n_vertices)
return (
index,
sparse_current_graph_map_jit(
current_graph,
rows,
n_neighbors,
inds,
indptr,
data,
rng_state_threads[index],
seed_per_row=seed_per_row,
sparse_dist=dist,
),
)
# run map functions
for index, status in parallel(parallel_calls(current_graph_map, n_tasks)):
if status is False:
raise ValueError("Failed in random initialization")
return
@numba.njit(nogil=True, fastmath=True)
def sparse_init_rp_tree_map_jit(
rows, leaf_array, inds, indptr, data, heap_updates, sparse_dist
):
count = 0
for n in rows:
if n >= leaf_array.shape[0]:
break
tried = set([(-1, -1)])
for i in range(leaf_array.shape[1]):
la_n_i = leaf_array[n, i]
if la_n_i < 0:
break
for j in range(i + 1, leaf_array.shape[1]):
la_n_j = leaf_array[n, j]
if la_n_j < 0:
break
if (la_n_i, la_n_j) in tried:
continue
from_inds = inds[indptr[la_n_i] : indptr[la_n_i + 1]]
from_data = data[indptr[la_n_i] : indptr[la_n_i + 1]]
to_inds = inds[indptr[la_n_j] : indptr[la_n_j + 1]]
to_data = data[indptr[la_n_j] : indptr[la_n_j + 1]]
d = sparse_dist(from_inds, from_data, to_inds, to_data)
hu = heap_updates[count]
hu[0] = la_n_i
hu[1] = d
hu[2] = la_n_j
hu[3] = 1
count += 1
hu = heap_updates[count]
hu[0] = la_n_j
hu[1] = d
hu[2] = la_n_i
hu[3] = 1
count += 1
tried.add((la_n_i, la_n_j))
tried.add((la_n_j, la_n_i))
return count
def sparse_init_rp_tree(
inds, indptr, data, dist, current_graph, leaf_array, chunk_size, parallel
):
n_vertices = data.shape[0]
n_tasks = int(math.ceil(float(n_vertices) / chunk_size))
# store the updates in an array
max_heap_update_count = chunk_size * leaf_array.shape[1] * leaf_array.shape[1] * 2
heap_updates = np.zeros((n_tasks, max_heap_update_count, 4), dtype=np.float32)
heap_update_counts = np.zeros((n_tasks,), dtype=np.int64)
def init_rp_tree_map(index):
rows = chunk_rows(chunk_size, index, n_vertices)
return (
index,
sparse_init_rp_tree_map_jit(
rows, leaf_array, inds, indptr, data, heap_updates[index], dist,
),
)
def init_rp_tree_reduce(index):
return init_rp_tree_reduce_jit(
n_tasks, current_graph, heap_updates, offsets, index
)
# run map functions
for index, count in parallel(parallel_calls(init_rp_tree_map, n_tasks)):
heap_update_counts[index] = count
# sort and chunk heap updates so they can be applied in the reduce
max_count = heap_update_counts.max()
offsets = np.zeros((n_tasks, max_count), dtype=np.int64)
def shuffle(index):
return shuffle_jit(
heap_updates, heap_update_counts, offsets, chunk_size, n_vertices, index
)
parallel(parallel_calls(shuffle, n_tasks))
# then run reduce functions
parallel(parallel_calls(init_rp_tree_reduce, n_tasks))
@numba.njit(nogil=True, fastmath=True)
def sparse_nn_descent_map_jit(
rows,
max_candidates,
inds,
indptr,
data,
new_candidate_neighbors,
old_candidate_neighbors,
heap_updates,
offset,
sparse_dist,
):
count = 0
for i in rows:
i -= offset
for j in range(max_candidates):
p = int(new_candidate_neighbors[0][i, j])
if p < 0:
continue
for k in range(j, max_candidates):
q = int(new_candidate_neighbors[0][i, k])
if q < 0:
continue
from_inds = inds[indptr[p] : indptr[p + 1]]
from_data = data[indptr[p] : indptr[p + 1]]
to_inds = inds[indptr[q] : indptr[q + 1]]
to_data = data[indptr[q] : indptr[q + 1]]
d = sparse_dist(from_inds, from_data, to_inds, to_data)
hu = heap_updates[count]
hu[0] = p
hu[1] = d
hu[2] = q
hu[3] = 1
count += 1
hu = heap_updates[count]
hu[0] = q
hu[1] = d
hu[2] = p
hu[3] = 1
count += 1
for k in range(max_candidates):
q = int(old_candidate_neighbors[0][i, k])
if q < 0:
continue
from_inds = inds[indptr[p] : indptr[p + 1]]
from_data = data[indptr[p] : indptr[p + 1]]
to_inds = inds[indptr[q] : indptr[q + 1]]
to_data = data[indptr[q] : indptr[q + 1]]
d = sparse_dist(from_inds, from_data, to_inds, to_data)
hu = heap_updates[count]
hu[0] = p
hu[1] = d
hu[2] = q
hu[3] = 1
count += 1
hu = heap_updates[count]
hu[0] = q
hu[1] = d
hu[2] = p
hu[3] = 1
count += 1
return count
def sparse_nn_descent(
inds,
indptr,
data,
n_vertices,
n_neighbors,
rng_state,
max_candidates=50,
dist=sparse.sparse_euclidean,
n_iters=10,
delta=0.001,
rp_tree_init=False,
leaf_array=None,
verbose=False,
n_jobs=None,
seed_per_row=False,
):
if rng_state is None:
rng_state = new_rng_state()
with joblib.Parallel(prefer="threads", n_jobs=n_jobs) as parallel:
n_tasks = effective_n_jobs_with_context(n_jobs)
chunk_size = int(math.ceil(n_vertices / n_tasks))
current_graph = make_heap(n_vertices, n_neighbors)
if rp_tree_init:
sparse_init_rp_tree(
inds,
indptr,
data,
dist,
current_graph,
leaf_array,
chunk_size,
parallel,
)
sparse_init_random(
current_graph,
inds,
indptr,
data,
dist,
n_neighbors,
chunk_size,
rng_state,
parallel,
seed_per_row=seed_per_row,
)
# store the updates in an array
# note that the factor here is `n_neighbors * n_neighbors`, not `max_candidates * max_candidates`
# since no more than `n_neighbors` candidates are added for each row
max_heap_update_count = chunk_size * n_neighbors * n_neighbors * 4
heap_updates = np.zeros((n_tasks, max_heap_update_count, 4), dtype=np.float32)
heap_update_counts = np.zeros((n_tasks,), dtype=np.int64)
for n in range(n_iters):
if verbose:
print("\t", n, " / ", n_iters)
(new_candidate_neighbors, old_candidate_neighbors) = new_build_candidates(
current_graph,
n_vertices,
n_neighbors,
max_candidates,
chunk_size,
rng_state,
parallel,
seed_per_row=seed_per_row,
)
def nn_descent_map(index):
rows = chunk_rows(chunk_size, index, n_vertices)
return (
index,
sparse_nn_descent_map_jit(
rows,
max_candidates,
inds,
indptr,
data,
new_candidate_neighbors,
old_candidate_neighbors,
heap_updates[index],
offset=0,
sparse_dist=dist,
),
)
def nn_decent_reduce(index):
return nn_decent_reduce_jit(
n_tasks, current_graph, heap_updates, offsets, index
)
# run map functions
for index, count in parallel(parallel_calls(nn_descent_map, n_tasks)):
heap_update_counts[index] = count
# sort and chunk heap updates so they can be applied in the reduce
max_count = heap_update_counts.max()
offsets = np.zeros((n_tasks, max_count), dtype=np.int64)
def shuffle(index):
return shuffle_jit(
heap_updates,
heap_update_counts,
offsets,
chunk_size,
n_vertices,
index,
)
parallel(parallel_calls(shuffle, n_tasks))
# then run reduce functions
c = 0
for c_part in parallel(parallel_calls(nn_decent_reduce, n_tasks)):
c += c_part
if c <= delta * n_neighbors * data.shape[0]:
break
def deheap_sort_map(index):
rows = chunk_rows(chunk_size, index, n_vertices)
return index, deheap_sort_map_jit(rows, current_graph)
parallel(parallel_calls(deheap_sort_map, n_tasks))
return current_graph[0].astype(np.int64), current_graph[1]
``` |
{
"source": "jpan127/RJD-MP3",
"score": 3
} |
#### File: jp/python/server.py
```python
import socket # Sockets
import sys # Flush
import logging # Print debug
import json # Writing data
import os.path # Checking file path exists
import datetime # Get current date + time
import pprint # Pretty print
import time
import http.server
import socketserver
import threading
# Default address
PORT = 11111
IP = "0.0.0.0"
# Set logging level
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)s) %(message)s',)
""" Opens a UDP socket in server mode """
class UdpServer():
def __init__(self, port=PORT, ip=IP):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.settimeout(1)
self.port = port
self.ip = ip
self.sock.bind((self.ip, self.port))
# logging.debug("UDP Socket initialized.")
# logging.debug("Listening on port: %i", self.port)
# sys.stdout.flush()
def listen(self):
try:
while True:
data, addr = self.sock.recvfrom(4096)
if data:
logging.debug("Packet: %s" % data)
sys.stdout.flush()
else:
break
except socket.timeout as e:
pass
except Exception as e:
raise
def close(self):
logging.debug("Closing socket: %i", self.port)
sys.stdout.flush()
self.sock.close()
class UdpClient():
def __init__(self, ip, port):
self.ip = ip
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.setblocking(0)
# self.sock.bind(("192.168.1.250", 11111))
def send(self, message):
print(self.sock.sendto(message, (self.ip, self.port)))
def close(self):
self.sock.close()
# Thread 1
def host_http_server():
logging.debug("Creating HTTP Server...")
handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), handler)
# Start running
logging.debug("Serving on port: %i", PORT)
sys.stdout.flush()
httpd.serve_forever()
# Thread 2
def diagnostic_port():
logging.debug("Creating UDP port...")
udp_server = UdpServer(6666, "192.168.1.250")
while True:
udp_server.listen()
# Thread 3
def command_port():
udp_client = UdpClient("192.168.1.250", 5555)
logging.debug("Created client")
while True:
logging.debug("Sending...")
udp_client.send(str.encode("pingpong"))
time.sleep(0.5)
def main():
threads = [
threading.Thread(target=host_http_server),
threading.Thread(target=diagnostic_port),
threading.Thread(target=command_port)
]
for thread in threads:
        thread.daemon = True
thread.start()
# Keep main thread alive
try:
while threading.active_count() > 0:
time.sleep(0.001)
except KeyboardInterrupt:
logging.debug("Received KeyboardInterrupt...")
logging.debug("Closing all threads.")
sys.exit()
if __name__ == "__main__":
main()
```
#### File: mp3/server/views.py
```python
from django.shortcuts import render, redirect
from django.conf.urls import url
from django.views.generic import ListView, TemplateView
import socket, time, os, random, ctypes, sys
from django.views import View
import threading
class AboutView(TemplateView):
template_name = "index.html"
class Server():
# 192.168.43.194 -- phone IP
server = '192.168.43.194'
port = 11000
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
con = s.connect((server,port))
def send(self,action):
        if action == 'play':  # compare string values with '==', not identity with 'is'
            self.send_data(0x04040000)
        elif action == 'pause':
            self.send_data(0x07040000)
        elif action == 'previous':
            self.send_data(0x06040000)
        elif action == 'next':
            self.send_data(0x05040000)
        elif action == 'fastforward':
            self.send_data(0x08040000)
        elif action == 'shuffle':
            self.send_data(0x0B040000)
        elif action == 'volumeUp':
            self.send_data(0x12040000)
        elif action == 'volumeDown':
            self.send_data(0x13040000)
return
def send_data(self,hex_value):
msg_value = (hex_value).to_bytes(4, byteorder='big')
self.s.sendto(msg_value, (self.server,self.port))
def close(self):
self.s.close()
class Play(View):
def get(self,request):
s = Server()
s.send('play')
return redirect('home')
class Pause(View):
def get(self,request):
s = Server()
s.send('pause')
return redirect('home')
class Previous(View):
def get(self,request):
s = Server()
s.send('previous')
return redirect('home')
class Next(View):
def get(self,request):
s = Server()
s.send('next')
return redirect('home')
class Shuffle(View):
def get(self,request):
s = Server()
s.send('shuffle')
return redirect('home')
class Fastforward(View):
def get(self,request):
s = Server()
s.send('fastforward')
return redirect('home')
class volumeUp(View):
def get(self,request):
s = Server()
s.send('volumeUp')
return redirect('home')
class volumeDown(View):
def get(self,request):
s = Server()
s.send('volumeDown')
return redirect('home')
``` |
{
"source": "jpanchal/gildedrose-api",
"score": 3
} |
#### File: gildedrose-api/rest_api/permissions.py
```python
from rest_framework.permissions import BasePermission
from .models import Itemlist
class IsOwner(BasePermission):
"""Custom permission class to allow itemlist owners to edit them."""
def has_object_permission(self, request, view, obj):
"""Return True if permission is granted to the itemlist owner."""
if isinstance(obj, Itemlist):
return obj.owner == request.user
return obj.owner == request.user
```
#### File: gildedrose-api/rest_api/views.py
```python
from rest_framework import generics, permissions
from .permissions import IsOwner
from .serializers import ItemlistSerializer, UserSerializer, CartSerializer
from .models import Itemlist, Cart
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework.views import status
class CreateView(generics.ListCreateAPIView):
"""
GET itemlists/
POST itemlists/
"""
queryset = Itemlist.objects.all()
serializer_class = ItemlistSerializer
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
IsOwner)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class DetailsView(generics.RetrieveUpdateDestroyAPIView):
"""
GET itemlists/:id/
PUT itemlists/:id/
DELETE itemlists/:id/
"""
queryset = Itemlist.objects.all()
serializer_class = ItemlistSerializer
permission_classes = (
permissions.IsAuthenticated,
IsOwner)
class CartCreateView(generics.ListCreateAPIView):
"""
GET buyitem/
POST buyitem/
"""
queryset = Cart.objects.all()
serializer_class = CartSerializer
permission_classes = (
permissions.IsAuthenticated,
IsOwner)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
class CartDetailsView(generics.RetrieveUpdateDestroyAPIView):
"""
GET buyitem/:id/
PUT buyitem/:id/
DELETE buyitem/:id/
"""
queryset = Cart.objects.all()
serializer_class = CartSerializer
permission_classes = (
permissions.IsAuthenticated,
IsOwner)
class UserView(generics.ListAPIView):
"""
GET users/
"""
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetailsView(generics.RetrieveAPIView):
"""
GET users/:id/
"""
queryset = User.objects.all()
serializer_class = UserSerializer
class RegisterUsers(generics.CreateAPIView):
"""
POST auth/register/
"""
permission_classes = (permissions.AllowAny,)
def post(self, request, *args, **kwargs):
username = request.data.get("username", "")
password = request.data.get("password", "")
        if not username or not password:
return Response(
data={
"message": "username and password is required to register a user"
},
status=status.HTTP_400_BAD_REQUEST
)
new_user = User.objects.create_user(
username=username, password=password
)
return Response(
data=UserSerializer(new_user).data,
status=status.HTTP_201_CREATED
)
``` |
{
"source": "jpanda1230/OCRReaderPython",
"score": 3
} |
#### File: jpanda1230/OCRReaderPython/ocrspace_example.py
```python
import requests
import json
def ocr_space_file(filename, overlay=False, api_key='helloworld', language='eng'):
""" OCR.space API request with local file.
Python3.5 - not tested on 2.7
:param filename: Your file path & name.
:param overlay: Is OCR.space overlay required in your response.
Defaults to False.
:param api_key: OCR.space API key.
Defaults to 'helloworld'.
:param language: Language code to be used in OCR.
List of available language codes can be found on https://ocr.space/OCRAPI
Defaults to 'en'.
:return: Result in JSON format.
"""
payload = {'isOverlayRequired': overlay,
'apikey': api_key,
'language': language,
}
with open(filename, 'rb') as f:
r = requests.post('https://api.ocr.space/parse/image',
files={filename: f},
data=payload,
)
return r.content.decode()
def ocr_space_url(url, overlay=False, api_key='helloworld', language='eng'):
""" OCR.space API request with remote file.
Python3.5 - not tested on 2.7
:param url: Image url.
:param overlay: Is OCR.space overlay required in your response.
Defaults to False.
:param api_key: OCR.space API key.
Defaults to 'helloworld'.
:param language: Language code to be used in OCR.
List of available language codes can be found on https://ocr.space/OCRAPI
Defaults to 'en'.
:return: Result in JSON format.
"""
payload = {'url': url,
'isOverlayRequired': overlay,
'apikey': api_key,
'language': language,
}
r = requests.post('https://api.ocr.space/parse/image',
data=payload,
)
return r.content.decode()
# Use examples:
test_file = ocr_space_file(filename='example_image.png', overlay=False, api_key='<KEY>', language='eng')
test_url = ocr_space_url(url='http://i.imgur.com/31d5L5y.jpg', overlay=False, api_key='<KEY>', language='eng')
# some JSON:
#x = '{ "name":"John", "age":30, "city":"New York"}'
# parse x:
y = json.loads(test_file)
z= json.loads(test_url)
# the result is a Python dictionary:
print(y.get('ParsedResults')[0].get('ParsedText'))
print(z.get('ParsedResults')[0].get('ParsedText'))
``` |
{
"source": "jpanetta/Inflatables",
"score": 2
} |
#### File: Inflatables/python/fd_validation.py
```python
import numpy as np
from numpy.linalg import norm
from MeshFEM import sparse_matrices
def preamble(obj, xeval, perturb, etype, fixedVars = []):
if (xeval is None): xeval = obj.getVars()
if (perturb is None): perturb = np.random.uniform(low=-1,high=1, size=obj.numVars())
if (etype is None): etype = obj.__class__.EnergyType.Full
xold = obj.getVars()
perturb = np.copy(perturb)
perturb[fixedVars] = 0.0
return (xold, xeval, perturb, etype)
def fdGrad(obj, fd_eps, xeval = None, perturb = None, etype = None, fixedVars = []):
xold, xeval, perturb, etype = preamble(obj, xeval, perturb, etype, fixedVars)
def evalAt(x):
obj.setVars(x)
val = obj.energy(etype)
return val
fd_delta_E = (evalAt(xeval + perturb * fd_eps) - evalAt(xeval - perturb * fd_eps)) / (2 * fd_eps)
obj.setVars(xold)
return fd_delta_E
def validateGrad(obj, fd_eps = 1e-6, xeval = None, perturb = None, etype = None, fixedVars = []):
xold, xeval, perturb, etype = preamble(obj, xeval, perturb, etype, fixedVars)
obj.setVars(xeval)
g = obj.gradient(etype)
analytic_delta_E = g.dot(perturb)
fd_delta_E = fdGrad(obj, fd_eps, xeval, perturb, etype, fixedVars)
return (fd_delta_E, analytic_delta_E)
def validateHessian(obj, fd_eps = 1e-6, xeval = None, perturb = None, etype = None, fixedVars = []):
xold, xeval, perturb, etype = preamble(obj, xeval, perturb, etype, fixedVars)
def gradAt(x):
obj.setVars(x)
val = obj.gradient(etype)
return val
obj.setVars(xeval)
h = obj.hessian(etype)
fd_delta_grad = (gradAt(xeval + perturb * fd_eps) - gradAt(xeval - perturb * fd_eps)) / (2 * fd_eps)
analytic_delta_grad = h.apply(perturb)
obj.setVars(xold)
return (norm(analytic_delta_grad - fd_delta_grad) / norm(fd_delta_grad), fd_delta_grad, analytic_delta_grad)
def gradConvergence(obj, perturb=None, energyType=None, fixedVars = []):
epsilons = np.logspace(-9, -3, 100)
errors = []
if (energyType is None): energyType = obj.EnergyType.Full
if (perturb is None): perturb = np.random.uniform(-1, 1, size=obj.numVars())
for eps in epsilons:
fd, an = validateGrad(obj, etype=energyType, perturb=perturb, fd_eps=eps, fixedVars = fixedVars)
err = np.abs(an - fd) / np.abs(an)
errors.append(err)
return (epsilons, errors, an)
def gradConvergencePlot(obj, perturb=None, energyType=None, fixedVars = []):
from matplotlib import pyplot as plt
eps, errors, ignore = gradConvergence(obj, perturb, energyType, fixedVars)
plt.title('Directional derivative fd test for gradient')
plt.ylabel('Relative error')
plt.xlabel('Step size')
plt.loglog(eps, errors)
plt.grid()
def hessConvergence(obj, perturb=None, energyType=None, fixedVars = []):
epsilons = np.logspace(-9, -3, 100)
errors = []
if (energyType is None): energyType = obj.EnergyType.Full
if (perturb is None): perturb = np.random.uniform(-1, 1, size=obj.numVars())
for eps in epsilons:
err, fd, an = validateHessian(obj, etype=energyType, perturb=perturb, fd_eps=eps, fixedVars = fixedVars)
errors.append(err)
return (epsilons, errors, an)
def hessConvergencePlot(obj, perturb=None, energyType=None, fixedVars = []):
from matplotlib import pyplot as plt
eps, errors, ignore = hessConvergence(obj, perturb, energyType, fixedVars)
plt.title('Directional derivative fd test for Hessian')
plt.ylabel('Relative error')
plt.xlabel('Step size')
plt.loglog(eps, errors)
plt.grid()
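# --- Usage sketch (illustration only) ---
# Any object exposing getVars/setVars/numVars/energy/gradient/hessian and an
# EnergyType enum (the interface assumed throughout this module) can be checked with, e.g.:
#   fd, an = validateGrad(obj, fd_eps=1e-7)      # directional derivative vs. analytic gradient
#   relErr, fdDg, anDg = validateHessian(obj)    # finite-difference vs. analytic Hessian action
#   gradConvergencePlot(obj); hessConvergencePlot(obj)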
```
#### File: Inflatables/python/utils.py
```python
import numpy as np
import MeshFEM, mesh
import registration
import os
import pickle, gzip
def load(path):
"""
load a pickled gzip object
"""
return pickle.load(gzip.open(path, 'rb'))
def save(obj, path):
"""
save an object to a pickled gzip
"""
pickle.dump(obj, gzip.open(path, 'wb'))
def sheetTrisForVar(sheet, varIdx):
"""
Get indices of triangles influencing a particular equilibrium variable of the sheet.
(indices < sheet.mesh().numTrix() refer to triangles in the top sheet, the
rest to triangles in the bottom sheet.)
"""
v = sheet.vtxForVar(varIdx)
result = []
m = sheet.mesh()
if v.sheet & 1: result.extend(np.where(m.triangles() == v.vi)[0])
if v.sheet & 2: result.extend(np.where(m.triangles() == v.vi)[0] + m.numTris())
return result
def maskForIndexList(indices, size):
mask = np.zeros(size, dtype=np.bool)
mask[indices] = True
return mask
def freshPath(path, suffix='', excludeSuffix = False):
if path is None: return
if not os.path.exists(path + suffix): return path if excludeSuffix else path + suffix
i = 0
candidatePath = lambda i: f'{path}.{i}{suffix}'
while os.path.exists(candidatePath(i)): i += 1
print(f'Requested path exists; using fresh path {candidatePath(i)}')
return f'{path}.{i}' if excludeSuffix else candidatePath(i)
def allEnergies(obj):
return {name: obj.energy(etype) for name, etype in obj.EnergyType.__members__.items()}
def allGradientNorms(obj, freeVariables = None):
if freeVariables is None:
freeVariables = np.arange(obj.numVars(), dtype=np.int)
return {name: np.linalg.norm(obj.gradient(etype)[freeVariables]) for name, etype in obj.EnergyType.__members__.items()}
def loadObj(path):
V, F = [], []
for l in open(path, 'r'):
comps = l.strip().split(' ')
specifier = comps[0].lower()
if (specifier == 'v'): V.append([float(c) for c in comps[1:]])
if (specifier == 'l' or specifier == 'f'): F.append([int(i) - 1 for i in comps[1:]])
return np.array(V), np.array(F)
def normalizedParamEnergies(obj):
ET = obj.EnergyType
return [obj.energy(et) / reg if reg != 0 else obj.energy(et)
for (et, reg) in [(ET.Fitting, 1.0),
(ET.AlphaRegularization, obj.alphaRegW),
(ET.PhiRegularization, obj.phiRegW),
(ET.BendingRegularization, obj.bendRegW)]]
def bbox(P):
return np.min(P, axis=0), np.max(P, axis=0)
def bbox_dims(P):
bb = bbox(P)
return bb[1] - bb[0]
def getClosestPointDistances(P):
"""
Gets the distance of each point in a point collection P to its closest other point in P.
"""
closestDist = []
for p in P:
closestDist.append(np.partition(np.linalg.norm(p - P, axis=1), 1)[1])
return closestDist
def prototypeScaleNormalization(P, placeAtopFloor = False, objectScale = 750, reorient = False):
if reorient: P = registration.align_points_with_axes(P)
bb = bbox(P)
c = (bb[0] + bb[1]) / 2 # use center of bounding box rather than center of mass
t = -c
if (placeAtopFloor): t[2] = -bb[0][2]
return (P + t) * (objectScale / np.max(bb[1] - bb[0]))
def renderingNormalization(P, placeAtopFloor = False):
"""
Return the transformation function that maps the points `P` in a standard
configuration for rendering.
"""
c = np.mean(P, axis=0)
bb = bbox(P)
t = -c
if placeAtopFloor:
t[2] = -bb[0][2]
s = 1.0 / np.max(bb[1] - bb[0])
return lambda x: s * (x + t)
def isWallTri(sheet_mesh, is_wall_vtx):
"""
Determine which triangles are part of a wall (triangles made of three wall vertices).
"""
return is_wall_vtx[sheet_mesh.triangles()].all(axis=1)
def pad2DTo3D(P):
if P.shape[1] == 3: return P
return np.pad(P, [(0, 0), (0, 1)], mode='constant')
import itertools
def nth_choice(n, *args):
return next(itertools.islice(itertools.product(*args), n, None))
def writeFields(path, m, name1, field1, *args):
mfw = mesh.MSHFieldWriter(path, m.vertices(), m.triangles())
data = [name1, field1] + list(args)
for name, field in zip(data[0::2], data[1::2]):
mfw.addField(name, field)
del mfw
import mesh_utilities
def getLiftedSheetPositions(origSheetMesh, uv, target_surf):
paramSampler = mesh_utilities.SurfaceSampler(pad2DTo3D(uv), target_surf.triangles())
return paramSampler.sample(origSheetMesh.vertices(), target_surf.vertices())
import parametrization
def getSquashedLiftedPositionsFromLiftedPos(optSheetMesh, liftedPos, liftFrac = 0.2, freeBoundary = False):
flatPos = None
if freeBoundary:
# Note: we assume the design sheet has already been registered with the target boundary...
flatPos = optSheetMesh.vertices()
else:
# If we're fixing the boundary, the flattened state must perfectly match the target surface's boundary.
# Do this by mapping the design sheet to the interior of the target surface's boundary harmonically.
bv = optSheetMesh.boundaryVertices()
flatPos = parametrization.harmonic(optSheetMesh, liftedPos[bv])
return flatPos + liftFrac * (liftedPos - flatPos)
def getSquashedLiftedPositions(optSheetMesh, origSheetMesh, uv, target_surf, liftFrac = 0.2):
liftedPos = getLiftedSheetPositions(origSheetMesh, uv, target_surf)
return getSquashedLiftedPositionsFromLiftedPos(optSheetMesh, liftedPos, liftFrac)
import mesh, glob
def getBoundingBox(framesDir):
minCorner = [ np.inf, np.inf, np.inf]
maxCorner = [-np.inf, -np.inf, -np.inf]
for i in glob.glob(f'{framesDir}/step_*.msh'):
V = mesh.Mesh(i, embeddingDimension=3).vertices()
minCorner = np.min([minCorner, V.min(axis=0)], axis=0)
maxCorner = np.max([maxCorner, V.max(axis=0)], axis=0)
return np.array([minCorner, maxCorner])
def printBoundingBox(framesDir):
print('{', ', '.join(map(str, getBoundingBox(framesDir).ravel(order='F'))), '}')
def getTargetSurf(tas):
tsf = tas.targetSurfaceFitter()
return mesh.Mesh(tsf.targetSurfaceV, tsf.targetSurfaceF)
################################################################################
# Strain analysis
################################################################################
def getStrains(isheet):
getStrain = lambda ted: ted.principalBiotStrains() if hasattr(ted, 'principalBiotStrains') else (np.sqrt(ted.eigSensitivities().Lambda()) - 1)
return np.array([getStrain(ted) for ted in isheet.triEnergyDensities()])
def tensionStates(isheet):
return [ted.tensionState() for ted in isheet.triEnergyDensities()]
# Get the amount by which each element is compressed. This is
# zero for elements in complete tension or the increase in
# strain needed to put the element in tension.
def compressionMagnitudes(isheet):
def cm(ted):
l = ted.eigSensitivities().Lambda()
if (l[0] < 1): return 1 - np.sqrt(l[0]) # full compression case
return np.max([np.sqrt(1 / np.sqrt(l[0])) - np.sqrt(l[1]), 0]) # partial compression or full tension case.
return np.array([cm(ted) for ted in isheet.triEnergyDensities()])
# Get the amount by which each element is "fully compressed" (nonzero
# only for elements in full compression rather than partial tension).
def fullCompressionMagnitudes(isheet):
return np.clip(1.0 - np.sqrt(np.array([ted.eigSensitivities().Lambda()[0] for ted in isheet.triEnergyDensities()])), 0.0, None)
def writeStrainFields(path, isheet):
vm = isheet.visualizationMesh()
strains = getStrains(isheet)
mfw = mesh.MSHFieldWriter(path, vm.vertices(), vm.elements())
mfw.addField("tensionState", tensionStates(isheet))
mfw.addField("compressionMagnitude", compressionMagnitudes(isheet))
mfw.addField("lambda_0", strains[:, 0])
mfw.addField("lambda_1", strains[:, 1])
def strainHistogram(isheet):
from matplotlib import pyplot as plt
strains = getStrains(isheet)
plt.hist(strains[:, 0], bins=500, range=(-0.4,0.1), label='$\lambda_0$');
plt.hist(strains[:, 1], bins=500, range=(-0.4,0.1), label='$\lambda_1$');
plt.legend()
plt.grid()
plt.title('Principal strains');
def cumulativeArcLen(loopPts):
numPts, numComp = loopPts.shape
arcLen = np.empty(numPts)
arcLen[0] = 0.0
for i in range(1, numPts):
arcLen[i] = arcLen[i - 1] + np.linalg.norm(loopPts[i] - loopPts[i - 1])
return arcLen
################################################################################
# Curve operations
################################################################################
def samplePointsOnLoop(loopPts, numSamples, offset):
"""
Sample `numSamples` evenly spaced along the arlength of a closed polyline "loopPts"
This closed loop is represented by a list of points, with the first and
last point coinciding.
The first sample point is placed at `offset`, a relative arclength position along the curve in [0, 1].
If `offset` is a list of `n` floats (instead of just a float), then we generate n * numSamples points
at the specified offsets (with the sampled points for each offset value interleaved).
"""
assert(np.linalg.norm(loopPts[-1] - loopPts[0]) == 0)
numPts, numComp = loopPts.shape
arcLen = cumulativeArcLen(loopPts)
arcLen /= arcLen[-1] # normalize arc lengths to [0, 1]
# Arc length position of the sample points
if (not isinstance(offset, list)):
offset = [offset]
s = np.vstack([np.fmod(np.linspace(0, 1, numSamples, endpoint=False) + o, 1.0) for o in offset]).ravel(order='F')
samples = np.empty((len(s), numComp))
for c in range(numComp):
samples[:, c] = np.interp(s, arcLen, loopPts[:, c])
return samples
import shapely
import shapely.ops
import shapely.geometry as shp
def normalOffset(polygon, dist):
"""
Offset points on the planar curve or shp.Polygon "polygon" in the normal
direction by "dist". This curve should lie in a "z = const" plane or the
result will be distorted.
Returns a **list** of the resulting polygon(s) (shp.Polygon instances),
as an inward offset can divide the input polygon into multiple pieces.
"""
if not isinstance(polygon, shp.Polygon):
polygon = shp.Polygon(polygon[:, 0:2])
offsetResult = polygon.buffer(dist)
# Note: the result could be a Polygon or a MultiPolygon...
if (isinstance(offsetResult, shp.Polygon)):
return [offsetResult]
elif (isinstance(offsetResult, shp.MultiPolygon)):
return list(offsetResult)
else: raise Exception('Unexpected polygon offset result type')
def getBoundary(polygon, getAll = False):
"""
Get the boundary of a shapely polygon.
If `getAll` is true, we return a list with all boundary polylines sorted by descending length;
if false, we return the largest one and print a warning.
"""
result = polygon.boundary
if result.geom_type == 'LineString':
if getAll: return [np.array(result)]
return np.array(result)
if result.geom_type == 'MultiLineString':
allBoundaries = sorted([np.array(r) for r in result], key=lambda a: -len(a))
if getAll: return allBoundaries
print('WARNING: union boundary has multiple components; returning the largest one')
return allBoundaries[0]
raise Exception('Unexpected boundary result type')
def unionPolygons(polygons):
"""
Union two or more polygons [ptsA, ptsB, ...] described by point lists `ptsA` and `ptsB`.
(For each of these lists, the first and last points must agree)
"""
return shapely.ops.unary_union([shp.Polygon(p) for p in polygons])
import os
def get_nonexistant_path(fname_path):
"""
Get the path to a filename which does not exist by incrementing path.
From https://stackoverflow.com/a/43167607/122710
"""
if not os.path.exists(fname_path):
return fname_path
filename, file_extension = os.path.splitext(fname_path)
i = 1
new_fname = "{}-{}{}".format(filename, i, file_extension)
while os.path.exists(new_fname):
i += 1
new_fname = "{}-{}{}".format(filename, i, file_extension)
return new_fname
import scipy
import scipy.sparse
def reconnectPolygons(polygons, originatingPolygon, minGap = 0):
"""
Add the line segments of the minimal length necessary to connect the entries of
polygon list `polygons`, only allowing line segments that lie within the
originating polygon (using a minimum spanning tree).
This is meant to address the problem where eroding a polygon can separate it
into a bunch of small polygons that we want to connect at the seam width.
Unfortunately, we can have two polygons whose ground-truth connection line
(indicated by * below) exceeds the distance of their nearest points (a and b)
a--- * --------+
|
b----------------+
(here the "--" lines represent thin polygons). This will result in a reconnection
failure. It could be mitigated by splitting up large polygons with some threshold,
but we instead opt for the reconnectPolygons2 algorithm below.
"""
#pickle.dump(polygons, open(get_nonexistant_path('polygons.pkl'), 'wb'))
#pickle.dump(originatingPolygon, open(get_nonexistant_path('originatingPolygon.pkl'), 'wb'))
inputPolygons = polygons
polygons = [shp.Polygon(p) for p in polygons]
originatingPolygon = shp.Polygon(originatingPolygon)
n = len(polygons)
dists = np.full((n, n), np.inf)
closestPoints = np.empty((n, n), dtype='O')
for i, pi in enumerate(polygons):
for j, pj in enumerate(polygons):
if (i >= j): continue; # only compute upper triangle
cp = np.vstack([np.array(o.coords) for o in shapely.ops.nearest_points(pi, pj)])
connectionDist = np.linalg.norm(np.subtract(*cp))
distToOrig = shp.Point(cp.mean(axis=0)).distance(originatingPolygon)
if (distToOrig > 0.25 * connectionDist): continue # If the candidate connecting line strays too far outside the originating polygon, it is probably invalid
dists [i, j] = connectionDist
closestPoints[i, j] = cp
outputPolylines = inputPolygons.copy()
for mst_edge in zip(*scipy.sparse.csgraph.minimum_spanning_tree(dists).nonzero()):
i, j = sorted(mst_edge)
if (dists[i, j] < minGap): continue # no connection needed
outputPolylines.append(closestPoints[i, j])
return outputPolylines
import scipy.spatial
def reconnectPolygons2(inputPolygons, originatingPolygon, fuseWidth, includeExtensions=False):
"""
Hopefully superior algorithm for inserting line segments to reconnect the
distinct polygons that arose from an erosion operation on originatingPolygon.
This one works by detecting "bridges"--regions of `originatingPolygon \ inputPolygons`
that connect two distinct polygons of inputPolygons--and then joining the
closest points of these input polygons (after intersecting with a
neighborhood of the bridge).
"""
eps = 1e-6
polygons = [shp.Polygon(p).buffer(fuseWidth / 2 + eps) for p in inputPolygons]
originatingPolygon = shp.Polygon(originatingPolygon)
bridges = [p for p in originatingPolygon.difference(shapely.ops.unary_union(polygons)) if p.boundary.length > 3 * fuseWidth]
outputPolylines = inputPolygons.copy()
for b in bridges:
distances = np.array([b.distance(p) for p in polygons])
# If "b" actually bridges between two polygons, connect these
# polygons' closest points (restricted to a neighborhood of the bridge)
closest = np.argsort(distances)
if (distances[closest[1]] < fuseWidth / 2):
bridgeRegion = b.buffer(2 * fuseWidth)
p0, p1 = shapely.ops.nearest_points(bridgeRegion.intersection(polygons[closest[0]]),
bridgeRegion.intersection(polygons[closest[1]]))
outputPolylines.append(np.array([np.asarray(p0), np.asarray(p1)]))
elif includeExtensions:
if (b.boundary.length > 4 * fuseWidth):
bdryPts = np.array(b.boundary)
_, p0 = shapely.ops.nearest_points(b, polygons[closest[0]])
b_to_p0 = scipy.spatial.distance.cdist([np.asarray(p0)], bdryPts[:, 0:2])[0]
farthest = np.argmax(b_to_p0)
if (b_to_p0[farthest] > 4 * fuseWidth):
p1, _ = shapely.ops.nearest_points(polygons[closest[0]], shp.Point(bdryPts[farthest, 0:2]))
outputPolylines.append(np.array([np.asarray(p1), bdryPts[farthest, 0:2]]))
return outputPolylines
```
#### File: Inflatables/python/wall_width_formulas.py
```python
import numpy as np
def wallWidthForCanonicalWidth(canonicalWidth, channelSpacing):
return canonicalWidth * channelSpacing / (2 * np.pi)
def canonicalWallWidthForGeometry(wallWidth, channelSpacing):
return wallWidth * 2 * np.pi / channelSpacing
def spacingForCanonicalAndPhysicalWallWidths(canonicalWidth, physicalWidths):
return physicalWidths * 2 * np.pi / canonicalWidth
# Largest singular value of the mapping from 3D to the plane.
def stretchFactorForCanonicalWallWidth(w):
# stretched width / unstretched width
return 2 * np.pi / (w + (2 / np.pi) * (2 * np.pi - w))
# Inverse of stretchFactorForCanonicalWallWidth
def canonicalWallWidthForStretchFactor(s):
return ((2 * np.pi / s) - 4) / (1 - 2 / np.pi)
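# Quick sanity check (illustration): canonicalWallWidthForStretchFactor exactly inverts
# stretchFactorForCanonicalWallWidth, e.g.
#   w = 1.0
#   s = stretchFactorForCanonicalWallWidth(w)    # ~1.44
#   canonicalWallWidthForStretchFactor(s)        # ~1.0, recovering w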
``` |
{
"source": "jpanganiban/simplestruct",
"score": 3
} |
#### File: jpanganiban/simplestruct/simplestruct.py
```python
__version__ = "0.1.0"
class StructMeta(type):
def __new__(meta, name, bases, dct):
"""Override struct class instantiation so we can store the class
definition."""
dct['__struct_meta__'] = dct
return super(StructMeta, meta).__new__(meta, name, bases, dct)
class Struct(object):
__metaclass__ = StructMeta
def __init__(self, **kwargs):
"""Override object constructor to validate the values being set
first."""
for attr, val in kwargs.items():
self._validate_attr(attr, val)
setattr(self, attr, val)
def __setattr__(self, attr, val):
"""Override object setter to validate the value being set first."""
self._validate_attr(attr, val)
super(Struct, self).__setattr__(attr, val)
def _validate_attr(self, attr, val):
"""Validates the value being set."""
if attr not in self.__struct_meta__:
raise AttributeError(
"Attribute '%s' is not defined in '%s'" % (attr, self.__class__)
)
attr_type = self.__struct_meta__[attr]
# If the passed value is None, Just let it be.
if val is None:
return
# If the defined attribute is a list, validate each item to match the
# defined specified attribute type.
#
# ie.
#
# class Person(Struct):
# siblings = [str]
#
# Each instance of the attribute `sibling` must be of type `str`.
#
if isinstance(attr_type, list):
if len(attr_type) > 0:
attr_type = attr_type[0]
for v in val:
if not isinstance(v, attr_type):
raise TypeError("")
return
# If the defined attribute is a tuple, validate each item in the tuple
# to match the defined attribute type.
#
# ie.
#
# class Person(Struct):
# age_and_gender = (int, str)
#
# The first value of the attribute `age_and_gender` must be of type
# `int` and the second value must be of type `str`.
#
if isinstance(attr_type, tuple):
for i, v in enumerate(val):
if v is not None and not isinstance(v, attr_type[i]):
raise TypeError("")
return
# If the defined attribute is just a type, just validate it.
#
# ie.
#
# class Person(Struct):
# age = int
#
# The value of the attribute `age` must be of type `int`.
if not isinstance(val, attr_type):
raise TypeError("")
``` |
{
"source": "jpaniagualaconich/django-sslserver",
"score": 2
} |
#### File: django-sslserver/sslserver/testcases.py
```python
import os
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test.testcases import LiveServerTestCase, LiveServerThread, QuietWSGIRequestHandler
from sslserver.management.commands.runsslserver import (
SecureHTTPServer, WSGIRequestHandler, default_ssl_files_dir,
)
class SecureQuietWSGIRequestHandler(WSGIRequestHandler, QuietWSGIRequestHandler):
pass
class SecureLiveServerThread(LiveServerThread):
def _create_server(self):
cert_file = os.path.join(default_ssl_files_dir(), "development.crt")
key_file = os.path.join(default_ssl_files_dir(), "development.key")
return SecureHTTPServer(
(self.host, self.port),
SecureQuietWSGIRequestHandler,
cert_file,
key_file,
)
class SecureLiveServerTestCase(LiveServerTestCase):
server_thread_class = SecureLiveServerThread
class SecureStaticLiveServerTestCase(StaticLiveServerTestCase):
server_thread_class = SecureLiveServerThread
``` |
{
"source": "jpanikulam/amanuensis",
"score": 3
} |
#### File: jpanikulam/amanuensis/analyze.py
```python
import numpy as np
from scipy.io import wavfile
from scipy import signal
from matplotlib import pyplot as plt
import notes
def load():
pass
def compare_note(f):
best_note = None
best_error = np.inf
best_freq = None
for note_name, freq in notes.notes.items():
error = np.abs(f - freq)
if error < best_error:
best_error = error
best_note = note_name
best_freq = notes.notes[best_note]
return (best_note, best_freq)
def comparogram(note_name, audio_signal, fs):
note_freq = notes.notes[note_name]
T = 1.0 / fs
signal_seconds = audio_signal.shape[0] * T
t = np.arange(0.0, T * 5.0, T)
note_signal = np.sin(t * (2.0 * np.pi * note_freq))
correlation = signal.fftconvolve(audio_signal / np.std(audio_signal), note_signal / np.std(note_signal), 'same')
times = np.arange(0.0, signal_seconds, T)
plt.plot(times, correlation, label=' '.join(note_name))
def compare(freq, audio_signal, fs):
T = 1.0 / fs
t = np.arange(0.0, 0.25, T)
note_signal = np.sin(t * (2.0 * np.pi * freq))
correlation = signal.fftconvolve(audio_signal / np.std(audio_signal), note_signal / np.std(note_signal), 'same')
return correlation
def detect_harmonics(note_name, audio_signal, fs):
# MIN_POWER = 1.2e10
T = 1.0 / fs
signal_seconds = audio_signal.shape[0] * T
times = np.arange(0.0, signal_seconds, T)
fundamental_frequency = notes.notes[note_name]
# harmonics = range(1, 4 + 1)
harmonics = [(1.0 / 4.0), (1.0 / 3.0), (1.0 / 2.0), 1.0, 2.0, 3.0]
present_harmonics = np.zeros((audio_signal.shape[0], len(harmonics)))
for i in range(len(harmonics)):
harmonic_freq = fundamental_frequency * harmonics[i]
print harmonic_freq
likely_note = compare_note(harmonic_freq)[0]
correlation = compare(harmonic_freq, audio_signal, fs)
box_filter = np.ones(fs * 0.1)
correlation_power = np.sqrt(signal.fftconvolve(np.abs(correlation) ** 2.0, box_filter, 'same'))
# plt.plot(times, correlation, label='{} [Hz]'.format(harmonic_freq))
# plt.plot(times[::100], correlation_power[::100], label='${}$ : {} [Hz]'.format(likely_note, harmonic_freq))
present_harmonics[:, i] = correlation_power
f, axes = plt.subplots(len(harmonics), 1, sharex=True)
plt.title(note_name)
for i in range(len(harmonics)):
axes[i].plot(times, present_harmonics[:, i], label="{}".format(harmonics[i]))
axes[i].set_ylim([0.0, 400e3])
# min_power = 0.7 * np.max(present_harmonics[:, i - 1])
# min_power = np.percentile(present_harmonics[:, i - 1], 70.0)
# axes[i - 1].plot(times, present_harmonics[:, i - 1] > min_power, label="{}".format(i))
def correlate(harmonic_freq, audio_signal, fs):
correlation = compare(harmonic_freq, audio_signal, fs)
box_filter = np.ones(fs * 0.1)
correlation_power = np.sqrt(np.abs(signal.fftconvolve(np.abs(correlation) ** 2.0, box_filter, 'same')))
return correlation_power
# def find_integer_contributors(freq):
# for f in
def generate_harmonic_image(audio_signal, fs):
powers = []
note_freqs = sorted(notes.notes.values())
applied_freqs = []
# int(0.1 * fs)
for freq in note_freqs:
# print "f: {} Hz".format(freq)
correlation = correlate(freq, audio_signal, fs)
powers.append(correlation[::1000])
applied_freqs.append(freq)
np_powers = np.array(powers)
maxes = np.max(np_powers, axis=0) + 1e-3
plt.imshow(np.log(np_powers / maxes))
x_indices = np.arange(0, np_powers.shape[1], 100)
y_indices = np.arange(0, np_powers.shape[0], 20)
plt.xticks(x_indices, x_indices * 1000 * (1.0 / fs), fontsize=9)
plt.yticks(y_indices, np.array(applied_freqs)[y_indices], fontsize=9)
plt.show()
if __name__ == '__main__':
# fn = "/home/jacob/repos/amanuensis/data/rocky_mtn_high.wav"
# fn = "/home/jacob/repos/amanuensis/data/country_roads.wav"
# fn = "/home/jacob/repos/amanuensis/data/reference_guitar.wav"
fn = "/home/jacob/repos/amanuensis/data/tuning_reference.wav"
fs, data = wavfile.read(fn)
# start_seconds = 0.6
# end_seconds = 1.5
# start_seconds = 1.00
# end_seconds = 1.5
start_seconds = 1.0
end_seconds = 2.5
start_samples = fs * start_seconds
end_samples = fs * end_seconds
segment_seconds = 0.1
segment_size = fs * segment_seconds
# first_chunk_chan0 = data[5000:50000, 0]
# first_chunk_chan0 = data[start_samples:end_samples]
first_chunk_chan0 = data[start_samples:end_samples, 0]
generate_harmonic_image(first_chunk_chan0, fs)
# wavfile.write('test_chunk.wav', fs, first_chunk_chan0)
```
#### File: jpanikulam/amanuensis/notes.py
```python
def clean_split(text, delim=','):
text = text.strip()
return map(lambda o: o.strip(), text.split(delim))
def read_notes(file):
notes = {}
for line in file:
split = clean_split(line, ',')[:-1]
if split[-1] == '':
continue
notes[(split[0], int(split[1]))] = float(split[2])
return notes
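# Assumed notes.txt line format (illustration; the actual data file is not included here):
#   A, 4, 440.0,
#   C, 5, 523.25,
# i.e. "<note name>, <octave>, <frequency in Hz>,"; the trailing empty field is what
# the [:-1] slice in read_notes() discards.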
# Map notes to frequencies
notes = read_notes(open('notes.txt'))
# Map frequencies to note tuples
inv_notes = {v: k for k, v in notes.items()}
if __name__ == '__main__':
path = 'notes.txt'
with open(path) as f:
read_notes(f)
``` |
{
"source": "jpanikulam/cvxbind",
"score": 3
} |
#### File: cvxbind/cvxbind/main.py
```python
import os
import argparse
from utils import Log
from parse_cvxgen import ParseCVX
from gen_cpp import GenCPP
def main():
parser = argparse.ArgumentParser(description='CVXGEN Python Binding Generator')
parser.add_argument('path', metavar='path', default='./images',
help='Give the target path')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Decide verbosity')
args = parser.parse_args()
Log.set_verbose(args.verbose)
path = os.path.realpath(args.path)
parsed_cvx = ParseCVX.read_file(path)
write_text = GenCPP.make_cvx_binding(parsed_cvx)
print write_text
if __name__ == '__main__':
main()
```
#### File: cvxbind/cvxbind/parse_cvxgen.py
```python
from utils import Log, Utils
import re
import string
import os
class ParseCVX(object):
@classmethod
def read_file(self, file_path):
'''Compute a list of stripped text'''
f = file(file_path)
data = list(f)
data_stripped = map(string.strip, data)
return self.read(data_stripped)
@classmethod
def read(self, data):
'''Parse a list of stripped lines
TODO:
- Also include constraints
'''
section = None
sections = ['dimensions', 'parameters', 'variables', 'minimize', 'end']
content = {
'dimensions': [],
'parameters': [],
'variables': [],
}
section_dict = {
'dimensions': self.parse_dimension,
'parameters': self.parse_parameter,
'variables': self.parse_parameter,
}
for l_num, dirty_line in enumerate(data):
Log.log("On line {}".format(l_num))
line = dirty_line.strip()
if '#' in line:
line = line.split('#', 1)[0]
if line == '':
pass
elif line.startswith('#'):
continue
elif (line in sections) or (section is None):
if line == 'end':
section = None
else:
section = line
elif section == 'dimensions':
content[section].append(self.parse_dimension(line))
elif section in ['parameters', 'variables']:
content[section].append(self.parse_parameter(line))
else:
Log.warn("Unknown line {}: {}".format(l_num, line))
return content
@classmethod
def parse_parameter(self, line):
'''
dimension_expr: string expression for reflecting dimension
TODO:
- Should have used regex!
- Don't do blind text-replacement for special structure flags
'''
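        # Illustrative CVXGEN-style declarations this method is meant to handle
        # (assumed examples, not taken from a grammar reference):
        #   Q (n,n) psd          -> matrix parameter with special structure
        #   x[t] (n), t=0..T     -> array of vectors indexed by t
        #   gamma nonnegative    -> scalar with a special flag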
split = Utils.clean_split(line, None, maxsplit=1)
if len(split) == 1:
name = split[0]
_type = 'scalar'
return {
'name': name,
'dimensions': None,
'special': None,
'array_bounds': None,
'type': _type
}
name, second_half = split[:2]
Log.log("{}: Found parameter or variable".format(name))
# Handle special structure flags
special_features = ['psd', 'nsd', 'diagonal', 'nonnegative']
special = set()
for special_feature in special_features:
if special_feature in second_half:
special.add(special_feature)
Log.log("{}: Registering special behavior: {}".format(name, special))
second_half = second_half.replace(special_feature, '')
# Handle arrays
is_array = self.is_array(line)
if is_array:
array_var, name = is_array
array_expr = second_half.split(', ')[-1]
second_half = second_half.split(', ' + array_expr)[0]
array_expr = array_expr.split('=')[-1]
lower_bound, upper_bound = array_expr.split('..')
array_bounds = (lower_bound, upper_bound) # Strings
Log.log("{}: Registering array bounds expression: [{}...{}]".format(name, lower_bound, upper_bound))
else:
array_bounds = None
# Dimensions
dimensions = self.get_dimensions(line)
        if dimensions is None:
            _type = 'scalar'
        elif dimensions['cols'] != '1':
            # get_dimensions() always returns both 'rows' and 'cols', so check the
            # column count (as consume_parameter does) instead of the dict length.
            _type = 'matrix'
        else:
            _type = 'vector'
            Log.log("{}: Registering vector dimension: {}".format(name, dimensions))
parameter = {
'name': name,
'dimensions': dimensions,
'special': special,
'array_bounds': array_bounds,
'type': _type
}
return parameter
@classmethod
def consume_parameter(self, line):
# -- Array handling
# Is it an array?
is_array = self.is_array(line)
if is_array is not None:
index_var, name = is_array
# - Check if we have an initializer, like x[0] or something
# Required to be 't', for now
if index_var.isdigit() or (index_var != 't'):
is_array_initializer = True
array_bounds = index_var
else:
is_array_initializer = False
array_bounds = self.get_array_bounds(line)
Log.log("Registering array {} with indexing variable, {}".format(name, index_var))
if is_array_initializer:
Log.log("{}: Is an initializer".format(name))
# -- Not an array
else:
array_bounds = None
is_array_initializer = False
name = Utils.clean_split(line, None, maxsplit=1)[0]
Log.log("Registering non-array {}".format(name))
# -- Get dimensions
dimensions = self.get_dimensions(line)
if dimensions is None:
_type = 'scalar'
elif dimensions['cols'] != '1':
_type = 'matrix'
else:
_type = 'vector'
if dimensions is not None:
Log.log("{}: Registering dimensions as {}x{}".format(name, dimensions['rows'], dimensions['cols']))
else:
Log.log("{}: Registering as sclar".format(name))
special = self.get_special(line)
parameter = {
'name': name,
'dimensions': dimensions,
'array_bounds': array_bounds,
'type': _type,
'special': special,
'initializer': is_array_initializer
}
return parameter
@classmethod
def get_special(self, line):
special_features = ['psd', 'nsd', 'diagonal', 'nonnegative']
special = set()
for special_feature in special_features:
if special_feature in line:
special.add(special_feature)
Log.log("{}: Registering special behavior: {}".format(special_feature, special))
return special
@classmethod
def get_array_bounds(self, line):
if '=' not in line:
return None
_, after_eq = Utils.clean_split(line, '=')
        lower, upper = Utils.clean_split(after_eq, '..')
        return (lower, upper)
@classmethod
def is_array(self, line):
m = re.search(r"\[([A-Za-z0-9_]+)\]", line)
if m is not None:
return Utils.remove_chars(m.group(), '[', ']'), line[:m.start()].strip()
else:
return None
@classmethod
def get_dimensions(self, line):
stripped = line.strip()
if '(' not in stripped:
return None
else:
dimensions = stripped[stripped.find("(") + 1:stripped.find(")")]
if ',' in dimensions:
rows, cols = Utils.clean_split(dimensions, ',')
dimension_data = {
'rows': rows,
'cols': cols
}
else:
dimension_data = {
'rows': dimensions.strip(), 'cols': '1'
}
return dimension_data
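    # e.g. "A (m, n)" -> {'rows': 'm', 'cols': 'n'};  "b (n)" -> {'rows': 'n', 'cols': '1'}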
@classmethod
def parse_dimension(self, line):
"""Currently don't support arithmetic expressions"""
dim_name, dim_value = Utils.clean_split(line, '=')
dim_value = int(dim_value) # Explicitly convert to int
Log.log("{}: Registering dimension: {}".format(dim_name, dim_value))
dimension = {
'name': dim_name,
'value': dim_value
}
return dimension
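# Example: ParseCVX.parse_dimension("m = 10") returns {'name': 'm', 'value': 10}.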
if __name__ == '__main__':
Log.set_verbose()
fpath = os.path.dirname(os.path.realpath(__file__))
test_path = os.path.join(fpath, '..', 'test', 'description.cvxgen')
print ParseCVX.read_file(test_path)
``` |
{
"source": "jpanikulam/experiments",
"score": 2
} |
#### File: gpgpu/generators/render_volume_defs.py
```python
import generate_opencl_structs
def main():
cfg_defd = [
{
'type': 'int',
'length': 1,
'name': 'method',
},
{
'type': 'float',
'length': 1,
'name': 'step_size',
'default': '0.01'
},
{
'type': 'float',
'length': 1,
'name': 'max_dist',
'default': "25.0"
},
{
'type': 'int',
'length': 1,
'name': 'render_mode',
},
{
'type': 'int',
'length': 1,
'name': 'max_iteration',
}
]
definitions = [
("RenderVolumeConfig", cfg_defd),
]
destination = "/home/jacob/repos/experiments/gpgpu/kernels/render_volume"
generate_opencl_structs.write_files(definitions, destination)
if __name__ == '__main__':
main()
```
#### File: rendering/codegen/generate_drifter_ui.py
```python
from generate_imgui import MenuGenerator
def main():
full_menu = MenuGenerator("PlannerConfiguration")
# Debug
debug = full_menu.add_submenu("Debug")
debug.add_toggle("visualize_last_state", "Visualize Last State Cost", default="false")
debug.add_toggle("visualize_last_state", "Visualize Last State Cost", default="false")
debug.add_toggle("show_cost", "Show Cost", default="true")
debug.add_toggle("enable_look_target", "Enable Look Target", default="false")
debug.add_toggle("show_trajectory", "Show Trajectory", default="true")
debug.add_toggle("convenience_flag", "Set Convenient Flag", default="true")
# Optimization
optimization = full_menu.add_submenu("Optimization")
optimization.add_int_scalar("max_iterations", "Max Iterations", (0, 100), default="15")
optimization.add_scalar('min_state_damping', 'Minimum State Damping', (1e-5, 10.0), "1.0")
optimization.add_scalar('min_ctrl_damping', 'Minimum Control Damping', (1e-5, 10.0), "1.0")
optimization.add_scalar('qxx_min_eigenvalue', 'State Minimum Eigenvalue', (1e-5, 1.0), "1.0")
optimization.add_scalar('quu_min_eigenvalue', 'Control Minimum Eigenvalue', (1e-5, 1.0), "1.0")
# Bounds
bounds = full_menu.add_submenu("Bounds")
bounds.add_scalar('v_max_weight', 'Speed Bound Weight', (10.0, 500.0), 250.0)
bounds.add_scalar('max_speed', 'Maximum Speed (m/s)', (0.0, 15.0), 15.0)
bounds.add_scalar('min_speed', 'Minimum Speed (m/s)', (-10.0, 0.0), -10.0)
bounds.add_scalar('max_accel', 'Maximum Acceleration (m/s^2)', (0.0, 1.0), 0.5)
bounds.add_scalar('phi_weight', 'double', (0.0, 25.0), 0.1)
bounds.add_scalar('phi_max', 'double', (0.0, 5.0), 3.0)
bounds.add_scalar('phi_max_bound_weight', 'double', (0.0, 250.0), 75.0)
# Goal
goals = full_menu.add_submenu("Goal")
goals.add_scalar('goal_weight', 'Goal Weight', (0.0, 10.0), 1.0)
goals.add_scalar('terminal_vel_weight', 'Terminal Velocity Weight', (0.0, 20.0), 10.0)
goals.add_scalar('pointing_weight', 'Pointing Target Weight', (0.0, 20.0), 4.0)
goals.add_scalar('path_weight', 'Path Weight', (0.0, 20.0), 1.0)
goals.add_scalar('path_margin', 'Path Margin (m)', (0.0, 1.0), 0.2)
goals.add_toggle('enable_path', 'Enable Path', "false")
# Obstacles
obstacles = full_menu.add_submenu("Obstacles")
obstacles.add_scalar('avoid_weight', 'Avoid Area Weight', (0.0, 10.0), 3.0)
# Control
control = full_menu.add_submenu("Control")
control.add_scalar('acceleration_weight', 'Acceleration Weight', (0.0, 2.0), 0.)
control.add_scalar('phidot_weight', 'Steering Angle Rate Weight', (0.0, 10.0), 1.0)
control.add_scalar('phi_dot_max', 'Maximum Value of dPhi/dt', (0.0, 3.0), 1.5)
control.add_scalar('phi_dot_max_bound_weight',
'Weight for Enforcing Maximum Value of dPhi/dt', (0.0, 250.0), 75.0)
full_menu.generate(
namespace=("planning", "drifter"),
cfg_loc="planning/drifter/drifter_configuration",
ui_loc="planning/drifter/drifter_ui_elements"
)
if __name__ == '__main__':
main()
```
#### File: rendering/shaders/generate_type_map_gl.py
```python
mapping = {
"GL_FLOAT": (1, "GLfloat", "float", "glUniform1f"),
"GL_FLOAT_VEC2": (2, "GLfloat", "VecNf<2>", "glUniform2fv"),
"GL_FLOAT_VEC3": (3, "GLfloat", "VecNf<3>", "glUniform3fv"),
"GL_FLOAT_VEC4": (4, "GLfloat", "VecNf<4>", "glUniform4fv"),
"GL_INT": (1, "GLint"),
"GL_INT_VEC2": (2, "GLint"),
"GL_INT_VEC3": (3, "GLint"),
"GL_INT_VEC4": (4, "GLint"),
"GL_UNSIGNED_INT": (1, "GLuint"),
"GL_UNSIGNED_INT_VEC2": (2, "GLuint"),
"GL_UNSIGNED_INT_VEC3": (3, "GLuint"),
"GL_UNSIGNED_INT_VEC4": (4, "GLuint"),
"GL_BOOL": (1, "GLboolean"),
"GL_BOOL_VEC2": (2, "GLboolean"),
"GL_BOOL_VEC3": (3, "GLboolean"),
"GL_BOOL_VEC4": (4, "GLboolean"),
"GL_FLOAT_MAT2": (4, "GLfloat", "MatNf<2, 2>", "glUniformMatrix2fv"),
"GL_FLOAT_MAT2x3": (6, "GLfloat", "MatNf<2, 3>", "glUniformMatrix2x3fv"),
"GL_FLOAT_MAT2x4": (8, "GLfloat", "MatNf<2, 4>", "glUniformMatrix2x4fv"),
"GL_FLOAT_MAT3": (9, "GLfloat", "MatNf<3, 3>", "glUniformMatrix3fv"),
"GL_FLOAT_MAT3x2": (6, "GLfloat", "MatNf<3, 2>", "glUniformMatrix3x2fv"),
"GL_FLOAT_MAT3x4": (12, "GLfloat", "MatNf<3, 4>", "glUniformMatrix3x4fv"),
"GL_FLOAT_MAT4": (16, "GLfloat", "MatNf<4, 4>", "glUniformMatrix4fv"),
"GL_FLOAT_MAT4x2": (8, "GLfloat", "MatNf<4, 2>", "glUniformMatrix4x2fv"),
"GL_FLOAT_MAT4x3": (12, "GLfloat", "MatNf<4, 3>", "glUniformMatrix4x3fv"),
}
list_content_type = {
"GL_FLOAT": "GL_FLOAT",
"GL_FLOAT_VEC2": "GL_FLOAT",
"GL_FLOAT_VEC3": "GL_FLOAT",
"GL_FLOAT_VEC4": "GL_FLOAT",
"GL_INT": "GL_INT",
"GL_INT_VEC2": "GL_INT",
"GL_INT_VEC3": "GL_INT",
"GL_INT_VEC4": "GL_INT",
"GL_UNSIGNED_INT": "GL_UNSIGNED_INT",
"GL_UNSIGNED_INT_VEC2": "GL_UNSIGNED_INT",
"GL_UNSIGNED_INT_VEC3": "GL_UNSIGNED_INT",
"GL_UNSIGNED_INT_VEC4": "GL_UNSIGNED_INT",
"GL_BOOL": "GL_BOOL",
"GL_BOOL_VEC2": "GL_BOOL",
"GL_BOOL_VEC3": "GL_BOOL",
"GL_BOOL_VEC4": "GL_BOOL",
"GL_FLOAT_MAT2": "GL_FLOAT",
"GL_FLOAT_MAT2x3": "GL_FLOAT",
"GL_FLOAT_MAT2x4": "GL_FLOAT",
"GL_FLOAT_MAT3": "GL_FLOAT",
"GL_FLOAT_MAT3x2": "GL_FLOAT",
"GL_FLOAT_MAT3x4": "GL_FLOAT",
"GL_FLOAT_MAT4": "GL_FLOAT",
"GL_FLOAT_MAT4x2": "GL_FLOAT",
"GL_FLOAT_MAT4x3": "GL_FLOAT",
}
def make_uniform(type_enum, xdt):
n_elements, type_name, cc_type, function = xdt
txt = "void Shader::set(const std::string& name, const {}& arg) const ".format(cc_type) + "{"
txt += """
const std::string err_str = name + " was not available";
if (debug_mode_) {{
if (0u == uniform_from_name_.count(name)) {{
jcc::Warning() << "Not using " << name << " in shader" << std::endl;
return;
}}
}} else {{
JASSERT_EQ(uniform_from_name_.count(name), 1u, err_str.c_str());
}}
""".format()
txt += "\n const auto& desc = uniform_from_name_.at(name);"
txt += '\n JASSERT_EQ(desc.type, static_cast<int>({type_enum}), "Mismatched argument type");'.format(
type_enum=type_enum
)
if n_elements > 1:
if 'MatNf' in cc_type:
txt += "\n {fnc}(desc.location, 1, false, arg.data());".format(fnc=function)
else:
txt += "\n {fnc}(desc.location, 1, arg.data());".format(fnc=function)
else:
txt += "\n {fnc}(desc.location, arg);".format(fnc=function)
txt += "\n}"
return txt
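# For reference, make_uniform("GL_FLOAT_VEC3", mapping["GL_FLOAT_VEC3"]) emits roughly:
#   void Shader::set(const std::string& name, const VecNf<3>& arg) const {
#     ...name lookup and JASSERT_EQ type check...
#     glUniform3fv(desc.location, 1, arg.data());
#   }
# (condensed sketch of the generated C++, derived from the template above)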
def line(txt, depth=0):
return (" " * depth) + txt + ";\n"
def make_attribute(type_enum, xdt):
n_elements, type_name, cc_type, function = xdt
txt = "void VertexArrayObject::set(const std::string& name, const std::vector<{}>& arg) ".format(cc_type) + "{"
txt += """
const std::string err_str = name + " was not available";
if (debug_mode_) {{
if (0u == attribute_from_name_.count(name)) {{
jcc::Warning() << "Not using " << name << " in shader" << std::endl;
return;
}}
}} else {{
JASSERT_EQ(attribute_from_name_.count(name), 1u, err_str.c_str());
}}
""".format()
txt += "\n const auto& desc = attribute_from_name_.at(name);"
txt += '\n JASSERT_EQ(desc.type, static_cast<int>({type_enum}), "Mismatched argument type");'.format(
type_enum=type_enum
)
txt += line("glBindVertexArray(vao_)", 1)
txt += line("GLuint vbo_id", 1)
txt += line("glGenBuffers(1, &vbo_id)", 1)
txt += line("glBindBuffer(GL_ARRAY_BUFFER, vbo_id)", 1)
buffer_data_args_text = "arg.size() * {n_elements} * sizeof({type_name})".format(
n_elements=n_elements,
type_name=type_name
)
txt += line("glBufferData(GL_ARRAY_BUFFER, " + buffer_data_args_text + ", arg.data(), GL_STATIC_DRAW)", 1)
txt += line("constexpr GLint SIZE = 3", 1)
txt += line("constexpr bool NORMALIZED = false", 1)
txt += line("constexpr int STRIDE = 0", 1)
txt += line("glVertexAttribPointer(desc.location, SIZE, {type_enum}, NORMALIZED, STRIDE, 0)".format(
type_enum=list_content_type[type_enum]), 1
)
txt += line("glEnableVertexAttribArray(desc.location)", 1)
txt += line("allocated_buffers_.push_back(vbo_id)", 1)
txt += "}"
return txt
def uniforms():
for type_enum, data in mapping.items():
if len(data) > 2:
print make_uniform(type_enum, data)
def attributes():
for type_enum, data in mapping.items():
if len(data) > 2:
print make_attribute(type_enum, data)
if __name__ == '__main__':
# uniforms()
attributes()
```
#### File: experiments/stacko/log_dist.py
```python
import numpy as np
from matplotlib import pyplot as plt
def huber(x, k):
abs_x = np.abs(x)
res = np.zeros_like(x)
x_gt_k = abs_x > k
x_lte_k = ~x_gt_k
res[x_gt_k] = k * (abs_x[x_gt_k] - (k * 0.5))
res[x_lte_k] = (x[x_lte_k] * x[x_lte_k]) * 0.5
return res
def main():
t = np.linspace(-5.0, 5.0, 1000)
k = 1.0
ht = huber(t, k)
# plt.plot(t, ht)
plt.plot(t, np.exp(-ht))
plt.plot(t, np.exp(-(t * t)))
plt.show()
if __name__ == '__main__':
main()
``` |
{
"source": "jpanikulam/op_graph",
"score": 2
} |
#### File: op_graph/examples/example_graphs.py
```python
from op_graph import graph
def vectorspring():
gr = graph.OpGraph('VectorSpring')
k = gr.scalar('k')
imass = gr.inv('imass', gr.scalar('mass'))
a = gr.vector('a', 3)
v = gr.time_antiderivative('v', a)
x = gr.time_antiderivative('x', v)
f = gr.mul('f', k, x)
gr.mul('a', imass, f)
return gr
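# vectorspring() wires up spring-like dynamics: f = k * x, a = f / m, with
# v and x declared as successive time antiderivatives of a.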
def controlled_vectorspring():
gr = graph.OpGraph('VectorSpring')
k = gr.scalar('k')
imass = gr.inv(gr.anon(), gr.scalar('mass'))
u = gr.vector('u', 3)
a = gr.vector('a', 3)
v = gr.time_antiderivative('v', a)
x = gr.time_antiderivative('x', v)
force = gr.add('force', gr.mul(gr.anon(), k, x), u)
gr.mul('a', imass, force)
return gr
def simple_graph():
gr = graph.OpGraph('Simple')
a = gr.scalar('a')
gr.optimize(a)
b = gr.scalar('b')
gr.mul('ab', a, b)
d = gr.time_antiderivative('d', 'ab')
gr.time_antiderivative('e', d)
return gr
def double_integrator():
gr = graph.OpGraph('double_integrator')
gr.scalar('u')
gr.optimize('u')
gr.time_antiderivative('v', 'u')
gr.time_antiderivative('x', 'v')
return gr
def rotary_double_integrator():
gr = graph.OpGraph('double_integrator')
gr.vector('u', 3)
gr.optimize('u')
gr.time_antiderivative('w', 'u')
gr.so3('R')
gr.time_antiderivative('R', 'w')
return gr
all_graphs = [
vectorspring(),
simple_graph(),
double_integrator(),
rotary_double_integrator(),
controlled_vectorspring()
]
def main():
for gr in all_graphs:
print gr
if __name__ == '__main__':
main()
```
#### File: op_graph/op_graph/tensors.py
```python
from op_defs import op
def make_function(name, arguments, sexpr, commutative=False, associative=False):
# Construct a function and a commuted version of the same function
pass
def einsum(gr, a, b):
pass
"""
Notes:
- Clear distinction between vectors and covectors may be unnecessary?
- Could treat everything as a tensor of rank (n, n)
- And then two-forms/bilinear forms are just matrices
vector: vec(a)
matrix: (cov(a), vec(b))
bilinear form: (vec(a), vec(b))
two-form: (cov(a), cov(b))
>>> a = Tensor([3])
>>> b = Tensor([3])
>>> c = rmul(a, b)
-> Tensor([3])
== (a^k * b^k) -> c^k
>>> c = Tensor([], [3])
>>> d = rmul(a, c)
-> Scalar()
== (a^k * c_k) -> d
>>> D = Tensor([3], [3])
>>> E = Tensor([3], [3])
>>> F = rmul(D, E)
-> Tensor([3], [3])
== (D^a_b * E^b_c) -> F^a_c
(Am I applying rank correctly here?)
>>> A = Tensor([], [3, 3])
>>> x = Tensor([3])
>>> g = rmul(A, x)
-> g = Tensor([], [3])
== (A_ab * x^b) -> g_a
>>> c = rmul(x, g)
-> c = Scalar()
== (g_a * x^a) -> c
dc/dx^a = (dc/dg_b * dg^b/dx_a) + dc/dx^a
dc/dg = x^a
dg_a/dx^b = A_ab
dc/dx^a = g_a
dc/dx^a = (A_ab * x^a) + g_a
= (A_ab * x^a) + A_ab * x^a
= 2 * A_ab * x^a
"""
ricci_gemm = {
('matrix', 'matrix'): {
        # Add a function for currying special arguments
'generate': lambda a, b: op('einsum', a, b, (('a', 'b'), ('b', 'c'), ('a', 'c'))),
'gen': lambda a, b: op('einsum', a, b, ('ab,bc->ac'))
}
}
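# A minimal sketch of the 'ab,bc->ac' contraction encoded above, written directly with
# numpy.einsum (numpy is an assumption here; this module does not otherwise import it):
if __name__ == '__main__':
    import numpy as np
    A = np.random.rand(3, 4)
    B = np.random.rand(4, 2)
    C = np.einsum('ab,bc->ac', A, B)  # index b is summed over: ordinary matrix product
    assert np.allclose(C, A.dot(B))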
```
#### File: op_graph/op_graph/text.py
```python
import os
from subprocess import Popen, PIPE, STDOUT
def form_line(ltokens=[], rtokens=[]):
"""Form line."""
if not isinstance(ltokens, (list, tuple)):
return str(ltokens) + ';'
ltext = ' '.join(map(str, ltokens))
rtext = ' '.join(map(str, rtokens))
if len(rtokens) > 0:
return '{ltext} = {rtext};'.format(ltext=ltext, rtext=rtext)
else:
return '{ltext};'.format(ltext=ltext)
def clang_fmt_once(text, clang_format_path='/home/jacob/repos/llvm/clang-format'):
"""Generate formatted code."""
if not os.path.exists(clang_format_path):
raise ValueError("No clang-format!")
p = Popen([clang_format_path], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
clang_format_stdout = p.communicate(input=str(text))[0]
text = clang_format_stdout.decode()
clean_text = text.replace("Can't find usable .clang-format, using LLVM style", "")
return clean_text
def clang_fmt(text, clang_format_path='/home/jacob/repos/llvm/clang-format'):
out = clang_fmt_once(text, clang_format_path)
return clang_fmt_once(out)
``` |
{
"source": "jpanikulam/sonder",
"score": 2
} |
#### File: sonder/tools/simulator.py
```python
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import cv2
import scipy.linalg
from points import points as sonar_points
def rot3d(theta, axis):
    # np.cross(np.identity(3), w) builds the skew-symmetric matrix [w]_x, so the matrix
    # exponential yields the rotation by `theta` about `axis` (Rodrigues' formula).
    wx = np.cross(np.identity(3), axis / np.linalg.norm(axis) * theta)
    # scipy.linalg.expm3 was removed in newer SciPy; expm is the supported routine.
    return scipy.linalg.expm(wx)
def rotate_pts(pts, R, t):
return R.dot(pts.transpose()).transpose() + t
def fig3d():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.axis('equal')
return ax
def render_pts(pts, shape=(256, 256), max_elevation=0.17):
# Exclude points outside of the imaging range
rcos_theta = np.linalg.norm(pts[:, :2], axis=1)
elevations = np.arctan2(pts[:, 2], rcos_theta)
in_range = np.fabs(elevations) < max_elevation
pts = pts[in_range]
ranges = np.linalg.norm(pts, axis=1)
bearings = np.arctan2(pts[:, 1], pts[:, 0])
plt.figure('Image')
plt.scatter(bearings, ranges)
return np.vstack([ranges, bearings]).transpose()
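# render_pts models an imaging sonar: points whose elevation angle atan2(z, sqrt(x^2 + y^2))
# falls outside +/-max_elevation are discarded, and the survivors are reduced to
# (range, bearing) pairs, since elevation is not observed by the sensor.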
def draw_arc(ax, distance, bearing, elevation_range, R, t):
elevations = np.linspace(*elevation_range)
bearings = np.ones(elevations.shape) * bearing
ranges = np.ones(elevations.shape) * distance
rcos_theta = ranges * np.cos(elevations)
_x = rcos_theta * np.cos(bearings)
_y = rcos_theta * np.sin(bearings)
_z = ranges * np.sin(elevations)
xyz = np.vstack([_x, _y, _z])
# x, y, z = R.dot(xyz) + t
x, y, z, = rotate_pts(xyz.transpose(), R, t).transpose()
ax.plot(x, y, z)
def draw_arcs(ax, range_bearings, R, t, elevation_range=(-0.17, 0.17)):
for sph_pt in range_bearings:
draw_arc(ax, sph_pt[0], sph_pt[1], elevation_range, R, t)
def go():
test_pts = np.array([
[1.0, 0.0, 0.0],
[1.0, 1.0, 0.0],
[1.0, -1.0, 0.0],
])
# test_pts = np.random.random((9, 3))
# test_pts[:, 0] += 1.0
# test_pts[:, 1] -= 0.5
points = np.mgrid[1:1.1:9j, -0.5:0.2:2j, 0:1]
test_pts = points.reshape(3, -1).T
ax3d = fig3d()
ax3d.scatter(test_pts[:, 0], test_pts[:, 1], test_pts[:, 2])
axis = np.array([1.0, 0.0, 0.0])
for r, t in [(0.0, (0.0, 0.1, 0.0)), (0.25, (0.0, 0.0, 0.0))]:
rot = rot3d(r, axis)
rtheta = render_pts(rotate_pts(test_pts, rot, t))
draw_arcs(ax3d, rtheta, rot.transpose(), -rot.transpose().dot(t))
ax3d.scatter(*t, color='k')
plt.show()
if __name__ == '__main__':
go()
``` |
{
"source": "jpanikulam/ukf",
"score": 3
} |
#### File: jpanikulam/ukf/dynamics.py
```python
import numpy as np
from matplotlib import pyplot as plt
class Enum(object):
def __init__(self, _list):
self._list = _list
def __getitem__(self, key):
if key in self._list:
return self._list.index(key)
else:
raise(KeyError("{} not in enum".format(key)))
def __getattr__(self, key):
return self[key]
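# Enum maps each name to its index in the list and allows attribute-style lookup,
# e.g. Enum(['posx', 'posy']).posy == 1 (see States / Controls below).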
state_names = [
'posx',
'posy',
'velx',
'vely',
]
control_names = [
'forcex',
'forcey',
]
States = Enum(state_names)
Controls = Enum(control_names)
real_m = 0.9
def dynamics(x, u, dt=0.1):
m = real_m
x_new = np.zeros(4)
dt2 = 0.5 * dt * dt
x_new[States.posx] = x[States.posx] + (x[States.velx] * dt) + (u[Controls.forcex] * dt2 / m)
x_new[States.posy] = x[States.posy] + (x[States.vely] * dt) + (u[Controls.forcey] * dt2 / m)
x_new[States.velx] = x[States.velx] + (u[Controls.forcex] * dt / m)
x_new[States.vely] = x[States.vely] + (u[Controls.forcey] * dt / m)
return x_new
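# The update above is constant-acceleration point-mass kinematics with a = F / m:
#   pos' = pos + vel * dt + 0.5 * (F / m) * dt^2
#   vel' = vel + (F / m) * dt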
if __name__ == '__main__':
sim_x = np.array([1.0, 2.0, 3.0, 4.0])
sim_u = np.array([0.0, 0.9])
xl = []
for k in range(20):
sim_x = dynamics(sim_x, sim_u)
xl.append(sim_x)
xl = np.array(xl)
# plt.plot(xl[:, 0], xl[:, 1])
# plt.show()
``` |
{
"source": "jpapadakis/gdal",
"score": 2
} |
#### File: autotest/gcore/hfa_read.py
```python
import pytest
from osgeo import gdal
import gdaltest
init_list = [
('byte.img', 4672),
('int16.img', 4672),
('uint16.img', 4672),
('int32.img', 4672),
('uint32.img', 4672),
('float32.img', 4672),
('float64.img', 4672),
('utmsmall.img', 50054),
('2bit_compressed.img', 11918)]
@pytest.mark.parametrize(
'filename,checksum',
init_list,
ids=[tup[0].split('.')[0] for tup in init_list],
)
@pytest.mark.require_driver('HFA')
def test_hfa_open(filename, checksum):
ut = gdaltest.GDALTest('HFA', filename, 1, checksum)
ut.testOpen()
###############################################################################
# Test bugfix for https://oss-fuzz.com/v2/testcase-detail/6053338875428864
def test_hfa_read_completedefn_recursion():
with gdaltest.error_handler():
gdal.Open('data/hfa_completedefn_recursion.img')
```
#### File: autotest/osr/osr_usgs.py
```python
from osgeo import gdal
from osgeo import osr
import pytest
###############################################################################
# Test the osr.SpatialReference.ImportFromUSGS() function.
#
def test_osr_usgs_1():
srs = osr.SpatialReference()
srs.ImportFromUSGS(
8, 0,
(0.0, 0.0,
gdal.DecToPackedDMS(47.0), gdal.DecToPackedDMS(62.0),
gdal.DecToPackedDMS(45.0), gdal.DecToPackedDMS(54.5),
0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
15)
assert srs.GetProjParm(osr.SRS_PP_STANDARD_PARALLEL_1) == pytest.approx(47.0, abs=0.0000005) and srs.GetProjParm(osr.SRS_PP_STANDARD_PARALLEL_2) == pytest.approx(62.0, abs=0.0000005) and srs.GetProjParm(osr.SRS_PP_LATITUDE_OF_CENTER) == pytest.approx(54.5, abs=0.0000005) and srs.GetProjParm(osr.SRS_PP_LONGITUDE_OF_CENTER) == pytest.approx(45.0, abs=0.0000005) and srs.GetProjParm(osr.SRS_PP_FALSE_EASTING) == pytest.approx(0.0, abs=0.0000005) and srs.GetProjParm(osr.SRS_PP_FALSE_NORTHING) == pytest.approx(0.0, abs=0.0000005), \
'Can not import Equidistant Conic projection.'
###############################################################################
# Test the osr.SpatialReference.ExportToUSGS() function.
#
def test_osr_usgs_2():
srs = osr.SpatialReference()
srs.ImportFromWkt("""PROJCS["unnamed",GEOGCS["NAD27",\
DATUM["North_American_Datum_1927",\
SPHEROID["Clarke 1866",6378206.4,294.9786982139006,\
AUTHORITY["EPSG","7008"]],AUTHORITY["EPSG","6267"]],\
PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],\
AUTHORITY["EPSG","4267"]],PROJECTION["Lambert_Conformal_Conic_2SP"],\
PARAMETER["standard_parallel_1",33.90363402777778],\
PARAMETER["standard_parallel_2",33.62529002777778],\
PARAMETER["latitude_of_origin",33.76446202777777],\
PARAMETER["central_meridian",-117.4745428888889],\
PARAMETER["false_easting",0],PARAMETER["false_northing",0],\
UNIT["metre",1,AUTHORITY["EPSG","9001"]]]""")
(proj_code, _, parms, datum_code) = srs.ExportToUSGS()
assert proj_code == 4 and datum_code == 0 and gdal.PackedDMSToDec(parms[2]) == pytest.approx(33.90363403, abs=0.0000005) and gdal.PackedDMSToDec(parms[3]) == pytest.approx(33.62529003, abs=0.0000005) and gdal.PackedDMSToDec(parms[4]) == pytest.approx(-117.4745429, abs=0.0000005) and gdal.PackedDMSToDec(parms[5]) == pytest.approx(33.76446203, abs=0.0000005), \
'Can not import Lambert Conformal Conic projection.'
```
#### File: source/_extensions/driverproperties.py
```python
import sphinx.locale
import docutils.statemachine
sphinx.locale.admonitionlabels['shortname'] = u''
sphinx.locale.admonitionlabels['built_in_by_default'] = u'' #u'Built-in by default'
sphinx.locale.admonitionlabels['supports_create'] = u'' #u'Supports Create()'
sphinx.locale.admonitionlabels['supports_createcopy'] = u'' #u'Supports CreateCopy()'
sphinx.locale.admonitionlabels['supports_georeferencing'] = u'' #u'Supports georeferencing'
sphinx.locale.admonitionlabels['supports_virtualio'] = u'' #u'Supports VirtualIO'
def setup(app):
app.add_node(shortname,
html=(visit_shortname_node, depart_node),
latex=(visit_admonition, depart_node),
text=(visit_admonition, depart_node))
app.add_directive('shortname', ShortName)
app.add_node(built_in_by_default,
html=(visit_built_in_by_default_node, depart_node),
latex=(visit_admonition, depart_node),
text=(visit_admonition, depart_node))
app.add_directive('built_in_by_default', BuiltInByDefault)
app.add_node(build_dependencies,
html=(visit_build_dependencies_node, depart_node),
latex=(visit_admonition, depart_node),
text=(visit_admonition, depart_node))
app.add_directive('build_dependencies', BuildDependencies)
app.add_node(supports_create,
html=(visit_supports_create_node, depart_node),
latex=(visit_admonition, depart_node),
text=(visit_admonition, depart_node))
app.add_directive('supports_create', CreateDirective)
app.add_node(supports_createcopy,
html=(visit_supports_createcopy_node, depart_node),
latex=(visit_admonition, depart_node),
text=(visit_admonition, depart_node))
app.add_directive('supports_createcopy', CreateCopyDirective)
app.add_node(supports_georeferencing,
html=(visit_supports_georeferencing_node, depart_node),
latex=(visit_admonition, depart_node),
text=(visit_admonition, depart_node))
app.add_directive('supports_georeferencing', GeoreferencingDirective)
app.add_node(supports_virtualio,
html=(visit_supports_virtualio_node, depart_node),
latex=(visit_admonition, depart_node),
text=(visit_admonition, depart_node))
app.add_directive('supports_virtualio', VirtualIODirective)
app.connect('env-purge-doc', purge_driverproperties)
return { 'parallel_read_safe': True, 'parallel_write_safe': True }
from docutils import nodes
def visit_admonition(self, node):
self.visit_admonition(node)
def depart_node(self, node):
self.depart_admonition(node)
class shortname(nodes.Admonition, nodes.Element):
pass
def visit_shortname_node(self, node):
self.body.append(self.starttag(
node, 'div', CLASS=('admonition shortname')))
class built_in_by_default(nodes.Admonition, nodes.Element):
pass
def visit_built_in_by_default_node(self, node):
self.body.append(self.starttag(
node, 'div', CLASS=('admonition built_in_by_default')))
class build_dependencies(nodes.Admonition, nodes.Element):
pass
def visit_build_dependencies_node(self, node):
self.body.append(self.starttag(
node, 'div', CLASS=('admonition build_dependencies')))
class supports_create(nodes.Admonition, nodes.Element):
pass
def visit_supports_create_node(self, node):
self.body.append(self.starttag(
node, 'div', CLASS=('admonition supports_create')))
class supports_createcopy(nodes.Admonition, nodes.Element):
pass
def visit_supports_createcopy_node(self, node):
self.body.append(self.starttag(
node, 'div', CLASS=('admonition supports_createcopy')))
class supports_virtualio(nodes.Admonition, nodes.Element):
pass
def visit_supports_georeferencing_node(self, node):
self.body.append(self.starttag(
node, 'div', CLASS=('admonition supports_georeferencing')))
class supports_georeferencing(nodes.Admonition, nodes.Element):
pass
def visit_supports_virtualio_node(self, node):
self.body.append(self.starttag(
node, 'div', CLASS=('admonition supports_virtualio')))
from docutils.parsers.rst import Directive
from sphinx.locale import _
def finish_directive(_self, directive, node):
env = _self.state.document.settings.env
targetid = "%s-%d" % (directive, env.new_serialno(directive))
targetnode = nodes.target('', '', ids=[targetid])
_self.state.nested_parse(_self.content, _self.content_offset, node)
if not hasattr(env, 'all_' + directive):
setattr(env, 'all_' + directive, [])
getattr(env, 'all_' + directive).append({
'docname': env.docname,
'lineno': _self.lineno,
directive: node.deepcopy(),
'target': targetnode,
})
return [targetnode, node]
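# finish_directive records each admonition node in an env list named 'all_<directive>'
# (created on demand), so purge_driverproperties below can drop a document's entries
# when that document is re-read.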
class ShortName(Directive):
# this enables content in the directive
has_content = True
def run(self):
node = shortname('\n'.join(self.content))
node += nodes.title(_('Driver short name'), _('Driver short name'))
return finish_directive(self, 'shortname', node)
class BuiltInByDefault(Directive):
# this enables content in the directive
has_content = True
def run(self):
if not self.content:
self.content = docutils.statemachine.StringList(['This driver is built-in by default'])
node = built_in_by_default('\n'.join(self.content))
node += nodes.title(_('Driver built-in by default'), _('Driver built-in by default'))
return finish_directive(self, 'built_in_by_default', node)
class BuildDependencies(Directive):
# this enables content in the directive
has_content = True
def run(self):
assert self.content, "Content should be defined for build_dependencies directive"
node = build_dependencies('\n'.join(self.content))
node += nodes.title(_('Build dependencies'), _('Build dependencies'))
return finish_directive(self, 'build_dependencies', node)
class CreateDirective(Directive):
# this enables content in the directive
has_content = True
def run(self):
if not self.content:
self.content = docutils.statemachine.StringList(['This driver supports the :cpp:func:`GDALDriver::Create` operation'])
node = supports_create('\n'.join(self.content))
node += nodes.title(_('Supports Create()'), _('Supports Create()'))
return finish_directive(self, 'supports_create', node)
class CreateCopyDirective(Directive):
# this enables content in the directive
has_content = True
def run(self):
if not self.content:
self.content = docutils.statemachine.StringList(['This driver supports the :cpp:func:`GDALDriver::CreateCopy` operation'])
node = supports_createcopy('\n'.join(self.content))
node += nodes.title(_('Supports CreateCopy()'), _('Supports CreateCopy()'))
return finish_directive(self, 'supports_createcopy', node)
class GeoreferencingDirective(Directive):
# this enables content in the directive
has_content = True
def run(self):
if not self.content:
self.content = docutils.statemachine.StringList(['This driver supports georeferencing'])
node = supports_georeferencing('\n'.join(self.content))
node += nodes.title(_('Supports Georeferencing'), _('Supports Georeferencing'))
return finish_directive(self, 'supports_georeferencing', node)
class VirtualIODirective(Directive):
# this enables content in the directive
has_content = True
def run(self):
if not self.content:
self.content = docutils.statemachine.StringList(['This driver supports :ref:`virtual I/O operations (/vsimem/, etc.) <virtual_file_systems>`'])
node = supports_virtualio('\n'.join(self.content))
node += nodes.title(_('Supports VirtualIO'), _('Supports VirtualIO'))
return finish_directive(self, 'supports_virtualio', node)
def purge_driverproperties(app, env, docname):
for directive in ['all_shortname',
'all_built_in_by_default',
'all_build_dependencies',
'all_supports_create',
'all_supports_createcopy',
'all_supports_georeferencing',
'all_supports_virtualio']:
if hasattr(env, directive):
setattr(env, directive, [ embed for embed in getattr(env, directive) if embed['docname'] != docname])
```
#### File: examples/pydrivers/ogr_PASSTHROUGH.py
```python
from osgeo import gdal, ogr
try:
# The gdal_python_driver module is defined by the GDAL library at runtime
from gdal_python_driver import BaseDriver, BaseDataset, BaseLayer
except ImportError:
# To be able to run in standalone mode
class BaseDriver(object):
pass
class BaseDataset(object):
pass
class BaseLayer(object):
RandomRead = 'RandomRead'
FastSpatialFilter = 'FastSpatialFilter'
FastFeatureCount = 'FastFeatureCount'
FastGetExtent = 'FastGetExtent'
StringsAsUTF8 = 'StringsAsUTF8'
pass
class Layer(BaseLayer):
def __init__(self, gdal_layer):
self.gdal_layer = gdal_layer
self.name = gdal_layer.GetName()
self.fid_name = gdal_layer.GetFIDColumn()
self.metadata = gdal_layer.GetMetadata_Dict()
self.iterator_honour_attribute_filter = True
self.iterator_honour_spatial_filter = True
self.feature_count_honour_attribute_filter = True
self.feature_count_honour_spatial_filter = True
def fields(self):
res = []
layer_defn = self.gdal_layer.GetLayerDefn()
for i in range(layer_defn.GetFieldCount()):
ogr_field_def = layer_defn.GetFieldDefn(i)
field_def = {"name": ogr_field_def.GetName(),
"type": ogr_field_def.GetType()}
res.append(field_def)
return res
def geometry_fields(self):
res = []
layer_defn = self.gdal_layer.GetLayerDefn()
for i in range(layer_defn.GetGeomFieldCount()):
ogr_field_def = layer_defn.GetGeomFieldDefn(i)
field_def = {"name": ogr_field_def.GetName(),
"type": ogr_field_def.GetType()}
srs = ogr_field_def.GetSpatialRef()
if srs:
field_def["srs"] = srs.ExportToWkt()
res.append(field_def)
return res
def test_capability(self, cap):
if cap in (BaseLayer.FastGetExtent, BaseLayer.StringsAsUTF8,
BaseLayer.RandomRead, BaseLayer.FastFeatureCount):
return self.gdal_layer.TestCapability(cap)
return False
def extent(self, force_computation):
# Impedance mismatch between SWIG GetExtent() and the Python
# driver API
minx, maxx, miny, maxy = self.gdal_layer.GetExtent(force_computation)
return [minx, miny, maxx, maxy]
def feature_count(self, force_computation):
return self.gdal_layer.GetFeatureCount(True)
def attribute_filter_changed(self):
if self.attribute_filter:
self.gdal_layer.SetAttributeFilter(str(self.attribute_filter))
else:
self.gdal_layer.SetAttributeFilter(None)
def spatial_filter_changed(self):
# the 'inf' test is just for a test_ogrsf oddity
if self.spatial_filter and 'inf' not in self.spatial_filter:
self.gdal_layer.SetSpatialFilter(
ogr.CreateGeometryFromWkt(self.spatial_filter))
else:
self.gdal_layer.SetSpatialFilter(None)
def _translate_feature(self, ogr_f):
fields = {}
layer_defn = ogr_f.GetDefnRef()
for i in range(ogr_f.GetFieldCount()):
if ogr_f.IsFieldSet(i):
fields[layer_defn.GetFieldDefn(
i).GetName()] = ogr_f.GetField(i)
geom_fields = {}
for i in range(ogr_f.GetGeomFieldCount()):
g = ogr_f.GetGeomFieldRef(i)
if g:
geom_fields[layer_defn.GetGeomFieldDefn(
i).GetName()] = g.ExportToIsoWkt()
return {'id': ogr_f.GetFID(),
'type': 'OGRFeature',
'style': ogr_f.GetStyleString(),
'fields': fields,
'geometry_fields': geom_fields}
def __iter__(self):
for f in self.gdal_layer:
yield self._translate_feature(f)
def feature_by_id(self, fid):
ogr_f = self.gdal_layer.GetFeature(fid)
if not ogr_f:
return None
return self._translate_feature(ogr_f)
class Dataset(BaseDataset):
def __init__(self, gdal_ds):
self.gdal_ds = gdal_ds
self.layers = [Layer(gdal_ds.GetLayer(idx))
for idx in range(gdal_ds.GetLayerCount())]
self.metadata = gdal_ds.GetMetadata_Dict()
def close(self):
del self.gdal_ds
self.gdal_ds = None
class Driver(BaseDriver):
def _identify(self, filename):
prefix = 'PASSTHROUGH:'
if not filename.startswith(prefix):
return None
return gdal.OpenEx(filename[len(prefix):], gdal.OF_VECTOR)
# Required
def identify(self, filename, first_bytes, open_flags, open_options={}):
return self._identify(filename) is not None
# Required
def open(self, filename, first_bytes, open_flags, open_options={}):
gdal_ds = self._identify(filename)
if not gdal_ds:
return None
return Dataset(gdal_ds)
# Test as standalone
if __name__ == '__main__':
import sys
drv = Driver()
assert drv.identify(sys.argv[1], None, 0)
ds = drv.open(sys.argv[1], None, 0)
for l in ds.layers:
l.geometry_fields()
l.fields()
l.test_capability(BaseLayer.FastGetExtent)
l.extent(True)
l.feature_count(True)
for f in l:
print(f)
```
#### File: gdal/perftests/overview.py
```python
from osgeo import gdal
import time
def doit(compress, threads):
gdal.SetConfigOption('GDAL_NUM_THREADS', str(threads))
filename = '/vsimem/test.tif'
ds = gdal.GetDriverByName('GTiff').Create(filename, 20000, 20000, 3,
options = ['COMPRESS=' + compress,
'TILED=YES'])
ds.GetRasterBand(1).Fill(50)
    ds.GetRasterBand(2).Fill(100)  # fill band 2 (previously band 3 was filled twice, leaving band 2 empty)
ds.GetRasterBand(3).Fill(200)
ds = None
ds = gdal.Open(filename, gdal.GA_Update)
start = time.time()
ds.BuildOverviews('CUBIC', [2,4,8])
end = time.time()
print('COMPRESS=%s, NUM_THREADS=%d: %.2f' % (compress, threads, end - start))
gdal.SetConfigOption('GDAL_NUM_THREADS', None)
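# Benchmark: time BuildOverviews('CUBIC', [2, 4, 8]) on a 20000x20000 3-band tiled GTiff
# for each compression codec and GDAL_NUM_THREADS setting listed below.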
doit('NONE', 0)
doit('NONE', 2)
doit('NONE', 4)
doit('NONE', 8)
doit('ZSTD', 0)
doit('ZSTD', 2)
doit('ZSTD', 4)
doit('ZSTD', 8)
```
#### File: utils/auxiliary/rectangle.py
```python
class GeoRectangle:
__slots__ = ['x', 'y', 'w', 'h']
def __init__(self, x, y, w, h, allow_negative_size=False):
if w <= 0:
if allow_negative_size:
x = x + w
w = -w
else:
w = 0
if h <= 0:
if allow_negative_size:
y = y + h
h = -h
else:
h = 0
self.x = x
self.y = y
self.w = w
self.h = h
def __eq__(self, other):
if not isinstance(other, GeoRectangle):
# don't attempt to compare against unrelated types
return False
return self.xywh == other.xywh
def __round__(self, *args, **kwargs):
return self.from_lrdu(*(round(i, *args, **kwargs) for i in self.lrdu))
def is_empty(self):
return self.w <= 0 or self.h <= 0
def intersect(self, other: "GeoRectangle"):
return GeoRectangle.from_min_max(
max(self.min_x, other.min_x),
min(self.max_x, other.max_x),
max(self.min_y, other.min_y),
min(self.max_y, other.max_y),
)
def union(self, other: "GeoRectangle"):
return GeoRectangle.from_min_max(
min(self.min_x, other.min_x),
max(self.max_x, other.max_x),
min(self.min_y, other.min_y),
max(self.max_y, other.max_y),
)
def round(self, digits):
self.x = round(self.x, digits)
self.y = round(self.y, digits)
self.w = round(self.w, digits)
self.h = round(self.h, digits)
def align(self, geo_transform):
# compute the pixel-aligned bounding box (larger than the feature's bbox)
left = self.min_x - (self.min_x - geo_transform[0]) % geo_transform[1]
right = self.max_x + (geo_transform[1] - ((self.max_x - geo_transform[0]) % geo_transform[1]))
bottom = self.min_y + (geo_transform[5] - ((self.min_y - geo_transform[3]) % geo_transform[5]))
top = self.max_y - (self.max_y - geo_transform[3]) % geo_transform[5]
return self.from_lrud(left, right, top, bottom)
def get_partition(self, part: "GeoRectangle"):
# part: x,y - part indexes; w,h - part counts
part_width = self.w / part.w
part_hight = self.h / part.h
return GeoRectangle(
self.x + part.x * part_width,
self.y + part.y * part_hight,
part_width,
part_hight,
)
@classmethod
def empty(cls):
return cls(0, 0, 0, 0)
@classmethod
def from_lrud(cls, l, r, u, d):
ret = cls(l, d, r - l, u - d)
return ret
@classmethod
# same as min_max
def from_lrdu(cls, l, r, d, u):
ret = cls(l, d, r - l, u - d)
return ret
@classmethod
def from_lurd(cls, l, u, r, d):
""" from projwin (minx maxy maxx miny) == (ulx uly lrx lry) == (l u r d) """
ret = cls(l, d, r - l, u - d)
return ret
@classmethod
# same as min_max
def from_xwyh(cls, x, w, y, h, allow_negative_size=False):
ret = cls(x, y, w, h, allow_negative_size)
return ret
@classmethod
# # same as cls
def from_xywh(cls, x, y, w, h, allow_negative_size=False):
ret = cls(x, y, w, h, allow_negative_size)
return ret
@classmethod
# # same as cls
def from_xywhps(cls, x, y, w, h, px, py):
ret = cls(x, y, w*px, h*py, True)
return ret
@classmethod
# same as lrdu
def from_min_max(cls, min_x, max_x, min_y, max_y):
ret = cls(min_x, min_y, max_x - min_x, max_y - min_y)
return ret
@classmethod
def from_center_and_radius(cls, cent_x, cent_y, rad_x, rad_y=None):
if rad_y is None:
rad_y = rad_x
x = cent_x - rad_x
y = cent_y - rad_y
w = rad_x * 2
h = rad_y * 2
ret = cls(x, y, w, h)
return ret
@classmethod
def from_points(cls, points):
return cls.from_min_max(
min(p[0] for p in points),
max(p[0] for p in points),
min(p[1] for p in points),
max(p[1] for p in points),
)
@classmethod
def from_geotransform_and_size(cls, gt, size):
if gt[2] or gt[4]:
return cls.from_points(get_points_extent(gt, *size))
else:
# faster method
origin = (gt[0], gt[3])
pixel_size = (gt[1], gt[5])
extent = cls.from_xywhps(*origin, *size, *pixel_size)
# extent_b = cls.from_points(get_points_extent(gt, *size))
# assert extent == extent_b
return extent
def to_pixels(self, pixel_size):
return self.from_xwyh(self.x / pixel_size[0], self.w / pixel_size[0],
self.y / pixel_size[1], self.h / pixel_size[1], True)
@classmethod
def from_geotransform_and_size_to_pix(cls, gt, size):
origin = (gt[0], gt[3])
pixel_size = (gt[1], gt[5])
pix_origin = list(origin[i] / pixel_size[i] for i in (0, 1))
# pix_bounds = list(origin[i] / pixel_size[i] + size[i] for i in (0, 1))
return cls.from_xwyh(pix_origin[0], size[0], pix_origin[1], size[1])
@property
def area(self):
return self.w * self.h
@property
def size(self):
return self.w, self.h
@property
def left(self):
return self.x
@property
def right(self):
return self.x + self.w
@property
def down(self):
return self.y
@property
def up(self):
return self.y + self.h
@property
def min_x(self):
return self.x
@property
def max_x(self):
return self.x + self.w
@property
def min_y(self):
return self.y
@property
def max_y(self):
return self.y + self.h
@property
def lurd(self):
return self.left, self.up, self.right, self.down
@property
def lrud(self):
return self.left, self.right, self.up, self.down
@property
def ldru(self):
return self.left, self.down, self.right, self.up
@property
def lrdu(self):
return self.left, self.right, self.down, self.up
@property
def xywh(self):
return self.x, self.y, self.w, self.h
@property
def xwyh(self):
return self.x, self.w, self.y, self.h
@property
def min_max(self):
return self.min_x, self.max_x, self.min_y, self.max_y
def __str__(self):
return f"Rectangle(x[{self.min_x},{self.max_x}], y[{self.min_y},{self.max_y}] wh[{self.w},{self.h}])"
def __repr__(self):
return f"Rectangle(x:{self.x}, y:{self.y}, w:{self.w}, h:{self.h})"
def __hash__(self):
return hash(self.xywh)
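# Example (hypothetical numbers): GeoRectangle.from_lurd(0, 10, 10, 0) is a 10x10 box
# with min_x = 0, min_y = 0, max_x = 10, max_y = 10 and area 100.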
def get_points_extent(gt, cols, rows):
"""Return list of corner coordinates from a geotransform"""
def transform_point(px, py):
x = gt[0] + (px * gt[1]) + (py * gt[2])
y = gt[3] + (px * gt[4]) + (py * gt[5])
return x, y
return [
transform_point(0, 0),
transform_point(0, rows),
transform_point(cols, rows),
transform_point(cols, 0),
]
```
#### File: tests/gdal2tiles/test_add_gdal_warp_options_to_string.py
```python
import os
from unittest import TestCase
from xml.etree import ElementTree
import gdal2tiles
class AddGdalWarpOptionStringTest(TestCase):
def setUp(self):
with open(os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"data",
"warped.vrt"), 'r') as f:
self.orig_vrt = f.read()
def test_changes_option_tag_based_on_input_options(self):
modif_vrt = gdal2tiles.add_gdal_warp_options_to_string(self.orig_vrt, {
"foo": "bar",
"baz": "biz"
})
self.assertIn('<Option name="foo">bar</Option>', modif_vrt)
self.assertIn('<Option name="baz">biz</Option>', modif_vrt)
def test_no_changes_if_no_option(self):
modif_vrt = gdal2tiles.add_gdal_warp_options_to_string(self.orig_vrt, {})
self.assertEqual(modif_vrt, self.orig_vrt)
def test_no_changes_if_no_option_tag_present(self):
vrt_root = ElementTree.fromstring(self.orig_vrt)
vrt_root.remove(vrt_root.find("GDALWarpOptions"))
vrt_no_options = ElementTree.tostring(vrt_root).decode()
modif_vrt = gdal2tiles.add_gdal_warp_options_to_string(vrt_no_options, {
"foo": "bar",
"baz": "biz"
})
self.assertEqual(modif_vrt, vrt_no_options)
```
#### File: tests/gdal2tiles/test_reproject_dataset.py
```python
from unittest import mock, TestCase
from osgeo import gdal, osr
import gdal2tiles
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class ReprojectDatasetTest(TestCase):
def setUp(self):
self.DEFAULT_OPTIONS = {
'verbose': True,
'resampling': 'near',
'title': '',
'url': '',
}
self.DEFAULT_ATTRDICT_OPTIONS = AttrDict(self.DEFAULT_OPTIONS)
self.mercator_srs = osr.SpatialReference()
self.mercator_srs.ImportFromEPSG(3857)
self.geodetic_srs = osr.SpatialReference()
self.geodetic_srs.ImportFromEPSG(4326)
def test_raises_if_no_from_or_to_srs(self):
with self.assertRaises(gdal2tiles.GDALError):
gdal2tiles.reproject_dataset(None, None, self.mercator_srs)
with self.assertRaises(gdal2tiles.GDALError):
gdal2tiles.reproject_dataset(None, self.mercator_srs, None)
def test_returns_dataset_unchanged_if_in_destination_srs_and_no_gcps(self):
from_ds = mock.MagicMock()
from_ds.GetGCPCount = mock.MagicMock(return_value=0)
to_ds = gdal2tiles.reproject_dataset(from_ds, self.mercator_srs, self.mercator_srs)
self.assertEqual(from_ds, to_ds)
@mock.patch('gdal2tiles.gdal', spec=gdal)
def test_returns_warped_vrt_dataset_when_from_srs_different_from_to_srs(self, mock_gdal):
mock_gdal.AutoCreateWarpedVRT = mock.MagicMock(spec=gdal.Dataset)
from_ds = mock.MagicMock(spec=gdal.Dataset)
from_ds.GetGCPCount = mock.MagicMock(return_value=0)
gdal2tiles.reproject_dataset(from_ds, self.mercator_srs, self.geodetic_srs)
mock_gdal.AutoCreateWarpedVRT.assert_called_with(from_ds,
self.mercator_srs.ExportToWkt(),
self.geodetic_srs.ExportToWkt())
``` |