{
"source": "johannbrehmer/rl-6-nimmt",
"score": 3
} |
#### File: rl_6_nimmt/utils/replay_buffer.py
```python
import random
import numba
import numpy as np
import torch
from collections import defaultdict
import logging
logger = logging.getLogger(__name__)
# Taken and modified from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/contents/5.2_Prioritized_Replay_DQN/RL_brain.py
@numba.jit(nopython=True)
def _update(tree, tree_index, priority, total_priority):
# Change = new priority score - former priority score
change = priority - tree[tree_index]
tree[tree_index] = priority
# then propagate the change through tree
# this method is faster than the recursive loop
while tree_index != 0:
tree_index = (tree_index - 1) // 2
tree[tree_index] += change
# assert total_priority > 0
return tree
@numba.jit(nopython=True)
def _get_leaf_ids(n, tree, priority_segment):
idx = np.empty((n,), dtype=np.uint32)
for i in range(n):
# A value is uniformly sampled from each range
value = priority_segment * i + random.random() * priority_segment
# print("value:", value)
# The experience index that corresponds to each value is retrieved
idx[i] = _get_leaf_index(tree, value)
return idx
@numba.jit(nopython=True)
def _get_leaf_index(tree, v):
parent_index = 0
while True:
left_child_index = 2 * parent_index + 1
right_child_index = left_child_index + 1
# If we reach bottom, end the search
if left_child_index >= len(tree):
leaf_index = parent_index
break
else: # downward search, always search for a higher priority node
if v <= tree[left_child_index]:
parent_index = left_child_index
else:
v -= tree[left_child_index]
parent_index = right_child_index
return leaf_index
class SumTree:
# Here we initialize the tree with all nodes = 0, and initialize the data with all values = 0
def __init__(self, capacity):
self.data_pointer = 0
# Number of leaf nodes (final nodes) that contains experiences
self.capacity = capacity
self.num_items = 0
# Generate the tree with all nodes values = 0
# To understand this calculation (2 * capacity - 1) look at the schema below
# Remember we are in a binary node (each node has max 2 children) so 2x size of leaf (capacity) - 1 (root node)
# Parent nodes = capacity - 1
# Leaf nodes = capacity
self.tree = np.zeros(2 * capacity - 1)
# Contains the experiences (so the size of data is capacity)
self.data = np.zeros(capacity, dtype=object)
def add(self, priority, data):
# Look at what index we want to put the experience
tree_index = self.data_pointer + self.capacity - 1
""" tree:
0
/ \
0 0
/ \ / \
tree_index 0 0 0 We fill the leaves from left to right
"""
# Update data frame
self.data[self.data_pointer] = data
# Update the leaf
self.update(tree_index, priority)
# Add 1 to data_pointer
self.data_pointer += 1
self.num_items = min(self.num_items + 1, self.capacity)
if self.data_pointer >= self.capacity: # If we're above the capacity, we go back to the first index (we overwrite)
self.data_pointer = 0
def update(self, tree_index, priority):
self.tree = _update(self.tree, tree_index, priority, self.total_priority)
# @numba.jit(nopython=True)
def get_leaf(self, v):
leaf_index = _get_leaf_index(self.tree, v)
data_index = leaf_index - self.capacity + 1
# assert isinstance(self.data[data_index], dict)
return leaf_index, self.tree[leaf_index], self.data[data_index]
@property
def total_priority(self):
return self.tree[0] # Returns the root node
class PriorityReplayBuffer:
"""
A lot is going on here, which needs some explaining:
1. We want to use priority replay to draw more often from memories/transitions, which have a higher proportion of
information.
2. Memories are weighted according to the temporal difference error. Naively implementing this would be inefficient
(e.g. sorting the array by weights for example) -> SumTree helps here
3. Due to the weights introduced, we actually contradict our first reason to introduce a random replay buffer
decorrelation of memories. To avoid this, we borrow an idea from importance sampling.
4. When calculating the error between q targets and predicted q values, we assign the memories with a high
priority/high temporal difference error a lower weight. The rationale behind this: "Hey you will see this values quite often,
so do not overemphasis it too much.
"""
def __init__(self, max_length=None, dtype=torch.float, device=torch.device("cpu")):
# Making the tree
self.dtype = dtype
self.device = device
if max_length is None:
raise ValueError("PriorityReplayBuffer needs max length!")
self.tree = SumTree(max_length)
self.absolute_error_upper = 1.0 # clipped abs error
# stored as ( state, action, reward, next_state ) in SumTree
self.epsilon = 0.01 # Hyperparameter used to avoid experiences having a probability of 0 of being sampled
self.alpha = 0.6 # Hyperparameter used to trade off between sampling only high-priority experiences and sampling uniformly
self.beta = 0.4 # importance-sampling, from initial value increasing to 1
self.beta_increment = 0.001
def store(self, **experience):
# Find the max priority
max_priority = np.max(self.tree.tree[-self.tree.capacity :])
# If the max priority = 0 we can't put priority = 0 since this experience will never have a chance to be selected
# So we use a minimum priority
if max_priority == 0:
max_priority = self.absolute_error_upper
self.tree.add(max_priority, experience) # set the max priority for new priority
def sample(self, n):
priority_segment = self.tree.total_priority / n # priority segment
self.beta = np.min([1.0, self.beta + self.beta_increment])
start_idx = len(self.tree.tree) - self.tree.capacity
end_idx = start_idx + self.tree.num_items
min_prob = np.min(self.tree.tree[start_idx:end_idx]) / self.tree.total_priority # for later calculate ISweight
minibatch, b_idx, importance_sampling_weights = self.get_samples(min_prob, n, priority_segment)
# for key, value in minibatch.items(): # convert to arrays
# value = self.stackify(value)
# minibatch[key] = value
return b_idx, importance_sampling_weights, minibatch
def get_samples(self, min_prob, n, priority_segment):
leaf_idx = _get_leaf_ids(n, self.tree.tree, priority_segment)
data_idx = leaf_idx - self.tree.capacity + 1
priorities = self.tree.tree[leaf_idx]
data_batch = self.tree.data[data_idx]
assert 0 not in data_batch, "Wrong data in sample detected"
probs = priorities / self.tree.total_priority
importance_sampling_weights = np.power(probs / min_prob, -self.beta)
# assert isinstance(self.data[data_index], dict)
minibatch = {k: [dic[k] for dic in data_batch] for k in data_batch[0]}
# for x in data_batch:
# for key, value in x.items():
# minibatch[key].append(value)
return minibatch, leaf_idx, importance_sampling_weights
def batch_update(self, tree_idx, abs_errors):
"""'
must be called to update priorities
"""
abs_errors += self.epsilon # convert to abs and avoid 0
if isinstance(abs_errors, torch.Tensor):
abs_errors = abs_errors.cpu().numpy()
clipped_errors = np.minimum(abs_errors, self.absolute_error_upper)
ps = clipped_errors ** self.alpha
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
def __len__(self):
return self.tree.num_items
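# --- Hedged usage sketch (added; not part of the original module) ---
# Illustrates the store/sample/batch_update cycle described in the class
# docstring above. The field names and the fake TD errors are assumptions
# for illustration only; the helper function is never called on import.
def _priority_buffer_example():
    buffer = PriorityReplayBuffer(max_length=1024)
    for step in range(64):
        buffer.store(state=step, action=0, reward=1.0, next_state=step + 1)
    tree_idx, is_weights, batch = buffer.sample(8)
    # ... compute TD errors for the sampled transitions, then feed them back:
    buffer.batch_update(tree_idx, np.abs(np.random.randn(8)))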
class History:
""" Generic replay buffer. Can accommodate arbitrary fields. """
def __init__(self, max_length=None, dtype=torch.float, device=torch.device("cpu")):
self.memories = None
self.max_length = max_length
self.data_pointer = 0
self.is_full = False
if max_length:
self.memories = np.empty((max_length,), dtype=object)
else:
self.memories = np.empty((128,), dtype=object) # double memory size each time limit is hit
self.device = device
self.dtype = dtype
def store(self, **kwargs):
self.memories[self.data_pointer] = kwargs
self.is_full = False
self.data_pointer += 1
if self.max_length is not None and self.data_pointer >= self.max_length:
self.data_pointer = 0
self.is_full = True
if self.data_pointer >= self.memories.shape[0] and self.max_length is None:
# self.memories.resize(self.memories.shape * 2) # Raises some ValueError
self.memories = np.resize(self.memories, self.memories.shape[0] * 2)
# @timeit
def sample(self, n):
idx = random.sample(range(len(self)), k=n)
data_batch = self.memories[idx]
minibatch = {k: [dic[k] for dic in data_batch] for k in data_batch[0]}
return idx, None, minibatch
def rollout(self, n=None):
""" When n is not None, returns only the last n entries """
data_batch = self.memories[: len(self)] if n is None else self.memories[len(self) - n : len(self)]
minibatch = {k: [dic[k] for dic in data_batch] for k in data_batch[0]}
return minibatch
def __len__(self):
if self.max_length is None:
return self.data_pointer
else:
if self.is_full:
return self.max_length
else:
return self.data_pointer
def clear(self):
if self.max_length:
self.memories = np.empty((self.max_length,), dtype=object)
else:
self.memories = np.empty((128,), dtype=object) # double memory size each time limit is hit
self.data_pointer = 0
def __add__(self, other):
raise DeprecationWarning("Is not used anymore... I hope?")
assert list(self.memories.keys()) == list(other.memories.keys())
history = History(self.max_length)
history.memories = dict()
for key, val in self.memories.items():
history.memories[key] = val + other.memories[key]
return history
class SequentialHistory(History):
""" Generic replay buffer where each entry represents a sequence of events. Can accommodate arbitrary fields. """
def __init__(self, max_length=None, dtype=torch.float, device=torch.device("cpu")):
super().__init__(max_length=max_length, dtype=dtype, device=device)
self.current_sequence = dict()
def current_sequence_length(self):
if len(self.current_sequence) == 0:
return 0
else:
return len(self.current_sequence[list(self.current_sequence.keys())[0]])
def store(self, **kwargs):
# Store in temporary sequence buffer
if self.current_sequence_length() == 0: # Nothing saved in current sequence
for key, val in kwargs.items():
self.current_sequence[key] = [val]
self.current_sequence["first"] = [True]
else:
for key, val in kwargs.items():
self.current_sequence[key].append(val)
self.current_sequence["first"].append(False)
def flush(self):
""" Push current sequence to ("long-term") memory """
assert self.current_sequence_length() > 0
super().store(**self.current_sequence)
self.current_sequence = dict()
``` |
{
"source": "johannbrehmer/workflow-madminer",
"score": 2
} |
#### File: bin/internal/shower_card.py
```python
import sys
import re
import os
import logging
logger = logging.getLogger('madgraph.shower_card')
pjoin = os.path.join
class ShowerCardError(Exception):
pass
class ShowerCard(dict):
""" """
true = ['.true.', 't', 'true', '1']
false = ['.false.', 'f', 'false', '0']
logical_vars = ['ue_enabled', 'hadronize', 'b_stable', 'pi_stable', 'wp_stable',
'wm_stable', 'z_stable', 'h_stable', 'tap_stable', 'tam_stable',
'mup_stable', 'mum_stable', 'is_4lep', 'is_bbar', 'combine_td']
string_vars = ['extralibs', 'extrapaths', 'includepaths', 'analyse']
for i in range(1,100):
string_vars.append('dm_'+str(i))
int_vars = ['nsplit_jobs', 'maxprint', 'nevents', 'pdfcode', 'rnd_seed', 'rnd_seed2', 'njmax']
float_vars = ['maxerrs', 'lambda_5', 'b_mass', 'qcut']
# names_dict has the following structure:
# var : {PYTHIA6: varpy6, HERWIG6: varhw6, HERWIGPP: varhwpp, PYTHIA8: varpy8}
# where varpy6, varhw6, varhwpp and varpy8 are the MC-dependent names
# if an MC is not there, that variable is not supposed to be
# used / written for that MC
names_dict = {\
'ue_enabled' : {'HERWIG6':'lhsoft', 'PYTHIA6': 'mstp_81', 'HERWIGPP': 'ue_hwpp', 'PYTHIA8': 'ue_py8'},
'pdfcode' : {'HERWIG6':'pdfcode', 'PYTHIA6': 'pdfcode', 'HERWIGPP': 'pdfcode', 'PYTHIA8': 'pdfcode'},
'nevents' : {'HERWIG6':'nevents', 'PYTHIA6': 'nevents', 'HERWIGPP': 'nevents', 'PYTHIA8': 'nevents'},
'hadronize' : {'PYTHIA6': 'mstp_111', 'HERWIGPP': 'hadronize_hwpp', 'PYTHIA8': 'hadronize_py8'},
'b_stable' : {'HERWIG6':'b_stable_hw', 'PYTHIA6': 'b_stable_py', 'HERWIGPP': 'b_stable_hwpp', 'PYTHIA8': 'b_stable_py8'},
'pi_stable' : {'HERWIG6':'pi_stable_hw', 'PYTHIA6': 'pi_stable_py', 'HERWIGPP': 'pi_stable_hwpp', 'PYTHIA8': 'pi_stable_py8'},
'wp_stable' : {'HERWIG6':'wp_stable_hw', 'PYTHIA6': 'wp_stable_py', 'HERWIGPP': 'wp_stable_hwpp', 'PYTHIA8': 'wp_stable_py8'},
'wm_stable' : {'HERWIG6':'wm_stable_hw', 'PYTHIA6': 'wm_stable_py', 'HERWIGPP': 'wm_stable_hwpp', 'PYTHIA8': 'wm_stable_py8'},
'z_stable' : {'HERWIG6':'z_stable_hw', 'PYTHIA6': 'z_stable_py', 'HERWIGPP': 'z_stable_hwpp', 'PYTHIA8': 'z_stable_py8'},
'h_stable' : {'HERWIG6':'h_stable_hw', 'PYTHIA6': 'h_stable_py', 'HERWIGPP': 'h_stable_hwpp', 'PYTHIA8': 'h_stable_py8'},
'tap_stable' : {'HERWIG6':'taup_stable_hw', 'PYTHIA6': 'taup_stable_py', 'HERWIGPP': 'taup_stable_hwpp', 'PYTHIA8': 'taup_stable_py8'},
'tam_stable' : {'HERWIG6':'taum_stable_hw', 'PYTHIA6': 'taum_stable_py', 'HERWIGPP': 'taum_stable_hwpp', 'PYTHIA8': 'taum_stable_py8'},
'mup_stable' : {'HERWIG6':'mup_stable_hw', 'PYTHIA6': 'mup_stable_py', 'HERWIGPP': 'mup_stable_hwpp', 'PYTHIA8': 'mup_stable_py8'},
'mum_stable' : {'HERWIG6':'mum_stable_hw', 'PYTHIA6': 'mum_stable_py', 'HERWIGPP': 'mum_stable_hwpp', 'PYTHIA8': 'mum_stable_py8'},
'is_4lep' : {'PYTHIA6':'is_4l_py'},
'is_bbar' : {'HERWIG6':'is_bb_hw'},
'maxprint' : {'HERWIG6':'maxpr_hw', 'PYTHIA6': 'maxpr_py', 'HERWIGPP': 'maxpr_hwpp', 'PYTHIA8': 'maxpr_py8'},
'rnd_seed' : {'HERWIG6':'rndevseed1_hw', 'PYTHIA6': 'rndevseed_py', 'HERWIGPP': 'rndevseed_hwpp', 'PYTHIA8': 'rndevseed_py8'},
'rnd_seed2' : {'HERWIG6':'rndevseed2_hw'},
'maxerrs' : {'HERWIG6':'err_fr_hw', 'PYTHIA6': 'err_fr_py', 'HERWIGPP': 'err_fr_hwpp', 'PYTHIA8': 'err_fr_py8'},
'lambda_5' : {'HERWIG6':'lambdaherw', 'PYTHIA6': 'lambdapyth', 'HERWIGPP': 'lambdaherw', 'PYTHIA8': 'lambdapyth'},
'b_mass' : {'HERWIG6':'b_mass', 'PYTHIA6': 'b_mass', 'HERWIGPP': 'b_mass', 'PYTHIA8': 'b_mass'},
'analyse' : {'HERWIG6':'hwuti', 'PYTHIA6':'pyuti', 'HERWIGPP':'hwpputi', 'PYTHIA8':'py8uti'},
'qcut' : {'PYTHIA8':'qcut'},
'njmax' : {'PYTHIA8':'njmax'}}
stdhep_dict = {'HERWIG6':'mcatnlo_hwan_stdhep.o', 'PYTHIA6':'mcatnlo_pyan_stdhep.o'}
def __init__(self, card=None, testing=False):
""" if testing, card is the content"""
self.testing = testing
dict.__init__(self)
self.keylist = self.keys()
if card:
self.read_card(card)
def read_card(self, card_path):
"""read the shower_card, if testing card_path is the content"""
if not self.testing:
content = open(card_path).read()
else:
content = card_path
lines = content.split('\n')
list_dm = []
for l in lines:
if '#' in l:
l = l.split('#',1)[0]
if '=' not in l:
continue
args = l.split('=',1) # here the 1 is important in case of string passed
key = args[0].strip().lower()
value = args[1].strip()
self.set_param(key, value)
if str(key).upper().startswith('DM'):
list_dm.append(int(key.split('_',1)[1]))
#special case for DM_*
for i in range(1,100):
if not i in list_dm:
self['dm_'+str(i)] = ''
self.text=content
def set_param(self, key, value, write_to = ''):
"""set the param key to value.
if write_to is passed then write the new shower_card:
if not testing write_to is an input path, if testing the text is
returned by the function
"""
if key in self.logical_vars:
if str(value).lower() in self.true:
self[key] = True
elif str(value).lower() in self.false:
self[key] = False
else:
raise ShowerCardError('%s is not a valid value for %s' % \
(value, key))
elif key in self.string_vars:
if value.lower() == 'none':
self[key] = ''
else:
self[key] = value
elif key in self.int_vars:
try:
self[key] = int(value)
except ValueError:
raise ShowerCardError('%s is not a valid value for %s. An integer number is expected' % \
(value, key))
elif key in self.float_vars:
try:
self[key] = float(value)
except ValueError:
raise ShowerCardError('%s is not a valid value for %s. A floating point number is expected' % \
(value, key))
else:
raise ShowerCardError('Unknown entry: %s = %s' % (key, value))
self.keylist.append(key)
#then update self.text and write the new card
if write_to:
logger.info('modify parameter %s of the shower_card.dat to %s' % (key, value))
key_re = re.compile('^(\s*)%s\s*=\s*(.+)\s*$' % key , re.IGNORECASE)
newlines = []
for line in self.text.split('\n'):
key_match = key_re.match(line)
if key_match and not ( str(key).upper().startswith('DM') ):
try:
comment = line.split('#')[1]
except:
comment = ''
if key not in self.logical_vars:
newlines.append('%s = %s #%s' % (key, value, comment))
else:
if self[key]:
newlines.append('%s = %s #%s' % (key, 'T', comment))
else:
newlines.append('%s = %s #%s' % (key, 'F', comment))
elif key_match and ( str(key).upper().startswith('DM') ):
pass
else:
newlines.append(line)
if str(key).upper().startswith('DM') and not value.lower() in ['','none','default']:
newlines.append('%s = %s' % (str(key).upper(), value[0:len(value)]))
logger.info('please specify a decay through set DM_1 decay; see shower_card.dat for details')
self.text = '\n'.join(newlines) + '\n'
if self.testing:
return self.text
else:
open(write_to, 'w').write(self.text)
return ''
else:
return ''
def write_card(self, shower, card_path):
"""write the shower_card for shower in card_path.
if self.testing, the translated card text is returned as a string instead of being written to card_path"""
shower = shower.upper()
if shower.startswith('PYTHIA6'):
self.shower = 'PYTHIA6'
else:
self.shower = shower
lines = []
bool_dict = {True: '.true.', False: '.false.'}
bool_dict_num = {True: '1', False: '0'}
for key in self.keylist:
value = self[key]
if key in self.logical_vars:
# deal with special case for pythia:
if key in ['ue_enabled', 'hadronize'] and self.shower == 'PYTHIA6':
value = bool_dict_num[value]
else:
value = bool_dict[value]
elif key in self.string_vars:
# deal in a special way with analyse
if key == 'analyse':
if value is None or not value:
try:
value = self.stdhep_dict[self.shower]
except KeyError:
pass
try:
line = '%s="%s"' % (self.names_dict[key][self.shower].upper(), value)
lines.append(line)
continue
except KeyError:
continue
if value is None or not value:
value = ''
else:
value = '"%s"' % value
line = '%s=%s' % (key.upper(), value)
lines.append(line)
continue
elif key in self.int_vars:
value = '%d' % value
elif key in self.float_vars:
value = '%4.3f' % value
else:
raise ShowerCardError('Unknown key: %s = %s' % (key, value))
try:
line = '%s=%s' % (self.names_dict[key][self.shower].upper(), value.upper())
lines.append(line)
except KeyError:
pass
if self.testing:
return ('\n'.join(lines) + '\n')
else:
open(card_path, 'w').write(('\n'.join(lines) + '\n'))
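# --- Hedged usage sketch (added; not part of the original module) ---
# With testing=True the card content is passed in as a string and write_card()
# returns the translated text instead of writing a file. The parameter values
# below are illustrative assumptions only; the helper is never called on import.
def _example_shower_card():
    content = "nevents = 100\nue_enabled = T\nb_mass = 4.75"
    card = ShowerCard(card=content, testing=True)
    return card.write_card('PYTHIA8', '')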
``` |
{
"source": "johannchopin/htw-m1-softwarearchitecture",
"score": 2
} |
#### File: src/batch/BatchProcessing.py
```python
import os
import sys
from time import time
from typing import Set
from datetime import datetime
from ..EmailChecker import EmailChecker
from ..serving.CassandraViews import CassandraViewsInstance
from .CassandraWrapper import CassandraWrapper
EMAIL_CHUNKS_LENGTH = 200
EMAIL_SENT_TIMESTAMP_LIMIT = 30
PERCENTAGE_OF_SPAMS_TO_BLACKLIST = 0.2
if '-q' in sys.argv:
sys.stdout = open(os.devnull, 'w')
class BatchProcessing:
def __init__(self, masterDatasetCassandraInstance):
self.emailChecker = EmailChecker()
self.cassandraMasterDataset = masterDatasetCassandraInstance
self.cassandraViews = CassandraViewsInstance
self.spamSenderCount = 0
self.spamEmailsCount = 0
def getSendersEmailAddresses(self) -> Set[str]:
sendersResponse = self.cassandraMasterDataset.execute(
"SELECT sender FROM emails;")
return {response.sender for response in sendersResponse}
def process(self):
while True:
self.cassandraViews.init_next_table()
senderEmailAddresses = self.getSendersEmailAddresses()
for emailAddress in senderEmailAddresses:
self.processEmail(emailAddress)
timestamp = int(time() * 10**6)
self.cassandraViews.addSpamLog(timestamp, self.spamSenderCount)
self.cassandraViews.addSpamAmountDetectedByBatch(
self.spamEmailsCount)
self.spamSenderCount = 0 # reset counter
self.spamEmailsCount = 0 # reset counter
self.cassandraViews.use_next_table()
print("Batch process finished")
def areEmailsFromFlood(self, emails, emailsCount):
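# Hedged note (added): a sender is treated as flooding if some email and the one
# EMAIL_CHUNKS_LENGTH positions later in the result set were sent within
# EMAIL_SENT_TIMESTAMP_LIMIT of each other, as measured by _timestamp_diff
# (which returns milliseconds).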
# TODO: refactor to for loop
counter = 0
while (counter + EMAIL_CHUNKS_LENGTH) < emailsCount:
timestamp1 = emails._current_rows[counter].timestamp.timestamp()
timestamp2 = emails._current_rows[counter +
EMAIL_CHUNKS_LENGTH].timestamp.timestamp()
if self._timestamp_diff(timestamp1, timestamp2) <= EMAIL_SENT_TIMESTAMP_LIMIT:
return True
counter += 1
return False
def emailsContainsSpamWords(self, emails, emailsCount):
emailContainingSpamWordsCounter = 0
for i in range(emailsCount):
emailBody = emails._current_rows[i].body
email = {'body': emailBody}
isSpam = self.emailChecker.isSpam(email)
if isSpam:
emailContainingSpamWordsCounter += 1
# Blacklist if more than 20% of the emails are spam
return (emailContainingSpamWordsCounter / emailsCount) > PERCENTAGE_OF_SPAMS_TO_BLACKLIST
def processEmail(self, emailAddress):
emailsResponse = self.cassandraMasterDataset.execute(
f"select * from emails where sender='{emailAddress}' ALLOW FILTERING;")
emailsCount = len(emailsResponse._current_rows)
if self.areEmailsFromFlood(emailsResponse, emailsCount):
self.insert_email_into_spam_view(emailAddress)
self.spamSenderCount += 1 # An email has been detected as spam
self.spamEmailsCount += emailsCount
else:
oneSpamHasBeenDetected = False
for email in emailsResponse:
if self.emailChecker.isSpam({'body': email.body}):
oneSpamHasBeenDetected = True
self.spamEmailsCount += 1
if oneSpamHasBeenDetected:
self.insert_email_into_spam_view(emailAddress)
self.spamSenderCount += 1 # An email has been detected as spam
def insert_email_into_spam_view(self, emailAddress):
self.cassandraViews.execute(
f"INSERT INTO {self.cassandraViews.getNextSpamsTableName()}(email) VALUES('{emailAddress}')")
def _timestamp_diff(self, timestamp1: float, timestamp2: float) -> int:
return int(abs(timestamp1 - timestamp2) * 10**3)
if __name__ == "__main__":
batchProcessing = BatchProcessing(CassandraWrapper())
batchProcessing.process()
``` |
{
"source": "JohanneBW/cds_language_assignments",
"score": 3
} |
#### File: Assignment_5/src/GameStop_LDA.py
```python
import sys,os
sys.path.append(os.path.join(".."))
from pprint import pprint
# data and nlp
import pandas as pd
import spacy
nlp = spacy.load("en_core_web_sm", disable=["ner"])
# visualisation
import matplotlib.pyplot as plt
import pyLDAvis.gensim
import seaborn as sns
from matplotlib import rcParams
# figure size in inches
rcParams['figure.figsize'] = 20,10
# LDA tools
import nltk
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
from utils import lda_utils
"""
---------- Main function ----------
"""
def main():
'''
------------------ Read data --------------------
'''
#Read in the data as a csv file
filename = os.path.join("..", "data", "r_wallstreetbets_posts.csv")
DATA = pd.read_csv(filename)
'''
The data set contains a lot of information which we do not need for our model.
This is information about username, individual links etc.
We are primarily going to use the title column in the data set. This column contains the actual text.
'''
#Only use three columns from the csv and a sample of 10000 rows.
DATA = DATA[["title","created_utc", "score"]].sample(10000)
#Split the text into individual sentences
#Create empty list where the sentences will be stored
output=[]
#For every title in the column "title"
print("Creating Doc object...")
for title in DATA["title"]:
#Create a doc object by using the spaCy NLP function
doc = nlp(title)
#Append to the list
output.append(str(doc))
'''
----------- Process using gensim and spaCy ----------------
'''
'''
Next we use gensim to efficiently produce a model of bigrams and trigrams in the data.
We first create bigrams from words that frequently appear one after another.
These bigrams are then fed into a trigram model, which can treat an existing bigram as one half of a potential trigram.
'''
# Build the bigram and trigram models
print("Building bi- and trigrams...")
bigram = gensim.models.Phrases(output, min_count=20, threshold=100) # a higher threshold gives fewer phrases.
trigram = gensim.models.Phrases(bigram[output], threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
'''
We use the process_words function from our utils folder.
This function takes a text, nlp, bigram_mod, trigram_mod, stop_words and allowed_postags as arguments.
It uses gensim to preprocess the words and uses spaCy to lemmatize and POS tag.
'''
#Run the function with our arguments and set the allowed_postags to nouns and proper nouns
print("Processing the data...")
data_processed = lda_utils.process_words(output, nlp, bigram_mod, trigram_mod, allowed_postags=["NOUN", "PROPN"])
#Create Dictionary
#The dictionary converts each word into an integer value
print("Creating Dictionary...")
id2word = corpora.Dictionary(data_processed)
# Create Corpus: Term Document Frequency. The corpus creates a 'bag of words' model for all of the data
print("Creating Corpus...")
corpus = [id2word.doc2bow(text) for text in data_processed]
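# Hedged illustration (added): doc2bow maps a tokenised document to sparse
# (token_id, count) pairs, e.g. id2word.doc2bow(["gamestop", "stock", "gamestop"])
# would give something like [(12, 2), (51, 1)], assuming both tokens are in the dictionary.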
'''
--------------- Build LDA model ------------------------
'''
# Build LDA model using gensim
print("Building LDA model...")
lda_model = gensim.models.LdaMulticore(corpus=corpus, # vectorised corpus - list of list of tuples
id2word=id2word, # gensim dictionary - mapping words to IDS
num_topics=3, # topics. This will be explained later in the script
random_state=100, # set for reproducibility
chunksize=10, # batch data for efficiency
passes=10, # number of full passes over data
iterations=100, # related to document rather than corpus
per_word_topics=True, # define word distributions
minimum_probability=0.0) # minimum value
'''
-------------- Calculate model perplexity and coherence -------------------------
'''
# Compute Perplexity
print('\nPerplexity: ', lda_model.log_perplexity(corpus))
#A measure of how good the model is.
#Calculate and return per-word likelihood bound, using a chunk of documents as evaluation corpus.
#It returns the variational bound score calculated for each word.
# Compute Coherence Score
coherence_model_lda = CoherenceModel(model=lda_model,
texts=data_processed,
dictionary=id2word,
coherence='c_v') #We use c_v as our coherence measure
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
'''
-------------- Find the most optimal number of topics -------------------------
'''
'''
We want to find the optimal number of topics for our model.
Although the coherence value may be high for a large number of topics, that does not necessarily mean it is the optimal choice.
One reason is that words are repeated across topics more often as the number of topics grows.
So if one wants to avoid this, fewer topics can be an advantage.
'''
print("Finding optimal topic number...")
model_list, coherence_values = lda_utils.compute_coherence_values(texts=data_processed,
corpus=corpus,
dictionary=id2word,
start=1, #The number of topics to start from
limit=40, #The maximum number of topics
step=2) #The steps between the number of topics
'''
When we first ran the search for the optimal topic number, 7 topics came out as the best.
But when we later in the script visualized how the topics are distributed,
it became clear that they formed three main clusters in which the topics overlapped.
For this reason, we have chosen to use three topics in the model.
'''
'''
--------------------- Find most dominant topic per chunk ---------------------
'''
df_topic_keywords = lda_utils.format_topics_sentences(ldamodel=lda_model,
corpus=corpus,
texts=data_processed)
#Reset the index
df_dominant_topic = df_topic_keywords.reset_index()
#Choose the columns for the dataframe
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
df_dominant_topic.sample(10)
#Display setting to show more characters in column
pd.options.display.max_colwidth = 100
#Create dataframe
sent_topics_sorted_df = pd.DataFrame()
#Use groupby on the column containing the dominant topic
sent_topics_out_df_grpd = df_topic_keywords.groupby('Dominant_Topic')
for i, grp in sent_topics_out_df_grpd:
#Concatenate the sent_topics_sorted_df with the column Perc_Contribution
sent_topics_sorted_df = pd.concat([sent_topics_sorted_df,
grp.sort_values(['Perc_Contribution'], ascending=False).head(1)],
axis=0)
# Reset the index
sent_topics_sorted_df.reset_index(drop=True, inplace=True)
#Choose the columns for the dataframe
sent_topics_sorted_df.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Representative Text"]
'''
--------------------- Create dataframe for the values ---------------------
'''
values = list(lda_model.get_document_topics(corpus))
#Split tuples and keep only values per topic
split = []
for entry in values:
topic_prevalence = []
for topic in entry:
topic_prevalence.append(topic[1])
split.append(topic_prevalence)
#Create document-topic matrix
value_df = pd.DataFrame(map(list,zip(*split)))
print("Saving output...")
#Outpath for the dataframe
df_outpath = os.path.join("..", "output", "value_df.csv")
#Save dataframe to a csv file
value_df.to_csv(df_outpath)
#Save visualization to a png file
sns.lineplot(data=value_df.T.rolling(50).mean())
outpath_viz = os.path.join("..", "output", "topic_matrix_viz.png")
plt.savefig(outpath_viz)
print("Output saved")
#Define behaviour when called from command line
if __name__ == "__main__":
main()
``` |
{
"source": "JohanneBW/cds_visual_assignments",
"score": 3
} |
#### File: Assignment_4/src/nn-mnist.py
```python
import os
import sys
sys.path.append(os.path.join(".."))
import argparse
# Import teaching utils
import numpy as np
import utils.classifier_utils as clf_util
from utils.neuralnetwork import NeuralNetwork
# Import sklearn metrics
from sklearn import metrics
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from sklearn import datasets
"""
---------- Main function ----------
"""
def main():
"""
---------- Parameters ----------
"""
# Create an argument parser from argparse
ap = argparse.ArgumentParser()
# add argument about size of training data with 80% as default
ap.add_argument("-trs", "--train_size",
required=False, default = 0.8,
type = float,
help="The size of the train data as percent, the default is 0.8")
# add argument about size of test data with 20 % as default
ap.add_argument("-tes", "--test_size",
required=False,
default = 0.2,
type = float,
help="The size of the test data as percent, the default is 0.2")
# add argument about number of epochs with 20 epochs as default
ap.add_argument("-epo", "--epochs_number",
required=False,
default = 20,
type = int,
help="The number of epochs, the default is 20")
args = vars(ap.parse_args())
trs_size = args["train_size"]
tes_size = args["test_size"]
epochs_number = args["epochs_number"]
"""
---------- Neural network model ----------
"""
print("[nfo] Neural network model...")
# Fetch data. When fetching the data like this, X and y are already defined as the data and the labels.
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
# Convert to numpy arrays
X = np.array(X)
y = np.array(y)
# MinMax normalization
X = ( X - X.min())/(X.max() - X.min())
("[nfo] Splitting into train and test...")
# Split data. X contains the data and will be split into training and test sets. y contains the labels and will be split into train and test as well.
X_train, X_test, y_train, y_test = train_test_split(X,
y,
train_size = trs_size,
test_size=tes_size)
# Convert labels from integers to vectors
y_train = LabelBinarizer().fit_transform(y_train)
y_test = LabelBinarizer().fit_transform(y_test)
# Train the network
print("[INFO] training network...")
# The layers are 32 and 16 and the output is 10
nn = NeuralNetwork([X_train.shape[1], 32, 16, 10])
print("[INFO] {}".format(nn))
nn.fit(X_train, y_train, epochs=epochs_number)
# Evaluate network
print(["[INFO] Evaluating network..."])
predictions = nn.predict(X_test)
predictions = predictions.argmax(axis=1)
print(classification_report(y_test.argmax(axis=1), predictions))
#Define behaviour when called from command line
if __name__ == "__main__":
main()
``` |
{
"source": "johannemitzcisco/pnp-manager",
"score": 2
} |
#### File: python/pnp_manager/main.py
```python
import _ncs
import ncs
import ncs.maapi as maapi
from ncs.application import Service
class PnPDevice(Service):
@Service.create
def cb_create(self, tctx, root, service, proplist):
self.log.info('Service create(service=', service._path, ')')
vars = ncs.template.Variables()
template = ncs.template.Template(service)
for servicerole in service.role:
# This will use the last role with PnP info in the device's roles.
# Need to modify the service model to restrict to only one role
# with PnP info
role = root.device_role[servicerole.name]
if role.pnp.authgroup and role.pnp.day0_file:
self.log.info("Processing role: "+servicerole.name)
if role.pnp.authgroup is not None:
vars.add('AUTHGROUP', role.pnp.authgroup)
if root.devices.authgroups.group[role.pnp.authgroup].default_map.remote_name:
vars.add('USERNAME', root.devices.authgroups.group[role.pnp.authgroup].default_map.remote_name)
if root.devices.authgroups.group[role.pnp.authgroup].default_map.remote_password:
vars.add('PASSWORD', decrypt(root.devices.authgroups.group[role.pnp.authgroup].default_map.remote_password))
if role.pnp.username is not None:
vars.add('USERNAME', role.pnp.username)
if role.pnp.password is not None:
vars.add('PASSWORD', role.pnp.password)
if role.pnp.port is not None:
vars.add('PORT', role.pnp.port)
if role.pnp.day0_file is not None:
vars.add('DAY0-FILE', role.pnp.day0_file)
# if service.authgroup is not None:
# vars.add('AUTHGROUP', service.authgroup)
# if root.devices.authgroups.group[service.authgroup].default_map.remote_name:
# vars.add('USERNAME', root.devices.authgroups.group[service.authgroup].default_map.remote_name)
# if root.devices.authgroups.group[service.authgroup].default_map.remote_password:
# vars.add('PASSWORD', decrypt(root.devices.authgroups.group[service.authgroup].default_map.remote_password))
# if service.username is not None:
# vars.add('USERNAME', service.username)
# if service.password is not None:
# vars.add('PASSWORD', service.password)
# if service.port is not None:
# vars.add('PORT', service.port)
# if service.day0_file is not None:
# vars.add('DAY0-FILE', service.day0_file)
template.apply('pnp-manager-device-pnp-map', vars)
class Main(ncs.application.Application):
def setup(self):
self.log.info('Main RUNNING')
self.register_service('pnp-device-servicepoint', PnPDevice)
def teardown(self):
self.log.info('Main FINISHED')
def decrypt(value):
with maapi.Maapi() as m:
m.install_crypto_keys()
return _ncs.decrypt(value)
``` |
{
"source": "Johannes0Horn/ESRGAN_for_colab",
"score": 2
} |
#### File: codes/scripts/generate_mod_LR_bic.py
```python
import os
import sys
import cv2
import numpy as np
try:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from data.util import imresize_np
except ImportError:
pass
def generate_mod_LR_bic():
# set parameters
up_scale = 4
mod_scale = 4
# set data dir
sourcedir = '/content/Set14_Set5'
savedir = '/content/set14_5preprocessed'
saveHRpath = os.path.join(savedir, 'HR', 'x' + str(mod_scale))
saveLRpath = os.path.join(savedir, 'LR', 'x' + str(up_scale))
saveBicpath = os.path.join(savedir, 'Bic', 'x' + str(up_scale))
if not os.path.isdir(sourcedir):
print('Error: No source data found')
exit(0)
if not os.path.isdir(savedir):
os.mkdir(savedir)
if not os.path.isdir(os.path.join(savedir, 'HR')):
os.mkdir(os.path.join(savedir, 'HR'))
if not os.path.isdir(os.path.join(savedir, 'LR')):
os.mkdir(os.path.join(savedir, 'LR'))
if not os.path.isdir(os.path.join(savedir, 'Bic')):
os.mkdir(os.path.join(savedir, 'Bic'))
if not os.path.isdir(saveHRpath):
os.mkdir(saveHRpath)
else:
print('It will overwrite ' + str(saveHRpath))
if not os.path.isdir(saveLRpath):
os.mkdir(saveLRpath)
else:
print('It will overwrite ' + str(saveLRpath))
if not os.path.isdir(saveBicpath):
os.mkdir(saveBicpath)
else:
print('It will overwrite ' + str(saveBicpath))
filepaths = [f for f in os.listdir(sourcedir) if f.endswith('.png')]
num_files = len(filepaths)
# prepare data with augmentation
for i in range(num_files):
filename = filepaths[i]
print('No.{} -- Processing {}'.format(i, filename))
# read image
image = cv2.imread(os.path.join(sourcedir, filename))
width = int(np.floor(image.shape[1] / mod_scale))
height = int(np.floor(image.shape[0] / mod_scale))
# modcrop
if len(image.shape) == 3:
image_HR = image[0:mod_scale * height, 0:mod_scale * width, :]
else:
image_HR = image[0:mod_scale * height, 0:mod_scale * width]
# LR
image_LR = imresize_np(image_HR, 1 / up_scale, True)
# bic
image_Bic = imresize_np(image_LR, up_scale, True)
cv2.imwrite(os.path.join(saveHRpath, filename), image_HR)
cv2.imwrite(os.path.join(saveLRpath, filename), image_LR)
cv2.imwrite(os.path.join(saveBicpath, filename), image_Bic)
if __name__ == "__main__":
generate_mod_LR_bic()
``` |
{
"source": "Johannes0Horn/mtl-dts",
"score": 3
} |
#### File: Johannes0Horn/mtl-dts/crf.py
```python
import itertools
import torch
import torch.nn as nn
class CRFLoss(nn.Module):
def __init__(self, L, init): # L = number of label types
super(CRFLoss, self).__init__()
self.start = nn.Parameter(torch.Tensor(L).uniform_(-init, init))
self.T = nn.Parameter(torch.Tensor(L, L).uniform_(-init, init))
self.end = nn.Parameter(torch.Tensor(L).uniform_(-init, init))
def forward(self, scores, targets):
# scores (B x T x L), assumes no padding
# targets (B x T), assumes no padding
normalizers = self.compute_normalizers(scores)
target_scores = self.score_targets(scores, targets)
loss = (normalizers - target_scores).mean()
return loss
def decode(self, scores): # B x T x L
B, T, L = scores.size()
prev = self.start.unsqueeze(0) + scores[:, 0] # TODO (B x L)
back = []
for i in range(1, T):
cur = prev.unsqueeze(2) + scores.transpose(0, 1)[i].unsqueeze(1) + self.T.transpose(0, 1)
prev, indices = cur.max(dim=1) # TODO (indices: B x L)
back.append(indices)
prev += self.end
max_scores, indices = prev.max(dim=1) # TODO (indices: B)
tape = [indices]
back = list(reversed(back))
for i in range(T - 1):
indices = torch.gather(back[i], 1, indices.unsqueeze(1)).squeeze(1) # TODO
tape.append(indices)
return max_scores, torch.stack(tape[::-1], dim=1)
# def decode_brute(self, scores):
# B, T, L = scores.size()
# all_targets = []
# yseq_scores = []
# for yseq in itertools.product(list(range(L)), repeat=T):
# targets = torch.LongTensor(yseq).expand(B, T)
# all_targets.append(torch.LongTensor(yseq))
# yseq_scores.append(self.score_targets(scores, targets))
# max_scores, indices = torch.stack(yseq_scores).max(dim=0)
# return max_scores, torch.stack(all_targets)[indices]
def compute_normalizers(self, scores):
B, T, L = scores.size()
prev = self.start + scores.transpose(0, 1)[0] # TODO (B x L)
for i in range(1, T):
cur = prev.unsqueeze(2) + scores.transpose(0, 1)[i].unsqueeze(1) + self.T.transpose(0, 1) # TODO: implement only using prev (no new definition)
prev = torch.logsumexp(cur, dim=1).clone()
prev += self.end
normalizers = torch.logsumexp(prev, 1) # TODO (B)
return normalizers
# def compute_normalizers_brute(self, scores):
# B, T, L = scores.size()
# yseq_scores = []
# for yseq in itertools.product(list(range(L)), repeat=T):
# targets = torch.LongTensor(yseq).expand(B, T)
# yseq_scores.append(self.score_targets(scores, targets))
# normalizers = torch.stack(yseq_scores).logsumexp(dim=0)
# return normalizers
def score_targets(self, scores, targets):
B, T, L = scores.size()
emits = scores.gather(2, targets.unsqueeze(2)).squeeze(2).sum(1) # B
trans = torch.stack(
[self.start.gather(0, targets[:, 0])] +
[self.T[targets[:, i], targets[:, i - 1]] for i in range(1, T)] +
[self.end.gather(0, targets[:, -1])]).sum(0) # B
return emits + trans # B
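# --- Hedged usage sketch (added; not part of the original module) ---
# Shows the tensor shapes the loss and the Viterbi decoder expect. The sizes
# below (B, T, L) and the random scores are illustrative assumptions only.
if __name__ == "__main__":
    torch.manual_seed(0)
    B, T, L = 2, 5, 4
    crf = CRFLoss(L, init=0.1)
    scores = torch.randn(B, T, L)                  # emission scores from some encoder
    targets = torch.randint(0, L, (B, T))          # gold label sequences
    loss = crf(scores, targets)                    # mean negative log-likelihood
    best_scores, best_paths = crf.decode(scores)   # Viterbi scores (B) and paths (B x T)
    print(loss.item(), best_paths.shape)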
```
#### File: Johannes0Horn/mtl-dts/logger.py
```python
import os
import sys
import numpy as np
import statistics as stat
class Logger(object):
def __init__(self, log_path, on=True):
self.log_path = log_path
self.on = on
if self.on:
while os.path.isfile(self.log_path):
self.log_path += '+'
def log(self, string, newline=True):
if self.on:
with open(self.log_path, 'a') as logf:
logf.write(string)
if newline: logf.write('\n')
sys.stdout.write(string)
if newline: sys.stdout.write('\n')
sys.stdout.flush()
def log_perfs(self, perfs, best_args):
valid_perfs = [perf for perf in perfs if not np.isinf(perf)]
best_perf = max(valid_perfs)
self.log('%d perfs: %s' % (len(perfs), str(perfs)))
self.log('best perf: %g' % best_perf)
self.log("best args: %s" % str(best_args))
self.log('')
self.log('perf max: %g' % best_perf)
self.log('perf min: %g' % min(valid_perfs))
self.log('perf avg: %g' % stat.mean(valid_perfs))
self.log('perf std: %g' % (stat.stdev(valid_perfs)
if len(valid_perfs) > 1 else 0.0))
self.log('(excluded %d out of %d runs that produced -inf)' %
(len(perfs) - len(valid_perfs), len(perfs)))
``` |
{
"source": "johannes-99/nextcloud-service-python",
"score": 3
} |
#### File: johannes-99/nextcloud-service-python/api-server.py
```python
import nextcloudservice
from flask import Flask
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class FileList(Resource):
def get(self):
filesinput = nextcloudservice.read_webdav_dir("")
filesoutput = []
for file in filesinput:
filesoutput.append( {
'name' : file.name,
'dir' : file.directory, #TODO: bugfix dir contains name
'isdir' : file.isdir
})
return {'filelist': filesoutput }
api.add_resource(FileList, '/filelist')
class HelloWorld(Resource):
def get(self):
return {'hello': "world 1" }
api.add_resource(HelloWorld, '/')
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0')
```
#### File: johannes-99/nextcloud-service-python/nextcloudservice.py
```python
import os
from webdav3.client import Client
from flask_restful import Resource
class FileModel(Resource):
def __init__(self, name, directory):
self._name = name
self._dir = directory
self._isdir = self.isdir
@property
def isdir(self):
return self._name.endswith("/")
@property
def name(self):
return self._name
@name.setter
def name(self,name):
self._name = name
@property
def directory(self):
return "fooo"
@name.setter
def directory(self,directory):
self._directory = directory
options = {
}
client = Client(options)
client.verify = False
def read_webdav_dir(directory):
files = []
#"Fotos Save/"
webdavfiles = client.list("")
for webdavfile in webdavfiles:
file = FileModel(webdavfile,directory)
print(directory)
files.append( file )
return files
``` |
{
"source": "johannesbrand/pyulog",
"score": 3
} |
#### File: pyulog/pyulog/px4.py
```python
from __future__ import print_function
import numpy as np
__author__ = "<NAME>"
class PX4ULog(object):
"""
This class contains PX4-specific ULog things (field names, etc.)
"""
def __init__(self, ulog_object):
"""
@param ulog_object: ULog instance
"""
self._ulog = ulog_object
def get_mav_type(self):
""" return the MAV type as string from initial parameters """
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
return {0: 'Generic',
1: 'Fixed Wing',
2: 'Quadrotor',
3: 'Coaxial helicopter',
4: 'Normal helicopter with tail rotor',
5: 'Ground installation',
6: 'Ground Control Station',
7: 'Airship, controlled',
8: 'Free balloon, uncontrolled',
9: 'Rocket',
10: 'Ground Rover',
11: 'Surface Vessel, Boat, Ship',
12: 'Submarine',
13: 'Hexarotor',
14: 'Octorotor',
15: 'Tricopter',
16: 'Flapping wing',
17: 'Kite',
18: 'Onboard Companion Controller',
19: 'Two-rotor VTOL (Tailsitter)',
20: 'Quad-rotor VTOL (Tailsitter)',
21: 'Tiltrotor VTOL',
22: 'VTOL Standard', #VTOL reserved 2
23: 'VTOL reserved 3',
24: 'VTOL reserved 4',
25: 'VTOL reserved 5',
26: 'Onboard Gimbal',
27: 'Onboard ADSB Peripheral'}.get(mav_type, 'unknown type')
def get_estimator(self):
"""return the configured estimator as string from initial parameters"""
mav_type = self._ulog.initial_parameters.get('MAV_TYPE', None)
if mav_type == 1: # fixed wing always uses EKF2
return 'EKF2'
mc_est_group = self._ulog.initial_parameters.get('SYS_MC_EST_GROUP', None)
return {0: 'INAV',
1: 'LPE',
2: 'EKF2',
3: 'IEKF'}.get(mc_est_group, 'unknown ({})'.format(mc_est_group))
def add_roll_pitch_yaw(self):
""" convenience method to add the fields 'roll', 'pitch', 'yaw' to the
loaded data using the quaternion fields (does not update field_data).
Messages are: 'vehicle_attitude.q' and 'vehicle_attitude_setpoint.q_d',
'vehicle_attitude_groundtruth.q' and 'vehicle_vision_attitude.q' """
self._add_roll_pitch_yaw_to_message('vehicle_attitude')
self._add_roll_pitch_yaw_to_message('vehicle_vision_attitude')
self._add_roll_pitch_yaw_to_message('vehicle_attitude_groundtruth')
self._add_roll_pitch_yaw_to_message('vehicle_attitude_setpoint', '_d')
def _add_roll_pitch_yaw_to_message(self, message_name, field_name_suffix=''):
message_data_all = [elem for elem in self._ulog.data_list if elem.name == message_name]
for message_data in message_data_all:
q = [message_data.data['q'+field_name_suffix+'['+str(i)+']'] for i in range(4)]
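# Hedged note (added): assuming q = [w, x, y, z] (Hamilton convention), the lines
# below are the standard quaternion -> roll/pitch/yaw (ZYX Tait-Bryan) conversion.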
roll = np.arctan2(2.0 * (q[0] * q[1] + q[2] * q[3]),
1.0 - 2.0 * (q[1] * q[1] + q[2] * q[2]))
pitch = np.arcsin(2.0 * (q[0] * q[2] - q[3] * q[1]))
yaw = np.arctan2(2.0 * (q[0] * q[3] + q[1] * q[2]),
1.0 - 2.0 * (q[2] * q[2] + q[3] * q[3]))
message_data.data['roll'+field_name_suffix] = roll
message_data.data['pitch'+field_name_suffix] = pitch
message_data.data['yaw'+field_name_suffix] = yaw
def get_configured_rc_input_names(self, channel):
"""
find all RC mappings to a given channel and return their names
:param channel: input channel (0=first)
:return: list of strings or None
"""
ret_val = []
for key in self._ulog.initial_parameters:
param_val = self._ulog.initial_parameters[key]
if key.startswith('RC_MAP_') and param_val == channel + 1:
ret_val.append(key[7:].capitalize())
if len(ret_val) > 0:
return ret_val
return None
``` |
{
"source": "johannesbreyer/HandleLenght",
"score": 3
} |
#### File: MeasureHandles.roboFontExt/lib/MeasureHandles.py
```python
from math import *
from mojo.drawingTools import *
#-----------
# constants
#-----------
#: Handle length in relation to circle radius, for creating circles with Bezier curves.
BEZIER_ARC_CIRCLE = 0.5522847498
#-----------
# functions
#-----------
def vector(xy, angle, distance):
"""
Calculate a new position based on a given angle and distance.
"""
x, y = xy
_x = x + cos(radians(angle)) * distance
_y = y + sin(radians(angle)) * distance
return _x, _y
def get_vector(pt1, pt2):
"""
Get the distance and angle between two points.
"""
(x1, y1), (x2, y2) = pt1, pt2
a = x2 - x1
b = y2 - y1
distance = sqrt(a ** 2 + b ** 2)
if a != 0:
angle_radians = atan(float(b) / a)
angle_degrees = degrees(angle_radians)
else:
angle_degrees = 0
return distance, angle_degrees
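# Hedged examples (added for illustration, not part of the original extension):
# vector((0, 0), 90, 10) -> (~0.0, 10.0), i.e. 10 units straight up;
# get_vector((0, 0), (10, 10)) -> (~14.142, 45.0), i.e. distance and angle in degrees.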
#---------
# objects
#---------
class MeasureHandles(object):
draw_box = True
draw_handles = True
draw_angles = True
draw_in = True
draw_out = True
radius = 0.3
font_size = 9
font = "Lucida Grande Bold"
stroke_width = 1
color_angle = 1, 0, 0
color_box = 1, 0, 0
color_handle = 1, 0, 0
stroke_alpha = 0.65
stroke_dash = 2
def __init__(self, glyph):
self.glyph = glyph
def _get_positions(self, bPoint):
# box origin point
x0, y0 = bPoint.anchor
# incoming bcp
w1, h1 = bPoint.bcpIn
x1, y1 = x0 + w1, y0 + h1
# outgoing bcp
w2, h2 = bPoint.bcpOut
x2, y2 = x0 + w2, y0 + h2
# done
return x0, y0, w1, h1, w2, h2, x1, y1, x2, y2
def draw(self, scale=1.0):
if self.glyph is not None:
# setup drawing
save()
fontSize(self.font_size * scale)
font(self.font)
# draw!
for contour in self.glyph:
for bPoint in contour.bPoints:
# draw box info
if self.draw_box:
self._draw_box(bPoint, scale)
# draw handles info
if self.draw_handles:
self._draw_handles(bPoint, scale)
# draw angles info
if self.draw_angles:
self._draw_angles(bPoint, scale)
# done with glyph
restore()
def _draw_handles(self, bPoint, scale):
# get positions
x0, y0, w1, h1, w2, h2, x1, y1, x2, y2 = self._get_positions(bPoint)
# setup drawing
save()
strokeWidth(self.stroke_width * scale)
sw = self.stroke_width * scale * self.stroke_dash
dashLine(sw, sw)
# draw incoming bcp
if self.draw_in:
if w1 != 0 or h1 != 0:
# draw line
fill(None)
c = self.color_handle + (self.stroke_alpha,)
stroke(*c)
line((x0, y0), (x1, y1))
# draw caption
d1 = sqrt(w1 ** 2 + h1 ** 2)
d1_caption = '%.2f' % d1
d1_w, d1_h = textSize(d1_caption)
d1_x = x0 + (w1 * 0.5) - (d1_w * 0.5)
d1_y = y0 + (h1 * 0.5) - (d1_h * 0.4)
fill(*self.color_handle)
stroke(None)
textBox(d1_caption, (d1_x, d1_y, d1_w, d1_h), align='center')
# draw outgoing bcp
if self.draw_out:
if w2 != 0 or h2 != 0:
# draw line
fill(None)
c = self.color_handle + (self.stroke_alpha,)
stroke(*c)
line((x0, y0), (x2, y2))
# draw caption
d2 = sqrt(w2 * w2 + h2 * h2)
d2_caption = '%.2f' % d2
d2_w, d2_h = textSize(d2_caption)
d2_x = x0 + (w2 * 0.5) - (d2_w * 0.5)
d2_y = y0 + (h2 * 0.5) - (d2_h * 0.4)
fill(*self.color_handle)
stroke(None)
textBox(d2_caption, (d2_x, d2_y, d2_w, d2_h), align='center')
# done
restore()
def _draw_box(self, bPoint, scale):
# get positions
x0, y0, w1, h1, w2, h2, x1, y1, x2, y2 = self._get_positions(bPoint)
# setup drawing
save()
strokeWidth(self.stroke_width * scale)
sw = self.stroke_width * scale * self.stroke_dash
dashLine(sw, sw)
# draw box for incoming bcp
if self.draw_in and not int(w1) == 0 and not int(h1) == 0:
# draw box
fill(None)
c = self.color_box + (self.stroke_alpha,)
stroke(*c)
rect(x0, y0, w1, h1)
# draw captions
stroke(None)
fill(*self.color_box)
# draw x caption
x1_caption = '%.2f' % abs(w1)
x1_w, x1_h = textSize(x1_caption)
x1_x = x0 + (w1 * 0.5) - (x1_w * 0.5)
x1_y = y1 - x1_h * 0.4
textBox(x1_caption, (x1_x, x1_y, x1_w, x1_h), align='center')
# draw y caption
y1_caption = '%.2f' % abs(h1)
y1_w, y1_h = textSize(y1_caption)
y1_x = x1 - (y1_w * 0.5)
y1_y = y0 + (h1 * 0.5) - (y1_h * 0.4)
textBox(y1_caption, (y1_x, y1_y, y1_w, y1_h), align='center')
# draw box for outgoing bcp
if self.draw_out and not int(w2) == 0 and not int(h2) == 0:
# draw box
fill(None)
c = self.color_box + (self.stroke_alpha,)
stroke(*c)
rect(x0, y0, w2, h2)
# draw captions
stroke(None)
fill(*self.color_box)
# draw x caption
x2_caption = '%.2f' % abs(w2)
x2_w, x2_h = textSize(x2_caption)
x2_x = x0 + (w2 * 0.5) - (x2_w * 0.5)
x2_y = y2 - x2_h * 0.4
textBox(x2_caption, (x2_x, x2_y, x2_w, x2_h), align='center')
# draw y caption
y2_caption = '%.2f' % abs(h2)
y2_w, y2_h = textSize(y2_caption)
y2_x = x2 - (y2_w * 0.5)
y2_y = y0 + (h2 * 0.5) - (y2_h * 0.4)
textBox(y2_caption, (y2_x, y2_y, y2_w, y2_h), align='center')
# done
restore()
def _draw_angles(self, bPoint, scale):
f = BEZIER_ARC_CIRCLE
# get positions
x0, y0, w1, h1, w2, h2, x1, y1, x2, y2 = self._get_positions(bPoint)
# setup drawing
save()
strokeWidth(self.stroke_width * scale)
sw = self.stroke_width * scale * self.stroke_dash
dashLine(sw, sw)
# draw angles for incoming BCP
if self.draw_in and not int(w1) == 0 and not int(h1) == 0:
handle_length, angle = get_vector((x0, y0), (x1, y1))
r = handle_length * self.radius
a1 = angle % 90
a2 = 90 - a1
if w1 > 0 and h1 > 0:
x3, y3 = vector((x0, y0), angle - a1 * 0.5, r)
x4, y4 = vector((x0, y0), angle + a2 * 0.5, r)
p1_x, p1_y = x0 + r, y0
p2_x, p2_y = x0, y0 + r
p3_x, p3_y = p1_x, p1_y + r * f
p4_x, p4_y = p2_x + r * f, p2_y
elif w1 > 0 and h1 < 0:
x3, y3 = vector((x0, y0), angle - a1 * 0.5, r)
x4, y4 = vector((x0, y0), angle + a2 * 0.5, r)
p1_x, p1_y = x0 + r, y0
p2_x, p2_y = x0, y0 - r
p3_x, p3_y = p1_x, p1_y - r * f
p4_x, p4_y = p2_x + r * f, p2_y
elif w1 < 0 and h1 < 0:
x3, y3 = vector((x0, y0), 180 + angle - a1 * 0.5, r)
x4, y4 = vector((x0, y0), 180 + angle + a2 * 0.5, r)
p2_x, p2_y = x0 - r, y0
p1_x, p1_y = x0, y0 - r
p3_x, p3_y = p1_x - r * f, p1_y
p4_x, p4_y = p2_x, p2_y - r * f
else:
x3, y3 = vector((x0, y0), 180 + angle - a1 * 0.5, r)
x4, y4 = vector((x0, y0), 180 + angle + a2 * 0.5, r)
p1_x, p1_y = x0 - r, y0
p2_x, p2_y = x0, y0 + r
p3_x, p3_y = p1_x, p1_y + r * f
p4_x, p4_y = p2_x - r * f, p2_y
# draw angle arch
c = self.color_angle + (self.stroke_alpha,)
stroke(*c)
fill(None)
newPath()
moveTo((p1_x, p1_y))
curveTo((p3_x, p3_y), (p4_x, p4_y), (p2_x, p2_y))
drawPath()
# draw angle captions
stroke(None)
fill(*self.color_angle)
# caption angle 1
caption_a1 = '%.2f' % a1
a1_w, a1_h = textSize(caption_a1)
a1_x = x3 - (a1_w * 0.5)
a1_y = y3 - (a1_h * 0.4)
textBox(caption_a1, (a1_x, a1_y, a1_w, a1_h), align='center')
# caption angle 2
caption_a2 = '%.2f' % a2
a2_w, a2_h = textSize(caption_a2)
a2_x = x4 - (a2_w * 0.5)
a2_y = y4 - (a2_h * 0.5)
textBox(caption_a2, (a2_x, a2_y, a2_w, a2_h), align='center')
# draw angles for outgoing BCP
if self.draw_out and not int(w2) == 0 and not int(h2) == 0:
handle_length, angle = get_vector((x0, y0), (x2, y2))
r = handle_length * self.radius
a1 = angle % 90
a2 = 90 - a1
if w2 > 0 and h2 > 0:
x5, y5 = vector((x0, y0), angle - a1 * 0.5, r)
x6, y6 = vector((x0, y0), angle + a2 * 0.5, r)
p1_x, p1_y = x0 + r, y0
p2_x, p2_y = x0, y0 + r
p3_x, p3_y = p1_x, p1_y + r * f
p4_x, p4_y = p2_x + r * f, p2_y
elif w2 > 0 and h2 < 0:
x5, y5 = vector((x0, y0), angle - a1 * 0.5, r)
x6, y6 = vector((x0, y0), angle + a2 * 0.5, r)
p1_x, p1_y = x0, y0 - r
p2_x, p2_y = x0 + r, y0
p3_x, p3_y = p1_x + r * f, p1_y
p4_x, p4_y = p2_x, p2_y - r * f
elif w2 < 0 and h2 < 0:
x5, y5 = vector((x0, y0), 180 + angle - a1 * 0.5, r)
x6, y6 = vector((x0, y0), 180 + angle + a2 * 0.5, r)
p1_x, p1_y = x0 - r, y0
p2_x, p2_y = x0, y0 - r
p3_x, p3_y = p1_x, p1_y - r * f
p4_x, p4_y = p2_x - r * f, p2_y
else:
x5, y5 = vector((x0, y0), 180 + angle - a1 * 0.5, r)
x6, y6 = vector((x0, y0), 180 + angle + a2 * 0.5, r)
p1_x, p1_y = x0 - r, y0
p2_x, p2_y = x0, y0 + r
p3_x, p3_y = p1_x, p1_y + r * f
p4_x, p4_y = p2_x - r * f, p2_y
# draw angle arch
c = self.color_angle + (self.stroke_alpha,)
stroke(*c)
fill(None)
newPath()
moveTo((p1_x, p1_y))
curveTo((p3_x, p3_y), (p4_x, p4_y), (p2_x, p2_y))
drawPath()
# draw angle captions
stroke(None)
fill(*self.color_angle)
# caption angle 1
caption_a1 = '%.2f' % a1
a1_w, a1_h = textSize(caption_a1)
a1_x = x5 - (a1_w * 0.5)
a1_y = y5 - (a1_h * 0.4)
textBox(caption_a1, (a1_x, a1_y, a1_w, a1_h), align='center')
# caption angle 2
caption_a2 = '%.2f' % a2
a2_w, a2_h = textSize(caption_a2)
a2_x = x6 - (a2_w * 0.5)
a2_y = y6 - (a2_h * 0.5)
textBox(caption_a2, (a2_x, a2_y, a2_w, a2_h), align='center')
# done
restore()
``` |
{
"source": "JohannesBruch/web_pipeline",
"score": 4
} |
#### File: JohannesBruch/web_pipeline/remove_annotated_images.py
```python
import yaml, os
def main():
# load yaml
# deserialise list from yaml
# load yaml list of image addresses
with open("phone_images.yaml", 'r') as stream:
phone_images = yaml.load(stream)
# load yaml list of model-name annotations
with open("phone_image_annotations.yaml", 'r') as stream:
phone_image_annotations = yaml.load(stream)
# load yaml list of image source URLs
with open("phone_image_URLs.yaml", 'r') as stream:
phone_image_URLs = yaml.load(stream)
# define string names of lists in a dictionary
dictionary = dict(phone_image_URLs=phone_image_URLs,
phone_image_annotations=phone_image_annotations,
phone_images=phone_images
)
# ask for address of DATA to be DELETED
print('Which images would you like to delete from the PI dataset?')
index_string = input("Lowest address: ") # reading address of first image to be removed
addresses_for_correction = [int(s) for s in index_string.split()
if s.isdigit()]
lowest_address = addresses_for_correction[0]
index_string = input("Highest address: ") # reading address of last image to be removed
addresses_for_correction = [int(s) for s in index_string.split()
if s.isdigit()]
highest_address = addresses_for_correction[0]
print('If you want to remove all images with addresses between')
print(lowest_address)
print('and')
print(highest_address)
print(', please reply "y" . Otherwise, the dataset will not be modified.')
correct_string = input("Reply: ")
if correct_string == 'y' and highest_address-lowest_address+1 > 10:
print('Are you sure you want to DELETE that many images?')
correct_string = input("Reply: ")
if correct_string == 'y':
for address_for_correction in range(lowest_address, highest_address + 1):
image_path = r'' + str(address_for_correction) + '.jpg'
# remove image from dataset
try:
image_index = phone_images.index(image_path)
phone_image_URLs.pop(image_index)
phone_image_annotations.pop(image_index)
phone_images.pop(image_index)
print('The address ' + str(address_for_correction) + '.jpg has been removed from the dataset together with its annotation & URL,')
except ValueError:
print('Address ' + str(address_for_correction) + ' was not found,')
# If file exists, delete it ##
if os.path.isfile(image_path):
os.remove(image_path)
print('and the .jpg file was deleted.')
else: # Show an error ##
print("but the .jpg file was not found.")
# save yaml
    for key, value in dictionary.items():
        with open(key + '.yaml', 'w') as stream:
            yaml.dump(value, stream)
else:
print('The dataset has not been modified.')
``` |
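The script keeps three YAML-backed lists (image paths, model-name annotations, source URLs) consistent by always popping the same index from each of them. A stand-alone sketch of that pattern with made-up contents (not the real dataset files):

```python
import yaml

def remove_entries(paths, annotations, urls, victims):
    """Drop every path listed in victims from the three parallel lists, keeping them aligned."""
    for victim in victims:
        try:
            idx = paths.index(victim)
        except ValueError:
            continue  # not in the dataset, nothing to remove
        for lst in (paths, annotations, urls):
            lst.pop(idx)

paths = ['1.jpg', '2.jpg', '3.jpg']
annotations = ['phone A', 'phone B', 'phone C']
urls = ['http://example.org/a', 'http://example.org/b', 'http://example.org/c']
remove_entries(paths, annotations, urls, victims=['2.jpg'])
print(yaml.safe_dump({'phone_images': paths, 'phone_image_annotations': annotations,
                      'phone_image_URLs': urls}))
```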
{
"source": "JohannesBuchner/flight-reservation-emails",
"score": 3
} |
#### File: JohannesBuchner/flight-reservation-emails/summary.py
```python
import notmuch
import BeautifulSoup
import datetime
import dateutil.parser
import emailparser
import logging
from tzlocal import get_localzone
import sys
import os
logging.basicConfig(filename='emailparser.log',level=logging.DEBUG)
logFormatter = logging.Formatter("[%(name)s %(levelname)s]: %(message)s")
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
consoleHandler.setLevel(logging.WARN)
logging.getLogger().addHandler(consoleHandler)
if len(sys.argv) < 2:
sys.stderr.write("""SYNOPSIS: %(exe)s <query>
query: query to use. Has to be in quotes.
Example usage:
%(exe)s 'schema.org/FlightReservation OR ticket OR flight OR flug OR viaje OR booking OR confirmation OR confirmacion'
To speed up date parsing, you can specify the languages to consider with the
LANGUAGES environment variable:
LANGUAGES="en de es" <cmd>
Author: <NAME> (c) 2017
""" % dict(exe=sys.argv[0]))
sys.exit(1)
db = notmuch.Database()
query = sys.argv[1]
query = db.create_query(query)
#'schema.org/FlightReservation OR ticket OR flight OR flug OR viaje OR booking OR confirmation OR confirmacion')
languages = os.environ.get('LANGUAGES', None)
if languages is not None:
languages = languages.split()
#query = db.create_query('schema.org/FlightReservation OR eticket OR flight')
#languages = ['en']
#query = db.create_query('schema.org/FlightReservation')
all_reservations = emailparser.parse_multiple_email_messages(query.search_messages(), languages=languages)
#all_reservations = []
#messages = list(query.search_messages())
#for i, m in enumerate(messages[::-1]):
# print('handling %d/%d: "%s" from %s' % (i, len(messages), m.get_header('Subject'),
# datetime.datetime.fromtimestamp(m.get_date()).strftime('%Y-%m-%d')))
# reservations = emailparser.parse_email_message(m, languages = languages)
# print('got %d reservations' % len(all_reservations))
# all_reservations += reservations
print('got %d reservations' % len(all_reservations))
def prepend(info, k, prefix):
if info[k] and info[k] != '':
info[k] = prefix + info[k]
def dateConverter(day):
#day = dateutil.parser.parse(dateText)
if day.tzinfo is not None:
return day
print 'Warning: Using local time zone to order %s' % day
local_tz = get_localzone()
return day.replace(tzinfo=local_tz)
# sort by departure time
all_reservations.sort(key=lambda info: dateConverter(info['departureTime']))
previous = None
fout = open('summary.html', 'w')
fout.write("""<!doctype html><html lang="en">
<head>
<meta charset=utf-8>
<title>Flight summary</title>
<link rel="stylesheet" type="text/css" href="theme.css">
</head>
<body>
<h1>Flights</h1>
<table>
""")
file_id = 1
for info in all_reservations:
prepend(info, 'departureGate', 'Gate ')
prepend(info, 'arrivalGate', 'Gate ')
prepend(info, 'arrivalTerminal', 'Terminal ')
prepend(info, 'departureTerminal', 'Terminal ')
prepend(info, 'ticketNumber', 'Ticket#')
prepend(info, 'operator', ' operated by ')
flightday = info['departureTime'].date()
prepend(info, 'boardingTimestr', 'Boarding ')
filenames = []
msg_id = info['emailId']
for m in db.create_query('id:%s' % msg_id).search_messages():
for mp in m.get_message_parts():
if mp.get_content_type() == 'application/pdf' or (mp.get_content_type() == 'application/octet-stream' and mp.get_filename().lower().endswith('.pdf')):
filename = 'file_id%d.pdf' % file_id
with open(filename, 'w') as f:
f.write(mp.get_payload(decode=True))
filenames.append((mp.get_filename(), filename))
file_id += 1
info['pdffiles'] = ' | '.join(['<a class="pdffile" href="%s">%s</a>' % (filename, origfilename) for (origfilename, filename) in filenames])
if previous is not None and (flightday - previous).days > 14:
delta = (flightday - previous).days
print '=============', delta, 'days later'
fout.write("""
<tr>
<td colspan="3" class="gaplater">%d days later
</tr>
""" % delta)
else:
fout.write("""
<tr>
<td colspan="3" class="gap">
</tr>
""")
previous = flightday
info['departureDay'] = flightday.strftime('%Y-%m-%d')
info['departureJustTime'] = info['departureTime'].strftime('%H:%M')
info['emailday'] = info['emailTime'].date().strftime('%Y-%m-%d')
print """
%(departureDay)s Flight %(departure)s --> %(arrival)s
Departing %(departureTimestr)s %(boardingTime)s
from %(departure)s %(departureTerminal)s %(departureGate)s
arriving %(arrivalTimestr)s
To %(arrival)s %(arrivalTerminal)s %(arrivalGate)s
Flight number %(flightNumber)s with %(airline)s%(operator)s
%(ticketNumber)s %(ticketText)s %(ticketDownload)s %(ticketPrint)s
Email %(emailday)s "%(emailSubject)s"
""" % info
fout.write(("""
<tr><td class="left">
<h5>From</h5>
%(departure)s
%(departureTerminal)s
%(departureGate)s
<td class="middle" rowspan="2" >✈
<td class="right">
<h5>Destination</h5>
%(arrival)s
%(arrivalTerminal)s
%(arrivalGate)s
</tr>
<tr>
<td class="left">
<h5>Depart</h5>
%(departureJustTime)s
<td class="right">
<h5>Date</h5>
%(departureDay)s
</tr>
<tr>
<td colspan="3" class="details">
<h5>Arriving</h5>
%(arrivalTimestr)s
<h5>Flight number</h5>
Flight number %(flightNumber)s with %(airline)s%(operator)s
<h5>Ticket</h5>
%(ticketNumber)s %(ticketText)s %(ticketDownload)s %(ticketPrint)s
<div>%(boardingTime)s</div>
<div>%(pdffiles)s</div>
</td>
</tr>
<tr>
<td colspan="3" class="email">
<h5>Email</h5>
%(emailday)s "%(emailSubject)s"
</td>
""" % info).encode('utf-8'))
``` |
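Sorting the reservations only works if all departure times are comparable, which is why `dateConverter` attaches the local timezone to naive datetimes before sorting. A small Python 3 illustration of the same normalization (stand-alone, unlike the Python 2 script above):

```python
import datetime
from tzlocal import get_localzone

def normalize(dt):
    """Attach the local timezone to naive datetimes so naive and aware values sort together."""
    if dt.tzinfo is not None:
        return dt
    return dt.replace(tzinfo=get_localzone())

flights = [
    datetime.datetime(2017, 5, 1, 9, 30),                                 # naive
    datetime.datetime(2017, 4, 30, 22, 0, tzinfo=datetime.timezone.utc),  # aware
]
for dt in sorted(flights, key=normalize):
    print(normalize(dt).isoformat())
```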
{
"source": "JohannesBuchner/gammapy",
"score": 2
} |
#### File: catalog/tests/test_hawc.py
```python
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.utils.data import get_pkg_data_filename
from gammapy.catalog import SourceCatalog2HWC
from gammapy.catalog.hawc import SourceCatalog3HWC
from gammapy.modeling.models import (
DiskSpatialModel,
PointSpatialModel,
PowerLawSpectralModel,
)
from gammapy.utils.gauss import Gauss2DPDF
from gammapy.utils.testing import requires_data
@pytest.fixture(scope="session")
def cat():
return SourceCatalog2HWC()
@requires_data()
class TestSourceCatalog2HWC:
@staticmethod
def test_source_table(cat):
assert cat.tag == "2hwc"
assert len(cat.table) == 40
@staticmethod
def test_positions(cat):
assert len(cat.positions) == 40
@requires_data()
class TestSourceCatalogObject2HWC:
@staticmethod
def test_data(cat):
assert cat[0].data["source_name"] == "2HWC J0534+220"
assert cat[0].n_models == 1
assert cat[1].data["source_name"] == "2HWC J0631+169"
assert cat[1].n_models == 2
@staticmethod
def test_str(cat):
expected = open(get_pkg_data_filename("data/2hwc_j0534+220.txt")).read()
assert str(cat[0]) == expected
expected = open(get_pkg_data_filename("data/2hwc_j0631+169.txt")).read()
assert str(cat[1]) == expected
@staticmethod
def test_position(cat):
position = cat[0].position
assert_allclose(position.ra.deg, 83.628, atol=1e-3)
assert_allclose(position.dec.deg, 22.024, atol=1e-3)
@staticmethod
def test_sky_model(cat):
model = cat[1].sky_model("extended")
assert model.name == "2HWC J0631+169"
assert isinstance(model.spectral_model, PowerLawSpectralModel)
assert isinstance(model.spatial_model, DiskSpatialModel)
with pytest.raises(ValueError):
cat[0].sky_model("extended")
@staticmethod
def test_spectral_model(cat):
m = cat[0].spectral_model()
dnde, dnde_err = m.evaluate_error(1 * u.TeV)
assert dnde.unit == "cm-2 s-1 TeV-1"
assert_allclose(dnde.value, 2.802365e-11, rtol=1e-3)
assert_allclose(dnde_err.value, 6.537506e-13, rtol=1e-3)
@staticmethod
def test_spatial_model(cat):
m = cat[1].spatial_model()
# p = m.parameters
assert isinstance(m, PointSpatialModel)
assert m.lon_0.unit == "deg"
assert_allclose(m.lon_0.value, 195.614, atol=1e-2)
# TODO: add assert on position error
# assert_allclose(p.error("lon_0"), tbd)
assert m.lat_0.unit == "deg"
assert_allclose(m.lat_0.value, 3.507, atol=1e-2)
assert m.frame == "galactic"
m = cat[1].spatial_model("extended")
assert isinstance(m, DiskSpatialModel)
assert m.lon_0.unit == "deg"
assert_allclose(m.lon_0.value, 195.614, atol=1e-10)
assert m.lat_0.unit == "deg"
assert_allclose(m.lat_0.value, 3.507, atol=1e-10)
assert m.frame == "galactic"
assert m.r_0.unit == "deg"
assert_allclose(m.r_0.value, 2.0, atol=1e-3)
model = cat["2HWC J0534+220"].spatial_model()
pos_err = model.position_error
scale_r95 = Gauss2DPDF().containment_radius(0.95)
assert_allclose(pos_err.height.value, 2 * 0.057 * scale_r95, rtol=1e-4)
assert_allclose(pos_err.width.value, 2 * 0.057 * scale_r95, rtol=1e-4)
assert_allclose(model.position.l.value, pos_err.center.l.value)
assert_allclose(model.position.b.value, pos_err.center.b.value)
@pytest.fixture(scope="session")
def ca_3hwc():
return SourceCatalog3HWC()
@requires_data()
class TestSourceCatalog3HWC:
@staticmethod
def test_source_table(ca_3hwc):
assert ca_3hwc.tag == "3hwc"
assert len(ca_3hwc.table) == 65
@staticmethod
def test_positions(ca_3hwc):
assert len(ca_3hwc.positions) == 65
@requires_data()
class TestSourceCatalogObject3HWC:
@staticmethod
def test_data(ca_3hwc):
assert ca_3hwc[0].data["source_name"] == "3HWC J0534+220"
assert ca_3hwc[0].n_models == 1
assert ca_3hwc[1].data["source_name"] == "3HWC J0540+228"
assert ca_3hwc[1].n_models == 1
```
#### File: gammapy/estimators/excess_profile.py
```python
import numpy as np
from astropy import units as u
from regions import CircleAnnulusSkyRegion, RectangleSkyRegion
from gammapy.datasets import Datasets, SpectrumDatasetOnOff
from gammapy.maps import MapAxis
from gammapy.modeling.models import PowerLawSpectralModel, SkyModel
from gammapy.stats import CashCountsStatistic, WStatCountsStatistic
from gammapy.utils.table import table_from_row_data
from .core import Estimator
__all__ = ["ExcessProfileEstimator"]
class ExcessProfileEstimator(Estimator):
"""Estimate profile from a DataSet.
Parameters
----------
regions : list of `regions`
regions to use
energy_edges : `~astropy.units.Quantity`
Energy edges of the profiles to be computed.
n_sigma : float (optional)
Number of sigma to compute errors. By default, it is 1.
n_sigma_ul : float (optional)
Number of sigma to compute upper limit. By default, it is 3.
spectrum : `~gammapy.modeling.models.SpectralModel` (optional)
Spectral model to compute the fluxes or brightness.
Default is power-law with spectral index of 2.
selection_optional : list of str
Additional quantities to be estimated. Possible options are:
* "errn-errp": estimate asymmetric errors.
* "ul": estimate upper limits.
By default all quantities are estimated.
Examples
--------
This example shows how to compute a counts profile for the Fermi galactic
center region::
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord
from gammapy.data import GTI
from gammapy.estimators import ExcessProfileEstimator, ImageProfile
from gammapy.utils.regions import make_orthogonal_rectangle_sky_regions
from gammapy.datasets import Datasets
# load example data
datasets = Datasets.read("$GAMMAPY_DATA/fermi-3fhl-crab/",
"Fermi-LAT-3FHL_datasets.yaml", "Fermi-LAT-3FHL_models.yaml")
# configuration
datasets[0].gti = GTI.create("0s", "1e7s", "2010-01-01")
# creation of the boxes and axis
start_line = SkyCoord(182.5, -5.8, unit='deg', frame='galactic')
end_line = SkyCoord(186.5, -5.8, unit='deg', frame='galactic')
boxes, axis = make_orthogonal_rectangle_sky_regions(start_line,
end_line,
datasets[0].counts.geom.wcs,
1.*u.deg,
11)
# set up profile estimator and run
prof_maker = ExcessProfileEstimator(boxes, axis)
fermi_prof = prof_maker.run(datasets[0])
# smooth and plot the data using the ImageProfile class
fermi_prof.peek()
plt.show()
ax = plt.gca()
ax.set_yscale('log')
ax = fermi_prof.plot("flux", ax=ax)
"""
tag = "ExcessProfileEstimator"
_available_selection_optional = ["errn-errp", "ul", "scan"]
def __init__(
self,
regions,
energy_edges=None,
spectrum=None,
n_sigma=1.0,
n_sigma_ul=3.0,
selection_optional="all",
):
self.regions = regions
self.n_sigma = n_sigma
self.n_sigma_ul = n_sigma_ul
self.energy_edges = (
u.Quantity(energy_edges) if energy_edges is not None else None
)
if spectrum is None:
spectrum = PowerLawSpectralModel()
self.spectrum = spectrum
self.selection_optional = selection_optional
def get_spectrum_datasets(self, dataset):
""" Utility to make the final `~gammapy.datasts.Datasets`
Parameters
----------
dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`
the dataset to use for profile extraction
Returns
--------
sp_datasets : array of `~gammapy.datasets.SpectrumDataset`
the list of `~gammapy.datasets.SpectrumDataset` computed in each box
"""
datasets = Datasets()
for reg in self.regions:
spectrum_dataset = dataset.to_spectrum_dataset(reg)
datasets.append(spectrum_dataset)
return datasets
def _get_projected_distance(self):
distances = []
center = self.regions[0].center
for idx, region in enumerate(self.regions):
if isinstance(region, CircleAnnulusSkyRegion):
distance = (region.inner_radius + region.outer_radius) / 2.0
else:
distance = center.separation(region.center)
distances.append(distance)
return MapAxis.from_nodes(
u.Quantity(distances, "deg"), name="projected distance"
)
def make_prof(self, sp_datasets):
""" Utility to make the profile in each region
Parameters
----------
sp_datasets : `~gammapy.datasets.MapDatasets` of `~gammapy.datasets.SpectrumDataset` or \
`~gammapy.datasets.SpectrumDatasetOnOff`
the dataset to use for profile extraction
Returns
--------
results : list of dictionary
the list of results (list of keys: x_min, x_ref, x_max, alpha, counts, background, excess, ts, sqrt_ts, \
err, errn, errp, ul, exposure, solid_angle)
"""
results = []
distance = self._get_projected_distance()
for index, spds in enumerate(sp_datasets):
old_model = None
if spds.models is not None:
old_model = spds.models
spds.models = SkyModel(spectral_model=self.spectrum)
e_reco = spds.counts.geom.axes["energy"].edges
# ToDo: When the function to_spectrum_dataset will manage the masks, use the following line
# mask = spds.mask if spds.mask is not None else slice(None)
mask = slice(None)
if isinstance(spds, SpectrumDatasetOnOff):
stats = WStatCountsStatistic(
spds.counts.data[mask][:, 0, 0],
spds.counts_off.data[mask][:, 0, 0],
spds.alpha.data[mask][:, 0, 0],
)
else:
stats = CashCountsStatistic(
spds.counts.data[mask][:, 0, 0],
spds.npred_background().data[mask][:, 0, 0],
)
result = {
"x_min": distance.edges[index],
"x_max": distance.edges[index + 1],
"x_ref": distance.center[index],
"energy_edge": e_reco,
}
if isinstance(spds, SpectrumDatasetOnOff):
result["alpha"] = stats.alpha
result.update(
{
"counts": stats.n_on,
"background": stats.n_bkg,
"excess": stats.n_sig,
}
)
result["ts"] = stats.ts
result["sqrt_ts"] = stats.sqrt_ts
result["err"] = stats.error * self.n_sigma
if "errn-errp" in self.selection_optional:
result["errn"] = stats.compute_errn(self.n_sigma)
result["errp"] = stats.compute_errp(self.n_sigma)
if "ul" in self.selection_optional:
result["ul"] = stats.compute_upper_limit(self.n_sigma_ul)
npred = spds.npred().data[mask][:, 0, 0]
e_reco_lo = e_reco[:-1]
e_reco_hi = e_reco[1:]
flux = (
stats.n_sig
/ npred
* spds.models[0].spectral_model.integral(e_reco_lo, e_reco_hi).value
)
result["flux"] = flux
result["flux_err"] = stats.error / stats.n_sig * flux
if "errn-errp" in self.selection_optional:
result["flux_errn"] = np.abs(result["errn"]) / stats.n_sig * flux
result["flux_errp"] = result["errp"] / stats.n_sig * flux
if "ul" in self.selection_optional:
result["flux_ul"] = result["ul"] / stats.n_sig * flux
solid_angle = spds.counts.geom.solid_angle()
result["solid_angle"] = (
np.full(result["counts"].shape, solid_angle.to_value("sr")) * u.sr
)
results.append(result)
if old_model is not None:
spds.models = old_model
return results
def run(self, dataset):
"""Make the profiles
Parameters
----------
dataset : `~gammapy.datasets.MapDataset` or `~gammapy.datasets.MapDatasetOnOff`
the dataset to use for profile extraction
Returns
--------
imageprofile : `~gammapy.estimators.ImageProfile`
Return an image profile class containing the result
"""
if self.energy_edges is not None:
axis = MapAxis.from_energy_edges(self.energy_edges)
dataset = dataset.resample_energy_axis(energy_axis=axis)
else:
dataset = dataset.to_image()
spectrum_datasets = self.get_spectrum_datasets(dataset)
results = self.make_prof(spectrum_datasets)
table = table_from_row_data(results)
if isinstance(self.regions[0], RectangleSkyRegion):
table.meta["PROFILE_TYPE"] = "orthogonal_rectangle"
table.meta["SPECTRAL_MODEL"] = self.spectrum.to_dict()
# return ImageProfile(table)
return table
```
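`make_prof` converts the measured excess into an integral flux by rescaling the predicted counts with the integral of the assumed spectral model over each bin, flux = excess / npred * ∫ dN/dE dE. A toy version of that rescaling for a single bin with gammapy's default power law (numbers are illustrative, not from any dataset):

```python
import astropy.units as u
from gammapy.modeling.models import PowerLawSpectralModel

pwl = PowerLawSpectralModel()   # index=2, amplitude=1e-12 cm-2 s-1 TeV-1, reference=1 TeV
excess, npred = 120.0, 100.0    # hypothetical excess and predicted counts in one energy bin
e_lo, e_hi = 1 * u.TeV, 10 * u.TeV

flux = excess / npred * pwl.integral(e_lo, e_hi).value
flux_err = excess ** 0.5 / excess * flux   # toy stand-in for the counts error, propagated multiplicatively
print(flux, flux_err)
```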
#### File: gammapy/estimators/profile.py
```python
import numpy as np
import scipy.ndimage
from astropy import units as u
from astropy.convolution import Box1DKernel, Gaussian1DKernel
from astropy.coordinates import Angle
from astropy.table import Table
from .core import Estimator
__all__ = ["ImageProfile", "ImageProfileEstimator"]
# TODO: implement measuring profile along arbitrary directions
# TODO: think better about error handling. e.g. MC based methods
class ImageProfileEstimator(Estimator):
"""Estimate profile from image.
Parameters
----------
x_edges : `~astropy.coordinates.Angle`
        Coordinate edges to define a custom measurement grid (optional).
method : ['sum', 'mean']
Compute sum or mean within profile bins.
axis : ['lon', 'lat', 'radial']
Along which axis to estimate the profile.
center : `~astropy.coordinates.SkyCoord`
Center coordinate for the radial profile option.
Examples
--------
This example shows how to compute a counts profile for the Fermi galactic
center region::
import matplotlib.pyplot as plt
from gammapy.maps import ImageProfileEstimator
from gammapy.maps import Map
from astropy import units as u
# load example data
filename = '$GAMMAPY_DATA/fermi-3fhl-gc/fermi-3fhl-gc-counts.fits.gz'
fermi_cts = Map.read(filename)
# set up profile estimator and run
p = ImageProfileEstimator(axis='lon', method='sum')
profile = p.run(fermi_cts)
# smooth profile and plot
smoothed = profile.smooth(kernel='gauss')
smoothed.peek()
plt.show()
"""
tag = "ImageProfileEstimator"
def __init__(self, x_edges=None, method="sum", axis="lon", center=None):
self._x_edges = x_edges
if method not in ["sum", "mean"]:
raise ValueError("Not a valid method, choose either 'sum' or 'mean'")
if axis not in ["lon", "lat", "radial"]:
raise ValueError("Not a valid axis, choose either 'lon' or 'lat'")
if method == "radial" and center is None:
raise ValueError("Please provide center coordinate for radial profiles")
self.parameters = {"method": method, "axis": axis, "center": center}
def _get_x_edges(self, image):
if self._x_edges is not None:
return self._x_edges
p = self.parameters
coordinates = image.geom.get_coord(mode="edges").skycoord
if p["axis"] == "lat":
x_edges = coordinates[:, 0].data.lat
elif p["axis"] == "lon":
lon = coordinates[0, :].data.lon
x_edges = lon.wrap_at("180d")
elif p["axis"] == "radial":
rad_step = image.geom.pixel_scales.mean()
corners = [0, 0, -1, -1], [0, -1, 0, -1]
rad_max = coordinates[corners].separation(p["center"]).max()
x_edges = Angle(np.arange(0, rad_max.deg, rad_step.deg), unit="deg")
return x_edges
def _estimate_profile(self, image, image_err, mask):
p = self.parameters
labels = self._label_image(image, mask)
profile_err = None
index = np.arange(1, len(self._get_x_edges(image)))
if p["method"] == "sum":
profile = scipy.ndimage.sum(image.data, labels.data, index)
if image.unit.is_equivalent("counts"):
profile_err = np.sqrt(profile)
elif image_err:
# gaussian error propagation
err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
profile_err = np.sqrt(err_sum)
elif p["method"] == "mean":
# gaussian error propagation
profile = scipy.ndimage.mean(image.data, labels.data, index)
if image_err:
N = scipy.ndimage.sum(~np.isnan(image_err.data), labels.data, index)
err_sum = scipy.ndimage.sum(image_err.data ** 2, labels.data, index)
profile_err = np.sqrt(err_sum) / N
return profile, profile_err
def _label_image(self, image, mask=None):
p = self.parameters
coordinates = image.geom.get_coord().skycoord
x_edges = self._get_x_edges(image)
if p["axis"] == "lon":
lon = coordinates.data.lon.wrap_at("180d")
data = np.digitize(lon.degree, x_edges.deg)
elif p["axis"] == "lat":
lat = coordinates.data.lat
data = np.digitize(lat.degree, x_edges.deg)
elif p["axis"] == "radial":
separation = coordinates.separation(p["center"])
data = np.digitize(separation.degree, x_edges.deg)
if mask is not None:
# assign masked values to background
data[mask.data] = 0
return image.copy(data=data)
def run(self, image, image_err=None, mask=None):
"""Run image profile estimator.
Parameters
----------
image : `~gammapy.maps.Map`
Input image to run profile estimator on.
image_err : `~gammapy.maps.Map`
Input error image to run profile estimator on.
mask : `~gammapy.maps.Map`
Optional mask to exclude regions from the measurement.
Returns
-------
profile : `ImageProfile`
Result image profile object.
"""
p = self.parameters
if image.unit.is_equivalent("count"):
image_err = image.copy(data=np.sqrt(image.data))
profile, profile_err = self._estimate_profile(image, image_err, mask)
result = Table()
x_edges = self._get_x_edges(image)
result["x_min"] = x_edges[:-1]
result["x_max"] = x_edges[1:]
result["x_ref"] = (x_edges[:-1] + x_edges[1:]) / 2
result["profile"] = profile * image.unit
if profile_err is not None:
result["profile_err"] = profile_err * image.unit
result.meta["PROFILE_TYPE"] = p["axis"]
return ImageProfile(result)
class ImageProfile:
"""Image profile class.
The image profile data is stored in `~astropy.table.Table` object, with the
following columns:
* `x_ref` Coordinate bin center (required).
* `x_min` Coordinate bin minimum (optional).
* `x_max` Coordinate bin maximum (optional).
* `profile` Image profile data (required).
* `profile_err` Image profile data error (optional).
Parameters
----------
table : `~astropy.table.Table`
Table instance with the columns specified as above.
"""
def __init__(self, table):
self.table = table
def smooth(self, kernel="box", radius="0.1 deg", **kwargs):
r"""Smooth profile with error propagation.
Smoothing is described by a convolution:
.. math::
x_j = \sum_i x_{(j - i)} h_i
Where :math:`h_i` are the coefficients of the convolution kernel.
The corresponding error on :math:`x_j` is then estimated using Gaussian
error propagation, neglecting correlations between the individual
:math:`x_{(j - i)}`:
.. math::
\Delta x_j = \sqrt{\sum_i \Delta x^{2}_{(j - i)} h^{2}_i}
Parameters
----------
kernel : {'gauss', 'box'}
Kernel shape
radius : `~astropy.units.Quantity`, str or float
Smoothing width given as quantity or float. If a float is given it
is interpreted as smoothing width in pixels. If an (angular) quantity
is given it is converted to pixels using `xref[1] - x_ref[0]`.
kwargs : dict
Keyword arguments passed to `~scipy.ndimage.uniform_filter`
('box') and `~scipy.ndimage.gaussian_filter` ('gauss').
Returns
-------
profile : `ImageProfile`
Smoothed image profile.
"""
table = self.table.copy()
profile = table["profile"]
radius = u.Quantity(radius)
radius = np.abs(radius / np.diff(self.x_ref))[0]
width = 2 * radius.value + 1
if kernel == "box":
smoothed = scipy.ndimage.uniform_filter(
profile.astype("float"), width, **kwargs
)
# renormalize data
if table["profile"].unit.is_equivalent("count"):
smoothed *= int(width)
smoothed_err = np.sqrt(smoothed)
elif "profile_err" in table.colnames:
profile_err = table["profile_err"]
# use gaussian error propagation
box = Box1DKernel(width)
err_sum = scipy.ndimage.convolve(profile_err ** 2, box.array ** 2)
smoothed_err = np.sqrt(err_sum)
elif kernel == "gauss":
smoothed = scipy.ndimage.gaussian_filter(
profile.astype("float"), width, **kwargs
)
# use gaussian error propagation
if "profile_err" in table.colnames:
profile_err = table["profile_err"]
gauss = Gaussian1DKernel(width)
err_sum = scipy.ndimage.convolve(profile_err ** 2, gauss.array ** 2)
smoothed_err = np.sqrt(err_sum)
else:
raise ValueError("Not valid kernel choose either 'box' or 'gauss'")
table["profile"] = smoothed * self.table["profile"].unit
if "profile_err" in table.colnames:
table["profile_err"] = smoothed_err * self.table["profile"].unit
return self.__class__(table)
def plot(self, ax=None, **kwargs):
"""Plot image profile.
Parameters
----------
ax : `~matplotlib.axes.Axes`
Axes object
**kwargs : dict
Keyword arguments passed to `~matplotlib.axes.Axes.plot`
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
y = self.table["profile"].data
x = self.x_ref.value
ax.plot(x, y, **kwargs)
ax.set_xlabel("lon")
ax.set_ylabel("profile")
ax.set_xlim(x.max(), x.min())
return ax
def plot_err(self, ax=None, **kwargs):
"""Plot image profile error as band.
Parameters
----------
ax : `~matplotlib.axes.Axes`
Axes object
**kwargs : dict
Keyword arguments passed to plt.fill_between()
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
y = self.table["profile"].data
ymin = y - self.table["profile_err"].data
ymax = y + self.table["profile_err"].data
x = self.x_ref.value
# plotting defaults
kwargs.setdefault("alpha", 0.5)
ax.fill_between(x, ymin, ymax, **kwargs)
ax.set_xlabel("x (deg)")
ax.set_ylabel("profile")
return ax
@property
def x_ref(self):
"""Reference x coordinates."""
return self.table["x_ref"].quantity
@property
def x_min(self):
"""Min. x coordinates."""
return self.table["x_min"].quantity
@property
def x_max(self):
"""Max. x coordinates."""
return self.table["x_max"].quantity
@property
def profile(self):
"""Image profile quantity."""
return self.table["profile"].quantity
@property
def profile_err(self):
"""Image profile error quantity."""
try:
return self.table["profile_err"].quantity
except KeyError:
return None
def peek(self, figsize=(8, 4.5), **kwargs):
"""Show image profile and error.
Parameters
----------
**kwargs : dict
Keyword arguments passed to `ImageProfile.plot_profile()`
Returns
-------
ax : `~matplotlib.axes.Axes`
Axes object
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax = self.plot(ax, **kwargs)
if "profile_err" in self.table.colnames:
ax = self.plot_err(ax, color=kwargs.get("c"))
return ax
def normalize(self, mode="peak"):
"""Normalize profile to peak value or integral.
Parameters
----------
mode : ['integral', 'peak']
Normalize image profile so that it integrates to unity ('integral')
or the maximum value corresponds to one ('peak').
Returns
-------
profile : `ImageProfile`
Normalized image profile.
"""
table = self.table.copy()
profile = self.table["profile"]
if mode == "peak":
norm = np.nanmax(profile)
elif mode == "integral":
norm = np.nansum(profile)
else:
raise ValueError(f"Invalid normalization mode: {mode!r}")
table["profile"] /= norm
if "profile_err" in table.colnames:
table["profile_err"] /= norm
return self.__class__(table)
```
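The error propagation in `ImageProfile.smooth` is a convolution of the squared errors with the squared kernel coefficients, Δx_j = sqrt(Σ_i Δx²_{j-i} h²_i). The same computation can be written directly with scipy, here for a 3-bin box kernel (stand-alone sketch):

```python
import numpy as np
import scipy.ndimage

profile = np.array([1.0, 2.0, 4.0, 3.0, 1.0])
profile_err = np.sqrt(profile)   # Poisson-like errors on each bin
kernel = np.ones(3) / 3.0        # box kernel coefficients h_i

smoothed = scipy.ndimage.convolve(profile, kernel)
smoothed_err = np.sqrt(scipy.ndimage.convolve(profile_err ** 2, kernel ** 2))
print(smoothed)
print(smoothed_err)
```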
#### File: estimators/tests/test_sensitivity.py
```python
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from gammapy.datasets import SpectrumDataset, SpectrumDatasetOnOff
from gammapy.estimators import SensitivityEstimator
from gammapy.irf import EDispKernelMap
from gammapy.maps import MapAxis, RegionNDMap
@pytest.fixture()
def spectrum_dataset():
e_true = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=20, name="energy_true")
e_reco = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=4)
background = RegionNDMap.create(region="icrs;circle(0, 0, 0.1)", axes=[e_reco])
background.data += 3600
background.data[0] *= 1e3
background.data[-1] *= 1e-3
edisp = EDispKernelMap.from_diagonal_response(
energy_axis_true=e_true, energy_axis=e_reco, geom=background.geom
)
aeff = RegionNDMap.create(region="icrs;circle(0, 0, 0.1)", axes=[e_true], unit="m2")
aeff.data += 1e6
livetime = 1 * u.h
exposure = aeff * livetime
return SpectrumDataset(
name="test", exposure=exposure, edisp=edisp, background=background
)
def test_cta_sensitivity_estimator(spectrum_dataset):
dataset_on_off = SpectrumDatasetOnOff.from_spectrum_dataset(
dataset=spectrum_dataset, acceptance=1, acceptance_off=5
)
sens = SensitivityEstimator(gamma_min=25, bkg_syst_fraction=0.075)
table = sens.run(dataset_on_off)
assert len(table) == 4
assert table.colnames == ["energy", "e2dnde", "excess", "background", "criterion"]
assert table["energy"].unit == "TeV"
assert table["e2dnde"].unit == "erg / (cm2 s)"
row = table[0]
assert_allclose(row["energy"], 1.33352, rtol=1e-3)
assert_allclose(row["e2dnde"], 2.74559e-08, rtol=1e-3)
assert_allclose(row["excess"], 270000, rtol=1e-3)
assert_allclose(row["background"], 3.6e06, rtol=1e-3)
assert row["criterion"] == "bkg"
row = table[1]
assert_allclose(row["energy"], 2.37137, rtol=1e-3)
assert_allclose(row["e2dnde"], 6.04795e-11, rtol=1e-3)
assert_allclose(row["excess"], 334.454, rtol=1e-3)
assert_allclose(row["background"], 3600, rtol=1e-3)
assert row["criterion"] == "significance"
row = table[3]
assert_allclose(row["energy"], 7.49894, rtol=1e-3)
assert_allclose(row["e2dnde"], 1.42959e-11, rtol=1e-3)
assert_allclose(row["excess"], 25, rtol=1e-3)
assert_allclose(row["background"], 3.6, rtol=1e-3)
assert row["criterion"] == "gamma"
```
#### File: makers/background/phase.py
```python
import numpy as np
from gammapy.data import EventList
from gammapy.datasets import SpectrumDatasetOnOff
from gammapy.maps import RegionNDMap
from ..core import Maker
__all__ = ["PhaseBackgroundMaker"]
class PhaseBackgroundMaker(Maker):
"""Background estimation with on and off phases.
TODO: For a usage example see future notebook.
TODO: The phase interval has to be between 0 and 1.
Cases like [-0.1, 0.1], for example, are still not supported.
Parameters
----------
on_phase : `tuple` or list of tuples
on-phase defined by the two edges of each interval (edges are excluded)
off_phase : `tuple` or list of tuples
off-phase defined by the two edges of each interval (edges are excluded)
"""
tag = "PhaseBackgroundMaker"
def __init__(self, on_phase, off_phase):
self.on_phase = self._check_intervals(on_phase)
self.off_phase = self._check_intervals(off_phase)
def __str__(self):
s = self.__class__.__name__
s += f"\n{self.on_phase}"
s += f"\n{self.off_phase}"
return s
@staticmethod
def _make_counts(dataset, observation, phases):
event_lists = []
for interval in phases:
events = observation.events.select_parameter(
parameter="PHASE", band=interval
)
event_lists.append(events)
events = EventList.from_stack(event_lists)
counts = RegionNDMap.from_geom(dataset.counts.geom)
counts.fill_events(events)
return counts
def make_counts_off(self, dataset, observation):
"""Make off counts.
Parameters
----------
dataset : `SpectrumDataset`
Input dataset.
observation : `DatastoreObservation`
Data store observation.
Returns
-------
counts_off : `RegionNDMap`
Off counts.
"""
return self._make_counts(dataset, observation, self.off_phase)
def make_counts(self, dataset, observation):
"""Make off counts.
Parameters
----------
dataset : `SpectrumDataset`
Input dataset.
observation : `DatastoreObservation`
Data store observation.
Returns
-------
counts_off : `RegionNDMap`
Off counts.
"""
return self._make_counts(dataset, observation, self.on_phase)
def run(self, dataset, observation):
"""Run all steps.
Parameters
----------
dataset : `SpectrumDataset`
Input dataset.
observation : `Observation`
Data store observation.
Returns
-------
dataset_on_off : `SpectrumDatasetOnOff`
On off dataset.
"""
counts_off = self.make_counts_off(dataset, observation)
counts = self.make_counts(dataset, observation)
acceptance = np.sum([_[1] - _[0] for _ in self.on_phase])
acceptance_off = np.sum([_[1] - _[0] for _ in self.off_phase])
dataset_on_off = SpectrumDatasetOnOff.from_spectrum_dataset(
dataset=dataset,
counts_off=counts_off,
acceptance=acceptance,
acceptance_off=acceptance_off,
)
dataset_on_off.counts = counts
return dataset_on_off
@staticmethod
def _check_intervals(intervals):
"""Split phase intervals that go beyond phase 1"""
if isinstance(intervals, tuple):
intervals = [intervals]
        for phase_interval in list(intervals):  # iterate over a copy, since intervals is modified below
if phase_interval[0] > phase_interval[1]:
intervals.remove(phase_interval)
intervals.append([phase_interval[0], 1])
intervals.append([0, phase_interval[1]])
return intervals
```
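`PhaseBackgroundMaker` computes the on/off acceptances simply as the summed widths of the phase intervals, after `_check_intervals` has split any interval that wraps past phase 1. A small illustration of both steps in plain Python (independent of gammapy):

```python
def split_intervals(intervals):
    """Split phase intervals whose upper edge wraps around past phase 1."""
    if isinstance(intervals, tuple):
        intervals = [intervals]
    out = []
    for lo, hi in intervals:
        if lo > hi:                       # e.g. (0.8, 0.2) wraps around phase 1
            out += [(lo, 1.0), (0.0, hi)]
        else:
            out.append((lo, hi))
    return out

on_phase = split_intervals((0.8, 0.2))    # -> [(0.8, 1.0), (0.0, 0.2)]
off_phase = split_intervals((0.3, 0.7))
acceptance = sum(hi - lo for lo, hi in on_phase)        # 0.4 (up to float rounding)
acceptance_off = sum(hi - lo for lo, hi in off_phase)   # 0.4
alpha = acceptance / acceptance_off                     # background scaling factor
print(on_phase, acceptance, acceptance_off, alpha)
```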
#### File: tests/data/make.py
```python
from pathlib import Path
import numpy as np
import astropy.units as u
from gammapy.data import DataStore
from gammapy.datasets import Datasets, MapDataset
from gammapy.makers import MapDatasetMaker
from gammapy.maps import MapAxis, WcsGeom
from gammapy.modeling.models import (
ExpCutoffPowerLawSpectralModel,
GaussianSpatialModel,
Models,
PointSpatialModel,
PowerLawSpectralModel,
SkyModel,
TemplateSpatialModel,
)
DATA_PATH = Path("./")
def make_example_2():
spatial = GaussianSpatialModel(lon_0="0 deg", lat_0="0 deg", sigma="1 deg")
model = SkyModel(PowerLawSpectralModel(), spatial, name="example_2")
models = Models([model])
models.write(DATA_PATH / "example2.yaml", overwrite=True, write_covariance=False)
def make_datasets_example():
# Define which data to use and print some information
energy_axis = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 4), unit="TeV", name="energy", interp="log"
)
geom0 = WcsGeom.create(
skydir=(0, 0),
binsz=0.1,
width=(2, 2),
frame="galactic",
proj="CAR",
axes=[energy_axis],
)
geom1 = WcsGeom.create(
skydir=(1, 0),
binsz=0.1,
width=(2, 2),
frame="galactic",
proj="CAR",
axes=[energy_axis],
)
geoms = [geom0, geom1]
sources_coords = [(0, 0), (0.9, 0.1)]
names = ["gc", "g09"]
models = Models()
for idx, (lon, lat) in enumerate(sources_coords):
spatial_model = PointSpatialModel(
lon_0=lon * u.deg, lat_0=lat * u.deg, frame="galactic"
)
spectral_model = ExpCutoffPowerLawSpectralModel(
index=2 * u.Unit(""),
amplitude=3e-12 * u.Unit("cm-2 s-1 TeV-1"),
reference=1.0 * u.TeV,
lambda_=0.1 / u.TeV,
)
model_ecpl = SkyModel(
spatial_model=spatial_model, spectral_model=spectral_model, name=names[idx]
)
models.append(model_ecpl)
models["gc"].spectral_model.reference = models["g09"].spectral_model.reference
obs_ids = [110380, 111140, 111159]
data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
diffuse_spatial = TemplateSpatialModel.read(
"$GAMMAPY_DATA/fermi-3fhl-gc/gll_iem_v06_gc.fits.gz"
)
diffuse_model = SkyModel(PowerLawSpectralModel(), diffuse_spatial)
maker = MapDatasetMaker()
datasets = Datasets()
observations = data_store.get_observations(obs_ids)
for idx, geom in enumerate(geoms):
stacked = MapDataset.create(geom=geom, name=names[idx])
for obs in observations:
dataset = maker.run(stacked, obs)
stacked.stack(dataset)
bkg = stacked.models.pop(0)
stacked.models = [models[idx], diffuse_model, bkg]
datasets.append(stacked)
datasets.write(
"$GAMMAPY_DATA/tests/models",
prefix="gc_example",
overwrite=True,
write_covariance=False,
)
if __name__ == "__main__":
make_example_2()
make_datasets_example()
```
#### File: gammapy/visualization/panel.py
```python
import numpy as np
from astropy.coordinates import Angle
__all__ = ["MapPanelPlotter"]
__doctest_requires__ = {("colormap_hess", "colormap_milagro"): ["matplotlib"]}
class MapPanelPlotter:
"""
Map panel plotter class.
Given a `~matplotlib.pyplot.Figure` object this class creates axes objects
using `~matplotlib.gridspec.GridSpec` and plots a given sky map onto these.
Parameters
----------
    figure : `~matplotlib.figure.Figure`
Figure instance.
xlim : `~astropy.coordinates.Angle`
Angle object specifying the longitude limits.
ylim : `~astropy.coordinates.Angle`
Angle object specifying the latitude limits.
npanels : int
Number of panels.
**kwargs : dict
Keyword arguments passed to `~matplotlib.gridspec.GridSpec`.
"""
def __init__(self, figure, xlim, ylim, npanels=4, **kwargs):
from matplotlib.gridspec import GridSpec
self.figure = figure
self.parameters = {"xlim": xlim, "ylim": ylim, "npanels": npanels}
self.grid_spec = GridSpec(nrows=npanels, ncols=1, **kwargs)
def _get_ax_extend(self, ax, panel):
"""Get width and height of the axis in world coordinates."""
p = self.parameters
# compute aspect ratio of the axis
aspect = ax.bbox.width / ax.bbox.height
# compute width and height in world coordinates
height = np.abs(p["ylim"].diff())
width = aspect * height
left, bottom = p["xlim"][0].wrap_at("180d"), p["ylim"][0]
width_all = np.abs(p["xlim"].wrap_at("180d").diff())
xoverlap = ((p["npanels"] * width) - width_all) / (p["npanels"] - 1.0)
if xoverlap < 0:
raise ValueError(
"No overlap between panels. Please reduce figure "
"height or increase vertical space between the panels."
)
left = left - panel * (width - xoverlap)
return left[0], bottom, width, height
def _set_ax_fov(self, ax, panel):
left, bottom, width, height = self._get_ax_extend(ax, panel)
# set fov
xlim = Angle([left, left - width])
ylim = Angle([bottom, bottom + height])
xlim_pix, ylim_pix = ax.wcs.wcs_world2pix(xlim.deg, ylim.deg, 1)
ax.set_xlim(*xlim_pix)
ax.set_ylim(*ylim_pix)
return ax
def plot_panel(self, map, panel=1, panel_fov=None, **kwargs):
"""
Plot sky map on one panel.
Parameters
----------
map : `~gammapy.maps.WcsNDMap`
Map to plot.
panel : int
Which panel to plot on (counted from top).
"""
if panel_fov is None:
panel_fov = panel
spec = self.grid_spec[panel]
ax = self.figure.add_subplot(spec, projection=map.geom.wcs)
try:
ax = map.plot(ax=ax, **kwargs)[1]
except AttributeError:
ax = map.plot_rgb(ax=ax, **kwargs)
ax = self._set_ax_fov(ax, panel_fov)
return ax
def plot(self, map, **kwargs):
"""
Plot sky map on all panels.
Parameters
----------
map : `~gammapy.maps.WcsNDMap`
Map to plot.
"""
p = self.parameters
axes = []
for panel in range(p["npanels"]):
ax = self.plot_panel(map, panel=panel, **kwargs)
axes.append(ax)
return axes
``` |
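`_get_ax_extend` derives each panel's world-coordinate window from the axis aspect ratio: every panel is `width = aspect * height` wide, and consecutive panels overlap by `xoverlap = (npanels * width - width_all) / (npanels - 1)` so that together they still cover the full longitude range. A worked numeric example of that bookkeeping (no matplotlib required):

```python
npanels = 4
width_all = 20.0          # total longitude range to display, in deg
height = 4.0              # latitude range of each panel, in deg
aspect = 1.6              # axis width / height taken from the figure layout
width = aspect * height   # longitude range covered by a single panel: 6.4 deg

xoverlap = (npanels * width - width_all) / (npanels - 1.0)        # ~1.87 deg of shared longitude
lefts = [0.0 - panel * (width - xoverlap) for panel in range(npanels)]
print(width, xoverlap, lefts)
# each panel spans [left - width, left]; 3 * (width - xoverlap) + width == width_all
```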
{
"source": "JohannesBuchner/jbopt",
"score": 3
} |
#### File: jbopt/jbopt/optimize1d.py
```python
import scipy.optimize, scipy.interpolate
import numpy
import matplotlib.pyplot as plt
def pause():
import sys
print 'please press enter: >> ',
sys.stdin.readline()
def plot_values(values, points, lastpoint, ymax=numpy.nan, ftol=0.05):
#print 'values:', zip(points, values)
#print 'last value', points[lastpoint], values[lastpoint]
plt.figure()
plt.plot(points, values, 'o--', color='blue')
plt.plot(points[lastpoint], values[lastpoint], 'o', color='red')
if numpy.isnan(ymax):
worst = [v for v in values if v < 1e100]
ymax = max(worst)
plt.ylim(min(values)-ftol, ymax+ftol)
plt.savefig('optimize1d.pdf')
plt.close()
phi = (1 + 5**0.5) / 2
resphi = 2 - phi
"""
Is this a line within the tolerance --> return False
"""
def has_curvature(a, b, c, va, vb, vc, ftol, disp):
ftol = 10000 * ftol
grad = (vc - va) / (c - a)
vbpred = grad * (b - a) + va
curvcrit = numpy.abs(vbpred - vb) > ftol
if disp > 0: print '\tCurvature checking: %f (tol=%f): ' % (vbpred - vb, ftol), curvcrit
return curvcrit
def escalate_left(function, a, b, va, vb, cons, lstep, ftol, disp, plot):
assert va < vb, (va, vb)
while va < vb: # we have to go left
if disp > 0: print ' <<< fast forwarding to LEFT <<< '
b, c = a, b
vb, vc = va, vb
lstep += 1
while any([con(b - lstep) < 0 for con in cons]):
lstep /= 3
a = b - lstep
va = function(a)
if disp > 5:
if plot:
plot_values([va, vb, vc], [a, b, c], lastpoint=0, ftol=ftol)
if disp > 0: print ' left %f [%f]' % (a, va)
if va > vb or not has_curvature(a, b, c, va, vb, vc, ftol, disp): # finally, we found the border
if disp > 0: print ' found left border'
return [a, b, c], [va, vb, vc]
if lstep < 1e-4 and c - a < 1e-4 and numpy.abs(vb - va) < 1e-4:
if disp > 0: print ' WARNING: hit the lower limit of the parameter', lstep, a, b, va, vb
return [a, b, c], [va, vb, vc]
return [a, b, c], [va, vb, vc]
def escalate_right(function, b, c, vb, vc, cons, rstep, ftol, disp, plot):
assert vc < vb, (vc, vb)
while vc < vb: # we have to go right
if disp > 0: print ' >>> fast forwarding to RIGHT >>> '
a, b = b, c
va, vb = vb, vc
rstep += 1
while any([con(b + rstep) < 0 for con in cons]):
rstep /= 3
c = b + rstep
vc = function(c)
if disp > 5:
if plot:
plot_values([va, vb, vc], [a, b, c], lastpoint=2, ftol=ftol)
if disp > 0: print ' right %f [%f]' % (c, vc)
if vc > vb: # finally, we found the border
if disp > 0: print ' found right border'
return [a, b, c], [va, vb, vc]
if rstep < 1e-4 and c - a < 1e-4 and numpy.abs(vc - vb) < 1e-4:
if disp > 0: print ' WARNING: hit the upper limit of the parameter', rstep, b, c, vb, vc
return [a, b, c], [va, vb, vc]
return [a, b, c], [va, vb, vc]
def seek_minimum_bracket(function, b, cons, ftol, disp, plot):
# we want to bracket the minimum first
lstep = 0.7
rstep = 0.7
assert not any([c(b) < 0 for c in cons]), [b]
vb = function(b)
if disp > 0: print 'starting at %f [%f]' % (b, vb)
while any([c(b - lstep) < 0 for c in cons]):
lstep /= 3
if disp > 0: print 'reducing lstep for constraint'
a = b - lstep
va = function(a)
if disp > 0: print 'left %f [%f]' % (a, va)
#plot([va, vb], [a, b], lastpoint=0, ftol=ftol)
if va <= vb: # we have to go left
return escalate_left(function, a, b, va, vb, cons, lstep, ftol, disp, plot)
while any([c(b + rstep) < 0 for c in cons]):
rstep /= 3
if disp > 0: print 'reducing rstep for constraint'
c = b + rstep
vc = function(c)
#plot([va, vb, vc], [a, b, c], lastpoint=2, ftol=ftol)
if disp > 0: print 'right %f [%f]' % (c, vc)
if vc <= vb: # we have to go right
return escalate_right(function, b, c, vb, vc, cons, rstep, ftol, disp, plot)
return [a, b, c], [va, vb, vc]
def brent(function, a, b, c, va, vb, vc, cons, ftol, disp=0, plot=False):
while True:
if disp > 0: print ' BRENT', a, b, c, va, vb, vc
if vb <= va and vb <= vc:
if numpy.abs(vb - va) + numpy.abs(vb - vc) <= ftol:
if disp > 0: print ' ===> found minimum at %f, %f' % (b, vb)
return b
if numpy.abs(vb - va) + numpy.abs(vb - vc) <= ftol:
print 'Potentially problematic case. Increasing verbosity!'
print ' Narrowing to ftol:', numpy.abs(vb - va) + numpy.abs(vb - vc)
disp = 4
x = b - 0.5 * ((b - a)**2 * (vb - vc) - (b - c)**2*(vb - va)) / ((b - a) * (vb - vc) - (b - c) * (vb - va))
if disp > 0: print 'suggested point:', x
safety = 10.
if x < b and x > a and c - b >= (b - a) * safety:
if disp > 0: print 'we want to go left, but right side is getting too mighty'
x = (c + (safety - 1) * b) / safety
if x < c and x > b and b - a >= (c - b) * safety:
if disp > 0: print 'we want to go right, but left side is getting too mighty'
x = ((safety - 1) * b + a) / safety
safety2 = 10.
if x <= b:
if x - a <= numpy.abs(b - a) / safety2:
if disp > 0: print 'we want to go left, but are too close to left side'
x = a + (b - a) / safety2
if b - x <= (b - a) / safety2:
if disp > 0: print 'we want to go left, but are too close to the center'
if (b - a) * numpy.abs(va - vb) >= (c - b) * numpy.abs(vc - vb) * safety**2:
if disp > 0: print 'left side is very mighty'
x = (b + a) / 2.
else:
x = b - (b - a) / safety2
if x >= b:
if c - x <= numpy.abs(c - b) / safety2:
if disp > 0: print 'we want to go right, but are too close to right side'
x = c - (c - b) / safety2
if x - b <= (c - b) / safety2:
if disp > 0: print 'we want to go right, but are too close to the center'
if (c - b) * numpy.abs(vc - vb) >= (b - a) * numpy.abs(va - vb) * safety**2:
if disp > 0: print 'right side is very mighty'
x = (b + c) / 2.
else:
x = b + (c - b) / safety2
if va < vb: # should go left
if x > a:
if disp > 0: print 'I think we should go further left, to bracket the minimum'
(a, b, c), (va, vb, vc) = escalate_left(function, a, b, va, vb, cons=cons, lstep=c - a, ftol=ftol, disp=disp, plot=plot)
if numpy.abs(vb - va) + numpy.abs(vb - vc) <= ftol:
if disp > 0: print ' ===> found minimum at left border %f, %f' % (b, vb)
return b
#disp = 4
continue
x = a - (c - b)
if vc < vb: # should go right
if x < c:
if disp > 0: print 'I think we should go further right, to bracket the minimum'
(a, b, c), (va, vb, vc) = escalate_right(function, b, c, vb, vc, cons=cons, rstep=c - a, ftol=ftol, disp=disp, plot=plot)
if numpy.abs(vb - va) + numpy.abs(vb - vc) <= ftol:
if disp > 0: print ' ===> found minimum at right border %f, %f' % (b, vb)
return b
#disp = 4
continue
x = c + (b - a)
if disp > 0: print 'next point:', x
v = function(x)
if disp > 0: print 'next value:', v
if disp > 3:
if plot:
plot_values([va, vb, vc, v], [a, b, c, x], lastpoint=3, ftol=ftol)
pause()
if disp > 0: print ' deciding on next bracket'
if v < min(va, vb, vc):
# improvement was made.
if x < a: # go to very left
if disp > 0: print ' <<<< '
a, b, c, va, vb, vc = x, a, b, v, va, vb
continue
elif x < b: # go to left
if disp > 0: print ' << '
a, b, c, va, vb, vc = a, x, b, va, v, vb
continue
elif x > c: # go to very right
if disp > 0: print ' >>>> '
a, b, c, va, vb, vc = b, c, x, vb, vc, v
continue
else: # go to right
if disp > 0: print ' >> '
a, b, c, va, vb, vc = b, x, c, vb, v, vc
continue
# no improvement
if disp > 0: print ' no improvement made'
# did we try to move to the outer edges?
if va < vb and x < a:
# we tried to go very left, but hit the wall
if disp > 0: print ' |<< '
a, b, c, va, vb, vc = x, a, b, v, va, vb
continue
elif vc < vb and x > c:
# we tried to go very right, but hit the wall
if disp > 0: print ' >>| '
a, b, c, va, vb, vc = b, c, x, vb, vc, v
continue
if disp > 0: print ' subdividing side'
# go to the other side
if not (v < va or v < vc):
if plot:
plot_values([va, vb, vc, v], [a, b, c, x], lastpoint=3, ftol=ftol)
if disp > 0: print 'warning: found flat bit!'
return b
assert False, [v < va, v, va, v < vc, v, vc]
if x < b: # on the left, go to right side
if v > va and v < vb:
if disp > 0: print ' . | x | sequence, going left'
a, b, c, va, vb, vc = a, x, b, va, v, vb
elif v > va:
if plot:
plot_values([va, vb, vc, v], [a, b, c, x], lastpoint=3, ftol=ftol)
disp = 4
if disp > 0: print 'warning: found flat bit on the right!'
return b
else:
if disp > 0: print ' . | x | going right'
a, b, c, va, vb, vc = x, b, c, v, vb, vc
continue
else: # on the right, go to left side
if v > vc and v < vb:
if disp > 0: print ' . | x | sequence, going right'
a, b, c, va, vb, vc = b, x, c, vb, v, vc
elif v > vc:
if plot:
plot_values([va, vb, vc, v], [a, b, c, x], lastpoint=3, ftol=ftol)
disp = 4
if disp > 0: print 'warning: found flat bit on the left!'
return b
else:
if disp > 0: print ' | x | . going left'
a, b, c, va, vb, vc = a, b, x, va, vb, v
continue
assert False, [a, b, c, x, va, vb, vc, v]
neval = 0
def optimize(function, x0, cons=[], ftol=0.2, disp=0, plot=False):
"""
**Optimization method based on Brent's method**
First, a bracket (a b c) is sought that contains the minimum (b value is
smaller than both a or c).
The bracket is then recursively halfed. Here we apply some modifications
to ensure our suggested point is not too close to either a or c,
because that could be problematic with the local approximation.
Also, if the bracket does not seem to include the minimum,
it is expanded generously in the right direction until it covers it.
Thus, this function is fail safe, and will always find a local minimum.
"""
if disp > 0:
print
print ' ===== custom 1d optimization routine ==== '
print
print 'initial suggestion on', function, ':', x0
points = []
values = []
def recordfunction(x):
v = function(x)
points.append(x)
values.append(v)
return v
(a, b, c), (va, vb, vc) = seek_minimum_bracket(recordfunction, x0, cons=cons, ftol=ftol, disp=disp, plot=plot)
if disp > 0:
print '---------------------------------------------------'
print 'found useable minimum bracker after %d evaluations:' % len(points), (a, b, c), (va, vb, vc)
if disp > 2:
if plot:
plot_values(values, points, lastpoint=-1, ftol=ftol)
pause()
result = brent(recordfunction, a, b, c, va, vb, vc, cons=cons, ftol=ftol, disp=disp, plot=plot)
if disp > 0:
print '---------------------------------------------------'
print 'found minimum after %d evaluations:' % len(points), result
if disp > 1 or len(points) > 20:
if plot:
plot_values(values, points, lastpoint=-1, ftol=ftol)
if disp > 2:
pause()
if disp > 0:
print '---------------------------------------------------'
print
print ' ===== end of custom 1d optimization routine ==== '
print
global neval
neval += len(points)
return result
def cache2errors(function, cache, disp=0, ftol=0.05):
"""
This function will attempt to identify 1 sigma errors, assuming your
function is a chi^2. For this, the 1-sigma is bracketed.
If you were smart enough to build a cache list of [x,y] into your function,
you can pass it here. The values bracketing 1 sigma will be used as
starting values.
If no such values exist, e.g. because all values were very close to the
optimum (good starting values), the bracket is expanded.
"""
vals = numpy.array(sorted(cache, key=lambda x: x[0]))
if disp > 0: print ' --- cache2errors --- ', vals
vi = vals[:,1].min()
def renormedfunc(x):
y = function(x)
cache.append([x, y])
if disp > 1: print ' renormed:', x, y, y - (vi + 1)
return y - (vi + 1)
vals[:,1] -= vi + 1
lowmask = vals[:,1] < 0
highmask = vals[:,1] > 0
indices = numpy.arange(len(vals))
b, vb = vals[indices[lowmask][ 0],:]
c, vc = vals[indices[lowmask][-1],:]
if any(vals[:,0][highmask] < b):
if disp > 0: print 'already have bracket'
a, va = vals[indices[highmask][vals[:,0][highmask] < b][-1],:]
else:
a = b
va = vb
while b > -50:
a = b - max(vals[-1,0] - vals[0,0], 1)
va = renormedfunc(a)
if disp > 0: print 'going further left: %.1f [%.1f] --> %.1f [%.1f]' % (b, vb, a, va)
if va > 0:
if disp > 0: print 'found outer part'
break
else:
# need to go further
b = a
vb = va
if disp > 0: print 'left bracket', a, b, va, vb
if va > 0 and vb < 0:
leftroot = scipy.optimize.brentq(renormedfunc, a, b, rtol=ftol)
else:
if disp > 0: print 'WARNING: border problem found.'
leftroot = a
if disp > 0: print 'left root', leftroot
if any(vals[:,0][highmask] > c):
if disp > 0: print 'already have bracket'
d, vd = vals[indices[highmask][vals[:,0][highmask] > c][ 0],:]
else:
d = c
vd = vc
while c < 50:
d = c + max(vals[-1,0] - vals[0,0], 1)
vd = renormedfunc(d)
if disp > 0: print 'going further right: %.1f [%.1f] --> %.1f [%.1f]' % (c, vc, d, vd)
if vd > 0:
if disp > 0: print 'found outer part'
break
else:
# need to go further
c = d
vc = vd
if disp > 0: print 'right bracket', c, d, vc, vd
if vd > 0 and vc < 0:
rightroot = scipy.optimize.brentq(renormedfunc, c, d, rtol=ftol)
else:
if disp > 0: print 'WARNING: border problem found.'
rightroot = d
if disp > 0: print 'right root', rightroot
assert leftroot < rightroot
if disp > 2:
fullvals = numpy.array(sorted(cache, key=lambda x: x[0]))
fullvals[:,1] -= vi + 1
plt.figure()
plt.plot(fullvals[:,0], fullvals[:,1], 's')
plt.plot(vals[:,0], vals[:,1], 'o')
plt.xlim(a, d)
plt.ylim(min(va, vb, vc, vd), max(va, vb, vc, vd))
ymin, ymax = plt.ylim()
plt.vlines([leftroot, rightroot], ymin, ymax, linestyles='dotted')
plt.savefig('cache_brent.pdf')
return leftroot, rightroot
```
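`optimize` only needs a scalar function, a starting point, and optionally constraint functions that must stay non-negative inside the allowed region; `cache2errors` can then bracket the 1-sigma interval from the cached evaluations. A hedged usage sketch on a simple parabola (the import path is assumed from the package layout):

```python
from jbopt.optimize1d import optimize, cache2errors

cache = []
def chi2(x):
    v = (x - 3.0) ** 2 / 0.5 ** 2   # minimum at x = 3, width 0.5
    cache.append([x, v])
    return v

best = optimize(chi2, x0=0.5, cons=[lambda x: x, lambda x: 10 - x], ftol=0.01)
lo, hi = cache2errors(chi2, cache)   # approximate 1-sigma interval around the minimum
print(best, lo, hi)
```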
#### File: jbopt/test/test.py
```python
import scipy
import numpy
"""
likelihood is multivariate, independent gaussian
optimize each param in turn
"""
centers = numpy.array([0.1, 15, 3.3, 4.1, 0])
sigmas = numpy.array([0.01, 0.1, 3, 10, 10])
eval_cache = []
def like(params):
eval_cache.append(params)
return (((params - centers) / sigmas)**2).sum()
from jbopt.independent import *
limits = numpy.array([(0, 1000)]*len(centers))
start = numpy.array([0.1]*len(centers))
def test_normalizations():
print 'TEST normalization step method'
print opt_normalizations(start, like, limits, disp=0) #, abandon_threshold=1)
print 'TEST normalization step method: neval:',
print len(eval_cache)
while len(eval_cache) > 0:
eval_cache.pop()
def test_grid():
print 'TEST grid using BRENT'
print opt_grid(start, like, limits, ftol=0.01, disp=0)
print 'TEST grid using BRENT: neval:',
print len(eval_cache)
def test_grid_parallel():
print 'TEST grid using BRENT --- parallel'
print opt_grid_parallel(start, like, limits, ftol=0.01, disp=0)
print 'TEST grid using BRENT: neval:',
print len(eval_cache)
if __name__ == '__main__':
test_grid()
test_grid_parallel()
``` |
{
"source": "JohannesBuchner/massivedatans",
"score": 2
} |
#### File: JohannesBuchner/massivedatans/elldrawer.py
```python
from __future__ import print_function, division
"""
Implementation of MultiEllipsoidal sampling via nestle
Copyright (c) 2017 <NAME>
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy
from numpy import exp, log, log10, pi
from nestle import bounding_ellipsoid, bounding_ellipsoids, sample_ellipsoids
from collections import defaultdict
class MultiEllipsoidalConstrainer(object):
def __init__(self, rebuild_every = 1000, verbose = False, enlarge=3.):
self.iter = 0
self.ndraws_since_rebuild = 0
self.rebuild_every = int(rebuild_every)
self.enlarge = enlarge
self.verbose = verbose
self.ells = None
self.last_cluster_points = None
def update(self, points):
# volume is larger than standard Ellipsoid computation
# because we have a superset of various likelihood contours
# increase proportional to number of points
pointvol = exp(-self.iter / self.nlive_points) * (len(points) * 1. / self.nlive_points) / self.nlive_points
self.ells = bounding_ellipsoids(numpy.asarray(points), pointvol=pointvol)
for ell in self.ells:
ell.scale_to_vol(ell.vol * self.enlarge)
def generate(self, ndim):
ntotal = 0
N = 10000
while True:
u = sample_ellipsoids(self.ells, rstate=numpy.random)
if not (numpy.all(u > 0.) and numpy.all(u < 1.)):
continue
yield u, ntotal
def rebuild(self, u, ndim):
if self.last_cluster_points is not None and \
len(self.last_cluster_points) == len(u) and \
numpy.all(self.last_cluster_points == u):
# do nothing if everything stayed the same
return
self.update(points=u)
self.last_cluster_points = u
self.generator = self.generate(ndim)
def _draw_constrained_prepare(self, Lmins, priortransform, loglikelihood, live_pointsu, ndim, **kwargs):
rebuild = self.ndraws_since_rebuild > self.rebuild_every or self.ells is None
if rebuild:
print('rebuild triggered at call')
self.rebuild(numpy.asarray(live_pointsu), ndim)
self.ndraws_since_rebuild = 0
assert self.generator is not None
return rebuild
def draw_constrained(self, Lmins, priortransform, loglikelihood, live_pointsu, ndim, iter, nlive_points, **kwargs):
ntoaccept = 0
self.iter = iter
self.nlive_points = nlive_points
#print 'MLFriends trying to replace', Lmins
rebuild = self._draw_constrained_prepare(Lmins, priortransform, loglikelihood, live_pointsu, ndim, **kwargs)
while True:
#print ' starting generator ...'
for u, ntotal in self.generator:
assert (u >= 0).all() and (u <= 1).all(), u
x = priortransform(u)
L = loglikelihood(x)
ntoaccept += 1
self.ndraws_since_rebuild += 1
if numpy.any(L > Lmins):
# yay, we win
#print 'accept after %d tries' % ntoaccept
return u, x, L, ntoaccept
# if running very inefficient, optimize clustering
# if we haven't done so at the start
if not rebuild and self.ndraws_since_rebuild > self.rebuild_every:
rebuild = True
print('Ellipsoid rebuild triggered after %d draws' % self.ndraws_since_rebuild)
self.rebuild(numpy.asarray(live_pointsu), ndim)
self.ndraws_since_rebuild = 0
break
```
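The constrainer above delegates all geometry to nestle: `bounding_ellipsoids` fits a set of ellipsoids around the live points, each ellipsoid is enlarged by a safety factor, and `sample_ellipsoids` draws uniformly from their union until a draw falls inside the unit cube. A minimal sketch of that loop outside of nested sampling, using only the nestle functions already imported above:

```python
import numpy
from nestle import bounding_ellipsoids, sample_ellipsoids

points = numpy.random.uniform(0.3, 0.7, size=(400, 2))        # stand-in for live points in the unit cube
ells = bounding_ellipsoids(points, pointvol=0.4 * 0.4 / 400)  # expected volume per point
for ell in ells:
    ell.scale_to_vol(ell.vol * 3.0)                           # same "enlarge" factor as used above

draws = []
while len(draws) < 10:
    u = sample_ellipsoids(ells, rstate=numpy.random)
    if numpy.all(u > 0.0) and numpy.all(u < 1.0):             # reject draws outside the unit cube
        draws.append(u)
print(numpy.array(draws))
```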
#### File: JohannesBuchner/massivedatans/friends.py
```python
from __future__ import print_function, division
import numpy
import scipy.spatial, scipy.cluster
import matplotlib.pyplot as plt
from nested_sampling.clustering import clusterdetect
from nested_sampling.clustering.neighbors import find_maxdistance, find_rdistance, initial_rdistance_guess, nearest_rdistance_guess
class FriendsConstrainer(object):
"""
Rejection sampling pre-filtering method based on neighborhood to live points.
"Distant" means in this implementation that the distance to a cluster member
is large.
The maximum distance to a cluster is computed by considering each
cluster member and its k nearest neighbors in turn, and
computing the maximum distance.
:param rebuild_every: After how many iterations should the clustering
distance be re-computed?
:param radial:
if radial = True, then the normal euclidean distance is used.
otherwise, the absolute coordinate difference in each dimension is used.
:param metric:
        metric to use. Use 'chebyshev' for SupFriends, in which case
        the supremum norm is used. Use 'euclidean' for RadFriends, which uses
        the Euclidean norm.
:param jackknife:
if True, instead of leaving out a group of live points in
the distance estimate, only one is left out in turn (jackknife resampling
instead of bootstrap resampling).
:param force_shrink:
if True, the distance can only decrease between sampling steps.
"""
def __init__(self, rebuild_every = 50, radial = True, metric = 'euclidean', jackknife = False,
force_shrink = False,
hinter = None, verbose = False,
keep_phantom_points=False, optimize_phantom_points=False):
self.maxima = []
self.iter = 0
self.region = None
self.rebuild_every = rebuild_every
self.radial = radial
self.metric = metric
self.file = None
self.jackknife = jackknife
self.force_shrink = force_shrink
self.hinter = hinter
self.verbose = verbose
if keep_phantom_points:
assert self.force_shrink, 'keep_phantom_points needs force_shrink=True'
self.keep_phantom_points = keep_phantom_points
self.optimize_phantom_points = optimize_phantom_points
self.phantom_points = []
self.phantom_points_Ls = []
self.last_cluster_points = None
def cluster(self, u, ndim, keepRadius=False):
"""
"""
if self.verbose: print('building region ...')
if len(u) > 10:
if keepRadius and self.region is not None and 'maxdistance' in self.region:
maxdistance = self.region['maxdistance']
else:
if self.radial:
if self.jackknife:
#maxdistance = initial_rdistance_guess(u, k=1, metric=self.metric)
maxdistance = nearest_rdistance_guess(u, metric=self.metric)
else:
maxdistance = find_rdistance(u, nbootstraps=20, metric=self.metric, verbose=self.verbose)
else:
maxdistance = find_maxdistance(u)
if self.force_shrink and self.region is not None and 'maxdistance' in self.region:
maxdistance = min(maxdistance, self.region['maxdistance'])
if self.keep_phantom_points and len(self.phantom_points) > 0:
# add phantoms to u now
print('including phantom points in cluster members', self.phantom_points)
u = numpy.vstack((u, self.phantom_points))
ulow = numpy.max([u.min(axis=0) - maxdistance, numpy.zeros(ndim)], axis=0)
uhigh = numpy.min([u.max(axis=0) + maxdistance, numpy.ones(ndim)], axis=0)
else:
maxdistance = None
ulow = numpy.zeros(ndim)
uhigh = numpy.ones(ndim)
if self.verbose: print('setting sampling region:', (ulow, uhigh), maxdistance)
self.region = dict(members=u, maxdistance=maxdistance, ulow=ulow, uhigh=uhigh)
self.generator = None
def is_inside(self, u):
"""
Check if this new point is near or inside one of our clusters
"""
ndim = len(u)
ulow = self.region['ulow']
uhigh = self.region['uhigh']
if not ((ulow <= u).all() and (uhigh >= u).all()):
# does not even lie in our primitive rectangle
# do not even need to compute the distances
return False
members = self.region['members']
maxdistance = self.region['maxdistance']
# if not initialized: no prefiltering
if maxdistance is None:
return True
# compute distance to each member in each dimension
if self.radial:
dists = scipy.spatial.distance.cdist(members, [u], metric=self.metric)
assert dists.shape == (len(members), 1)
dist_criterion = dists < maxdistance
else:
dists = numpy.abs(u - members)
assert dists.shape == (len(members), ndim), (dists.shape, ndim, len(members))
# nearer than maxdistance in all dimensions
dist_criterion = numpy.all(dists < maxdistance, axis=1)
assert dist_criterion.shape == (len(members),), (dist_criterion.shape, len(members))
# is it true for at least one?
closeby = dist_criterion.any()
if closeby:
return True
return False
def are_inside_rect(self, u):
"""
Check if the new points are near or inside one of our clusters
"""
ulow = self.region['ulow']
uhigh = self.region['uhigh']
        mask = numpy.logical_and((ulow <= u).all(axis=1), (uhigh >= u).all(axis=1))
        return mask
def are_inside_cluster(self, u, ndim):
members = self.region['members']
maxdistance = self.region['maxdistance']
# if not initialized: no prefiltering
if maxdistance is None:
return numpy.ones(len(u), dtype=bool)
# compute distance to each member in each dimension
if self.radial:
dists = scipy.spatial.distance.cdist(members, u, metric=self.metric)
assert dists.shape == (len(members), len(u))
dist_criterion = dists < maxdistance
else:
raise NotImplementedError()
# is it true for at least one?
closeby = dist_criterion.any(axis=0)
return closeby
def generate(self, ndim):
it = True
verbose = False and self.verbose
ntotal = 0
# largest maxdistance where generating from full space makes sense
full_maxdistance = 0.5 * (0.01)**(1./ndim)
while True:
maxdistance = self.region['maxdistance']
if maxdistance is None:
# do a prefiltering rejection sampling first
u = numpy.random.uniform(self.region['ulow'], self.region['uhigh'], size=ndim)
yield u, ntotal
ntotal = 0
continue
members = self.region['members']
it = numpy.random.uniform() < 0.01
# depending on the region size compared to
# the total space, one of the two methods will
# be more efficient
if it or not self.radial or maxdistance > full_maxdistance:
it = True
# for large regions
# do a prefiltering rejection sampling first
us = numpy.random.uniform(self.region['ulow'], self.region['uhigh'], size=(100, ndim))
ntotal += 100
mask = self.are_inside_cluster(self.transform_points(us), ndim)
if not mask.any():
continue
us = us[mask]
#indices = numpy.arange(len(mask))[mask]
#for i in indices:
# u = us[indices[i],:]
for u in us:
yield u, ntotal
ntotal = 0
else:
# for small regions
# draw from points
us = members[numpy.random.randint(0, len(members), 100),:]
ntotal += 100
if verbose: print('chosen point', us)
if self.metric == 'euclidean':
# draw direction around it
direction = numpy.random.normal(0, 1, size=(100, ndim))
direction = direction / ((direction**2).sum(axis=1)**0.5).reshape((-1,1))
if verbose: print('chosen direction', direction)
# choose radius: volume gets larger towards the outside
# so give the correct weight with dimensionality
radius = maxdistance * numpy.random.uniform(0, 1, size=(100,1))**(1./ndim)
us = us + direction * radius
else:
assert self.metric == 'chebyshev'
us = us + numpy.random.uniform(-maxdistance, maxdistance, size=(100, ndim))
                if verbose: print('using points', us)
inside = numpy.logical_and((us >= 0).all(axis=1), (us <= 1).all(axis=1))
if not inside.any():
if verbose: print('outside boundaries', us, direction, maxdistance)
continue
us = us[inside]
# count the number of points this is close to
dists = scipy.spatial.distance.cdist(members, us, metric=self.metric)
assert dists.shape == (len(members), len(us))
nnear = (dists < maxdistance).sum(axis=0)
if verbose: print('near', nnear)
#ntotal += 1
# accept with probability 1./nnear
coin = numpy.random.uniform(size=len(us))
accept = coin < 1. / nnear
if not accept.any():
if verbose: print('probabilistic rejection due to overlaps')
continue
us = us[accept]
for u in us:
yield u, ntotal
ntotal = 0
def transform_new_points(self, us):
return us
def transform_points(self, us):
return us
def transform_point(self, u):
return u
def rebuild(self, u, ndim, keepRadius=False):
if self.last_cluster_points is None or \
len(self.last_cluster_points) != len(u) or \
numpy.any(self.last_cluster_points != u):
self.cluster(u=self.transform_new_points(u), ndim=ndim, keepRadius=keepRadius)
self.last_cluster_points = u
# reset generator
self.generator = self.generate(ndim=ndim)
def debug(self, ndim):
if self.file is None:
#self.file = open("friends_debug.txt", "a")
import tempfile
filename = tempfile.mktemp(dir='',
prefix='friends%s-%s_' % (
'1' if self.jackknife else '',
self.metric))
self.file = open(filename, 'w')
self.file.write("{} {} {}\n".format(self.iter, self.region['maxdistance'], len(self.region['members'])))
self.file.write("{} {} {} {}\n".format(self.iter, self.region['maxdistance'], len(self.region['members']), ndim))
def debugplot(self, u = None):
print('creating plot...')
        n = len(self.region['members'][0]) // 2
plt.figure(figsize=(6, n/2*4+1))
m = self.region['members']
d = self.region['maxdistance']
for i in range(n):
            plt.subplot(int(numpy.ceil(n / 2.)), 2, 1 + i)
j = i * 2
k = i * 2 + 1
plt.plot(m[:,j], m[:,k], 'x', color='b', ms=1)
plt.gca().add_artist(plt.Circle((m[0,j], m[0,k]), d, color='g', alpha=0.3))
if u is not None:
plt.plot(u[j], u[k], 's', color='r')
plt.gca().add_artist(plt.Circle((u[j], u[k]), d, color='r', alpha=0.3))
prefix='friends%s-%s_' % ('1' if self.jackknife else '', self.metric)
plt.savefig(prefix + 'cluster.pdf')
plt.close()
print('creating plot... done')
def draw_constrained(self, Lmins, priortransform, loglikelihood, live_pointsu, ndim, max_draws=None, **kwargs):
# previous is [[u, x, L], ...]
self.iter += 1
rebuild = self.iter % self.rebuild_every == 1
if rebuild or self.region is None:
self.rebuild(numpy.asarray(live_pointsu), ndim, keepRadius=False)
if self.generator is None:
self.generator = self.generate(ndim=ndim)
ntoaccept = 0
ntotalsum = 0
while True:
for u, ntotal in self.generator:
assert (u >= 0).all() and (u <= 1).all(), u
ntotalsum += ntotal
if self.hinter is not None:
hints = self.hinter(u)
if len(hints) == 0:
# no way
continue
if len(hints) > 1:
# choose a random solution, by size
raise NotImplementedError("multiple solutions not implemented")
                        hints = hints[numpy.random.randint(len(hints))]
else:
hints = hints[0]
for i, lo, hi in hints:
u[i] = numpy.random.uniform(lo, hi)
                    if not self.is_inside(self.transform_point(u)):
# not sure if this is a good idea
# it means we dont completely trust
# the hinting function
continue
x = priortransform(u)
L = loglikelihood(x)
ntoaccept += 1
if numpy.any(L > Lmins) or (max_draws is not None and ntotalsum > max_draws):
# yay, we win
if ntotalsum > 10000:
if self.verbose:
print('sampled %d points, evaluated %d ' % (ntotalsum, ntoaccept))
#self.debugplot(u)
return u, x, L, ntoaccept
# if running very inefficient, optimize clustering
# if we haven't done so at the start
if not rebuild and ntoaccept > 1000:
#self.debugplot(u)
break
rebuild = True
self.rebuild(numpy.asarray(live_pointsu), ndim, keepRadius=False)
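# A minimal sketch (added; not part of the original module) of the acceptance rule
# that is_inside()/are_inside_cluster() implement for the 'euclidean' metric:
# a proposal u is kept when it lies within `maxdistance` of at least one live point,
#
#   dists = scipy.spatial.distance.cdist(members, [u], metric='euclidean')
#   accept = bool((dists < maxdistance).any())
#
# draw_constrained() above then evaluates the likelihood only for proposals produced
# inside this region.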
if __name__ == '__main__':
friends = FriendsConstrainer(radial = True)
u = numpy.random.uniform(0.45, 0.55, size=1000).reshape((-1, 2))
ndim = 2
friends.cluster(u, ndim=ndim)
Lmin = -1
rv = scipy.stats.norm(0.515, 0.03)
def priortransform(x): return x
def loglikelihood(x): return rv.logpdf(x).sum()
previous = []
colors = ['r', 'g', 'orange']
plt.figure("dists", figsize=(7,4))
plt.figure("plane", figsize=(5,5))
plt.plot(u[:,0], u[:,1], 'x')
Lmins = [-5, 2, 2.5] #, 2.58]
for j, (Lmin, color) in enumerate(zip(numpy.array(Lmins)*ndim, colors)):
values = []
for i in range(200):
friends.iter = 4 # avoid rebuild
u, x, L, ntoaccept = friends.draw_constrained(Lmin, priortransform, loglikelihood, previous, ndim)
plt.figure("plane")
plt.plot(u[0], u[1], '+', color=color)
values.append(u)
values = numpy.array(values)
plt.figure("dists")
for k in range(ndim):
plt.subplot(1, ndim, k + 1)
plt.title('Lmin={}, dim={}'.format(Lmin, k))
            plt.hist(values[:,k], cumulative=True, density=True,
                color=color, bins=1000, histtype='step')
plt.figure("plane")
plt.savefig('friends_sampling_test.pdf', bbox_inches='tight')
plt.close()
plt.figure("dists")
plt.savefig('friends_sampling_test_dists.pdf', bbox_inches='tight')
plt.close()
# another test: given a group of samples, assert that only neighbors are evaluated
r = numpy.random.uniform(0.2, 0.25, size=400)
phi = numpy.random.uniform(0, 1, size=400)**10 * 2*numpy.pi
u = numpy.transpose([0.5 + r*numpy.cos(phi), 0.5 + r*numpy.sin(phi)])
friends.cluster(u, ndim=2)
plt.figure(figsize=(10,5))
plt.subplot(1, 2, 1)
plt.plot(u[:,0], u[:,1], 'x')
suggested = []
def loglikelihood(x):
r = ((x[0] - 0.5)**2 + (x[1] - 0.5)**2)**0.5
#assert r < 0.5
#assert r > 0.1
suggested.append(r)
if r > 0.2 and r < 0.25:
plt.plot(x[0], x[1], 'o', color='green')
return 100
plt.plot(x[0], x[1], 'o', color='red')
return -100
ndim = 2
taken = []
for i in range(100):
friends.iter = 4 # avoid rebuild
u, x, L, ntoaccept = friends.draw_constrained(Lmin, priortransform, loglikelihood, previous, ndim)
r = ((x[0] - 0.5)**2 + (x[1] - 0.5)**2)**0.5
taken.append(r)
print('suggested:', u)
plt.subplot(1, 2, 2)
    plt.hist(taken, cumulative=True, density=True,
        color='g', bins=1000, histtype='step')
    plt.hist(suggested, cumulative=True, density=True,
        color='r', bins=1000, histtype='step')
#x = numpy.linspace(0, 1, 400)
#y = x**ndim - (x - min(suggested) / max(suggested))**ndim
#y /= max(y)
#plt.plot(x * (max(suggested) - min(suggested)) + min(suggested), y, '--', color='grey')
plt.savefig('friends_sampling_test_sampling.pdf', bbox_inches='tight')
plt.close()
```
#### File: JohannesBuchner/massivedatans/gensimple_bright.py
```python
from __future__ import print_function, division
import numpy
import matplotlib.pyplot as plt
import h5py
from numpy import exp
import sys
def gauss(x, z, A, mu, sig):
xT = x.reshape((1,-1))
zT = z.reshape((-1,1))
AT = A.reshape((-1,1))
muT = mu.reshape((-1,1))
sigT = sig.reshape((-1,1))
return AT * exp(-0.5 * ((muT - xT / (1. + zT))/sigT)**2)
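# Shape note (added for clarity; not part of the original script): with M = len(x)
# wavelength bins and N objects, the (N,1)-shaped parameter columns broadcast against
# the (1,M)-shaped wavelength row, so gauss() returns an (N, M) array, e.g. with the
# arrays defined further down:
#   ym = gauss(A=height_narrow, mu=mean_narrow, x=x, z=z, sig=width_narrow)
#   assert ym.shape == (N, len(x))
# It is transposed to (M, N) below before per-object noise is added.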
x = numpy.linspace(400, 800, 200)
N = 40
N = int(sys.argv[1])
numpy.random.seed(N)
z = numpy.zeros(N) + 0.01
rest_wave = 440
print('generating parameters ...')
# in km/s
width_broad = 4000 * rest_wave / 300000 * numpy.ones(N)
width_narrow = 400 * rest_wave / 300000 * numpy.ones(N)
# convert to nm
mean_broad = rest_wave * numpy.ones(N)
mean_narrow = rest_wave * numpy.ones(N)
noise_level = 0.01
#signal_level = numpy.random.exponential(size=N) * 0.4
signal_level = numpy.ones(N) * 0.2
#signal_level = numpy.random.uniform(size=N) * 0.5
#is_type1 = numpy.random.uniform(size=N) < 0.5
height_broad = 10**-1 * signal_level
height_narrow = signal_level
#X = numpy.array([x])
print('generating signal ...')
ym = gauss(A=height_broad, mu=mean_broad, x=x, z=z, sig=width_broad)
ym += gauss(A=height_narrow, mu=mean_narrow, x=x, z=z, sig=width_narrow)
ym = numpy.transpose(ym)
print(ym.shape)
# add noise
print('adding noise...')
y = ym.copy()
for i in range(N):
y[:,i] += numpy.random.normal(0, noise_level, size=len(x))
print('plotting ...')
for i in range(min(N, 20)):
#plt.plot(x, y[:,i], '.-')
plt.plot(x, y[:,i], '-')
plt.savefig('gen_bright.pdf', bbox_inches='tight')
plt.close()
#print x.shape, y.shape, z.shape
with h5py.File('data_bright_%s.hdf5' % sys.argv[1], 'w') as f:
f.create_dataset('x', data=x, compression='gzip', shuffle=True)
f.create_dataset('y', data=y, compression='gzip', shuffle=True)
f.create_dataset('z', data=z, compression='gzip', shuffle=True)
f.create_dataset('mean_broad', data=mean_broad, compression='gzip', shuffle=True)
f.create_dataset('width_broad', data=width_broad, compression='gzip', shuffle=True)
f.create_dataset('height_broad', data=height_broad, compression='gzip', shuffle=True)
f.create_dataset('mean_narrow', data=mean_narrow, compression='gzip', shuffle=True)
f.create_dataset('width_narrow', data=width_narrow, compression='gzip', shuffle=True)
f.create_dataset('height_narrow', data=height_narrow, compression='gzip', shuffle=True)
```
#### File: JohannesBuchner/massivedatans/multi_nested_integrator.py
```python
from __future__ import print_function, division
"""
Integrator
----------
Copyright (c) 2017 <NAME>
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy
from numpy import exp, log, log10, pi
import progressbar
from adaptive_progress import AdaptiveETA
from numpy import logaddexp
import sys
def integrate_remainder(sampler, logwidth, logVolremaining, logZ, H, globalLmax):
# logwidth remains the same now for each sample
remainder = list(sampler.remainder())
logV = logwidth
L0 = remainder[-1][2]
L0 = globalLmax
logLs = [Li - L0 for ui, xi, Li in remainder]
Ls = numpy.exp(logLs)
LsMax = Ls.copy()
LsMax[-1] = numpy.exp(globalLmax - L0)
Lmax = LsMax[1:].sum(axis=0) + LsMax[-1]
#Lmax = Ls[1:].sum(axis=0) + Ls[-1]
Lmin = Ls[:-1].sum(axis=0) + Ls[0]
logLmid = log(Ls.sum(axis=0)) + L0
logZmid = logaddexp(logZ, logV + logLmid)
logZup = logaddexp(logZ, logV + log(Lmax) + L0)
logZlo = logaddexp(logZ, logV + log(Lmin) + L0)
logZerr = logZup - logZlo
assert numpy.isfinite(H).all()
assert numpy.isfinite(logZerr).all(), logZerr
for i in range(len(remainder)):
ui, xi, Li = remainder[i]
wi = logwidth + Li
logZnew = logaddexp(logZ, wi)
#Hprev = H
H = exp(wi - logZnew) * Li + exp(logZ - logZnew) * (H + logZ) - logZnew
H[H < 0] = 0
#assert (H>0).all(), (H, Hprev, wi, Li, logZ, logZnew)
logZ = logZnew
#assert numpy.isfinite(logZerr + (H / sampler.nlive_points)**0.5), (H, sampler.nlive_points, logZerr)
return logV + logLmid, logZerr, logZmid, logZerr + (H / sampler.nlive_points)**0.5, logZerr + (H / sampler.nlive_points)**0.5
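# Reading aid (added; not part of the original source): integrate_remainder() brackets
# the contribution of the remaining live points by assigning the leftover volume logV
# to pessimistic/optimistic likelihood sums,
#   logZlo  = logaddexp(logZ, logV + log(Lmin) + L0)
#   logZup  = logaddexp(logZ, logV + log(Lmax) + L0)
#   logZerr = logZup - logZlo
# and returns the mid estimate logZmid together with logZerr plus the usual
# information-based term (H / nlive_points)**0.5.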
"""
Performs the Nested Sampling integration by calling the *sampler* multiple times
until the *tolerance* is reached, or the maximum number of likelihood evaluations
is exceeded.
:param sampler: Sampler
:param tolerance: uncertainty in log Z to compute to
:param max_samples: maximum number of likelihood evaluations (None for no limit)
@return dictionary containing the keys
logZ, logZerr: log evidence and uncertainty,
samples: all obtained samples,
weights: posterior samples:
list of prior coordinates, transformed coordinates, likelihood value
and weight
information: information H
niterations: number of nested sampling iterations
"""
def multi_nested_integrator(multi_sampler, tolerance = 0.01, max_samples=None, min_samples = 0, need_robust_remainder_error=True):
sampler = multi_sampler
logVolremaining = 0
logwidth = log(1 - exp(-1. / sampler.nlive_points))
weights = [] #[-1e300, 1]]
widgets = ["|...|",
progressbar.Bar(), progressbar.Percentage(), AdaptiveETA()]
pbar = progressbar.ProgressBar(widgets = widgets, maxval=sampler.nlive_points)
i = 0
ndata = multi_sampler.ndata
running = numpy.ones(ndata, dtype=bool)
last_logwidth = numpy.zeros(ndata)
last_logVolremaining = numpy.zeros(ndata)
last_remainderZ = numpy.zeros(ndata)
last_remainderZerr = numpy.zeros(ndata)
logZerr = numpy.zeros(ndata)
ui, xi, Li = next(sampler)
wi = logwidth + Li
logZ = wi
H = Li - logZ
remainder_tails = [[]] * ndata
pbar.currval = i
pbar.start()
while True:
i = i + 1
logwidth = log(1 - exp(-1. / sampler.nlive_points)) + logVolremaining
last_logwidth[running] = logwidth
last_logVolremaining[running] = logwidth
logVolremaining -= 1. / sampler.nlive_points
# fill up, otherwise set weight to zero
Lifull = numpy.zeros(ndata)
Lifull[:] = -numpy.inf
Lifull[running] = Li
uifull = numpy.zeros((ndata, ui.shape[1]))
uifull[running,:] = ui
xifull = numpy.zeros((ndata, ui.shape[1]))
xifull[running,:] = xi
weights.append([uifull, xifull, Lifull, numpy.where(running, logwidth, -numpy.inf), running])
logZerr[running] = (H[running] / sampler.nlive_points)**0.5
sys.stdout.flush()
pbar.update(i)
# expected number of iterations:
i_final = -sampler.nlive_points * (-sampler.Lmax + log(exp(numpy.max([tolerance - logZerr[running], logZerr[running] / 100.], axis=0) + logZ[running]) - exp(logZ[running])))
i_final = numpy.where(i_final < i+1, i+1, numpy.where(i_final > i+100000, i+100000, i_final))
max_value = max(i+1, i_final.max())
if hasattr(pbar, 'max_value'):
pbar.max_value = max_value
elif hasattr(pbar, 'maxval'):
pbar.maxval = max_value
if i > min_samples and i % 50 == 1 or (max_samples and i > max_samples):
remainderZ, remainderZerr, totalZ, totalZerr, totalZerr_bootstrapped = integrate_remainder(sampler, logwidth, logVolremaining, logZ[running], H[running], sampler.Lmax)
print('checking for termination:', remainderZ, remainderZerr, totalZ, totalZerr)
# tolerance
last_remainderZ[running] = remainderZ
last_remainderZerr[running] = remainderZerr
terminating = totalZerr < tolerance
if max_samples and i > max_samples:
terminating[:] = True
widgets[0] = '|%d/%d samples+%d/%d|lnZ = %.2f +- %.3f + %.3f|L=%.2f^%.2f ' % (
i + 1, max_value, sampler.nlive_points, sampler.ndraws, logaddexp(logZ[running][0], remainderZ[0]), max(logZerr[running]), max(remainderZerr), Li[0], sampler.Lmax[0])
if terminating.any():
print('terminating %d, namely:' % terminating.sum(), list(numpy.where(terminating)[0]))
for j, k in enumerate(numpy.where(running)[0]):
if terminating[j]:
remainder_tails[k] = [[ui, xi, Li, logwidth] for ui, xi, Li in sampler.remainder(j)]
sampler.cut_down(~terminating)
running[running] = ~terminating
if not running.any():
break
print(widgets[0])
ui, xi, Li = next(sampler)
wi = logwidth + Li
logZnew = logaddexp(logZ[running], wi)
H[running] = exp(wi - logZnew) * Li + exp(logZ[running] - logZnew) * (H[running] + logZ[running]) - logZnew
logZ[running] = logZnew
# add tail
# not needed for integral, but for posterior samples, otherwise there
# is a hole in the most likely parameter ranges.
all_tails = numpy.ones(ndata, dtype=bool)
    for it in range(sampler.nlive_points):
        u, x, L, logwidth = list(zip(*[tail[it] for tail in remainder_tails]))
weights.append([u, x, L, logwidth, all_tails])
logZerr = logZerr + last_remainderZerr
logZ = logaddexp(logZ, last_remainderZ)
return dict(logZ=logZ, logZerr=logZerr,
weights=weights, information=H,
niterations=i)
__all__ = ['multi_nested_integrator']
```
#### File: massivedatans/pres/plotcontour.py
```python
from __future__ import print_function, division
import numpy
import matplotlib.pyplot as plt
#from nested_sampling.clustering.neighbors import find_rdistance, is_within_distance_of, count_within_distance_of, any_within_distance_of
from nested_sampling.samplers.hiermetriclearn import ClusterResult, RadFriendsRegion
CX = [0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4]
CS = [0.2, 0.2, 0.2, 0.2, 0.15, 0.2, 0.15, 0.2, 0.2]
CY = [0.2, 0, 0, 0, 0.1, 0.3, 1, 1.4, 2]
CW = [1, 2, 2, 2, 2, 2, 20, 2, 2]
CX = numpy.linspace(0, 4, 20)
CY = CX*-0.2 + CX**2*0.3
#plt.plot(x, x*-0.2 + x**2*0.2)
CW = CX * 0 + 2 + 10*CY**2
CW = 1./CW
CW[0] = 0.5
CW[1] = 1
#CW[-5] = 20
CS = CX * 0 + 0.2
#CS[-5] = 0.12
def likelihood(x, y):
l = 0
for cx, cy, cw, cs in zip(CX, CY, CW, CS):
l += cw * numpy.exp(-0.5 * (((cx - x)/cs)**2 + ((cy - y)/cs)**2))
return numpy.log(l)
x = numpy.linspace(-2.5, 6.5, 100)
y = numpy.linspace(-2.5, 6.5, 100)
X, Y = numpy.meshgrid(x, y)
XY = numpy.array(numpy.transpose([X.flatten(), Y.flatten()]), order='C')
print(XY.dtype)
L = likelihood(X, Y)
Lsorted = L[30:-30,30:-30].flatten()
Lsorted.sort()
levels = Lsorted[::Lsorted.size // 7 - 1].tolist() # + [L.max()]
levels = levels[2:]
#levels = L.max() - numpy.arange(5) * 4 - 2
plt.figure(figsize=(6, 3), frameon=False)
plt.axis('off')
plt.contour(X, Y, L, levels)
plt.savefig('plotcontour.png', bbox_inches='tight')
plt.savefig('plotcontour.pdf', bbox_inches='tight')
plt.close()
numpy.random.seed(1)
N = 10000
x = numpy.random.uniform(-2, 6, size=N)
y = numpy.random.uniform(-2, 6, size=N)
l = likelihood(x, y)
Nlive = 100
for i in range(len(levels)):
plt.figure(figsize=(6, 2.2), frameon=False)
plt.axis('off')
plt.text(-2, 4, 'Iteration %d' % (i*100))
#plt.text(-2, 4, '(%d)' % (i+1))
mask = l > levels[i]
xlevel = x[mask][:Nlive]
ylevel = y[mask][:Nlive]
live_points = numpy.array(numpy.transpose([xlevel, ylevel]), order='C')
plt.contour(X, Y, L, levels[i:i+1], colors=['k'], linestyles=[':'])
plt.plot(xlevel, ylevel, '.', color='k')
# do radfriends with these points
region = RadFriendsRegion(live_points)
mask = region.are_inside(XY)
maskregion = mask.reshape(X.shape)
plt.contour(X, Y, maskregion*1., [0.5], colors=['orange'], linestyles=['-'])
plt.ylim(-2.5, 6.2)
plt.xlim(-3, 7)
plt.savefig('plotcontour_%d.png' % (i+1), bbox_inches='tight')
plt.savefig('plotcontour_%d.pdf' % (i+1), bbox_inches='tight')
plt.close()
``` |
{
"source": "JohannesBuchner/matplotlib-subsets",
"score": 2
} |
#### File: matplotlib-subsets/tests/nestedsetrect_test.py
```python
import string
from matplotlib_subsets import *
def test_nested_example1():
sets = [
set(list('ABCDEFGH')),
set(list('DEFG')),
set(list('E')),
]
setsizes = [len(s) for s in sets]
nestedsets_rectangles(setsizes, labels = [
r'$\mathbf{%s}$ ($%d$)' % (string.ascii_uppercase[i], len(s))
for i, s in enumerate(sets)])
plt.savefig('example_nested.pdf', bbox_inches='tight')
plt.close()
def test_tree_example1():
tree = ((120, '100', None), [
((50, 'A50', None), []),
((50, 'B50', None), [])
])
treesets_rectangles(tree)
plt.savefig('example_tree.pdf', bbox_inches='tight')
plt.close()
def test_tree_example2():
tree = ((120, '100', None),
[((50, 'A50', None),
[((20, 'AX20', None), [((8, 'AXM8', None), [((4, 'AXM4', None), [((2, 'AXM2', None), [])])]), ((8, 'AXN8', None), [])]),
((20, 'AY20', None), [])]),
((50, 'B50', None), [((5, 'Bt', None), [])]*5)
])
plt.figure(figsize=(7,7))
treesets_rectangles(tree)
plt.savefig('example_tree2.pdf', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
test_nested_example1()
test_tree_example1()
test_tree_example2()
``` |
{
"source": "JohannesBuchner/nnest",
"score": 3
} |
#### File: nnest/nnest/networks.py
```python
import math
import torch
import torch.nn as nn
class BatchNormFlow(nn.Module):
""" An implementation of a batch normalization layer from
Density estimation using Real NVP
(https://arxiv.org/abs/1605.08803).
"""
def __init__(self, num_inputs, momentum=0.0, eps=1e-5):
super(BatchNormFlow, self).__init__()
self.log_gamma = nn.Parameter(torch.zeros(num_inputs))
self.beta = nn.Parameter(torch.zeros(num_inputs))
self.momentum = momentum
self.eps = eps
self.register_buffer('running_mean', torch.zeros(num_inputs))
self.register_buffer('running_var', torch.ones(num_inputs))
def forward(self, inputs, cond_inputs=None, mode='direct'):
if mode == 'direct':
if self.training:
self.batch_mean = inputs.mean(0)
self.batch_var = (
inputs - self.batch_mean).pow(2).mean(0) + self.eps
self.running_mean.mul_(self.momentum)
self.running_var.mul_(self.momentum)
self.running_mean.add_(self.batch_mean.data *
(1 - self.momentum))
self.running_var.add_(self.batch_var.data *
(1 - self.momentum))
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (inputs - mean) / var.sqrt()
y = torch.exp(self.log_gamma) * x_hat + self.beta
return y, (self.log_gamma - 0.5 * torch.log(var)).sum(
-1, keepdim=True)
else:
if self.training:
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (inputs - self.beta) / torch.exp(self.log_gamma)
y = x_hat * var.sqrt() + mean
return y, (-self.log_gamma + 0.5 * torch.log(var)).sum(
-1, keepdim=True)
class CouplingLayer(nn.Module):
""" An implementation of a coupling layer
from RealNVP (https://arxiv.org/abs/1605.08803).
"""
def __init__(self,
num_inputs,
num_hidden,
mask,
num_cond_inputs=None,
s_act='tanh',
t_act='relu',
num_layers=2):
super(CouplingLayer, self).__init__()
self.num_inputs = num_inputs
self.mask = mask
activations = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid, 'tanh': nn.Tanh}
s_act_func = activations[s_act]
t_act_func = activations[t_act]
if num_cond_inputs is not None:
total_inputs = num_inputs + num_cond_inputs
else:
total_inputs = num_inputs
scale_layers = [nn.Linear(total_inputs, num_hidden), s_act_func()]
for i in range(0, num_layers):
scale_layers += [nn.Linear(num_hidden, num_hidden), s_act_func()]
scale_layers += [nn.Linear(num_hidden, num_inputs)]
self.scale_net = nn.Sequential(*scale_layers)
translate_layers = [nn.Linear(total_inputs, num_hidden), t_act_func()]
for i in range(0, num_layers):
translate_layers += [nn.Linear(num_hidden, num_hidden), t_act_func()]
translate_layers += [nn.Linear(num_hidden, num_inputs)]
self.translate_net = nn.Sequential(*translate_layers)
def init(m):
if isinstance(m, nn.Linear):
m.bias.data.fill_(0)
nn.init.orthogonal_(m.weight.data)
def forward(self, inputs, cond_inputs=None, mode='direct'):
mask = self.mask
masked_inputs = inputs * mask
if cond_inputs is not None:
masked_inputs = torch.cat([masked_inputs, cond_inputs], -1)
if mode == 'direct':
log_s = self.scale_net(masked_inputs) * (1 - mask)
t = self.translate_net(masked_inputs) * (1 - mask)
s = torch.exp(log_s)
return inputs * s + t, log_s.sum(-1, keepdim=True)
else:
log_s = self.scale_net(masked_inputs) * (1 - mask)
t = self.translate_net(masked_inputs) * (1 - mask)
s = torch.exp(-log_s)
return (inputs - t) * s, -log_s.sum(-1, keepdim=True)
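# Invertibility sketch (added for illustration; not part of the original module).
# The 'inverse' mode of a CouplingLayer undoes the 'direct' mode and the log-dets
# cancel; the layer sizes below are arbitrary choices:
#
#   layer = CouplingLayer(num_inputs=4, num_hidden=16, mask=(torch.arange(4) % 2).float())
#   x = torch.randn(8, 4)
#   y, logdet = layer(x, mode='direct')
#   x_rec, logdet_inv = layer(y, mode='inverse')
#   # expect torch.allclose(x, x_rec, atol=1e-5) and (logdet + logdet_inv) close to 0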
class FlowSequential(nn.Sequential):
""" A sequential container for flows.
In addition to a forward pass it implements a backward pass and
computes log jacobians.
"""
def forward(self, inputs, cond_inputs=None, mode='direct', logdets=None):
""" Performs a forward or backward pass for flow modules.
Args:
inputs: a tuple of inputs and logdets
mode: to run direct computation or inverse
"""
self.num_inputs = inputs.size(-1)
if logdets is None:
logdets = torch.zeros(inputs.size(0), 1, device=inputs.device)
assert mode in ['direct', 'inverse']
if mode == 'direct':
for module in self._modules.values():
inputs, logdet = module(inputs, cond_inputs, mode)
logdets += logdet
else:
for module in reversed(self._modules.values()):
inputs, logdet = module(inputs, cond_inputs, mode)
logdets += logdet
return inputs, logdets
def log_probs(self, inputs, cond_inputs=None):
u, log_jacob = self(inputs, cond_inputs)
log_probs = (-0.5 * u.pow(2) - 0.5 * math.log(2 * math.pi)).sum(
-1, keepdim=True)
return (log_probs + log_jacob).sum(-1, keepdim=True)
def sample(self, num_samples=None, noise=None, cond_inputs=None):
if noise is None:
noise = torch.Tensor(num_samples, self.num_inputs).normal_()
device = next(self.parameters()).device
noise = noise.to(device)
if cond_inputs is not None:
cond_inputs = cond_inputs.to(device)
samples = self.forward(noise, cond_inputs, mode='inverse')[0]
return samples
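# Note (added): FlowSequential.log_probs implements the change-of-variables formula
#   log p(x) = log N(u; 0, I) + log |det du/dx|,  with u = f(x),
# where the Gaussian term is the (-0.5*u**2 - 0.5*log(2*pi)) sum above and the
# Jacobian term is the accumulated `logdets` from the coupling layers.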
class SingleSpeed(nn.Module):
def __init__(self, num_inputs, num_hidden, num_blocks, num_layers):
super(SingleSpeed, self).__init__()
modules = []
mask = torch.arange(0, num_inputs) % 2
mask = mask.float()
for i in range(num_blocks):
modules += [
CouplingLayer(
num_inputs, num_hidden, mask, None,
s_act='tanh', t_act='relu', num_layers=num_layers)
]
mask = 1 - mask
self.net = FlowSequential(*modules)
def forward(self, inputs, cond_inputs=None, mode='direct', logdets=None):
return self.net(inputs, cond_inputs=cond_inputs, mode=mode, logdets=logdets)
def log_probs(self, inputs, cond_inputs=None):
        return self.net.log_probs(inputs, cond_inputs=cond_inputs)
def sample(self, num_samples=None, noise=None, cond_inputs=None):
return self.net.sample(num_samples=num_samples, noise=noise, cond_inputs=cond_inputs)
class FastSlow(nn.Module):
def __init__(self, num_fast, num_slow, num_hidden, num_blocks, num_layers):
super(FastSlow, self).__init__()
self.num_fast = num_fast
self.num_slow = num_slow
self.num_inputs = num_fast + num_slow
# Fast block
mask_fast = torch.arange(0, num_fast) % 2
mask_fast = mask_fast.float()
modules_fast = []
for _ in range(num_blocks):
modules_fast += [
CouplingLayer(
num_fast, num_hidden, mask_fast, None,
s_act='tanh', t_act='relu', num_layers=num_layers)
]
mask_fast = 1 - mask_fast
self.net_fast = FlowSequential(*modules_fast)
# Slow block
mask_slow = torch.arange(0, num_slow) % 2
mask_slow = mask_slow.float()
modules_slow = []
for _ in range(num_blocks):
modules_slow += [
CouplingLayer(
num_slow, num_hidden, mask_slow, None,
s_act='tanh', t_act='relu', num_layers=num_layers)
]
mask_slow = 1 - mask_slow
self.net_slow = FlowSequential(*modules_slow)
        # Combine the fast and slow blocks such that the slow block stays unchanged
        # when only the fast block is updated
modules = []
mask = torch.cat((torch.ones(num_slow), torch.zeros(num_fast)))
modules = [
CouplingLayer(
num_slow + num_fast, num_hidden, mask, None,
s_act='tanh', t_act='relu', num_layers=num_layers)
]
self.net = FlowSequential(*modules)
def forward(self, inputs, cond_inputs=None, mode='direct', logdets=None):
assert mode in ['direct', 'inverse']
if mode == 'direct':
slow, logdets_slow = self.net_slow(inputs[:, 0:self.num_slow], mode=mode)
fast, logdets_fast = self.net_fast(inputs[:, self.num_slow:self.num_slow+self.num_fast], mode=mode)
inputs = torch.cat((slow, fast), dim=1)
inputs, logdets = self.net(inputs, mode=mode)
else:
inputs, logdets = self.net(inputs, mode=mode)
slow, logdets_slow = self.net_slow(inputs[:, 0:self.num_slow], mode=mode)
fast, logdets_fast = self.net_fast(inputs[:, self.num_slow:self.num_slow+self.num_fast], mode=mode)
inputs = torch.cat((slow, fast), dim=1)
return inputs, logdets_slow + logdets_fast + logdets
def log_probs(self, inputs, cond_inputs=None):
slow, logdets_slow = self.net_slow(inputs[:, 0:self.num_slow])
fast, logdets_fast = self.net_fast(inputs[:, self.num_slow:self.num_slow+self.num_fast])
inputs = torch.cat((slow, fast), dim=1)
u, log_jacob = self.net(inputs)
log_probs = (-0.5 * u.pow(2) - 0.5 * math.log(2 * math.pi)).sum(
-1, keepdim=True)
return (log_probs + log_jacob + logdets_slow + logdets_fast).sum(-1, keepdim=True)
def sample(self, num_samples=None, noise=None, cond_inputs=None):
if noise is None:
noise = torch.Tensor(num_samples, self.num_inputs).normal_()
device = next(self.parameters()).device
noise = noise.to(device)
if cond_inputs is not None:
cond_inputs = cond_inputs.to(device)
samples = self.forward(noise, cond_inputs, mode='inverse')[0]
return samples
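# A hedged usage sketch (added; sizes are arbitrary):
#
#   flow = FastSlow(num_fast=2, num_slow=3, num_hidden=32, num_blocks=4, num_layers=2)
#   x = torch.rand(16, 5)                  # 3 slow + 2 fast coordinates
#   logp = flow.log_probs(x)               # shape (16, 1)
#   samples = flow.sample(num_samples=10)  # inverts the flow on standard-normal noise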
``` |
{
"source": "JohannesBuchner/pystrict3",
"score": 2
} |
#### File: pystrict3/pystrict3lib/classchecker.py
```python
import ast
import sys
from .funcchecker import FuncLister, count_call_arguments
internal_members = set(dir(object)).union(dir(classmethod)).union(dir(lambda x: x))
class MethodCallLister(ast.NodeVisitor):
"""Verifies all calls against call signatures in known_methods.
Unknown functions are not verified."""
def __init__(self, filename, class_name, known_methods, known_staticmethods):
self.filename = filename
self.known_methods = known_methods
self.known_staticmethods = known_staticmethods
self.class_name = class_name
def visit_Call(self, node):
self.generic_visit(node)
if not (isinstance(node.func, ast.Attribute) and isinstance(node.func.value, ast.Name) and node.func.value.id == 'self'):
return
funcname = node.func.attr
min_call_args, may_have_more = count_call_arguments(node)
if funcname in self.known_staticmethods:
min_args, max_args = self.known_staticmethods[funcname]
is_staticmethod = True
elif funcname in self.known_methods:
min_args, max_args = self.known_methods[funcname]
# self is supplied by Python
min_call_args += 1
is_staticmethod = False
else:
# this is already guaranteed by the ClassPropertiesLister
return
if max_args >= 0 and min_call_args > max_args:
sys.stderr.write('%s:%d: ERROR: Class "%s": %s "%s" (%d..%d arguments) called with too many (%d%s) arguments\n' % (
self.filename, node.lineno, self.class_name, 'static method' if is_staticmethod else 'method',
funcname, min_args, max_args, min_call_args, '+' if may_have_more else ''))
sys.exit(1)
elif min_call_args < min_args and not may_have_more:
sys.stderr.write('%s:%d: ERROR: Class "%s": %s "%s" (%d..%d arguments) called with too few (%d%s) arguments\n' % (
self.filename, node.lineno, self.class_name, 'static method' if is_staticmethod else 'method',
funcname, min_args, max_args, min_call_args, '+' if may_have_more else ''))
sys.exit(1)
else:
print("call(%s.%s with %d%s args): OK" % (self.class_name, funcname, min_call_args, '+' if may_have_more else ''))
class ClassPropertiesLister(ast.NodeVisitor):
"""Verifies that all class properties that are accessed inside a class
are set at some point in the same class."""
def __init__(self, filename):
self.filename = filename
def visit_ClassDef(self, node):
self.generic_visit(node)
# skip subclasses metaclasses and other fancy things
derived_class = len(node.bases) > 1 \
or len(node.bases) > 0 and not (len(node.bases) == 1 and isinstance(node.bases[0], ast.Name) and node.bases[0].id == 'object') \
or len(node.keywords) > 0 or len(node.decorator_list) > 0
# standalone class
# collect all members
known_attributes = set()
known_members = set(internal_members)
for child in ast.iter_child_nodes(node):
if isinstance(child, ast.FunctionDef):
print("+%s.%s()" % (node.name, child.name))
known_members.add(child.name)
if isinstance(child, ast.Assign):
for target in child.targets:
for name in ast.walk(target):
if isinstance(name, ast.Name):
print("+%s.%s" % (node.name, name.id))
known_attributes.add(name.id)
# collect all assigns
for child in ast.walk(node):
if isinstance(child, ast.Attribute) and isinstance(child.value, ast.Name) and child.value.id == 'self' and isinstance(child.ctx, ast.Store):
known_attributes.add(child.attr)
for child in ast.walk(node):
if isinstance(child, ast.Attribute) and isinstance(child.value, ast.Name) and child.value.id == 'self' and isinstance(child.ctx, ast.Load):
if child.attr in known_attributes:
print("accessing attribute %s.%s: OK" % (node.name, child.attr))
continue
if child.attr in known_members:
print("accessing member %s.%s: OK" % (node.name, child.attr))
continue
if derived_class:
print("accessing unknown member %s.%s: possibly OK, derived class" % (node.name, child.attr))
continue
sys.stderr.write('%s:%d: ERROR: accessing unknown class attribute "%s.%s"\n' % (
self.filename, child.lineno, node.name, child.attr))
sys.exit(1)
# verify class members
funcs = FuncLister(filename=self.filename)
funcs.visit(node)
MethodCallLister(
filename=self.filename, class_name=node.name,
known_methods=funcs.known_functions, known_staticmethods=funcs.known_staticmethods
).visit(node)
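# A hedged usage sketch (added; not part of the original module; the filename is
# illustrative only):
#
#   import ast
#   source = open('some_module.py').read()
#   ClassPropertiesLister(filename='some_module.py').visit(ast.parse(source))
#
# Accessing a self.<attr> that is never assigned anywhere in a plain (non-derived)
# class prints an error and calls sys.exit(1), as implemented above.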
```
#### File: pystrict3/pystrict3lib/funcchecker.py
```python
import ast
import sys
import inspect
import importlib
import builtins
import distutils.sysconfig
import os
from collections import defaultdict
def parse_builtin_signature(signature):
min_args = 0
for param in signature.parameters.values():
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD or param.kind == inspect.Parameter.POSITIONAL_ONLY:
if param.default == inspect.Parameter.empty:
min_args += 1
else:
break
for param in signature.parameters.values():
if param.kind == inspect.Parameter.VAR_KEYWORD or param.kind == inspect.Parameter.VAR_POSITIONAL:
return min_args, -1
max_args = len(signature.parameters)
return min_args, max_args
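# Examples (added for illustration; results hold on CPython versions where these
# builtins expose an inspectable signature):
#   parse_builtin_signature(inspect.signature(divmod))  # -> (2, 2)
#   parse_builtin_signature(inspect.signature(print))   # -> (0, -1), i.e. *args/**kwargs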
def count_function_min_arguments(arguments):
""" returns minimum number of arguments. """
return len(arguments.args) - len(arguments.defaults)
def count_function_max_arguments(arguments):
""" returns maximum number of arguments. If uncertain, returns -1. """
if arguments.vararg or arguments.kwarg:
return -1
return len(arguments.args) + len(arguments.kwonlyargs)
def count_function_arguments(arguments):
""" returns minimum and maximum number of arguments.
If uncertain, the maximum is -1. """
return count_function_min_arguments(arguments), count_function_max_arguments(arguments)
def count_call_arguments(call):
""" returns the number of arguments given to call, and
a bool indicating whether that may be a lower limit """
may_have_more = False
min_call_args = 0
for arg in call.args:
if isinstance(arg, ast.Starred):
# should not count *args
may_have_more |= True
continue
min_call_args += 1
for arg in call.keywords:
if arg.arg is None: # **kwargs
may_have_more |= True
min_call_args += 1
return min_call_args, may_have_more
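# Example (added for illustration):
#   call = ast.parse("f(1, 2, *rest, key=3)").body[0].value
#   count_call_arguments(call)  # -> (3, True): three definite arguments, plus *rest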
class FuncLister(ast.NodeVisitor):
"""Compiles a list of all functions and class inits
with their call signatures.
Result is in the known_functions attribute."""
def __init__(self, filename):
self.filename = filename
self.known_functions = dict(**FuncLister.KNOWN_BUILTIN_FUNCTIONS)
self.known_staticmethods = {}
KNOWN_BUILTIN_FUNCTIONS = {}
@staticmethod
def load_builtin_functions():
for f, func in builtins.__dict__.items():
if inspect.isbuiltin(func) or inspect.isfunction(func):
try:
FuncLister.KNOWN_BUILTIN_FUNCTIONS[f] = parse_builtin_signature(inspect.signature(func))
except ValueError:
FuncLister.KNOWN_BUILTIN_FUNCTIONS[f] = 0, -1
def visit_FunctionDef(self, node):
is_staticmethod = len(node.decorator_list) == 1 and isinstance(node.decorator_list[0], ast.Name) and node.decorator_list[0].id == 'staticmethod'
if node.decorator_list == [] or is_staticmethod:
min_args = count_function_min_arguments(node.args)
max_args = count_function_max_arguments(node.args)
else:
min_args = 0
max_args = -1
if node.name in self.known_functions:
min_args_orig, max_args_orig = self.known_functions[node.name]
min_combined_args = min(min_args, min_args_orig)
if max_args == -1 or max_args_orig == -1:
max_combined_args = -1
else:
max_combined_args = max(max_args, max_args_orig)
else:
min_combined_args, max_combined_args = min_args, max_args
if is_staticmethod:
self.known_staticmethods[node.name] = (min_combined_args, max_combined_args)
else:
self.known_functions[node.name] = (min_combined_args, max_combined_args)
print('function "%s" has %d..%d arguments' % (node.name, min_args, max_args))
self.generic_visit(node)
def visit_ClassDef(self, node):
# find the child that defines __init__
if node.decorator_list == []:
if node.name in self.known_functions:
sys.stderr.write('%s:%d: ERROR: Class "%s" redefined\n' % (
self.filename, node.lineno, node.name))
sys.exit(1)
for child in ast.iter_child_nodes(node):
# look for __init__ method:
if not isinstance(child, ast.FunctionDef):
continue
if not child.name == '__init__':
continue
arguments = child.args
if len(arguments.args) >= 1 and arguments.args[0].arg == 'self':
min_args, max_args = count_function_arguments(arguments)
# remove self from arguments, as it is supplied by Python
if max_args > 0:
max_args -= 1
min_args -= 1
self.known_functions[node.name] = (min_args, max_args)
print('class "%s" init has %d..%d arguments' % (node.name, min_args, max_args))
self.generic_visit(node)
class CallLister(ast.NodeVisitor):
"""Verifies all calls against call signatures in known_functions.
Unknown functions are not verified."""
def __init__(self, filename, known_functions):
self.filename = filename
self.known_functions = known_functions
def visit_Call(self, node):
self.generic_visit(node)
if not isinstance(node.func, ast.Name):
return
funcname = node.func.id
if funcname not in self.known_functions:
return
min_args, max_args = self.known_functions[funcname]
min_call_args, may_have_more = count_call_arguments(node)
if max_args >= 0 and min_call_args > max_args:
sys.stderr.write('%s:%d: ERROR: Function "%s" (%d..%d arguments) called with too many (%d%s) arguments\n' % (
self.filename, node.lineno, funcname, min_args, max_args, min_call_args, '+' if may_have_more else ''))
sys.exit(1)
elif min_call_args < min_args and not may_have_more:
sys.stderr.write('%s:%d: ERROR: Function "%s" (%d..%d arguments) called with too few (%d%s) arguments\n' % (
self.filename, node.lineno, funcname, min_args, max_args, min_call_args, '+' if may_have_more else ''))
sys.exit(1)
else:
print("call(%s with %d%s args): OK" % (funcname, min_call_args, '+' if may_have_more else ''))
BUILTIN_MODULES = []
for m in list(sys.builtin_module_names) + list(sys.modules.keys()):
if not m.startswith('_'):
BUILTIN_MODULES.append(m)
class ModuleCallLister(ast.NodeVisitor):
"""Verifies all calls against call signatures in known_functions.
Unknown functions are not verified."""
def __init__(self, filename, load_policy='none'):
""" If load_policy is 'none', do not load any new modules.
if load_policy is 'builtins', load python libraries from the python
standard library path.
if load_policy is 'all', try to load arbitrary python libraries."""
self.filename = filename
if load_policy not in ('none', 'builtins', 'all'):
raise ValueError("load_policy needs to be one of ('none', 'builtins', 'all'), not '%s'" % load_policy)
self.load_policy = load_policy
self.approved_module_names = {k for k in sys.modules.keys() if not k.startswith('_')}
if self.load_policy != 'none':
self.approved_module_names |= {k for k in sys.builtin_module_names if not k.startswith('_')}
# if self.load_policy != 'all':
# print("allowed modules:", sorted(self.approved_module_names))
self.used_module_names = {}
def visit_Import(self, node):
for alias in node.names:
if alias.asname is None:
self.used_module_names[alias.name] = alias.name
# print('+module: "%s"' % (alias.name))
else:
self.used_module_names[alias.asname] = alias.name
# print('+module: "%s"' % (alias.name))
self.load_module(alias.name)
self.generic_visit(node)
def visit_ImportFrom(self, node):
if node.level == 0:
for alias in node.names:
if alias.asname is None:
self.used_module_names[alias.name] = node.module + '.' + alias.name
# print('+module: %s' % (node.module + '.' + alias.name, alias.name))
else:
self.used_module_names[alias.asname] = node.module + '.' + alias.name
# print('+module: %s' % (node.module + '.' + alias.name, alias.asname))
self.load_module(node.module + '.' + alias.name)
self.generic_visit(node)
# lazy load the needed module functions
KNOWN_CALLS = defaultdict(dict)
KNOWN_MODULES = {}
def load_module(self, module_name):
# if this is a submodule, try to handle by importing the parent
parts = module_name.split('.')
parent_module = '.'.join(parts[:-1])
if parent_module != '':
self.load_module(parent_module)
parent_mod = ModuleCallLister.KNOWN_MODULES.get(parent_module)
if parent_mod is not None:
mod = getattr(parent_mod, parts[-1], None)
if mod is not None:
ModuleCallLister.KNOWN_MODULES[module_name] = mod
return mod
del mod
ModuleCallLister.KNOWN_MODULES[module_name] = None
if self.load_policy != 'all' and module_name.split('.')[0] not in self.approved_module_names:
return
if self.load_policy == 'builtins':
if module_name.split('.')[0] not in self.approved_module_names:
std_lib = distutils.sysconfig.get_python_lib(standard_lib=True)
loadable_std_file = os.path.exists(os.path.join(std_lib, module_name.split('.')[0] + '.py'))
loadable_std_dir = os.path.exists(os.path.join(std_lib, module_name.split('.')[0], '__init__.py'))
if not loadable_std_file and not loadable_std_dir:
# do not load arbitrary modules
print('skipping loading module "%s" outside standard lib' % module_name)
return
try:
print('+loading module %s' % module_name)
mod = importlib.import_module(module_name)
ModuleCallLister.KNOWN_MODULES[module_name] = mod
return mod
except ImportError:
print('WARNING: loading module %s failed' % module_name)
return None
def get_function(self, module_name, funcname):
mod = ModuleCallLister.KNOWN_MODULES[module_name]
assert mod is not None
if funcname != "":
for level in funcname.split('.'):
subm = getattr(mod, level, None)
if subm is None:
print('skipping unknown function "%s.%s"' % (module_name, level))
return
else:
del mod
mod = subm
return mod
def lazy_load_call(self, module_name, funcname):
functions = ModuleCallLister.KNOWN_CALLS[module_name]
if funcname in functions:
# use cached result
return functions[funcname]
func = self.get_function(module_name, funcname)
if inspect.isbuiltin(func) or inspect.isfunction(func):
try:
min_args, max_args = parse_builtin_signature(inspect.signature(func))
print('+function: "%s.%s" (%d..%d) arguments' % (module_name, funcname, min_args, max_args))
except ValueError:
min_args, max_args = 0, -1
print('+uninspectable callable: "%s.%s"' % (module_name, funcname))
elif inspect.isclass(func):
min_args, max_args = parse_builtin_signature(inspect.signature(func.__init__))
# remove self from arguments, as it is supplied by Python
if max_args > 0:
max_args -= 1
min_args -= 1
print('+class: "%s.%s" (%d..%d) arguments' % (module_name, funcname, min_args, max_args))
elif hasattr(func, '__call__'):
# some type we do not understand, like numpy ufuncs
min_args, max_args = 0, -1
print('+uninspectable callable: "%s.%s"' % (module_name, funcname))
else:
# not callable
return
functions[funcname] = min_args, max_args
return min_args, max_args
def visit_Call(self, node):
self.generic_visit(node)
if isinstance(node.func, ast.Name):
funcname = ''
module_alias = node.func.id
module_name = self.used_module_names.get(module_alias)
elif not isinstance(node.func, ast.Attribute):
# print("skipping call: not an attribute")
return
elif isinstance(node.func.value, ast.Name):
funcname = node.func.attr
module_alias = node.func.value.id
module_name = self.used_module_names.get(module_alias)
elif isinstance(node.func.value, ast.Attribute) and isinstance(node.func.value.value, ast.Name):
module_alias = node.func.value.value.id + '.' + node.func.value.attr
funcname = node.func.attr
module_name = self.used_module_names.get(module_alias)
if module_name is None and node.func.value.value.id in self.used_module_names:
module_name = self.used_module_names.get(node.func.value.value.id)
funcname = node.func.value.attr + '.' + node.func.attr
else:
# print("skipping call: not an 1 or 2-layer attribute: %s")
return
if module_name is None or module_name not in ModuleCallLister.KNOWN_MODULES:
#print('skipping module "%s", because not registered' % module_alias)
return
del module_alias
        if self.load_policy in ('builtins', 'none') and module_name not in self.approved_module_names:
print('skipping call into unapproved module "%s"' % module_name)
return
if ModuleCallLister.KNOWN_MODULES[module_name] is None:
print('skipping call into not loaded module "%s"' % module_name)
return
if self.get_function(module_name, funcname) is None:
sys.stderr.write('%s:%d: ERROR: "%s.%s" is not in a known module\n' % (
self.filename, node.lineno, module_name, funcname))
sys.exit(1)
signature = self.lazy_load_call(module_name, funcname)
if signature is None:
sys.stderr.write('%s:%d: ERROR: "%s.%s" is not a known function\n' % (
self.filename, node.lineno, module_name, funcname))
sys.exit(1)
return
min_args, max_args = signature
min_call_args, may_have_more = count_call_arguments(node)
if max_args >= 0 and min_call_args > max_args:
sys.stderr.write('%s:%d: ERROR: function "%s.%s" (%d..%d arguments) called with too many (%d%s) arguments\n' % (
self.filename, node.lineno, module_name, funcname,
min_args, max_args, min_call_args, '+' if may_have_more else ''))
sys.exit(1)
elif min_call_args < min_args and not may_have_more:
sys.stderr.write('%s:%d: ERROR: function "%s.%s" (%d..%d arguments) called with too few (%d%s) arguments\n' % (
self.filename, node.lineno, module_name, funcname,
min_args, max_args, min_call_args, '+' if may_have_more else ''))
sys.exit(1)
else:
print('call(%s.%s with %d%s args): OK' % (module_name, funcname, min_call_args, '+' if may_have_more else ''))
```
#### File: tests/data23/recipe-121574.py
```python
from functools import reduce
def matmult(m, v):
nrows = len(m)
w = [None] * nrows
for row in range(nrows):
w[row] = reduce(lambda x,y: x+y, list(map(lambda x,y: x*y, m[row], v)))
return w
#................................................
if __name__=='__main__':
m, n = 2, 3
vec = list(range(1, n+1))
mat = [[i*n+x+1 for x in range(n)] for i in range(m)]
print('vec=', vec)
print('mat=', mat)
print('mat . vec=', matmult(mat, vec))
```
#### File: tests/data23/recipe-197140.py
```python
def keypress():
"""
Waits for the user to press a key. Returns the ascii code
for the key pressed or zero for a function key pressed.
"""
import msvcrt
while 1:
if msvcrt.kbhit(): # Key pressed?
a = ord(msvcrt.getch()) # get first byte of keyscan code
if a == 0 or a == 224: # is it a function key?
msvcrt.getch() # discard second byte of key scan code
return 0 # return 0
else:
return a # else return ascii code
def funkeypress():
"""
Waits for the user to press any key including function keys. Returns
the ascii code for the key or the scancode for the function key.
"""
import msvcrt
while 1:
if msvcrt.kbhit(): # Key pressed?
a = ord(msvcrt.getch()) # get first byte of keyscan code
if a == 0 or a == 224: # is it a function key?
b = ord(msvcrt.getch()) # get next byte of key scan code
x = a + (b*256) # cook it.
return x # return cooked scancode
else:
return a # else return ascii code
def anykeyevent():
"""
Detects a key or function key pressed and returns its ascii or scancode.
"""
import msvcrt
if msvcrt.kbhit():
a = ord(msvcrt.getch())
if a == 0 or a == 224:
b = ord(msvcrt.getch())
x = a + (b*256)
return x
else:
return a
# -----------------------------------------------------------------------------
# demo applications.
def about(): return\
"""
Keys reported: ENTER, comma, period, greater-than, less-than.
Upper and lower case keys: A, C, H, Q.
Function Keys: F1, SHIFT-F1, CTRL-F1, ALT-F1, Left arrow,
right arrow, page up, page down.
Any other keys are assigned to print "Default"
Pressing ESC or Q will initiate exit query.
Pressing A will print this text.
"""
def keycommands(x):
if x == 13: # ENTER
print('ENTER pressed')
return True
if x in list(map(ord,'aA')): # A
print(about())
return True
if x in list(map(ord,'cC')): # C
print('Continue')
return True
if x in list(map(ord,'hH')): # H
print('HELP')
return True
if x in list(map(ord,'qQ')) or x == 27: # Q or ESC
print('Press any key to exit.')
keypress()
print('Bye')
return False
if x == ord(','): # ,
print('Comma')
return True
if x == ord('.'): # .
print('Period')
return True
if x == ord('>'): # >
print('Greater Than')
return True
if x == ord('<'): # <
print('Less Than')
return True
if x == 15104: # F1
print('F1')
return True
if x == 21504: # SHIFT-F1
print('SHIFT-F1')
return True
if x == 24064: # CTRL-F1
print('CNTRL-F1')
return True
if x == 26624: # ALT-F1
print('ALT-F1')
return True
if x == 18912: # PAGE UP
print('PAGE UP')
return True
if x == 20960: # PAGE DOWN
print('PAGE DOWN')
return True
if x == 19424: # LEFT ARROW KEY
print('LEFT ARROW KEY')
return True
if x == 19936: # RIGHT ARROW KEY
print('RIGHT ARROW KEY')
return True
print('Default') # Any remaining keys
return True
def validating(x):
if x in list(map(ord,'hH')): # query if help is needed
print('Would you like to see the help menu? <y/n>', end=' ')
if keypress() in list(map(ord,'yY')):
return ord('h') # help needed
else: return ord('c') # help not needed
if x in list(map(ord,'qQ')): # query if quitting is requested
print('Would you like to quit? <y/n>', end=' ')
if keypress() in list(map(ord,'yY')):
return ord('q') # quit
else: return ord('c') # don't quit
return x # otherwise, x is any key other than H,Q.
#################################
# The keypress interpreter demo #
#################################
def commandloop():
print('Keypress interpreter utility.')
print(about())
print('Waiting...')
interpreting=True
while interpreting:
interpreting=keycommands(validating(funkeypress()))
####################################
# The IBM scancode display utility #
####################################
def scancode():
print('IBM scancode utility.\nPress CTRL-C to quit.')
while 1:
x=funkeypress()
print('Dec: %d Hex: %x' % (x,x))
########################
# The Exit key example #
########################
def exitkey():
x = True
while x != 20448: # END key?
print('o', end=' ')
x = anykeyevent() # key?
if x == 20448 : break # if END key touched, then break.
elif x == None: continue # if no key touched, continue printing.
else: # if other key touched, prompt user.
print('\nPress END key to exit. Other key to continue printing.')
x = funkeypress()
#------------------------------------------------------------------------------
# The main loop.
if __name__ == '__main__':
while 1:
print("""Please select one from the menu:
[1] Keypress interpreter demo.
[2] IBM scancode utility.
[3] Exit key example\n""")
x=keypress()
if x == ord('1'): commandloop()
if x == ord('2'): scancode()
if x == ord('3'): exitkey()
print('Press q to quit.\n')
if keypress() in list(map(ord,'qQ')):
break
else:
continue
```
#### File: tests/data23/recipe-224980.py
```python
class AssertInit(type):
"""Assert that initializers get called.
Set this as a __metaclass__ for the root class
in a class hierarchy, and you will get AssertionError
    if one of the base class initializers isn't called.
"""
def __new__(cls, classname, bases, classdict):
# if class has no real init method, it doesn't
# matter if it isn't called
classdict['_has_real_init_'] = ('__init__' in classdict)
old_init = classdict.get('__init__', lambda self: None)
def new_init(self, *args, **kwargs):
# here selfclass refers to the class in which
# this __init__ function lies (see below this function
# definition for the definition of selfclass)
# this is code that will be executed by
# all initializers except the first one:
if hasattr(self, '_visited_bases_'):
self._visited_bases_[selfclass] = True
old_init(self, *args, **kwargs)
return
# initialize _visited_bases_ by scanning *all* superclasses
# and by creating mappings from the class object to False
# if the base class needs to be visited, True otherwise.
self._visited_bases_ = vb = {}
def recurseBases(bases):
for claz in bases:
vb[claz] = (not hasattr(claz, '_has_real_init_') or
not claz.__dict__['_has_real_init_'])
recurseBases(claz.__bases__)
recurseBases(bases)
old_init(self, *args, **kwargs)
# scan _visited_bases_ to see which base class
# initializers didn't get visited
unvisited = ['%s.%s' % (claz.__module__, claz.__name__)
for claz, visited in list(vb.items()) if not visited]
if unvisited:
fullClassName = '%s.%s' %\
(selfclass.__module__, selfclass.__name__)
raise AssertionError("Initializer%s in class%s (%s) not "
"visited when constructing object "
"from class %s" %
(len(unvisited) > 1 and 's' or '',
len(unvisited) > 1 and 'es' or '',
', '.join(unvisited),
fullClassName))
# ^def new_init
classdict['__init__'] = new_init
# the newly created class, selfclass, is referred to inside
# the new_init function, so it has to be put in a new variable
selfclass = super(AssertInit, cls).__new__\
(cls, classname, bases, classdict)
return selfclass
########### USAGE ############
def test():
class A(object, metaclass=AssertInit):
def __init__(self):
print('A init')
class B(A):
def __init__(self):
#A.__init__(self)
print('B init')
class C(A):
pass
# a separate root class needs to set the __metaclass__ properly
class G(object, metaclass=AssertInit):
def __init__(self):
print('G init')
class D(C, B, G):
def __init__(self):
B.__init__(self)
#G.__init__(self)
print('D init')
# This will raise an error for not calling two base
# class initializers: A and G.
# It properly sees that C.__init__ needs not be
# called and that B.__init__ was called.
D()
if __name__ == '__main__':
test()
```
#### File: tests/data23/recipe-252237.py
```python
import re
"""
From the SWI-Prolog manual:
dwim_match(+Atom1, +Atom2)
Succeeds if Atom1 matches Atom2 in `Do What I Mean' sense. Both
Atom1 and Atom2 may also be integers or floats. The two atoms
match if:
o They are identical
o They differ by one character (spy == spu)
o One character is inserted/deleted (debug == deug)
o Two characters are transposed (trace == tarce)
o `Sub-words' are glued differently (existsfile == existsFile
== exists_file)
o Two adjacent sub words are transposed (existsFile == fileExists)
Thanks to <NAME> for writing a Levenshtein Distance function in Py.
Thanks to <NAME> for pointing me to SWI-Prolog's DWIM algo.
"""
def dwim_match(s1, s2, degree=1): #passing a degree arg sounds nice, but is
#probably dumb... the rest of the code will
#break if degree != 1... needs to be reworked
#the easy, obvious case
if s1 == s2:
return True
#covers these cases:
# - one character of diff
# - one character inserted/deleted
if ld(s1, s2) == degree:
return True
#transposition is trickier since it's ld == 2; so, maybe:
if ld(s1, s2) == 2:
if len(s1) == len(s2):
return True #this fails on "pat" and "atp"
#the two subword cases: diff gluings; transp'd adjacents
w1 = split_words(s1)
w2 = split_words(s2)
if w1 and w2: #diff gluings
if len(w1) == len(w2):
            if set(s.lower() for s in w1) == set(s.lower() for s in w2):
return True #this may cover both subword cases!?
#for now, let's say it does....
#give up
return False
def split_words(s, other=False):
# we consider 4 word separator cases:
# "_", "-", "+", camelCase; short of running through a dictionary, I don't
# know how to do the no separator case: foobar...
if "_" in s: sep = "_"
if "-" in s: sep = "-"
if "+" in s: sep = "+"
if other and other in s:
sep = other
try:
if sep:
return s.split(sep)
except UnboundLocalError:
return case_splitter(s)
def case_splitter(s):
pattern = re.compile(r"""([A-Z][a-z]*)""")
def nullp(str):
if str != "": return True
else: return False
return list(filter(nullp, pattern.split(s)))
def ld(a, b): #stolen from m.l. hetland
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a,b = b,a
n,m = m,n
current = range(n+1)
for i in range(1,m+1):
previous, current = current, [i]+[0] * m
for j in range(1, n+1):
add, delete = previous[j] + 1, current[j-1] + 1
change = previous[j-1]
if a[j-1] != b[i-1]:
change +=1
current[j] = min(add, delete, change)
return current[n]
if __name__ == "__main__":
s1 = s2 = "foobar"
print("Testing: %s, %s" % (s1, s2))
print(dwim_match(s1, s2))
s1 = "spy"; s2 = "spu"
print("Testing: %s, %s" % (s1, s2))
print(dwim_match(s1, s2))
s1 = "debug"; s2 = "deug"
print("Testing: %s, %s" % (s1, s2))
print(dwim_match(s1, s2))
s1 = "file_exists"; s2 = "file-exists"
print("Testing: %s, %s" % (s1, s2))
print(dwim_match(s1, s2))
s1 = "file+exists"; s2 = "file-exists"
print("Testing: %s, %s" % (s1, s2))
print(dwim_match(s1, s2))
s1 = "Bartles"; s2 = "bartles"
print("Testing: %s, %s" % (s1, s2))
print(dwim_match(s1, s2))
s1 = "fileExists"; s2 = "existsFile"
print("Testing: %s, %s" % (s1, s2))
print(dwim_match(s1, s2))
s1 = "bartles"; s2 = "james"
print("Testing: %s, %s" % (s1, s2))
print(dwim_match(s1, s2))
```
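As a point of comparison (not part of the recipe above), the standard library's `difflib` already provides a "close enough" matcher, so the fuzzy-similarity part can be sketched without a hand-rolled edit distance; the candidate list below is invented for illustration:

```python
import difflib

# get_close_matches ranks candidates by a similarity ratio in [0, 1] (default cutoff 0.6)
candidates = ['debug', 'trace', 'exists_file', 'spy']
print(difflib.get_close_matches('deug', candidates))         # expected: ['debug']
print(difflib.SequenceMatcher(None, 'spy', 'spu').ratio())   # roughly 0.67
```

It does not replicate the sub-word gluing and transposition rules, only the similarity part.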
#### File: tests/data23/recipe-286132.py
```python
class MementoMetaclass(type):
cache = {}
def __call__(self, *args):
print("="*20)
print("ClassObj:", self)
print("Args:", args)
print("="*20)
cached = self.cache.get(args, None)
if not cached:
instance = type.__call__(self, *args)
self.cache.update({args:instance})
return instance
return cached
class Foo(object, metaclass=MementoMetaclass):
template = ''
def __init__(self, arg1, arg2, arg3):
self.template = arg1
a = Foo(1,2,3)
b = Foo(2,3,4)
c = Foo(1,2,3)
d = Foo(2,3,4)
e = Foo(5,6,7)
f = Foo(5,6,7)
print(id(a), id(b), id(c), id(d), id(e), id(f))
```
#### File: tests/data23/recipe-286160.py
```python
import sys
import cgi
import urllib.request, urllib.error, urllib.parse
sys.stderr = sys.stdout
HOMEPAGE = 'www.google.co.uk'
######################################################
def getform(valuelist, theform, notpresent=''):
"""This function, given a CGI form, extracts the data from it, based on
valuelist passed in. Any non-present values are set to '' - although this can be changed.
(e.g. to return None so you can test for missing keywords - where '' is a valid answer but to have the field missing isn't.)"""
data = {}
for field in valuelist:
if field not in theform:
data[field] = notpresent
else:
if type(theform[field]) != type([]):
data[field] = theform[field].value
else:
values = [x.value for x in theform[field]] # allows for list type values
data[field] = values
return data
def pagefetch(thepage):
req = urllib.request.Request(thepage)
u = urllib.request.urlopen(req)
data = u.read()
return data
###################################################
if __name__ == '__main__':
form = cgi.FieldStorage()
data = getform(['url'],form)
if not data['url']: data['url'] = HOMEPAGE
print("Content-type: text/html") # this is the header to the server
print() # so is this blank line
test = pagefetch('http://' + data['url'])
print(test)
```
#### File: tests/data23/recipe-299207.py
```python
class progressbarClass:
def __init__(self, finalcount, progresschar=None):
import sys
self.finalcount=finalcount
self.blockcount=0
#
# See if caller passed me a character to use on the
# progress bar (like "*"). If not use the block
# character that makes it look like a real progress
# bar.
#
        if not progresschar: self.block=chr(9608)  # full-block character; chr(178) was a CP437 shade glyph in the Python 2 original
else: self.block=progresschar
#
# Get pointer to sys.stdout so I can use the write/flush
# methods to display the progress bar.
#
self.f=sys.stdout
#
# If the final count is zero, don't start the progress gauge
#
if not self.finalcount : return
self.f.write('\n------------------ % Progress -------------------1\n')
self.f.write(' 1 2 3 4 5 6 7 8 9 0\n')
self.f.write('----0----0----0----0----0----0----0----0----0----0\n')
return
def progress(self, count):
#
# Make sure I don't try to go off the end (e.g. >100%)
#
count=min(count, self.finalcount)
#
# If finalcount is zero, I'm done
#
if self.finalcount:
percentcomplete=int(round(100*count/self.finalcount))
if percentcomplete < 1: percentcomplete=1
else:
percentcomplete=100
#print "percentcomplete=",percentcomplete
blockcount=int(percentcomplete/2)
#print "blockcount=",blockcount
if blockcount > self.blockcount:
for i in range(self.blockcount,blockcount):
self.f.write(self.block)
self.f.flush()
if percentcomplete == 100: self.f.write("\n")
self.blockcount=blockcount
return
if __name__ == "__main__":
from time import sleep
pb=progressbarClass(8,"*")
count=0
while count<9:
count+=1
pb.progress(count)
sleep(0.2)
pb=progressbarClass(100)
pb.progress(20)
sleep(0.2)
pb.progress(47)
sleep(0.2)
pb.progress(90)
sleep(0.2)
pb.progress(100)
print("testing 1:")
pb=progressbarClass(1)
pb.progress(1)
```
#### File: tests/data23/recipe-302498.py
```python
import re
class REstr(str):
cache = {}
    def __truediv__(self, regex):
try:
reg = REstr.cache[regex]
except KeyError:
REstr.cache[regex] = reg = re.compile(regex)
self.sre = reg.search(self)
return REstr(self.sre.group())
    def __itruediv__(self, tpl):
try:
regex, repl, count = tpl
except ValueError:
regex, repl = tpl
count = 0
try:
reg = REstr.cache[regex]
except KeyError:
REstr.cache[regex] = reg = re.compile(regex)
return REstr(reg.sub(repl, self, count))
def __call__(self, g):
return self.sre.group(g)
if __name__ == '__main__':
a = REstr('abcdebfghbij')
print("a :", a)
print("Match a / 'b(..)(..)' :", end=' ')
print(a / 'b(..)(..)') # find match
print("a[0], a[1], a[2] :", end=' ')
print(a[0], a[1], a[2]) # print letters from string
print("a(0), a(1), a(2) :", end=' ')
print(a(0), a(1), a(2)) # print matches
print("a :", a)
a /= 'b.', 'X', 1 # find and replace once
print("a :", a)
a /= 'b.', 'X' # find and replace all
print("a :", a)
```
#### File: tests/data23/recipe-302746.py
```python
import threading,queue,time,sys,traceback
#Globals (start with a captial letter)
Qin = queue.Queue()
Qout = queue.Queue()
Qerr = queue.Queue()
Pool = []
def err_msg():
trace= sys.exc_info()[2]
try:
exc_value=str(sys.exc_info()[1])
except:
exc_value=''
return str(traceback.format_tb(trace)),str(sys.exc_info()[0]),exc_value
def get_errors():
try:
while 1:
yield Qerr.get_nowait()
except queue.Empty:
pass
def process_queue():
flag='ok'
while flag !='stop':
try:
flag,item=Qin.get() #will wait here!
if flag=='ok':
newdata='new'+item
Qout.put(newdata)
except:
Qerr.put(err_msg())
def start_threads(amount=5):
for i in range(amount):
thread = threading.Thread(target=process_queue)
thread.start()
Pool.append(thread)
def put(data,flag='ok'):
Qin.put([flag,data])
def get(): return Qout.get() #will wait here!
def get_all():
try:
while 1:
yield Qout.get_nowait()
except queue.Empty:
pass
def stop_threads():
for i in range(len(Pool)):
Qin.put(('stop',None))
while Pool:
time.sleep(1)
for index,the_thread in enumerate(Pool):
            if the_thread.is_alive():
continue
else:
del Pool[index]
break
#STANDARD use:
for i in ('b','c'): put(i)
start_threads()
stop_threads()
for i in get_all(): print(i)
for i in get_errors(): print(i)
#POOL use
#put element into input queue
put('a')
#setup threads -- will run forever as a pool until you shutdown
start_threads()
for i in ('b','c'): put(i)
#get an element from output queue
print(get())
#put even more data in, 7 causes an error
for i in ('d','e',7): put(i)
#get whatever is available
for i in get_all(): print(i)
#stop_threads only returns when all threads have stopped
stop_threads()
print('__threads finished last data available__')
for i in get_all(): print(i)
for i in get_errors(): print(i)
#starting up threads again
start_threads()
put('f')
stop_threads()
print('__threads finished(again) last data available__')
for i in get_all(): print(i)
for i in get_errors(): print(i)
```
#### File: tests/data23/recipe-305306.py
```python
import re
def convert_template(template, opener='[', closer=']'):
opener = re.escape(opener)
closer = re.escape(closer)
pattern = re.compile(opener + '([_A-Za-z][_A-Za-z0-9]*)' + closer)
return re.sub(pattern, r'%(\1)s', template.replace('%','%%'))
if __name__ == '__main__':
import doctest
print('Doctest results: ', doctest.testmod())
```
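The recipe calls `doctest.testmod()` but ships no doctests, so here is a small usage sketch (the template string is just an example), assuming `convert_template` from the block above is in scope:

```python
tpl = convert_template('Hello [user], you have [count] new messages (100% sure).')
print(tpl)                                 # Hello %(user)s, you have %(count)s new messages (100%% sure).
print(tpl % {'user': 'Ada', 'count': 3})   # classic %-mapping substitution
```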
#### File: tests/data23/recipe-306705.py
```python
import sys, os, rpm
def get_rpm_info(rpm_file):
"""Returns rpm information by querying a rpm"""
ts = rpm.ts()
fdno = os.open(rpm_file, os.O_RDONLY)
try:
hdr = ts.hdrFromFdno(fdno)
except rpm.error:
fdno = os.open(rpm_file, os.O_RDONLY)
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
hdr = ts.hdrFromFdno(fdno)
os.close(fdno)
return { 'name': hdr[rpm.RPMTAG_NAME], 'ver' : "%s-%s" % (hdr[rpm.RPMTAG_VERSION],\
hdr[rpm.RPMTAG_RELEASE]), 'epoch': hdr[rpm.RPMTAG_EPOCH],\
'arch': hdr[rpm.RPMTAG_ARCH] }
if __name__ == '__main__':
blob = sys.argv[1]
rpm_info = get_rpm_info(blob)
for key in rpm_info:
print('%s:%s' % (key.ljust(11), rpm_info[key]))
```
#### File: tests/data23/recipe-355638.py
```python
def require(*types):
'''
Return a decorator function that requires specified types.
types -- tuple each element of which is a type or class or a tuple of
several types or classes.
Example to require a string then a numeric argument
@require(str, (int, long, float))
will do the trick
'''
def deco(func):
'''
Decorator function to be returned from require(). Returns a function
wrapper that validates argument types.
'''
def wrapper (*args):
'''
Function wrapper that checks argument types.
'''
assert len(args) == len(types), 'Wrong number of arguments.'
for a, t in zip(args, types):
if type(t) == type(()):
# any of these types are ok
assert sum(isinstance(a, tp) for tp in t) > 0, '''\
%s is not a valid type. Valid types:
%s
''' % (a, '\n'.join(str(x) for x in t))
assert isinstance(a, t), '%s is not a %s type' % (a, t)
return func(*args)
return wrapper
return deco
@require(int)
def inter(int_val):
print('int_val is ', int_val)
@require(float)
def floater(f_val):
print('f_val is ', f_val)
@require(str, (int, int, float))
def nameAge1(name, age):
print('%s is %s years old' % (name, age))
# another way to do the same thing
number = (int, float, int)
@require(str, number)
def nameAge2(name, age):
print('%s is %s years old' % (name, age))
nameAge1('Emily', 8) # str, int ok
nameAge1('Elizabeth', 4.5) # str, float ok
nameAge2('Romita', 9) # str, long ok
nameAge2('Emily', 'eight') # raises an exception!
```
#### File: tests/data23/recipe-363779.py
```python
import sys
import time
def logged(when):
"""Log every invocation of the function.
when -- This should be "pre" or "post". If "post", then I'll also time
the function, which may be useful for profiling.
"""
def log(f, *args, **kargs):
print("""\
Called:
function: %s
args: %s
kargs: %s""" % (repr(f), repr(args), repr(kargs)), file=sys.stderr)
def pre_logged(f):
def wrapper(*args, **kargs):
log(f, *args, **kargs)
return f(*args, **kargs)
return wrapper
def post_logged(f):
def wrapper(*args, **kargs):
start = time.time()
try:
return f(*args, **kargs)
finally:
log(f, *args, **kargs)
print("""\
time delta: %s""" % (time.time() - start), file=sys.stderr)
return wrapper
try:
return {"pre": pre_logged, "post": post_logged}[when]
except KeyError as e:
raise ValueError(e)
@logged("post")
def hello(name):
print("Hello,", name)
hello("World!")
```
#### File: tests/data23/recipe-392115.py
```python
import unittest
def makeSigDigs(num, digits, debug=False):
"""Return a numeric string with significant digits of a given number.
Arguments:
num -- a numeric value
digits -- how many significant digits (int)
debug -- boolean; set to True for verbose mode
"""
notsig = ['0', '.', '-', '+', 'e'] # not significant
pad_zeros_left = '' # zeros to pad immed. left of the decimal
pad_zeros_right = '' # zeros to pad immed. right of the decimal
pad_zeros_last = '' # zeros to pad after last number after decimal
str_left = '' # string to prepend to left of zeros and decimal
str_right = '' # string to append to right of decimal and zeros
dec = '.' # the decimal
e_idx = None
e_str = ''
num = float(num)
if debug: print("%s at %s digits:" % (repr(num), digits))
for n in repr(num):
if n not in notsig: # ignore zeros and punctuation
first_idx = repr(num).find(n) # index of first digit we care about
if debug: print("\tfirst digit at %s" % (first_idx))
break
try: first_idx # If it doesn't exist, then we're looking at 0.0
except UnboundLocalError:
return '0.0'
try: e_idx = repr(num).index('e') # get index of e if in scientific notation
except: pass
if debug: print("\te at: %s" % (e_idx))
dec_idx = repr(num).find('.') # index of the decimal
if debug: print("\tdecimal at %s" % (dec_idx))
if dec_idx < first_idx:
"""All sigdigs to right of decimal '0.033'
"""
if debug: print("\tdigits are right of decimal.")
last_idx = first_idx + digits -1
if last_idx+1 > len(repr(num)[0:e_idx]): # in case we need extra zeros at the end
pad_zeros_last = '0'*(last_idx+1 - len(repr(num)[0:e_idx]))
if e_idx and last_idx >= e_idx: # fix last_idx if it picks up the 'e'
last_idx = e_idx-1
pad_zeros_left = '0'*1
pad_zeros_right = '0'*(first_idx - dec_idx - 1)
str_right = repr(num)[first_idx:last_idx+1]
elif dec_idx > first_idx + digits - 1:
"""All sigdigs to left of decimal. '3300.0'
"""
if debug: print("\tdigits are left of decimal.")
last_idx = first_idx + digits - 1
if e_idx and last_idx >= e_idx: # fix last_idx if it picks up the 'e'
last_idx = e_idx-1
str_left = repr(num)[first_idx]
str_right = repr(num)[first_idx+1:last_idx+1]+'e+'+str(dec_idx-1-first_idx)
else:
"""Sigdigs straddle the decimal '3.300'
"""
if debug: print("\tnumber straddles decimal.")
last_idx = first_idx + digits # an extra place for the decimal
if last_idx+1 > len(repr(num)[0:e_idx]): # in case we need extra zeros at the end
pad_zeros_last = '0'*(last_idx+1 - len(repr(num)[0:e_idx]))
if e_idx and last_idx >= e_idx: # fix last_idx if it picks up the 'e'
last_idx = e_idx-1
str_left = repr(num)[first_idx:dec_idx]
str_right = repr(num)[dec_idx+1:last_idx + 1]
if e_idx:
e_str = repr(num)[e_idx:]
if debug: print("\tlast digit at %s" % (last_idx))
if debug: print("\t%s %s %s %s %s %s %s" % (str_left or '_',
pad_zeros_left or '_',
dec or '_',
pad_zeros_right or '_',
str_right or '_',
pad_zeros_last or '_',
e_str or '_'))
sig_string = str_left+pad_zeros_left+dec+pad_zeros_right+str_right+pad_zeros_last+e_str
if debug: print("\tsignificant: %s\n" % (sig_string))
return sig_string
class utMakeSigDigs(unittest.TestCase):
knownValues = [[333.333, 4, '333.3'],
[33.0, 2, '3.3e+1'],
[333.33, 2, '3.3e+2'],
[33300.00, 4, '3.330e+4'],
[0.0033333, 3, '0.00333'],
[3.3e-10, 2, '3.3e-10'],
[0.0001, 2, '0.00010'],
[3.3e-10, 3, '3.30e-10'],
[1.0000000, 6, '1.00000'],
[1.00000001591, 6, '1.00000'],
[33330000000000000000.0, 6, '3.33300e+19'],
[33330000000000000000.03, 6, '3.33300e+19']
]
def testKnownValues(self):
"""MakeSigDigs should return known values for known inputs.
"""
for el in self.knownValues:
self.assertEqual(makeSigDigs(el[0], el[1], debug=True), el[2])
if __name__ == "__main__":
unittest.main()
```
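For orientation only (not part of the recipe): Python's `g` presentation type is a built-in rough counterpart for significant digits, though its notation differs from `makeSigDigs` (it drops trailing zeros and writes `3.33e+04` rather than `3.330e+4`):

```python
# format() with '.Ng' keeps N significant digits
for value, digits in [(333.333, 4), (0.0033333, 3), (33300.0, 4)]:
    print(format(value, '.%dg' % digits))   # 333.3, 0.00333, 3.33e+04
```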
#### File: tests/data23/recipe-408762.py
```python
def timeit(*args):
"Run the timeit.main function with args, catch and parse the output."
import sys
import re
import timeit
import io
prev_stdout = sys.stdout
sys.stdout = io.StringIO()
timeit.main(args)
out = sys.stdout.getvalue()
sys.stdout = prev_stdout
# Parse the output, and apply our own formatting
match = re.search(r"(\d+\.\d*|\d+) usec", out)
time = float(match.group(1))
print("%8.2f us: %s" % (time, args[-1]))
if __name__ == "__main__":
timeit("object()")
timeit("list()")
timeit("[]")
timeit("int()")
timeit("-s", "rng = range(32)",
"[i for i in rng] # a list comp")
timeit("-s", "class ClassicClass: pass",
"ClassicClass() # create a classic class instance")
timeit("-s", "class NewStyleClass(object): pass",
"NewStyleClass() # create a new style class instance")
```
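For comparison (not part of the recipe): `timeit.timeit()` returns the total time in seconds directly, so the stdout capture and regex parsing can be skipped when all that is wanted is a number. Same toy statements as above:

```python
import timeit

for stmt in ('object()', 'list()', '[]', 'int()'):
    seconds = timeit.timeit(stmt, number=1000000)
    # total seconds for one million runs is numerically the microseconds per run
    print('%8.2f us: %s' % (seconds, stmt))
```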
#### File: tests/data23/recipe-415903.py
```python
class ReverseDict(dict):
"""
A dictionary which can lookup values by key, and keys by value.
All values and keys must be hashable, and unique.
"""
def __init__(self, *args, **kw):
dict.__init__(self, *args, **kw)
self.reverse = dict((reversed(list(i)) for i in list(self.items())))
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.reverse[value] = key
class LookupDict(dict):
"""
A dictionary which can lookup values by key, and keys by value.
The lookup method returns a list of keys with matching values
to the value argument.
"""
def __init__(self, *args, **kw):
dict.__init__(self, *args, **kw)
def lookup(self, value):
return [item[0] for item in list(self.items()) if item[1] == value]
if __name__ == "__main__":
a = ReverseDict(((1,2),(3,4)))
print(a[1])
print(a.reverse[2])
a["123"] = 67
print(a["123"])
print(a.reverse[67])
print("-" * 20)
b = LookupDict(((1,2),(3,4),(4,2)))
print(b.lookup(2))
```
#### File: tests/data23/recipe-425607.py
```python
import datetime # a thing of beauty and a joy forever
FIRST = 0
SECOND = 1
THIRD = 2
FOURTH = FORTH = 3 # for people who have finger trouble
FIFTH = 4
LAST = -1
SECONDLAST = -2
THIRDLAST = -3
MONDAY = MON = 0
TUESDAY = TUE = TUES = 1
WEDNESDAY = WED = 2
THURSDAY = THU = THUR = 3
FRIDAY = FRI = 4
SATURDAY = SAT = 5
SUNDAY = SUN = 6
JANUARY = JAN = 1
FEBRUARY = FEB = 2
MARCH = MAR = 3
APRIL = APR = 4
MAY = 5
JUNE = JUN = 6
JULY = JUL = 7
AUGUST = AUG = 8
SEPTEMBER = SEP = 9
OCTOBER = OCT = 10
NOVEMBER = NOV = 11
DECEMBER = DEC = 12
def dow_date_finder(which_weekday_in_month=FIRST,day=MONDAY,month=JANUARY,year=2000):
dt = datetime.date(year,month,1)
dow_lst = []
while dt.weekday() != day:
dt = dt + datetime.timedelta(days=1)
while dt.month == month:
dow_lst.append(dt)
dt = dt + datetime.timedelta(days=7)
return dow_lst[which_weekday_in_month] # may raise an exception if slicing is wrong
if __name__ == "__main__":
print("2nd tuesday of may 2005")
print(dow_date_finder(SECOND,TUESDAY,MAY,2005))
print("last wednesday of april 2005")
print(dow_date_finder(LAST,WEDNESDAY,APRIL,2005))
print("secondlast friday of october 2005 - short form")
print(dow_date_finder(SECONDLAST,FRI,OCT,2005))
```
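A sanity check that can be run against the recipe (not part of it): `calendar.monthcalendar()` lays a month out week by week with zeros as padding, so the n-th weekday falls out of a short list comprehension; `nth_weekday` below is a hypothetical helper, not the recipe's API:

```python
import calendar

def nth_weekday(which, weekday, month, year):
    # collect the non-zero entries in the weekday column, then index into them
    days = [week[weekday] for week in calendar.monthcalendar(year, month) if week[weekday]]
    return days[which]

print(nth_weekday(1, calendar.TUESDAY, 5, 2005))   # 2nd Tuesday of May 2005 -> 10
```

Negative indices (-1, -2) give the recipe's LAST/SECONDLAST behaviour for free.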
#### File: tests/data23/recipe-440504.py
```python
import shelve
import time
class collection(object):
def __init__(self, db):
self.db = db
def __call__(self, objects):
# get the current time in seconds
now = time.time()
# create/open the shelve db
d = shelve.open(self.db, 'c')
# find & remove the missing items from the collection
removed = [k for k in d if k not in objects]
for remove in removed:
d.pop(remove)
# find & add new items to the collection
added = [k for k in objects if k not in d]
for obj in added:
d[obj] = now
# build a list of tuples (item + age in seconds)
items = [(k, int((now - d[k]))) for k in d]
d.close()
return removed, added, items
if __name__ == "__main__":
"""
below is just a cooked up sample of how the collection
object can be used
"""
mycollection = collection('mycollection.db')
removed, added, items = mycollection(('b','c','d'))
if removed:
print("\nitem(s) removed from the collection:\n")
for item in removed:
print("\t%s" % item)
if added:
print("\nitem(s) added to the collection:\n")
for item in added:
print("\t%s" % item)
if items:
print("\nitem(s):\n")
for item in items:
i, age = item
print("\titem: %-12s age: %s" % (i,age))
```
#### File: tests/data23/recipe-473784.py
```python
class CustomDict( dict ):
#---------------------------------------------------------------------------
defaultValue = 'THIS ITEM NOT AVAILABLE'
#---------------------------------------------------------------------------
def __getitem__( self, name ):
try:
return super( CustomDict, self ).__getitem__( name )
except KeyError:
return self.defaultValue
#---------------------------------------------------------------------------
def __contains__( self, name ):
return True
#---------------------------------------------------------------------------
def has_key( self, name ):
return True
################################################################################
#
#
#
#===============================================================================
class X( object ):
#---------------------------------------------------------------------------
def __init__( self ):
self._dict = CustomDict( foo = 'bar' )
#---------------------------------------------------------------------------
@property
def __dict__( self ):
#print 'X.__dict__ ( get() )'
return self._dict
#---------------------------------------------------------------------------
def __getattr__( self, name ):
return self.__dict__[ name ]
#---------------------------------------------------------------------------
def __setattr__( self, name, value ):
if name == '_dict':
return super( X, self ).__setattr__( name, value )
self._dict[ name ] = value
################################################################################
#
#
#
#===============================================================================
if __name__ == '__main__':
x = X()
print(x.__dict__[ 'foo' ])
print(x.__dict__[ 'bar' ])
print(x.foo)
print(x.bar)
print(x.__dict__)
x.oops = 42
print(x.__dict__)
# Output:
# bar
# THIS ITEM NOT AVAILABLE
# bar
# THIS ITEM NOT AVAILABLE
# {'foo': 'bar'}
# {'foo': 'bar', 'oops': 42}
```
#### File: tests/data23/recipe-473800.py
```python
import os, win32api, win32con
def getenv_system(varname, default=''):
v = default
try:
rkey = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment')
try:
v = str(win32api.RegQueryValueEx(rkey, varname)[0])
v = win32api.ExpandEnvironmentStrings(v)
except:
pass
finally:
win32api.RegCloseKey(rkey)
return v
print('SYSTEM.TEMP => %s' % getenv_system('TEMP'))
print('USER.TEMP => %s' % os.getenv('TEMP'))
```
#### File: tests/data23/recipe-473893.py
```python
n = 3 # Size of inner region
n2, n3, n4 = n**2, n**3, n**4
def show(flatline):
'Display grid from a string (values in row major order with blanks for unknowns)'
fmt = '|'.join(['%s' * n] * n)
sep = '+'.join(['-' * n] * n)
for i in range(n):
for j in range(n):
offset = (i*n+j)*n2
print(fmt % tuple(flatline[offset:offset+n2]))
if i != n-1:
print(sep)
def _find_friends(cell):
'Return tuple of cells in same row, column, or subgroup'
friends = set()
row, col = cell // n2, cell % n2
friends.update(row * n2 + i for i in range(n2))
friends.update(i * n2 + col for i in range(n2))
nw_corner = row // n * n3 + col // n * n
friends.update(nw_corner + i + j for i in range(n) for j in range(0,n3,n2))
friends.remove(cell)
return tuple(friends)
friend_cells = list(map(_find_friends, list(range(n4))))
def select_an_unsolved_cell(possibles, heuristic=min):
# Default heuristic: select cell with fewest possibilities
# Other possible heuristics include: random.choice() and max()
return heuristic((len(p), cell) for cell, p in enumerate(possibles) if len(p)>1)[1]
def solve(possibles, pending_marks):
# Apply pending_marks (list of cell,value pairs) to possibles (list of str).
# Mutates both inputs. Return solution as a flat string (values in row-major order)
# or return None for dead-ends where all possibilites have been eliminated.
for cell, v in pending_marks:
possibles[cell] = v
for f in friend_cells[cell]:
p = possibles[f]
if v in p:
p = possibles[f] = p.replace(v, '') # exclude value v from friend f
if not p:
return None # Dead-end: all possibilities eliminated
if len(p) == 1:
pending_marks.append((f, p[0]))
# Check to see if the puzzle is fully solved (each cell has only one possible value)
if max(list(map(len, possibles))) == 1:
return ''.join(possibles)
# If it gets here, there are still unsolved cells
cell = select_an_unsolved_cell(possibles)
for v in possibles[cell]: # try all possible values for that cell
ans = solve(possibles[:], [(cell, v)])
if ans is not None:
return ans
# ----- Examples -----
for given in [
'53 7 6 195 98 6 8 6 34 8 3 17 2 6 6 28 419 5 8 79',
' 75 4 5 8 17 6 36 2 7 1 5 1 1 5 8 96 1 82 3 4 9 48 ',
' 9 7 4 1 6 2 8 1 43 6 59 1 3 97 8 52 7 6 8 4 7 5 8 2 ',
'67 38 921 85 736 1 8 4 7 5 1 8 4 2 6 8 5 175 24 321 61 84',
'27 15 8 3 7 4 7 5 1 7 9 2 6 2 5 8 6 5 4 8 59 41',
]:
show(given)
pending_marks = [(i,v) for i, v in enumerate(given) if v != ' ']
possibles = ['123456789'] * len(given)
result = solve(possibles, pending_marks)
print()
show(result)
print('=-' * 20)
```
#### File: tests/data23/recipe-475181.py
```python
import sys
import subprocess

def popen3(cmd):
    # the popen2 module was removed in Python 3; emulate its (stdout, stdin, stderr) tuple with subprocess
    p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         universal_newlines=True)
    return p.stdout, p.stdin, p.stderr
class FetchPhotos:
bin = "gnokii"
dir = "A:\predefgallery\predefphotos\\"
dest = "."
file_list = []
def __init__(self, **kwargs):
if "bin" in kwargs:
self.bin = kwargs["bin"]
if "dir" in kwargs:
self.dir = kwargs["dir"]
if "dest" in kwargs:
self.dest = kwargs["dest"]
def fetchList(self):
(stdout, stdin, stderr) = popen3("%s --getfilelist '%s*.*'" % (self.bin, self.dir))
list = stdout.readlines()
# Useless gnokii prompt
del list[0]
# Get rid of whitespaces at the ends of the file name
self.file_list = [x.strip() for x in list]
def fetchPhoto(self, p):
print("Fetching file %s..." % p)
(stdout, stdin, stderr) = popen3("%s --getfile '%s%s' '%s/%s'" % (self.bin,
self.dir, p, self.dest, p))
# Make it blocking, so the program will wait for gnokii
stdout.read(1)
def fetchAll(self):
for i in self.file_list:
self.fetchPhoto(i)
if __name__ == "__main__":
if len(sys.argv) == 2:
o = FetchPhotos(dest=sys.argv[1])
else:
o = FetchPhotos()
o.fetchList()
o.fetchAll()
```
#### File: tests/data23/recipe-491264.py
```python
import socket
class DNSQuery:
def __init__(self, data):
self.data=data
self.dominio=''
tipo = (ord(data[2]) >> 3) & 15 # Opcode bits
if tipo == 0: # Standard query
ini=12
lon=ord(data[ini])
while lon != 0:
self.dominio+=data[ini+1:ini+lon+1]+'.'
ini+=lon+1
lon=ord(data[ini])
def respuesta(self, ip):
packet=''
if self.dominio:
packet+=self.data[:2] + "\x81\x80"
packet+=self.data[4:6] + self.data[4:6] + '\x00\x00\x00\x00' # Questions and Answers Counts
packet+=self.data[12:] # Original Domain Name Question
packet+='\xc0\x0c' # Pointer to domain name
packet+='\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04' # Response type, ttl and resource data length -> 4 bytes
packet+=str.join('',[chr(int(x)) for x in ip.split('.')]) # 4bytes of IP
return packet
if __name__ == '__main__':
ip='192.168.1.1'
print('pyminifakeDNS:: dom.query. 60 IN A %s' % ip)
udps = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udps.bind(('',53))
try:
while 1:
data, addr = udps.recvfrom(1024)
p=DNSQuery(data)
udps.sendto(p.respuesta(ip), addr)
print('Respuesta: %s -> %s' % (p.dominio, ip))
except KeyboardInterrupt:
print('Finalizando')
udps.close()
```
#### File: tests/data23/recipe-491280.py
```python
def example_BackgroundCall():
import urllib.request, urllib.parse, urllib.error,time
def work():
return urllib.request.urlopen('http://www.python.org/').read()
bkcall=BackgroundCall(work)
print('work() executing in background ...')
while not bkcall.is_done():
print('.', end=' ')
time.sleep(0.010)
print('done.')
print(bkcall.get_return()[:500])
import sys
from time import time as _time, sleep as _sleep
class Full(Exception):pass
class Empty(Exception):pass
class BackgroundCall:
"""BackgroundCall
Example:
bkcall=BackgroundCall( time_consuming_function )
...
if bkcall.is_done():
print "got", bkcall.get_return()
"""
id=None
done=0 #1=returned; 2=exception raised
def __init__(self, func, args=(), kwargs={}):
import _thread
def thread_bkcall():
try:
self.ret=func(*args, **kwargs)
self.done=1
except:
self.exc=sys.exc_info()
self.done=2
self.id=_thread.start_new(thread_bkcall, ())
def is_done(self):
return self.done
def get_return(self, wait=1, timeout=None, raise_exception=1, alt_return=None):
"""delivers the return value or (by default) echoes the exception of
the call job
wait: 0=no waiting; Attribute error raised if no
1=waits for return value or exception
callable -> waits and wait()-call's while waiting for return
"""
if not self.done and wait:
starttime=_time()
delay=0.0005
while not self.done:
if timeout:
remaining = starttime + timeout - _time()
if remaining <= 0: #time is over
if raise_exception:
raise Empty("return timed out")
else:
return alt_return
delay = min(delay * 2, remaining, .05)
else:
delay = min(delay * 2, .05)
if callable(wait): wait()
_sleep(delay) #reduce CPU usage by using a sleep
if self.done==2: #we had an exception
exc=self.exc
del self.exc
if raise_exception & 1: #by default exception is raised
raise exc[0](exc[1]).with_traceback(exc[2])
else:
return alt_return
return self.ret
def get_exception(self):
return self.exc
if __name__=='__main__':
example_BackgroundCall()
```
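Outside the recipe: since Python 3.2 the same fire-and-poll pattern is available from `concurrent.futures`, which also re-raises the worker's exception from `result()`. A rough sketch with a trivial work function standing in for the urllib fetch:

```python
import time
from concurrent.futures import ThreadPoolExecutor

def work():
    time.sleep(0.1)
    return 'done'

with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(work)       # runs work() in a background thread
    while not future.done():         # poll, like BackgroundCall.is_done()
        print('.', end=' ')
        time.sleep(0.01)
    print(future.result())           # return value, or the worker's exception re-raised
```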
#### File: tests/data23/recipe-511429.py
```python
import os
def main():
try:
while True:
while True:
mode = input('Mode: ').lower()
if 'search'.startswith(mode):
mode = False
break
elif 'destroy'.startswith(mode):
mode = True
break
print('"search" or "destroy"')
path = input('Path: ')
extention = input('Extention: ')
for path_name in search(path, extention, mode):
print('Found:', path_name)
except:
pass
def search(path, extention, destroy):
assert os.path.isdir(path)
path_list = list()
for name in os.listdir(path):
path_name = os.path.join(path, name)
try:
if os.path.isdir(path_name):
path_list += search(path_name, extention, destroy)
elif os.path.isfile(path_name):
if path_name.endswith(extention) or not extention:
if destroy:
os.remove(path_name)
else:
path_list.append(path_name)
except:
print('Error:', path_name)
return path_list
if __name__ == '__main__':
main()
```
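Not part of the recipe: the recursive search half can be written with `pathlib`'s `rglob`, and the destroy half would then be a plain `unlink()`. The extension is assumed to include its leading dot; the path is a placeholder:

```python
from pathlib import Path

def search_paths(path, extension):
    # rglob('*') walks the tree; the suffix test mirrors endswith(extension)
    return [p for p in Path(path).rglob('*')
            if p.is_file() and (not extension or p.suffix == extension)]

print(search_paths('.', '.py'))
```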
#### File: tests/data23/recipe-511490.py
```python
from time import time
class TokenBucket(object):
"""An implementation of the token bucket algorithm.
>>> bucket = TokenBucket(80, 0.5)
>>> print bucket.consume(10)
True
>>> print bucket.consume(90)
False
"""
def __init__(self, tokens, fill_rate):
"""tokens is the total tokens in the bucket. fill_rate is the
rate in tokens/second that the bucket will be refilled."""
self.capacity = float(tokens)
self._tokens = float(tokens)
self.fill_rate = float(fill_rate)
self.timestamp = time()
def consume(self, tokens):
"""Consume tokens from the bucket. Returns True if there were
sufficient tokens otherwise False."""
if tokens <= self.tokens:
self._tokens -= tokens
else:
return False
return True
def get_tokens(self):
if self._tokens < self.capacity:
now = time()
delta = self.fill_rate * (now - self.timestamp)
self._tokens = min(self.capacity, self._tokens + delta)
self.timestamp = now
return self._tokens
tokens = property(get_tokens)
if __name__ == '__main__':
from time import sleep
bucket = TokenBucket(80, 1)
print("tokens =", bucket.tokens)
print("consume(10) =", bucket.consume(10))
print("consume(10) =", bucket.consume(10))
sleep(1)
print("tokens =", bucket.tokens)
sleep(1)
print("tokens =", bucket.tokens)
print("consume(90) =", bucket.consume(90))
print("tokens =", bucket.tokens)
```
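A usage sketch beyond the `__main__` demo, assuming the `TokenBucket` class above is in scope: throttling a producer to roughly `fill_rate` items per second by sleeping whenever `consume()` reports an empty bucket.

```python
from time import sleep

bucket = TokenBucket(tokens=5, fill_rate=2)   # burst of 5, then about 2 per second
for item in range(10):
    while not bucket.consume(1):              # out of tokens: wait for the refill
        sleep(0.05)
    print('sending item', item)
```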
#### File: tests/data23/recipe-521901.py
```python
import pickle
class Upgradable:
class_version = '3.0'
def __init__(self):
self.version = self.__class__.class_version
self.new_attr = 42
def pickle(self):
return pickle.dumps(self)
def new_method(self):
'''
Return the answer to life the universe and everything.
Would normally break pickles prior to the introduction of new_attr.
'''
return self.new_attr
@staticmethod
def unpickle(data):
out = pickle.loads(data)
if not hasattr(out, 'version'):
out.version = '0.0'
if out.version != out.class_version:
out.upgrade()
return out
def upgrade(self):
version = float(self.version)
print('upgrade from version %s' % self.version)
if version < 1.:
self.version = '1.0'
print('upgrade to version 1.0')
if version < 2.:
self.version = '2.0'
print('upgrade to version 2.0')
if version < 3.:
self.version = '3.0'
self.new_attr = 42
print('upgrade to version 3.0')
def __test__():
ug0 = Upgradable()
# downgrade to lower version
del ug0.version
del ug0.new_attr
try:
# breaks old pickles!
ug0.new_method()
raise Exception('Should have raised AttributeError!')
except AttributeError:
pass
# check to see if automatic upgrade works
ug3 = ug0.unpickle(ug0.pickle())
assert ug3.version == '3.0'
assert ug3.new_attr == 42
assert ug3.new_method() == 42
__test__()
```
#### File: tests/data23/recipe-521914.py
```python
from contextlib import contextmanager
@contextmanager
def Switch():
D = {}
class _P(Exception): pass
def _mkCase(var):
class _PP(_P):
V = var
def __repr__(self):
return str(self.V)
D[var]=_PP
return _PP
def switch(var):
if var in D:
raise D[var]()
raise _mkCase(var)()
def case(var):
if var in D:
return D[var]
return _mkCase(var)
def default():
return _P
yield switch, case, default
if __name__=="__main__":
def test1():
with Switch() as (switch, case, default):
try: switch(55)
except case(1):
print(1)
except case(6):
print(6)
except case(5):
print(5)
except default():
print('default..')
def test2():
with Switch() as (switch, case, default):
try:switch('hola')
except case(1):
print(1)
except case('holaS'):
print('holaS')
except case('hola'):
print('hola')
except default():
print('default..')
test1()
test2()
```
#### File: tests/data23/recipe-52304.py
```python
class Class1:
def static1(name):
print("Hello",name)
# ...but now, a call such as:
Class1.static1("John")
# will fail with a TypeError, as 'static1' has become
# an unbound-method object, not a plain function.
# This is easy to solve with a simple tiny wrapper:
class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
# toy-example usage:
class Class2:
def static2(name):
print("Hi there",name)
static2 = Callable(static2)
# now, a call such as:
Class2.static2("Peter")
# works just fine, and as-expected
```
#### File: tests/data23/recipe-52305.py
```python
"Yet Another Python Templating Utility, Version 1.2"
import sys
# utility stuff to avoid tests in the mainline code
class _nevermatch:
"Polymorphic with a regex that never matches"
def match(self, line):
return None
_never = _nevermatch() # one reusable instance of it suffices
def identity(string, why):
"A do-nothing-special-to-the-input, just-return-it function"
return string
def nohandle(string):
"A do-nothing handler that just re-raises the exception"
raise
# and now the real thing
class copier:
"Smart-copier (YAPTU) class"
def copyblock(self, i=0, last=None):
"Main copy method: process lines [i,last) of block"
def repl(match, self=self):
"return the eval of a found expression, for replacement"
# uncomment for debug: print '!!! replacing',match.group(1)
expr = self.preproc(match.group(1), 'eval')
try: return str(eval(expr, self.globals, self.locals))
except: return str(self.handle(expr))
block = self.locals['_bl']
if last is None: last = len(block)
while i<last:
line = block[i]
match = self.restat.match(line)
if match: # a statement starts "here" (at line block[i])
# i is the last line to _not_ process
stat = match.string[match.end(0):].strip()
j=i+1 # look for 'finish' from here onwards
nest=1 # count nesting levels of statements
while j<last:
line = block[j]
# first look for nested statements or 'finish' lines
if self.restend.match(line): # found a statement-end
nest = nest - 1 # update (decrease) nesting
if nest==0: break # j is first line to _not_ process
elif self.restat.match(line): # found a nested statement
nest = nest + 1 # update (increase) nesting
elif nest==1: # look for continuation only at this nesting
match = self.recont.match(line)
if match: # found a contin.-statement
nestat = match.string[match.end(0):].strip()
stat = '%s _cb(%s,%s)\n%s' % (stat,i+1,j,nestat)
i=j # again, i is the last line to _not_ process
j=j+1
stat = self.preproc(stat, 'exec')
stat = '%s _cb(%s,%s)' % (stat,i+1,j)
# for debugging, uncomment...: print "-> Executing: {"+stat+"}"
exec(stat, self.globals,self.locals)
i=j+1
else: # normal line, just copy with substitution
self.ouf.write(self.regex.sub(repl,line))
i=i+1
def __init__(self, regex=_never, dict={},
restat=_never, restend=_never, recont=_never,
preproc=identity, handle=nohandle, ouf=sys.stdout):
"Initialize self's attributes"
self.regex = regex
self.globals = dict
self.locals = { '_cb':self.copyblock }
self.restat = restat
self.restend = restend
self.recont = recont
self.preproc = preproc
self.handle = handle
self.ouf = ouf
def copy(self, block=None, inf=sys.stdin):
"Entry point: copy-with-processing a file, or a block of lines"
if block is None: block = inf.readlines()
self.locals['_bl'] = block
self.copyblock()
if __name__=='__main__':
"Test: copy a block of lines, with full processing"
import re
rex=re.compile('@([^@]+)@')
rbe=re.compile('\+')
ren=re.compile('-')
rco=re.compile('= ')
x=23 # just a variable to try substitution
cop = copier(rex, globals(), rbe, ren, rco)
lines_block = [line+'\n' for line in """
A first, plain line -- it just gets copied.
A second line, with @x@ substitutions.
+ x+=1 # non-block statements MUST end with comments
-
Now the substitutions are @x@.
+ if x>23:
After all, @x@ is rather large!
= else:
After all, @x@ is rather small!
-
+ for i in range(3):
Also, @i@ times @x@ is @i*x@.
-
One last, plain line at the end.""".split('\n')]
print("*** input:")
print(''.join(lines_block))
print("*** output:")
cop.copy(lines_block)
```
#### File: tests/data23/recipe-543271.py
```python
def _posmax_psy(seq, key=None):
"""posmax(seq, key=None): return the position of the first maximum
item of a sequence. Accepts the usual key parameter too.
>>> posmax([])
Traceback (most recent call last):
...
ValueError: maxpos() arg is an empty sequence
>>> posmax([1])
0
>>> posmax(xrange(100))
99
>>> posmax(xrange(100, 0, -1))
0
>>> posmax([1,5,0,4,3])
1
>>> posmax([1,5,0,4,5,3])
1
>>> l = ['medium', 'longest', 'short']
>>> posmax(l)
2
>>> posmax(l, key=len)
1
>>> posmax(xrange(10**4))
9999
>>> posmax([2,4,-2,[4],21]) # silly comparison
3
>>> posmax([2,4,-2+3J,[4],21])
Traceback (most recent call last):
...
TypeError: no ordering relation is defined for complex numbers
"""
first = True
max_pos = 0
pos = 0
if key is None:
for el in seq:
if first:
max_el = el
first = False
elif el > max_el:
max_el = el
max_pos = pos
pos += 1
else:
for el in seq:
key_el = key(el)
if first:
max_key_el = key_el
first = False
elif key_el > max_key_el:
max_key_el = key_el
max_pos = pos
pos += 1
if first:
raise ValueError("maxpos() arg is an empty sequence")
else:
return max_pos
def _posmax_nopsy(seq, key=None):
"""posmax(seq, key=None): return the position of the first maximum
item of a sequence. Accepts the usual key parameter too.
>>> posmax([])
Traceback (most recent call last):
...
ValueError: maxpos() arg is an empty sequence
>>> posmax([1])
0
>>> posmax(xrange(100))
99
>>> posmax(xrange(100, 0, -1))
0
>>> posmax([1,5,0,4,3])
1
>>> posmax([1,5,0,4,5,3])
1
>>> l = ['medium', 'longest', 'short']
>>> posmax(l)
2
>>> posmax(l, key=len)
1
>>> posmax(xrange(10**4))
9999
>>> posmax([2,4,-2,[4],21]) # silly comparison
3
>>> posmax([2,4,-2+3J,[4],21])
Traceback (most recent call last):
...
TypeError: no ordering relation is defined for complex numbers
"""
first = True
max_pos = 0
if key is None:
for pos, el in enumerate(seq):
if first:
max_el = el
first = False
elif el > max_el:
max_el = el
max_pos = pos
else:
for pos, el in enumerate(seq):
key_el = key(el)
if first:
max_key_el = key_el
first = False
elif key_el > max_key_el:
max_key_el = key_el
max_pos = pos
if first:
raise ValueError("maxpos() arg is an empty sequence")
else:
return max_pos
def _posmax_benchmark1():
    from time import perf_counter as clock  # time.clock was removed in Python 3.8
alist = [3]*1000 + [5] + [3]*1000
t = clock()
for _ in range(60000):
r = posmax(alist)
print(round(clock() - t, 2), r)
try:
import psyco
psyco.full()
posmax = _posmax_psy
except ImportError:
posmax = _posmax_nopsy
if __name__ == "__main__":
import doctest
doctest.testmod()
print("Doctests finished.\n")
_posmax_benchmark1()
```
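For reference (not part of the recipe): `max()` over `enumerate()` yields the same "position of the first maximum", because `max()` keeps the first item among equal keys. A compact sketch:

```python
from operator import itemgetter

def posmax_short(seq, key=None):
    # ties resolve to the earliest position, matching the recipe's behaviour
    k = (lambda iv: key(iv[1])) if key else itemgetter(1)
    return max(enumerate(seq), key=k)[0]

print(posmax_short([1, 5, 0, 4, 5, 3]))                       # 1
print(posmax_short(['medium', 'longest', 'short'], key=len))  # 1
```

An empty sequence raises ValueError here too, just with max()'s wording instead of the recipe's.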
#### File: tests/data23/recipe-576418.py
```python
import threading
import time
class ThreadGateException(Exception):
pass
class ThreadGate(object):
""" A class which works as a FIFO 'gate' for threads. By
default the gate is open and any thread calling on the
'enter' method returns immediately.
A thread can 'close' the gate by calling the method 'close'.
This thread becomes the owner of the gate. Any other thread
calling 'enter' after this is automatically blocked till
    the owner reopens the gate by calling 'open'.
The gate requires a certain number of threads to block
before the owner exits from the 'close' method. Otherwise,
the owner waits for a timeout before returning from the 'close'
method, without actually closing the gate.
The gate class can be used to block running threads for a
particular operation and making sure that they resume after
the operation is complete, for a fixed number of threads.
"""
def __init__(self, numthreads, timeout=0):
self.lock = threading.Lock()
self.sem = threading.BoundedSemaphore(1)
self.evt = threading.Event()
self.count = 0
self.owner_timeout = timeout
self.owner = None
self.nthreads = numthreads
# Open by default
self.position = 1
def close(self):
""" Close the gate. The calling thread
becomes the owner of the gate and blocks
till the requisite number of threads block
on the gate or a timeout occurs, whichever
is first.
It is an error if the gate is already closed """
if self.position == 0:
# Already closed
raise ThreadGateException("trying to close an already closed gate")
try:
self.lock.acquire()
self.position = 0
self.owner = threading.currentThread()
self.sem.acquire()
finally:
self.lock.release()
# Wait on the event till timeout
self.evt.clear()
self.evt.wait(self.owner_timeout)
        # If the event was set, the requisite number of
        # threads have blocked; otherwise reset the gate
if not self.evt.isSet():
try:
                print('Owner thread timed out, re-setting gate')
self.lock.acquire()
self.position = 1
self.owner = None
self.sem.release()
finally:
self.lock.release()
return -1
return 0
def open(self):
""" Open the gate. The calling thread should
be the owner of the gate. It is an error if
the gate is already open """
if self.position == 1:
# Already open
raise ThreadGateException("trying to open an already opened gate")
if threading.currentThread() != self.owner:
raise ThreadGateException("not owner, cannot open gate")
try:
self.lock.acquire()
self.position = 1
self.owner = None
self.sem.release()
finally:
self.lock.release()
def enter(self):
""" Enter the gate. If the gate is open, returns
immediately, else gets blocked till the gate is
opened by the owner """
if self.position==1:
return 0
# Lock mutex and increment count
try:
self.lock.acquire()
self.count += 1
if self.count==self.nthreads:
self.evt.set()
finally:
self.lock.release()
ct = threading.currentThread()
print('Thread %s - Entering Gate' % (ct.getName()))
# Lock mutex and decrement count
try:
# Will block here
self.sem.acquire()
self.lock.acquire()
self.count -= 1
finally:
self.lock.release()
self.sem.release()
print('Thread %s - Exiting Gate' % (ct.getName()))
def get_count(self):
""" Return count of blocked threads """
return self.count
def test():
""" Test code """
import random
import queue
enterlog = queue.Queue(0)
exitlog = queue.Queue(0)
def enter(index):
enterlog.put(index)
def exit(index):
exitlog.put(index)
class OwnerThread(threading.Thread):
""" Owner thread class for gate demo """
def __init__(self, gate):
self.gate = gate
threading.Thread.__init__(self, None, 'Owner thread')
def run(self):
# Close the gate
print('Closing gate...')
ret = self.gate.close()
if ret==0:
print('Gate closed successfully')
print('Gate count=>',self.gate.get_count())
# Open gate after sleeping some time
time.sleep(5)
if ret==0:
print('Opening gate')
self.gate.open()
else:
print('Gate closing not successful')
class SampleThread(threading.Thread):
""" Sample thread class for gate demo """
def __init__(self, index, gate):
self.gate = gate
self.index = index
threading.Thread.__init__(self, None, 'Thread %d' % self.index, None)
def run(self):
# Sleep randomly
time.sleep(random.choice(list(range(1,10))))
# Mark entry to gate
enter(self.index)
self.gate.enter()
# Mark exit out of gate
exit(self.index)
def test1():
""" Test code where gate is closed successfully """
print('test1()...')
gate = ThreadGate(10, 20)
random.seed()
print('Starting threads...')
# Create 10 threads
threads = []
threads.append(OwnerThread(gate))
for x in range(10):
threads.append(SampleThread(x, gate))
# Start threads and join
for x in range(11):
threads[x].start()
# Join with threads
for x in range(11):
threads[x].join()
print('Joined with threads')
print('Gate count=>',gate.get_count())
# Exit and entry logs must be same
print(enterlog)
print(exitlog)
def test2():
""" Test code where gate is closed unsuccessfully """
        print('test2()...')
gate = ThreadGate(10, 5)
random.seed()
print('Starting threads...')
# Create 10 threads
threads = []
threads.append(OwnerThread(gate))
for x in range(10):
threads.append(SampleThread(x, gate))
# Start threads and join
for x in range(11):
threads[x].start()
# Join with threads
for x in range(11):
threads[x].join()
print('Joined with threads')
print('Gate count=>',gate.get_count())
# Exit and entry logs must be same
print(enterlog)
print(exitlog)
test1()
while not enterlog.empty():
print(enterlog.get(), end=' ')
print()
while not exitlog.empty():
print(exitlog.get(), end=' ')
print()
test2()
while not enterlog.empty():
print(enterlog.get(), end=' ')
print()
while not exitlog.empty():
print(exitlog.get(), end=' ')
print()
if __name__ == "__main__":
test()
```
#### File: tests/data23/recipe-576483.py
```python
import sys
def usage():
print("Call : %s <BitCount>" % sys.argv[0])
print(" shows the dotted netmask (i.e %s 24 => 255.255.255.0)" % sys.argv[0])
def calcDottedNetmask(mask):
bits = 0
for i in range(32-mask,32):
bits |= (1 << i)
return "%d.%d.%d.%d" % ((bits & 0xff000000) >> 24, (bits & 0xff0000) >> 16, (bits & 0xff00) >> 8 , (bits & 0xff))
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1].isdigit():
print(calcDottedNetmask(int(sys.argv[1])))
else:
usage()
```
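Not part of the recipe: since Python 3.3 the `ipaddress` module performs the same prefix-length-to-netmask conversion, so the bit twiddling can be cross-checked against it:

```python
import ipaddress

def dotted_netmask(mask_bits):
    # build a network with the given prefix length and read its netmask back
    return str(ipaddress.ip_network('0.0.0.0/%d' % mask_bits).netmask)

print(dotted_netmask(24))   # 255.255.255.0
print(dotted_netmask(19))   # 255.255.224.0
```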
#### File: tests/data23/recipe-576522.py
```python
__version__ = '$Id: which_dll.py 2247 2014-10-06 09:19:53Z mn $'
r"""
Returns the pathnames of the file (.exe or .dll)
which would be loaded/executed in the current environment
it uses some dirs from configuration (SystemDir, WindowsDir)
and dirs from PATH.
To obtain version info it uses code from:
http://pywin32.hg.sourceforge.net/hgweb/pywin32/pywin32/file/tip/win32/Demos/getfilever.py
Example of usage:
c:\tools\pyscripts\scripts>which_dll.py libpq.dll
2008-06-09 02:58:26 167936 [b] c:\postgresql\8.3\bin\libpq.dll ver:8.3.3.8160
2008-03-17 01:47:50 167936 [b] c:\tools\libpq.dll ver:8.3.1.8075
2008-03-17 01:47:50 167936 [b] g:\public\libpq.dll ver:8.3.1.8075
trying to load "libpq.dll" ...
c:\postgresql\8.3\bin\libpq.dll loaded
Author: <NAME>
"""
USAGE = 'Usage:\n\twhich_dll.py dll_name/exe_name'
import sys
import time
import os
import os.path
import win32api
def get_file_ver(fname):
# see: http://pywin32.hg.sourceforge.net/hgweb/pywin32/pywin32/file/tip/win32/Demos/getfilever.py
result = []
try:
ver_strings = ('ProductVersion', 'FileVersion')
pairs = win32api.GetFileVersionInfo(fname, '\\VarFileInfo\\Translation')
        ## \VarFileInfo\Translation returns list of available (language, codepage) pairs that can be used to retrieve string info
## any other must be of the form \StringfileInfo\%04X%04X\parm_name, middle two are language/codepage pair returned from above
for lang, codepage in pairs:
#print 'lang: ', lang, 'codepage:', codepage
for ver_string in ver_strings:
str_info = '\\StringFileInfo\\%04X%04X\\%s' % (lang, codepage, ver_string)
result.append('%s %s' % (ver_string, win32api.GetFileVersionInfo(fname, str_info).strip()))
except:
pass
return result
def get_file_info(file_path):
"""returns string with file name, its modification time and size"""
s = os.stat(file_path)
f_date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(s[8]))
f_size = s[6]
fv = ''
ver_info = get_file_ver(file_path)
if ver_info:
fv = '\t%s' % ('; '.join(ver_info))
return "%s\t%8s [b]\t%s%s" % (f_date, f_size, file_path, fv)
def which(fname):
"""searches fname in PATH dirs"""
if not which_file(fname):
if '.' not in fname:
# no extension, so we try some "executable" extensions
for ext in ('.exe', '.com', '.bat', '.cmd'):
fname2 = fname + ext
if which_file(fname2):
break
def which_file(fname):
"""prints paths for fname where fname can be found,
in case of .dll loads it"""
files = []
path = win32api.GetEnvironmentVariable('PATH')
# try paths as described in MSDN
dirs = [os.getcwd(), win32api.GetSystemDirectory(), win32api.GetWindowsDirectory()] + path.split(';')
dirs_norm = []
dirs_l = []
for d in dirs:
dn = d.lower()
if dn not in dirs_l:
dirs_l.append(dn)
dirs_norm.append(d)
for d in dirs_norm:
fname2 = os.path.join(d, fname)
if os.path.exists(fname2):
if fname2 not in files:
files.append(fname2)
if files:
print(('\n'.join([get_file_info(f) for f in files])))
h = 0
if fname.lower().endswith('.dll'):
print(('\ttrying to load "%s" ...' % (fname)))
try:
h = win32api.LoadLibrary(fname)
if h:
dll_name = win32api.GetModuleFileName(h)
print(('\t%s loaded' % (dll_name)))
except:
                print(('\tCannot load "%s" !!!' % (fname)))
    return files
def main():
if '--version' in sys.argv:
print(__version__)
return
elif '--help' in sys.argv:
print(USAGE)
return
elif '--test' in sys.argv:
which('libpq.dll')
which('libeay32.dll')
which('msvcr71.dll')
which('ssleay32.dll')
which('cmd.exe')
which('grep')
which('iclit09b.dll')
which('non_existient.dll')
return
if len(sys.argv) < 2:
print(USAGE)
else:
which(sys.argv[1])
main()
```
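Not part of the recipe: for the plain "which executable is on PATH" half of the job (without the version info or DLL loading), `shutil.which()` has been in the standard library since Python 3.3:

```python
import shutil

for name in ('cmd.exe', 'python', 'non_existent_tool'):
    print(name, '->', shutil.which(name))   # full path, or None if not found
```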
#### File: tests/data23/recipe-576614.py
```python
import gtk
import xml.dom.minidom as minidom
class PaletteBox:
class PaletteBoxItem:
def __init__(self,num,icon,tooltip,size):
self.num=num
self.icon=icon
self.tooltip=tooltip
self.Item=gtk.RadioToolButton()
self.Item.set_size_request(size,size)
def __init__(self,title,items_size = 40):
if items_size > 64 : items_size = 64
if items_size < 16 : items_size = 16
self.rowcount=-1
self.item=[]
self.items_size = items_size
self.button = gtk.Button()
self.button.set_focus_on_click(False)
self.button.connect("clicked",self.hide_show_box)
self.btnbox=gtk.HBox()
self.btnlabel=gtk.Label(" " + title )
self.btnarrow=gtk.Arrow(gtk.ARROW_DOWN,gtk.SHADOW_OUT)
self.btnarrowx = gtk.ARROW_DOWN
self.btnbox.pack_start(self.btnarrow,0,0)
self.btnbox.pack_start(self.btnlabel,0,0)
self.button.add(self.btnbox)
self.hbl = gtk.HBox()
self.itemsbox = gtk.Fixed()
self.itemsbox.set_size_request(items_size, -1)
self.itemsbox.connect("size-allocate",self.redraw_itemsbox)
self.hbl.pack_start(self.itemsbox,1,1)
self.box=gtk.VBox()
self.box.pack_start(self.button,0,0)
self.box.pack_start(self.hbl,1,1)
def hide_show_box(self,widget):
if self.btnarrowx == gtk.ARROW_DOWN:
self.btnarrow.set(gtk.ARROW_RIGHT,gtk.SHADOW_OUT)
self.hbl.set_property("visible",False)
self.btnarrowx=gtk.ARROW_RIGHT
elif self.btnarrowx == gtk.ARROW_RIGHT:
self.btnarrow.set(gtk.ARROW_DOWN,gtk.SHADOW_OUT)
self.hbl.set_property("visible",True)
self.btnarrowx=gtk.ARROW_DOWN
def set_title(self,title):
try:
self.button.set_label(title)
except:
print("PaletteBox.set_title(" + str(title) + ") is not string.")
def add_item(self,icon,tooltip,group=None):
index = len(self.item)
self.item.append(self.PaletteBoxItem(index,icon,tooltip,self.items_size))
if(len(self.item) > 1):
self.item[index].Item.set_group(self.item[0].Item)
else:
self.item[index].Item.set_group(group)
self.item[index].Item.set_label(tooltip)
self.itemsbox.add(self.item[index].Item)
self.redraw_itemsbox(self,self.itemsbox)
def redraw_itemsbox(self,widget,event = 0):
width = self.itemsbox.get_allocation()[2]
        rowcount = width // self.items_size  # integer division: rowcount feeds range() below
if(self.rowcount != rowcount):
if rowcount<1 : rowcount = self.items_size
            colcount = len(self.item) // rowcount
extra = len(self.item) % rowcount
itemcounter = 0
for i in range(colcount):
for j in range(rowcount):
self.itemsbox.move(self.item[itemcounter].Item , j * self.items_size , i * self.items_size)
itemcounter += 1
for j in range(extra):
self.itemsbox.move(self.item[itemcounter].Item , j * self.items_size , colcount * self.items_size)
itemcounter += 1
self.rowcount = rowcount
class MakePaletteByXML:
def __init__(self, xmlpath, parent = gtk.VBox()):
try:
self.parent = parent
x=minidom.parse(xmlpath)
for group in x.getElementsByTagName("group"):
isnewgroup = True
for box in group.getElementsByTagName("box"):
if (box.getAttribute("title")) : title = box.getAttribute("title")
else : title = "unknown"
mypalette = PaletteBox(title)
self.parent.pack_start(mypalette.box, 0)
for item in box.getElementsByTagName("item"):
icon = item.getAttribute("iconpath")
tooltip = item.getAttribute("tooltip")
if (isnewgroup) :
isnewgroup = False
mypalette.add_item(icon, tooltip, None)
itemgroup = mypalette.item[0].Item
else:
mypalette.add_item(icon, tooltip, itemgroup)
        except:
            print("An error occurred while reading the XML file.")
            print("Please check the XML file and repair its structure...")
w = gtk.Window()
w.set_size_request(500,500)
w.connect("destroy",gtk.main_quit)
p = gtk.HPaned()
group=pb1=PaletteBox("First Test")
pb1.add_item("1", "A")
pb1.add_item("2", "B")
pb1.add_item("3", "C")
pb1.add_item("4", "D")
pb1.add_item("5", "E")
pb1.add_item("6", "F")
pb1.add_item("7", "G")
pb1.add_item("8", "H")
pb1.add_item("9", "I")
pb1.add_item("10", "J")
pb2=PaletteBox("Last Test")
pb2.add_item("1", "A",pb1.item[0].Item)
pb2.add_item("2", "B",pb1.item[0].Item)
pb2.add_item("3", "C",pb1.item[0].Item)
pb2.add_item("4", "D",pb1.item[0].Item)
pb2.add_item("5", "E",pb1.item[0].Item)
pb2.add_item("6", "F",pb1.item[0].Item)
pb2.add_item("7", "G",pb1.item[0].Item)
pb2.add_item("8", "H",pb1.item[0].Item)
pb2.add_item("9", "I",pb1.item[0].Item)
pb2.add_item("10", "J",pb1.item[0].Item)
vbb=gtk.VBox()
vbb.pack_start(pb1.box,0)
vbb.pack_start(pb2.box,0)
p.pack1(vbb,0,0)
v=gtk.VBox()
v.add(p)
l=gtk.Layout()
p.pack2(l)
w.add(v)
w.show_all()
gtk.main()
```
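The `__main__` demo above exercises `PaletteBox` directly but never shows `MakePaletteByXML`. Below is a minimal sketch of the XML layout the parser reads, derived only from the tag and attribute names used in the code (`group`, `box title`, `item iconpath`/`tooltip`); the file name `palette.xml` and the icon paths are invented for illustration.
```python
# Hypothetical palette.xml matching the structure MakePaletteByXML parses.
sample = """<?xml version="1.0"?>
<palette>
  <group>
    <box title="Shapes">
      <item iconpath="circle.png" tooltip="Circle"/>
      <item iconpath="square.png" tooltip="Square"/>
    </box>
  </group>
</palette>
"""
with open("palette.xml", "w") as f:
    f.write(sample)

# With PyGTK installed, the boxes can then be built into any container:
#   container = gtk.VBox()
#   MakePaletteByXML("palette.xml", container)
```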
#### File: tests/data23/recipe-576618.py
```python
import threading
import queue
import os
import feedparser
from urllib.request import urlretrieve
#-----------------------------------------------------------------------------#
n_threads = 10
feed_url = "http://www.heise.de/newsticker/heise.rdf"
left_link = "http://www.heise.de/fastbin/audio_download" \
"?meldung=http://www.heise.de/newsticker/meldung/"
try:
archive_filename = "%s/.heise" % os.environ["HOME"]
except KeyError:
archive_filename = "%s%sheise_archive" % (os.environ["HOMEPATH"], os.sep)
#-----------------------------------------------------------------------------#
class Downloader(threading.Thread):
""" Class for worker-threads that download files. Don't tell Marx! """
def __init__(self, links_filenames):
threading.Thread.__init__(self)
        self.daemon = True  # daemon thread so it won't block interpreter exit
self.links_filenames = links_filenames
self.start()
#-------------------------------------------------------------------------#
def run(self):
while True:
link, filename = self.links_filenames.get()
urlretrieve(link, filename)
self.links_filenames.task_done()
#-----------------------------------------------------------------------------#
class Archive(object):
def __init__(self):
feed = feedparser.parse(feed_url)
try:
archive_file = open(archive_filename)
old_links = archive_file.readlines()
self.old_links = [link.strip() for link in old_links]
archive_file.close()
except IOError:
self.old_links = []
self.entries_i = list(range(len(feed["entries"])))
self.feed_links = [feed["entries"][entry_i]["link"].encode("utf-8")
for entry_i in self.entries_i]
self.feed = feed
#-------------------------------------------------------------------------#
def get_new_entries(self):
new_links = [link for link in self.feed_links
if link not in self.old_links]
titles = [self.feed["entries"][entry_i]["title"].encode("utf-8")
for entry_i in self.entries_i
if self.feed["entries"][entry_i]["link"].encode("utf-8")
in new_links]
# the article_id is in the link between "meldung/" and "/from"
article_ids = [link.split("meldung/")[1].split("/from")[0]
for link in new_links]
return new_links, titles, article_ids
#-------------------------------------------------------------------------#
def store(self):
archive_file = open(archive_filename, "w")
archive_file.writelines("\n".join(self.feed_links))
archive_file.close()
#-----------------------------------------------------------------------------#
def prepare_workers():
links_filenames = queue.Queue()
return [Downloader(links_filenames) for ii in range(n_threads)][0]
#-----------------------------------------------------------------------------#
def start_download(link, title, id, downloader):
for bad, good in zip(("/", ":", " ", '"', "?"), ("", "", "_", "", "")):
title = title.replace(bad, good)
filename = "heise_%s_%s.mp3" % (id, title)
mp3_link = left_link + id
downloader.links_filenames.put((mp3_link, filename))
#-----------------------------------------------------------------------------#
if __name__ == "__main__":
downloader = prepare_workers()
feed_archive = Archive()
links, titles, ids = feed_archive.get_new_entries()
for link, title, id in zip(links, titles, ids):
download_yn = None
while download_yn != "y" and download_yn != "n" and download_yn != "c":
print(title)
download_yn = input('Download mp3? (y/[n]/c)')
if download_yn == "":
download_yn = "n"
if download_yn == "y":
start_download(link, title, id, downloader)
if download_yn == "c":
break
if links:
print("Waiting for downloads to end...")
downloader.links_filenames.join()
feed_archive.store()
```
#### File: tests/data23/recipe-576619.py
```python
def test_for_day(target_day):
"""
Accepts a weekday and tests if today is that weekday.
"""
import time
# Get the date object of today's date:
todays_date = time.localtime().tm_wday
# Form a dictionary of the days of the week, starting on Monday
# since this is the time module's assumption:
date_dict = dict(enumerate('Monday Tuesday Wednesday Thursday Friday Saturday Sunday'.split()))
# Find the weekday of today's date and compare to target:
if date_dict[todays_date] == target_day:
print("Today is the target (%s)." % target_day)
else:
print("Today is %s, not %s." % (date_dict[todays_date], target_day))
```
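A minimal usage sketch for the function above; the module name `recipe_576619` is hypothetical and only stands in for wherever `test_for_day` is defined.
```python
# Hypothetical module name; adjust to wherever test_for_day() lives.
from recipe_576619 import test_for_day

test_for_day("Friday")   # prints whether today is Friday
test_for_day("Monday")   # prints whether today is Monday
```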
#### File: tests/data23/recipe-576726.py
```python
import threading
from time import sleep
def intervalExecute(interval, func, *args, **argd):
''' @param interval: execute func(*args, **argd) each interval
@return: a callable object to enable you terminate the timer.
'''
cancelled = threading.Event()
def threadProc(*args, **argd):
while True:
cancelled.wait(interval)
            if cancelled.is_set():
break
            func(*args, **argd)  #: could be a lengthy operation
th = threading.Thread(target=threadProc, args=args, kwargs=argd)
th.start()
def close(block=True, timeout=3):
        ''' @param block: if True, block the caller until the thread
            is closed or the timeout expires
        @param timeout: join timeout in seconds, only used when blocking
        @return: if blocking, True -> closed successfully; False -> timed out
            if non-blocking, always returns False
        '''
if not block:
cancelled.set()
return False
else:
cancelled.set()
th.join(timeout)
            isClosed = not th.is_alive()
return isClosed
return close
if __name__=='__main__':
# sample usage is as follow....
def testFunc(identifier, txt=''):
print('test func entered')
sleep(2)
print(identifier, txt)
cancellObj = intervalExecute(2.0, testFunc, 1, 'haha')
help(cancellObj)
sleep(5.2)
print(cancellObj()) #: cancel the intervalExecute timer.
print('after calling close')
```
#### File: tests/data23/recipe-576809.py
```python
DEL = '/'
class PorReader(object):
def __init__(self, file):
        if isinstance(file, str): file = open(file)
self.file = file
self.pos = -1
self.buffer = ""
def consumeOne(self, skip=False):
p = self.buffer.find(DEL, self.pos+1)
output = ""
while p == -1:
if not skip: output += self.buffer[self.pos+1:]
self.buffer = self.file.read(1024)
self.pos = -1
p = self.buffer.find(DEL, self.pos+1)
if not self.buffer: break
if not skip: output += self.buffer[self.pos+1:p]
self.pos = p
if not skip:
output = output.replace("\r\n", "")
return output
def consume(self, n=1):
return [self.consumeOne() for i in range(n)]
def skip(self, n=1):
for i in range(n):
self.consumeOne(skip=True)
HEAD = 'SPSS for Microsoft Windows Release 15.04'
FLOAT, STR, INT = 0,1,2
class SPSSVariable(object):
def __init__(self, name, label=None, numeric=True, decimals=0):
self.name = name
self.label = label
self.numeric = numeric
self.decimals = decimals
self.valuelabels = None
self.index = None
def __str__(self):
t = 'S'
if self.numeric: t = 'I'
if self.numeric and self.decimals: t = 'F'
return "%s%s%s" % (self.name, (' "%s" ' % self.label if self.label else ''),t)
def splitstring(slen=None, s=None, reader=None):
if slen is None:
slen = reader.consume(2)
if s is None: slen, s = slen
if type(slen) == str: slen = readnum(slen)
while slen > len(s):
if reader:
s += "/"+reader.consumeOne()
else:
raise Exception("!")
keep = s[slen:]
s = s[:slen]
return s, keep
class SPSSFile(object):
def __init__(self, file):
self.variables = []
self.vardict = {}
self.data = []
self.init(file)
def addvar(self, var):
var.index = len(self.variables)
self.variables.append(var)
self.vardict[var.name] = var
def getvar(self, varname):
return self.vardict[varname]
def get(self, var, row):
        if isinstance(var, str):
var = self.vardict[var]
return row[var.index]
def init(self, file):
r = PorReader(file)
r.skip(5)
h = r.consumeOne()
if not h.startswith(HEAD): raise Exception("Cannot read .por")
numvars = readnum(h[len(HEAD):])
h = r.skip(1)
keep = r.consumeOne()
while True:
action = keep[0]
#print "ACTION: %s" % action
if action == '7':
data = r.consume(8)
while data[-2][0] != 'C': data += r.consume()
decimals = readnum(data[4])
numeric = keep[1:] == '0'
name, dummy = splitstring(data[:2])
labellen, label = data[-2:]
label, keep = splitstring(labellen[1:], label, r)
v = SPSSVariable(name, label, numeric, decimals)
self.addvar(v)
#print "ADDED VAR ", v, data, `keep`, labellen[1:]
if action == 'D': # value labels
numvars = readnum(keep[1:])
varnames = []
keep = r.consumeOne()
for i in range(numvars):
name, keep = splitstring(keep, r.consumeOne(), reader=r)
varnames.append(name)
numlabels = readnum(keep)
keep = r.consumeOne()
labels = {}
numeric = self.getvar(varnames[0]).numeric
for i in range(numlabels):
if numeric:
val = readnum(keep)
name, keep = splitstring(reader=r)
else:
val, keep = splitstring(keep, r.consumeOne(), reader=r)
name, keep = splitstring(keep, r.consumeOne(), reader=r)
labels[val] = name
#print "VALUE LABELS", varnames, labels
for varname in varnames:
self.getvar(varname).valuelabels = labels
if action == 'F': # data
keep = keep[1:]
while True:
row = []
for var in self.variables:
if not keep: keep = r.consumeOne()
if keep.startswith("Z"):
return
if var.numeric:
if keep.startswith("*."):
row.append(None)
keep = keep[2:]
else:
try:
row.append(readnum(keep))
except Exception as e:
print(row)
print("Exception on %s" % var)
raise e
keep = ""
else:
slen = keep
x, keep = splitstring(slen, r.consumeOne())
row.append(x)
self.data.append(tuple(row))
if action == 'Z': # data
print("Done!")
return
def _codec(str_in, base_from=36, base_to=10):
"""
Base36 Encoder/Decoder
by <NAME> (<EMAIL>) on August 26, 2008
This code has been placed in the public domain.
"""
ASCII = { "0": 48, "9": 57, "A": 65, "Z": 90 }
# There are 8 characters between 9 and A
from_digits = [chr(x) for x in range(ASCII["0"], ASCII["9"] + 8 + base_from)
if (x >= ASCII["0"] and x <= ASCII["9"]) or
(x >= ASCII["A"] and x <= ASCII["Z"])][:base_from]
to_digits = [chr(x) for x in range(ASCII["0"], ASCII["9"] + 8 + base_to)
if (x >= ASCII["0"] and x <= ASCII["9"]) or
(x >= ASCII["A"] and x <= ASCII["Z"])][:base_to]
x = int(0)
for digit in str(str_in).upper():
x = x * len(from_digits) + from_digits.index(digit)
result = ""
# This is going to assemble our number in reverse order
# so we'll have to fix it before we return it
while x > 0:
result += to_digits[x % len(to_digits)]
        x //= len(to_digits)
return result[::-1]
def decode(s):
while s.startswith("0"): s = s[1:]
if not s: return 0
try:
return int(_codec(s, 30, 10))
except ValueError as e:
raise ValueError("Cannot decode %r: %s" % (s, e))
def readnum(s):
neg = s.startswith("-")
if neg: s = s[1:]
if "+" in s:
num, exp = list(map(decode, s.split("+")))
result = 30**exp
elif "-" in s:
num, exp = list(map(decode, s.split("-")))
result = 1. / (30**exp)
else:
if "." in s:
i, d = s.split(".")
else:
i, d = s, None
result = decode(i)
if d:
for j, digit in enumerate(d):
result += decode(digit) / 30.**(j+1)
return result * (-1 if neg else 1)
if __name__ == '__main__':
import sys
fn = sys.argv[1]
f = SPSSFile(fn)
print(len(f.variables), len(f.data))
```
#### File: tests/data23/recipe-576834.py
```python
import struct, array, fcntl
class struxx:
_fields = None
_format = None
_buffer = None
def __init__(self):
self.reset()
def __len__(self):
"""binary represntation length, for fields, use __dict__ or something"""
return struct.calcsize(self._format)
def __iter__(self):
return [getattr(self, field) for field in self._fields.split(";")].__iter__()
def reset(self):
for field in self._fields.split(";"):
setattr(self, field, 0)
self._buffer = array.array('B', [0]*len(self))
def pack(self):
self._buffer = array.array('B', struct.pack(self._format, *self))
def unpack(self):
rv = struct.unpack(self._format, self._buffer)
for i in range(len(rv)):
setattr(self, self._fields.split(";")[i], rv[i])
def ioctl(self, fd, ioctlno):
self.pack()
rv = fcntl.ioctl(fd, ioctlno, self._buffer, True)
self.unpack()
return rv
class uint(struxx):
_fields = "uint"
_format = "I"
def get_version(self, fd): return self.ioctl(fd, HIDIOCGVERSION)
def get_flags(self, fd): return self.ioctl(fd, HIDIOCGFLAG)
def set_flags(self, fd): return self.ioctl(fd, HIDIOCSFLAG)
class hiddev_devinfo(struxx):
_fields = "bustype;busnum;devnum;ifnum;vendor;product;version;num_applications"
_format = "IIIIhhhI"
def get(self, fd): return self.ioctl(fd, HIDIOCGDEVINFO)
class hiddev_string_descriptor(struxx):
_fields = "index;value"
_format = "i256c"
def reset(self):
self.index = 0
self.value = '\0'*256
def pack(self):
tmp = struct.pack("i", self.index) + self.value[:256].ljust(256, '\0')
self._buffer = array.array('B', tmp)
def unpack(self):
self.index = struct.unpack("i", self._buffer[:4])
self.value = self._buffer[4:].tostring()
def get_string(self, fd, idx):
self.index = idx
return self.ioctl(fd, HIDIOCGSTRING)
class hiddev_report_info(struxx):
_fields = "report_type;report_id;num_fields"
_format = "III"
def get_info(self, fd): return self.ioctl(fd, HIDIOCGREPORTINFO)
class hiddev_field_info(struxx):
_fields = "report_type;report_id;field_index;maxusage;flags;physical;logical;application;logical_minimum;logical_maximum;physical_minimum;physical_maximum;unit_exponent;unit"
_format = "I"*8+"i"*4+"II"
def get_info(self, fd): return self.ioctl(fd, HIDIOCGFIELDINFO)
class hiddev_usage_ref(struxx):
_fields = "report_type;report_id;field_index;usage_index;usage_code;value"
_format = "I"*5+"i"
class hiddev_collection_info(struxx):
_fields = "index;type;usage;level"
_format = "I"*4
def get_info(self, fd, index):
self.index = index
return self.ioctl(fd, HIDIOCGCOLLECTIONINFO)
class hiddev_event(struxx):
_fields = "hid;value"
_format = "Hi"
IOCPARM_MASK = 0x7f
IOC_NONE = 0x20000000
IOC_WRITE = 0x40000000
IOC_READ = 0x80000000
def FIX(x): return struct.unpack("i", struct.pack("I", x))[0]
def _IO(x,y): return FIX(IOC_NONE|(ord(x)<<8)|y)
def _IOR(x,y,t): return FIX(IOC_READ|((t&IOCPARM_MASK)<<16)|(ord(x)<<8)|y)
def _IOW(x,y,t): return FIX(IOC_WRITE|((t&IOCPARM_MASK)<<16)|(ord(x)<<8)|y)
def _IOWR(x,y,t): return FIX(IOC_READ|IOC_WRITE|((t&IOCPARM_MASK)<<16)|(ord(x)<<8)|y)
HIDIOCGVERSION =_IOR('H', 0x01, struct.calcsize("I"))
HIDIOCAPPLICATION =_IO('H', 0x02)
HIDIOCGDEVINFO =_IOR('H', 0x03, len(hiddev_devinfo()))
HIDIOCGSTRING =_IOR('H', 0x04, len(hiddev_string_descriptor()))
HIDIOCINITREPORT =_IO('H', 0x05)
def HIDIOCGNAME(buflen): return _IOR('H', 0x06, buflen)
HIDIOCGREPORT =_IOW('H', 0x07, len(hiddev_report_info()))
HIDIOCSREPORT =_IOW('H', 0x08, len(hiddev_report_info()))
HIDIOCGREPORTINFO =_IOWR('H', 0x09, len(hiddev_report_info()))
HIDIOCGFIELDINFO =_IOWR('H', 0x0A, len(hiddev_field_info()))
HIDIOCGUSAGE =_IOWR('H', 0x0B, len(hiddev_usage_ref()))
HIDIOCSUSAGE =_IOW('H', 0x0C, len(hiddev_usage_ref()))
HIDIOCGUCODE =_IOWR('H', 0x0D, len(hiddev_usage_ref()))
HIDIOCGFLAG =_IOR('H', 0x0E, struct.calcsize("I"))
HIDIOCSFLAG =_IOW('H', 0x0F, struct.calcsize("I"))
HIDIOCGCOLLECTIONINDEX =_IOW('H', 0x10, len(hiddev_usage_ref()))
HIDIOCGCOLLECTIONINFO =_IOWR('H', 0x11, len(hiddev_collection_info()))
def HIDIOCGPHYS(buflen): return _IOR('H', 0x12, buflen)
HID_REPORT_TYPE_INPUT =1
HID_REPORT_TYPE_OUTPUT =2
HID_REPORT_TYPE_FEATURE =3
HID_REPORT_TYPE_MIN =1
HID_REPORT_TYPE_MAX =3
HID_REPORT_ID_UNKNOWN =0xffffffff
HID_REPORT_ID_FIRST =0x00000100
HID_REPORT_ID_NEXT =0x00000200
HID_REPORT_ID_MASK =0x000000ff
HID_REPORT_ID_MAX =0x000000ff
def enum_reports(fd):
for report_type in (HID_REPORT_TYPE_INPUT,
HID_REPORT_TYPE_OUTPUT,
HID_REPORT_TYPE_FEATURE):
for i in range(HID_REPORT_ID_MAX+1):
try:
ri = hiddev_report_info()
ri.report_type = report_type
ri.report_id = i
#print "trying", ri.__dict__
ri.get_info(fd)
print("%s(%s): %s fields" % ({1: 'input', 2:'output', 3:'feature'}.get(ri.report_type), ri.report_id, ri.num_fields))
for field in range(ri.num_fields):
fi = hiddev_field_info()
fi.report_type = ri.report_type
fi.report_id = ri.report_id
fi.field_index = field
fi.get_info(fd)
print(", ".join(["%s:%s" % (key, fi.__dict__[key]) for key in fi.__dict__ if key not in ("report_type", "report_id", "_buffer") and fi.__dict__[key] ]))
#print report_info.__dict__
print()
except IOError:
pass
if __name__=="__main__":
# name = ""
# for name in globals():
# if name.startswith("HID"):
# if type(globals()[name]) == int:
# print name, "\t%x" % globals()[name]
f = open("/dev/usb/hiddev0", "r")
tmp = uint()
tmp.get_version(f)
print("version 0x%x" % tmp.uint)
tmp.get_flags(f)
print("flags 0x%x" % tmp.uint)
tmp.uint = 3
tmp.set_flags(f)
tmp.get_flags(f)
print("flags 0x%x" % tmp.uint)
devinfo = hiddev_devinfo()
devinfo.get(f)
print("devinfo", devinfo.__dict__)
enum_reports(f)
def get_device_name(f):
a = array.array('B', [0]*1024)
fcntl.ioctl(f, HIDIOCGNAME(1024), a, True)
print(a)
def get_some_strings(f):
for i in range(-10000, 10000):
try:
string = hiddev_string_descriptor()
string.get_string(f, i)
print("string %s: %s", string.index, repr(string.value))
except IOError:
pass
def show_all_collections(f):
for i in range(256):
try:
collection_info = hiddev_collection_info()
collection_info.get_info(f, i)
print("coll %s" % i, collection_info.__dict__)
print("""
index: %(index)s
type: %(type)s
level: %(level)s
usage: 0x%(usage)x""" % collection_info.__dict__)
except IOError:
pass
```
#### File: tests/data23/recipe-576842.py
```python
import os, sqlite3
"""Walks through the all the Firefox profiles in current user account and cleans all
*.sqlite files with "vacuum". It makes firefox faster then often. Should work on Linux, too,
when properly changed constants."""
# -------------- constants -----------------------------------------
systemEncoding="mbcs"
profileUser= str(os.environ["USERPROFILE"], systemEncoding)
profileApp = str(os.environ["APPDATA"], systemEncoding) + r"\Mozilla\Firefox\Profiles"
# -------------- functions -----------------------------------------
def searchProfil(profileApp):
"all firefox profiles"
for profile in os.listdir(profileApp):
profileFull=os.path.join(profileApp, profile)
searchSqlite(profileFull)
def searchSqlite(profile):
"all sqlite file in each firefox profile"
sq=[os.path.join(profile,s) for s in os.listdir(profile) if s.endswith(".sqlite")]
print("\n..."+profile[len(profileUser):])
for s in sq:
dirName, fileName=os.path.split(s)
conn = sqlite3.connect(s)
oldSize=os.path.getsize(s)
print(fileName+":", end=' ')
try:
c=conn.cursor()
c.execute("VACUUM") # this is the thing
c.close()
print("done.", end=' ')
print("%.1f%%" % (os.path.getsize(s)*1.0/oldSize*100))
except:
print("error.")
# ----------------- main -------------------------------------------
if __name__=="__main__":
if os.path.isdir(profileApp):
searchProfil(profileApp)
else:
print("Not exists:", profileApp)
```
#### File: tests/data23/recipe-576858.py
```python
from email import encoders
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import mimetypes
import os
import re
import smtplib
class Email:
"""
This class handles the creation and sending of email messages
via SMTP. This class also handles attachments and can send
HTML messages. The code comes from various places around
the net and from my own brain.
"""
def __init__(self, smtpServer):
"""
Create a new empty email message object.
@param smtpServer: The address of the SMTP server
@type smtpServer: String
"""
self._textBody = None
self._htmlBody = None
self._subject = ""
self._smtpServer = smtpServer
self._reEmail = re.compile("^([\\w \\._]+\\<[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\>|[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)$")
self.clearRecipients()
self.clearAttachments()
def send(self):
"""
Send the email message represented by this object.
"""
# Validate message
if self._textBody is None and self._htmlBody is None:
raise Exception("Error! Must specify at least one body type (HTML or Text)")
if len(self._to) == 0:
raise Exception("Must specify at least one recipient")
# Create the message part
if self._textBody is not None and self._htmlBody is None:
msg = MIMEText(self._textBody, "plain")
elif self._textBody is None and self._htmlBody is not None:
msg = MIMEText(self._htmlBody, "html")
else:
msg = MIMEMultipart("alternative")
msg.attach(MIMEText(self._textBody, "plain"))
msg.attach(MIMEText(self._htmlBody, "html"))
# Add attachments, if any
if len(self._attach) != 0:
tmpmsg = msg
msg = MIMEMultipart()
msg.attach(tmpmsg)
for fname,attachname in self._attach:
if not os.path.exists(fname):
print("File '%s' does not exist. Not attaching to email." % fname)
continue
if not os.path.isfile(fname):
print("Attachment '%s' is not a file. Not attaching to email." % fname)
continue
# Guess at encoding type
ctype, encoding = mimetypes.guess_type(fname)
if ctype is None or encoding is not None:
# No guess could be made so use a binary type.
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
fp = open(fname)
attach = MIMEText(fp.read(), _subtype=subtype)
fp.close()
elif maintype == 'image':
fp = open(fname, 'rb')
attach = MIMEImage(fp.read(), _subtype=subtype)
fp.close()
elif maintype == 'audio':
fp = open(fname, 'rb')
attach = MIMEAudio(fp.read(), _subtype=subtype)
fp.close()
else:
fp = open(fname, 'rb')
attach = MIMEBase(maintype, subtype)
attach.set_payload(fp.read())
fp.close()
# Encode the payload using Base64
encoders.encode_base64(attach)
# Set the filename parameter
if attachname is None:
filename = os.path.basename(fname)
else:
filename = attachname
attach.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(attach)
# Some header stuff
msg['Subject'] = self._subject
msg['From'] = self._from
msg['To'] = ", ".join(self._to)
msg.preamble = "You need a MIME enabled mail reader to see this message"
# Send message
msg = msg.as_string()
server = smtplib.SMTP(self._smtpServer)
server.sendmail(self._from, self._to, msg)
server.quit()
def setSubject(self, subject):
"""
Set the subject of the email message.
"""
self._subject = subject
def setFrom(self, address):
"""
Set the email sender.
"""
if not self.validateEmailAddress(address):
raise Exception("Invalid email address '%s'" % address)
self._from = address
def clearRecipients(self):
"""
Remove all currently defined recipients for
the email message.
"""
self._to = []
def addRecipient(self, address):
"""
Add a new recipient to the email message.
"""
if not self.validateEmailAddress(address):
raise Exception("Invalid email address '%s'" % address)
self._to.append(address)
def setTextBody(self, body):
"""
Set the plain text body of the email message.
"""
self._textBody = body
def setHtmlBody(self, body):
"""
Set the HTML portion of the email message.
"""
self._htmlBody = body
def clearAttachments(self):
"""
Remove all file attachments.
"""
self._attach = []
def addAttachment(self, fname, attachname=None):
"""
Add a file attachment to this email message.
@param fname: The full path and file name of the file
to attach.
@type fname: String
@param attachname: This will be the name of the file in
the email message if set. If not set
then the filename will be taken from
the fname parameter above.
@type attachname: String
"""
if fname is None:
return
self._attach.append( (fname, attachname) )
def validateEmailAddress(self, address):
"""
Validate the specified email address.
@return: True if valid, False otherwise
@rtype: Boolean
"""
if self._reEmail.search(address) is None:
return False
return True
if __name__ == "__main__":
# Run some tests
mFrom = "Test User <<EMAIL>>"
mTo = "<EMAIL>"
m = Email("mail.mydomain.com")
m.setFrom(mFrom)
m.addRecipient(mTo)
# Simple Plain Text Email
m.setSubject("Plain text email")
m.setTextBody("This is a plain text email <b>I should not be bold</b>")
m.send()
# Plain text + attachment
m.setSubject("Text plus attachment")
m.addAttachment("/home/user/image.png")
m.send()
# Simple HTML Email
m.clearAttachments()
m.setSubject("HTML Email")
m.setTextBody(None)
m.setHtmlBody("The following should be <b>bold</b>")
m.send()
# HTML + attachment
m.setSubject("HTML plus attachment")
m.addAttachment("/home/user/image.png")
m.send()
# Text + HTML
m.clearAttachments()
m.setSubject("Text and HTML Message")
m.setTextBody("You should not see this text in a MIME aware reader")
m.send()
# Text + HTML + attachment
m.setSubject("HTML + Text + attachment")
m.addAttachment("/home/user/image.png")
m.send()
```
#### File: tests/data23/recipe-576942.py
```python
import hashlib
import os
def get_thumbnailfile(filename):
"""Given the filename for an image,
return the path to the thumbnail file.
Returns None if there is no thumbnail file.
"""
# Generate the md5 hash of the file uri
    file_hash = hashlib.md5(('file://' + filename).encode('utf-8')).hexdigest()
# the thumbnail file is stored in the ~/.thumbnails/normal folder
# it is a png file and name is the md5 hash calculated earlier
tb_filename = os.path.join(os.path.expanduser('~/.thumbnails/normal'),
file_hash) + '.png'
if os.path.exists(tb_filename):
return tb_filename
else:
return None
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print('Usage ---')
print(' get_thumbnail.py filename')
sys.exit(0)
filename = sys.argv[1]
tb_filename = get_thumbnailfile(filename)
if tb_filename:
print('Thumbnail for file %s is located at %s' %(
filename, tb_filename))
else:
print('No thumbnail found')
```
#### File: tests/data23/recipe-577001.py
```python
import pickle
import fnmatch
import time
# mostly here for documentation purposes, not used in code.
CREATE_TABLE = ('create table if not exists mbus (id integer primary key '
'autoincrement, source, dest, data blob)')
# read all messages higher than the specified id
RECV_MESSAGES = 'select * from mbus where id > ? order by id asc'
SEND_MESSAGE = 'insert into mbus values (NULL, ?, ?, ?)'
# used at startup to find the id of the newest message already in the table
FIND_LAST_ID = 'select max(id) from mbus'
class MBus:
def __init__ (self, db, name):
self.db = db
self.name = name
self.seen = self._find_last_id()
self.mbox = []
# PRIVATE
def _find_last_id (self):
return self.db.execute(FIND_LAST_ID, ()).fetchone()[0] or 1
def _poll (self):
"""Fetch new messages from database and append to mailbox.
"""
for row in list(self.db.execute(RECV_MESSAGES, (self.seen,))):
self.seen, source, dest, blob = row
if source != self.name and fnmatch.fnmatch(self.name, dest):
                tag, data = pickle.loads(bytes(blob))
self.mbox.append((self.seen, source, tag, data))
def _filter (self, tag, func):
"""Remove and return matching messages from mailbox and retain the rest.
"""
mbox = []
for t in self.mbox:
if fnmatch.fnmatch(t[2], tag) and func(t):
yield t
else:
mbox.append(t)
self.mbox = mbox
# PUBLIC
def recv (self, tag='*', func=lambda _: True, wait=5, sleep=0.5):
end = time.time() + wait
while True:
self._poll()
for t in self._filter(tag, func):
yield t
if time.time() > end:
break
time.sleep(sleep)
def send (self, dest, tag, **kwargs):
data = (tag, kwargs)
rowid = self.db.execute(SEND_MESSAGE, (self.name, dest,
pickle.dumps(data))).lastrowid
return rowid
# PUBLIC API
def connect (db, name):
"""Create a MBus and populate module environment with it's global methods.
The common case is that we connect to only one message bus. To avoid passing
around a message bus object we can instead simply do:
import mbus
mbus.connect(db, 'client')
mbus.send('*', 'ping')
for rowid, source, tag, data in mbus.recv(tag='pong'):
pass
"""
g = globals()
m = MBus(db, name)
g['send'] = m.send
g['recv'] = m.recv
# TESTING
if __name__ == '__main__':
import os
import sys
import sqlite3
p = 'test.db'
c = not os.path.exists(p)
db = sqlite3.connect(p, isolation_level=None)
if c:
db.execute(CREATE_TABLE)
if sys.argv[1] == 'server':
mb = MBus(db, 'server')
while True:
for _, source, _, data in mb.recv(tag='ping'):
mb.send(source, 'pong', pid=os.getpid())
sys.exit(0)
else:
mb = MBus(db, 'client')
mb.send('server', 'ping')
for _, source, _, data in mb.recv(tag='pong'):
print('received pong from pid', data['pid'])
```
#### File: tests/data23/recipe-577194.py
```python
class Thing(object):
"""A thing, does stuff."""
def __init__(self):
self.special = "My special value!"
def process(self, default=True):
"""Accept any argument with no special processing (except True)."""
if default is True: # Notice I'm checking identity, not truth or equality
default = self.special
elif not default: # Optional check for False values
print("Non-value given!")
print(default)
if __name__ == "__main__":
t = Thing()
t.process() # Prints t's special value
t.process("something") # Prints 'something'
t.process(None) # Prints the False value warning
```
#### File: tests/data23/recipe-577355.py
```python
import collections
import time
# Requires Python 2.7
class Node(object):
def __init__(self, value):
self.value = value
self.next = None
self.prev = None
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.value)
class LinkedList(collections.MutableSequence):
def __init__(self, iterable=None):
self.sentinel = Node(None)
self.sentinel.next = self.sentinel
self.sentinel.prev = self.sentinel
self.__len = 0
if iterable is not None:
self += iterable
def get_node(self, index):
node = sentinel = self.sentinel
i = 0
while i <= index:
            node = node.next
if node == sentinel:
break
i += 1
if node == sentinel:
node = None
return node
def __getitem__(self, index):
        node = self.get_node(index)
return node.value
def __len__(self):
return self.__len
def __setitem__(self, index, value):
node = self.get_node(index)
node.value = value
def __delitem__(self, index):
node = self.get_node(index)
if node:
            node.prev.next = node.next
            if node.next:
node.next.prev = node.prev
node.prev = None
node.next = None
node.value = None
self.__len -= 1
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
        list_ = [self.get_node(i).value for i in range(len(self))]
return '%s(%r)' % (self.__class__.__name__, list_)
def append(self, value):
sentinel = self.sentinel
node = Node(value)
        self.insert_between(node, sentinel.prev, sentinel)
        self.__len += 1
def insert(self, index, value):
sentinel = self.sentinel
new_node = Node(value)
len_ = len(self)
if len_ == 0:
self.insert_between(new_node, sentinel, sentinel)
elif index >= 0 and index < len_:
node = self.get_node(index)
self.insert_between(new_node, node.prev, node)
elif index == len_:
self.insert_between(new_node, sentinel.prev, sentinel)
else:
raise IndexError
self.__len += 1
def insert_between(self, node, left_node, right_node):
if node and left_node and right_node:
node.prev = left_node
node.next = right_node
left_node.next = node
right_node.prev = node
else:
raise IndexError
class Stopwatch(object):
def __init__(self):
self.__start = 0.0
self.__stop = 0.0
self.__duration = 0.0
def start(self):
self.__start = time.time()
return self
def stop(self):
self.__stop = time.time()
self.__duration = self.__stop - self.__start
return self.__duration
def duration(self):
return self.__duration
class Profiler(object):
def __init__(self, size):
self.size = size
self.list = None
self.linked_list = None
self.sw_create_list = Stopwatch()
self.sw_create_linked_list = Stopwatch()
self.sw_pop_list = Stopwatch()
self.sw_pop_linked_list = Stopwatch()
def create_list(self):
self.sw_create_list.start()
self.list = [i for i in range(self.size)]
self.sw_create_list.stop()
def create_linked_list(self):
self.sw_create_linked_list.start()
self.linked_list = LinkedList()
for value in self.list:
self.linked_list.append(value)
self.sw_create_linked_list.stop()
def pop_list(self):
self.sw_pop_list.start()
for i in range(self.size):
del self.list[0]
self.sw_pop_list.stop()
def pop_linked_list(self):
self.sw_pop_linked_list.start()
for i in range(self.size):
del self.linked_list[0]
self.sw_pop_linked_list.stop()
def report(self):
print(("%6s %10d" % ("Size", self.size)))
print(("%6s %10s %10s %10s" % ("Type", "Create", "Pop", "Total")))
print(("%6s %10.2f %10.2f %10.2f" % ("List", self.sw_create_list.duration(), \
self.sw_pop_list.duration(), self.sw_create_list.duration() + self.sw_pop_list.duration())))
print(("%6s %10.2f %10.2f %10.2f" % ("Linked", self.sw_create_linked_list.duration(), \
self.sw_pop_linked_list.duration(), self.sw_create_linked_list.duration() + \
self.sw_pop_linked_list.duration())))
print()
def run(self):
self.create_list()
self.pop_list()
self.create_linked_list()
self.pop_linked_list()
self.report()
if __name__ == '__main__':
Profiler(1000).run()
Profiler(2000).run()
Profiler(5000).run()
Profiler(10000).run()
Profiler(20000).run()
Profiler(50000).run()
Profiler(100000).run()
print("Complete.")
```
#### File: tests/data23/recipe-577356.py
```python
__author__ = '<NAME> <<EMAIL>>'
__source__ = 'http://code.activestate.com/recipes/577356'
import os
if os.name == 'nt':
raise ValueError('dos/windows paths unsupported in this version')
def relative_path(base, target):
def split_path(path):
res = list()
while 1:
path, basename = os.path.split(path)
if path == os.sep and basename == '':
# root reached
break
res.insert(0, basename)
if path == '':
break
return res
# check for absolute paths
if not base.startswith(os.sep):
raise ValueError('base must be absolute: %s' % base)
if not target.startswith(os.sep):
raise ValueError('target must be absolute: %s' % target)
base_parts = split_path(base)
target_parts = split_path(target)
while len(base_parts) > 0 and \
len(target_parts) > 0 and \
base_parts[0] == target_parts[0]:
base_parts.pop(0)
target_parts.pop(0)
rel_parts = ['..'] * len(base_parts)
rel_parts.extend(target_parts)
return os.path.join(*rel_parts)
if __name__ == '__main__':
base = os.sep + os.path.join('a', 'b', 'c', 'd')
target = os.sep + os.path.join('a', 'b', 'c1', 'd2')
print('base :', base)
print('target:', target)
print('relative base->target:', relative_path(base, target))
```
#### File: tests/data23/recipe-577507.py
```python
__author__ = '<NAME>'
import re
class SwitchError(Exception): pass
CPAT_TYPE = type(re.compile('.'))
STR_TYPE = type('')
LIST_TYPE = type([])
TUPLE_TYPE = type(())
class Switch(object):
def __init__(self):
self.exactCases = {}
self.inCases = []
self.patternCases = []
self.defaultHandler = None
##
# Try each 'in' case, in the order they were
# specified, stopping if we get a match.
# Return a tuple of the string we are searching for in the target string,
# and the case handler found, or (None, None) if no match found.
def _findInCase(self, switchValue):
for inStr, aHandler in self.inCases:
if inStr in switchValue:
return (inStr, aHandler)
return (None, None)
##
# Try each regex pattern (using re.search), in the order they were
# specified, stopping if we get a match.
# Return a tuple of the re match object and the case handler found, or
# (None, None) if no match found.
def _findRegExCase(self, switchValue):
for cpat, aHandler in self.patternCases:
matchObj = cpat.search(switchValue)
if matchObj is not None:
return (matchObj, aHandler)
return (None, None)
##
# Switch on a switch value. A match against the exact
# (non-regular-expression) case matches is tried first. If that doesn't
    # find a match, then if the switch value is a string, list, or tuple, the 'in' case
# matches are tried next, in the order they were registered. If that
# doesn't find a match, then if the switch value is a string,
# the regular-expression case matches are tried next, in
# the order they were registered. If that doesn't find a match, and
# a default case handler was registered, the default case handler is used.
# If no match was found, and no default case handler was registered,
# SwitchError is raised.
# If a switch match is found, the corresponding case handler is called.
# The switch value is passed as the first positional parameter, along with
# any other positional and keyword parameters that were passed to the
# switch method. The switch method returns the return value of the
# called case handler.
def switch(self, switchValue, *args, **kwargs):
caseHandler = None
switchType = type(switchValue)
try:
# Can we find an exact match for this switch value?
# For an exact match, we will pass the case value to the case
# handler.
caseHandler = self.exactCases.get(switchValue)
caseValue = switchValue
except TypeError:
pass
# If no exact match, and we have 'in' cases to try,
# see if we have a matching 'in' case for this switch value.
# For an 'in' operation, we will be passing the left-hand side of
# 'in' operator to the case handler.
if not caseHandler and switchType in (STR_TYPE, LIST_TYPE, TUPLE_TYPE) \
and self.inCases:
caseValue, caseHandler = self._findInCase(switchValue)
# If no 'in' match, and we have regex patterns to try,
# see if we have a matching regex pattern for this switch value.
# For a RegEx match, we will be passing the re.matchObject to the
# case handler.
if not caseHandler and switchType == STR_TYPE and self.patternCases:
caseValue, caseHandler = self._findRegExCase(switchValue)
# If still no match, see if we have a default case handler to use.
if not caseHandler:
caseHandler = self.defaultHandler
caseValue = switchValue
# If still no case handler was found for the switch value,
# raise a SwitchError.
if not caseHandler:
raise SwitchError("Unknown case value %r" % switchValue)
# Call the case handler corresponding to the switch value,
# passing it the case value, and any other parameters passed
# to the switch, and return that case handler's return value.
return caseHandler(caseValue, *args, **kwargs)
##
    # Register a case handler, and the case value(s) it should handle.
# This is a function decorator for a case handler. It doesn't
# actually modify the decorated case handler, it just registers it.
# It takes a case value (any object that is valid as a dict key),
# or any iterable of such case values.
def case(self, caseValue):
def wrap(caseHandler):
            # If caseValue is a single value (strings count as single values
            # here), wrap it in a list so we can handle everything the same.
            caseValues = ([ caseValue ] if isinstance(caseValue, str) or
                          not hasattr(caseValue, '__iter__') else caseValue)
for aCaseValue in caseValues:
# Raise SwitchError on a dup case value.
if aCaseValue in self.exactCases:
raise SwitchError("Duplicate exact case value '%s'" % \
aCaseValue)
# Add it to the dict for finding exact case matches.
self.exactCases[aCaseValue] = caseHandler
return caseHandler
return wrap
##
# Register a case handler for handling a regular expression.
def caseRegEx(self, caseValue):
def wrap(caseHandler):
            # If caseValue is a single value (strings count as single values
            # here), wrap it in a list so we can handle everything the same.
            caseValues = ([ caseValue ] if isinstance(caseValue, str) or
                          not hasattr(caseValue, '__iter__') else caseValue)
for aCaseValue in caseValues:
# If this item is not a compiled regular expression, compile it.
if type(aCaseValue) != CPAT_TYPE:
aCaseValue = re.compile(aCaseValue)
# Raise SwitchError on a dup case value.
for thisCaseValue, _ in self.patternCases:
if aCaseValue.pattern == thisCaseValue.pattern:
raise SwitchError("Duplicate regex case value '%s'" % \
aCaseValue.pattern)
self.patternCases.append((aCaseValue, caseHandler))
return caseHandler
return wrap
##
# Register a case handler for handling an 'in' operation.
def caseIn(self, caseValue):
def wrap(caseHandler):
            # If caseValue is a single value (strings count as single values
            # here), wrap it in a list so we can handle everything the same.
            caseValues = ([ caseValue ] if isinstance(caseValue, str) or
                          not hasattr(caseValue, '__iter__') else caseValue)
for aCaseValue in caseValues:
# Raise SwitchError on a dup case value.
for thisCaseValue, _ in self.inCases:
if aCaseValue == thisCaseValue:
raise SwitchError("Duplicate 'in' case value '%s'" % \
aCaseValue)
# Add it to the the list of 'in' values.
self.inCases.append((aCaseValue, caseHandler))
return caseHandler
return wrap
##
# This is a function decorator for registering the default case handler.
def default(self, caseHandler):
self.defaultHandler = caseHandler
return caseHandler
if __name__ == '__main__': # pragma: no cover
# Example uses
# Instantiate a switch object.
mySwitch = Switch()
# Register some cases and case handlers, using the handy-dandy
# decorators.
# A default handler
@mySwitch.default
def gotDefault(value, *args, **kwargs):
print("Default handler: I got unregistered value %r, "\
"with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A single numeric case value.
@mySwitch.case(0)
def gotZero(value, *args, **kwargs):
print("gotZero: I got a %d, with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A range of numeric case values.
@mySwitch.case(list(range(5, 10)))
def gotFiveThruNine(value, *args, **kwargs):
print("gotFiveThruNine: I got a %d, with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A string case value, for an exact match.
@mySwitch.case('Guido')
def gotGuido(value, *args, **kwargs):
print("gotGuido: I got '%s', with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A string value for use with the 'in' operator.
@mySwitch.caseIn('lo')
def gotLo(value, *args, **kwargs):
print("gotLo: I got '%s', with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A regular expression pattern match in a string.
# You can also pass in a pre-compiled regular expression.
@mySwitch.caseRegEx(r'\b([Pp]y\w*)\b')
def gotPyword(matchObj, *args, **kwargs):
print("gotPyword: I got a matchObject where group(1) is '%s', "\
"with args: %r and kwargs: %r" % \
(matchObj.group(1), args, kwargs))
return matchObj
# And lastly, you can pass a iterable to case, caseIn, and
# caseRegEx.
@mySwitch.case([ 99, 'yo', 200 ])
def gotStuffInSeq(value, *args, **kwargs):
print("gotStuffInSeq: I got %r, with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# Now show what we can do.
got = mySwitch.switch(0)
# Returns 0, prints "gotZero: I got a 0, with args: () and kwargs: {}"
got = mySwitch.switch(6, flag='boring')
# Returns 6, prints "gotFiveThruNine: I got a 6, with args: () and
# kwargs: {'flag': 'boring'}"
got = mySwitch.switch(10, 42)
# Returns 10, prints "Default handler: I got unregistered value 10,
# with args: (42,) and kwargs: {}"
got = mySwitch.switch('Guido', BDFL=True)
# Returns 'Guido', prints "gotGuido: I got 'Guido', with args: () and
# kwargs: {'BDFL': True}"
got = mySwitch.switch('Anyone seen Guido around?')
# Returns 'Anyone Seen Guido around?', prints "Default handler: I got
# unregistered value 'Anyone seen Guido around?', with args: () and
# kwargs: {}", 'cause we used 'case' and not 'caseIn'.
got = mySwitch.switch('Yep, and he said "hello".', 99, yes='no')
# Returns 'lo', prints "gotLo: I got 'lo', with args: (99,) and
# kwargs: {'yes': 'no'}", 'cause we found the 'lo' in 'hello'.
got = mySwitch.switch('Bird is the Python word of the day.')
# Returns a matchObject, prints "gotPyword: I got a matchObject where
# group(1) is 'Python', with args: () and kwargs: {}"
got = mySwitch.switch('yo')
# Returns 'yo', prints "gotStuffInSeq: I got 'yo', with args: () and
# kwargs: {}"
```
#### File: tests/data23/recipe-577701.py
```python
class StateMachine:
'A class that implements a flexible state machine'
def __init__( self, transitions, first=None ):
if type( transitions ) == dict:
self.sm = transitions
else:
self.first = first or transitions.split(',')[0]
smtext = [ term.split(':') for term in transitions.split() ]
self.sm = dict( [ tuple(a.split(',')), tuple(b.split(',')) ]
for a,b in smtext )
self.setstate()
def setstate( self, state=None ):
self.state = state or self.first
def __call__( self, event ):
return self.signal( event )
def signal( self, event ):
change = self.sm.get( (self.state,event) )
change = change or self.sm.get( (self.state,'*') )
change = change or self.sm.get( ('*',event) )
change = change or self.sm.get( ('*','*') )
emit, newstate = change or ('**ERROR**',self.first)
if newstate == '*':
newstate = self.state
elif newstate == '':
newstate = self.first
if callable( emit ):
result = emit( self.state, event, newstate )
else:
result = ( emit, newstate )
self.state = newstate
return result
if __name__ == '__main__':
# Example for demonstration and test.
transitions = '''
garden,climb:go-up-to-roof,roof
roof,jump:fall-through,cottage
cottage,leave:unlock-door,garden
*,need-help:can-choose-climb-jump-leave,*
*,jump:feel-tired,*
*,sleep:feel-refreshed,*
*,*:cannot-do,*
'''
houseguy = StateMachine( transitions, first='garden' )
print('I start at location: %s' % houseguy.state)
actionlist = 'sleep jump need-help climb jump jump leave climb climb'.split()
for action in actionlist:
result, newstate = houseguy( action )
print('I %s. I %s. Location: %s.' % (action, result, newstate))
```
#### File: tests/data23/recipe-577796.py
```python
import sys
import os
import random
import time
def main():
# Deliberately set all parameters as global, (my choice!).
global LoggerScreen
global MyFiles
global savefile
global plot
global position
global horiz
global demo
global pause
global pausestring
global serialport
global mybyte
global grab
global csvdata
global autosave
global filestr
# global n
# initial parameter settings...
LoggerScreen="(C)2011, B.Walker, G0LCU."
MyFiles="Data_Logger-Transient_Recorder."
savefile="/tmp/LoggerStartup.txt"
# Default DEMO mode, set to 0 for REAL mode.
demo=1
plot=0
horiz=1
position=79
pause=1
pausestring="1"
# The latest Linux device name for current Arduino variants, (01-01-2011).
serialport="/dev/ttyACM0"
mybyte="?"
grab=255
csvdata="?"
# Temporarily set to autosave enabled for testing, set to 0 to disable.
autosave=1
filestr="0000000000.CSV"
# n=0
# Determine AMIGA, Windows-(32 bit), WinUAE or Linux for serial access.
if sys.platform=="amiga":
# The AMIGA serial port may need to be changed to 1200 baud, no parity,
# 8 bit data and 1 stop bit, this applies to WinUAE too.
serialport="SER:"
if sys.platform=="linux2":
# Assumed running from root for the time being.
# /dev/ttyUSB0 the device on my test systems, the Arduino Diecimila Board.
# It may need to be changed for your needs.
serialport="/dev/ttyUSB0"
os.system("chmod 666 "+serialport)
os.system("stty -F "+serialport+" 1200")
os.system("stty -F "+serialport+" raw")
if sys.platform=="win32":
# This is the COM port number generated on a test system.
# It may need to be changed for your needs.
serialport="COM3:"
os.system("MODE "+serialport+" BAUD=1200 PARITY=N DATA=8 STOP=1 to=on")
# A clear screen function for the platforms shown.
def clrscn():
if sys.platform=="amiga": print("\f", end=' ')
if sys.platform=="linux2": print(os.system("clear"),chr(13)," ",chr(13), end=' ')
if sys.platform=="win32": print(os.system("CLS"),chr(13)," ",chr(13), end=' ')
# Save the initial screen for future use function.
def savescreen():
global MyFiles
global savefile
global LoggerScreen
if sys.platform=="amiga": savefile="S:LoggerStartup.txt"
if sys.platform=="linux2": savefile="/tmp/LoggerStartup.txt"
if sys.platform=="win32": savefile="C:\\Windows\\Temp\\LoggerStartup.txt"
MyFiles=open(savefile,"wb+")
        MyFiles.write(LoggerScreen.encode("ascii"))  # file is opened in binary mode
MyFiles.close()
# This function does the plotting and generates a text variable in CSV format.
# It also sets the timebase values as required, not implimented yet.
def doplot():
global horiz
global position
global savefile
global MyFiles
global LoggerScreen
global demo
global pause
global pausestring
global plot
global mybyte
global serialport
global grab
global csvdata
csvdata=""
horiz=1
while horiz<=64:
# Generate a byte as though grabbed from Arduino.
if demo==1: grab=int(random.random()*256)
# Generate a byte from Arduino.
if demo==0:
MyFiles=open(serialport,"rb",2)
                mybyte=MyFiles.read(1)  # single raw byte (bytes of length 1)
MyFiles.close()
# Convert to a decimal value, assume 8 bit integer.
grab=ord(mybyte)
# Generate the 64 byte CSV string on the fly...
csvdata=csvdata+str(grab)+"\r\n"
# Convert to 4 bit depth.
plot=int(grab/16)
# Invert to suit the text display window.
plot=15-plot
if plot<=0: plot=0
if plot>=15: plot=15
# Set up the plot position per grab.
position=79+horiz+plot*79
MyFiles=open(savefile,"rb+")
MyFiles.seek(position)
MyFiles.write("o")
# Now get the whole array.
MyFiles.seek(0)
            LoggerScreen=MyFiles.read(1659).decode("ascii")
MyFiles.close()
# End of screen array update per plot.
# Wait for a period for none AMIGA platforms.
if sys.platform!="amiga": time.sleep(pause)
# time.sleep() does NOT work on an A1200, WinUAE and E-UAE so pause......
if sys.platform=="amiga":
pausestring=str(pause)
os.system("C:Wait "+pausestring)
# ......and then do a clear screen.
print("\f", end=' ')
# Do a clear screen for other platforms.
if sys.platform=="linux2": print(os.system("clear"),chr(13)," ",chr(13), end=' ')
if sys.platform=="win32": print(os.system("CLS"),chr(13)," ",chr(13), end=' ')
# Now print the whole on screen...
print(LoggerScreen)
horiz=horiz+1
# This function saves a file to disk every 64 plots in CSV format.
def datafile():
global MyFiles
global filestr
global savefile
filestr=str(int(time.time()))+".CSV"
if sys.platform=="amiga": savefile="S:"
if sys.platform=="linux2": savefile="/tmp/"
if sys.platform=="win32": savefile="C:\\Windows\\Temp\\"
savefile=savefile+filestr
MyFiles=open(savefile,"wb+")
        MyFiles.write(csvdata.encode("ascii"))  # keep the explicit \r\n line endings
MyFiles.close()
# This is the main running code.
while 1:
# Set up DataLogger screen, use "\r\n" to suit Windows, "\r" is *ignored* on Linux and AMIGA_OS.
# This is for the default Command Prompt, (Windows), Terminal, (Linux) and CLI, (AMIGA), modes.
LoggerScreen="+-------+-------+-------+-------+-------+-------+-------+--------+ +--------+\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | |>(R)UN |\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | +--------+\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | +--------+\r\n"
LoggerScreen=LoggerScreen+"+-------+-------+-------+-------+-------+-------+-------+--------+ | Ctrl-C |\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | +--------+\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | +--------+\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | | (K)B |\r\n"
LoggerScreen=LoggerScreen+"+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-++ +--------+\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | +--------+\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | | (S)LOW |\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | +---------\r\n"
LoggerScreen=LoggerScreen+"+-------+-------+-------+-------+-------+-------+-------+--------+ +--------+\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | | 1(0)S |\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | +--------+\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | +--------+\r\n"
LoggerScreen=LoggerScreen+"| | | | + | | | | |>(1)S |\r\n"
LoggerScreen=LoggerScreen+"+-------+-------+-------+-------+-------+-------+-------+--------+ +--------+\r\n"
LoggerScreen=LoggerScreen+"+----------------------------------------------------------------+ +--------+\r\n"
LoggerScreen=LoggerScreen+"| Status:- Running in DEMO mode. | | (U)NCAL|\r\n"
LoggerScreen=LoggerScreen+"+----------------------------------------------------------------+ +--------+\r\n"
# Save the startscreen to write to.
savescreen()
# Clear the screen every 64 plots and restart.
clrscn()
print(LoggerScreen)
# Grab the 64 plots.
doplot()
# Automatically save to disk when autosave is set to 1.
if autosave==1: datafile()
main()
# DataLogger program end.
# Enjoy finding simple solutions to often very difficult problems.
```
#### File: tests/data23/recipe-577865.py
```python
import random
import math
def bound(coefficients):
coefficients.reverse()
n = len(coefficients) - 1
b = 0.0
for i in range(n):
b += abs(coefficients[i] / coefficients[i + 1])
coefficients.reverse()
return b
def polynomial(z, coefficients): # Horner method
t = complex(0, 0)
for c in reversed(coefficients):
t = t * z + c
return t
eps = 1e-7 # max error allowed
def DurandKerner(coefficients):
n = len(coefficients) - 1
roots = [complex(0, 0)] * n
bnd = bound(coefficients)
retry = True
while retry:
retry = False
# set initial roots as random points within bounding circle
for k in range(n):
r = bnd * random.random()
theta = 2.0 * math.pi * random.random()
roots[k] = complex(r * math.cos(theta), r * math.sin(theta))
itCtr = 0
rootsNew = roots[:]
flag = True
while flag:
flag = False
for k in range(n):
temp = complex(1.0, 0.0)
for j in range(n):
if j != k:
temp *= roots[k] - roots[j]
rootsNew[k] = roots[k] - polynomial(roots[k], coefficients) / temp
if abs(roots[k] - rootsNew[k]) > eps:
# print abs(roots[k] - rootsNew[k])
flag = True
if math.isnan(rootsNew[k].real) or math.isnan(rootsNew[k].imag):
flag = False
retry = True
print('retrying...')
break
roots = rootsNew[:]
itCtr += 1
print("iteration count: " + str(itCtr))
return roots
# example
# x**3-3*x**2+3*x-5=0
coefficients = [complex(-5, 0), complex(3, 0), complex(-3, 0), complex(1, 0)]
print("coefficients: " + str(coefficients))
print("roots: " + str(DurandKerner(coefficients)))
```
#### File: tests/data23/recipe-577959.py
```python
import threading
from queue import Queue
# Set up a queue for tasks to be run on the main thread.
# Most UI toolkits, such as GLib, provide functions to push tasks this way.
Q = Queue()
def idle_add(a,b):
Q.put((a,b))
def async_int(gen):
try: next(gen)
except StopIteration: return
def do():
try: next(gen)
except StopIteration: return
idle_add(async_int, gen)
threading.Thread(target=do).start()
def asynchronous(func):
    # 'async' became a reserved keyword in Python 3.7, so the decorator needs another name
    return lambda *a, **kw: async_int(func(*a, **kw))
@asynchronous
def test():
# We start in the main thread
print("1 %s" % threading.currentThread())
yield
    # This part is run in a separate thread, not blocking the main thread
print("2 %s" % threading.currentThread())
yield
# Now we are back in the main thread
print("3 %s" % threading.currentThread())
yield
# And in another background thread
print("4 %s" % threading.currentThread())
yield
# And we keep all internal variables between the threads!
print("5 %s" % threading.currentThread())
if __name__ == "__main__":
test()
while True:
a,b = Q.get()
a(b)
```
#### File: tests/data23/recipe-578094.py
```python
def print_dict(dictionary, ident = '', braces=1):
""" Recursively prints nested dictionaries."""
for key, value in dictionary.items():
if isinstance(value, dict):
print('%s%s%s%s' %(ident,braces*'[',key,braces*']'))
print_dict(value, ident+' ', braces+1)
else:
print(ident+'%s = %s' %(key, value))
if __name__ == '__main__':
example_dict = { 'key1' : 'value1',
'key2' : 'value2',
'key3' : { 'key3a': 'value3a' },
'key4' : { 'key4a': { 'key4aa': 'value4aa',
'key4ab': 'value4ab',
'key4ac': 'value4ac'},
'key4b': 'value4b'}
}
print_dict(example_dict)
```
#### File: tests/data23/recipe-578129.py
```python
import sys
import math
EPSILON = 0.0000001
class SimpleLinearRegression:
""" tool class as help for calculating a linear function """
def __init__(self, data):
""" initializes members with defaults """
self.data = data # list of (x,y) pairs
self.a = 0 # "a" of y = a + b*x
self.b = 0 # "b" of y = a + b*x
self.r = 0 # coefficient of correlation
def run(self):
""" calculates coefficient of correlation and
the parameters for the linear function """
sumX, sumY, sumXY, sumXX, sumYY = 0, 0, 0, 0, 0
n = float(len(self.data))
for x, y in self.data:
sumX += x
sumY += y
sumXY += x*y
sumXX += x*x
sumYY += y*y
denominator = math.sqrt((sumXX - 1/n * sumX**2)*(sumYY - 1/n * sumY**2))
if denominator < EPSILON:
return False
# coefficient of correlation
self.r = (sumXY - 1/n * sumX * sumY)
self.r /= denominator
# is there no relationship between 'x' and 'y'?
if abs(self.r) < EPSILON:
return False
# calculating 'a' and 'b' of y = a + b*x
self.b = sumXY - sumX * sumY / n
self.b /= (sumXX - sumX**2 / n)
self.a = sumY - self.b * sumX
self.a /= n
return True
def function(self, x):
""" linear function (be aware of current
coefficient of correlation """
return self.a + self.b * x
def __repr__(self):
""" current linear function for print """
return "y = f(x) = %(a)f + %(b)f*x" % self.__dict__
def example():
""" provides an example with error rates (one per session)
@note linear function verified in open office calc """
print("Simple linear regression v0.3 by <NAME> 2012")
print(("...Python %s" % sys.version.replace("\n", "")))
data = [(1.0, 18.0), (2, 15.0), (3, 19.0), (4, 10.0)]
print(("...data is %s" % data))
linRegr = SimpleLinearRegression(data)
if not linRegr.run():
print("...error: failed to calculate parameters")
return
print(("...the coefficient of correlation r = %f (r**2 is %f)" % (linRegr.r, linRegr.r**2)))
print(("...parameter a of y = f(x) = a + b*x is %f" % linRegr.a))
print(("...parameter b of y = f(x) = a + b*x is %f" % linRegr.b))
print(("...linear function is then %s" % linRegr))
print(("...forecast of next value: f(5) = %f" % linRegr.function(5)))
firstY = linRegr.function(1)
lastY = linRegr.function(4)
change = (lastY - firstY) / firstY * 100.0
# keep in mind: reducing of error rate (inverse valuation)!
if change < 0:
print(("...the trend is about %.1f%% improvement" % -change))
else:
print(("...the trend is about %.1f%% to the worse" % change))
if __name__ == "__main__":
example()
```
#### File: tests/data23/recipe-578197.py
```python
import os, sys
def main():
if len(sys.argv) - 1:
engine(' '.join(sys.argv[1:]))
else:
print(os.path.basename(sys.argv[0]), '<directory>')
def engine(path):
directories = files = 0
for information in os.walk(path):
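        # os.walk yields (dirpath, dirnames, filenames); index 1 and 2 hold the
        # subdirectory and file lists for the directory currently being visited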
directories += len(information[1])
files += len(information[2])
print('Directories =', directories)
print('Files =', files)
if __name__ == '__main__':
main()
```
#### File: tests/data23/recipe-578208.py
```python
from random import random
# minimum finder
def minimum(value_one, value_two):
if value_one < value_two:
return value_one
return value_two
# main function
def simulate(boys, girls, years, total, fast):
    # use two separate list objects so boys' and girls' age counts never alias each other
b_ages = list(range(70))
g_ages = list(range(70))
# setup the ages
for age in range(70):
b_ages[age] = g_ages[age] = 0
b_ages[20] = boys
g_ages[20] = girls
# simulator
for year in range(years):
# slow printer
if not fast:
print('Year =', year)
sum = 0
for age in range(70):
sum += b_ages[age]
print('Boys =', sum)
sum = 0
for age in range(70):
sum += g_ages[age]
print('Girls =', sum)
print('Boy Ages =', str(b_ages)[1:-1])
print('Girl Ages =', str(g_ages)[1:-1])
# find out the number of offspring
b_born = g_born = 0
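        # adults aged 20-49 pair up; each age group's pairs produce a random number
        # of children, which are then split randomly between boys and girls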
for age in range(20, 50):
pairs = minimum(b_ages[age], g_ages[age])
total_born = int(random() * (pairs + 1))
half = int(random() * (total_born + 1))
b_born += total_born - half
g_born += half
# make everyone age one year
for age in range(68, -1, -1):
b_ages[age + 1] = b_ages[age]
g_ages[age + 1] = g_ages[age]
# add the offspring
b_ages[0] = b_born
g_ages[0] = g_born
# check for total population
if total != 0:
sum = 0
for age in range(70):
sum += b_ages[age] + g_ages[age]
if total <= sum:
break
# pause for reading
if not fast:
input('Pausing ...')
# finish the simulation
print('Year =', year + 1)
sum = 0
for age in range(70):
sum += b_ages[age]
print('Boys =', sum)
sum = 0
for age in range(70):
sum += g_ages[age]
print('Girls =', sum)
print('Boy Ages =', str(b_ages)[1:-1])
print('Girl Ages =', str(g_ages)[1:-1])
# calculate the total
sum = 0
for age in range(70):
sum += b_ages[age] + g_ages[age]
print('There are a total of', sum, 'people.')
# get input
def start():
global notes_words
loop = True
while loop:
try:
boys = int(input('How many boys should we start with? '))
loop = False
except:
pass
loop = True
while loop:
try:
girls = int(input('How many girls should we start with? '))
loop = False
except:
pass
loop = True
while loop:
try:
years = int(input('How many years should we simulate? '))
loop = False
except:
pass
loop = True
while loop:
try:
total = int(input('How many people do we want? '))
loop = False
except:
pass
# more vocabulary
accept = ['yes', 'y', 'yeah', 'si']
deny = ['no', 'n', 'nada', 'never']
loop = True
while loop:
try:
fast = input('Should we go fast? ')
if fast.lower() in accept:
fast = True
loop = False
elif fast.lower() in deny:
fast = False
loop = False
elif fast.lower() in notes_words:
print('The available commands are ' + str(accept)[1:-1] + ', ' \
+ str(deny)[1:-1] + ', ' + str(notes_words)[1:-1] + '.')
else:
print('"' + fast + '" is not something that I understand.')
except:
pass
try:
simulate(boys, girls, years, total, fast)
except:
print('The simulation crashed !!!')
# define the vocabulary
start_words = ['start', 'begin', 'exe', 'execute']
break_words = ['break', 'exit', 'end', 'quit']
notes_words = ['help', '?', '/?', '-?']
# get command
print('Executing Population Simulator ...')
while True:
prompt = input('Please enter a command: ')
if prompt.lower() in start_words:
start()
elif prompt.lower() in break_words:
break
elif prompt.lower() in notes_words:
print('The available commands are ' + str(start_words)[1:-1] + ', ' + \
str(break_words)[1:-1] + ', ' + str(notes_words)[1:-1] + '.')
else:
print('"' + prompt + '" is not something that I understand.')
```
#### File: tests/data23/recipe-578333.py
```python
import time
bloated_string = 'bloat me' * 10
# method 1, simple concatenation
def slow():
test_string = ''
start = time.clock()
for i in range(1000):
test_string += bloated_string
end = time.clock()
delta = float(end-start)
# print 'slow len: ', len(test_string)
return delta
# method 2, use list.append() and ''.join()
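# (appending to a list and joining once avoids building many intermediate strings,
#  which is why methods 2 and 3 are usually faster than repeated '+=' on a str)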
def fast():
test_string = list()
start = time.clock()
for i in range(1000):
test_string.append('%s' % bloated_string)
test_string = ''.join(test_string)
end = time.clock()
delta = float(end-start)
# print 'fast len: ', len(test_string)
return delta
# method 3, use list comprehension and ''.join()
def fastest():
test_string = bloated_string
start = time.clock()
test_string = ''.join([test_string for i in range(1000)])
end = time.clock()
delta = float(end-start)
# print 'fastest len', len(test_string)
return delta
if __name__ == '__main__':
print('--- CPU TIMES ---')
delta_slow = slow()
print('delta slow: {delta_slow}'.format(delta_slow=delta_slow))
delta_fast = fast()
print('delta fast: {delta_fast}'.format(delta_fast=delta_fast))
delta_fastest = fastest()
print('delta fastest: {delta_fastest}'.format(delta_fastest=delta_fastest))
print('---')
    print("listcomps is %f times faster than (list.append + ''.join())" %
          (delta_fast/delta_fastest))
print('the latter is %f times faster (slower) than simple concat' %\
(delta_slow/delta_fast))
```
#### File: tests/data23/recipe-64937.py
```python
import re, string, sys, urllib.request, urllib.parse, urllib.error
"""
Various patterns I have encountered in looking for the babelfish result.
We try each of them in turn, based on the relative number of times I've
seen each of these patterns. $1.00 to anyone who can provide a heuristic
for knowing which one to use. This includes AltaVista employees.
"""
__where = [ re.compile(r'name=\"q\">([^<]*)'),
re.compile(r'td bgcolor=white>([^<]*)'),
re.compile(r'<\/strong><br>([^<]*)')
]
__languages = { 'english' : 'en',
'french' : 'fr',
'spanish' : 'es',
'german' : 'de',
'italian' : 'it',
'portugese' : 'pt',
}
"""
All of the available language names.
"""
available_languages = [ x.title() for x in list(__languages.keys()) ]
"""
Calling translate() or babelize() can raise a BabelizerError
"""
class BabelizerError(Exception):
pass
class LanguageNotAvailableError(BabelizerError):
pass
class BabelfishChangedError(BabelizerError):
pass
class BabelizerIOError(BabelizerError):
pass
def clean(text):
    return ' '.join(text.strip().replace("\n", ' ').split())
def translate(phrase, from_lang, to_lang):
phrase = clean(phrase)
try:
from_code = __languages[from_lang.lower()]
to_code = __languages[to_lang.lower()]
except KeyError as lang:
raise LanguageNotAvailableError(lang)
params = urllib.parse.urlencode( { 'BabelFishFrontPage' : 'yes',
'doit' : 'done',
'urltext' : phrase,
'lp' : from_code + '_' + to_code } )
try:
        response = urllib.request.urlopen('http://babelfish.altavista.com/tr', params.encode('ascii'))
except IOError as what:
raise BabelizerIOError("Couldn't talk to server: %s" % what)
    except:
        print("Unexpected error:", sys.exc_info()[0])
        raise
    html = response.read().decode('latin-1')  # bytes -> str so the regexes below can search it
for regex in __where:
match = regex.search(html)
if match: break
if not match: raise BabelfishChangedError("Can't recognize translated string.")
return clean(match.group(1))
def babelize(phrase, from_language, through_language, limit = 12, callback = None):
phrase = clean(phrase)
seen = { phrase: 1 }
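    # translate back and forth between the two languages until the text stops
    # changing (a fixed point is reached) or the iteration limit runs out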
if callback:
callback(phrase)
else:
results = [ phrase ]
flip = { from_language: through_language, through_language: from_language }
next = from_language
for i in range(limit):
phrase = translate(phrase, next, flip[next])
if phrase in seen: break
seen[phrase] = 1
if callback:
callback(phrase)
else:
results.append(phrase)
next = flip[next]
if not callback: return results
if __name__ == '__main__':
import sys
def printer(x):
print(x)
sys.stdout.flush();
babelize("I won't take that sort of treatment from you, or from your doggie!",
'english', 'french', callback = printer)
```
#### File: tests/data23/recipe-66003.py
```python
class Error(Exception):
def __init__(self, errcode, heading_num = 0, sublist_length = 0):
self.errcode = errcode
if self.errcode == "Length Error - Sublists":
self.message = ["All the sublists must be of uniform length."]
elif self.errcode == "Heading Error - Empty Item":
self.message = ["There is at least one empty heading item.\n",
"Please supply only non-empty headings."]
elif self.errcode == "Heading Error - heading/sublist missmatch":
self.message = ["Number of headings=",repr(heading_num), "\n",
"Number of elements in sublists=", repr(sublist_length), "\n",
"These numbers must be equal."]
print(self.message)
else: self.message = ""
self.errmsg = "".join(self.message)
def __str__(self):
return (self.errmsg)
pass
def escape(s):
"""Replace special characters '&', "'", '<', '>' and '"' by XML entities."""
s = s.replace("&", "&") # Must be done first!
s = s.replace("'", "'")
s = s.replace("<", "<")
s = s.replace(">", ">")
s = s.replace('"', """)
return s
def cleanString(s, ident):
if type(s) != type(""):
s = repr(s)
s = escape(s)
if ident == "tag":
s = s.lower()
s = s.replace(" ", "_")
return s
def LL2XML(LL,headings_tuple = (), root_element = "rows", row_element = "row", xml_declared = "yes"):
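    # LL is a list of rows (lists); headings_tuple optionally names the elements,
    # or the string "table" requests plain HTML table output; with no headings_tuple
    # the first row of LL is used as the headings and removed from the data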
if headings_tuple == "table":
td_list = []
for item in LL[0]:
td_list.append("td")
headings_tuple = tuple(td_list)
root_element = "table"
row_element = "tr"
xml_declared = "no"
root_element = cleanString(root_element, "tag")
row_element = cleanString(row_element, "tag")
if headings_tuple == ():
headings = [cleanString(s,"tag") for s in LL[0]]
LL = LL[1:] # remove now redundant heading row
else:
headings = [cleanString(s,"tag") for s in headings_tuple]
# Sublists all of the same length?
if ['!' for sublist in LL if len(sublist) != len(LL[0])]:
raise Error("Length Error - Sublists")
#check headings
heading_num = len(headings)
if heading_num != len(LL[0]):
raise Error("Heading Error - heading/sublist missmatch", heading_num, len(LL[0]))
for item in headings:
if not cleanString(item,"heading"):
raise Error("Heading Error - Empty Item")
else:
pass
# Do the conversion
xml = ""
if xml_declared == "yes":
xml_declaration = '<?xml version="1.0" encoding="iso-8859-1"?>\n'
else:
xml_declaration = ""
bits = []
add_bit = bits.append
add_bit(xml_declaration)
add_bit('<')
add_bit(root_element)
add_bit('>')
for sublist in LL:
add_bit("\n <")
add_bit(row_element)
add_bit(">\n")
i = 0
for item in sublist:
tag = headings[i]
item = cleanString(item, "item")
add_bit(" <")
add_bit(tag)
add_bit(">")
add_bit(item)
add_bit("</")
add_bit(tag)
add_bit(">\n")
i = i+1
add_bit(" </")
add_bit(row_element)
add_bit(">")
add_bit("\n</")
add_bit(root_element)
add_bit(">")
xml = "".join(bits)
return xml
def test():
LL = [['Login', 'First Name', 'Last Name', 'Job', 'Group', 'Office', 'Permission'],
['auser', 'Arnold', 'Atkins', 'Partner', 'Tax', 'London', 'read'],
['buser', 'Bill', 'Brown', 'Partner', 'Tax', 'New York', 'read'],
['cuser', 'Clive', 'Cutler', 'Partner', 'Management', 'Brussels', 'read'],
['duser', 'Denis', 'Davis', 'Developer', 'ISS', 'London', 'admin'],
['euser', 'Eric', 'Ericsson', 'Analyst', 'Analysis', 'London', 'admin'],
['fuser', 'Fabian', 'Fowles', 'Partner', 'IP', 'London', 'read']]
LL_no_heads = [['auser', 'Arnold', 'Atkins', 'Partner', 'Tax', 'London', 'read'],
['buser', 'Bill', 'Brown', 'Partner', 'Tax', 'New York', 'read'],
['cuser', 'Clive', 'Cutler', 'Partner', 'Management', 'Brussels', 'read'],
['duser', 'Denis', 'Davis', 'Developer', 'ISS', 'London', 'admin'],
['euser', 'Eric', 'Ericsson', 'Analyst', 'Analysis', 'London', 'admin'],
['fuser', 'Fabian', 'Fowles', 'IP', 'Partner', 'London', 'read']]
#Example 1
print("Example 1: Simple case, using defaults.\n")
print(LL2XML(LL))
print("\n")
#Example 2
print("""Example 2: LL has its headings in the first line, and we define our root and row element names.\n""")
print(LL2XML(LL,(),"people","person"))
print("\n")
#Example 3
print("""Example 3: headings supplied using the headings argument(tuple), using default root and row element names.\n""")
print(LL2XML(LL_no_heads,("Login","First Name","Last Name","Job","Group","Office","Permission")))
print("\n")
#Example 4
print("""Example 4: The special case where we ask for an HTML table as output by just giving the string "table" as the second argument.\n""")
print(LL2XML(LL,"table"))
```
#### File: tests/data23/recipe-67682.py
```python
VOS_DOS = 0x00010000
VOS_OS216 = 0x00020000
VOS_OS232 = 0x00030000
VOS_NT = 0x00040000
VOS__BASE = 0x00000000
VOS__WINDOWS16 = 0x00000001
VOS__PM16 = 0x00000002
VOS__PM32 = 0x00000003
VOS__WINDOWS32 = 0x00000004
VOS_DOS_WINDOWS16 = 0x00010001
VOS_DOS_WINDOWS32 = 0x00010004
VOS_OS216_PM16 = 0x00020002
VOS_OS232_PM32 = 0x00030003
VOS_NT_WINDOWS32 = 0x00040004
def normalizer(s):
for j in range(len(s)):
if len(s[j]) > 3:
k = s[j][2:]
else:
k = '0' + s[j][2:]
s[j] = k
return s
def calcversioninfo(fn):
ostypes = [VOS_DOS, VOS_NT, VOS__WINDOWS32, VOS_DOS_WINDOWS16,
VOS_DOS_WINDOWS32, VOS_NT_WINDOWS32]
verstrings = []
sigstrings = findsignatures(fn)
if sigstrings[0] == '':
print('No Version Information Available')
return
for i in sigstrings:
FV = normalizer(i.split(',')[8:16])
FOS = normalizer(i.split(',')[32:36])
hexver = FV[3]+FV[2]+FV[1]+FV[0]+':'+FV[7]+FV[6]+FV[5]+FV[4]
        OStag = int('0x' + FOS[3] + FOS[2] + FOS[1] + FOS[0], 16)
if OStag not in ostypes:
continue
if hexver not in verstrings:
verstrings.append(hexver)
myver = max(verstrings)
return parsver(myver)
def createparsestruct(b):
s= ''
for i in range(len(b)):
        s += hex(b[i]) + ','  # indexing bytes already yields an int in Python 3
return s[:-1]
def findsignatures(file):
f = open(file, 'rb')
sz = f.read()
f.close()
res = []
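    # 0xFEEF04BD is the VS_FIXEDFILEINFO signature; stored little-endian it shows
    # up in the file as the byte sequence BD 04 EF FE searched for below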
    indx = sz.find(b'\xbd\x04\xef\xfe')
    cnt = sz.count(b'\xbd\x04\xef\xfe')
    while cnt > 1:
        s = createparsestruct(sz[indx:indx+52])
        sz = sz[indx+1:]
        cnt = sz.count(b'\xbd\x04\xef\xfe')
        indx = sz.find(b'\xbd\x04\xef\xfe')
res.append(s)
res.append(createparsestruct(sz[indx:indx+52]))
return res
def parsver(v):
a,b,c,d = v[:4], v[4:8], v[9:13], v[13:]
return str(int(a,16)) + '.'+ str(int(b,16)) +'.' + str(int(c,16)) + '.' + str(int(d,16))
```
#### File: tests/data23/recipe-68417.py
```python
import os, sys, string, copy, getopt
def usage():
print("""patchdiff generates a listing of patches
that are different between two solaris boxes.
usage: patchdiff hostname1 hostname2""")
sys.exit(1)
def getpatches(target):
f = os.popen('/usr/local/bin/ssh ' + target + ' /bin/showrev -p', 'r')
patch = ['', {'obsoletes': None, 'requires': None, 'incompatibles': None, 'packages': None}]
patch_listing = []
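    # each line of 'showrev -p' output has the form:
    #   Patch: <id> Obsoletes: ... Requires: ... Incompatibles: ... Packages: ...
    # which is what the slicing between keyword tokens below relies on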
while 1:
        line = f.readline()[:-1].split()
if not line: break # Break at EOF
patch[0] = line[1]
patch[1]['obsoletes'] = line[line.index('Obsoletes:')+1:line.index('Requires:')]
patch[1]['requires'] = line[line.index('Requires:')+1:line.index('Incompatibles:')]
patch[1]['incompatibles'] = line[line.index('Incompatibles:')+1:line.index('Packages:')]
patch[1]['packages'] = line[line.index('Packages:')+1:]
patch_listing.append([patch[0],copy.copy(patch[1])])
return patch_listing
def compare(a,b):
a_extra = []
b_extra = []
for i in a:
if i not in b:
a_extra.append(i)
for i in b:
if i not in a:
b_extra.append(i)
return (a_extra,b_extra)
def collapse(a):
a.sort()
older = []
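    # patch ids look like "123456-07": the first six characters identify the patch
    # and the digits after the dash are the revision; drop all but the newest revision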
for i in range(0,len(a)):
next = i+1
try:
if a[i][0][0:6] == a[next][0][0:6]:
if a[i][0][7:9] < a[next][0][7:9]:
older.append([a[i][0],copy.copy(a[i][1])])
except: pass
for i in older: a.remove(i)
return a
def printout(differences):
for i in differences[0]:
print(i[0] + "\t", end=' ')
for j in differences[1]:
if i[0][0:6] == j[0][0:6]:
print(j[0], end=' ')
print("")
if len(sys.argv) != 3: usage()
options, target = getopt.getopt(sys.argv[1:], '')
patches = (collapse(getpatches(target[0])),collapse(getpatches(target[1])))
differences = compare(patches[0], patches[1])
print(target[0] + '\t' + target[1])
print('---------\t---------')
printout(differences)
```
#### File: tests/examples-bad/classdup.py
```python
class Foo0():
def __init__(self):
pass
foo1 = Foo0()
class Foo0(): ## error: redefined class
def __init__(self, a):
pass
foo2 = Foo0()
```
#### File: tests/examples-good/3.py
```python
def get_node():
return 1
node = get_node()
node.foo() ## ok, modification
node += 3 ## ok, modification
```
#### File: tests/expect-fail23/recipe-131495.py
```python
class sample(object):
class one(object):
def __get__(self, obj, type=None):
print("computing ...")
obj.one = 1
return 1
one = one()
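    # the descriptor computes the value once and then shadows itself with a plain
    # instance attribute, so subsequent lookups never call __get__ again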
x=sample()
print(x.one)
print(x.one)
#
# other solution:
#
# lazy attribute descriptor
class lazyattr(object):
def __init__(self, fget, doc=''):
self.fget = fget
self.__doc__ = doc
def __appoint__(self, name, cl_name):
if hasattr(self,"name"):
raise SyntaxError("conflict between "+name+" and "+self.name)
self.name = name
def __get__(self, obj, cl=None):
if obj is None:
return self
value = self.fget(obj)
setattr(obj, self.name, value)
return value
# appointer metaclass:
# call the members __appoint__ method
class appointer(type):
def __init__(self, cl_name, bases, namespace):
for name,obj in namespace.items():
try:
obj.__appoint__(name, cl_name)
except AttributeError:
pass
super(appointer, self).__init__(cl_name, bases, namespace)
# base class for lazyattr users
class lazyuser(object, metaclass=appointer):
pass
# usage sample
class sample(lazyuser):
def one(self):
print("computing ...")
return 1
one = lazyattr(one, "one lazyattr")
x=sample()
print(x.one)
print(x.one)
del x.one
print(x.one)
```
#### File: tests/expect-fail23/recipe-275150.py
```python
import cgi
print("Content-type: text/html\n\n")
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
bsize = 3
playerToken = "X"
myToken = "0"
gameOver = 0
winArr = []
rowArr = []
colArr = []
digArr = []
x = 0
while x < bsize * bsize :
rowArr.append(0)
colArr.append(0)
digArr.append(0)
x = x + 1
out1 = """<html>
<head>
<title>Tic Tac Toe in Python</title>
<style type="text/css">
.main{border:#9999CC solid 2px; width:350px}
.btn{font-family:comic sans ms,verdana,arial,helvetica; font-size:20pt; font-weight:bold; background:#9999CC; width:50px; height:50px; border:#666699 solid 1px; cursor:hand; color:#EFEFFF}
.btn_over{font-family:comic sans ms,verdana,arial,helvetica; font-size:20pt; font-weight:bold; background:#EFEFFF; width:50px; height:50px; border:#666699 solid 1px; cursor:hand; color:#9999CC}
.btn_down{font-family:comic sans ms,verdana,arial,helvetica; font-size:20pt; font-weight:bold; background:#666699; width:50px; height:50px; border:#666699 solid 1px; cursor:hand; color:#EFEFFF}
.footer{font-family:verdana,arial,helvetica; font-size:8pt; color:#FFFFFF}
.link{font-family:verdana,arial,helvetica; font-size:8pt; color:#FFFFFF}
.link:hover{font-family:verdana,arial,helvetica; font-size:8pt; color:#EFEFFF}
</style>
<script language="JavaScript">
var doneFlag=false;
function toggleVal(who) {
var check;
eval('check=document.ttt.'+who+'_btn.value;');
if(check==" ") {
if(!doneFlag) {
eval('document.ttt.'+who+'_btn.value="X";');
eval('document.ttt.'+who+'_btn.disabled="true";');
eval('document.ttt.'+who+'.value="X";');
document.ttt.submit();
doneFlag=true;
document.getElementById('process').innerHTML="Processing.........";
}
}
else {
alert('Invalid Move!');
}
}
</script>
</head>
<body>
<table width="100%" height="100%"><tr><td align="center">
<table width="346" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
<table width="348" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
<table align="center" cellspacing="0" cellpadding="0" class="main"><tr><td align="center">
<table width="100%" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td align="center"><a href="pyttt.py"><img src="../ttt_py.gif" border="0" alt="Tic Tac Toe (in Python)"></a></td></tr></table>
<table width="100%" bgcolor="#EFEFFF" cellspacing="0" cellpadding="0"><tr><td align="center"><a href="http://www.qiksearch.com"><img src="../qiksearch_ttt_py.gif" border="0" alt="www.qiksearch.com"></a></td></tr></table>"""
print(out1)
def genBox(size):
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
count = 0
retVal = '<form name="ttt" method="post" action="pyttt.py">'
i = 0
while i < size :
j = 0
while j < size :
count = count + 1
retVal = retVal + '<input type="button" name="s' + str(count) + '_btn" value=" " class="btn" onClick="toggleVal(\'s' + str(count) + '\')" onMouseover="this.className=\'btn_over\'" onMouseout="this.className=\'btn\'" onMousedown="this.className=\'btn_down\'"><input type="hidden" name="s' + str(count) + '" value=" ">'
j = j + 1
retVal = retVal + '<br>'
i = i + 1
retVal = retVal + '</form>'
print(retVal)
def genBox2(size,arr):
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
count = 0
retVal = '<form name="ttt" method="post" action="pyttt.py">'
i = 0
while i < size :
j = 0
while j < size :
count = count + 1
retVal = retVal + '<input type="button" name="s' + str(count) + '_btn" value="' + str(arr[count-1]) + '" class="btn" onClick="toggleVal(\'s' + str(count) + '\')" onMouseover="this.className=\'btn_over\'" onMouseout="this.className=\'btn\'" onMousedown="this.className=\'btn_down\'"><input type="hidden" name="s' + str(count) + '" value="' + str(arr[count-1]) + '">'
j = j + 1
retVal = retVal + '<br>'
i = i + 1
retVal = retVal + '</form>'
print(retVal)
def isEmpty(who):
if who == " ":
return 1
else:
return 0;
def move(bsize,arr):
global playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
count = 0
maxCount = 0
pos = 0
retVal = 0
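    # heuristic: count the player's tokens in every row, column and diagonal
    # (lines that are already full get marked -1), then drop the computer's token
    # on the first empty square of the line with the highest count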
# Build Row Array
i = 0
while i < bsize :
maxCount = 0
fullCounter = 0
j = 0
while j < bsize :
count = count + 1
who = arr[count-1]
if who == playerToken :
maxCount = maxCount + 1
fullCounter = fullCounter + 1
if who == myToken :
fullCounter = fullCounter + 1
j = j + 1
rowArr[i] = maxCount
if fullCounter == bsize :
rowArr[i] = -1
i = i + 1
# Building Column Array
i = 0
while i < bsize :
count = i + 1
maxCount = 0
fullCounter = 0
j = 0
while j < bsize :
who = arr[count-1]
if who == playerToken :
maxCount = maxCount + 1
fullCounter = fullCounter + 1
if who == myToken :
fullCounter = fullCounter + 1
count = count + bsize
j = j + 1
colArr[i] = maxCount
if fullCounter == bsize :
colArr[i] = -1
i = i + 1
# Building Diagonal Array
i = 0
while i < 2 :
if i == 0 :
count = i + 1
else:
count = bsize
maxCount = 0
fullCounter = 0
j = 0
while j < bsize :
who = arr[count-1]
if who == playerToken :
maxCount = maxCount + 1
fullCounter = fullCounter + 1
if who == myToken :
fullCounter = fullCounter + 1
if i == 0 :
count = count + bsize + 1
else:
count = count + bsize - 1
j = j + 1
digArr[i] = maxCount
if fullCounter == bsize :
digArr[i] = -1
i = i + 1
# Finding Max Values
maxRow = myMax(0,bsize,"row",rowArr)
maxCol = myMax(0,bsize,"col",colArr)
maxDig = myMax(0,bsize,"dig",digArr)
maxArrs = []
maxArrs.append(myMax(1,bsize,"row",rowArr))
maxArrs.append(myMax(1,bsize,"col",colArr))
maxArrs.append(myMax(1,bsize,"dig",digArr))
if myMax(0,bsize,"x",maxArrs) == 0 :
pos = bsize * (maxRow + 1) - bsize
if myMax(0,bsize,"x",maxArrs) == 1 :
pos = maxCol
if myMax(0,bsize,"x",maxArrs) == 2 :
if maxDig == 0 :
pos = maxDig
else:
pos = bsize - 1
retFlag = 0
y = 0
while y < bsize :
if not(retFlag):
if arr[pos] == " " :
retVal = pos
retFlag = 1
if myMax(0,bsize,"x",maxArrs) == 0 :
pos = pos + 1
if myMax(0,bsize,"x",maxArrs) == 1 :
pos = pos + bsize
if myMax(0,bsize,"x",maxArrs) == 2 :
if maxDig == 0 :
pos = pos + bsize + 1
else:
pos = pos + bsize - 1
y = y + 1
return retVal
def myMax(what,bsize,type,arr):
global playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
max = -1
maxIndex = -1
if type != "dig" :
i = 0
while i < bsize :
if arr[i] > max :
max = arr[i]
maxIndex = i
i = i + 1
if type == "dig" :
i = 0
while i < 2 :
if arr[i] > max :
max = arr[i]
maxIndex = i
i = i + 1
if what == 0 :
return maxIndex
else:
return max
def playerWin():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = playerToken
if (s1 == who == s2 == s3) or (s4 == who == s5 == s6) or (s7 == who == s8 == s9) or (s1 == who == s4 == s7) or (s2 == who == s5 == s8) or (s3 == who == s6 == s9) or (s1 == who == s5 == s9) or (s3 == who == s5 == s7) :
return 1
else:
return 0
def iWin():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = myToken
if (s1 == who == s2 == s3) or (s4 == who == s5 == s6) or (s7 == who == s8 == s9) or (s1 == who == s4 == s7) or (s2 == who == s5 == s8) or (s3 == who == s6 == s9) or (s1 == who == s5 == s9) or (s3 == who == s5 == s7) :
return 1
else:
return 0
def whereWinComp():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = myToken
if (s1 == who == s2 == s3) :
winArr = ['s1','s2','s3']
if (s4 == who == s5 == s6) :
winArr = ['s4','s5','s6']
if (s7 == who == s8 == s9) :
winArr = ['s7','s8','s9']
if (s1 == who == s4 == s7) :
winArr = ['s1','s4','s7']
if (s2 == who == s5 == s8) :
winArr = ['s2','s5','s8']
if (s3 == who == s6 == s9) :
winArr = ['s3','s6','s9']
if (s1 == who == s5 == s9) :
winArr = ['s1','s5','s9']
if (s3 == who == s5 == s7) :
winArr = ['s3','s5','s7']
def whereWinPlayer():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = playerToken
if (s1 == who == s2 == s3) :
winArr = ['s1','s2','s3']
if (s4 == who == s5 == s6) :
winArr = ['s4','s5','s6']
if (s7 == who == s8 == s9) :
winArr = ['s7','s8','s9']
if (s1 == who == s4 == s7) :
winArr = ['s1','s4','s7']
if (s2 == who == s5 == s8) :
winArr = ['s2','s5','s8']
if (s3 == who == s6 == s9) :
winArr = ['s3','s6','s9']
if (s1 == who == s5 == s9) :
winArr = ['s1','s5','s9']
if (s3 == who == s5 == s7) :
winArr = ['s3','s5','s7']
def draw():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
drawCounter = 0
dCounter = 0
while dCounter < len(vals) :
if vals[dCounter] != " " :
drawCounter = drawCounter + 1
dCounter = dCounter + 1
if drawCounter == bsize * bsize :
return 1
else:
return 0
form = cgi.FieldStorage()
if form :
s1 = form['s1'].value
s2 = form['s2'].value
s3 = form['s3'].value
s4 = form['s4'].value
s5 = form['s5'].value
s6 = form['s6'].value
s7 = form['s7'].value
s8 = form['s8'].value
s9 = form['s9'].value
vals = [s1,s2,s3,s4,s5,s6,s7,s8,s9]
if draw() or playerWin() :
gameOver = 1
# Computer's Move!
movIndex = move(bsize,vals)
if not(gameOver) :
vals[movIndex] = myToken
# Update S's
if not(gameOver) :
if movIndex == 0 :
s1 = myToken
if movIndex == 1 :
s2 = myToken
if movIndex == 2 :
s3 = myToken
if movIndex == 3 :
s4 = myToken
if movIndex == 4 :
s5 = myToken
if movIndex == 5 :
s6 = myToken
if movIndex == 6 :
s7 = myToken
if movIndex == 7 :
s8 = myToken
if movIndex == 8 :
s9 = myToken
genBox2(bsize,vals)
if playerWin() :
print('<font face="verdana,arial,helvetica" color="#009900" size="4"><b>Wow! You Won!</b></font><br><br>')
print('<input type="button" onClick="location.href=\'pyttt.py\'" value="Play Again!" style="background:#CCCCCC; font-weight:bold; cursor:hand"><br><br>')
whereWinPlayer()
print('<script language="JavaScript">')
winCount = 0
while winCount < len(winArr) :
print('document.ttt.' + winArr[winCount] + '_btn.style.color=\'#009900\';')
winCount = winCount + 1
w = 0
while w < (bsize * bsize) :
if vals[w] == " " :
print('document.ttt.s' + str(w + 1) + '_btn.disabled=true;')
w = w + 1
print('</script>')
gameOver = 1
if iWin() and not(gameOver) :
print('<font face="verdana,arial,helvetica" color="#FF0000" size="4"><b>Oops! You Lost!</b></font><br><br>')
print('<input type="button" onClick="location.href=\'pyttt.py\'" value="Play Again!" style="background:#CCCCCC; font-weight:bold; cursor:hand"><br><br>')
whereWinComp()
print('<script language="JavaScript">')
winCount = 0
while winCount < len(winArr) :
print('document.ttt.' + winArr[winCount] + '_btn.style.color=\'#FF0000\';');
winCount = winCount + 1
w = 0
while w < bsize * bsize :
if vals[w] == " " :
print('document.ttt.s' + str(w + 1) + '_btn.disabled=true;')
w = w + 1
print('</script>')
gameOver = 1
if draw() and not(playerWin()) and not(iWin()) :
print('<font face="verdana,arial,helvetica" color="#000000" size="4"><b>It\'s a Draw!</b></font><br><br>')
print('<input type="button" onClick="location.href=\'pyttt.py\'" value="Play Again!" style="background:#CCCCCC; font-weight:bold; cursor:hand"><br><br>')
print('<script language="JavaScript">')
w = 0
while w < bsize * bsize :
if vals[w] == " " :
print('document.ttt.s' + str(w + 1) + '_btn.disabled=true;')
w = w + 1
print('</script>')
else:
genBox(bsize)
out2 = """<div style="font-family:verdana,arial,helvetica; font-weight:bold; font-size:10pt; color:#CC0000; background:#EFEFFF; width:100%; padding:3px" id="process"></div>
<table width="100%" bgcolor="#9999CC"><tr><td><span class="footer">© 2004 <a href="http://www.qiksearch.com" class="link"><NAME></a> | <a href="http://www.guestbookdepot.com/cgi-bin/guestbook.cgi?book_id=374186" class="link">Sign my Guestbook</a>.</span></td></tr></table>
</td></tr></table>
<table width="348" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
<table width="346" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
</td></tr></table>
</body>
</html>"""
print(out2)
```
#### File: tests/expect-fail23/recipe-440542.py
```python
__author__ = '<NAME>'
import copy
def uniqueInsert(l, v):
'''
Add v to list if it is not already there, else raise ValueError
'''
if v is not None:
if v in l:
raise ValueError('list already contains value %s' % v)
assert 0 < v < 10, 'Only 1-9 allowed, got %s' % v
l.append(v)
class Sudoku:
def submat(self, i, j):
'''
Return i, j 3x3 submatrix of self.
'''
mat = self.mat
out = []
for srow_i in range(3):
row = []
for scol_i in range(3):
v = mat[i * 3 + srow_i][j * 3 + scol_i]
row.append(v)
out.append(row)
return out
def copy(self):
return Sudoku(copy.deepcopy(self.mat))
def add(self, v, i, j):
'''
Fill in an entry in self.mat
'''
self.mat[i][j] = v
uniqueInsert(self.rows[i], v)
uniqueInsert(self.cols[j], v)
sub_i = i // 3 * 3 + j // 3
uniqueInsert(self.subs[sub_i], v)
def __init__(self, mat):
'''
Create a new Sudoku instance.
mat -- 9x9 array of digits 1-9
or None if no value is known for that spot
'''
self.mat = mat
# keep track of all values used in each row, column and sub-matrix.
rows = [[] for i in range(9)]
cols = [[] for i in range(9)]
subs = [[] for i in range(9)]
for row_i in range(9):
for col_i in range(9):
v = self.mat[row_i][col_i]
uniqueInsert(rows[row_i], v)
uniqueInsert(cols[col_i], v)
for srow_i in range(3):
for scol_i in range(3):
sub = self.submat(srow_i, scol_i)
for i in range(3):
for j in range(3):
v = sub[i][j]
sub_i = srow_i * 3 + scol_i
uniqueInsert(subs[sub_i], v)
self.rows = rows
self.cols = cols
self.subs = subs
def __repr__(self):
out = ''
for i in range(9):
if i % 3 == 0:
out += '+-------+-------+-------+\n'
for j in range(9):
if j % 3 == 0:
out += '| '
v = self.mat[i][j]
if v is not None:
out += '%1d ' % v
else:
out += ' '
out += '|\n'
out += '+-------+-------+-------+\n'
return out
def solve(self):
'''
Solve for the unknown positions of the puzzle
'''
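        # strategy: pick the empty cell with the fewest legal candidates
        # (most-constrained-variable heuristic) and recurse on each candidate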
min_poss = 9 # Minimum possible number of choices for a cell
done = True
for i in range(9):
for j in range(9):
sub_i = i // 3 * 3 + j // 3 # sub-matrix index
v = self.mat[i][j]
if v:
pass
else:
# not all values filled out so we are not done yet
done = False
all = set(range(1, 10))
# determine all possible values for this cell
possible = (all.difference(self.rows[i])
.difference(self.cols[j])
.difference(self.subs[sub_i]))
# see if we have run into a brick wall
if len(possible) == 0:
raise ValueError('Sudoku not solvable')
elif len(possible) < min_poss:
# keep track of cell with smallest number of choices
min_poss = len(possible)
best = possible
min_i = i
min_j = j
if done:
out = self
else:
# Try these possibilities and recurse
for b in best:
print(min_i, min_j, b)
trial = self.copy()
trial.add(b, min_i, min_j)
print(trial)
try:
soln = trial.solve()
break
except ValueError:
soln = None
if soln is None:
print(self)
raise ValueError('Sudoku not solvable')
out = soln
return out
N = None
easy = [
[7, N, N, 1, 5, N, N, N, 8],
[N, N, 4, N, N, 2, N, N, N],
[N, N, N, N, N, 4, 5, 6, N],
[6, N, N, N, N, N, N, 2, 9],
[5, N, 2, N, N, N, 8, N, 4],
[3, 4, N, N, N, N, N, N, 1],
[N, 3, 8, 6, N, N, N, N, N],
[N, N, N, 2, N, N, 9, N, N],
[1, N, N, N, 8, N, N, N, 3]
]
hard = [
[N, 4, N, N, N, 7, 9, N, N],
[N, N, 8, 5, 3, 9, N, N, N],
[N, 6, N, N, N, N, 2, N, 3],
[N, N, N, N, N, 2, 5, N, N],
[N, 8, 6, N, N, N, 1, 4, N],
[N, N, 9, 8, N, N, N, N, N],
[6, N, 3, N, N, N, N, 9, N],
[N, N, N, 9, 8, 6, 3, N, N],
[N, N, 1, 4, N, N, N, 6, N]
]
evil = [
[4, 2, N, N, N, N, N, 1, N],
[N, N, N, 5, 4, N, N, 3, N],
[N, N, 6, N, N, 7, N, N, N],
[N, N, N, N, N, N, 2, 7, 9],
[N, 1, N, N, N, N, N, 6, N],
[3, 4, 2, N, N, N, N, N, N],
[N, N, N, 9, N, N, 3, N, N],
[N, 6, N, N, 3, 8, N, N, N],
[N, 8, N, N, N, N, N, 5, 7]
]
blank = [
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N]
]
import time
easy = Sudoku(easy)
hard = Sudoku(hard)
evil = Sudoku(evil)
print()
print('easy')
print(easy)
time.sleep(2)
easy.solve()
print()
print('hard')
print(hard)
time.sleep(2)
hard.solve()
print()
print('evil')
print(evil)
print()
time.sleep(2)
evil.solve()
```
#### File: tests/expect-fail23/recipe-498266.py
```python
from UserDict import DictMixin
import sys
from time import time
class _FakeLock(object):
'''
    a do-nothing substitute for a real Lock when there is no threading. Really a micro-optimization.
'''
acquire = release = lambda x : None
_FakeLock = _FakeLock() # need only one instance
def RLock():
'''
make the container threadsafe if running in a threaded context
'''
if 'thread' in sys.modules: # thread may be imported either directly or by module threading
import threading
return threading.RLock()
else:
return _FakeLock
class _Item(object):
'''
wrapper for items stored in LruDict, providing them with references to one another
'''
__slots__ = "key value nextItem previousItem atime".split()
def __init__(self, key, value):
self.key = key
self.value = value
self.nextItem = None
self.previousItem = None
self.atime = time()
class LruDict(DictMixin):
'''
store up to size items for up to timeout seconds
We inherit from UserDict.DictMixin rather than from dict
because DictMixin builds all its methods on a base set
of user-supplied ones
'''
def __init__(self, timeout=600, size=1000, data=None):
self._lock = RLock()
self._timeout = timeout
self._size = size
# pointers to newest and oldest items
self._newest = None
self._oldest = None
self._data = {}
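        # stored items form a doubly linked list ordered by access time:
        # self._oldest <-> ... <-> self._newest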
if data:
self.update(data)
def _setNewest(self, item):
'''
put a new or retrieved item at the top of the pile
'''
item.atime = time()
if item is self._newest: # item is already on top
return
if item.nextItem or item.previousItem: # this item is currently in the pile...
self._pullout(item) # pull it out
if self._newest:
self._newest.nextItem = item # point the previously newest item to this one...
item.previousItem = self._newest # and vice versa
self._newest = item # reset the 'newest' pointer
if not self._oldest: # this only applies if the pile was empty
self._oldest = item
def _pullout(self, item):
'''
pull an item out of the pile and hook up the neighbours to each other
'''
if item is self._oldest:
if item is self._newest: # removing the only item
self._newest = self._oldest = None
else: # removing the oldest item of 2 or more
self._oldest = item.nextItem
self._oldest.previousItem = None
elif item is self._newest: # removing the newest item of 2 or more
self._newest = item.previousItem
self._newest.nextItem = None
else: # we are somewhere in between at least 2 others - hitch up the neighbours to each other
prev = item.previousItem
next = item.nextItem
prev.nextItem = next
next.previousItem = prev
item.nextItem = item.previousItem = None
def __setitem__(self, key, value):
'''
add a new item or update an old one
'''
try:
self._lock.acquire()
            self.prune() # here we make a choice - if we prune at the beginning, we may wind up with size+1
# items; if we prune at the end, we might keep an expired item. Not serious.
item = self._data.get(key)
if item:
item.value = value
else:
item = self._data[key] = _Item(key, value)
self._setNewest(item)
finally:
self._lock.release()
def __getitem__(self, key):
'''
get an item and update its access time and pile position
'''
try:
self._lock.acquire()
self.prune()
item = self._data[key]
self._setNewest(item)
return item.value
finally:
self._lock.release()
def __delitem__(self, key):
'''
delete an item
'''
try:
self._lock.acquire()
item = self._data.pop(key)
self._pullout(item)
self.prune()
finally:
self._lock.release()
def prune(self):
'''
called by __delitem__, __getitem__, __setitem__, and _contents
drop the oldest members until we get back to recent time or
to size limit
'''
if not len(self._data):
return
try:
self._lock.acquire()
outtime = time() - self._timeout
while len(self._data) > self._size or self._oldest and self._oldest.atime < outtime:
drop = self._data.pop(self._oldest.key)
self._oldest = drop.nextItem
if self._oldest:
self._oldest.previousItem = None
finally:
self._lock.release()
def _contents(self, method, *args):
'''
common backend for methods:
keys, values, items, __len__, __contains__
'''
try:
self._lock.acquire()
self.prune()
data = getattr(self._data, method)(*args)
return data
finally:
self._lock.release()
def __contains__(self, key):
return self._contents('__contains__', key)
has_key = __contains__
def __len__(self):
return self._contents('__len__')
def keys(self):
return self._contents('keys')
def values(self):
data = self._contents('values')
return [v.value for v in data]
def items(self):
data = self._contents('items')
return [(k, v.value) for k, v in data]
def __repr__(self):
d = dict(list(self.items()))
return '%s(timeout=%s, size=%s, data=%s)' % (self.__class__.__name__, self._timeout, self._size, repr(d))
if __name__ == '__main__':
from time import sleep
ls = LruDict(timeout=100, size=5)
print('expiring items by size')
l = 10
for x in range(l):
ls[x] = ''
print(ls)
print('\nexpiring items by time')
ls = LruDict(timeout=1, size=100)
print(ls)
for x in range(10):
ls[x] = ''
print(ls)
sleep(0.21)
size = 10000
items = 100000
print('\ncreating a LruDict with length %d and setting %d items' % (size, items))
ls = LruDict(timeout=600, size=size)
print(ls)
for x in range(items):
ls[x] = ''
print(len(ls))
```
#### File: tests/expect-fail23/recipe-577519.py
```python
from heapq import heappush, heappop # for priority queue
import math
import time
import random
class node:
xPos = 0 # x position
yPos = 0 # y position
distance = 0 # total distance already travelled to reach the node
priority = 0 # priority = distance + remaining distance estimate
def __init__(self, xPos, yPos, distance, priority):
self.xPos = xPos
self.yPos = yPos
self.distance = distance
self.priority = priority
def __lt__(self, other): # comparison method for priority queue
return self.priority < other.priority
def updatePriority(self, xDest, yDest):
self.priority = self.distance + self.estimate(xDest, yDest) * 10 # A*
# give higher priority to going straight instead of diagonally
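    # (move costs are scaled by 10: a straight step costs 10 and a diagonal step 14,
    #  an integer approximation of 10 * sqrt(2))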
def nextMove(self, dirs, d): # d: direction to move
if dirs == 8 and d % 2 != 0:
self.distance += 14
else:
self.distance += 10
# Estimation function for the remaining distance to the goal.
def estimate(self, xDest, yDest):
xd = xDest - self.xPos
yd = yDest - self.yPos
        # Euclidean distance
d = math.sqrt(xd * xd + yd * yd)
# Manhattan distance
# d = abs(xd) + abs(yd)
# Chebyshev distance
# d = max(abs(xd), abs(yd))
return(d)
# A-star algorithm.
# The path returned will be a string of digits of directions.
def pathFind(the_map, n, m, dirs, dx, dy, xA, yA, xB, yB):
closed_nodes_map = [] # map of closed (tried-out) nodes
open_nodes_map = [] # map of open (not-yet-tried) nodes
dir_map = [] # map of dirs
row = [0] * n
for i in range(m): # create 2d arrays
closed_nodes_map.append(list(row))
open_nodes_map.append(list(row))
dir_map.append(list(row))
pq = [[], []] # priority queues of open (not-yet-tried) nodes
pqi = 0 # priority queue index
# create the start node and push into list of open nodes
n0 = node(xA, yA, 0, 0)
n0.updatePriority(xB, yB)
heappush(pq[pqi], n0)
open_nodes_map[yA][xA] = n0.priority # mark it on the open nodes map
# A* search
while len(pq[pqi]) > 0:
# get the current node w/ the highest priority
# from the list of open nodes
n1 = pq[pqi][0] # top node
n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)
x = n0.xPos
y = n0.yPos
heappop(pq[pqi]) # remove the node from the open list
open_nodes_map[y][x] = 0
closed_nodes_map[y][x] = 1 # mark it on the closed nodes map
# quit searching when the goal is reached
# if n0.estimate(xB, yB) == 0:
if x == xB and y == yB:
# generate the path from finish to start
# by following the dirs
path = ''
while not (x == xA and y == yA):
j = dir_map[y][x]
c = str((j + dirs / 2) % dirs)
path = c + path
x += dx[j]
y += dy[j]
return path
# generate moves (child nodes) in all possible dirs
for i in range(dirs):
xdx = x + dx[i]
ydy = y + dy[i]
if not (xdx < 0 or xdx > n-1 or ydy < 0 or ydy > m - 1
or the_map[ydy][xdx] == 1 or closed_nodes_map[ydy][xdx] == 1):
# generate a child node
m0 = node(xdx, ydy, n0.distance, n0.priority)
m0.nextMove(dirs, i)
m0.updatePriority(xB, yB)
# if it is not in the open list then add into that
if open_nodes_map[ydy][xdx] == 0:
open_nodes_map[ydy][xdx] = m0.priority
heappush(pq[pqi], m0)
# mark its parent node direction
dir_map[ydy][xdx] = (i + dirs / 2) % dirs
elif open_nodes_map[ydy][xdx] > m0.priority:
# update the priority
open_nodes_map[ydy][xdx] = m0.priority
# update the parent direction
dir_map[ydy][xdx] = (i + dirs / 2) % dirs
# replace the node
# by emptying one pq to the other one
# except the node to be replaced will be ignored
# and the new node will be pushed in instead
while not (pq[pqi][0].xPos == xdx and pq[pqi][0].yPos == ydy):
heappush(pq[1 - pqi], pq[pqi][0])
heappop(pq[pqi])
heappop(pq[pqi]) # remove the target node
# empty the larger size priority queue to the smaller one
if len(pq[pqi]) > len(pq[1 - pqi]):
pqi = 1 - pqi
while len(pq[pqi]) > 0:
heappush(pq[1-pqi], pq[pqi][0])
heappop(pq[pqi])
pqi = 1 - pqi
heappush(pq[pqi], m0) # add the better node instead
return '' # if no route found
# MAIN
dirs = 8 # number of possible directions to move on the map
if dirs == 4:
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
elif dirs == 8:
dx = [1, 1, 0, -1, -1, -1, 0, 1]
dy = [0, 1, 1, 1, 0, -1, -1, -1]
n = 30 # horizontal size of the map
m = 30 # vertical size of the map
the_map = []
row = [0] * n
for i in range(m): # create empty map
the_map.append(list(row))
# fillout the map with a '+' pattern
for x in range(n / 8, n * 7 / 8):
the_map[m / 2][x] = 1
for y in range(m/8, m * 7 / 8):
the_map[y][n / 2] = 1
# randomly select start and finish locations from a list
sf = []
sf.append((0, 0, n - 1, m - 1))
sf.append((0, m - 1, n - 1, 0))
sf.append((n / 2 - 1, m / 2 - 1, n / 2 + 1, m / 2 + 1))
sf.append((n / 2 - 1, m / 2 + 1, n / 2 + 1, m / 2 - 1))
sf.append((n / 2 - 1, 0, n / 2 + 1, m - 1))
sf.append((n / 2 + 1, m - 1, n / 2 - 1, 0))
sf.append((0, m / 2 - 1, n - 1, m / 2 + 1))
sf.append((n - 1, m / 2 + 1, 0, m / 2 - 1))
(xA, yA, xB, yB) = random.choice(sf)
print('Map size (X,Y): ', n, m)
print('Start: ', xA, yA)
print('Finish: ', xB, yB)
t = time.time()
route = pathFind(the_map, n, m, dirs, dx, dy, xA, yA, xB, yB)
print('Time to generate the route (seconds): ', time.time() - t)
print('Route:')
print(route)
# mark the route on the map
if len(route) > 0:
x = xA
y = yA
the_map[y][x] = 2
for i in range(len(route)):
j = int(route[i])
x += dx[j]
y += dy[j]
the_map[y][x] = 3
the_map[y][x] = 4
# display the map with the route added
print('Map:')
for y in range(m):
for x in range(n):
xy = the_map[y][x]
if xy == 0:
print('.', end=' ') # space
elif xy == 1:
print('O', end=' ') # obstacle
elif xy == 2:
print('S', end=' ') # start
elif xy == 3:
print('R', end=' ') # route
elif xy == 4:
print('F', end=' ') # finish
print()
input('Press Enter...')
```
#### File: tests/expect-fail23/recipe-577554.py
```python
import sys
import os
def encode_fib(n):
# Return string with Fibonacci encoding for n (n >= 1).
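    # Fibonacci (Zeckendorf) coding: write n as a sum of non-consecutive Fibonacci
    # numbers, emit one bit per Fibonacci number, and append a final '1' so every
    # codeword ends in '11' -- a pattern that cannot occur inside a codeword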
result = ""
if n >= 1:
a = 1
b = 1
c = a + b # next Fibonacci number
fibs = [b] # list of Fibonacci numbers, starting with F(2), each <= n
while n >= c:
fibs.append(c) # add next Fibonacci number to end of list
a = b
b = c
c = a + b
result = "1" # extra "1" at end
for fibnum in reversed(fibs):
if n >= fibnum:
n = n - fibnum
result = "1" + result
else:
result = "0" + result
return result
def byteWriter(bitStr, outputFile):
global bitStream
bitStream += bitStr
while len(bitStream) > 8: # write byte(s) if there are more then 8 bits
byteStr = bitStream[:8]
bitStream = bitStream[8:]
outputFile.write(chr(int(byteStr, 2)))
def bitReader(n): # number of bits to read
global byteArr
global bitPosition
bitStr = ''
for i in range(n):
bitPosInByte = 7 - (bitPosition % 8)
bytePosition = int(bitPosition / 8)
byteVal = byteArr[bytePosition]
bitVal = int(byteVal / (2 ** bitPosInByte)) % 2
bitStr += str(bitVal)
bitPosition += 1 # prepare to read the next bit
return bitStr
# MAIN
if len(sys.argv) != 4:
print('Usage: Fibonacci.py [e|d] [path]InputFileName [path]OutputFileName')
sys.exit()
mode = sys.argv[1] # encoding/decoding
inputFile = sys.argv[2]
outputFile = sys.argv[3]
# read the whole input file into a byte array
fileSize = os.path.getsize(inputFile)
fi = open(inputFile, 'rb')
# byteArr = map(ord, fi.read(fileSize))
byteArr = bytearray(fi.read(fileSize))
fi.close()
fileSize = len(byteArr)
print('File size in bytes:', fileSize)
print()
if mode == 'e': # FILE ENCODING
# calculate the total number of each byte value in the file
freqList = [0] * 256
for b in byteArr:
freqList[b] += 1
# create a list of (frequency, byteValue, encodingBitStr) tuples
tupleList = []
for b in range(256):
if freqList[b] > 0:
tupleList.append((freqList[b], b, ''))
# sort the list according to the frequencies descending
tupleList = sorted(tupleList, key=lambda tup: tup[0], reverse = True)
# assign encoding bit strings to each byte value
for b in range(len(tupleList)):
tupleList[b] = (tupleList[b][0], tupleList[b][1], encode_fib(b + 1))
# print 'The list of (frequency, byteValue, encodingBitStr) tuples:'
# print tupleList
# print
# write the list of byte values as the compressed file header
bitStream = '' # global
fo = open(outputFile, 'wb')
fo.write(chr(len(tupleList) - 1)) # first write the number of byte values
for (freq, byteValue, encodingBitStr) in tupleList:
# convert the byteValue into 8-bit and send to be written into file
# bitStr = bin(byteValue)
# bitStr = bitStr[2:] # remove 0b
# bitStr = '0' * (8 - len(bitStr)) + bitStr # add 0's if needed for 8 bits
# byteWriter(bitStr, fo)
fo.write(chr(byteValue)) # this would do the same
# write 32-bit (input file size)-1 value
bitStr = bin(fileSize - 1)
bitStr = bitStr[2:] # remove 0b
bitStr = '0' * (32 - len(bitStr)) + bitStr # add 0's if needed for 32 bits
byteWriter(bitStr, fo)
# create a dictionary of byteValue : encodingBitStr pairs
dic = dict([(tup[1], tup[2]) for tup in tupleList])
# del tupleList
# print 'The dictionary of byteValue : encodingBitStr pairs:'
# print dic
# write the encoded data
for b in byteArr:
byteWriter(dic[b], fo)
byteWriter('0' * 8, fo) # to write the last remaining bits (if any)
fo.close()
elif mode == 'd': # FILE DECODING
bitPosition = 0 # global
n = int(bitReader(8), 2) + 1 # first read the number of byte values
# print 'Number of byte values:', n
dic = dict()
for i in range(n):
# read the byteValue
byteValue = int(bitReader(8), 2)
encodingBitStr = encode_fib(i + 1)
dic[encodingBitStr] = byteValue # add to the dictionary
# print 'The dictionary of encodingBitStr : byteValue pairs:'
# print dic
# print
# read 32-bit file size (number of encoded bytes) value
numBytes = int(bitReader(32), 2) + 1
print('Number of bytes to decode:', numBytes)
# read the encoded data, decode it, write into the output file
fo = open(outputFile, 'wb')
for b in range(numBytes):
# read bits until a decoding match is found
encodingBitStr = ''
while True:
encodingBitStr += bitReader(1)
# if encodingBitStr in dic:
if encodingBitStr.endswith('11'):
byteValue = dic[encodingBitStr]
fo.write(chr(byteValue))
break
fo.close()
```
#### File: tests/expect-fail23/recipe-578005.py
```python
import os
import pygame, sys
from pygame.locals import K_a, K_s,K_w,K_d,K_LEFTBRACKET,K_RIGHTBRACKET, K_RIGHT, K_LEFT, QUIT
from PIL import Image
pygame.init()
'''
main() sets these (perhaps differently), so make changes down there.
If you cut/copy this code somewhere you need these variables globally,
or make it a class and make these attributes.
'''
resolution = 300 #the resolution (in dpi) the resulting cropped images should have.
infile_folder = '.' #path to folder images to process are in. '.' is the folder this script is in
infile_prefix = "album80-86_" #prefix common to all the images you'd like to access
start_page = 1 #which page to start on. 0 is the first page.
outfile_folder= "./cropped"
outfile_prefix = "photo80-86_"
outfile_extension = "jpg" #must be three character extension with no period. Why? Because I am lazy. So no "jpeg", okay?
BG_COLOR = (0,0,0)
def displayRect(screen, px, topleft, prior,pos,scale):
# ensure that the rect always has positive width, height
if topleft == None:
#func was called without a topleft, which means clear the previous rectangle
screen.fill(BG_COLOR)
rect = px.get_rect()
px = pygame.transform.scale(px,[int(rect.width/scale), int(rect.height/scale)])
screen.blit(px, (rect[0]-pos[0],rect[1]-pos[1]))
pygame.display.flip()
return None
#or, the usual situation, topleft is defined, so blit over the old rect and blit in the new.
topleft = [(val/scale-pos[i]) for i,val in enumerate(topleft)]
x, y = topleft
bottomright = pygame.mouse.get_pos()
width = bottomright[0] - topleft[0]
height = bottomright[1] - topleft[1]
if width < 0:
x += width
width = abs(width)
if height < 0:
y += height
height = abs(height)
# eliminate redundant drawing cycles (when mouse isn't moving)
current = x, y, width, height
if not (width and height):
return current
if current == prior:
return current
# draw transparent box and blit it onto canvas
rect = px.get_rect()
px = pygame.transform.scale(px,[int(rect.width/scale), int(rect.height/scale)])
screen.blit(px, (rect[0]-pos[0],rect[1]-pos[1]))
im = pygame.Surface((width, height))
im.fill((128, 128, 128))
pygame.draw.rect(im, (32, 32, 32), im.get_rect(), 1)
im.set_alpha(128)
screen.blit(im, (x, y))
pygame.display.flip()
# return current box extents
return (x, y, width, height)
def setup(px):
screen = pygame.display.set_mode( px.get_rect()[2:] )
screen.blit(px, px.get_rect())
pygame.display.flip()
return screen, px
def move(pos,scale,px,screen):
x,y = pos
#print pos,x
rect = px.get_rect()
screen.fill(BG_COLOR)
px = pygame.transform.scale(px,[int(rect.width/scale), int(rect.height/scale)])
screen.blit(px, (rect[0]-x,rect[1]-y))
pygame.display.flip()
#px.rect.topleft = pr.rect.topleft[0] - x,
def mainLoop():
topleft = bottomright = prior = None
n=0
scale = 1
pos = [0,0]
#create list of files matching prefix in folder, and sort it
# input_loc = first file #input_loc = 'album86-92_003.jpg'
infiles = []
len_prefix = len(infile_prefix)
for fname in os.listdir(infile_folder):
if fname[:len_prefix] == infile_prefix:
if fname[-3:] in ['jpg','png']:
infiles.append(fname)
infiles.sort()
file_idx = start_page
try:
infile = infiles[file_idx]
except IndexError:
print("the start page you requested is beyond the scope of the files loaded.\nYou have been taken to the last page instead.")
file_idx = len(infiles)-1
infile = infiles[file_idx]
#get files begining with output prefix, grab suffixes and sort.
#but, if folder does not exist or is empty, just start at 0
outfiles = []
len_prefix = len(outfile_prefix)
try:
for fname in os.listdir(outfile_folder):
if fname[:len_prefix] == outfile_prefix:
outfiles.append(fname)
except OSError:
os.makedirs(outfile_folder)
out_idx = 0
else:
outfiles.sort()
try:
out_idx = int(outfiles[-1][len_prefix:-4])+1
except ValueError:
print("Egad! Not all files with the output prefix specified end with a number followed by a three character extension\nMaybe start a new output folder?")
print("...Quitting")
return 0
except IndexError:
#folder exisits but is empty
out_idx = 0
input_loc = os.path.join(infile_folder,infile)
screen, px = setup(px = pygame.image.load(input_loc))
outfilename = outfile_prefix+str(out_idx).zfill(3)+'.'+outfile_extension
output_loc = os.path.join(outfile_folder,outfilename)
while n!=1:
for event in pygame.event.get():
if event.type == QUIT:
sys.exit(0)
if event.type == pygame.MOUSEBUTTONUP:
if not topleft:
topleft = [(val+pos[i])*scale for i,val in enumerate(event.pos)]
#print "tr: ",topleft
else:
bottomright = [(val+pos[i])*scale for i,val in enumerate(event.pos)]
#print "br: ",bottomright
if event.type == pygame.KEYDOWN and event.key == K_a:
pos = [pos[0]-200,pos[1]]
move(pos,scale,px,screen)
if event.type == pygame.KEYDOWN and event.key == K_d:
pos = [pos[0]+200,pos[1]]
move(pos,scale,px,screen)
if event.type == pygame.KEYDOWN and event.key == K_w:
pos = [pos[0],pos[1]-200]
move(pos,scale,px,screen)
if event.type == pygame.KEYDOWN and event.key == K_s:
pos = [pos[0],pos[1]+200]
move(pos,scale,px,screen)
if event.type == pygame.KEYDOWN and event.key == K_RIGHTBRACKET:
scale = scale/1.25
move(pos,scale,px,screen)
if event.type == pygame.KEYDOWN and event.key == K_LEFTBRACKET:
scale = scale*1.25
move(pos,scale,px,screen)
if event.type == pygame.KEYDOWN and event.key == K_RIGHT:
file_idx += 1
try:
infile = infiles[file_idx]
#print "file_idx: ",file_idx
except IndexError:
file_idx -= 1
print("End of album")
#raise
else:
input_loc = os.path.join(infile_folder,infile)
px = pygame.image.load(input_loc)
pos = [0,0]
topleft = bottomright = prior = None
prior = displayRect(screen, px, topleft, prior,pos,scale)
if event.type == pygame.KEYDOWN and event.key == K_LEFT:
if file_idx == 0:
print("This is the begining of the album, cannot go back a page.")
else:
#print "file_idx",file_idx
file_idx -= 1
infile = infiles[file_idx]
input_loc = os.path.join(infile_folder,infile)
px = pygame.image.load(input_loc)
pos = [0,0]
topleft = bottomright = prior = None
prior = displayRect(screen, px, topleft, prior,pos,scale)
if topleft:
#first corner has been selected
prior = displayRect(screen, px, topleft, prior,pos,scale)
if bottomright:
#selection has been made!
left, upper, right, lower = ( topleft + bottomright )
# ensure output rect always has positive width, height
if right < left:
left, right = right, left
if lower < upper:
lower, upper = upper, lower
im = Image.open(input_loc)
im = im.crop(( int(left), int(upper), int(right), int(lower)))
dpi = resolution
im.save(output_loc, dpi = (dpi,dpi))
out_idx += 1
outfilename = outfile_prefix+str(out_idx).zfill(3)+'.'+outfile_extension
output_loc = os.path.join(outfile_folder,outfilename)
topleft = bottomright = prior = None
prior = displayRect(screen, px, topleft, prior,pos,scale)
print("saved")
return
if __name__ == "__main__":
os.system( [ 'clear', 'cls' ][ os.name == 'nt' ] )
print('''
Hello!
This program exists to speed up cropping out many sections from larger images
while also changing the resolution of the cropped images.
The Zudell family photo album was scanned at 600 dpi resolution.
The default resolution for cropped images is 300 dpi.
''')
resolution = input('enter new integer resolution, or nothing for default: ')
os.system( [ 'clear', 'cls' ][ os.name == 'nt' ] )
try: resolution = int(resolution)
except:
print('\nNo new resolution specified, using 300 dpi')
resolution = int(300)
dirs = []
for f in os.listdir('.'):
if os.path.isdir(f):
dirs.append(f)
print('''\n\n\n\n
now, enter the name of the directory you want to work on. here is a list of sub
directories within this current directory:\n''')
if dirs:
for dir in dirs: print(dir)
else:
print("oops, there are no sub-directories here")
print("\n\nenter nothing or nonsense to use the current directory")
path = input("enter directory to use: ")
infile_folder = path.strip()
os.system( [ 'clear', 'cls' ][ os.name == 'nt' ] )
if os.path.isdir(infile_folder):
pass
elif os.path.isdir('./'+infile_folder):
infile_folder = './'+infile_folder
else:
print("no valid directory entered, using current")
infile_folder = '.'
for f in os.listdir(infile_folder):
print(f)
if not os.listdir(infile_folder):
print("oh... There aren't any files at all in here")
d = input("press enter to quit")
pygame.display.quit()
print('''\n\n
You may choose a filename prefix so that only some of the images in this dir
are available for editing. all files in this directory are listed above. \n''')
infile_prefix = input('input file prefix (or nothing to use all files): ')
os.system( [ 'clear', 'cls' ][ os.name == 'nt' ] )
print('''\n\n
You may choose a prefix for output files also. they will go in the ./cropped folder.\n''')
outfile_prefix = input('output file prefix (or nothing for default): ')
if not outfile_prefix: outfile_prefix = "image_"
os.system( [ 'clear', 'cls' ][ os.name == 'nt' ] )
print('''
Use the left and right arrows to change image.
'[' and ']' zoom out and in, respectively.
click and drag a box to crop.
to move around:
w
asd
And come back to this screen to see unnecessary messages.
''')
input('\npress enter to begin')
mainLoop()
pygame.display.quit()
```
#### File: tests/expect-fail23/recipe-578257.py
```python
from sympy.solvers import solve
from sympy import Symbol, abs, Real
x = Symbol('x', real=True)
import pylab as pylab
def g(yieldCurve, zeroRates,n, verbose):
'''
Recursively generates the zero curve: for each period it builds an
expression such as eval('(0.06/1.05)+(1.06/(1+x)**2)-1') and solves
it for x to obtain the zero rate of that period.
'''
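# For example, on the first call (n=1, zeroRates=[0.05], yieldCurve[1]=0.06)
# the string assembled below is essentially
# '0.06/(1+0.05)**1 + (1+0.06)/(1+x)**2 - 1',
# and solving it for x gives the two-period zero rate.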
if len(zeroRates) >= len(yieldCurve):
print("\n\n\t+zero curve boot strapped [%d iterations]" % (n))
return
else:
legn = ''
for i in range(0,len(zeroRates),1):
if i == 0:
legn = '%2.6f/(1+%2.6f)**%d'%(yieldCurve[n], zeroRates[i],i+1)
else:
legn = legn + ' +%2.6f/(1+%2.6f)**%d'%(yieldCurve[n], zeroRates[i],i+1)
legn = legn + '+ (1+%2.6f)/(1+x)**%d-1'%(yieldCurve[n], n+1)
# solve the expression for this iteration
if verbose:
print("-[%d] %s" % (n, legn.strip()))
rate1 = solve(eval(legn), x)
# Abs here since some solutions can be complex
rate1 = min([Real(abs(r)) for r in rate1])
if verbose:
print("-[%d] solution %2.6f" % (n, float(rate1)))
# stuff the new rate in the results, will be
# used by the next iteration
zeroRates.append(rate1)
g(yieldCurve, zeroRates,n+1, verbose)
verbose = True
tenors = [.1,.25,0.5,1,2,3,5,7,10,20,30]
#
# money market, futures, swap rates
#
yieldCurve = [0.07, 0.09, 0.15, 0.21, 0.37, 0.57, 1.13, 1.70, 2.31, 3.08 ,3.41]
#yieldCurve = [0.05, 0.06, 0.07, 0.08 ,0.085 ,0.0857 ,0.0901,0.0915,0.0925,0.0926,0.0934,0.0937]
zeroRates = [yieldCurve[0]] # TODO: check that this is the correct rate
print("\n\n\t<NAME>, March 2012\n\tYield Curve Bootstrapper\n\t<NAME>\n\n")
# kick off the recursive code
g(yieldCurve, zeroRates, 1, verbose)
print("\tZeroRate Array",zeroRates)
pylab.plot(tenors,yieldCurve)
pylab.plot(tenors,zeroRates)
pylab.show()
```
#### File: tests/expect-fail23/recipe-578853.py
```python
import os
import random
import copy
import datetime
def mean(x): # mean
n = len(x)
mean = sum(x) / n
return mean
def sd(x): # standard deviation
n = len(x)
mean = sum(x) / n
sd = (sum((x-mean)**2 for x in x) / n) ** 0.5
return sd
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
class ndim: # from 3D array to flat array
def __init__(self,x,y,z,d):
self.dimensions=[x,y,z]
self.numdimensions=d
self.gridsize=x*y*z
def getcellindex(self, location):
cindex = 0
cdrop = self.gridsize
for index in range(self.numdimensions):
cdrop /= self.dimensions[index]
cindex += cdrop * location[index]
return cindex
def getlocation(self, cellindex):
res = []
for size in reversed(self.dimensions):
res.append(cellindex % size)
cellindex /= size
return res[::-1]
""" how to use ndim class
n=ndim(4,4,5,3)
print(n.getcellindex((0,0,0)))
print(n.getcellindex((0,0,1)))
print(n.getcellindex((0,1,0)))
print(n.getcellindex((1,0,0)))
print(n.getlocation(20))
print(n.getlocation(5))
print(n.getlocation(1))
print(n.getlocation(0))
"""
print("###############################################################################")
print("# KB_CAT KNOWLEDGE DISCOVERY IN DATA MINING (CATALOG PROGRAM) #")
print("# by <NAME> (COPYRIGHT MARCH 2011 ALL RIGHTS RESERVED) #")
print("# Language used: PYTHON #")
print("###############################################################################")
# input and run parameters
error = 0
while True:
arch_input = input('InputFile : ')
if not os.path.isfile(arch_input):
print("Oops! File does not exist. Try again... or CTR/C to exit")
else:
break
while True:
try:
num_gruppi = int(input('Number of Groups (3 - 20) : '))
except ValueError:
print("Oops! That was no valid number. Try again...")
else:
if(num_gruppi < 3):
print("Oops! Number of Groups too low. Try again...")
else:
if(num_gruppi > 20):
print("Oops! Number of Groups too big. Try again...")
else:
break
while True:
normaliz = input('Normalization(Max, Std, None) : ')
normaliz = normaliz.upper()
normaliz = normaliz[0]
if(normaliz != 'M' and normaliz != 'S' and normaliz != 'N'):
print("Oops! Input M, S or N. Try again...")
else:
break
while True:
try:
max_alpha = float(input('Start value of alpha (1.8 - 0.9) : '))
except ValueError:
print("Oops! That was no valid number. Try again...")
else:
if(max_alpha > 1.8):
print("Oops! Start value of alpha too big. Try again...")
else:
if(max_alpha < 0.9):
print("Oops! Start value of alpha too low. Try again...")
else:
break
while True:
try:
min_alpha = float(input('End value of alpha (0.5 - 0.0001) : '))
except ValueError:
print("Oops! That was no valid number. Try again...")
else:
if(min_alpha > 0.5):
print("Oops! alpha too big. Try again...")
else:
if(min_alpha < 0.0001):
print("Oops! alpha too low. Try again...")
else:
break
while True:
try:
step_alpha = float(input('Decreasing step of alpha (0.1 - 0.001) : '))
except ValueError:
print("Oops! That was no valid number. Try again...")
else:
if(step_alpha > 0.1):
print("Oops! Decreasing step of alpha too big. Try again...")
else:
if(step_alpha < 0.001):
print("Oops! Decreasing step of alpha too low. Try again...")
else:
break
file_input = arch_input
gruppi_num = num_gruppi
tipo_norm = normaliz
alpha_min = min_alpha
alpha_max = max_alpha
alpha_step = step_alpha
# outputs files
file_input = arch_input
tipo_norm = normaliz
gruppi_num = num_gruppi
nome_input = file_input.split(".")
arch_output = nome_input[0] + "_" + tipo_norm + "_g" + str(gruppi_num) + "_out.txt"
arch_outsrt = nome_input[0] + "_" + tipo_norm + "_g" + str(gruppi_num) + "_outsrt.txt"
arch_sort = nome_input[0] + "_" + tipo_norm + "_g" + str(gruppi_num) + "_sort.txt"
arch_catal = nome_input[0] + "_" + tipo_norm + "_g" + str(gruppi_num) + "_catal.txt"
arch_medsd = nome_input[0] + "_" + tipo_norm + "_g" + str(gruppi_num) + "_medsd.txt"
arch_cv = nome_input[0] + "_" + tipo_norm + "_g" + str(gruppi_num) + "_cv.txt"
arch_grid = nome_input[0] + "_" + tipo_norm + "_g" + str(gruppi_num) + "_grid.txt"
arch_log = nome_input[0] + "_" + tipo_norm + "_g" + str(gruppi_num) + "_log.txt"
# start time
t0 = datetime.datetime.now()
# read input file
arr_r = []
arr_orig = []
arr_c = []
mtchx = []
mtchy = []
txt_col = []
xnomi = []
# the number of variables / columns must be the same in every record
n_rows = 0
n_cols = 0
err_cols = 0
index = 0
for line in open(file_input).readlines():
linea = line.split()
if(index == 0):
xnomi.append(linea)
n_cols = len(linea)
else:
arr_r.append(linea)
if(len(linea) != n_cols):
err_cols = 1
print(("Different numbers of variables / columns in the record " + str(index)
+ " cols " + str(len(linea))))
index += 1
if(err_cols == 1):
print(("File " + file_input + " contains errors. Exit "))
quit()
index = 0
while index < len(arr_r):
linea = arr_r[index]
index_c = 0
while index_c < len(linea):
if linea[index_c].isdigit():
linea[index_c] = float(linea[index_c])
index_c += 1
arr_r[index] = linea
index += 1
arr_orig = copy.deepcopy(arr_r) # original input file
testata_cat = copy.deepcopy(xnomi[0]) # original header row
# finding columns containing strings and columns containing numbers
testata = xnomi[0]
testata_orig = copy.deepcopy(xnomi[0])
n_cols = len(testata) - 1
n_rows = len(arr_r)
ind_c = 1
err_type = 0
while ind_c < len(testata):
ind_r = 1
tipo_num = 0
tipo_txt = 0
while ind_r < len(arr_r):
arr_c = arr_r[ind_r]
if is_number(arr_c[ind_c]):
tipo_num = 1
else:
tipo_txt = 1
ind_r += 1
if tipo_num == 1 and tipo_txt == 1:
print("The columns / variables " + testata[ind_c] + " contains both strings and numbers.")
print(arr_c)
err_type = 1
ind_c += 1
if err_type == 1:
print("Oops! The columns / variables contains both strings and numbers. Exit. ")
quit()
index_c = 1
while index_c <= n_cols:
txt_col = []
index = 0
while index < len(arr_r):
arr_c = arr_r[index]
if(isinstance(arr_c[index_c],str)):
txt_col.append(arr_c[index_c])
index += 1
set_txt_col = set(txt_col) # remove duplicates
txt_col = list(set(set_txt_col))
txt_col.sort()
# from strings to numbers
if(len(txt_col) > 0):
if(len(txt_col) > 1):
passo1 = 1.0 / (len(txt_col) - 1)
else:
passo1 = 0.0
index = 0
while index < len(arr_r):
arr_c = arr_r[index]
campo1 = arr_c[index_c]
indice1 = txt_col.index(campo1)
if(len(txt_col) == 1): # same values in the column
val_num1 = float(1)
else:
val_num1 = float(passo1 * indice1)
arr_c[index_c] = val_num1 + 0.00000001 # to avoid zero values in means
# (to prevent zero divide in CV)
index += 1
index_c += 1
# means, max & std
xmeans = []
xmaxs = []
xmins = [] ### added by Roberto 4/03/2012
xsds = []
xcv = []
index_c = 0
while index_c <= n_cols:
xmeans.append(0.0)
xmaxs.append(-9999999999999999.9)
xmins.append(9999999999999999.9) ### added by Roberto 4/03/2012
xsds.append(0.0)
xcv.append(0.0)
index_c += 1
# means & max
index = 0
while index < n_rows:
arr_c = arr_r[index]
index_c = 1
while index_c <= n_cols:
xmeans[index_c] += arr_c[index_c]
if(arr_c[index_c] > xmaxs[index_c]):
xmaxs[index_c] = arr_c[index_c]
index_c += 1
index += 1
index_c = 1
while index_c <= n_cols:
xmeans[index_c] = xmeans[index_c] / n_rows
index_c += 1
# std
index = 0
while index < n_rows:
arr_c = arr_r[index]
index_c = 1
while index_c <= n_cols:
xsds[index_c] += (arr_c[index_c] - xmeans[index_c])**2
index_c += 1
index += 1
index_c = 1
while index_c <= n_cols:
xsds[index_c] = (xsds[index_c] / (n_rows - 1)) ** 0.5 # sample standard deviation over the n_rows records
index_c += 1
# Means, Max, Std, CV output file
medsd_file = open(arch_medsd, 'w')
# columns names
medsd_file.write('%s %s ' % ('Function' , "\t"))
index_c = 1
while index_c <= n_cols:
medsd_file.write('%s %s ' % (testata[index_c], "\t"))
index_c += 1
medsd_file.write('%s' % ('\n'))
# means
medsd_file.write('%s %s ' % ('Mean' , "\t"))
index_c = 1
while index_c <= n_cols:
valore = str(xmeans[index_c])
valore = valore[0:6]
medsd_file.write('%s %s ' % (valore, "\t"))
index_c += 1
medsd_file.write('%s' % ('\n'))
# max
medsd_file.write('%s %s ' % ('Max' , "\t"))
index_c = 1
while index_c <= n_cols:
valore = str(xmaxs[index_c])
valore = valore[0:6]
medsd_file.write('%s %s ' % (valore, "\t"))
index_c += 1
medsd_file.write('%s' % ('\n'))
# std
medsd_file.write('%s %s ' % ('Std' , "\t"))
index_c = 1
while index_c <= n_cols:
valore = str(xsds[index_c])
valore = valore[0:6]
medsd_file.write('%s %s ' % (valore, "\t"))
index_c += 1
medsd_file.write('%s' % ('\n'))
# CV
medsd_file.write('%s %s ' % ('CV' , "\t"))
index_c = 1
med_cv_gen = 0.0 # cv average of all columns / variables
while index_c <= n_cols:
if xmeans[index_c] == 0:
media1 = 0.000001
else:
media1 = xmeans[index_c]
xcv[index_c] = xsds[index_c] / abs(media1)
valore = str(xcv[index_c])
med_cv_gen += xcv[index_c]
valore = valore[0:6]
medsd_file.write('%s %s ' % (valore, "\t"))
index_c += 1
med_cv_gen = med_cv_gen / n_cols
str_med_cv_gen = str(med_cv_gen)
str_med_cv_gen = str_med_cv_gen[0:6]
medsd_file.write('%s' % ('\n'))
medsd_file.close()
# input standardization
# standardization on max
if tipo_norm == 'M':
index = 0
while index < n_rows:
arr_c = arr_r[index]
index_c = 1
while index_c <= n_cols: ## also update kb_cla.py
if xmaxs[index_c] == 0.0:
xmaxs[index_c] = 0.00001
arr_c[index_c] = arr_c[index_c] / xmaxs[index_c]
index_c += 1
index += 1
# standardization on std
if tipo_norm == 'S':
index = 0
while index < n_rows:
arr_c = arr_r[index]
index_c = 1
while index_c <= n_cols:
if xsds[index_c] == 0.0:
xsds[index_c] = 0.00001
arr_c[index_c] = (arr_c[index_c] - xmeans[index_c]) / xsds[index_c]
if arr_c[index_c] < xmins[index_c]: ### added by Roberto 4/03/2012
xmins[index_c] = arr_c[index_c] ### added by Roberto 4/03/2012
index_c += 1
index += 1
# add xmins to eliminate negative values (added by Roberto 4/03/2012)
index = 0
while index < n_rows:
arr_c = arr_r[index]
index_c = 1
while index_c <= n_cols:
arr_c[index_c] = arr_c[index_c] - xmins[index_c]
print(arr_c[index_c])
index_c += 1
index += 1
# end of addition by Roberto 4/03/2012
# start of kohonen algorithm
# min and max vectors
vmaxs = []
vmins = []
index_c = 0
while index_c <= n_cols:
vmaxs.append(-10000000000000.0)
vmins.append( 10000000000000.0)
index_c += 1
# columns min & max
index = 0
while index < n_rows:
arr_c = arr_r[index]
index_c = 1
while index_c <= n_cols:
if arr_c[index_c] > vmaxs[index_c]:
vmaxs[index_c] = arr_c[index_c]
if arr_c[index_c] < vmins[index_c]:
vmins[index_c] = arr_c[index_c]
index_c += 1
index += 1
# run parameters and temp arrays
n = n_rows
m = n_cols
nx = gruppi_num
ny = gruppi_num
ix = 950041 # integer as random seed
nsteps = int(10000 * nx * ny) # number of steps
nepoks = int(nsteps / n ** 0.5) # number of epochs
unit_calc = int(n * m * nx * ny) # running units
passo = int(5000 / n) # step of visualization on monitor
rmax = nx - 1
rmin = 1.0
if passo < 1:
passo = 1
grid = [] # training grid
index = 0
while index < nx * ny * m:
grid.append(0.0)
index += 1
n=ndim(nx,ny,m,3)
random.seed(ix) # initial value of random seed to obtain the same sequences in new runs
index = 0
while index < nx:
index_c = 0
while index_c < ny:
index_k = 0
while index_k < m:
ig = n.getcellindex((index,index_c,index_k))
grid[ig] = random.random()
index_k += 1
index_c += 1
index += 1
gridp = copy.deepcopy(grid) # initial previous grid = current grid
gridm = copy.deepcopy(grid) # initial min grid = current grid
# for each record in each epoch
iter = 0
discrea = 1000000000000.0 # current error
discrep = 0.0 # previous error
if nepoks < 20:
nepoks = 20 # min epochs = 20
nepokx = 0
min_epok = 0 # epoch with min error
min_err = 1000000000.0 # min error
alpha = float(alpha_max) # initial value of alpha parameter
ir = 0.0 # initial value of ir parameter ir
ne = 1
print(" ")
print('Record ' + str(n_rows) + ' Columns ' + str(n_cols))
# main loop
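# For every epoch and every input record:
#   1. scan all nx*ny grid cells and pick the "winning" cell whose
#      coefficients have the smallest mean squared distance to the record,
#   2. add that distance to the running epoch error (discrep),
#   3. pull the coefficients of every cell within radius ir of the winner
#      towards the record by a fraction alpha.
# alpha decays from alpha_max towards alpha_min, and the grid with the
# lowest epoch error seen so far is kept in gridm.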
try:
while ne <= nepoks:
if (ne % passo == 0): # print running message when modulo division = zero
min_err_txt = "%14.5f" % min_err # format 8 integers and 3 decimals
alpha_txt = "%12.5f" % alpha # format 6 integers and 5 decimals
print(('Epoch ' + str(ne) + ' min err ' + min_err_txt + ' min epoch ' +
str(min_epok - 1) + " alpha " + alpha_txt))
if min_err < 1000000000.0:
nepokx += 1
if min_err > discrea and discrep > discrea and discrea > 0.0:
min_epok = ne # current epoch (min)
min_err = discrea
# copy current grid to min grid
gridm = copy.deepcopy(grid)
min_err_txt = "%12.3f" % min_err # format 8 integers and 3 decimals
alpha_txt = "%12.5f" % alpha # format 6 integer and 5 decimals
print(('**** Epoch ' + str(ne - 1) + ' WITH MIN ERROR ' + min_err_txt +
" alpha " + alpha_txt))
# checking the current value of alpha
if alpha > alpha_min:
discrea = discrep
discrep = 0.0
# copy current grid to previous grid
gridp = copy.deepcopy(grid)
# from the starting row to the ending row
i = 0
while i < n_rows:
iter += 1
# find the best grid coefficient
ihit = 0
jhit = 0
dhit = 100000.0
igx = 0
igy = 0
while igx < nx:
igy = 0
while igy < ny:
d = 0.0
neff = 0
k = 0
arr_c = arr_r[i]
while k < m: # update the sum of squared deviation of input
# value from the grid coefficient
ig = n.getcellindex((igx,igy,k))
d = d + (arr_c[k+1] - grid[ig]) ** 2
k += 1
d = d / float(m)
# d = d / m
if d < dhit:
dhit = d
ihit = int(igx)
jhit = int(igy)
igy += 1
igx += 1
# update iteration error
discrep = discrep + dhit
# now we have the coordinates of the best grid coefficient
ir = max(rmax * float(1001 - iter) / 1000.0 + 0.9999999999 , 1)
ir = int(ir)
# new alpha value to increase the radius of groups proximity
alpha = max(alpha_max * float(1 - ne * alpha_step) , alpha_min)
# update the grid coefficients applying alpha parameter
inn0 = int(ihit) - int(ir)
inn9 = int(ihit) + int(ir)
jnn0 = int(jhit) - int(ir)
jnn9 = int(jhit) + int(ir)
while inn0 <= inn9:
jnn0 = int(jhit) - int(ir)
while jnn0 <= jnn9:
if not (inn0 < 0 or inn0 >= nx):
if not (jnn0 < 0 or jnn0 >= ny):
arr_c = arr_r[i]
k = 0
while k < m:
ig = n.getcellindex((inn0,jnn0,k))
grid[ig] += alpha * (arr_c[k+1] - grid[ig])
k += 1
jnn0 += 1
inn0 += 1
i += 1
else:
print()
print("Min alpha reached ")
print()
break
ne += 1
except KeyboardInterrupt:
print()
print("KeyboardInterrupt (Ctrl/C) ")
print()
pass
# computing results
# grid = grid min
grid = copy.deepcopy(gridm)
# write min grid file
arch_grid_file = open(arch_grid, 'w')
ii = 0
while ii < nx:
j = 0
while j < ny:
k = 0
while k < m:
ig = n.getcellindex((ii,j,k))
arch_grid_file.write('%6i %s %.6i %s %.6i %s %14.7f %s' % (ii,' ', j ,' ', k,' ', grid[ig], "\n"))
k += 1
j += 1
ii += 1
arch_grid_file.close()
# catalog input by min grid
ii = 0
while ii < n_rows:
ihit = 0
jhit = 0
dhit = 100000.0
# from 1 to numbers of groups
ir = 0
while ir < nx: # from 1 to numbers of groups
jc = 0
while jc < ny: # from 1 to numbers of groups
d = 0.0
neff = 0
k = 0
while k < n_cols: # update the sum of squared deviation of input
# value from the grid coefficient
arr_c = arr_r[ii]
ig = n.getcellindex((ir,jc,k))
d = d + (arr_c[k+1] - grid[ig]) ** 2
k += 1
d = d / m
if d < dhit: # save the coordinates of the best coefficient
dhit = d
ihit = ir
jhit = jc
jc += 1
ir += 1
mtchx.append(ihit)
mtchy.append(jhit)
ii += 1
# write arch_catal file
arch_catal_file = open(arch_catal, 'w')
ii = 0
while ii < n_rows:
arch_catal_file.write("%.6i %s %.6i %s %.6i %s" % (ii, ' ', mtchx[ii], ' ', mtchy[ii], "\n"))
ii += 1
arch_catal_file.close()
# matrix of statistics
arr_cv = [] # CV array of the Groups and Total
arr_med = [] # means array of the Groups
riga_cv = [] # CV row in arr_cv
arr_col = [] # group temporary array
arr_grsg = [] # input data array (normalized)
arr_grsg_c = [] # copy of arr_grsg (for file out sort)
# input matrix sort in group sequence
ii = 0
ix = 0
while ii < n_rows:
ix += 1
gr1 = str(mtchx[ii])
if mtchx[ii] < 10:
gr1 = '0' + str(mtchx[ii])
sg1 = str(mtchy[ii])
if mtchy[ii] < 10:
sg1 = '0' + str(mtchy[ii])
riga_norm = arr_r[ii]
im = 0
riga_norm1 = []
while im <= m:
riga_norm1.append(str(riga_norm[im]))
im += 1
riga_norm2 = " ".join(riga_norm1)
gr_sg_txt = "G_" + gr1 + "_" + sg1 + " " + str(ix) + " " + riga_norm2
arr_grsg.append(gr_sg_txt)
ii += 1
arr_grsg.sort()
ii = 0
while ii < n_rows:
arr_grsg_c.append(arr_grsg[ii])
ii += 1
# setup of arr_cv matrix
num_gr = 0
gruppo0 = ""
ir = 0
while ir < n_rows:
grsg_key = arr_grsg_c[ir].split()
if not grsg_key[0] == gruppo0:
gruppo0 = grsg_key[0]
num_gr +=1
ic = 1
riga1 = []
riga1.append(grsg_key[0])
while ic <= m + 2: # adding new columns for row mean and n° of records
riga1.append(0.0)
ic += 1
arr_cv.append(riga1) # cv row
ir += 1
riga1 = []
riga1.append("*Means*") # adding new row for cv mean
ic = 1
while ic <= m + 2: # adding new column for row mean and n° of records
riga1.append(0.0)
ic += 1
arr_cv.append(riga1)
def found(x):
ir = 0
while ir < len(arr_cv):
linea_cv = arr_cv[ir]
key_cv = linea_cv[0]
if key_cv == x:
return ir
ir += 1
ir = 0
irx = len(arr_grsg_c)
ic = 3
linea_cv = arr_cv[0]
icx = len(linea_cv)
val_col = []
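# Walk the group-sorted records one data column at a time: whenever the group
# key changes, compute the coefficient of variation (std / |mean|) of the
# values collected for the previous group and store it, together with the
# group's record count, in that group's row of arr_cv.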
while ic < icx:
ir = 0
gruppo = ""
val_col = []
while ir < irx:
linea = arr_grsg_c[ir].split()
if linea[0] == gruppo or gruppo == "":
gruppo = linea[0]
val_col.append(float(linea[ic]))
else:
i_gruppo = found(gruppo)
linea_cv = arr_cv[i_gruppo]
media_v = abs(mean(val_col))
if media_v == 0.0:
media_v = 0.0000000001
std_v = sd(val_col)
cv_v = std_v / media_v
linea_cv[ic-2] = cv_v # cv value
linea_cv[len(linea_cv)-1] = len(val_col) # number of records
val_col = []
val_col.append(float(linea[ic]))
gruppo = linea[0]
ir += 1
i_gruppo = found(gruppo)
linea_cv = arr_cv[i_gruppo]
media_v = abs(mean(val_col))
if media_v == 0.0:
media_v = 0.0000000001
std_v = sd(val_col)
cv_v = std_v / media_v
linea_cv[ic-2] = cv_v # cv value
linea_cv[len(linea_cv)-1] = len(val_col) # number of records
ic += 1
ir = 0
irx = len(arr_cv)
linea_cv = arr_cv[0]
icx = len(linea_cv) - 2
ic = 1
num_rec1 = 0
while ir < irx: # rows mean
media_riga = 0.0
ic = 1
num_col1 = 0
linea_cv = arr_cv[ir]
while ic < icx:
media_riga += float(linea_cv[ic])
num_col1 += 1
ic += 1
linea_cv[icx] = media_riga / num_col1
num_rec1 += linea_cv[icx + 1]
ir += 1
ir = 0
ic = 1
while ic < icx: # weighted mean of columns
media_col = 0.0
ir = 0
num_rec1 = 0
while ir < irx - 1:
linea_cv = arr_cv[ir]
media_col = media_col + linea_cv[ic] * linea_cv[icx+1] # linea_cv[icx+1] = number of records
num_rec1 = num_rec1 + linea_cv[icx+1]
ir += 1
linea_cv = arr_cv[irx - 1]
linea_cv[ic] = media_col / num_rec1
ic += 1
# updating mean of the row
linea_cv = arr_cv[irx - 1]
linea_means = linea_cv[1:icx]
media_riga = mean(linea_means)
linea_cv[icx] = media_riga # Total mean
linea_cv[icx + 1] = num_rec1 # n° of records
cv_media_gen_after = str(media_riga)
cv_media_gen_after = cv_media_gen_after[0:6]
# write cv file
testata_cv = testata
testata_cv[0] = "*Groups*"
testata_cv.append("*Mean*")
testata_cv.append("N_recs")
arch_cv_file = open(arch_cv, 'w')
ic = 0
while ic <= icx + 1:
arch_cv_file.write('%s %s ' % (testata_cv[ic], " "*(9-len(testata_cv[ic]))))
ic += 1
arch_cv_file.write('%s' % ('\n'))
ir = 0
while ir < irx:
ic = 0
linea_cv = arr_cv[ir]
while ic <= icx + 1:
if ic == 0:
arch_cv_file.write('%s %s ' % (linea_cv[0], " "))
else:
if ic <= icx:
arch_cv_file.write('%7.4f %s ' % (linea_cv[ic], " "))
else:
arch_cv_file.write('%6i %s ' % (linea_cv[ic], " "))
ic += 1
arch_cv_file.write('%s' % ("\n"))
ir += 1
ic = 0
media_xcv = mean(xcv[1:icx])
while ic <= icx : # print CV input (before catalogue)
if ic == 0:
arch_cv_file.write('%s %s ' % ("*CVinp*", " "))
else:
if ic < icx:
arch_cv_file.write('%7.4f %s ' % (xcv[ic], " "))
else:
arch_cv_file.write('%7.4f %s ' % (media_xcv, " "))
arch_cv_file.write('%6i %s ' % (linea_cv[ic+1], " "))
ic += 1
arch_cv_file.write('%s' % ("\n"))
#=========instructions added by <NAME> 29/02/2012======================
#know_index = str(1.0 - float(cv_media_gen_after) / float(str_med_cv_gen))
#know_index = know_index[0:6]
#arch_cv_file.write('%s %s %s' % ('*KIndex* ', know_index, '\n'))
#=========end of instructions added by <NAME> 29/02/2012==============
arch_cv_file.close()
# writing out catalog file
testata_cat1 = []
testata_cat1.append("*Group*")
arch_output_file = open(arch_output, 'w')
ic= 0
while ic < icx:
testata_cat1.append(testata_cat[ic])
ic += 1
ic= 0
while ic < len(testata_cat1):
arch_output_file.write('%s %s ' % (testata_cat1[ic], " "*(15-len(testata_cat1[ic]))))
ic += 1
arch_output_file.write('%s' % ("\n"))
index = 0
while index < len(arr_orig):
riga_orig = arr_orig[index]
ic = 0
while ic < len(riga_orig):
if not(isinstance(riga_orig[ic],str)):
riga_orig[ic] = str(riga_orig[ic])
ic += 1
# prepend a 0 if gr / sg < 10
gr1 = str(mtchx[index])
if mtchx[index] < 10:
gr1 = '0' + str(mtchx[index])
sg1 = str(mtchy[index])
if mtchy[index] < 10:
sg1 = '0' + str(mtchy[index])
arr_rig0 = "G_" + gr1 + "_" + sg1 + " "*8
arch_output_file.write('%s ' % (arr_rig0))
ic= 0
while ic < len(riga_orig):
arch_output_file.write('%s %s ' % (riga_orig[ic], " "*(15-len(riga_orig[ic]))))
ic += 1
arch_output_file.write('%s' % ("\n"))
index += 1
testata_cat1 = []
testata_cat1.append("*Group*")
testata_cat1.append("*RecNum*")
arch_sort_file = open(arch_sort, 'w')
ic= 0
while ic < icx:
testata_cat1.append(testata_cat[ic])
ic += 1
ic= 0
while ic < len(testata_cat1):
arch_sort_file.write('%s %s ' % (testata_cat1[ic], " "*(15-len(testata_cat1[ic]))))
ic += 1
arch_sort_file.write('%s' % ("\n"))
index = 0
while index < len(arr_grsg_c):
riga_grsg = arr_grsg_c[index].split()
ic = 0
while ic < len(riga_grsg):
val_txt = riga_grsg[ic]
val_txt = val_txt[0:13]
arch_sort_file.write('%s %s ' % (val_txt, " "*(15-len(val_txt))))
ic += 1
if index < len(arr_grsg_c) - 1:
arch_sort_file.write('%s' % ("\n"))
index += 1
arch_sort_file.close()
# writing out catalog and sorted file
arr_outsrt = []
index = 0
while index < len(arr_orig):
riga_sort = []
# prepend a 0 if gr / sg < 10
gr1 = str(mtchx[index])
if mtchx[index] < 10:
gr1 = '0' + str(mtchx[index])
sg1 = str(mtchy[index])
if mtchy[index] < 10:
sg1 = '0' + str(mtchy[index])
riga_sort.append("G_" + gr1 + "_" + sg1)
ic = 0
riga_orig = arr_orig[index]
while ic < len(riga_orig):
val_riga = riga_orig[ic]
riga_sort.append(val_riga)
ic += 1
arr_outsrt.append(riga_sort)
index += 1
for line in arr_outsrt:
line = "".join(line)
arr_outsrt.sort()
testata_srt = []
testata_srt.append("*Group*")
arch_outsrt_file = open(arch_outsrt, 'w')
ic= 0
while ic < icx:
testata_srt.append(testata_orig[ic])
ic += 1
ic= 0
while ic < len(testata_srt):
arch_outsrt_file.write('%s %s' % (testata_srt[ic], " "*(15-len(testata_srt[ic]))))
ic += 1
arch_outsrt_file.write('%s' % ("\n"))
index = 0
key_gruppo = ""
while index < len(arr_outsrt):
riga_sort = arr_outsrt[index]
index_c = 0
while index_c < len(riga_sort):
if index_c == 0:
if riga_sort[0] != key_gruppo:
# arch_outsrt_file.write('%s ' % ("\n"))
key_gruppo = riga_sort[0]
valore = riga_sort[index_c]
arch_outsrt_file.write('%s %s' % (valore, " "*(15-len(valore))))
index_c += 1
if index < len(arr_grsg_c) - 1:
arch_outsrt_file.write('%s' % ("\n"))
index += 1
arch_outsrt_file.close()
print("###############################################################################")
print("# KB_CAT KNOWLEDGE DISCOVERY IN DATA MINING (CATALOG PROGRAM) #")
print("# by <NAME> (COPYRIGHT MARCH 2011 ALL RIGHTS RESERVED) #")
print("# Language used: PYTHON #")
print("###############################################################################")
arch_log_file = open(arch_log, 'w')
arch_log_file.write("%s %s" % ("############################################################################", "\n"))
arch_log_file.write("%s %s" % ("# KB_CAT KNOWLEDGE DISCOVERY IN DATA MINING (CATALOG PROGRAM) #", "\n"))
arch_log_file.write("%s %s" % ("# by <NAME> (COPYRIGHT MARCH 2011 ALL RIGHTS RESERVED) #", "\n"))
arch_log_file.write("%s %s" % ("# Language used: PYTHON . #", "\n"))
arch_log_file.write("%s %s" % ("############################################################################", "\n"))
arch_log_file.write("%s %s %s" % ("Input File -> ", file_input, "\n"))
arch_log_file.write("%s %s %s" % ("Numer of Groups (3 - 20) -> ", str(gruppi_num), "\n"))
arch_log_file.write("%s %s %s" % ("Normalization (Max, Std, None) -> ", tipo_norm, "\n"))
arch_log_file.write("%s %s %s" % ("Start Value of alpha (from 1.8 to 0.9) -> ", str(alpha_max), "\n"))
arch_log_file.write("%s %s %s" % ("End Value of alpha (from 0.5 to 0.0001) -> ", str(alpha_min), "\n"))
arch_log_file.write("%s %s %s" % ("Decreasing step of alpha (from 0.1 to 0.001) -> ", str(alpha_step), "\n"))
arch_log_file.write("%s" % ("=========================OUTPUT=======================================================\n"))
arch_log_file.write("%s %s %s" % ("Output File Catalog.original ", arch_output, "\n"))
arch_log_file.write("%s %s %s" % ("Output File Catalog.sort ", arch_outsrt, "\n"))
arch_log_file.write("%s %s %s" % ("Output File Summary sort ", arch_sort, "\n"))
arch_log_file.write("%s %s %s" % ("Output File Matrix Catal. ", arch_catal, "\n"))
arch_log_file.write("%s %s %s" % ("Output File Means, STD, CV. ", arch_medsd, "\n"))
arch_log_file.write("%s %s %s" % ("Output File CV of the Groups ", arch_cv, "\n"))
arch_log_file.write("%s %s %s" % ("Output File Training Grid ", arch_grid, "\n"))
arch_log_file.write("%s %s %s" % ("Output File Run Parameters ", arch_log, "\n"))
#=========instructions added by <NAME> 29/02/2012======================
know_index = str(1.0 - float(cv_media_gen_after) / float(str_med_cv_gen))
know_index = know_index[0:6]
arch_log_file.write('%s %s %s' % ('*KIndex* ', know_index, '\n'))
#=========end of instructions added by <NAME> 29/02/2012==============
min_err_txt = "%12.3f" % min_err # format 8 integer and 3 decimals
alpha_txt = "%12.5f" % alpha # format 6 integer and 5 decimals
alpha_min_txt = "%12.5f" % alpha_min # format 6 integer and 5 decimals
print()
if min_err == 1000000000.000:
print("Oops! No result. Try again with new alpha parameters")
print()
print(("EPOCH " + str(min_epok -1) + " WITH MIN ERROR " + min_err_txt +
" starting alpha " + alpha_min_txt + " ending alpha " + alpha_txt +
" Iterations " + str(iter) + " Total Epochs " + str(ne - 1)))
print()
print('Output File Catalog.original ' + arch_output)
print('Output File Catalog.sort ' + arch_outsrt)
print('Output File Summary sort ' + arch_sort)
print('Output File Matrix Catal. ' + arch_catal)
print('Output File Means, STD, CV. ' + arch_medsd)
print('Output File CV of the Groups ' + arch_cv)
print('Output File Training Grid ' + arch_grid)
print('Output File Run Parameters ' + arch_log)
print('CV before Catalog ' + str_med_cv_gen)
print('CV after Catalog ' + cv_media_gen_after)
know_index = str(1.0 - float(cv_media_gen_after) / float(str_med_cv_gen))
know_index = know_index[0:6]
print('Knowledge Index ' + know_index)
print()
# Elapsed time
t1 = datetime.datetime.now()
elapsed_time = t1 - t0
print("Elapsed time (seconds) : " + str(elapsed_time.seconds))
print()
```
#### File: tests/expect-fail23/recipe-580622.py
```python
import os, sys
import wx
import wx.grid as gridlib
import wx.lib.gridmovers as gridmovers
import PyPDF2 # only used for output (make_pdf)
import fitz
# some abbreviations
DefPos = wx.DefaultPosition
DefSize = wx.DefaultSize
class PDFTable(gridlib.PyGridTableBase):
def __init__(self):
gridlib.PyGridTableBase.__init__(self)
self.colLabels = ['File','Pages','from','to','rotate']
self.dataTypes = [gridlib.GRID_VALUE_STRING,
gridlib.GRID_VALUE_NUMBER,
gridlib.GRID_VALUE_NUMBER,
gridlib.GRID_VALUE_NUMBER,
gridlib.GRID_VALUE_CHOICE + ':0, 90, 180, 270',
]
self.data = []
#==============================================================================
# Methods for the wxPyGridTableBase interface (mostly mandatory)
#==============================================================================
def GetNumberRows(self):
return len(self.data)
def GetNumberCols(self):
return len(self.colLabels)
def IsEmptyCell(self, row, col):
try:
return not self.data[row][col]
except IndexError:
return True
def GetValue(self, row, col):
return self.data[row][col]
def SetValue(self, row, col, value):
self.data[row][col] = value
#==============================================================================
# Provide column header names
#==============================================================================
def GetColLabelValue(self, col):
return self.colLabels[col]
#==============================================================================
# Provide row header names (just the line numbers in our case)
#==============================================================================
def GetRowLabelValue(self,row):
return str(row +1)
#==============================================================================
# Provide type of a cell value
#==============================================================================
def GetTypeName(self, row, col):
return self.dataTypes[col]
#==============================================================================
# Move a row
#==============================================================================
def MoveRow(self,frm,to):
grid = self.GetView()
if grid:
# Move the rowLabels and data rows
oldData = self.data[frm]
del self.data[frm]
if to > frm:
self.data.insert(to-1,oldData)
else:
self.data.insert(to,oldData)
#==============================================================================
# inform the grid about our doing
#==============================================================================
grid.BeginBatch()
msg = gridlib.GridTableMessage(
self, gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED, frm, 1)
grid.ProcessTableMessage(msg)
msg = gridlib.GridTableMessage(
self, gridlib.GRIDTABLE_NOTIFY_ROWS_INSERTED, to, 1)
grid.ProcessTableMessage(msg)
grid.EndBatch()
#==============================================================================
# Insert a row
#==============================================================================
def NewRow(self, zeile):
grid = self.GetView()
if grid:
self.data.append(zeile)
grid.BeginBatch()
msg = gridlib.GridTableMessage(
self, gridlib.GRIDTABLE_NOTIFY_ROWS_APPENDED, 1)
grid.ProcessTableMessage(msg)
grid.EndBatch()
#==============================================================================
# Duplicate a row
#==============================================================================
def DuplicateRow(self, row):
grid = self.GetView()
if grid:
zeile = [self.data[row][0], self.data[row][1],
self.data[row][2], self.data[row][3],
self.data[row][4]]
self.data.insert(row, zeile)
grid.BeginBatch()
msg = gridlib.GridTableMessage(
self, gridlib.GRIDTABLE_NOTIFY_ROWS_INSERTED, row, 1)
grid.ProcessTableMessage(msg)
grid.EndBatch()
#==============================================================================
# Remove a row
#==============================================================================
def DeleteRow(self, row):
grid = self.GetView()
if grid:
del self.data[row]
grid.BeginBatch()
msg = gridlib.GridTableMessage(self,
gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED, row, 1)
grid.ProcessTableMessage(msg)
grid.EndBatch()
#==============================================================================
# Define the grid
#==============================================================================
class MyGrid(gridlib.Grid):
def __init__(self, parent):
gridlib.Grid.__init__(self, parent, -1)
table = PDFTable() # create PDFTable object
#==============================================================================
# Announce our table to the grid and let it manage it ('True')
#==============================================================================
self.SetTable(table, True)
#==============================================================================
# do some cell attribute setting
#==============================================================================
align1 = gridlib.GridCellAttr()
align1.SetAlignment(wx.ALIGN_RIGHT, wx.ALIGN_CENTER)
self.SetColAttr(2, align1)
self.SetColAttr(3, align1)
self.SetColAttr(4, align1)
align2 = gridlib.GridCellAttr()
align2.SetAlignment(wx.ALIGN_CENTER, wx.ALIGN_CENTER)
self.SetColAttr(5, align2)
#==============================================================================
# Enable Row moving
#==============================================================================
gridmovers.GridRowMover(self)
#==============================================================================
# Bind: move a row
#==============================================================================
self.Bind(gridmovers.EVT_GRID_ROW_MOVE, self.OnRowMove, self)
#==============================================================================
# Bind: delete a row
#==============================================================================
self.Bind(gridlib.EVT_GRID_LABEL_RIGHT_DCLICK, self.OnRowDel, self)
#==============================================================================
# Bind: duplicate a row
#==============================================================================
self.Bind(gridlib.EVT_GRID_LABEL_LEFT_DCLICK, self.OnRowDup, self)
#==============================================================================
# Event Method: move a row
#==============================================================================
def OnRowMove(self,evt):
frm = evt.GetMoveRow() # Row being moved
to = evt.GetBeforeRow() # Before which row to insert
self.GetTable().MoveRow(frm,to)
#==============================================================================
# Event Method: delete a row
#==============================================================================
def OnRowDel(self, evt):
row = evt.GetRow()
self.GetTable().DeleteRow(row)
#==============================================================================
# Event Method: duplicate a row
#==============================================================================
def OnRowDup(self, evt):
row = evt.GetRow()
col = evt.GetCol()
if col < 0 and row >= 0: # else it is not a row duplication!
self.GetTable().DuplicateRow(row)
evt.Skip()
#==============================================================================
#
# Define the dialog
#
#==============================================================================
class PDFDialog (wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__ (self, parent, id = wx.ID_ANY,
title = "Join PDF files",
pos = DefPos,
size = wx.Size(900,710),
style = wx.CAPTION|
wx.CLOSE_BOX|
wx.DEFAULT_DIALOG_STYLE|
wx.MAXIMIZE_BOX|
wx.MINIMIZE_BOX|
wx.RESIZE_BORDER)
self.SetSizeHintsSz(DefSize, DefSize)
self.FileList = {}
#==============================================================================
# Create Sizer 01 (browse button and explaining text)
#==============================================================================
szr01 = wx.BoxSizer(wx.HORIZONTAL)
self.btn_neu = wx.FilePickerCtrl(self, wx.ID_ANY,
wx.EmptyString,
"Select a PDF file",
"*.pdf",
DefPos, DefSize,
wx.FLP_CHANGE_DIR|wx.FLP_FILE_MUST_EXIST|wx.FLP_SMALL,
)
szr01.Add(self.btn_neu, 0, wx.ALIGN_TOP|wx.ALL, 5)
msg_txt ="""ADD files with this button. Path and total page number will be appended to the table below.\nDUPLICATE row: double-click its number. MOVE row: drag its number with the mouse. DELETE row: right-double-click its number."""
msg = wx.StaticText(self, wx.ID_ANY, msg_txt,
DefPos, wx.Size(-1, 50), wx.ALIGN_LEFT)
msg.Wrap(-1)
msg.SetFont(wx.Font(10, 74, 90, 90, False, "Arial"))
szr01.Add(msg, 0, wx.ALIGN_TOP|wx.ALL, 5)
#==============================================================================
# Create Sizer 02 (contains the grid)
#==============================================================================
self.szr02 = MyGrid(self)
self.szr02.AutoSizeColumn(0)
self.szr02.AutoSizeColumn(1)
self.szr02.SetColSize(2, 45)
self.szr02.SetColSize(3, 45)
self.szr02.SetColSize(4, 45)
self.szr02.SetRowLabelSize(30)
# Columns 1 and 2 are read only
attr_ro = gridlib.GridCellAttr()
attr_ro.SetReadOnly(True)
self.szr02.SetColAttr(0, attr_ro)
self.szr02.SetColAttr(1, attr_ro)
#==============================================================================
# Create Sizer 03 (output parameters)
#==============================================================================
szr03 = wx.FlexGridSizer( 5, 2, 0, 0 ) # 5 rows, 2 cols, gap sizes 0
szr03.SetFlexibleDirection( wx.BOTH )
szr03.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
tx_ausdat = wx.StaticText(self, wx.ID_ANY, "Output:",
DefPos, DefSize, 0)
tx_ausdat.Wrap(-1)
szr03.Add(tx_ausdat, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
self.btn_aus = wx.FilePickerCtrl(self, wx.ID_ANY,
os.path.join(os.path.expanduser('~'), "joined.pdf"),
"Specify output file",
"*.pdf",
DefPos, wx.Size(480,-1),
wx.FLP_OVERWRITE_PROMPT|
wx.FLP_SAVE|wx.FLP_SMALL|
wx.FLP_USE_TEXTCTRL)
szr03.Add(self.btn_aus, 0, wx.ALL, 5)
tx_autor = wx.StaticText( self, wx.ID_ANY, "Author:",
DefPos, DefSize, 0 )
tx_autor.Wrap( -1 )
szr03.Add( tx_autor, 0, wx.ALL, 5 )
self.ausaut = wx.TextCtrl( self, wx.ID_ANY,
os.path.basename(os.path.expanduser('~')),
DefPos, wx.Size(480, -1), 0)
szr03.Add( self.ausaut, 0, wx.ALL, 5 )
pdf_titel = wx.StaticText( self, wx.ID_ANY, "Title:",
DefPos, DefSize, 0 )
pdf_titel.Wrap( -1 )
szr03.Add( pdf_titel, 0, wx.ALL, 5 )
self.austit = wx.TextCtrl( self, wx.ID_ANY,
"Joined PDF files",
DefPos, wx.Size(480, -1), 0 )
szr03.Add( self.austit, 0, wx.ALL, 5 )
tx_subject = wx.StaticText( self, wx.ID_ANY, "Subject:",
DefPos, DefSize, wx.ALIGN_RIGHT)
tx_subject.Wrap( -1 )
szr03.Add( tx_subject, 0, wx.ALL, 5 )
self.aussub = wx.TextCtrl( self, wx.ID_ANY,
"Joined PDF files",
DefPos, wx.Size(480, -1), 0 )
szr03.Add( self.aussub, 0, wx.ALL, 5 )
tx_blank = wx.StaticText( self, wx.ID_ANY, " ",
DefPos, DefSize, wx.ALIGN_RIGHT)
tx_blank.Wrap( -1 )
szr03.Add( tx_blank, 0, wx.ALL, 5 )
self.noToC = wx.CheckBox( self, wx.ID_ANY,
"check if no table of contents wanted",
DefPos, DefSize, wx.ALIGN_LEFT)
szr03.Add( self.noToC, 0, wx.ALL, 5 )
#==============================================================================
# Create Sizer 04 (OK / Cancel buttons)
#==============================================================================
szr04 = wx.StdDialogButtonSizer()
szr04OK = wx.Button(self, wx.ID_OK)
szr04.AddButton(szr04OK)
szr04Cancel = wx.Button(self, wx.ID_CANCEL)
szr04.AddButton(szr04Cancel)
szr04.Realize()
#==============================================================================
# 3 horizontal lines (decoration only)
#==============================================================================
linie1 = wx.StaticLine(self, wx.ID_ANY,
DefPos, DefSize, wx.LI_HORIZONTAL)
linie2 = wx.StaticLine(self, wx.ID_ANY,
DefPos, DefSize, wx.LI_HORIZONTAL)
linie3 = wx.StaticLine(self, wx.ID_ANY,
DefPos, DefSize, wx.LI_HORIZONTAL)
mainszr = wx.BoxSizer(wx.VERTICAL)
mainszr.Add(szr01, 0, wx.EXPAND, 5)
mainszr.Add(linie1, 0, wx.EXPAND |wx.ALL, 5)
mainszr.Add(self.szr02, 1, wx.EXPAND, 5)
mainszr.Add(linie2, 0, wx.EXPAND|wx.ALL, 5)
mainszr.Add(szr03, 0, wx.EXPAND, 5)
mainszr.Add(linie3, 0, wx.EXPAND |wx.ALL, 5)
mainszr.Add(szr04, 0, wx.ALIGN_TOP|wx.ALIGN_CENTER_HORIZONTAL, 5)
self.SetSizer(mainszr)
self.Layout()
self.Centre(wx.BOTH)
#==============================================================================
# Define event handlers for the buttons
#==============================================================================
self.btn_neu.Bind(wx.EVT_FILEPICKER_CHANGED, self.NewFile)
self.btn_aus.Bind(wx.EVT_FILEPICKER_CHANGED, self.AusgabeDatei)
def __del__(self):
pass
#==============================================================================
# "NewFile" - Event Handler for including new files
#==============================================================================
def NewFile(self, event):
dat = event.GetPath()
if dat not in self.FileList:
doc = fitz.Document(dat)
if doc.needsPass:
wx.MessageBox("Cannot read encrypted file\n" + dat,
"Encrypted File Error")
event.Skip()
return
self.FileList[dat] = doc
else:
doc = self.FileList[dat]
seiten = doc.pageCount
zeile = [dat, str(seiten), 1, str(seiten), 0]
self.szr02.Table.NewRow(zeile)
self.szr02.AutoSizeColumn(0)
self.Layout()
event.Skip()
#==============================================================================
# "AusgabeDatei" - Event Handler for out file
#==============================================================================
def AusgabeDatei(self, event):
event.Skip()
#==============================================================================
# Create the joined PDF
#==============================================================================
def make_pdf(dlg):
# no file selected: treat like "Cancel"
if not len(dlg.szr02.Table.data): # no files there
return None
cdate = wx.DateTime.Now().Format("D:%Y%m%d%H%M%S-04'30'")
ausgabe = dlg.btn_aus.GetPath()
pdf_fle_out = open(ausgabe,"wb")
pdf_out = PyPDF2.PdfFileWriter()
aus_nr = 0 # current page number in output
pdf_dict = {"/Creator":"PDF-Joiner",
"/Producer":"PyMuPDF, PyPDF2",
"/CreationDate": cdate,
"/ModDate": cdate,
"/Title": dlg.austit.Value,
"/Author": dlg.ausaut.Value,
"/Subject": dlg.aussub.Value}
pdf_out.addMetadata(pdf_dict)
parents = {}
#==============================================================================
# process one input file
#==============================================================================
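# For each grid row: clamp the requested page range to the file's page
# count, rotate the pages if a rotation angle was given, copy them into
# the output writer and, unless suppressed, add a bookmark for the file
# plus the relevant part of its table of contents.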
for zeile in dlg.szr02.Table.data:
dateiname = zeile[0]
doc = dlg.FileList[dateiname]
max_seiten = int(zeile[1])
#==============================================================================
# user input minus 1, PDF pages count from zero
# also correct any inconsistent input
#==============================================================================
von = int(zeile[2]) - 1
bis = int(zeile[3]) - 1
von = max(0, von) # "from" must not be < 0
bis = min(max_seiten - 1, bis) # "to" must not be > max pages - 1
bis = max(von, bis) # "to" cannot be < "from"
rot = int(zeile[4]) # get rotation angle
pdfin = PyPDF2.PdfFileReader(dateiname)
for p in range(von, bis + 1): # read pages from input file
pdf_page = pdfin.getPage(p)
if rot > 0:
pdf_page.rotateClockwise(rot) # rotate the page
pdf_out.addPage(pdf_page) # output the page
# title = "infile [from-to (max.pages)]"
if dlg.noToC.Value: # no ToC wanted
continue
bm_main_title = "%s [%s-%s (%s)]" % \
(os.path.basename(dateiname[:-4]).encode("latin-1"), von + 1,
bis + 1, max_seiten)
bm_main = pdf_out.addBookmark(bm_main_title, aus_nr,
None, None, False, False, "/Fit")
print(1, bm_main_title, aus_nr)
parents[1] = bm_main # lvl 1 bookmark is infile's title
toc = fitz.GetToC(doc) # get infile's table of contents
bm_lst = [] # prepare the relevant sub-ToC
for t in toc:
if t[2] > von and t[2] <= bis + 1: # relevant page range only
bm_lst.append([t[0] + 1, # indent increased 1 level
t[1], # the title
t[2] + aus_nr - von - 1]) # new page number
aus_nr += (bis - von + 1) # increase output counter
if bm_lst == []: # do we have a sub-ToC?
continue # no, next infile
# while indent gap is too large, prepend "filler" bookmarks to bm_lst
while bm_lst[0][0] > 2:
zeile = [bm_lst[0][0] - 1, "<>", bm_lst[0][2]]
bm_lst.insert(0, zeile)
# now add infile's bookmarks
for b in bm_lst:
bm = pdf_out.addBookmark(b[1].encode("latin-1"), b[2],
parents[b[0]-1], None, False, False, "/Fit")
parents[b[0]] = bm
#==============================================================================
# all input files processed
#==============================================================================
pdf_out.write(pdf_fle_out)
pdf_fle_out.close()
return ausgabe
#==============================================================================
#
# Main program
#
#==============================================================================
if wx.VERSION[0] >= 3:
pass
else:
print("wx Version needs to be at least 3")
sys.exit(1)
app = None
app = wx.App()
this_dir = os.getcwd()
#==============================================================================
# create dialog
#==============================================================================
dlg = PDFDialog(None)
#==============================================================================
# Show dialog and wait ...
#==============================================================================
rc = dlg.ShowModal()
#==============================================================================
# if OK pressed, create output PDF
#==============================================================================
if rc == wx.ID_OK:
ausgabe = make_pdf(dlg)
dlg.Destroy()
app = None
```
#### File: tests/expect-fail23/recipe-81330.py
```python
import re
#
# The simplest, lambda-based implementation
#
def multiple_replace(dict, text):
""" Replace in 'text' all occurences of any key in the given
dictionary by its corresponding value. Returns the new tring."""
# Create a regular expression from the dictionary keys
regex = re.compile("(%s)" % "|".join(map(re.escape, list(dict.keys()))))
# For each match, look-up corresponding value in dictionary
return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
#
# You may combine both the dictionary and search-and-replace
# into a single object using a 'callable' dictionary wrapper
# which can be directly used as a callback object.
#
# In Python 2.2+ you may extend the 'dictionary' built-in class instead
from UserDict import UserDict
class Xlator(UserDict):
""" An all-in-one multiple string substitution class """
def _make_regex(self):
""" Build a regular expression object based on the keys of
the current dictionary """
return re.compile("(%s)" % "|".join(map(re.escape, list(self.keys()))))
def __call__(self, mo):
""" This handler will be invoked for each regex match """
# Count substitutions
self.count += 1
# Look up the replacement string in the dictionary
return self[mo.string[mo.start():mo.end()]]
def xlat(self, text):
""" Translate text, returns the modified text. """
# Reset substitution counter
self.count = 0
# Process text
return self._make_regex().sub(self, text)
#
# Test
#
if __name__ == "__main__":
text = "<NAME> is the creator of Perl"
dict = {
"<NAME>" : "<NAME>",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print(multiple_replace(dict, text))
xlat = Xlator(dict)
print(xlat.xlat(text))
print("Changed %d thing(s)" % xlat.count)
``` |
{
"source": "JohannesBuchner/ultragem",
"score": 3
} |
#### File: JohannesBuchner/ultragem/ultragem.py
```python
import random
import time
import pygame
import sys
import copy
import numpy
from pygame.locals import QUIT, KEYUP, K_ESCAPE, K_BACKSPACE, MOUSEBUTTONUP, MOUSEBUTTONDOWN
from gemengine import Board, InitialFillerDoubleLockSpecial, InitialFillerDoubleLock, InitialFillerDisable, NastyTopFiller, BoardGravityPuller, Combiner, PairCombiner, Activater
FPS = 60 # frames per second to update the screen
HINTFPS = FPS / 10
SCOREFPS = FPS / 20
WINDOWWIDTH = 400 # width of the program's window, in pixels
WINDOWHEIGHT = 400 # height in pixels
GEMIMAGESIZE = 32 # width & height of each space in pixels
# NUMGEMIMAGES is the number of gem types. You will need .png image files named
# gem0.png, gem1.png, etc. up to gem(N-1).png.
NUMGEMIMAGES = 7
NUMFIREIMAGES = 30
NUMGLANCEIMAGES = 29
# NUMMATCHSOUNDS is the number of different sounds to choose from when a match
# is made. The .wav files are named match0.wav, match1.wav, etc.
NUMMATCHSOUNDS = 6
MOVERATE = 5 # 1 to 100, larger num means faster animations
HIGHLIGHTCOLOR = (0, 255, 255) # color of the selected gem's border
HINTCOLOR = (128, 255, 255) # color to hint to a possible move
BGCOLOR = (170, 190, 255) # background color on the screen
#BGCOLOR = (255, 255, 255) # background color on the screen
GRIDCOLOR = (0, 0, 255) # color of the game board
GAMEOVERCOLOR = (255, 100, 100) # color of the "Game over" text.
GAMEOVERBGCOLOR = (0, 0, 0) # background color of the "Game over" text.
SCORECOLOR = (85, 65, 0)
LINKCOLOR = (0, 0, 255)
# Constants for the different directions. The numbers correspond to the
# keyboard's keypad, but they could be any arbitrary value.
UP = 8
RIGHT = 6
DOWN = 2
LEFT = 4
EMPTY_SPACE = -1 # an arbitrary, nonpositive value
ROWABOVEBOARD = 'row above board' # an arbitrary, noninteger value
class GameInvalidException(Exception):
pass
class UltraGemGame(object):
def __init__(self, gameid=1):
self.gameid = gameid
self.ncolors = 6
self.journey = 'journey-auto'
self.setBoardSize(8,8)
self.rng = numpy.random
#self.rng.seed(4)
def setBoardSize(self, h, w):
self.BOARDWIDTH = w # how many columns in the board
self.BOARDHEIGHT = h # how many rows in the board
# The amount of space to the sides of the board to the edge of the window is
# used several times, so calculate it once here and store in variables.
self.XMARGIN = int((WINDOWWIDTH - GEMIMAGESIZE * self.BOARDWIDTH) / 2)
self.YMARGIN = int((WINDOWHEIGHT - GEMIMAGESIZE * self.BOARDHEIGHT) / 2)
def run(self):
pygame.init()
self.BASICFONT = pygame.font.Font('freesansbold.ttf', 24)
self.SMALLFONT = pygame.font.Font('freesansbold.ttf', 12)
self.FPSCLOCK = pygame.time.Clock()
self.WINDOWSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.set_caption('UltraGem')
self.WINDOWSURF.fill(BGCOLOR)
txt = self.BASICFONT.render('Loading ...', 1, GAMEOVERCOLOR, GAMEOVERBGCOLOR)
rect = txt.get_rect()
rect.center = int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2)
self.WINDOWSURF.blit(txt, rect)
pygame.display.update()
# Load the images
self.GEMIMAGES = {}
for lock, status in ('N',0), ('X', -1), ('2',1), ('3',2):
for modifier, type in ('N',1), ('stripeH',2), ('stripeV',3), ('bomb',4):
for color in range(NUMGEMIMAGES):
i = color + 1
print('loading comb%s-%s-%s.png for %d,%d,%d' % (lock, i, modifier, status, type, color))
gemImage = pygame.image.load('graphics/comb%s-%s-%s.png' % (lock, i, modifier))
if gemImage.get_size() != (GEMIMAGESIZE, GEMIMAGESIZE):
gemImage = pygame.transform.smoothscale(gemImage, (GEMIMAGESIZE, GEMIMAGESIZE))
self.GEMIMAGES[(status, type, color)] = gemImage
modifier, type = 'spark', 5
i = 'N'
gemImage = pygame.image.load('graphics/comb%s-%s-%s.png' % (lock, i, modifier))
if gemImage.get_size() != (GEMIMAGESIZE, GEMIMAGESIZE):
gemImage = pygame.transform.smoothscale(gemImage, (GEMIMAGESIZE, GEMIMAGESIZE))
self.GEMIMAGES[(status, type, 0)] = gemImage
print('loading comb%s-%s-%s.png for %d,%d,%d' % (lock, i, modifier, status, type, 0))
if status > 0:
modifier, type = 'empty', 0
gemImage = pygame.image.load('graphics/gemlock%s.png' % (lock))
if gemImage.get_size() != (GEMIMAGESIZE, GEMIMAGESIZE):
gemImage = pygame.transform.smoothscale(gemImage, (GEMIMAGESIZE, GEMIMAGESIZE))
self.GEMIMAGES[(status, type, 0)] = gemImage
print('loading gemlock%s.png for %d,%d,%d' % (lock, status, type, 0))
status, type, color = 0, -1, 0
gemImage = pygame.image.load('graphics/nonfield.png')
if gemImage.get_size() != (GEMIMAGESIZE, GEMIMAGESIZE):
gemImage = pygame.transform.smoothscale(gemImage, (GEMIMAGESIZE, GEMIMAGESIZE))
self.GEMIMAGES[(status, type, color)] = gemImage
print('loading nonfield.png for %d,%d,%d' % (status, type, 0))
#print('images loaded:', self.GEMIMAGES.keys())
self.FIREIMAGES = []
for i in range(1,NUMFIREIMAGES+1):
gemImage = pygame.image.load('graphics/fire%s.png' % i)
if gemImage.get_size() != (GEMIMAGESIZE, GEMIMAGESIZE):
gemImage = pygame.transform.smoothscale(gemImage, (GEMIMAGESIZE, GEMIMAGESIZE))
self.FIREIMAGES.append(gemImage)
self.GLANCEIMAGES = []
for i in range(1,NUMGLANCEIMAGES+1):
gemImage = pygame.image.load('graphics/glance%s.png' % i)
if gemImage.get_size() != (GEMIMAGESIZE, GEMIMAGESIZE):
gemImage = pygame.transform.smoothscale(gemImage, (GEMIMAGESIZE, GEMIMAGESIZE))
self.GLANCEIMAGES.append(gemImage)
# Load the sounds.
GAMESOUNDS = {}
GAMESOUNDS['bad swap'] = pygame.mixer.Sound('sounds/badswap.wav')
GAMESOUNDS['match'] = []
for i in range(NUMMATCHSOUNDS):
GAMESOUNDS['match'].append(pygame.mixer.Sound('sounds/match%s.wav' % i))
self.GAMESOUNDS = GAMESOUNDS
BOARDRECTS = []
for x in range(self.BOARDWIDTH):
BOARDRECTS.append([])
for y in range(self.BOARDHEIGHT):
r = pygame.Rect((self.XMARGIN + (x * GEMIMAGESIZE),
self.YMARGIN + (y * GEMIMAGESIZE),
GEMIMAGESIZE,
GEMIMAGESIZE))
BOARDRECTS[x].append(r)
self.BOARDRECTS = BOARDRECTS
while True:
try:
self.last_move = None, None, None, None
self.nswaps = 0
self.boardlog = []
self.score = self.scoring_function([])
self.events_processed = 0
self.initGame()
self.runGame()
success = self.score[self.goalid] >= self.goalvalue
if success:
self.gameid += 1
with open('currentgame', 'w') as f:
f.write('%d\n' % self.gameid)
except GameInvalidException as e:
self.gameid += 1
print(e)
def setupGame(self, seed):
nrows, ncols, ncolors = self.BOARDWIDTH, self.BOARDHEIGHT, self.ncolors
rng = numpy.random.RandomState(seed)
board = Board(nrows=nrows, ncols=ncols)
# make lower numbers more likely to be selected
prows = 1. / (0.2 + numpy.arange(nrows))
prows /= prows.sum()
ndrows = rng.choice(numpy.arange(nrows), p=prows)
ndlrows = rng.choice(numpy.arange(nrows), p=prows)
pcols = 1. / (0.2+ numpy.arange(ncols))
pcols /= pcols.sum()
ndcols = rng.choice(numpy.arange(ncols), p=pcols) # draw a column count weighted by pcols (arrays must match in length)
ndlcols = rng.choice(numpy.arange(ncols), p=pcols)
if rng.uniform() < 0.1:
types = [2,3,4,5] if rng.uniform() < 0.5 else [2,3,4]
InitialFillerDoubleLockSpecial(board, ncolors=ncolors, types=types, nrows=ndlrows, ncols=ndlcols, rng=rng).run()
else:
InitialFillerDoubleLock(board, nrows=ndlrows, ncols=ndlcols, rng=rng).run()
if rng.uniform() < 0.1:
InitialFillerDisable(board, nrows=ndrows, ncols=ndcols, rng=rng).run()
topfill = NastyTopFiller(board, ncolors=ncolors)
return board, topfill
def setupUniqueGame(self, seed):
nrows, ncols, ncolors = self.BOARDWIDTH, self.BOARDHEIGHT, self.ncolors
board, topfill = self.setupGame(seed)
for i in range(1, seed):
board2, _ = self.setupGame(i)
if (board2.status == board.status).all() and (board2.type == board.type).all() and (board2.color == board.color).all():
raise GameInvalidException("Board with seed=%d same as seed=%d" % (i, seed))
return board, topfill
def loadGame(self, gameid):
self.BOARDWIDTH, self.BOARDHEIGHT, self.ncolors = None, None, None
with open('%s/%d' % (self.journey, gameid)) as f:
gameprops = {}
for line in f:
key, value = line.split(':')
gameprops[key] = value
key = key.upper()
if key == 'NCOLORS':
self.ncolors = int(value)
elif key == 'MAXSWAPS':
self.maxswaps = int(value)
elif key == 'GOALID':
self.goalid = int(value)
elif key == 'NMIN':
self.goalvalue = int(value)
elif key == 'DIFFICULTY':
d = float(value)
if d < 0.2:
self.difficulty_text = 'SUPER EASY'
elif d < 0.5:
self.difficulty_text = 'EASY'
elif d < 0.75:
self.difficulty_text = 'HARD'
elif d < 1.0:
self.difficulty_text = 'VERY HARD'
else:
self.difficulty_text = 'EXTREME'
elif key == 'BOARD':
a, b = value.split('x')
self.setBoardSize(int(a), int(b))
break
nrows, ncols, ncolors = self.BOARDWIDTH, self.BOARDHEIGHT, self.ncolors
board = Board(nrows=nrows, ncols=ncols)
for i, line in enumerate(f):
#print('parsing line', line)
for k in range(self.BOARDWIDTH):
txt = line[k*4:(k+1)*4]
#print('parsing chunk: "%s"' % txt)
type = 0
color = 0
status = 0
                    if txt.strip() == '':
# empty
pass
elif txt[1] == 'X':
type = -1
elif txt[1] == 'B':
status = 2
type = 0
elif txt[1] == 'b':
status = 1
type = 0
elif txt[0] == ' ':
type = 1
color = int(txt[1])
elif txt[0] == '=':
type = 2
color = int(txt[1])
elif txt[0] == '|':
type = 3
color = int(txt[1])
elif txt[0] == 'X':
type = 4
color = int(txt[1])
elif txt[0] == '#':
type = 5
color = 0
if txt[2] == 'L':
status = 2
elif txt[2] == 'l':
status = 1
board.type[i,k] = type
board.color[i,k] = color
board.status[i,k] = status
topfill = NastyTopFiller(board, ncolors=ncolors)
return board, topfill
def initGame(self):
#board, topfill = self.setupUniqueGame(self.gameid)
board, topfill = self.loadGame(self.gameid)
rng = self.rng
self.board = board
self.topfill = topfill
self.grav = BoardGravityPuller(board)
self.comb = Combiner(board)
self.paircomb = PairCombiner(board)
self.acto = Activater(board)
def fillBoardAndAnimate(self, board, points=None):
# dropping phase
anychange = True
nshuffles = 0
self.gameLog('fillBoardAndAnimate', self.board.copy())
print(self.board)
while True:
while anychange:
changes = self.grav.run()
anychange = len(changes) > 0
movingGems = []
#print('grav changes:', changes, anychange)
self.gameLog('grav', self.board.copy())
print(self.board)
for j, i, move in changes:
if move == 'dropped from top':
directionx = 0
directiony = 1
elif move == 'dropped from top-left':
directionx = 1
directiony = 1
elif move == 'dropped from top-right':
directionx = -1
directiony = 1
else:
assert False, move
assert self.getImageNum(j,i) != -1
movingGems.append(dict(imageNum=self.getImageNum(j,i),
x=i-directionx, y=j-directiony,
directionx=directionx, directiony=directiony))
changes = self.topfill.run()
anychange = len(changes) > 0 or anychange
#print('topfill changes:', changes, anychange)
self.gameLog('topfill', self.board.copy())
print(self.board)
for j, i, move in changes:
directionx = 0
directiony = 1
assert self.getImageNum(j,i) != -1
movingGems.append(dict(imageNum=self.getImageNum(j,i),
x=i-directionx, y=j-directiony,
directionx=directionx, directiony=directiony))
if movingGems:
#print('moving gems:', movingGems)
boardCopy = self.getBoardCopyMinusGems(board, movingGems)
self.animateMovingGems(boardCopy, movingGems, points)
#self.moveGems(board, movingGems)
self.updateBoard(board)
#print('board now:', board)
#print('final board:', board)
# combining phase
anychange = self.comb.run()
self.gameLog('comb', self.board.copy())
if anychange:
# have to find the differences and transition
# using fire
boardCopy = copy.deepcopy(board)
self.updateBoard(board)
print(self.board)
self.transitionBoard(boardCopy, board)
#print(('STEP %d: activation...' % nstep))
anychange = self.acto.run() or anychange
self.gameLog('acto', self.board.copy())
if anychange:
boardCopy = copy.deepcopy(board)
self.updateBoard(board)
print(self.board)
self.transitionBoard(boardCopy, board)
continue
# ok, the board settled down now
# we should ask the agent/user what they want to do now
#print(('STEP %d: finding valid moves ...' % nstep))
moves = list(self.paircomb.enumerate_valid_moves())
if len(moves) == 0:
# no moves left -- shuffle
#print(('shuffling ...'))
nshuffles += 1
if nshuffles > 20:
raise GameInvalidException('Too many shuffles')
boardCopy = copy.deepcopy(board)
self.paircomb.shuffle()
self.gameLog('paircomb.shuffle', self.board.copy())
self.updateBoard(board)
self.transitionBoard(boardCopy, board, type='glance')
continue
self.gameLog('moves', moves)
self.rng.shuffle(moves)
return moves
def continueGame(self, board, move):
boardCopy = copy.deepcopy(board)
self.paircomb.run(*move)
self.gameLog('paircomb.run', self.board.copy())
self.updateBoard(board)
self.transitionBoard(boardCopy, board)
self.comb.set_last_interaction(*move)
self.last_move = move
print(self.board)
# combining phase
anychange = self.comb.run()
self.gameLog('comb', self.board.copy())
if anychange:
# have to find the differences and transition
# using fire
boardCopy = copy.deepcopy(board)
self.updateBoard(board)
print(self.board)
self.transitionBoard(boardCopy, board)
#print(('STEP %d: activation...' % nstep))
anychange = self.acto.run() or anychange
self.gameLog('acto', self.board.copy())
if anychange:
boardCopy = copy.deepcopy(board)
self.updateBoard(board)
print(self.board)
self.transitionBoard(boardCopy, board)
# dropping phase
return self.fillBoardAndAnimate(board, [])
def isValidMove(self, x1, y1, x2, y2):
for (fromj,fromi,toj,toi),score in self.possible_moves:
if x1 == fromi and y1 == fromj and x2 == toi and y2 == toj:
return (fromj,fromi,toj,toi)
print('possible moves:')
for move, score in self.possible_moves:
print(move)
print()
print('not:',x1,y1,x2,y2)
return False
def getBoardCopyMinusGems(self, board, gems):
# Gems is a list of dicts, with keys 'imageNum', 'x', 'y', 'direction'.
boardCopy = copy.deepcopy(board)
# Remove some of the gems from this board data structure copy.
for gem in gems:
if gem['y'] != ROWABOVEBOARD and gem['y'] >= 0:
#print('temporarily disabling', gem['x'], gem['y'])
boardCopy[gem['x']][gem['y']] = EMPTY_SPACE
return boardCopy
def getImageNum(self, j, i):
color = self.board.color[j,i]
type = self.board.type[j,i]
status = self.board.status[j,i]
if type == 0 and status == 0:
return EMPTY_SPACE
else:
if type == 5:
color = 0
return (status, type, color)
def updateBoard(self, board):
for x in range(self.BOARDWIDTH):
for y in range(self.BOARDHEIGHT):
board[x][y] = self.getImageNum(y,x)
def moveGems(self, board, movingGems):
# movingGems is a list of dicts with keys 'x', 'y', 'direction', and 'imageNum'
for gem in movingGems:
if gem['y'] != ROWABOVEBOARD:
#print('marking empty', gem['x'], gem['y'])
board[gem['x']][gem['y']] = EMPTY_SPACE
movex = gem['directionx']
movey = gem['directiony']
#print('filling', gem['x']+movex, gem['y']+movey)
board[gem['x'] + movex][gem['y'] + movey] = gem['imageNum']
else:
board[gem['x']][0] = gem['imageNum'] # ignore 'direction', just move to top row
def transitionBoard(self, oldboard, newboard, type='fire'):
        progress = 0  # frame counter: runs from 0 up to NIMG transition frames
differences = []
for x in range(self.BOARDWIDTH):
for y in range(self.BOARDHEIGHT):
if oldboard[x][y] != newboard[x][y]:
differences.append((x,y))
#print('differences:', differences)
if not differences: return
if type == 'fire':
NIMG = NUMFIREIMAGES
elif type == 'glance':
NIMG = NUMGLANCEIMAGES
while progress < NIMG:
#print('transitioning...', progress)
self.WINDOWSURF.fill(BGCOLOR)
            if progress < 22:  # show the old board for the first frames, then switch to the new one
self.drawBoard(oldboard)
else:
self.drawBoard(newboard)
# Draw fire where they differ.
for x,y in differences:
self.drawFire(x, y, progress, type=type)
self.drawScore(update=False)
pygame.display.update()
self.FPSCLOCK.tick(FPS)
progress += 1
self.drawBoard(newboard)
self.drawScore(update=True)
pygame.display.update()
def drawFire(self, x, y, progress, type):
pixelx = self.XMARGIN + (x * GEMIMAGESIZE)
pixely = self.YMARGIN + (y * GEMIMAGESIZE)
r = pygame.Rect( (pixelx, pixely, GEMIMAGESIZE, GEMIMAGESIZE) )
if type == 'fire':
self.WINDOWSURF.blit(self.FIREIMAGES[progress], r)
elif type == 'glance':
self.WINDOWSURF.blit(self.GLANCEIMAGES[progress], r)
else:
assert False, type
def animateMovingGems(self, board, gems, pointsText):
progress = 0 # progress at 0 represents beginning, progress at 100 represents finish.
while progress <= 100:
self.WINDOWSURF.fill(BGCOLOR)
self.drawBoard(board)
# Draw each gem.
for gem in gems:
self.drawMovingGem(gem, progress)
self.drawScore(update=False)
pygame.display.update()
self.FPSCLOCK.tick(FPS)
progress += MOVERATE
def drawMovingGem(self, gem, progress):
movex = 0
movey = 0
progress *= 0.01
#print('moving...', progress, gem)
fraction = progress
        # smooth ease-in/ease-out: the arctan-based easing overrides the linear fraction above
        fraction = numpy.arctan((progress - 0.5) * 10) * 1.13 / numpy.pi + 0.5
movex = gem['directionx'] * int(fraction * GEMIMAGESIZE)
movey = gem['directiony'] * int(fraction * GEMIMAGESIZE)
basex = gem['x']
basey = gem['y']
if basey == ROWABOVEBOARD:
basey = -1
pixelx = self.XMARGIN + (basex * GEMIMAGESIZE)
pixely = self.YMARGIN + (basey * GEMIMAGESIZE)
r = pygame.Rect( (pixelx + movex, pixely + movey, GEMIMAGESIZE, GEMIMAGESIZE) )
self.WINDOWSURF.blit(self.GEMIMAGES[gem['imageNum']], r)
def drawBoard(self, board):
pygame.draw.rect(self.WINDOWSURF, BGCOLOR,
(self.XMARGIN, self.YMARGIN - GEMIMAGESIZE,
GEMIMAGESIZE * self.BOARDWIDTH,
GEMIMAGESIZE * (self.BOARDHEIGHT+1)), 0)
for x in range(self.BOARDWIDTH):
for y in range(self.BOARDHEIGHT):
pygame.draw.rect(self.WINDOWSURF, GRIDCOLOR, self.BOARDRECTS[x][y], 1)
gemToDraw = board[x][y]
if gemToDraw != EMPTY_SPACE:
self.WINDOWSURF.blit(self.GEMIMAGES[gemToDraw], self.BOARDRECTS[x][y])
def scoring_function(self, events):
nspecial = [0, 0, 0]
ncombispecial_index = {22:0,42:1,44:2,51:3,52:4,54:5,55:6}
ncombispecial = [0, 0, 0, 0, 0, 0, 0]
nunlocked = 0
ndestroyed = 0
score = 0
for type, value in events:
if type == 'activated':
if value in (2,3):
nspecial[0] += 1
elif value == 4:
nspecial[1] += 1
elif value == 5:
nspecial[2] += 1
score += 10 * value
elif type == 'unlocked':
nunlocked += value
elif type == 'destroyed':
ndestroyed += value
score += value
elif type == 'combined':
ncombispecial[ncombispecial_index[value]] += 1
return [score, ndestroyed, nunlocked] + nspecial + ncombispecial
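    # Illustrative check of the scoring above (derived from the code, not from project docs):
    # scoring_function([('destroyed', 3), ('activated', 4)]) returns
    # [43, 3, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0] -- 3 points for the destroyed gems plus 10*4 for the activated type-4 special.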
def drawScore(self, update=True):
lastscore = self.score
if update:
newevents = self.board.events[self.events_processed:]
newscore = self.scoring_function(newevents)
self.score = [a+b for a,b in zip(newscore, lastscore)]
_, _, y, x = self.last_move
#print('new score:', newscore, x, y)
if newscore[0] > 0 and x is not None:
pointsSurf = self.BASICFONT.render(str(newscore[0]), 1, SCORECOLOR)
pointsRect = pointsSurf.get_rect()
pointsRect.center = (x * GEMIMAGESIZE + self.XMARGIN, y * GEMIMAGESIZE + self.YMARGIN)
self.WINDOWSURF.blit(pointsSurf, pointsRect)
pygame.display.update()
self.FPSCLOCK.tick(SCOREFPS)
self.events_processed = len(self.board.events)
done = self.score[self.goalid]
todo = self.goalvalue
top = 10
middle = WINDOWHEIGHT - GEMIMAGESIZE / 2 - 10
left = self.XMARGIN
anycolor = 6
scoretxt = '%d' % (done)
if self.goalid == 0:
imageIds = []
goaltxt = 'score > %d' % todo
elif self.goalid == 1:
imageIds = []
goaltxt = '%d destroyed' % todo
elif self.goalid == 2:
imageIds = [(1, 0, 0)]
goaltxt = '%d unlocked' % todo
elif self.goalid == 3:
imageIds = [(0, 2, anycolor)]
goaltxt = '%d stripes' % todo
elif self.goalid == 4:
imageIds = [(0, 4, anycolor)]
goaltxt = '%d bombs' % todo
elif self.goalid == 5:
imageIds = [(0, 5, 0)]
goaltxt = '%d zappers' % todo
elif self.goalid == 6:
imageIds = [(0, 2, anycolor), (0, 3, anycolor)]
goaltxt = '%d stripe+stripe' % todo
elif self.goalid == 7:
imageIds = [(0, 2, anycolor), (0, 4, anycolor)]
goaltxt = '%d stripe+bomb' % todo
elif self.goalid == 8:
imageIds = [(0, 4, anycolor), (0, 4, anycolor)]
goaltxt = '%d bomb+bomb' % todo
elif self.goalid == 9:
imageIds = [(0, 5, 0), (0, 1, anycolor)]
goaltxt = '%d zapper+gem' % todo
elif self.goalid == 10:
imageIds = [(0, 5, 0), (0, 2, anycolor)]
goaltxt = '%d zapper+stripe' % todo
elif self.goalid == 11:
imageIds = [(0, 5, 0), (0, 4, anycolor)]
goaltxt = '%d zapper+bomb' % todo
elif self.goalid == 12:
imageIds = [(0, 5, 0), (0, 5, 0)]
goaltxt = '%d zapper+zapper' % todo
else:
assert False
if len(imageIds) > 0:
goaltxt = '%d' % todo
leveltxt = '%s LEVEL %d' % (self.difficulty_text, self.gameid)
        levelImg = self.BASICFONT.render(leveltxt, 1, SCORECOLOR)
levelRect = levelImg.get_rect()
levelRect.top = top
levelRect.left = 10
self.WINDOWSURF.blit(levelImg, levelRect)
contacttxt = 'issue?'
        contactImg = self.SMALLFONT.render(contacttxt, 1, LINKCOLOR)
contactRect = contactImg.get_rect()
contactRect.top = top
contactRect.right = WINDOWWIDTH - 10
self.contactButton = contactRect
self.WINDOWSURF.blit(contactImg, contactRect)
        scoreImg = self.BASICFONT.render(scoretxt, 1, SCORECOLOR)
scoreRect = scoreImg.get_rect()
scoreRect.left = 10
scoreRect.centery = middle
self.WINDOWSURF.blit(scoreImg, scoreRect)
goaltxt = 'GOAL: %s' % (goaltxt)
        goalImg = self.BASICFONT.render(goaltxt, 1, SCORECOLOR)
goalRect = goalImg.get_rect()
goalRect.top = top
goalRect.left = left
goalRect.centery = middle
self.WINDOWSURF.blit(goalImg, goalRect)
sparewidth = 0
left += goalRect.width + sparewidth
if len(imageIds) > 0:
imageId = imageIds[0]
r = pygame.Rect((left, top, GEMIMAGESIZE, GEMIMAGESIZE) )
r.centery = middle
self.WINDOWSURF.blit(self.GEMIMAGES[imageId], r)
left += GEMIMAGESIZE + sparewidth
if len(imageIds) > 1:
plusImg = self.BASICFONT.render('+', 1, SCORECOLOR)
plusRect = plusImg.get_rect()
plusRect.left = left
plusRect.centery = middle
left += plusRect.width + sparewidth
self.WINDOWSURF.blit(plusImg, plusRect)
imageId = imageIds[1]
r = pygame.Rect((left, top, GEMIMAGESIZE, GEMIMAGESIZE) )
r.centery = middle
self.WINDOWSURF.blit(self.GEMIMAGES[imageId], r)
left += GEMIMAGESIZE + sparewidth
# draw number of swaps left
swaptxt = '%d' % (self.maxswaps - self.nswaps)
swapImg = self.BASICFONT.render(swaptxt, 1, SCORECOLOR)
swapRect = swapImg.get_rect()
swapRect.right = WINDOWWIDTH - 10
swapRect.centery = middle
self.WINDOWSURF.blit(swapImg, swapRect)
def checkForGemClick(self, pos):
# See if the mouse click was on the board
for x in range(self.BOARDWIDTH):
for y in range(self.BOARDHEIGHT):
if self.BOARDRECTS[x][y].collidepoint(pos[0], pos[1]):
return (x, y) # Return board x and y where the click occurred.
return None # Click was not on the board.
def gameLog(self, movement, newdata):
self.boardlog.append((movement, newdata))
def checkForLinkClick(self, pos):
# See if the mouse click was on the board
if self.contactButton.collidepoint(pos[0], pos[1]):
#try:
import webbrowser
import urllib
if hasattr(urllib, 'urlencode'):
urlencode = urllib.urlencode
else:
urlencode = urllib.parse.urlencode
logtxts = []
lastBoard = None
nevents_processed = 0
for i, (movement, newdata) in enumerate(self.boardlog, 1):
if movement == 'moves':
logtxt = "\n%d. possible moves:" % i
for move, score in newdata:
logtxt += "\n* %d,%d -> %d,%d" % (move)
logtxts.append(logtxt)
continue
if lastBoard is not None and newdata == lastBoard:
logtxts.append("%d: %s (no change)" % (i, movement))
continue
logtxt = "%d: after %s:\n```\n%s\n```\n" % (i, movement, newdata)
lastBoard = newdata
for type, value in newdata.events[nevents_processed:]:
logtxt += "* Event: %s - %s\n" % (type, value)
logtxts.append(logtxt)
nevents_processed = len(newdata.events)
logtxt = ''
for logtxti in logtxts[::-1]:
if len(logtxt) + len(logtxti) > 5000:
break
logtxt += '\n'
logtxt += logtxti
print(len(logtxt))
body = """
Hi!
My issue/suggestion/question/ is ...
Debug information
---------------------------------
I was playing this board:
```
%s
```
with %d colors, %d swaps. Goal %d of type=%d.
My last moves were:
%s
""" % (str(self.board), self.ncolors, self.maxswaps, self.goalvalue, self.goalid, logtxt)
#title = 'Level %d' % self.gameid
webbrowser.open("https://github.com/JohannesBuchner/ultragem/issues/new?%s" % (urlencode(dict(body=body))))
#except Exception:
# pass
def highlightSpace(self, x, y):
pygame.draw.rect(self.WINDOWSURF, HIGHLIGHTCOLOR, self.BOARDRECTS[x][y], 4)
def hintMove(self):
(fromj, fromi, toj, toi), score = self.possible_moves[0]
for i in range(3):
x, y = fromi, fromj
pygame.draw.rect(self.WINDOWSURF, HINTCOLOR, self.BOARDRECTS[x][y], 4)
x, y = toi, toj
pygame.draw.rect(self.WINDOWSURF, HINTCOLOR, self.BOARDRECTS[x][y], 4)
pygame.display.update()
self.FPSCLOCK.tick(HINTFPS)
x, y = fromi, fromj
pygame.draw.rect(self.WINDOWSURF, GRIDCOLOR, self.BOARDRECTS[x][y], 4)
x, y = toi, toj
pygame.draw.rect(self.WINDOWSURF, GRIDCOLOR, self.BOARDRECTS[x][y], 4)
pygame.display.update()
self.FPSCLOCK.tick(HINTFPS)
def getSwappingGems(self, board, firstXY, secondXY):
firstGem = dict(imageNum=board[firstXY[0]][firstXY[1]],
x=firstXY[0], directionx=0,
y=firstXY[1], directiony=0)
secondGem = dict(imageNum=board[secondXY[0]][secondXY[1]],
x=secondXY[0], directionx=0,
y=secondXY[1], directiony=0)
highlightedGem = None
if firstGem['x'] == secondGem['x'] + 1 and firstGem['y'] == secondGem['y']:
firstGem['directionx'] = -1
secondGem['directionx'] = +1
elif firstGem['x'] == secondGem['x'] - 1 and firstGem['y'] == secondGem['y']:
firstGem['directionx'] = +1
secondGem['directionx'] = -1
elif firstGem['y'] == secondGem['y'] + 1 and firstGem['x'] == secondGem['x']:
firstGem['directiony'] = -1
secondGem['directiony'] = +1
elif firstGem['y'] == secondGem['y'] - 1 and firstGem['x'] == secondGem['x']:
firstGem['directiony'] = +1
secondGem['directiony'] = -1
else:
# These gems are not adjacent and can't be swapped.
return None, None
return firstGem, secondGem
def runGame(self):
mainBoard = [[EMPTY_SPACE] * self.BOARDHEIGHT for x in range(self.BOARDWIDTH)]
# Drop the initial gems.
self.possible_moves = self.fillBoardAndAnimate(mainBoard, [])
self.nswaps = 0
firstSelectedGem = None
lastMouseDownX = None
lastMouseDownY = None
isGameOver = False
clickContinueTextSurf = None
while True:
clickedSpace = None
isGameOver = self.score[self.goalid] >= self.goalvalue or self.nswaps >= self.maxswaps
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit(0)
if event.type == KEYUP:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit(0)
elif event.key == K_BACKSPACE:
return
if event.type == MOUSEBUTTONUP:
if isGameOver:
return
if event.pos == (lastMouseDownX, lastMouseDownY):
# This is a mouse click.
clickedSpace = self.checkForGemClick(event.pos)
self.checkForLinkClick(event.pos)
else:
# This is the end of a mouse drag, and the first gem has already been selected.
firstSelectedGem = self.checkForGemClick((lastMouseDownX, lastMouseDownY))
mouseOverSpace = self.checkForGemClick(event.pos)
if mouseOverSpace and (mouseOverSpace[0] == firstSelectedGem[0] + 1 or \
mouseOverSpace[0] == firstSelectedGem[0] - 1 or \
mouseOverSpace[1] == firstSelectedGem[1] + 1 or \
mouseOverSpace[1] == firstSelectedGem[1] - 1):
clickedSpace = mouseOverSpace
if not firstSelectedGem or not mouseOverSpace:
# If this MOUSEBUTTONUP was not part of a valid drag, deselect both.
firstSelectedGem = None
mouseOverSpace = None
if event.type == MOUSEBUTTONDOWN:
lastMouseDownX, lastMouseDownY = event.pos
# Check if this is the first or second gem to be clicked.
if clickedSpace and not firstSelectedGem:
firstSelectedGem = clickedSpace
elif clickedSpace and firstSelectedGem:
# Two gems have been selected. Swap the gems.
firstSwappingGem, secondSwappingGem = self.getSwappingGems(mainBoard, firstSelectedGem, clickedSpace)
if firstSwappingGem is None and secondSwappingGem is None:
firstSelectedGem = None # Deselect the first gem.
continue
# Show the swap animation on the screen.
boardCopy = self.getBoardCopyMinusGems(mainBoard, (firstSwappingGem, secondSwappingGem))
self.animateMovingGems(boardCopy, [firstSwappingGem, secondSwappingGem], [])
# Swap the gems in the board data structure.
mainBoard[firstSwappingGem['x']][firstSwappingGem['y']] = secondSwappingGem['imageNum']
mainBoard[secondSwappingGem['x']][secondSwappingGem['y']] = firstSwappingGem['imageNum']
# See if this is a matching move.
move = self.isValidMove(firstSwappingGem['x'],firstSwappingGem['y'],
secondSwappingGem['x'], secondSwappingGem['y'])
if not move:
# Not a matching move: swap the gems back
self.GAMESOUNDS['bad swap'].play()
firstSwappingGem, secondSwappingGem = self.getSwappingGems(mainBoard, firstSelectedGem, clickedSpace)
self.animateMovingGems(boardCopy, [firstSwappingGem, secondSwappingGem], [])
mainBoard[firstSwappingGem['x']][firstSwappingGem['y']] = secondSwappingGem['imageNum']
mainBoard[secondSwappingGem['x']][secondSwappingGem['y']] = firstSwappingGem['imageNum']
self.hintMove()
else:
# successful move.
# reset selection
firstSelectedGem = None
secondSwappingGem = None
self.nswaps += 1
self.possible_moves = self.continueGame(mainBoard, move)
if self.score[self.goalid] >= self.goalvalue or self.nswaps >= self.maxswaps:
isGameOver = True
# Draw the board.
self.WINDOWSURF.fill(BGCOLOR)
self.drawBoard(mainBoard)
            if firstSelectedGem is not None:
self.highlightSpace(firstSelectedGem[0], firstSelectedGem[1])
if isGameOver:
                if clickContinueTextSurf is None:
if self.score[self.goalid] >= self.goalvalue:
#endtxt = 'Final Score: %s (Click to continue)' % (self.score[0])
endtxt = 'SUCCESS! Click for next level'
else:
endtxt = 'ALMOST! Click to try again.'
clickContinueTextSurf = self.BASICFONT.render(endtxt, 1, GAMEOVERCOLOR, GAMEOVERBGCOLOR)
clickContinueTextRect = clickContinueTextSurf.get_rect()
clickContinueTextRect.center = int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2)
self.WINDOWSURF.blit(clickContinueTextSurf, clickContinueTextRect)
self.drawScore(update=True)
pygame.display.update()
self.FPSCLOCK.tick(FPS)
if __name__ == '__main__':
try:
if len(sys.argv) > 1:
gameid = int(sys.argv[1])
else:
gameid = int(open('currentgame', 'r').read())
except Exception:
gameid = 1
game = UltraGemGame(gameid=gameid)
game.run()
``` |
{
"source": "JohannesBuchner/uncertaincolors",
"score": 3
} |
#### File: JohannesBuchner/uncertaincolors/uncertaincolors.py
```python
import numpy
import colorsys
from matplotlib import pyplot as plt
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
def all_to_rgb(value, error, cm=plt.cm.cool, lo=0, hi=1):
x = numpy.linspace(0, 1, 400)
colors = cm(x*(hi - lo) + lo)[:,:3]
#J, C, H = cspace_convert(colors, 'sRGB1', satspace).transpose()
H, S, V = rgb_to_hsv(colors).transpose()
frac = S.min() / S
colors = cm(value.flatten()*(hi - lo) + lo)[:,:3]
H, S, V = rgb_to_hsv(colors).transpose()
S *= numpy.interp(x=value.flatten(), xp=x, fp=frac)
error = numpy.where(error >= 0, numpy.where(error <= 1, error, 1), 0)
S *= 1 - error.flatten()
total = hsv_to_rgb(numpy.dstack((H, S, V)))
total = total.reshape(value.shape[0], value.shape[1], 3)
total[total<0] = 0
total[total>1] = 1
return total
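# In short: the colormap sets the base colour from `value`, and `error` scales the saturation towards grey,
# so uncertain pixels fade out. Minimal usage sketch (assumes 2-D arrays already scaled to [0, 1]):
#   rgb = all_to_rgb(value, error, cm=plt.cm.viridis_r, hi=0.5); plt.imshow(rgb)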
if __name__ == '__main__':
import matplotlib.pyplot as plt
x = numpy.linspace(-1,1, 500)
y = numpy.linspace(-1,1, 500)
X, Y = numpy.meshgrid(x, y)
value = (-Y+1)/2.
error = (X+1)/2.
total = all_to_rgb(value, error, cm=plt.cm.viridis_r, hi=0.5)
plt.imshow(total)
plt.xlabel('Uncertainty')
plt.ylabel('Value')
plt.savefig('demo_colorspace.png', bbox_inches='tight')
plt.close()
value = numpy.abs(X)
error = (X**2 + Y**2)**0.5
plt.title('Measurement Value')
plt.imshow(value)
plt.xlabel('X')
plt.ylabel('Y')
plt.colorbar()
plt.savefig('demo_observation_value.png', bbox_inches='tight')
plt.close()
plt.imshow(error)
plt.title('Measurement Error')
plt.xlabel('X')
plt.ylabel('Y')
plt.colorbar()
plt.savefig('demo_observation_error.png', bbox_inches='tight')
plt.close()
total = all_to_rgb(value, error, cm=plt.cm.viridis_r, hi=0.5)
plt.imshow(total)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('demo_observation_viridis.png', bbox_inches='tight')
plt.close()
total = all_to_rgb(value, error, cm=plt.cm.cool_r)
plt.imshow(total)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('demo_observation_cool.png', bbox_inches='tight')
plt.close()
total = all_to_rgb(value, error, cm=plt.cm.plasma_r, hi=0.6)
plt.imshow(total)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('demo_observation_plasma.png', bbox_inches='tight')
plt.close()
total = all_to_rgb(value, error, cm=plt.cm.winter)
plt.imshow(total)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('demo_observation_winter.png', bbox_inches='tight')
plt.close()
total = all_to_rgb(value, error, cm=plt.cm.spring)
plt.imshow(total)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('demo_observation_spring.png', bbox_inches='tight')
plt.close()
total = all_to_rgb(value, error, cm=plt.cm.summer)
plt.imshow(total)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('demo_observation_summer.png', bbox_inches='tight')
plt.close()
total = all_to_rgb(value, error, cm=plt.cm.RdBu_r)
plt.imshow(total)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('demo_observation_RdBu.png', bbox_inches='tight')
plt.close()
total = all_to_rgb(value, error, cm=plt.cm.coolwarm)
plt.imshow(total)
plt.xlabel('X')
plt.ylabel('Y')
plt.savefig('demo_observation_coolwarm.png', bbox_inches='tight')
plt.close()
``` |
{
"source": "johannesCmayer/pt-kana",
"score": 3
} |
#### File: johannesCmayer/pt-kana/data_manipulation.py
```python
import json
hiragana = json.load(open('Vocabularies/hiragana.json'))
katakana = json.load(open('Vocabularies/katakana.json'))
class Instance:
def __init__(self, target, translation, frequency, correct, incorrect):
self.target = target
self.translation = translation
self.frequency = frequency
self.correct = correct
self.incorrect = incorrect
l = []
for (hk, hv), (kk, kv) in zip(hiragana.items(), katakana.items()):
l.append(Instance(hk, hv, 0, [], []).__dict__)
l.append(Instance(kk,kv,0, [], []).__dict__)
print(f"generated file with {len(l)} entries")
tdump = json.dumps(l,indent=4)
with open("Vocabularies/alternating_kana_base.json", 'w') as f:
f.write(tdump)
``` |
{
"source": "johannesCmayer/tracker",
"score": 3
} |
#### File: johannesCmayer/tracker/main.py
```python
import datetime
import click
import os
import json
import re
from pathlib import Path
import math
#TODO [#A] fix negative time for future event being off by one day minus
#TODO Have tracker groups, that are visually seperated in the output and can be ordered
#TODO allow to change the ordering of trackers
#TODO Unhardcode goal display
#TODO Save the history of all updates in a file
#TODO Allow different kinds of date input (not just one hardcoded format), e.g. allow specifying seconds, have a shortcut for now, and allow adding offsets to now (possibly without spelling it out, e.g. +1d10:00:00)
#TODO Allow the user to specify the save file location (possibly with a config in the same dir or in .config)
SAVE_FILE = f"{Path.home()}/.config/tracker/tracker.json"
@click.group()
@click.pass_context
def cli(ctx):
pass
@cli.command()
@click.pass_context
@click.option('-n', '--name', required=True, help="The name of the tracker")
def remove(ctx, name):
"""Delete tracker"""
d_dict = ctx.obj
del d_dict[name]
with open(SAVE_FILE, 'w') as f:
json.dump(d_dict, f, indent=4)
print(f"Removed tracker {name}")
@cli.command()
@click.pass_context
@click.option('-n', '--name', required=True, help="The name of the tracker")
@click.option('-d', '--date', required=True, help="The start tracking date")
def create(ctx, name, date):
"""Create tracker"""
regex = r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d$'
if not re.search(regex, date):
print(f"Invalid format. Use format: {regex}")
exit(1)
d_dict = ctx.obj
if name in d_dict:
print(f"Tracker {name} already exists.")
return
d_dict[name] = date
with open(SAVE_FILE, 'w') as f:
json.dump(d_dict, f, indent=4)
print(f"Creating tracker {name}")
@cli.command()
@click.pass_context
@click.option('-n', '--name', required=True, help="The name of the tracker")
@click.option('-d', '--date', required=True, help="The start tracking date")
def update(ctx, name, date):
"""Update tracker"""
regex = r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d$'
if not re.search(regex, date):
print(f"Invalid format. Use format: {regex}")
exit(1)
d_dict = ctx.obj
if name not in d_dict:
print(f"Tracker {name} does not exsist.")
return
d_dict[name] = date
with open(SAVE_FILE, 'w') as f:
json.dump(d_dict, f, indent=4)
print(f"Updating tracker {name}")
#TODO fix if the event is more than a year away
@cli.command()
@click.pass_context
@click.option('-n', '--name', required=False, help="The name of the tracker")
def list(ctx, name):
"""List active trackers"""
if len(ctx.obj) == 0:
print(f"{SAVE_FILE} contains no trackers.")
else:
print_list = []
for item_name, date in ctx.obj.items():
if name != None and name != item_name:
continue
time = (datetime.datetime.now() - datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M'))
negative_days = time.days < 0
years = math.floor(abs(time.days / 365))
days = abs(time.days+1 if negative_days else time.days) % 365
#TODO fix the countdown of time (it seems to be offset by an hour)
total_seconds = (60*60*24 - time.seconds) if negative_days else time.seconds
hours = total_seconds // 60**2
minutes = abs(total_seconds // 60) % 60
seconds = abs(total_seconds) % 60
print_list.append([
item_name,
"- " if negative_days else "",
f"{years}y " if years != 0 else '',
f"{days}d " if days != 0 else '',
f"{hours:02.0f}:",
f"{minutes:02.0f}:",
f"{seconds:02}"
])
for j, e in enumerate(print_list):
for i, arg in enumerate(e):
max_len = max([len(x[i]) for x in print_list])
postfix = ": " if i == 0 else ""
print(arg + postfix + " " * (max_len - len(arg)), end='')
print()
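# Example output (illustrative): a tracker named 'gym' whose date lies 3 days and 2 hours in the past
# is listed roughly as "gym: 3d 02:00:00".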
def main():
data_dir = os.path.dirname(SAVE_FILE)
if not os.path.isdir(data_dir):
a = ''
while a != 'y' and a != 'n':
a = input(f"{SAVE_FILE} config file does not exsist. Create it? y/n: ")
if a == 'y':
os.makedirs(data_dir)
elif a == 'n':
print("No config file. Aborting.")
exit(1)
data = {}
if os.path.isfile(SAVE_FILE):
with open(SAVE_FILE, 'r') as f:
json_d = f.read()
if json_d != '':
data = json.loads(json_d)
cli(obj=data)
if __name__ == "__main__":
main()
``` |
{
"source": "JohannesDev/Tribus",
"score": 3
} |
#### File: JohannesDev/Tribus/sever.py
```python
import asyncio
import random
import websockets
async def sendMessage(websocket, path):
while True:
r = lambda: random.randint(0,255)
color = '#%02X%02X%02X' % (r(),r(),r())
await websocket.send(color)
print("Color: ", color)
await asyncio.sleep(5)
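# Client-side sketch (illustrative, not part of this repo): connect with
# `async with websockets.connect('ws://127.0.0.1:5678') as ws:` and `await ws.recv()` in a loop
# to receive one hex colour string roughly every five seconds.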
start_server = websockets.serve(sendMessage, '127.0.0.1', 5678)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
``` |
{
"source": "JohannesDienst/polyglot_integration",
"score": 2
} |
#### File: jython/jython/examplemodule.py
```python
def triple(value):
return value*value*value
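# e.g. triple(2) returns 8 (the value cubed)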
``` |
{
"source": "Johannesduvenage/binance-tradebot",
"score": 3
} |
#### File: Johannesduvenage/binance-tradebot/Database.py
```python
import sqlite3
import os.path as op
from future.utils import iteritems
DATABASE = op.join(op.dirname(op.abspath(__file__)), 'database/tradebot.db')
try:
connection = sqlite3.connect(DATABASE, check_same_thread = False)
except Exception as e:
raise e
class Database(object):
def __init__(self):
self.cursor = connection.cursor()
def trader_exists(self, thread_name, key):
sql = "select id from trader_data where thread_name=? and key=?"
self.cursor.execute(sql, (thread_name, key))
data = self.cursor.fetchone()
if not data:
return False
return True
def trader_update(self, data):
"""
data = {'thread_name': name, 'pairs': {'key': 'value', 'key': 'value'}}
"""
thread_name = data['thread_name']
inserts = []
updates = []
for (key, val) in iteritems(data['pairs']):
if self.trader_exists(thread_name, key):
updates.append((val, thread_name, key))
else:
inserts.append((thread_name, key, val))
if inserts:
self.cursor.executemany("insert into trader_data (thread_name, key, value) values (?,?,?)", inserts)
if updates:
self.cursor.executemany("update trader_data set value=? where thread_name=? and key=?", updates)
connection.commit()
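    # Illustrative call (key/value names are made up): trader_update({'thread_name': 'thread-1',
    # 'pairs': {'last_price': '0.0042'}}) inserts each pair on first use and updates it afterwards.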
def trader_read(self, thread_name, key=''):
data = {'thread_name': thread_name, 'pairs': {}}
if not key:
self.cursor.execute("select key, value from trader_data where thread_name='thread-1'")
rows = self.cursor.fetchall()
for row in rows:
data['pairs'][row[0]]= row[1]
else:
self.cursor.execute("select value from trader_data where thread_name=? and key=?", (thread_name, key))
row = self.cursor.fetchone()
if row:
data['pairs'][key] = row[0]
return data
def order_exists(self, order_id):
sql = "select id from order_data where order_id=?"
self.cursor.execute(sql, (str(order_id),))
if not self.cursor.fetchone():
return False
return True
def order_update(self, data):
tup = (data['price'], data['orig_quantity'], data['executed_quantity'], data['side'], int(data['time']), data['status'], str(data['order_id']))
if self.order_exists(data['order_id']):
sql = "update order_data set price=?, orig_qty=?, exec_qty=?, side=?, time=?, status=? where order_id=?"
else:
sql = "insert into order_data (price, orig_qty, exec_qty, side, time, status, order_id) values (?,?,?,?,?,?,?)"
self.cursor.execute(sql, tup)
connection.commit()
def order_read(self, order_id, key=''):
if not key:
sql = "select price, orig_qty, exec_qty, side, time, status from order_data where order_id=?"
self.cursor.execute(sql, (str(order_id),))
order = self.cursor.fetchone()
if not order:
return None
order_dict = {'order_id': str(order_id), 'price': order[0], 'orig_quantity': order[1], 'executed_quantity': order[2], 'side': order[3], 'time': order[4], 'status': order[5]}
else:
sql = "select %s from order_data where order_id=?" % key
self.cursor.execute(sql, (str(order_id),))
order = self.cursor.fetchone()
if not order:
return None
order_dict = {'order_id': str(order_id), key: order[0]}
return order_dict
```
#### File: Johannesduvenage/binance-tradebot/DictMap.py
```python
class DictMap(dict):
"""
Example:
m = DictMap({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(DictMap, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
# for k, v in arg.iteritems():
for k, v in arg.items():
self[k] = v
if kwargs:
            for k, v in kwargs.items():
self[k] = v
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(DictMap, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(DictMap, self).__delitem__(key)
del self.__dict__[key]
``` |
{
"source": "JohannesEbke/drillbit",
"score": 2
} |
#### File: proof_of_concept/atlas_files/sum.py
```python
from cPickle import load
from glob import glob
from collections import defaultdict
from pprint import pprint
def dict_sum(defdicts):
res = defaultdict(lambda: 0)
keys = set()
for d in defdicts:
keys.update(d.keys())
for k in keys:
res[k] += d.get(k, 0)
return dict(res)
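# Intended behaviour (illustrative): dict_sum([{'a': 1}, {'a': 2, 'b': 3}]) yields {'a': 3, 'b': 3}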
def count_stuff(filename):
d = load(file(filename))
runs = d['Run']
counts = defaultdict(lambda: 0)
for run in runs:
for ds in d[run]['Datasets']:
for x in ds['value']:
tag = ".".join(x[0].split('.')[-3:-1])
counts['files'] += x[4]
counts['files:' + tag] += x[4]
if x[6]:
counts['events.in:' + tag] += x[6]
counts['events.with.duplicates'] += x[6]
if x[5]:
counts['filesize:' + tag] += x[5]
counts['total.filesize'] += x[5]
if tag.startswith('merge'):
counts['total.merged.filesize'] += x[5]
if tag.startswith('physics'):
counts['total.physics.filesize'] += x[5]
if tag.startswith('recon'):
counts['total.recon.filesize'] += x[5]
return dict(counts)
def get_events_in_files(filename):
d = load(file(filename))
runs = d['Run']
return sum(sum(sum(x[6] for x in ds['value'] if x[6]) for ds in d[run]['Datasets']) for run in runs)
def get_events(filename):
d = load(file(filename))
runs = d['Run']
return sum(sum(int(ds['value']) for ds in d[run]['#Events'] if ds['value'] != 'n.a.' and ds['accepted']) for run in runs)
what = ""
func = count_stuff
#print what + " in 2008: "
#pprint( func("runquery-2008.pickle"))
#print what + " in 2009: "
#pprint( func("runquery-2009.pickle"))
#print what + " in 2010: "
#pprint( dict_sum(func(x) for x in glob("runquery-2010-*.pickle")))
#print what + " in 2011: "
#pprint( func("runquery-2011.pickle"))
#print what + " in 2012: "
#pprint( func("runquery-2012.pickle"))
#print what + " in 2013: "
#pprint( func("runquery-2013-1.pickle"))
#print what + " sum: "
pprint( dict_sum(func(x) for x in glob('*.pickle')))
```
#### File: proof_of_concept/dispatch_simulator/test_dispatch_simulator.py
```python
from dispatch_simulator import DitPackSet, EmptyDitPack
from pprint import pprint
from time import time
def as_cols(lst):
return ["Column_{}".format(i) for i in lst]
def as_tabs(lst):
return ["Tablet_{}".format(i) for i in lst]
def test_simple():
input_packs = [EmptyDitPack(as_cols(range(3)), as_tabs(range(10)))]
packset = DitPackSet(input_packs)
for tablet, readers in packset.readers_by_tablet():
print tablet
pprint(readers)
def test_massive():
input_packs = [EmptyDitPack(as_cols(range(10000)), as_tabs(range(1000*1000)))]
packset = DitPackSet(input_packs)
for i, (tablet, readers) in enumerate(packset.readers_by_tablet()):
if i%100000 == 0:
print i, tablet
pass
def test_multi():
input_packs = [EmptyDitPack(as_cols(range(0, 5)), as_tabs(range(0, 5))),
EmptyDitPack(as_cols(range(6, 10)), as_tabs(range(0, 5))),
EmptyDitPack(as_cols(range(0, 5)), as_tabs(range(5, 10))),
EmptyDitPack(as_cols(range(5, 10)), as_tabs(range(5, 10)))]
packset = DitPackSet(input_packs)
for tablet, readers in packset.readers_by_tablet():
print tablet
pprint(readers)
def test_multi_massive():
n_tablets = 1000*1000
input_packs = [EmptyDitPack(as_cols(range(0, 5000)), as_tabs(range(0, n_tablets))),
EmptyDitPack(as_cols(range(5010, 10000)), as_tabs(range(0, n_tablets))),
EmptyDitPack(as_cols(range(0, 4000)), as_tabs(range(n_tablets, 2*n_tablets))),
EmptyDitPack(as_cols(range(4010, 10000)), as_tabs(range(n_tablets, 2*n_tablets)))]
packset = DitPackSet(input_packs)
for i, (tablet, readers) in enumerate(packset.readers_by_tablet()):
if i%100000 == 0:
print i, tablet
pass
def test_many_massive():
input_packs = []
COLUMN_GROUPS = 10
COLUMNS_PER_FILE = 1000
TABLET_GROUPS = 1000
TABLETS_PER_FILE = 10
print "Running dispatching over", COLUMN_GROUPS*TABLET_GROUPS, "virtual files, each containing", COLUMNS_PER_FILE,
print "columns and", TABLETS_PER_FILE, "tablets (event groups), plus one file containing one column for all tablets."
print "Expected: Warnings about 10 tablets missing for one column, and 1 tablet missing for 1 column."
print "-"*80
start_time = time()
for i in range(COLUMN_GROUPS):
common_columns = as_cols(range(i*COLUMNS_PER_FILE, (i+1)*COLUMNS_PER_FILE))
for j in range(TABLET_GROUPS):
tablets = as_tabs(range(TABLETS_PER_FILE*j, TABLETS_PER_FILE*(j+1)))
if i == 3 and j == 234:
# This particular file is missing a column (note the COLUMNS_PER_FILE+1 instead of COLUMNS_PER_FILE)
columns = as_cols(range(i*COLUMNS_PER_FILE+1, COLUMNS_PER_FILE*(i+1)))
input_packs.append(EmptyDitPack(columns, tablets))
else:
input_packs.append(EmptyDitPack(common_columns, tablets))
# Extra column that spans all tablets except the last one
tablets = as_tabs(range(0, TABLETS_PER_FILE * TABLET_GROUPS - 1))
columns = as_cols([COLUMNS_PER_FILE * COLUMN_GROUPS])
input_packs.append(EmptyDitPack(columns, tablets))
packset = DitPackSet(input_packs)
for i, (tablet, readers) in enumerate(packset.readers_by_tablet()):
# Dispatch one job here
pass
end_time = time()
print "Dispatching took {:.03} seconds.".format(end_time - start_time)
def main():
test_many_massive()
if __name__ == "__main__":
main()
```
#### File: drillbit/src/httpzip.py
```python
from zipfile import ZipFile
from os import write
from sys import stderr
from urllib2 import urlopen, Request, HTTPError
from bisect import bisect
import struct
MAX_RANGES_PER_REQUEST = 250 # Tune this to avoid initial FULL HEAD
MIN_HOLE_SIZE = 64*1024 # minimum hole size between ranges, 64k seems to be nice
def chunks(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
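# e.g. chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]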
class HTTPFile(object):
"""A buffered HTTP file that supports prefetching of ranges"""
def __init__(self, url, filesize):
self.url = url
self.size = filesize
self.fpos = 0
self.prefetch_buffer = []
self.prefetch_buffer_start = []
self.prefetch_buffer_end = []
def vread(self, ranges):
"""Do a vector read"""
global MAX_RANGES_PER_REQUEST
if len(ranges) == 0:
return []
if len(ranges) > MAX_RANGES_PER_REQUEST:
# Do a Map-Reduce!
return reduce(list.__add__, map(self.vread, chunks(ranges, MAX_RANGES_PER_REQUEST)))
req = Request(self.url, headers={"Range": "bytes="+",".join("-".join((str(s), str(e))) for s,e in ranges)})
#print req.headers
try:
r = urlopen(req)
except HTTPError, e:
if e.code == 413: # FULL HEAD
if MAX_RANGES_PER_REQUEST < 2:
raise
else:
MAX_RANGES_PER_REQUEST = MAX_RANGES_PER_REQUEST/2
print >> stderr, "Hit HTTP full head - reducing max ranges per request to", MAX_RANGES_PER_REQUEST
return self.vread(ranges)
else:
raise
#print "CODE: ", r.getcode()
#print r.info()
assert r.getcode() == 206
if len(ranges) == 1:
return [r.read()]
else:
t = r.info().getheader("Content-Type")
tp, boundary = t.split(";")
boundary = boundary.strip()
assert tp == "multipart/byteranges"
assert boundary.startswith('boundary="') and boundary.endswith('"')
boundary = boundary[len('boundary="'):-1]
data = [None]*len(ranges)
range_map = dict(("%i-%i/%i" % (s,e,self.size), i) for i, (s, e) in enumerate(ranges))
current_range = None
ranges_received = set()
newlines = 0
while True:
line = r.readline()
if not line:
assert len(ranges_received) == len(ranges), ranges_received
break
line = line.strip()
if not line: # file starts after empty newline
newlines += 1
if newlines == 1:
if current_range is None:
# ignore additional ranges
continue
start, end = ranges[current_range]
#print "Reading range ", current_range, " with ", (end - start), " bytes..."
data[current_range] = r.read(end - start)
ranges_received.add(current_range)
current_range = None
if len(ranges_received)== len(ranges):
break
else:
newlines = 0
if line.startswith("Content-Range: bytes "):
current_range = range_map[line[len("Content-Range: bytes "):]]
assert not current_range in ranges_received
assert len(data) == len(ranges)
return data
def really_read(self, size=None):
if size is None:
size = self.size - self.fpos
res = self.vread([(self.fpos, self.fpos+size)])
self.fpos += size
return res
def read(self, size=None):
if size is None:
size = self.size - self.fpos
if self.prefetch_buffer_start:
i = bisect(self.prefetch_buffer_start, self.fpos)
if i > 0 and self.prefetch_buffer_end[i-1] >= (self.fpos + size):
# can be satisified from prefetch buffer
offset = self.fpos - self.prefetch_buffer_start[i-1]
self.fpos += size
return self.prefetch_buffer[i-1][offset:offset+size]
else:
return self.really_read(size)
else:
return self.really_read(size)
def seek(self, n, whence=0):
if whence == 0:
self.fpos = n
elif whence == 1:
self.fpos += n
elif whence == 2:
self.fpos = self.size + n
def tell(self):
return self.fpos
def prefetch(self, read_areas):
self.prefetch_buffer_start.extend(start for start, _ in read_areas)
self.prefetch_buffer_end.extend(end for _, end in read_areas)
self.prefetch_buffer.extend(self.vread(read_areas))
def httpopen(url):
r = urlopen(url)
size = int(r.info().getheader("Content-Length"))
r.close()
return HTTPFile(url, size)
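# Minimal usage sketch (the URL is a placeholder): f = httpopen(url); f.prefetch([(0, 1024)]); data = f.read(1024)
# then serves the first kilobyte from the prefetched HTTP Range request instead of issuing a new one.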
class RemoteZipFile(object):
"""A potentially remote ZIP file"""
def __init__(self, name_or_url): # Holes smaller than 5MB will be read anyway.
if name_or_url.startswith("http:"):
self._f = httpopen(name_or_url)
self._use_read_buffer = True
# prefetch the last MB to capture most of the index
self._f.prefetch([(self._f.size-1024*1024, self._f.size)])
else:
self._f = open(name_or_url)
self._use_read_buffer = False
self._zf = ZipFile(self._f)
if self._use_read_buffer:
self._sinfo = sorted((i.header_offset, i) for i in self._zf.infolist())
self._dict = dict((i.filename, i) for i in self._zf.infolist())
def keys(self):
return self._zf.namelist()
def require(self, required):
if self._use_read_buffer:
def get_block_range(block_id):
s = self._sinfo[block_id][1].header_offset
if block_id != len(self._sinfo)-1:
e = self._sinfo[block_id + 1][1].header_offset
else:
e = self._f.size - 1
return (s, e)
blocks = [j for j, (_, i) in enumerate(self._sinfo) if i.filename in required]
read_blocks = []
for i in blocks:
if not read_blocks:
read_blocks.append(get_block_range(i))
else:
start, end = read_blocks[-1]
b_start, b_end = get_block_range(i)
if b_start > end + MIN_HOLE_SIZE:
read_blocks.append((b_start, b_end))
else:
read_blocks[-1] = (start, b_end)
self._f.prefetch(read_blocks)
rset = set(required)
for i in self._zf.infolist():
if i.filename in rset:
rset.remove(i.filename)
x = self._zf.open(i)
write(1, struct.pack("i", len(i.filename)))
write(1, i.filename)
write(1, struct.pack("i", i.file_size))
write(1, x.read())
#x.read1(i.file_size)
#x.read()#1(i.file_size)
assert not rset, rset
# "http://lcg-lrz-dc66.grid.lrz.de//pnfs/lrz-muenchen.de/data/atlas/dq2/atlaslocalgroupdisk/user/ebke/20130318/user.ebke.20130318.test1/dit0.zip")
if __name__ == "__main__":
import sys
f = RemoteZipFile(sys.argv[1])
if len(sys.argv) < 2:
print >> stderr, "Usage: <file> [one|all]"
elif len(sys.argv) == 2:
keys = "\n".join(sorted(f.keys()))
write(1, struct.pack("i", len(keys)))
write(1, keys)
required = map(str.strip, sys.stdin.readline().split(";"))
f.require(required)
elif sys.argv[2] == "all":
f.require(sorted(f.keys()))
elif sys.argv[2] == "half":
f.require(sorted(f.keys())[:len(f.keys())/2])
elif sys.argv[2] == "one":
f.require([sorted(f.keys())[42]])
elif sys.argv[2] == "two":
f.require(sorted(f.keys())[42:44])
else:
assert False
``` |
{
"source": "JohannesEbke/xdg_json_cache",
"score": 3
} |
#### File: xdg_json_cache/app_json_file_cache/app_cache.py
```python
from .function_cache import FunctionCache
class AppCache:
def __init__(self, app_name):
self.app_name = app_name
def __call__(self, name, vary=None, cheap_default_func=None):
return FunctionCache(self.app_name, name, vary, cheap_default_func=cheap_default_func)
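# Typical use (mirrors test_basics.py): cache = AppCache("my_app"); cached = cache("results")(expensive_func)
# -- repeated calls with the same arguments are then served from the on-disk JSON cache.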
```
#### File: xdg_json_cache/app_json_file_cache/function_cache.py
```python
from functools import partial
from .data_cache import DataCache
class FunctionCache(DataCache):
def __init__(self, *args, **kwargs):
cheap_default_func = kwargs.pop("cheap_default_func")
super(FunctionCache, self).__init__(*args, **kwargs)
self._cheap_default_func = cheap_default_func
def __call__(self, func):
def f(*args, **kwargs):
key = {'args': args, 'kwargs': kwargs}
assert args == () or kwargs == {}, 'Mixing positional and keyword arguments not supported: {}'.format(key)
try:
return self.get(key)
except KeyError:
if self._cheap_default_func:
return self._cheap_default_func(*args, **kwargs)
self.store(key, func(*args, **kwargs))
return self.get(key)
f.clear = self.clear
f.recalculate = partial(self.recalculate, func)
return f
def recalculate(self, func, *args, **kwargs):
key = {'args': args, 'kwargs': kwargs}
assert args == () or kwargs == {}, 'Mixing positional and keyword arguments not supported: {}'.format(key)
self.store(key, func(*args, **kwargs))
```
#### File: xdg_json_cache/app_json_file_cache/test_basics.py
```python
from os.path import exists
from . import AppCache
Cache = AppCache("app_json_file_cache")
def test_simple():
assert Cache("test_simple")(lambda: 12)() == 12
assert Cache("test_simple")(lambda: 13)() == 12
Cache("test_simple").clear()
def test_vary():
assert Cache("test_simple", vary=1)(lambda: 12)() == 12
assert Cache("test_simple", vary=2)(lambda: 13)() == 13
Cache("test_simple").clear()
def test_corrupt():
assert Cache("test_simple", vary=1)(lambda: 12)() == 12
c = Cache("test_simple")
fn = c._filename(c._key_to_string({'args': [], 'kwargs': {}}))
assert exists(fn)
with open(fn, "w") as fd:
fd.write("not JSON")
assert Cache("test_simple", vary=1)(lambda: 24)() == 24
Cache("test_simple")(lambda: 24).clear()
assert not exists(fn)
def test_parameter():
cached_f = Cache("test_parameter")(lambda x: x * 2)
assert cached_f(4) == 8
assert cached_f(5) == 10
cached_f2 = Cache("test_parameter")(lambda x: x * 4)
assert cached_f2(4) == 8 # Cached, returns "old" value
assert cached_f2(5) == 10 # Cached, returns "old" value
Cache("test_parameter").clear()
def test_parameter_dict():
cached_f = Cache("test_parameter")(lambda x, y: dict(list(x.items()) + list(y.items())))
assert cached_f({'a': 1, 'b': 2}, {'c': 3}) == {'a': 1, 'b': 2, 'c': 3}
assert cached_f({'a': 1, 'b': 3}, {'c': 3}) == {'a': 1, 'b': 3, 'c': 3}
cached_f2 = Cache("test_parameter")(lambda x, y: {})
assert cached_f2({'a': 1, 'b': 2}, {'c': 3}) == {'a': 1, 'b': 2, 'c': 3} # Cached, returns "old" value
assert cached_f2({'a': 1, 'b': 3}, {'c': 3}) == {'a': 1, 'b': 3, 'c': 3} # Cached, returns "old" value
assert cached_f2({'a': 2, 'b': 3}, {'c': 3}) == {} # This one is not cached
cached_f2.clear()
def test_keyword_args():
@Cache("keyword")
def keyword_function(a=1, b=2):
return a + b
@Cache("keyword")
def test_keyword_function(a=1, b=2, c=0):
return 0
assert keyword_function() == 3
assert keyword_function(a=10) == 12
assert keyword_function(b=10) == 11
assert keyword_function(b=10, a=10) == 20
assert test_keyword_function(a=10) == 12
assert test_keyword_function(b=10) == 11
assert test_keyword_function(b=10, a=10) == 20
assert test_keyword_function(a=10, b=10) == 20
assert test_keyword_function() == 3
assert test_keyword_function(a=10, b=10, c=1) == 0
keyword_function.clear()
``` |
{
"source": "johanneseder711/personal_finance",
"score": 3
} |
#### File: src/API/n26.py
```python
from n26.api import Api
def get_n26_balance():
api_client = Api()
total_balance = api_client.get_spaces()['totalBalance']
total_balance = str(total_balance).replace('.',',')
# display the string (which is the money amount) with a "." as a separator of thousands
num_digits = len(total_balance.split(',')[0])
    # every 3 digits we need a separator
num_digits -= 3
# only if the number has at least 4 digits
while num_digits > 0:
total_balance = total_balance[:num_digits] + '.' + total_balance[num_digits:]
num_digits -= 3
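    # Worked example of the separator loop above: '12345,67' -> num_digits starts at 2, one pass gives '12.345,67'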
statements = api_client.get_transactions()
last_transaction_found = False
counter = 0
# define space name or list of spaces
spaces = ['Hawaii']
# while the last transaction is not found
while not last_transaction_found:
        # iterate over the transactions while the current one is a transfer between the main space and other spaces;
        # these transfers do not show up as payments (income or expense), so they are excluded
if ('partnerName' in statements[counter]) and (any(space in statements[counter]['partnerName'] for space in spaces)):
counter += 1
else:
last_transaction_found = True
last_transaction_amount = statements[counter]['amount']
last_transaction_amount = str(last_transaction_amount).replace('.',',')
return (total_balance, last_transaction_amount)
```
#### File: src/WebScraping/bitpanda.py
```python
import streamlit as st
from selenium import webdriver
from selenium.webdriver.common.by import By
from WebScraping.helperfunctions.wait import wait_for_full_load
from WebScraping.helperfunctions.credentials import get_credentials
import time
def expand_shadow_element(driver, element):
shadow_root = driver.execute_script('return arguments[0].shadowRoot', element)
return shadow_root
def get_bitpanda_balance():
# define the URL for this site
URL = 'https://account.bitpanda.com/login'
# get the correct login credentials for this site
user,pw = get_credentials(URL)
# start the session
driver = webdriver.Safari()
driver.get(URL)
#driver.maximize_window()
time.sleep(5)
# find the shadow root element to accept cookies
root1 = driver.find_element(By.TAG_NAME,'bpc-cookie-banner')
shadow_root1 = expand_shadow_element(driver, root1)
print('\n \n \n shadow_root1 element',shadow_root1)
# accept cookies
# FINISHED HERE -> STILL NOT WORKING TO FIND THE CORRECT ROOT SHADOW ELEMENT AND ACCEPT COOKIES
    # REFER TO https://stackoverflow.com/questions/37384458/how-to-handle-elements-inside-shadow-dom-from-selenium
expanded_driver = shadow_root1.find_element(By.CLASS_NAME,"bpc-cookie-accept-button").click()
print('\n \n \n \n expanded_driver element',expanded_driver)
# wait for full load
wait_for_full_load(expanded_driver,"login-submit",how='id')
# input username/email
expanded_driver.find_element(By.ID,"email").send_keys(user)
time.sleep(5)
# input password
expanded_driver.find_element(By.ID,"password']").send_keys(pw)
# click login submit button
expanded_driver.find_element(By.ID, "login-submit").click()
```
#### File: WebScraping/helperfunctions/wait.py
```python
import time
from selenium.webdriver.common.by import By
def wait_for_full_load(driver, path, how='xpath'):
'''
A function that takes in an xpath of an element and a driver (browser).
The function will be exited as soon as the element is available on the site.
'''
waiting = True
while waiting:
# check if at least one element is already available on the site
if how == 'xpath':
elements = driver.find_elements(By.XPATH, path)
print(len(elements))
elif how == 'css':
elements = driver.find_elements(By.CSS_SELECTOR, path)
elif how == 'id':
elements = driver.find_elements(By.ID, path)
if len(elements)!=0:
waiting = False
return;
else:
time.sleep(1)
``` |
{
"source": "johannesemme/ESA",
"score": 3
} |
#### File: johannesemme/ESA/esa.py
```python
import logging
import numpy as np
from xml_processing import XmlDumpFile
import gzip
import pickle
from sklearn.feature_extraction.text import TfidfVectorizer
import click
from click_loglevel import LogLevel
import os
import re
import requests
url = "http://snowball.tartarus.org/algorithms/danish/stop.txt"
snowball_stopwords = re.findall('^(\w+)', requests.get(url).text, flags=re.MULTILINE | re.UNICODE)
class ExplicitSemanticAnalysis(object):
"""
Computing semantic relatedness using Wikipedia-based explicit semantic analysis.
Explicit semantic analysis proposed by <NAME> and <NAME>, 2007.
"""
def __init__(self,
new_model = False, stop_words=snowball_stopwords, max_n_pages=None, model_name = "", noredirecting=True):
self.stop_words = stop_words
self.max_n_pages = max_n_pages
self.model_name = model_name
self.noredirecting = noredirecting
self.logger = logging.getLogger(__name__)
self.logger.addHandler(logging.NullHandler())
self.file_path = "ESA models/"
if new_model:
self.logger.info('Creating new ESA model')
self.setup_ESA()
self.logger.info('Storing ESA model in pickle files')
self.save_pkl()
else:
self.logger.info('Load existing model...')
self.load_pkl()
def setup_ESA(self):
"""
Setup ESA model
"""
self._dump_file = XmlDumpFile(discard_redirects=self.noredirecting)
self._titles, texts = zip(*[(page['title'], page['text'])
for page in self._dump_file.iter_article_pages(max_n_pages=self.max_n_pages)])
self.logger.info('TFIDF vectorizing')
# remove words occuring less than 5 times + remove words occuring in more than half the documents
self._transformer = TfidfVectorizer(stop_words=self.stop_words, min_df=5, max_df=0.5)
self._Y = self._transformer.fit_transform(texts)
def query(self, query_text, n=5):
processed_query_text = " ".join(query_text.lower().split()) # process query
y = self._transformer.transform([processed_query_text]) # transform query text with TFIDF transformer
D = np.array((self._Y * y.T).todense()) # calculate score using dot product
indices = np.argsort(-D, axis=0)
titles = [self._titles[index] for index in indices[:n, 0]]
return titles
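    # Illustrative call (assumes a built model over Danish Wikipedia): esa_model.query("fodbold", n=3)
    # returns the three article titles whose TF-IDF vectors have the largest dot product with the query.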
def save_pkl(self):
"""Save parameters to pickle files."""
try:
os.makedirs(self.file_path)
except FileExistsError:
pass # directory already exists
items = [
('_titles', 'wikipedia-esa-titles.pkl.gz'),
('_Y', 'wikipedia-esa-y.pkl.gz'),
('_transformer', 'wikipedia-esa-transformer.pkl.gz')
]
for attr, filename in items:
full_filename = self.file_path + self.model_name + "_" + filename
self.logger.info('Writing parameters to pickle file {}'.format(full_filename))
with gzip.open(full_filename, 'w') as f:
pickle.dump(getattr(self, attr), f, -1)
def load_pkl(self):
"""Load parameters from pickle files."""
items = [
('_titles', 'wikipedia-esa-titles.pkl.gz'),
('_Y', 'wikipedia-esa-y.pkl.gz'),
('_transformer', 'wikipedia-esa-transformer.pkl.gz')
]
for attr, filename in items:
full_filename = self.file_path + self.model_name + "_" + filename
self.logger.info('Reading parameters from pickle file {}'.format(full_filename))
with gzip.open(full_filename) as f:
setattr(self, attr, pickle.load(f))
@click.command()
@click.option("--new_model", default=False)
@click.option("--noredirecting", default=True)
@click.option("--model_name", default="")
@click.option("-l", "--log_level", type=LogLevel(), default=logging.INFO)
@click.option("--num_matches", default=10)
@click.option("--num_wikipages", default=None)
def model(new_model, log_level, num_matches, num_wikipages, model_name, noredirecting):
logging.basicConfig(
format="[%(levelname)-8s] %(message)s",
level=log_level,
)
logging.log(log_level, "Log level set to %r", log_level)
esa_model = ExplicitSemanticAnalysis(new_model = new_model, max_n_pages = num_wikipages,
model_name = model_name, noredirecting = noredirecting)
while True:
input_text = input("Enter query (type 'q' for exit): ")
if input_text == "q":
break
print()
output = esa_model.query(query_text = input_text, n = num_matches)
print(output)
print("------" * 20 )
if __name__ == "__main__":
model()
```
#### File: johannesemme/ESA/wiki_db.py
```python
from marisa_trie import Trie
import logging
from logging import basicConfig
import re
import click
from typing import List
import numpy as np
KEY_RULE = re.compile("^(.*):([^:]+)$")
logger = logging.getLogger(__name__)
@click.command()
@click.argument("wikidata_dump_file", type=click.Path(exists=True))
@click.argument("out_file", type=click.Path())
def build_interwiki_db(wikidata_dump_file: str, out_file: List[str] = None):
logging.basicConfig(level=logging.INFO)
interwiki_db = WikiDB.build(wikidata_dump_file)
interwiki_db.save(out_file)
class WikiDB(object):
def __init__(self, title_trie: Trie, data: np.ndarray, indptr: np.ndarray, title_indices: np.ndarray):
self._title_trie = title_trie
self._data = data
self._indptr = indptr
self._title_indices = title_indices
``` |
{
"source": "johannesfritz/liberating-archives",
"score": 3
} |
#### File: johannesfritz/liberating-archives/app.py
```python
import flask
from flask import Response
import json
import sqlite3
# Create the application.
app = flask.Flask(__name__)
@app.route('/')
def index():
""" Displays the index page accessible at '/'
"""
return flask.render_template('index.html')
if __name__ == '__main__':
    app.debug = True
app.run()
``` |
{
"source": "johannesgaa/daydreamerhost",
"score": 2
} |
#### File: daydreamerhost/tests/test_app.py
```python
from .context import daydreamerhost
def test_app(capsys, example_fixture):
# pylint: disable=W0612,W0613
daydreamerhost.Blueprint.run()
captured = capsys.readouterr()
assert "Hello World..." in captured.out
``` |
{
"source": "JohannesGaessler/presentation_mc",
"score": 3
} |
#### File: JohannesGaessler/presentation_mc/02_pi_crude.py
```python
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
plt.figure(figsize=(32.0, 6.0))
def f(x):
return 1 - np.sqrt(1 - x ** 2)
SAMPLE_SIZE = 1000
Ef = quad(lambda x: f(x), 0, 1)[0]
Varf = quad(lambda x: (f(x) - Ef) ** 2, 0, 1)[0]
rand_x = np.random.rand(SAMPLE_SIZE)
rand_y = f(rand_x)
plot_x = np.linspace(start=0, stop=1.0, num=101, endpoint=True)
for i in range(5):
plt.subplot(1, 5, i+1)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.plot(plot_x, f(plot_x))
plt.bar(x=0, height=rand_y[i], width=1.0, align="edge", color=(1.0, 0.0, 0.0, 0.5))
plt.savefig("pi_crude.png")
pi_empirical = 4 * (1.0 - np.sum(rand_y)/SAMPLE_SIZE)
print(f"Estimate: {pi_empirical:.6f}")
print(f"Empirical uncertainty: {4 * np.sqrt(np.var(rand_y) / SAMPLE_SIZE) / pi_empirical * 100:.4f}%")
print(f"Expected uncertainty: {4 * np.sqrt(Varf / SAMPLE_SIZE) / np.pi * 100:.4f}%")
```
#### File: JohannesGaessler/presentation_mc/04_pi_vegas.py
```python
import numpy as np
import matplotlib.pyplot as plt
plt.figure(figsize=(20.0, 6.0))
def f(x):
return 1 - np.sqrt(1 - x ** 2)
EXPECTED_AREA = 1.0 - np.pi / 4
def vegas(iterations=3, samples_per_iteration=333, num_bins=20, K=1000, alpha=1.0, make_plots=False):
bin_edges = np.linspace(start=0, stop=1, endpoint=True, num=num_bins+1)
bin_widths = bin_edges[1:] - bin_edges[:-1]
weighted_function_value_sum = 0.0
for j in range(iterations):
random_numbers = np.random.rand(samples_per_iteration)
random_bins = np.random.randint(low=0, high=num_bins, size=samples_per_iteration)
random_bins_low = bin_edges[random_bins]
random_bins_high = bin_edges[random_bins + 1]
random_bin_widths = random_bins_high - random_bins_low
random_numbers_transformed = random_bins_low + random_numbers * random_bin_widths
function_values = f(random_numbers_transformed)
weighted_function_values = function_values * random_bin_widths * num_bins
if make_plots:
plt.subplot(1, iterations, j+1)
plt.xlim(0, 1)
plt.ylim(0, 1)
plot_x = np.linspace(start=0.001, stop=1.0, num=1000, endpoint=True)
plt.vlines(
x=random_numbers_transformed[:100], ymin=0, ymax=weighted_function_values[:100], color="black",
label="$samples$"
)
plt.plot(plot_x, f(plot_x), label="$f(x)$")
plt.bar(
x=bin_edges[:-1], height=EXPECTED_AREA/(num_bins * bin_widths), width=bin_widths, align="edge",
color=(1.0, 0.0, 0.0, 0.5), label="$g(x)$"
)
plt.xlabel("$x$")
if j == 0:
plt.ylabel("$y$")
plt.legend(loc="upper left")
weighted_function_value_sum += np.sum(weighted_function_values)
bin_weights = np.zeros(num_bins)
for i in range(num_bins):
bin_weights[i] = np.sum(function_values[random_bins == i])
bin_weights *= bin_widths
#bin_splits = 1 + K * bin_weights / np.sum(bin_weights)
bin_splits = 1 + K * ((bin_weights / np.sum(bin_weights) - 1) / np.log(bin_weights / np.sum(bin_weights))) ** alpha
bin_splits = bin_splits.astype(int)
refined_bin_edges = np.zeros(1 + np.sum(bin_splits))
refined_bin_weights = np.zeros(refined_bin_edges.shape[0] - 1)
index = 0
for i in range(num_bins):
new_bin_edges = np.linspace(start=bin_edges[i], stop=bin_edges[i+1], num=bin_splits[i], endpoint=False)
refined_bin_edges[index:index+bin_splits[i]] = new_bin_edges
refined_bin_weights[index:index+bin_splits[i]] = bin_weights[i] / bin_splits[i]
index += bin_splits[i]
refined_bin_edges[-1] = 1.0
average_bin_weight = np.mean(bin_weights)
new_bin_edges = np.zeros_like(bin_edges)
current_sum = 0
current_refined_index = 0
for i in range(num_bins-1):
while current_sum < average_bin_weight:
current_sum += refined_bin_weights[current_refined_index]
current_refined_index += 1
current_sum -= average_bin_weight
new_bin_edges[i + 1] = refined_bin_edges[current_refined_index]
new_bin_edges[-1] = 1
bin_edges = new_bin_edges
bin_widths = bin_edges[1:] - bin_edges[:-1]
if make_plots:
plt.savefig("pi_vegas.png")
integral_estimate = weighted_function_value_sum / (iterations * samples_per_iteration)
return 4 * (1.0 - integral_estimate)
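# As in the crude estimator, the integral of f over [0, 1] equals 1 - pi/4
# (EXPECTED_AREA above), so the importance-sampled integral estimate is mapped
# back to pi via 4 * (1 - integral_estimate).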
if __name__ == "__main__":
print(f"Estimate: {vegas(make_plots=True)} s")
#plt.show()
``` |
{
"source": "johannes-gehrs/centos_packages",
"score": 2
} |
#### File: johannes-gehrs/centos_packages/config.py
```python
from __future__ import absolute_import, division, unicode_literals
import os
import logging
OS_VERSIONS = ['6', '7']
DATA_DIR = '/tmp/centos_packages/'
REPO_BASE_URL = 'http://mirror.centos.org/centos/'
REPOSITORIES = ['os', 'updates', 'centosplus', 'extras', 'fasttrack']
REPOSITORIES_PRETTY = {'os': 'Base',
'updates': 'Updates',
'extras': 'Extras',
'fasttrack': 'Fasttrack'}
LIMIT_RESULTS = 250
CACHE_MAX_AGE = 4260
CACHE_IN_DEBUG_MODE = False
def active_repos():
return [repo for repo in REPOSITORIES if not repo == 'centosplus']
# Logging
LOGDIR = DATA_DIR + 'log/'
LOGFILE = LOGDIR + 'centos_packages.log'
if not os.path.isdir(LOGDIR):
os.makedirs(LOGDIR)
logging.basicConfig(filename=LOGFILE,
level=logging.INFO,
format='%(asctime)s %(levelname)s: %(message)s')
```
#### File: johannes-gehrs/centos_packages/packages.py
```python
from __future__ import absolute_import, division, unicode_literals
import xml.etree.ElementTree as ElT
import bz2
import io
import os
import uuid
import sqlite3
import config
import re
import datetime
import pickle
import requests
REPODATA_ARC_SUFFIX = "x86_64/"
METADATA_SUFFIX = "repodata/repomd.xml"
PACKAGE_TIMESTAMP_FILE = config.DATA_DIR + 'packages_timestamp.pickled'
YUM_REPODATA_XML_NAMESPACE = 'http://linux.duke.edu/metadata/repo'
def _find_db_link_in_xml(xml_text):
root = ElT.fromstring(xml_text)
for data_elmnt in root.iter('{' + YUM_REPODATA_XML_NAMESPACE + '}data'):
if data_elmnt.attrib['type'] == 'primary_db':
return data_elmnt.find('{' + YUM_REPODATA_XML_NAMESPACE + '}location').attrib['href']
else:
raise ValueError('Data not found in XML')
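# The repomd.xml parsed above follows the usual yum metadata layout, roughly
# (the href value is illustrative):
#   <repomd xmlns="http://linux.duke.edu/metadata/repo">
#     <data type="primary_db">
#       <location href="repodata/xxxx-primary.sqlite.bz2"/>
#     </data>
#   </repomd>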
def _download_one(version):
for repo in config.active_repos():
repo_base_url = config.REPO_BASE_URL + unicode(version) + \
'/' + repo + '/' + REPODATA_ARC_SUFFIX
        metadata_request_url = repo_base_url + METADATA_SUFFIX
        metadata_request = requests.get(metadata_request_url)
db_href = _find_db_link_in_xml(metadata_request.text)
db_request_url = repo_base_url + db_href
db_request = requests.get(db_request_url)
if db_request.status_code != 200:
raise IOError('Could not get file ' + db_request_url)
database = bz2.decompress(db_request.content)
temp_filename = config.DATA_DIR + unicode(uuid.uuid1())
final_filename = config.DATA_DIR + repo + '_' + version + '.sqlite'
with io.open(temp_filename, mode='wb') as file:
file.write(database)
os.rename(temp_filename, final_filename)
def download():
for version in config.OS_VERSIONS:
_download_one(version)
def _conn_factory(version, repo):
conn = sqlite3.connect(config.DATA_DIR + repo + '_' + version + '.sqlite')
conn.row_factory = sqlite3.Row
return conn
def _primary_query_execute(conn, repo):
c = conn.cursor()
query = '''
SELECT name, arch, version, epoch,
? AS repo, "release", summary, description, rpm_sourcerpm,
url, rpm_license AS license, location_href, pkgKey
FROM packages
WHERE 1=1
-- AND name = 'kernel'
--LIMIT 15
'''
c.execute(query, (repo,))
return c.fetchall()
def _read_from_dbs(version):
package_list = []
for repo in config.active_repos():
conn = _conn_factory(version, repo)
package_list = package_list + _primary_query_execute(conn, repo)
return package_list
def _prepare(package_list):
prepared = {}
for row in package_list:
prepared.setdefault(row[b'name'], []).append(dict(row))
for name in prepared:
prepared[name].sort(cmp=compare_rpm_versions)
return prepared
def _not_none_epoch(epoch):
if epoch is not None:
return epoch
return '0'
def _is_int(mystring):
try:
int(mystring)
return True
except ValueError:
return False
# http://stackoverflow.com/questions/3206319/how-do-i-compare-rpm-versions-in-python
# hold my beer while I implement this
def _compare_rpm_label_fields(field1, field2):
alphanumeric_matches = lambda field: list(re.finditer(r'[a-zA-Z0-9]+', field))
field1_matches, field2_matches = alphanumeric_matches(field1), alphanumeric_matches(field2)
for match_pair in zip(field1_matches, field2_matches):
value_pair = [match.group() for match in match_pair]
numeric_vals = [_is_int(value) for value in value_pair]
# Non-equal types
if not all(numeric_vals) and any(numeric_vals):
if numeric_vals[1]:
return -1
if numeric_vals[0]:
return 1
# Equal types: Alphanumeric
if not any(numeric_vals):
if value_pair[0] < value_pair[1]:
return -1
if value_pair[0] > value_pair[1]:
return 1
# Equal types: Numeric
if all(numeric_vals):
if int(value_pair[0]) < int(value_pair[1]):
return -1
if int(value_pair[0]) > int(value_pair[1]):
return 1
assert value_pair[0] == value_pair[1]
# Decision by no. of fields
if len(field1_matches) < len(field2_matches):
return -1
if len(field1_matches) > len(field2_matches):
return 1
if len(field1_matches) == len(field2_matches):
return 0
raise RuntimeError('This code should not be reached, because one of the if paths '
'should have been executed.')
def compare_rpm_versions(version_one, version_two):
    label_components = ['epoch', 'version', 'release']
    result = 0
    for component in label_components:
        field_one, field_two = version_one[component], version_two[component]
        if component == 'epoch':
            # epoch may be missing (None) in the repo databases; treat it as '0'
            field_one, field_two = _not_none_epoch(field_one), _not_none_epoch(field_two)
        result = _compare_rpm_label_fields(field_one, field_two)
        if result != 0:
            break
    return result
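# Illustrative ordering (hypothetical package dicts, not from the database):
# {'epoch': '0', 'version': '1.2', 'release': '10.el7'} sorts after
# {'epoch': '0', 'version': '1.2', 'release': '9.el7'}, because the numeric
# segment 10 > 9; any package with epoch '1' outranks every '0'-epoch build.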
def get_version(version):
return _prepare(_read_from_dbs(version))
def get_all():
packages_dict = {}
for os_version in config.OS_VERSIONS:
packages_dict[os_version] = get_version(os_version)
return packages_dict
def minor_os_release(all_packages_dict):
newest_package_version = all_packages_dict['centos-release'][-1]
major_release = newest_package_version['version']
minor_release_integer = re.match(r'.*?\.', newest_package_version['release']).group()[:-1]
return major_release + '.' + minor_release_integer
def set_timestamp_to_now():
now = datetime.datetime.now()
with io.open(PACKAGE_TIMESTAMP_FILE, mode='wb') as myfile:
pickle.dump(now, myfile)
def get_timestamp():
with io.open(PACKAGE_TIMESTAMP_FILE, mode='rb') as myfile:
timestamp = pickle.load(myfile)
return timestamp
def rpm_download_url(package_version, os_version):
return config.REPO_BASE_URL + os_version + '/' + package_version['repo'] + \
'/' + REPODATA_ARC_SUFFIX + package_version['location_href']
def newest_versions_as_list(os_version, all_packages_dict):
newest_versions_list = []
for package_name in all_packages_dict[os_version]:
newest_versions_list.append(all_packages_dict[os_version][package_name][-1])
return newest_versions_list
``` |
{
"source": "johannes-gehrs/nutrition-diary",
"score": 3
} |
#### File: nutrition-diary/app/scrape_fddb.py
```python
from __future__ import absolute_import, division, unicode_literals
from decimal import Decimal
import re
from pyquery import PyQuery as Pq
from app import models
def _decimal_from_string(value_string):
decimal_match = re.search(r'\d+\,\d+', value_string)
if decimal_match is not None:
return Decimal(decimal_match.group().replace(',', '.'))
else:
return Decimal(re.search(r'\d+', value_string).group())
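# German decimal notation uses a comma, so (with illustrative inputs)
# _decimal_from_string("12,5 g") -> Decimal("12.5") and
# _decimal_from_string("980 kJ") -> Decimal("980").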
def _quantities(pq_page):
identifier_divs = pq_page('div.sidrow')
raw_values = {Pq(div).text(): Pq(div).next().text() for div in identifier_divs}
relevant_keys = ['Fett', 'Ballaststoffe', 'Kohlenhydrate', 'Protein', 'Kalorien']
filtered_values = {key: raw_values.get(key, '0 g') for key in relevant_keys}
return {key: _decimal_from_string(filtered_values[key]) for key in filtered_values}
def _serving(pq_page):
serving = pq_page('a.servb')[0]
description = serving.text_content()
serving_size_in_g_match = re.search(r'\d+\ g', description)
serving_size_in_ml_match = re.search(r'\d+\ ml', description)
to_int = lambda string: int(re.search(r'\d+', string).group())
if serving_size_in_g_match is not None:
serving_size_in_g_as_int = to_int(serving_size_in_g_match.group())
# We simply treat ml as grams (Punk rock)
elif serving_size_in_ml_match is not None:
serving_size_in_g_as_int = to_int(serving_size_in_ml_match.group())
else:
raise ValueError("Can't find serving size")
return description, serving_size_in_g_as_int
def _name(pq_page):
return pq_page('div.pageheadline h1').text()
def item(url):
pq_page = Pq(url)
name = _name(pq_page)
quantities = _quantities(pq_page)
serving = _serving(pq_page)
return models.FoodType(name=name,
source_url=url,
calories=quantities['Kalorien'],
fiber=quantities['Ballaststoffe'],
fat=quantities['Fett'],
carbon=quantities['Kohlenhydrate'],
protein=quantities['Protein'],
serving_description=serving[0],
serving_size=serving[1])
```
#### File: johannes-gehrs/nutrition-diary/fabfile.py
```python
from __future__ import absolute_import, division, unicode_literals
from fabric.api import run, cd, env
env.hosts = ['<EMAIL>']
def deploy():
code_dir = '/usr/share/nutrition-diary'
with cd(code_dir):
run("git pull")
run("pip install --upgrade --requirement=requirements.txt")
run("/etc/init.d/gunicorn reload")
``` |
{
"source": "johannesgiorgis/my-timewarrior-extensions",
"score": 2
} |
#### File: my-timewarrior-extensions/extensions/catsum.py
```python
import datetime
import io
import json
import logging
import pprint
import sys
from typing import Dict, Any
from dateutil import tz
# set logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create handler
c_handler = logging.StreamHandler()
c_handler.setLevel(logging.INFO)
# Create formatters and add it to handlers
LOG_FORMAT = "[%(asctime)s - %(levelname)-8s - %(module)s:%(name)s ] %(message)s"
c_format = logging.Formatter(LOG_FORMAT)
c_handler.setFormatter(c_format)
# Add handlers to the logger
logger.addHandler(c_handler)
DATE_FORMAT = "%Y%m%dT%H%M%SZ"
# TODO: Convert to defaultdict
# https://www.accelebrate.com/blog/using-defaultdict-python
# https://stackoverflow.com/questions/9358983/dictionaries-and-default-values
# https://docs.python.org/2/library/collections.html#collections.defaultdict
CATEGORIES: dict = {
"PT": "Personal Time",
"PW": "Planned Work",
"UW": "Unplanned Work",
"OW": "Other Work",
}
def main():
print("~" * 100)
totals = calculate_totals(sys.stdin)
# print(totals)
if not totals:
sys.exit(0)
categories_total = extract_categories(totals)
# All Categories Statistics
category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_category_breakdown = format_category_breakdown(category_percent_breakdown)
display_category_breakdown(formatted_category_breakdown)
# remove personal category
categories_total.pop("Personal Time", None)
work_category_percent_breakdown = get_category_percent_breakdown(categories_total)
formatted_work_category_breakdown = format_category_breakdown(work_category_percent_breakdown)
display_category_breakdown(formatted_work_category_breakdown)
# formatted_category_breakdown.pop("Personal Time", None)
# formatted
# print(type(formatted_category_breakdown))
# print(formatted_category_breakdown.keys())
def format_seconds(seconds: int) -> str:
"""
Convert seconds to a formatted string
Convert seconds: 3661
To formatted: " 1:01:01"
"""
# print(seconds, type(seconds))
hours = seconds // 3600
minutes = seconds % 3600 // 60
seconds = seconds % 60
return f"{hours:4d}:{minutes:02d}:{seconds:02d}"
def calculate_totals(input_stream: io.TextIOWrapper) -> Dict[str, datetime.timedelta]:
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
# Extract the configuration settings.
header = 1
configuration = dict()
body = ""
for line in input_stream:
if header:
if line == "\n":
header = 0
else:
fields = line.strip().split(": ", 2)
if len(fields) == 2:
configuration[fields[0]] = fields[1]
else:
configuration[fields[0]] = ""
else:
body += line
# Sum the seconds tracked by tag
totals = dict()
untagged = None
j = json.loads(body)
for object in j:
start = datetime.datetime.strptime(object["start"], DATE_FORMAT)
if "end" in object:
end = datetime.datetime.strptime(object["end"], DATE_FORMAT)
else:
end = datetime.datetime.utcnow()
tracked = end - start
if "tags" not in object or object["tags"] == []:
if untagged is None:
untagged = tracked
else:
untagged += tracked
else:
for tag in object["tags"]:
if tag in totals:
totals[tag] += tracked
else:
totals[tag] = tracked
if "temp.report.start" not in configuration:
print("There is no data in the database")
return totals
start_utc = datetime.datetime.strptime(configuration["temp.report.start"], DATE_FORMAT)
start_utc = start_utc.replace(tzinfo=from_zone)
start = start_utc.astimezone(to_zone)
if "temp.report.end" in configuration:
end_utc = datetime.datetime.strptime(configuration["temp.report.end"], DATE_FORMAT)
end_utc = end_utc.replace(tzinfo=from_zone)
end = end_utc.astimezone(to_zone)
else:
end = datetime.datetime.now()
if len(totals) == 0 and untagged is None:
print(f"No data in the range {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
print(f"\nCategory Summary Data for {start:%Y-%m-%d %H:%M:%S} - {end:%Y-%m-%d %H:%M:%S}")
return totals
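# calculate_totals() expects the stream timewarrior pipes to report extensions:
# "key: value" header lines, a blank line, then a JSON array of intervals.
# Illustrative input (values are made up):
#   temp.report.start: 20210101T080000Z
#   temp.report.end: 20210101T170000Z
#
#   [{"start": "20210101T090000Z", "end": "20210101T100000Z", "tags": ["PW"]}]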
def extract_categories(totals: Dict[str, datetime.timedelta]) -> Dict[str, datetime.timedelta]:
categories_total = {}
for category, category_full_name in CATEGORIES.items():
categories_total[category_full_name] = totals.get(category, datetime.timedelta(0))
return categories_total
def get_category_percent_breakdown(
category_run_times: Dict[str, datetime.timedelta]
) -> Dict[str, Any]:
logger.debug("Getting category percentage breakdown...")
total_time = sum([run_time.total_seconds() for run_time in category_run_times.values()])
logger.debug(f"Total Time:{total_time}")
category_percentage_breakdown: dict = {}
for category, run_time in category_run_times.items():
category_percent = run_time.total_seconds() / total_time
category_percentage_breakdown[category] = {
"percent": category_percent,
"duration": run_time.total_seconds() / 60,
"run_time": format_seconds(int(run_time.total_seconds())),
}
# add total time statistics
category_percentage_breakdown["Total"] = {
"percent": total_time / total_time,
"duration": total_time / 60,
"run_time": format_seconds(int(total_time)),
}
logger.debug(pprint.pformat(category_percentage_breakdown))
return category_percentage_breakdown
def format_category_breakdown(category_breakdown: dict) -> Dict[str, Any]:
# print(type(category_breakdown))
# pprint.pprint(category_breakdown)
formatted_category_breakdown = {}
for category, category_statistics in category_breakdown.items():
formatted_category_breakdown[category] = {
# convert duration to mins
"duration": round(category_statistics["duration"], 2),
"percent": round(category_statistics["percent"] * 100, 2),
"run_time": category_statistics["run_time"],
}
return formatted_category_breakdown
def display_category_breakdown(category_breakdown: dict, title: str = "Category Breakdown"):
    # Determine the widest category name so the columns line up
    max_width = len("Category")
    for category in category_breakdown:
        if len(category) > max_width:
            max_width = len(category)
print_dotted_line()
print(f"\t\t{title.capitalize():>{max_width}}")
print(
f"{'Category':{max_width}}\t"
f"{'Duration':{max_width}}\t"
f"{'Run_Time':>{max_width + 2}}\t"
f"{'Percent':{max_width + 1}}"
)
for category, category_statistics in category_breakdown.items():
print(
f"{category:{max_width}}\t"
f"{category_statistics['duration']:{max_width}}\t"
f"{category_statistics['run_time']:}\t"
f"{category_statistics['percent']}%"
)
print_dotted_line()
def print_dotted_line(width: int = 72):
"""Print a dotted (rather 'dashed') line"""
print("-" * width)
if __name__ == "__main__":
main()
``` |
{
"source": "johannes-graeter/UnFlow",
"score": 2
} |
#### File: e2eflow/core/train.py
```python
import os
import re
from multiprocessing import Process
import numpy as np
import tensorflow as tf
try:
import memory_saving_gradients
tf.__dict__["gradients"] = memory_saving_gradients.gradients_memory
# tf.__dict__["gradients"] = memory_saving_gradients.gradients_speed
print("Use memory_saving_gradients reduce net memory usage for cost of speed. "
"See https://github.com/openai/gradient-checkpointing.")
except ImportError:
print("To fit bigger nets into memory get https://github.com/openai/gradient-checkpointing "
"and put it in your Pythonpath.")
pass
import tensorflow.contrib.slim as slim
from . import util
from .flow_util import flow_error_avg, flow_to_color, flow_error_image, outlier_pct
from .image_warp import image_warp
from .input import resize_input, resize_output_crop, resize_output, resize_output_flow
from .losses import occlusion, DISOCC_THRESH, create_outgoing_mask
from .supervised import supervised_loss
from .unsupervised import unsupervised_loss
from .util import add_to_debug_output
from .util import summarized_placeholder
from ..gui import display
from ..ops import forward_warp
def restore_networks(sess, params, ckpt, ckpt_path=None):
# Attention this is converted to checkpoints in e2eflow/util.py::convert_input_strings
finetune = params.get('finetune', [])
train_all = params.get('train_all', None)
spec = params.get('flownet', 'S')
flownet_num = len(spec)
net_names = ['flownet_c'] + ['stack_{}_flownet'.format(i + 1) for i in range(flownet_num - 1)] + ['funnet']
assert len(finetune) <= flownet_num
# Save all trained networks, restore all networks which are kept fixed
if train_all:
restore_external_nets = finetune if ckpt is None else []
variables_to_save = slim.get_variables_to_restore(include=net_names)
else:
restore_external_nets = finetune if ckpt is None else finetune[:flownet_num - 1]
variables_to_save = slim.get_variables_to_restore(include=net_names[-2:])
saver = tf.train.Saver(variables_to_save, max_to_keep=1000)
sess.run(tf.global_variables_initializer())
if ckpt is not None:
# continue training
saver.restore(sess, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
for i, ckpt in enumerate(restore_external_nets):
print('-- restore', net_names[i], ckpt.model_checkpoint_path)
try:
nets_to_restore = [net_names[i]]
variables_to_restore = slim.get_variables_to_restore(
include=nets_to_restore)
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, ckpt.model_checkpoint_path)
except:
# load partial network (missing final 2 upconvolutions)
nets_to_restore = [net_names[i]]
variables_to_restore = slim.get_variables_to_restore(
include=nets_to_restore)
variables_to_restore = [v for v in variables_to_restore
if not 'full_res' in v.name]
restorer = tf.train.Saver(variables_to_restore)
restorer.restore(sess, ckpt.model_checkpoint_path)
return saver
def _add_loss_summaries():
losses = tf.get_collection('losses')
for l in losses:
tensor_name = re.sub('tower_[0-9]*/', '', l.op.name)
tf.summary.scalar(tensor_name, l)
def _add_variable_summaries():
ms = tf.get_collection('motion_angles')
assert (len(ms) == 1)
batch_size, var_length = ms[0].shape.as_list()
for i in range(batch_size):
for j in range(var_length):
tensor_names = "motion_angles/batch{}/motion{}".format(i, j)
tf.summary.scalar(tensor_names, ms[0][i, j])
# train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
def add_weights(layer_name):
conv5_vars = tf.get_default_graph().get_tensor_by_name('funnet/alexnet_v2/{}/weights:0'.format(layer_name))
add_to_debug_output('debug/{}/weights'.format(layer_name), conv5_vars)
# for n in ['conv1', 'conv2', 'conv3', 'conv4', 'conv5']:
# add_weights(n)
# act = tf.get_collection('funnet/alexnet_v2/fc8')
# print(act)
# bs, a, b, c = act[0].shape.as_list()
# act = tf.reshape(act, (bs, a * b * c))
# for i in range(batch_size):
# for j in range(a * b * c):
# tensor_names = "motion_angles/batch{}/activation{}".format(i, j)
# tf.summary.scalar(tensor_names, act[i, j])
def _add_param_summaries():
params = tf.get_collection('params')
for p in params:
tensor_name = re.sub('tower_[0-9]*/', '', p.op.name)
tf.summary.scalar(tensor_name, p)
def _add_image_summaries():
images = tf.get_collection('train_images')
for im in images:
tensor_name = re.sub('tower_[0-9]*/', '', im.op.name)
tf.summary.image(tensor_name, im)
def _add_debug_tensor_summaries():
ts = tf.get_collection('debug_tensors')
for t in ts:
name = re.sub('tower_[0-9]*/', '', t.op.name)
tf.summary.scalar(name + '/mean', tf.reduce_mean(t))
tf.summary.scalar(name + '/max', tf.reduce_max(t))
tf.summary.scalar(name + '/min', tf.reduce_min(t))
def _eval_plot(results, image_names, title):
display(results, image_names, title)
class Trainer():
def __init__(self, train_batch_fn, eval_batch_fn, params,
train_summaries_dir, eval_summaries_dir, ckpt_dir,
normalization, debug=False, experiment="", interactive_plot=False,
supervised=False, devices=None):
self.train_summaries_dir = train_summaries_dir
self.eval_summaries_dir = eval_summaries_dir
self.ckpt_dir = ckpt_dir
self.params = params
self.debug = debug
self.train_batch_fn = train_batch_fn
self.eval_batch_fn = eval_batch_fn
self.normalization = normalization
self.experiment = experiment
self.interactive_plot = interactive_plot
self.plot_proc = None
self.supervised = supervised
self.loss_fn = supervised_loss if supervised else unsupervised_loss
        self.devices = devices or ['/gpu:0']
        self.shared_device = self.devices[0] if len(self.devices) == 1 else '/cpu:0'
def run(self, min_iter, max_iter):
"""Train (at most) from min_iter + 1 to max_iter.
If checkpoints are found in ckpt_dir,
        they must have a global_step within [min_iter, max_iter]. In this case,
training is continued from global_step + 1 until max_iter is reached.
"""
save_interval = self.params['save_interval']
ckpt = tf.train.get_checkpoint_state(self.ckpt_dir)
if ckpt is not None:
ckpt_path = ckpt.model_checkpoint_path
global_step = int(ckpt_path.split('/')[-1].split('-')[-1])
assert global_step >= min_iter, 'training stage not reached'
start_iter = global_step + 1
if start_iter > max_iter:
print('-- train: max_iter reached')
return
else:
start_iter = min_iter + 1
print('-- training from i = {} to {}'.format(start_iter, max_iter))
assert (max_iter - start_iter + 1) % save_interval == 0
for i in range(start_iter, max_iter + 1, save_interval):
self.train(i, i + save_interval - 1, i - (min_iter + 1))
#self.eval(1)
if self.plot_proc:
self.plot_proc.join()
def get_train_and_loss_ops(self, batch, learning_rate, global_step):
if self.params['flownet'] == 'resnet':
opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
else:
opt = tf.train.AdamOptimizer(beta1=0.9, beta2=0.999,
learning_rate=learning_rate)
def _add_summaries():
_add_loss_summaries()
_add_param_summaries()
_add_variable_summaries()
if self.debug:
_add_image_summaries()
_add_debug_tensor_summaries()
if len(self.devices) == 1:
loss_ = self.loss_fn(batch, self.params, self.normalization)
if self.params.get('train_motion_only'):
scope = "funnet"
else:
scope = None
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
train_op = opt.minimize(loss_, var_list=train_vars)
_add_summaries()
else:
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
for i, devid in enumerate(self.devices):
with tf.device(devid):
with tf.name_scope('tower_{}'.format(i)) as scope:
loss_ = self.loss_fn(batch, self.params, self.normalization)
_add_summaries()
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
tower_summaries = tf.get_collection(tf.GraphKeys.SUMMARIES,
scope)
grads = opt.compute_gradients(loss_)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
apply_gradient_op = opt.apply_gradients(grads)
train_op = apply_gradient_op
return train_op, loss_
def train(self, start_iter, max_iter, iter_offset):
ckpt = tf.train.get_checkpoint_state(self.ckpt_dir)
with tf.Graph().as_default(), tf.device(self.shared_device):
batch = self.train_batch_fn(iter_offset)
with tf.name_scope('params') as scope:
learning_rate_ = util.summarized_placeholder('learning_rate', 'train')
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
global_step_ = tf.placeholder(tf.int32, name="global_step")
train_op, loss_ = self.get_train_and_loss_ops(batch, learning_rate_, global_step_)
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
summary_ = tf.summary.merge(summaries)
sess_config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(config=sess_config) as sess:
if self.debug:
summary_writer = tf.summary.FileWriter(self.train_summaries_dir,
sess.graph)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE,
report_tensor_allocations_upon_oom=True)
# run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
else:
summary_writer = tf.summary.FileWriter(self.train_summaries_dir)
run_options = None
run_metadata = None
saver = restore_networks(sess, self.params, ckpt)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for local_i, i in enumerate(range(start_iter, max_iter + 1)):
# if INTERACTIVE_PLOT:
# plt.title = "{} ({})".format(self.experiment, i)
decay_iters = local_i + iter_offset
if 'manual_decay_lrs' in self.params \
and 'manual_decay_iters' in self.params:
decay_index = 0
iter_counter = 0
for decay_i, manual_decay_iter in enumerate(self.params['manual_decay_iters']):
iter_counter += manual_decay_iter
if decay_iters <= iter_counter:
decay_index = decay_i
break
learning_rate = self.params['manual_decay_lrs'][decay_index]
else:
decay_interval = self.params['decay_interval']
decay_after = self.params.get('decay_after', 0)
if decay_iters >= decay_after:
decay_minimum = decay_after / decay_interval
decay = (decay_iters // decay_interval) - decay_minimum
learning_rate = self.params['learning_rate'] / (2 ** decay)
else:
learning_rate = self.params['learning_rate']
feed_dict = {learning_rate_: learning_rate, global_step_: i}
_, loss = sess.run(
[train_op, loss_],
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if i == 1 or i % self.params['display_interval'] == 0:
summary = sess.run(summary_, feed_dict=feed_dict)
summary_writer.add_summary(summary, i)
print("-- train: i = {}, loss = {}".format(i, loss))
save_path = os.path.join(self.ckpt_dir, 'model.ckpt')
saver.save(sess, save_path, global_step=max_iter)
summary_writer.close()
coord.request_stop()
coord.join(threads)
def eval(self, num):
assert num == 1 # TODO enable num > 1
with tf.Graph().as_default():
inputs = self.eval_batch_fn()
im1, im2, input_shape = inputs[:3]
truths = inputs[3:]
height, width, _ = tf.unstack(tf.squeeze(input_shape), num=3, axis=0)
im1 = resize_input(im1, height, width, 384, 1280)
im2 = resize_input(im2, height, width, 384, 1280)
_, flow, flow_bw = unsupervised_loss(
(im1, im2),
params=self.params,
normalization=self.normalization,
augment=False, return_flow=True)
im1 = resize_output(im1, height, width, 3)
im2 = resize_output(im2, height, width, 3)
flow = resize_output_flow(flow, height, width, 2)
flow_bw = resize_output_flow(flow_bw, height, width, 2)
variables_to_restore = tf.all_variables()
images_ = [image_warp(im1, flow) / 255,
flow_to_color(flow),
1 - (1 - occlusion(flow, flow_bw)[0]) * create_outgoing_mask(flow),
forward_warp(flow_bw) < DISOCC_THRESH]
image_names = ['warped image', 'flow', 'occ', 'reverse disocc']
values_ = []
averages_ = []
truth_tuples = []
if len(truths) == 4:
flow_occ, mask_occ, flow_noc, mask_noc = truths
flow_occ = resize_output_crop(flow_occ, height, width, 2)
flow_noc = resize_output_crop(flow_noc, height, width, 2)
mask_occ = resize_output_crop(mask_occ, height, width, 1)
mask_noc = resize_output_crop(mask_noc, height, width, 1)
truth_tuples.append(('occluded', flow_occ, mask_occ))
truth_tuples.append(('non-occluded', flow_noc, mask_noc))
images_ += [flow_error_image(flow, flow_occ, mask_occ, mask_noc)]
image_names += ['flow error']
else:
raise NotImplementedError()
truth_tuples.append(('flow', truths[0], truths[1]))
for name, gt_flow, mask in truth_tuples:
error_ = flow_error_avg(gt_flow, flow, mask)
error_avg_ = summarized_placeholder('AEE/' + name, key='eval_avg')
outliers_ = outlier_pct(gt_flow, flow, mask)
outliers_avg = summarized_placeholder('outliers/' + name,
key='eval_avg')
values_.extend([error_, outliers_])
averages_.extend([error_avg_, outliers_avg])
losses = tf.get_collection('losses')
for l in losses:
values_.append(l)
tensor_name = re.sub('tower_[0-9]*/', '', l.op.name)
loss_avg_ = summarized_placeholder(tensor_name, key='eval_avg')
averages_.append(loss_avg_)
ckpt = tf.train.get_checkpoint_state(self.ckpt_dir)
assert ckpt is not None, "No checkpoints to evaluate"
# Correct path for ckpts from different machine
# ckpt_path = self.ckpt_dir + "/" + os.path.basename(ckpt.model_checkpoint_path)
ckpt_path = ckpt.model_checkpoint_path
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter(self.eval_summaries_dir)
saver = tf.train.Saver(variables_to_restore)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
restore_networks(sess, self.params, ckpt)
global_step = ckpt_path.split('/')[-1].split('-')[-1]
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,
coord=coord)
averages = np.zeros(len(averages_))
num_iters = 0
image_lists = []
try:
while not coord.should_stop():
results = sess.run(values_ + images_)
values = results[:len(averages_)]
images = results[len(averages_):]
image_lists.append(images)
averages += values
num_iters += 1
except tf.errors.OutOfRangeError:
pass
averages /= num_iters
feed = {k: v for (k, v) in zip(averages_, averages)}
summary_ = tf.summary.merge_all('eval_avg')
summary = sess.run(summary_, feed_dict=feed)
summary_writer.add_summary(summary, global_step)
print("-- eval: i = {}".format(global_step))
coord.request_stop()
coord.join(threads)
summary_writer.close()
if self.interactive_plot:
if self.plot_proc:
self.plot_proc.terminate()
self.plot_proc = Process(target=_eval_plot,
args=([image_lists], image_names,
"{} (i={})".format(self.experiment,
global_step)))
self.plot_proc.start()
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
if g is not None:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
if grads != []:
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
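# Shape sketch (two towers, two shared variables):
#   tower_grads = [[(g0_a, v0), (g1_a, v1)], [(g0_b, v0), (g1_b, v1)]]
# zip(*tower_grads) regroups the gradients per variable, and the result is
#   [(mean(g0_a, g0_b), v0), (mean(g1_a, g1_b), v1)].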
``` |
{
"source": "johannesgreiner/qiskit-optimization",
"score": 3
} |
#### File: qiskit_optimization/converters/inequality_to_equality.py
```python
import copy
import math
from typing import List, Optional, Union
import numpy as np
from ..exceptions import QiskitOptimizationError
from ..problems.constraint import Constraint
from ..problems.linear_constraint import LinearConstraint
from ..problems.quadratic_constraint import QuadraticConstraint
from ..problems.quadratic_objective import QuadraticObjective
from ..problems.quadratic_program import QuadraticProgram
from ..problems.variable import Variable
from .quadratic_program_converter import QuadraticProgramConverter
class InequalityToEquality(QuadraticProgramConverter):
"""Convert inequality constraints into equality constraints by introducing slack variables.
Examples:
>>> from qiskit_optimization.problems import QuadraticProgram
>>> from qiskit_optimization.converters import InequalityToEquality
>>> problem = QuadraticProgram()
>>> # define a problem
>>> conv = InequalityToEquality()
>>> problem2 = conv.convert(problem)
"""
_delimiter = "@" # users are supposed not to use this character in variable names
def __init__(self, mode: str = "auto") -> None:
"""
Args:
mode: To choose the type of slack variables. There are 3 options for mode.
- 'integer': All slack variables will be integer variables.
- 'continuous': All slack variables will be continuous variables.
- 'auto': Use integer variables if possible, otherwise use continuous variables.
"""
self._src: Optional[QuadraticProgram] = None
self._dst: Optional[QuadraticProgram] = None
self._mode = mode
def convert(self, problem: QuadraticProgram) -> QuadraticProgram:
"""Convert a problem with inequality constraints into one with only equality constraints.
Args:
problem: The problem to be solved, that may contain inequality constraints.
Returns:
The converted problem, that contain only equality constraints.
Raises:
QiskitOptimizationError: If a variable type is not supported.
QiskitOptimizationError: If an unsupported mode is selected.
QiskitOptimizationError: If an unsupported sense is specified.
"""
self._src = copy.deepcopy(problem)
self._dst = QuadraticProgram(name=problem.name)
# set a converting mode
mode = self._mode
if mode not in ["integer", "continuous", "auto"]:
raise QiskitOptimizationError(f"Unsupported mode is selected: {mode}")
# Copy variables
for x in self._src.variables:
if x.vartype == Variable.Type.BINARY:
self._dst.binary_var(name=x.name)
elif x.vartype == Variable.Type.INTEGER:
self._dst.integer_var(name=x.name, lowerbound=x.lowerbound, upperbound=x.upperbound)
elif x.vartype == Variable.Type.CONTINUOUS:
self._dst.continuous_var(
name=x.name, lowerbound=x.lowerbound, upperbound=x.upperbound
)
else:
raise QiskitOptimizationError(f"Unsupported variable type {x.vartype}")
# Copy the objective function
constant = self._src.objective.constant
linear = self._src.objective.linear.to_dict(use_name=True)
quadratic = self._src.objective.quadratic.to_dict(use_name=True)
if self._src.objective.sense == QuadraticObjective.Sense.MINIMIZE:
self._dst.minimize(constant, linear, quadratic)
else:
self._dst.maximize(constant, linear, quadratic)
# For linear constraints
for lin_const in self._src.linear_constraints:
if lin_const.sense == Constraint.Sense.EQ:
self._dst.linear_constraint(
lin_const.linear.coefficients, lin_const.sense, lin_const.rhs, lin_const.name
)
elif lin_const.sense in [Constraint.Sense.LE, Constraint.Sense.GE]:
self._add_slack_var_linear_constraint(lin_const)
else:
raise QiskitOptimizationError(
f"Internal error: type of sense in {lin_const.name} is not supported: "
f"{lin_const.sense}"
)
# For quadratic constraints
for quad_const in self._src.quadratic_constraints:
if quad_const.sense == Constraint.Sense.EQ:
self._dst.quadratic_constraint(
quad_const.linear.coefficients,
quad_const.quadratic.coefficients,
quad_const.sense,
quad_const.rhs,
quad_const.name,
)
elif quad_const.sense in [Constraint.Sense.LE, Constraint.Sense.GE]:
self._add_slack_var_quadratic_constraint(quad_const)
else:
raise QiskitOptimizationError(
f"Internal error: type of sense in {quad_const.name} is not supported: "
f"{quad_const.sense}"
)
return self._dst
def _add_slack_var_linear_constraint(self, constraint: LinearConstraint):
linear = constraint.linear
sense = constraint.sense
name = constraint.name
any_float = self._any_float(linear.to_array())
mode = self._mode
if mode == "integer":
if any_float:
raise QiskitOptimizationError(
f'"{name}" contains float coefficients. '
                    f'We cannot use an integer slack variable for "{name}"'
)
elif mode == "auto":
mode = "continuous" if any_float else "integer"
new_rhs = constraint.rhs
if mode == "integer":
# If rhs is float number, round up/down to the nearest integer.
if sense == Constraint.Sense.LE:
new_rhs = math.floor(new_rhs)
if sense == Constraint.Sense.GE:
new_rhs = math.ceil(new_rhs)
lin_bounds = linear.bounds
lhs_lb = lin_bounds.lowerbound
lhs_ub = lin_bounds.upperbound
var_ub = 0.0
sign = 0
if sense == Constraint.Sense.LE:
var_ub = new_rhs - lhs_lb
if var_ub > 0:
sign = 1
elif sense == Constraint.Sense.GE:
var_ub = lhs_ub - new_rhs
if var_ub > 0:
sign = -1
new_linear = linear.to_dict(use_name=True)
if var_ub > 0:
# Add a slack variable.
mode_name = {"integer": "int", "continuous": "continuous"}
slack_name = f"{name}{self._delimiter}{mode_name[mode]}_slack"
if mode == "integer":
self._dst.integer_var(name=slack_name, lowerbound=0, upperbound=var_ub)
elif mode == "continuous":
self._dst.continuous_var(name=slack_name, lowerbound=0, upperbound=var_ub)
new_linear[slack_name] = sign
self._dst.linear_constraint(new_linear, "==", new_rhs, name)
def _add_slack_var_quadratic_constraint(self, constraint: QuadraticConstraint):
quadratic = constraint.quadratic
linear = constraint.linear
sense = constraint.sense
name = constraint.name
any_float = self._any_float(linear.to_array()) or self._any_float(quadratic.to_array())
mode = self._mode
if mode == "integer":
if any_float:
raise QiskitOptimizationError(
f'"{name}" contains float coefficients. '
                    f'We cannot use an integer slack variable for "{name}"'
)
elif mode == "auto":
mode = "continuous" if any_float else "integer"
new_rhs = constraint.rhs
if mode == "integer":
# If rhs is float number, round up/down to the nearest integer.
if sense == Constraint.Sense.LE:
new_rhs = math.floor(new_rhs)
if sense == Constraint.Sense.GE:
new_rhs = math.ceil(new_rhs)
lin_bounds = linear.bounds
quad_bounds = quadratic.bounds
lhs_lb = lin_bounds.lowerbound + quad_bounds.lowerbound
lhs_ub = lin_bounds.upperbound + quad_bounds.upperbound
var_ub = 0.0
sign = 0
if sense == Constraint.Sense.LE:
var_ub = new_rhs - lhs_lb
if var_ub > 0:
sign = 1
elif sense == Constraint.Sense.GE:
var_ub = lhs_ub - new_rhs
if var_ub > 0:
sign = -1
new_linear = linear.to_dict(use_name=True)
if var_ub > 0:
# Add a slack variable.
mode_name = {"integer": "int", "continuous": "continuous"}
slack_name = f"{name}{self._delimiter}{mode_name[mode]}_slack"
if mode == "integer":
self._dst.integer_var(name=slack_name, lowerbound=0, upperbound=var_ub)
elif mode == "continuous":
self._dst.continuous_var(name=slack_name, lowerbound=0, upperbound=var_ub)
new_linear[slack_name] = sign
self._dst.quadratic_constraint(new_linear, quadratic.coefficients, "==", new_rhs, name)
def interpret(self, x: Union[np.ndarray, List[float]]) -> np.ndarray:
"""Convert a result of a converted problem into that of the original problem.
Args:
x: The result of the converted problem or the given result in case of FAILURE.
Returns:
The result of the original problem.
"""
# convert back the optimization result into that of the original problem
names = [var.name for var in self._dst.variables]
# interpret slack variables
sol = {name: x[i] for i, name in enumerate(names)}
new_x = np.zeros(self._src.get_num_vars())
for i, var in enumerate(self._src.variables):
new_x[i] = sol[var.name]
return new_x
@staticmethod
def _any_float(values: np.ndarray) -> bool:
"""Check whether the list contains float or not.
This method is used to check whether a constraint contain float coefficients or not.
Args:
values: Coefficients of the constraint
Returns:
bool: If the constraint contains float coefficients, this returns True, else False.
"""
return any(isinstance(v, float) and not v.is_integer() for v in values)
@property
def mode(self) -> str:
"""Returns the mode of the converter
Returns:
The mode of the converter used for additional slack variables
"""
return self._mode
@mode.setter
def mode(self, mode: str) -> None:
"""Set a new mode for the converter
Args:
mode: The new mode for the converter
"""
self._mode = mode
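# Minimal usage sketch with assumed variable/constraint names (not from this
# module): a constraint "x0 + x1 <= 1" named "c0" gains an integer slack
# variable and becomes "x0 + x1 + c0@int_slack == 1".
#   qp = QuadraticProgram()
#   qp.binary_var("x0"); qp.binary_var("x1")
#   qp.linear_constraint({"x0": 1, "x1": 1}, "<=", 1, "c0")
#   qp_eq = InequalityToEquality(mode="auto").convert(qp)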
```
#### File: qiskit_optimization/translators/ising.py
```python
import math
from typing import Tuple
import numpy as np
from qiskit.opflow import I, ListOp, OperatorBase, PauliOp, PauliSumOp, SummedOp
from qiskit.quantum_info import Pauli
from qiskit_optimization.exceptions import QiskitOptimizationError
from qiskit_optimization.problems.quadratic_program import QuadraticProgram
def to_ising(quad_prog: QuadraticProgram) -> Tuple[OperatorBase, float]:
"""Return the Ising Hamiltonian of this problem.
Variables are mapped to qubits in the same order, i.e.,
i-th variable is mapped to i-th qubit.
See https://github.com/Qiskit/qiskit-terra/issues/1148 for details.
Args:
quad_prog: The problem to be translated.
Returns:
A tuple (qubit_op, offset) comprising the qubit operator for the problem
and offset for the constant value in the Ising Hamiltonian.
Raises:
QiskitOptimizationError: If an integer variable or a continuous variable exists
in the problem.
QiskitOptimizationError: If constraints exist in the problem.
"""
# if problem has variables that are not binary, raise an error
if quad_prog.get_num_vars() > quad_prog.get_num_binary_vars():
raise QiskitOptimizationError(
"The type of all variables must be binary. "
"You can use `QuadraticProgramToQubo` converter "
"to convert integer variables to binary variables. "
"If the problem contains continuous variables, `to_ising` cannot handle it. "
"You might be able to solve it with `ADMMOptimizer`."
)
# if constraints exist, raise an error
if quad_prog.linear_constraints or quad_prog.quadratic_constraints:
raise QiskitOptimizationError(
"There must be no constraint in the problem. "
"You can use `QuadraticProgramToQubo` converter "
"to convert constraints to penalty terms of the objective function."
)
# initialize Hamiltonian.
num_nodes = quad_prog.get_num_vars()
pauli_list = []
offset = 0.0
zero = np.zeros(num_nodes, dtype=bool)
# set a sign corresponding to a maximized or minimized problem.
# sign == 1 is for minimized problem. sign == -1 is for maximized problem.
sense = quad_prog.objective.sense.value
# convert a constant part of the object function into Hamiltonian.
offset += quad_prog.objective.constant * sense
# convert linear parts of the object function into Hamiltonian.
for idx, coef in quad_prog.objective.linear.to_dict().items():
z_p = zero.copy()
weight = coef * sense / 2
z_p[idx] = True
pauli_list.append(PauliOp(Pauli((z_p, zero)), -weight))
offset += weight
# create Pauli terms
for (i, j), coeff in quad_prog.objective.quadratic.to_dict().items():
weight = coeff * sense / 4
if i == j:
offset += weight
else:
z_p = zero.copy()
z_p[i] = True
z_p[j] = True
pauli_list.append(PauliOp(Pauli((z_p, zero)), weight))
z_p = zero.copy()
z_p[i] = True
pauli_list.append(PauliOp(Pauli((z_p, zero)), -weight))
z_p = zero.copy()
z_p[j] = True
pauli_list.append(PauliOp(Pauli((z_p, zero)), -weight))
offset += weight
# Remove paulis whose coefficients are zeros.
qubit_op = sum(pauli_list)
# qubit_op could be the integer 0, in this case return an identity operator of
# appropriate size
if isinstance(qubit_op, OperatorBase):
qubit_op = qubit_op.reduce()
else:
qubit_op = I ^ num_nodes
return qubit_op, offset
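# Worked sketch (not part of the original module): minimizing a single binary
# variable x maps via x = (1 - Z)/2 to the operator -0.5 * Z with offset 0.5:
#   qp = QuadraticProgram(); qp.binary_var("x"); qp.minimize(linear={"x": 1})
#   op, offset = to_ising(qp)   # op ~ -0.5 * Z, offset == 0.5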
def from_ising(
qubit_op: OperatorBase,
offset: float = 0.0,
linear: bool = False,
) -> QuadraticProgram:
r"""Create a quadratic program from a qubit operator and a shift value.
Variables are mapped to qubits in the same order, i.e.,
i-th variable is mapped to i-th qubit.
See https://github.com/Qiskit/qiskit-terra/issues/1148 for details.
Args:
qubit_op: The qubit operator of the problem.
offset: The constant term in the Ising Hamiltonian.
linear: If linear is True, :math:`x^2` is treated as a linear term
since :math:`x^2 = x` for :math:`x \in \{0,1\}`.
            Otherwise, :math:`x^2` is treated as a quadratic term.
The default value is False.
Returns:
The quadratic program corresponding to the qubit operator.
Raises:
QiskitOptimizationError: if there are Pauli Xs or Ys in any Pauli term
QiskitOptimizationError: if there are more than 2 Pauli Zs in any Pauli term
QiskitOptimizationError: if any Pauli term has an imaginary coefficient
NotImplementedError: If the input operator is a ListOp
"""
if isinstance(qubit_op, PauliSumOp):
qubit_op = qubit_op.to_pauli_op()
# No support for ListOp yet, this can be added in future
# pylint: disable=unidiomatic-typecheck
if type(qubit_op) == ListOp:
raise NotImplementedError(
"Conversion of a ListOp is not supported, convert each "
"operator in the ListOp separately."
)
quad_prog = QuadraticProgram()
quad_prog.binary_var_list(qubit_op.num_qubits)
if not isinstance(qubit_op, SummedOp):
pauli_list = [qubit_op.to_pauli_op()]
else:
pauli_list = qubit_op.to_pauli_op()
# prepare a matrix of coefficients of Pauli terms
# `pauli_coeffs_diag` is the diagonal part
# `pauli_coeffs_triu` is the upper triangular part
pauli_coeffs_diag = [0.0] * qubit_op.num_qubits
pauli_coeffs_triu = {}
for pauli_op in pauli_list:
pauli_op = pauli_op.to_pauli_op()
pauli = pauli_op.primitive
coeff = pauli_op.coeff
if not math.isclose(coeff.imag, 0.0, abs_tol=1e-10):
raise QiskitOptimizationError(f"Imaginary coefficient exists: {pauli_op}")
if np.any(pauli.x):
raise QiskitOptimizationError(f"Pauli X or Y exists in the Pauli term: {pauli}")
# indices of Pauli Zs in the Pauli term
z_index = np.where(pauli.z)[0]
num_z = len(z_index)
if num_z == 1:
pauli_coeffs_diag[z_index[0]] = coeff.real
elif num_z == 2:
pauli_coeffs_triu[z_index[0], z_index[1]] = coeff.real
else:
raise QiskitOptimizationError(
f"There are more than 2 Pauli Zs in the Pauli term: {pauli}"
)
linear_terms = {}
quadratic_terms = {}
# For quadratic pauli terms of operator
# x_i * x_j = (1 - Z_i - Z_j + Z_i * Z_j)/4
for (i, j), weight in pauli_coeffs_triu.items():
# Add a quadratic term to the object function of `QuadraticProgram`
# The coefficient of the quadratic term in `QuadraticProgram` is
# 4 * weight of the pauli
quadratic_terms[i, j] = 4 * weight
pauli_coeffs_diag[i] += weight
pauli_coeffs_diag[j] += weight
offset -= weight
# After processing quadratic pauli terms, only linear paulis are left
# x_i = (1 - Z_i)/2
for i, weight in enumerate(pauli_coeffs_diag):
# Add a linear term to the object function of `QuadraticProgram`
# The coefficient of the linear term in `QuadraticProgram` is
# 2 * weight of the pauli
if linear:
linear_terms[i] = -2 * weight
else:
quadratic_terms[i, i] = -2 * weight
offset += weight
quad_prog.minimize(constant=offset, linear=linear_terms, quadratic=quadratic_terms)
return quad_prog
```
#### File: test/converters/test_converters.py
```python
import unittest
from test.optimization_test_case import QiskitOptimizationTestCase
import numpy as np
from docplex.mp.model import Model
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit.opflow import Z, I
import qiskit_optimization.optionals as _optionals
from qiskit_optimization import QuadraticProgram, QiskitOptimizationError
from qiskit_optimization.algorithms import (
MinimumEigenOptimizer,
CplexOptimizer,
ADMMOptimizer,
)
from qiskit_optimization.algorithms.admm_optimizer import ADMMParameters
from qiskit_optimization.converters import (
InequalityToEquality,
IntegerToBinary,
LinearEqualityToPenalty,
MaximizeToMinimize,
)
from qiskit_optimization.problems import Constraint, Variable
from qiskit_optimization.translators import from_docplex_mp
QUBIT_OP_MAXIMIZE_SAMPLE = (
-199999.5 * (I ^ I ^ I ^ Z)
+ -399999.5 * (I ^ I ^ Z ^ I)
+ -599999.5 * (I ^ Z ^ I ^ I)
+ -799999.5 * (Z ^ I ^ I ^ I)
+ 100000 * (I ^ I ^ Z ^ Z)
+ 150000 * (I ^ Z ^ I ^ Z)
+ 300000 * (I ^ Z ^ Z ^ I)
+ 200000 * (Z ^ I ^ I ^ Z)
+ 400000 * (Z ^ I ^ Z ^ I)
+ 600000 * (Z ^ Z ^ I ^ I)
)
OFFSET_MAXIMIZE_SAMPLE = 1149998
class TestConverters(QiskitOptimizationTestCase):
"""Test Converters"""
def test_empty_problem(self):
"""Test empty problem"""
op = QuadraticProgram()
conv = InequalityToEquality()
op = conv.convert(op)
conv = IntegerToBinary()
op = conv.convert(op)
conv = LinearEqualityToPenalty()
op = conv.convert(op)
conv = MaximizeToMinimize()
op = conv.convert(op)
_, shift = op.to_ising()
self.assertEqual(shift, 0.0)
def test_valid_variable_type(self):
"""Validate the types of the variables for QuadraticProgram.to_ising."""
# Integer variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.integer_var(0, 10, "int_var")
_ = op.to_ising()
# Continuous variable
with self.assertRaises(QiskitOptimizationError):
op = QuadraticProgram()
op.continuous_var(0, 10, "continuous_var")
_ = op.to_ising()
def test_inequality_binary(self):
"""Test InequalityToEqualityConverter with binary variables"""
op = QuadraticProgram()
for i in range(3):
op.binary_var(name=f"x{i}")
# Linear constraints
linear_constraint = {"x0": 1, "x1": 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, "x0x1")
linear_constraint = {"x1": 1, "x2": -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, "x1x2")
linear_constraint = {"x0": 1, "x2": 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, "x0x2")
# Quadratic constraints
quadratic = {("x0", "x1"): 1, ("x1", "x2"): 2}
op.quadratic_constraint({}, quadratic, Constraint.Sense.LE, 3, "x0x1_x1x2LE")
quadratic = {("x0", "x1"): 3, ("x1", "x2"): 4}
op.quadratic_constraint({}, quadratic, Constraint.Sense.GE, 3, "x0x1_x1x2GE")
# Convert inequality constraints into equality constraints
conv = InequalityToEquality()
op2 = conv.convert(op)
self.assertListEqual(
[v.name for v in op2.variables],
[
"x0",
"x1",
"x2",
"x1x2@int_slack",
"x0x2@int_slack",
"x0x1_x1x2LE@int_slack",
"x0x1_x1x2GE@int_slack",
],
)
# Check names and objective senses
self.assertEqual(op.name, op2.name)
self.assertEqual(op.objective.sense, op2.objective.sense)
# For linear constraints
lst = [
op2.linear_constraints[0].linear.to_dict()[0],
op2.linear_constraints[0].linear.to_dict()[1],
]
self.assertListEqual(lst, [1, 1])
self.assertEqual(op2.linear_constraints[0].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[1].linear.to_dict()[1],
op2.linear_constraints[1].linear.to_dict()[2],
op2.linear_constraints[1].linear.to_dict()[3],
]
self.assertListEqual(lst, [1, -1, 1])
lst = [op2.variables[3].lowerbound, op2.variables[3].upperbound]
self.assertListEqual(lst, [0, 3])
self.assertEqual(op2.linear_constraints[1].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[2].linear.to_dict()[0],
op2.linear_constraints[2].linear.to_dict()[2],
op2.linear_constraints[2].linear.to_dict()[4],
]
self.assertListEqual(lst, [1, 3, -1])
lst = [op2.variables[4].lowerbound, op2.variables[4].upperbound]
self.assertListEqual(lst, [0, 2])
self.assertEqual(op2.linear_constraints[2].sense, Constraint.Sense.EQ)
# For quadratic constraints
lst = [
op2.quadratic_constraints[0].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[0].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[0].linear.to_dict()[5],
]
self.assertListEqual(lst, [1, 2, 1])
lst = [op2.variables[5].lowerbound, op2.variables[5].upperbound]
self.assertListEqual(lst, [0, 3])
lst = [
op2.quadratic_constraints[1].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[1].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[1].linear.to_dict()[6],
]
self.assertListEqual(lst, [3, 4, -1])
lst = [op2.variables[6].lowerbound, op2.variables[6].upperbound]
self.assertListEqual(lst, [0, 4])
new_x = conv.interpret(np.arange(7))
np.testing.assert_array_almost_equal(new_x, np.arange(3))
def test_inequality_integer(self):
"""Test InequalityToEqualityConverter with integer variables"""
op = QuadraticProgram()
for i in range(3):
op.integer_var(name=f"x{i}", lowerbound=-3, upperbound=3)
# Linear constraints
linear_constraint = {"x0": 1, "x1": 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, "x0x1")
linear_constraint = {"x1": 1, "x2": -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, "x1x2")
linear_constraint = {"x0": 1, "x2": 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, "x0x2")
# Quadratic constraints
quadratic = {("x0", "x1"): 1, ("x1", "x2"): 2}
op.quadratic_constraint({}, quadratic, Constraint.Sense.LE, 3, "x0x1_x1x2LE")
quadratic = {("x0", "x1"): 3, ("x1", "x2"): 4}
op.quadratic_constraint({}, quadratic, Constraint.Sense.GE, 3, "x0x1_x1x2GE")
conv = InequalityToEquality()
op2 = conv.convert(op)
self.assertListEqual(
[v.name for v in op2.variables],
[
"x0",
"x1",
"x2",
"x1x2@int_slack",
"x0x2@int_slack",
"x0x1_x1x2LE@int_slack",
"x0x1_x1x2GE@int_slack",
],
)
# For linear constraints
lst = [
op2.linear_constraints[0].linear.to_dict()[0],
op2.linear_constraints[0].linear.to_dict()[1],
]
self.assertListEqual(lst, [1, 1])
self.assertEqual(op2.linear_constraints[0].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[1].linear.to_dict()[1],
op2.linear_constraints[1].linear.to_dict()[2],
op2.linear_constraints[1].linear.to_dict()[3],
]
self.assertListEqual(lst, [1, -1, 1])
lst = [op2.variables[3].lowerbound, op2.variables[3].upperbound]
self.assertListEqual(lst, [0, 8])
self.assertEqual(op2.linear_constraints[1].sense, Constraint.Sense.EQ)
lst = [
op2.linear_constraints[2].linear.to_dict()[0],
op2.linear_constraints[2].linear.to_dict()[2],
op2.linear_constraints[2].linear.to_dict()[4],
]
self.assertListEqual(lst, [1, 3, -1])
lst = [op2.variables[4].lowerbound, op2.variables[4].upperbound]
self.assertListEqual(lst, [0, 10])
self.assertEqual(op2.linear_constraints[2].sense, Constraint.Sense.EQ)
# For quadratic constraints
lst = [
op2.quadratic_constraints[0].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[0].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[0].linear.to_dict()[5],
]
self.assertListEqual(lst, [1, 2, 1])
lst = [op2.variables[5].lowerbound, op2.variables[5].upperbound]
self.assertListEqual(lst, [0, 30])
lst = [
op2.quadratic_constraints[1].quadratic.to_dict()[(0, 1)],
op2.quadratic_constraints[1].quadratic.to_dict()[(1, 2)],
op2.quadratic_constraints[1].linear.to_dict()[6],
]
self.assertListEqual(lst, [3, 4, -1])
lst = [op2.variables[6].lowerbound, op2.variables[6].upperbound]
self.assertListEqual(lst, [0, 60])
new_x = conv.interpret(np.arange(7))
np.testing.assert_array_almost_equal(new_x, np.arange(3))
def test_inequality_mode_integer(self):
"""Test integer mode of InequalityToEqualityConverter()"""
op = QuadraticProgram()
for i in range(3):
op.binary_var(name=f"x{i}")
# Linear constraints
linear_constraint = {"x0": 1, "x1": 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, "x0x1")
linear_constraint = {"x1": 1, "x2": -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, "x1x2")
linear_constraint = {"x0": 1, "x2": 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, "x0x2")
conv = InequalityToEquality(mode="integer")
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.INTEGER, Variable.Type.INTEGER])
def test_inequality_mode_continuous(self):
"""Test continuous mode of InequalityToEqualityConverter()"""
op = QuadraticProgram()
for i in range(3):
op.binary_var(name=f"x{i}")
# Linear constraints
linear_constraint = {"x0": 1, "x1": 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, "x0x1")
linear_constraint = {"x1": 1, "x2": -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, "x1x2")
linear_constraint = {"x0": 1, "x2": 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, "x0x2")
conv = InequalityToEquality(mode="continuous")
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.CONTINUOUS, Variable.Type.CONTINUOUS])
def test_inequality_mode_auto(self):
"""Test auto mode of InequalityToEqualityConverter()"""
op = QuadraticProgram()
for i in range(3):
op.binary_var(name=f"x{i}")
# Linear constraints
linear_constraint = {"x0": 1, "x1": 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, "x0x1")
linear_constraint = {"x1": 1, "x2": -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, "x1x2")
linear_constraint = {"x0": 1.1, "x2": 2.2}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 3.3, "x0x2")
conv = InequalityToEquality(mode="auto")
op2 = conv.convert(op)
lst = [op2.variables[3].vartype, op2.variables[4].vartype]
self.assertListEqual(lst, [Variable.Type.INTEGER, Variable.Type.CONTINUOUS])
def test_penalize_sense(self):
"""Test PenalizeLinearEqualityConstraints with senses"""
op = QuadraticProgram()
for i in range(3):
op.binary_var(name=f"x{i}")
# Linear constraints
linear_constraint = {"x0": 1, "x1": 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, "x0x1")
linear_constraint = {"x1": 1, "x2": -1}
op.linear_constraint(linear_constraint, Constraint.Sense.LE, 2, "x1x2")
linear_constraint = {"x0": 1, "x2": 3}
op.linear_constraint(linear_constraint, Constraint.Sense.GE, 2, "x0x2")
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
with self.assertRaises(QiskitOptimizationError):
conv.convert(op)
def test_penalize_binary(self):
"""Test PenalizeLinearEqualityConstraints with binary variables"""
op = QuadraticProgram()
for i in range(3):
op.binary_var(name=f"x{i}")
# Linear constraints
linear_constraint = {"x0": 1, "x1": 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, "x0x1")
linear_constraint = {"x1": 1, "x2": -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, "x1x2")
linear_constraint = {"x0": 1, "x2": 3}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, "x0x2")
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_linear_constraints(), 0)
new_x = conv.interpret(np.arange(3))
np.testing.assert_array_almost_equal(new_x, np.arange(3))
def test_penalize_integer(self):
"""Test PenalizeLinearEqualityConstraints with integer variables"""
op = QuadraticProgram()
for i in range(3):
op.integer_var(name=f"x{i}", lowerbound=-3, upperbound=3)
# Linear constraints
linear_constraint = {"x0": 1, "x1": 1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, "x0x1")
linear_constraint = {"x1": 1, "x2": -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 2, "x1x2")
linear_constraint = {"x0": 1, "x2": -1}
op.linear_constraint(linear_constraint, Constraint.Sense.EQ, 1, "x0x2")
op.minimize(constant=3, linear={"x0": 1}, quadratic={("x1", "x2"): 2})
self.assertEqual(op.get_num_linear_constraints(), 3)
conv = LinearEqualityToPenalty()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_linear_constraints(), 0)
new_x = conv.interpret([0, 1, -1])
np.testing.assert_array_almost_equal(new_x, [0, 1, -1])
def test_integer_to_binary(self):
"""Test integer to binary"""
op = QuadraticProgram()
for i in range(0, 2):
op.binary_var(name=f"x{i}")
op.integer_var(name="x2", lowerbound=0, upperbound=5)
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.maximize(0, linear, {})
conv = IntegerToBinary()
op2 = conv.convert(op)
self.assertEqual(op2.get_num_vars(), 5)
self.assertListEqual([x.vartype for x in op2.variables], [Variable.Type.BINARY] * 5)
self.assertListEqual([x.name for x in op2.variables], ["x0", "x1", "x2@0", "x2@1", "x2@2"])
dct = op2.objective.linear.to_dict()
self.assertEqual(dct[2], 3)
self.assertEqual(dct[3], 6)
self.assertEqual(dct[4], 6)
def test_binary_to_integer(self):
"""Test binary to integer"""
op = QuadraticProgram()
for i in range(0, 2):
op.binary_var(name=f"x{i}")
op.integer_var(name="x2", lowerbound=0, upperbound=5)
linear = {"x0": 1, "x1": 2, "x2": 1}
op.maximize(0, linear, {})
linear = {}
for x in op.variables:
linear[x.name] = 1
op.linear_constraint(linear, Constraint.Sense.EQ, 6, "x0x1x2")
conv = IntegerToBinary()
_ = conv.convert(op)
new_x = conv.interpret([0, 1, 1, 1, 1])
np.testing.assert_array_almost_equal(new_x, [0, 1, 5])
def test_optimizationproblem_to_ising(self):
"""Test optimization problem to operators"""
op = QuadraticProgram()
for i in range(4):
op.binary_var(name=f"x{i}")
linear = {}
for x in op.variables:
linear[x.name] = 1
op.maximize(0, linear, {})
linear = {}
for i, x in enumerate(op.variables):
linear[x.name] = i + 1
op.linear_constraint(linear, Constraint.Sense.EQ, 3, "sum1")
penalize = LinearEqualityToPenalty(penalty=1e5)
op2 = penalize.convert(op)
qubitop, offset = op2.to_ising()
self.assertEqual(qubitop, QUBIT_OP_MAXIMIZE_SAMPLE)
self.assertEqual(offset, OFFSET_MAXIMIZE_SAMPLE)
def test_ising_to_quadraticprogram_linear(self):
"""Test optimization problem to operators with linear=True"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
quadratic = QuadraticProgram("test")
quadratic.from_ising(op, offset, linear=True)
self.assertEqual(quadratic.name, "test")
self.assertEqual(quadratic.get_num_vars(), 4)
self.assertEqual(quadratic.get_num_linear_constraints(), 0)
self.assertEqual(quadratic.get_num_quadratic_constraints(), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
linear_matrix = np.zeros((1, 4))
linear_matrix[0, 0] = -500001
linear_matrix[0, 1] = -800001
linear_matrix[0, 2] = -900001
linear_matrix[0, 3] = -800001
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 3] = 2400000
np.testing.assert_array_almost_equal(
quadratic.objective.linear.coefficients.toarray(), linear_matrix
)
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
def test_ising_to_quadraticprogram_quadratic(self):
"""Test optimization problem to operators with linear=False"""
op = QUBIT_OP_MAXIMIZE_SAMPLE
offset = OFFSET_MAXIMIZE_SAMPLE
quadratic = QuadraticProgram("test")
quadratic.from_ising(op, offset, linear=False)
self.assertEqual(quadratic.name, "test")
self.assertEqual(quadratic.get_num_vars(), 4)
self.assertEqual(quadratic.get_num_linear_constraints(), 0)
self.assertEqual(quadratic.get_num_quadratic_constraints(), 0)
self.assertEqual(quadratic.objective.sense, quadratic.objective.Sense.MINIMIZE)
self.assertAlmostEqual(quadratic.objective.constant, 900000)
quadratic_matrix = np.zeros((4, 4))
quadratic_matrix[0, 0] = -500001
quadratic_matrix[0, 1] = 400000
quadratic_matrix[0, 2] = 600000
quadratic_matrix[0, 3] = 800000
quadratic_matrix[1, 1] = -800001
quadratic_matrix[1, 2] = 1200000
quadratic_matrix[1, 3] = 1600000
quadratic_matrix[2, 2] = -900001
quadratic_matrix[2, 3] = 2400000
quadratic_matrix[3, 3] = -800001
np.testing.assert_array_almost_equal(
quadratic.objective.quadratic.coefficients.toarray(), quadratic_matrix
)
@unittest.skipIf(not _optionals.HAS_CPLEX, "CPLEX not available.")
def test_continuous_variable_decode(self):
"""Test decode func of IntegerToBinaryConverter for continuous variables"""
mdl = Model("test_continuous_varable_decode")
c = mdl.continuous_var(lb=0, ub=10.9, name="c")
x = mdl.binary_var(name="x")
mdl.maximize(c + x * x)
op = from_docplex_mp(mdl)
converter = IntegerToBinary()
op = converter.convert(op)
admm_params = ADMMParameters()
qubo_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())
continuous_optimizer = CplexOptimizer(cplex_parameters={"threads": 1, "randomseed": 1})
solver = ADMMOptimizer(
qubo_optimizer=qubo_optimizer,
continuous_optimizer=continuous_optimizer,
params=admm_params,
)
result = solver.solve(op)
new_x = converter.interpret(result.x)
self.assertEqual(new_x[0], 10.9)
def test_auto_penalty(self):
"""Test auto penalty function"""
op = QuadraticProgram()
op.binary_var("x")
op.binary_var("y")
op.binary_var("z")
op.minimize(constant=3, linear={"x": 1}, quadratic={("x", "y"): 2})
op.linear_constraint(linear={"x": 1, "y": 1, "z": 1}, sense="EQ", rhs=2, name="xyz_eq")
lineq2penalty = LinearEqualityToPenalty(penalty=1e5)
lineq2penalty_auto = LinearEqualityToPenalty()
qubo = lineq2penalty.convert(op)
qubo_auto = lineq2penalty_auto.convert(op)
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
result = exact.solve(qubo)
result_auto = exact.solve(qubo_auto)
self.assertEqual(result.fval, result_auto.fval)
np.testing.assert_array_almost_equal(result.x, result_auto.x)
def test_auto_penalty_warning(self):
"""Test warnings of auto penalty function"""
op = QuadraticProgram()
op.binary_var("x")
op.binary_var("y")
op.binary_var("z")
op.minimize(linear={"x": 1, "y": 2})
op.linear_constraint(linear={"x": 0.5, "y": 0.5, "z": 0.5}, sense="EQ", rhs=1, name="xyz")
with self.assertLogs("qiskit_optimization", level="WARNING") as log:
lineq2penalty = LinearEqualityToPenalty()
_ = lineq2penalty.convert(op)
warning = (
"WARNING:qiskit_optimization.converters.linear_equality_to_penalty:"
"Warning: Using 100000.000000 for the penalty coefficient because a float "
"coefficient exists in constraints. \nThe value could be too small. If so, "
"set the penalty coefficient manually."
)
self.assertIn(warning, log.output)
def test_penalty_recalculation_when_reusing(self):
"""Test the penalty retrieval and recalculation of LinearEqualityToPenalty"""
op = QuadraticProgram()
op.binary_var("x")
op.binary_var("y")
op.binary_var("z")
op.minimize(constant=3, linear={"x": 1}, quadratic={("x", "y"): 2})
op.linear_constraint(linear={"x": 1, "y": 1, "z": 1}, sense="EQ", rhs=2, name="xyz_eq")
# First, create a converter with no penalty
lineq2penalty = LinearEqualityToPenalty()
self.assertIsNone(lineq2penalty.penalty)
# Then converter must calculate the penalty for the problem (should be 4.0)
lineq2penalty.convert(op)
self.assertEqual(4, lineq2penalty.penalty)
# Re-use the converter with a newly defined penalty
lineq2penalty.penalty = 3
lineq2penalty.convert(op)
self.assertEqual(3, lineq2penalty.penalty)
# Re-use the converter letting the penalty be calculated again
lineq2penalty.penalty = None
lineq2penalty.convert(op)
self.assertEqual(4, lineq2penalty.penalty)
def test_penalty_recalculation_when_reusing2(self):
"""Test the penalty retrieval and recalculation of LinearEqualityToPenalty 2"""
op = QuadraticProgram()
op.binary_var("x")
op.binary_var("y")
op.binary_var("z")
op.minimize(constant=3, linear={"x": 1}, quadratic={("x", "y"): 2})
op.linear_constraint(linear={"x": 1, "y": 1, "z": 1}, sense="EQ", rhs=2, name="xyz_eq")
# First, create a converter with no penalty
lineq2penalty = LinearEqualityToPenalty()
self.assertIsNone(lineq2penalty.penalty)
# Then converter must calculate the penalty for the problem (should be 4.0)
lineq2penalty.convert(op)
self.assertEqual(4, lineq2penalty.penalty)
# Re-use the converter for a new problem
op2 = QuadraticProgram()
op2.binary_var("x")
op2.minimize(linear={"x": 10})
op2.linear_constraint({"x": 1}, "==", 0)
lineq2penalty.convert(op2)
self.assertEqual(11, lineq2penalty.penalty)
def test_linear_equality_to_penalty_decode(self):
"""Test decode func of LinearEqualityToPenalty"""
qprog = QuadraticProgram()
qprog.binary_var("x")
qprog.binary_var("y")
qprog.binary_var("z")
qprog.maximize(linear={"x": 3, "y": 1, "z": 1})
qprog.linear_constraint(linear={"x": 1, "y": 1, "z": 1}, sense="EQ", rhs=2, name="xyz_eq")
lineq2penalty = LinearEqualityToPenalty()
qubo = lineq2penalty.convert(qprog)
exact_mes = NumPyMinimumEigensolver()
exact = MinimumEigenOptimizer(exact_mes)
result = exact.solve(qubo)
new_x = lineq2penalty.interpret(result.x)
np.testing.assert_array_almost_equal(new_x, [1, 1, 0])
infeasible_x = lineq2penalty.interpret([1, 1, 1])
np.testing.assert_array_almost_equal(infeasible_x, [1, 1, 1])
def test_0var_range_inequality(self):
"""Test InequalityToEquality converter when the var_rang of the slack variable is 0"""
op = QuadraticProgram()
op.binary_var("x")
op.binary_var("y")
op.linear_constraint(linear={"x": 1, "y": 1}, sense="LE", rhs=0, name="xy_leq1")
op.linear_constraint(linear={"x": 1, "y": 1}, sense="GE", rhs=2, name="xy_geq1")
op.quadratic_constraint(quadratic={("x", "x"): 1}, sense="LE", rhs=0, name="xy_leq2")
op.quadratic_constraint(quadratic={("x", "y"): 1}, sense="GE", rhs=1, name="xy_geq2")
ineq2eq = InequalityToEquality()
new_op = ineq2eq.convert(op)
self.assertEqual(new_op.get_num_vars(), 2)
self.assertTrue(
all(l_const.sense == Constraint.Sense.EQ for l_const in new_op.linear_constraints)
)
self.assertTrue(
all(q_const.sense == Constraint.Sense.EQ for q_const in new_op.quadratic_constraints)
)
def test_integer_to_binary2(self):
"""Test integer to binary variables 2"""
mod = QuadraticProgram()
mod.integer_var(name="x", lowerbound=0, upperbound=1)
mod.integer_var(name="y", lowerbound=0, upperbound=1)
mod.minimize(1, {"x": 1}, {("x", "y"): 2})
mod.linear_constraint({"x": 1}, "==", 1)
mod.quadratic_constraint({"x": 1}, {("x", "y"): 2}, "==", 1)
mod2 = IntegerToBinary().convert(mod)
self.assertListEqual(
[e.name + "@0" for e in mod.variables], [e.name for e in mod2.variables]
)
self.assertDictEqual(mod.objective.linear.to_dict(), mod2.objective.linear.to_dict())
self.assertDictEqual(mod.objective.quadratic.to_dict(), mod2.objective.quadratic.to_dict())
self.assertEqual(mod.get_num_linear_constraints(), mod2.get_num_linear_constraints())
for cst, cst2 in zip(mod.linear_constraints, mod2.linear_constraints):
self.assertDictEqual(cst.linear.to_dict(), cst2.linear.to_dict())
self.assertEqual(mod.get_num_quadratic_constraints(), mod2.get_num_quadratic_constraints())
for cst, cst2 in zip(mod.quadratic_constraints, mod2.quadratic_constraints):
self.assertDictEqual(cst.linear.to_dict(), cst2.linear.to_dict())
self.assertDictEqual(cst.quadratic.to_dict(), cst2.quadratic.to_dict())
def test_integer_to_binary_quadratic(self):
"""Test integer to binary variables with quadratic expressions"""
mod = QuadraticProgram()
mod.integer_var(name="x", lowerbound=10, upperbound=13)
mod.minimize(quadratic={("x", "x"): 1})
mod2 = IntegerToBinary().convert(mod)
self.assertListEqual([e.name for e in mod2.variables], ["x@0", "x@1"])
self.assertEqual(mod.get_num_linear_constraints(), 0)
self.assertEqual(mod.get_num_quadratic_constraints(), 0)
self.assertAlmostEqual(mod2.objective.constant, 100)
self.assertDictEqual(mod2.objective.linear.to_dict(use_name=True), {"x@0": 20, "x@1": 40})
self.assertDictEqual(
mod2.objective.quadratic.to_dict(use_name=True),
{("x@0", "x@0"): 1, ("x@1", "x@1"): 4, ("x@0", "x@1"): 4},
)
def test_integer_to_binary_zero_range_variable(self):
"""Test integer to binary variables with zero range variables"""
with self.subTest("zero range variable in a linear expression of the objective"):
mod = QuadraticProgram()
mod.integer_var(name="x", lowerbound=10, upperbound=10)
mod.minimize(linear={"x": 1})
mod2 = IntegerToBinary().convert(mod)
self.assertListEqual([e.name for e in mod2.variables], ["x@0"])
self.assertEqual(mod.get_num_linear_constraints(), 0)
self.assertEqual(mod.get_num_quadratic_constraints(), 0)
self.assertAlmostEqual(mod2.objective.constant, 10)
self.assertDictEqual(mod2.objective.linear.to_dict(), {})
self.assertDictEqual(mod2.objective.quadratic.to_dict(), {})
with self.subTest("zero range variable in a quadratic expression of the objective"):
mod = QuadraticProgram()
mod.integer_var(name="x", lowerbound=10, upperbound=10)
mod.minimize(quadratic={("x", "x"): 1})
mod2 = IntegerToBinary().convert(mod)
self.assertListEqual([e.name for e in mod2.variables], ["x@0"])
self.assertEqual(mod.get_num_linear_constraints(), 0)
self.assertEqual(mod.get_num_quadratic_constraints(), 0)
self.assertAlmostEqual(mod2.objective.constant, 100)
self.assertDictEqual(mod2.objective.linear.to_dict(), {})
self.assertDictEqual(mod2.objective.quadratic.to_dict(), {})
with self.subTest("zero range variable in a linear constraint"):
mod = QuadraticProgram()
mod.integer_var(name="x", lowerbound=10, upperbound=10)
mod.binary_var(name="y")
mod.linear_constraint({"x": 1, "y": 1}, "<=", 100)
mod2 = IntegerToBinary().convert(mod)
self.assertListEqual([e.name for e in mod2.variables], ["x@0", "y"])
self.assertEqual(mod.get_num_linear_constraints(), 1)
self.assertEqual(mod.get_num_quadratic_constraints(), 0)
self.assertAlmostEqual(mod2.objective.constant, 0)
self.assertDictEqual(mod2.objective.linear.to_dict(), {})
self.assertDictEqual(mod2.objective.quadratic.to_dict(), {})
cst = mod2.get_linear_constraint(0)
self.assertDictEqual(cst.linear.to_dict(use_name=True), {"y": 1})
self.assertEqual(cst.sense, Constraint.Sense.LE)
self.assertAlmostEqual(cst.rhs, 90)
self.assertEqual(cst.name, "c0")
with self.subTest("zero range variable in a quadratic constraint"):
mod = QuadraticProgram()
mod.integer_var(name="x", lowerbound=10, upperbound=10)
mod.binary_var(name="y")
mod.quadratic_constraint({"x": 1}, {("x", "x"): 2, ("x", "y"): 3}, ">=", 100)
mod2 = IntegerToBinary().convert(mod)
self.assertListEqual([e.name for e in mod2.variables], ["x@0", "y"])
self.assertEqual(mod.get_num_linear_constraints(), 0)
self.assertEqual(mod.get_num_quadratic_constraints(), 1)
self.assertAlmostEqual(mod2.objective.constant, 0)
self.assertDictEqual(mod2.objective.linear.to_dict(), {})
self.assertDictEqual(mod2.objective.quadratic.to_dict(), {})
cst = mod2.get_quadratic_constraint(0)
self.assertDictEqual(cst.linear.to_dict(use_name=True), {"y": 30})
self.assertEqual(cst.sense, Constraint.Sense.GE)
self.assertAlmostEqual(cst.rhs, -110)
self.assertEqual(cst.name, "q0")
if __name__ == "__main__":
unittest.main()
```
#### File: qiskit-optimization/test/test_readme_sample.py
```python
import unittest
import contextlib
import io
from pathlib import Path
import re
from test import QiskitOptimizationTestCase
class TestReadmeSample(QiskitOptimizationTestCase):
"""Test sample code from readme"""
def test_readme_sample(self):
"""readme sample test"""
# pylint: disable=exec-used
readme_name = "README.md"
readme_path = Path(__file__).parent.parent.joinpath(readme_name)
if not readme_path.exists() or not readme_path.is_file():
self.fail(msg=f"{readme_name} not found at {readme_path}")
return
# gets the first matched code sample
# assumes one code sample to test per readme
readme_sample = None
with open(readme_path, encoding="UTF-8") as readme_file:
match_sample = re.search(
"```python.*```",
readme_file.read(),
flags=re.S,
)
if match_sample:
# gets the matched string stripping the markdown code block
readme_sample = match_sample.group(0)[9:-3]
if readme_sample is None:
self.skipTest(f"No sample found inside {readme_name}.")
return
with contextlib.redirect_stdout(io.StringIO()) as out:
try:
exec(readme_sample, globals())
except Exception as ex: # pylint: disable=broad-except
self.fail(str(ex))
return
result_x = None
result_fval = None
str_ref1 = "optimal value:"
str_ref2 = "optimal function value:"
texts = out.getvalue().split("\n")
for text in texts:
idx = text.find(str_ref1)
if idx >= 0:
result_x = text[idx + len(str_ref1) :].strip()
continue
idx = text.find(str_ref2)
if idx >= 0:
result_fval = float(text[idx + len(str_ref2) :])
if result_x is not None and result_fval is not None:
break
if result_x is None:
self.fail(f"Failed to find result.x inside {readme_name}.")
return
if result_fval is None:
self.fail(f"Failed to find result.fval inside {readme_name}.")
return
with self.subTest("test result.x"):
self.assertEqual(result_x, "[1. 0. 1. 0.]")
with self.subTest("test result.fval"):
self.assertAlmostEqual(result_fval, 4.0)
if __name__ == "__main__":
unittest.main()
```
#### File: test/translators/test_docplex_mp.py
```python
from test.optimization_test_case import QiskitOptimizationTestCase
from docplex.mp.model import Model
from qiskit_optimization.exceptions import QiskitOptimizationError
from qiskit_optimization.problems import Constraint, QuadraticProgram
from qiskit_optimization.translators.docplex_mp import from_docplex_mp, to_docplex_mp
class TestDocplexMpTranslator(QiskitOptimizationTestCase):
"""Test from_docplex_mp and to_docplex_mp"""
def test_from_and_to(self):
"""test from_docplex_mp and to_docplex_mp"""
q_p = QuadraticProgram("test")
q_p.binary_var(name="x")
q_p.integer_var(name="y", lowerbound=-2, upperbound=4)
q_p.continuous_var(name="z", lowerbound=-1.5, upperbound=3.2)
q_p.minimize(
constant=1,
linear={"x": 1, "y": 2},
quadratic={("x", "y"): -1, ("z", "z"): 2},
)
q_p.linear_constraint({"x": 2, "z": -1}, "==", 1)
q_p.quadratic_constraint({"x": 2, "z": -1}, {("y", "z"): 3}, "==", 1)
q_p2 = from_docplex_mp(to_docplex_mp(q_p))
self.assertEqual(q_p.export_as_lp_string(), q_p2.export_as_lp_string())
mod = Model("test")
x = mod.binary_var("x")
y = mod.integer_var(-2, 4, "y")
z = mod.continuous_var(-1.5, 3.2, "z")
mod.minimize(1 + x + 2 * y - x * y + 2 * z * z)
mod.add(2 * x - z == 1, "c0")
mod.add(2 * x - z + 3 * y * z == 1, "q0")
self.assertEqual(q_p.export_as_lp_string(), mod.export_as_lp_string())
def test_from_without_variable_names(self):
"""test from_docplex_mp without explicit variable names"""
mod = Model()
x = mod.binary_var()
y = mod.continuous_var()
z = mod.integer_var()
mod.minimize(x + y + z + x * y + y * z + x * z)
mod.add_constraint(x + y == z) # linear EQ
mod.add_constraint(x + y >= z) # linear GE
mod.add_constraint(x + y <= z) # linear LE
mod.add_constraint(x * y == z) # quadratic EQ
mod.add_constraint(x * y >= z) # quadratic GE
mod.add_constraint(x * y <= z) # quadratic LE
q_p = from_docplex_mp(mod)
var_names = [v.name for v in q_p.variables]
self.assertListEqual(var_names, ["x0", "x1", "x2"])
senses = [Constraint.Sense.EQ, Constraint.Sense.GE, Constraint.Sense.LE]
for i, c in enumerate(q_p.linear_constraints):
self.assertDictEqual(c.linear.to_dict(use_name=True), {"x0": 1, "x1": 1, "x2": -1})
self.assertEqual(c.rhs, 0)
self.assertEqual(c.sense, senses[i])
for i, c in enumerate(q_p.quadratic_constraints):
self.assertEqual(c.rhs, 0)
self.assertDictEqual(c.linear.to_dict(use_name=True), {"x2": -1})
self.assertDictEqual(c.quadratic.to_dict(use_name=True), {("x0", "x1"): 1})
self.assertEqual(c.sense, senses[i])
def test_unsupported_features(self):
"""Test unsupported features"""
with self.subTest("semiinteget_var"), self.assertRaises(QiskitOptimizationError):
mod = Model()
mod.semiinteger_var(lb=1, name="x")
_ = from_docplex_mp(mod)
with self.subTest("range constraint"), self.assertRaises(QiskitOptimizationError):
mod = Model()
x = mod.binary_var("x")
mod.add_range(0, 2 * x, 1)
_ = from_docplex_mp(mod)
with self.subTest("equivalence constraint"), self.assertRaises(QiskitOptimizationError):
mod = Model()
x = mod.binary_var("x")
y = mod.binary_var("y")
mod.add_equivalence(x, x + y <= 1, 1)
_ = from_docplex_mp(mod)
with self.subTest("not equal constraint"), self.assertRaises(QiskitOptimizationError):
mod = Model()
x = mod.binary_var("x")
y = mod.binary_var("y")
mod.add(x != y)
_ = from_docplex_mp(mod)
with self.subTest("PWL constraint"), self.assertRaises(QiskitOptimizationError):
mod = Model()
x = mod.binary_var("x")
mod.add(mod.piecewise(-1, [(0, 0)], 1)(x) <= 1)
_ = from_docplex_mp(mod)
with self.subTest("lazy constraint"), self.assertRaises(QiskitOptimizationError):
mod = Model()
x = mod.binary_var("x")
y = mod.binary_var("y")
mod.add_lazy_constraint(x + y <= 1)
_ = from_docplex_mp(mod)
with self.subTest("user cut constraint"), self.assertRaises(QiskitOptimizationError):
mod = Model()
x = mod.binary_var("x")
y = mod.binary_var("y")
mod.add_user_cut_constraint(x + y <= 1)
_ = from_docplex_mp(mod)
with self.subTest("sos1"), self.assertRaises(QiskitOptimizationError):
mod = Model()
x = mod.binary_var("x")
y = mod.binary_var("y")
mod.add_sos1([x, y])
_ = from_docplex_mp(mod)
with self.subTest("sos2"), self.assertRaises(QiskitOptimizationError):
mod = Model()
x = mod.binary_var("x")
y = mod.binary_var("y")
z = mod.binary_var("z")
mod.add_sos2([x, y, z])
_ = from_docplex_mp(mod)
def test_indicator_constraints(self):
"""Test indicator constraints"""
with self.subTest("active 0, sense <="):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=0, linear_ct=(y + 2 * z <= 1), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": -5.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 1)
with self.subTest("active 0, sense >="):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=0, linear_ct=(y + 2 * z >= 1), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": 4.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 1)
with self.subTest("active 1, sense <="):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y + 2 * z <= 1), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": 5.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 6)
with self.subTest("active 1, sense >="):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y + 2 * z >= 1), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": -4.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, -3)
with self.subTest("active 0, sense =="):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=0, linear_ct=(y + 2 * z == 1), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 2)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind_LE")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": -5.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 1)
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind_GE")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": 4.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 1)
with self.subTest("active 1, sense =="):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y + 2 * z == 1), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 2)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind_LE")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": 5.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 6)
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind_GE")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": -4.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, -3)
with self.subTest("active 0, sense <=, indicator_big_m"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=0, linear_ct=(y + 2 * z <= 1), name="ind")
quad_prog = from_docplex_mp(mod, indicator_big_m=100)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": -100.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, 1)
with self.subTest("active 0, sense >=, indicator_big_m"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=0, linear_ct=(y + 2 * z >= 1), name="ind")
quad_prog = from_docplex_mp(mod, indicator_big_m=100)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": 100.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, 1)
with self.subTest("active 1, sense <=, indicator_big_m"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y + 2 * z <= 1), name="ind")
quad_prog = from_docplex_mp(mod, indicator_big_m=100)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": 100.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, 101)
with self.subTest("active 1, sense >=, indicator_big_m"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y + 2 * z >= 1), name="ind")
quad_prog = from_docplex_mp(mod, indicator_big_m=100)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": -100.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, -99)
with self.subTest("active 0, sense ==, indicator_big_m"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=0, linear_ct=(y + 2 * z == 1), name="ind")
quad_prog = from_docplex_mp(mod, indicator_big_m=100)
self.assertEqual(quad_prog.get_num_linear_constraints(), 2)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind_LE")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": -100.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, 1)
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind_GE")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": 100.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, 1)
with self.subTest("active 1, sense ==, indicator_big_m"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y + 2 * z == 1), name="ind")
quad_prog = from_docplex_mp(mod, indicator_big_m=100)
self.assertEqual(quad_prog.get_num_linear_constraints(), 2)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind_LE")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": 100.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, 101)
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind_GE")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": -100.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, -99)
with self.subTest("active 0, sense <=, obvious bound"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=0, linear_ct=(y + 2 * z <= 10), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 10)
with self.subTest("active 0, sense >=, obvious bound"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(
binary_var=x, active_value=0, linear_ct=(y + 2 * z >= -10), name="ind"
)
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, -10)
with self.subTest("active 1, sense <=, obvious bound"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y + 2 * z <= 10), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 10)
with self.subTest("active 1, sense >=, obvious bound"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(
binary_var=x, active_value=1, linear_ct=(y + 2 * z >= -10), name="ind"
)
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, -10)
with self.subTest("active 0, sense ==, too small rhs"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(
binary_var=x, active_value=0, linear_ct=(y + 2 * z == -10), name="ind"
)
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 2)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind_LE")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": -16.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, -10)
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind_GE")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, -10)
with self.subTest("active 0, sense ==, too large rhs"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=0, linear_ct=(y + 2 * z == 10), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 2)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind_LE")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 10)
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind_GE")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": 13, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 10)
with self.subTest("active 1, sense ==, too small rhs"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(
binary_var=x, active_value=1, linear_ct=(y + 2 * z == -10), name="ind"
)
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 2)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind_LE")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": 16.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 6)
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind_GE")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, -10)
with self.subTest("active 1, sense ==, too large rhs"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y + 2 * z == 10), name="ind")
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 2)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind_LE")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 10)
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind_GE")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": -13.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, -3)
with self.subTest("no name"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y == 1))
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y <= 1))
mod.add_indicator(binary_var=x, active_value=1, linear_ct=(y >= 1))
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 4)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind0_LE")
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind0_GE")
ind = quad_prog.get_linear_constraint(2)
self.assertEqual(ind.name, "ind1")
ind = quad_prog.get_linear_constraint(3)
self.assertEqual(ind.name, "ind2")
with self.subTest("sense <=, binary_var is included as part of linear_ct too"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, linear_ct=(x + y + 2 * z <= -10))
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind0")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": 18.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 7)
with self.subTest("sense >=, binary_var is included as part of linear_ct too"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, linear_ct=(x + y + 2 * z >= 10))
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 1)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind0")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(
ind.linear.to_dict(use_name=True), {"x": -12.0, "y": 1.0, "z": 2.0}
)
self.assertEqual(ind.rhs, -3)
with self.subTest("sense ==, binary_var is included as part of linear_ct too"):
mod = Model()
x = mod.binary_var("x")
y = mod.integer_var(lb=-1, ub=2, name="y")
z = mod.continuous_var(lb=-1, ub=2, name="z")
mod.add_indicator(binary_var=x, linear_ct=(x + y + 2 * z == 0))
quad_prog = from_docplex_mp(mod)
self.assertEqual(quad_prog.get_num_linear_constraints(), 2)
ind = quad_prog.get_linear_constraint(0)
self.assertEqual(ind.name, "ind0_LE")
self.assertEqual(ind.sense, Constraint.Sense.LE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": 8.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, 7)
ind = quad_prog.get_linear_constraint(1)
self.assertEqual(ind.name, "ind0_GE")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(use_name=True), {"x": -2.0, "y": 1.0, "z": 2.0})
self.assertEqual(ind.rhs, -3)
def test_logical_expressions(self):
"""test from_docplex_mp with logical expressions"""
with self.subTest("logical NOT"):
mod = Model()
x = mod.binary_var("x")
y = mod.logical_not(x)
mod.add_constraint(y <= 1)
mod.add_constraint(y**2 == 2)
q_p = from_docplex_mp(mod)
self.assertListEqual([v.name for v in q_p.variables], ["x", "_not1"])
self.assertEqual(q_p.get_num_linear_constraints(), 2)
lin = q_p.get_linear_constraint(0)
self.assertEqual(lin.name, "c0")
self.assertEqual(lin.sense, Constraint.Sense.EQ)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"x": 1, "_not1": 1})
self.assertAlmostEqual(lin.rhs, 1)
lin = q_p.get_linear_constraint(1)
self.assertEqual(lin.name, "c1")
self.assertEqual(lin.sense, Constraint.Sense.LE)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"_not1": 1})
self.assertAlmostEqual(lin.rhs, 1)
self.assertEqual(q_p.get_num_quadratic_constraints(), 1)
quad = q_p.get_quadratic_constraint(0)
self.assertEqual(quad.name, "q0")
self.assertEqual(quad.sense, Constraint.Sense.EQ)
self.assertDictEqual(quad.linear.to_dict(), {})
self.assertDictEqual(quad.quadratic.to_dict(use_name=True), {("_not1", "_not1"): 1})
self.assertAlmostEqual(quad.rhs, 2)
with self.subTest("logical AND"):
mod = Model()
x = mod.binary_var("x")
y = mod.binary_var("y")
z = mod.logical_and(x, y)
mod.add_constraint(z <= 1)
mod.add_constraint(z**2 == 2)
q_p = from_docplex_mp(mod)
self.assertListEqual([v.name for v in q_p.variables], ["x", "y", "_and2"])
self.assertEqual(q_p.get_num_linear_constraints(), 4)
lin = q_p.get_linear_constraint(0)
self.assertEqual(lin.name, "c0")
self.assertEqual(lin.sense, Constraint.Sense.LE)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"x": -1, "_and2": 1})
self.assertAlmostEqual(lin.rhs, 0)
lin = q_p.get_linear_constraint(1)
self.assertEqual(lin.name, "c1")
self.assertEqual(lin.sense, Constraint.Sense.LE)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"y": -1, "_and2": 1})
self.assertAlmostEqual(lin.rhs, 0)
lin = q_p.get_linear_constraint(2)
self.assertEqual(lin.name, "c2")
self.assertEqual(lin.sense, Constraint.Sense.GE)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"x": -1, "y": -1, "_and2": 1})
self.assertAlmostEqual(lin.rhs, -1)
lin = q_p.get_linear_constraint(3)
self.assertEqual(lin.name, "c3")
self.assertEqual(lin.sense, Constraint.Sense.LE)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"_and2": 1})
self.assertAlmostEqual(lin.rhs, 1)
self.assertEqual(q_p.get_num_quadratic_constraints(), 1)
quad = q_p.get_quadratic_constraint(0)
self.assertEqual(quad.name, "q0")
self.assertEqual(quad.sense, Constraint.Sense.EQ)
self.assertDictEqual(quad.linear.to_dict(), {})
self.assertDictEqual(quad.quadratic.to_dict(use_name=True), {("_and2", "_and2"): 1})
self.assertAlmostEqual(quad.rhs, 2)
with self.subTest("logical OR"):
mod = Model()
x = mod.binary_var("x")
y = mod.binary_var("y")
z = mod.logical_or(x, y)
mod.add_constraint(z <= 1)
mod.add_constraint(z**2 == 2)
q_p = from_docplex_mp(mod)
self.assertListEqual([v.name for v in q_p.variables], ["x", "y", "_or2"])
self.assertEqual(q_p.get_num_linear_constraints(), 4)
lin = q_p.get_linear_constraint(0)
self.assertEqual(lin.name, "c0")
self.assertEqual(lin.sense, Constraint.Sense.LE)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"x": 1, "_or2": -1})
self.assertAlmostEqual(lin.rhs, 0)
lin = q_p.get_linear_constraint(1)
self.assertEqual(lin.name, "c1")
self.assertEqual(lin.sense, Constraint.Sense.LE)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"y": 1, "_or2": -1})
self.assertAlmostEqual(lin.rhs, 0)
lin = q_p.get_linear_constraint(2)
self.assertEqual(lin.name, "c2")
self.assertEqual(lin.sense, Constraint.Sense.LE)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"x": -1, "y": -1, "_or2": 1})
self.assertAlmostEqual(lin.rhs, 0)
lin = q_p.get_linear_constraint(3)
self.assertEqual(lin.name, "c3")
self.assertEqual(lin.sense, Constraint.Sense.LE)
self.assertDictEqual(lin.linear.to_dict(use_name=True), {"_or2": 1})
self.assertAlmostEqual(lin.rhs, 1)
self.assertEqual(q_p.get_num_quadratic_constraints(), 1)
quad = q_p.get_quadratic_constraint(0)
self.assertEqual(quad.name, "q0")
self.assertEqual(quad.sense, Constraint.Sense.EQ)
self.assertDictEqual(quad.linear.to_dict(), {})
self.assertDictEqual(quad.quadratic.to_dict(use_name=True), {("_or2", "_or2"): 1})
self.assertAlmostEqual(quad.rhs, 2)
def test_trivial_constraints_from_docplex_mp(self):
"""test trivial constraints of from_docplex_mp"""
with self.subTest("trivial linear constraint"), self.assertWarns(UserWarning):
mod = Model()
x = mod.binary_var("x")
mod.add_constraint(x + 1 <= x + 1)
q_p = from_docplex_mp(mod)
self.assertListEqual([v.name for v in q_p.variables], ["x"])
self.assertEqual(q_p.get_num_linear_constraints(), 1)
self.assertEqual(q_p.get_num_quadratic_constraints(), 0)
lin = q_p.get_linear_constraint(0)
self.assertEqual(lin.name, "c0")
self.assertEqual(lin.sense, Constraint.Sense.LE)
self.assertDictEqual(lin.linear.to_dict(), {})
self.assertAlmostEqual(lin.rhs, 0)
with self.subTest("trivial quadratic constraint"), self.assertWarns(UserWarning):
mod = Model()
x = mod.binary_var("x")
mod.add_constraint(x * x == x * x)
q_p = from_docplex_mp(mod)
self.assertListEqual([v.name for v in q_p.variables], ["x"])
self.assertEqual(q_p.get_num_linear_constraints(), 0)
self.assertEqual(q_p.get_num_quadratic_constraints(), 1)
quad = q_p.get_quadratic_constraint(0)
self.assertEqual(quad.name, "q0")
self.assertEqual(quad.sense, Constraint.Sense.EQ)
self.assertDictEqual(quad.linear.to_dict(), {})
self.assertDictEqual(quad.quadratic.to_dict(), {})
self.assertAlmostEqual(quad.rhs, 0)
with self.subTest("trivial indicator constraint"), self.assertWarns(UserWarning):
mod = Model()
x = mod.binary_var("x")
mod.add_indicator(x, x + 1 >= x + 1)
q_p = from_docplex_mp(mod)
self.assertListEqual([v.name for v in q_p.variables], ["x"])
self.assertEqual(q_p.get_num_linear_constraints(), 1)
self.assertEqual(q_p.get_num_quadratic_constraints(), 0)
ind = q_p.get_linear_constraint(0)
self.assertEqual(ind.name, "ind0")
self.assertEqual(ind.sense, Constraint.Sense.GE)
self.assertDictEqual(ind.linear.to_dict(), {})
self.assertAlmostEqual(ind.rhs, 0)
def test_trivial_constraints_to_docplex_mp(self):
"""test trivial constraints of to_docplex_mp"""
with self.subTest("trivial linear constraint"):
q_p = QuadraticProgram()
q_p.linear_constraint(sense="==", rhs=1.0)
mod = to_docplex_mp(q_p)
self.assertEqual(mod.number_of_variables, 0)
self.assertEqual(mod.number_of_constraints, 1)
self.assertEqual(mod.number_of_linear_constraints, 1)
cst = mod.get_constraint_by_index(0)
left = cst.get_left_expr()
self.assertTrue(left.is_constant())
self.assertAlmostEqual(left.constant, 0)
right = cst.get_right_expr()
self.assertTrue(right.is_constant())
self.assertAlmostEqual(right.constant, 1)
with self.subTest("trivial quadratic constraint"):
q_p = QuadraticProgram()
q_p.quadratic_constraint(sense="==", rhs=1.0)
mod = to_docplex_mp(q_p)
self.assertEqual(mod.number_of_variables, 0)
self.assertEqual(mod.number_of_constraints, 1)
self.assertEqual(mod.number_of_linear_constraints, 1)
cst = mod.get_constraint_by_index(0)
left = cst.get_left_expr()
self.assertTrue(left.is_constant())
self.assertAlmostEqual(left.constant, 0)
right = cst.get_right_expr()
self.assertTrue(right.is_constant())
self.assertAlmostEqual(right.constant, 1)
``` |
{
"source": "johannesgrothe/Smarthome_System",
"score": 3
} |
#### File: Smarthome_System/exporters/temp_dir_manager.py
```python
import logging
import os
import shutil
class TempDirManager:
_path: str
_logger: logging.Logger
def __init__(self, path: str):
self._logger = logging.getLogger(self.__class__.__name__)
self._path = path
def assert_temp(self):
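        """Create the temp directory if it does not exist yet."""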
if not os.path.isdir(self._path):
os.mkdir(self._path)
def clean_temp(self):
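        """Delete every file and subdirectory inside the temp directory."""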
for filename in [x for x
in os.listdir(self._path)
if os.path.isfile(os.path.join(self._path, x))]:
self._logger.info(f"Deleting '{filename}'")
os.remove(os.path.join(self._path, filename))
for dirname in [x for x
in os.listdir(self._path)
if os.path.isdir(os.path.join(self._path, x))]:
self._logger.info(f"Deleting Directory '{dirname}'")
shutil.rmtree(os.path.join(self._path, dirname))
```
#### File: Smarthome_System/tests/test_software_version.py
```python
import pytest
from utils.software_version import SoftwareVersion
def test_software_version_operators():
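    """Test parsing, string conversion and comparison operators of SoftwareVersion."""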
with pytest.raises(ValueError):
SoftwareVersion.from_string("2.4")
with pytest.raises(ValueError):
SoftwareVersion.from_string("2.4.3.5")
assert SoftwareVersion.from_string("2.4.11") == SoftwareVersion(2, 4, 11)
assert str(SoftwareVersion(1, 0, 8)) == "1.0.8"
assert SoftwareVersion(1, 0, 8) == SoftwareVersion(1, 0, 8)
assert SoftwareVersion(1, 0, 8) != SoftwareVersion(1, 0, 9)
assert SoftwareVersion(1, 0, 8) < SoftwareVersion(1, 0, 9)
assert SoftwareVersion(1, 0, 8) > SoftwareVersion(1, 0, 7)
assert SoftwareVersion(1, 1, 8) > SoftwareVersion(1, 0, 15)
def test_software_version_follows():
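    """Test that follows() only accepts a direct successor version (patch, minor or major bump)."""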
assert SoftwareVersion(2, 3, 8).follows(SoftwareVersion(2, 3, 7))
assert SoftwareVersion(2, 3, 0).follows(SoftwareVersion(2, 2, 7))
assert SoftwareVersion(3, 0, 0).follows(SoftwareVersion(2, 3, 7))
assert not SoftwareVersion(2, 3, 8).follows(SoftwareVersion(2, 3, 8))
assert not SoftwareVersion(2, 3, 8).follows(SoftwareVersion(2, 3, 9))
assert not SoftwareVersion(2, 3, 8).follows(SoftwareVersion(2, 2, 13))
assert not SoftwareVersion(3, 0, 1).follows(SoftwareVersion(2, 3, 7))
assert not SoftwareVersion(3, 1, 0).follows(SoftwareVersion(2, 3, 7))
assert not SoftwareVersion(4, 0, 0).follows(SoftwareVersion(2, 3, 7))
```
#### File: Smarthome_System/utils/cpp_file.py
```python
from abc import abstractmethod, ABCMeta
from typing import Union
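# Number of spaces per indentation level used when rendering C++ source lines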
_indentation_depth = 4
class CppElement(metaclass=ABCMeta):
@staticmethod
def _render_indent(indentation: int) -> str:
return " " * _indentation_depth * indentation
@abstractmethod
def render_content(self, indentation: int) -> [str]:
"""
Renders the content of the C++ element into lines of code
:return: the lines of code as strings
"""
class CppContainer(metaclass=ABCMeta):
_elements: list[CppElement]
def __init__(self):
self._elements = []
def add(self, elem: CppElement):
"""
Adds an element to the C++ file
:param elem: Element to add
:return: None
"""
self._elements.append(elem)
def _render_elements(self, indentation: int) -> [str]:
lines = []
for elem in self._elements:
            # Collect the rendered lines of every child element in order
            lines += elem.render_content(indentation)
return lines
class CppPragma(CppElement):
_name: str
def __init__(self, name: str):
self._name = name
def render_content(self, indentation: int) -> [str]:
return [self._render_indent(indentation) + f"#pragma {self._name}"]
class CppImport(CppElement):
_name: str
_in_package: bool
def __init__(self, name: str, in_package: bool):
self._name = name
self._in_package = in_package
def render_content(self, indentation: int) -> [str]:
include_buf = f"\"{self._name}\"" if self._in_package else f"<{self._name}>"
return [self._render_indent(indentation) + f"#include {include_buf}"]
class CppBlankLine(CppElement):
_blank_lines: int
def __init__(self, blank_lines: int = 0):
self._blank_lines = blank_lines
def render_content(self, indentation: int) -> [str]:
return ["\n" * self._blank_lines]
class CppVariable(CppElement):
_type: str
_name: str
_value: Union[int, str]
_docstring: str
def __init__(self, var_type: str, name: str, value: Union[int, str], docstr: str = ""):
self._type = var_type
self._name = name
self._value = value
self._docstring = docstr
def render_content(self, indentation: int) -> [str]:
docstr_buf = "" if not self._docstring else f" // {self._docstring}"
value_buf = str(self._value) if isinstance(self._value, int) else f"\"{self._value}\""
type_prefix = self._type
type_suffix = ""
if type_prefix.endswith("[]"):
type_prefix = type_prefix[:-2].strip()
type_suffix = "[] "
return [self._render_indent(indentation) +
f"{type_prefix} {self._name} {type_suffix}= {value_buf};{docstr_buf}"]
class CppComment(CppElement):
_content: str
def __init__(self, content: str):
self._content = content
def render_content(self, indentation: int) -> [str]:
parts = self._content.split("\n")
return [self._render_indent(indentation) + f"// {x}" for x in parts]
class CppEnumClass(CppElement):
class _CppEnumClassElement(CppElement):
_name: str
_value: int
_docstring: str
def __init__(self, name: str, value: int, docstring: str = ""):
self._name = name
self._value = value
self._docstring = docstring
def render_content(self, indentation: int, last_elem: bool = False) -> str:
buf_comment = ""
if self._docstring:
buf_comment = f" // {self._docstring}"
line_end = ""
if not last_elem:
line_end = ","
return f"{self._render_indent(indentation)}{self._name} = {str(self._value)}{line_end}{buf_comment}"
_name: str
_docstring: str
_items: list[_CppEnumClassElement]
def __init__(self, name: str, docstring: str):
super().__init__()
self._name = name
self._docstring = docstring
self._items = []
def add_element(self, name: str, value: int, docstring: str = ""):
self._items.append(self._CppEnumClassElement(name, value, docstring))
def render_content(self, indentation: int) -> [str]:
return [""] + \
CppComment(self._docstring).render_content(indentation) + \
[self._render_indent(indentation) + "enum class " + self._name + " {"] + \
[x.render_content(indentation + 1) for x in self._items[:-1]] + \
[self._items[-1].render_content(indentation + 1, True)] + \
[self._render_indent(indentation) + "};"]
class CppNamespace(CppElement, CppContainer):
_name: str
_docstring: str
def __init__(self, name: str, docstring: str):
super().__init__()
self._name = name
self._docstring = docstring
def render_content(self, indentation: int) -> [str]:
return [""] + \
CppComment(self._docstring).render_content(indentation) + \
[self._render_indent(indentation) + "namespace " + self._name + " {"] + \
self._render_elements(indentation + 1) + \
[self._render_indent(indentation) + "}"]
class CppFile(CppContainer):
"""Contains a C++ file for editing and saving"""
def save(self, filename: str):
"""
Saves the C++ file to the disk
:param filename: Name of the file to add
:return: None
"""
lines = self._render_elements(0)
with open(filename, "w") as file_p:
out_lines = [x + "\n" for x in lines]
file_p.writelines(out_lines)
``` |
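A hedged usage sketch for the builders above (not part of the original module; the identifiers and output path are made up): compose a header out of a pragma, an include, a namespace with a variable and an enum class, then write it to disk.
```python
# Hypothetical usage of utils/cpp_file.py; names and the output path are illustrative only.
header = CppFile()
header.add(CppPragma("once"))
header.add(CppImport("cstdint", in_package=False))
header.add(CppBlankLine(1))

ns = CppNamespace("smarthome", "Generated constants for the smarthome firmware")
ns.add(CppVariable("uint8_t", "protocol_version", 3, "bumped on breaking changes"))

states = CppEnumClass("GadgetState", "Lifecycle states of a gadget")
states.add_element("IDLE", 0)
states.add_element("RUNNING", 1, "actively processing")
ns.add(states)

header.add(ns)
header.save("generated_constants.h")  # writes '#pragma once', the include and the namespace block
```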
{
"source": "johanneshardt/raspberrify",
"score": 3
} |
#### File: raspberrify/raspberrify/sense.py
```python
import logging
from sense_hat import SenseHat
from typing import List
sense = SenseHat()
sense.set_rotation(r=180)
sense.clear()
def show(matrix: List[List[int]]) -> None:
if len(matrix) == 64 and all(len(e) == 3 for e in matrix):
sense.set_pixels(matrix)
else:
        logging.error("Invalid matrix dimensions.")
```
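`show` expects the flattened 8x8 pixel format of the Sense HAT API: 64 entries, each an `[R, G, B]` triple. A minimal illustrative call (assumes a Pi with a Sense HAT attached, since the module creates a `SenseHat()` at import time):
```python
# Illustrative only: light the whole 8x8 matrix in red.
red = [255, 0, 0]
show([red] * 64)  # 64 pixels, each an [R, G, B] triple
```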
#### File: raspberrify/raspberrify/spotify.py
```python
import spotipy
import requests
from PIL import Image
from spotipy.oauth2 import SpotifyOAuth
from enum import Enum, unique, auto
@unique
class State(Enum):
PLAYING = auto()
PAUSED = auto()
class Playback:
def __init__(self, sp: spotipy.client.Spotify):
self.client = sp
self.track = None
self.track_id = None
self.state = None
self.image_link = None
self.cached_track = None
self.refresh()
def refresh(self) -> None:
track = self.client.currently_playing()
if track is not None and track["item"] is not None:
self.state = State.PLAYING
self.track = track["item"]["name"]
self.track_id = track["item"]["id"]
self.image_link = track["item"]["album"]["images"][0]["url"]
else:
            self.state = State.PAUSED
def get_cover(self) -> Image.Image:
response = requests.get(self.image_link, stream=True)
response.raise_for_status()
im = Image.open(response.raw)
return im
    def toggle_playback(self) -> None:
        if self.state == State.PLAYING:
            print("Paused playback.")
            self.client.pause_playback()
            self.state = State.PAUSED
        else:
            print("Resumed playback.")
            self.client.start_playback()
            self.state = State.PLAYING
# TODO Handle case when nothing is playing??
# TODO combine with Playback class somehow?
def authorize(
client_id: str, client_secret: str, redirect_uri: str
) -> spotipy.client.Spotify:
scope = ["user-read-currently-playing", "user-library-read"]
sp = spotipy.Spotify(
auth_manager=SpotifyOAuth(
client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri,
scope=" ".join(scope),
)
)
return sp
``` |
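A hedged usage sketch of the module above; the credentials and redirect URI are placeholders, and actually toggling playback through the Spotify Web API would additionally need the `user-modify-playback-state` scope, which `authorize` does not request here.
```python
# Illustrative only: authorize, inspect the current track and fetch its cover art.
if __name__ == "__main__":
    client = authorize(
        client_id="YOUR_CLIENT_ID",
        client_secret="YOUR_CLIENT_SECRET",
        redirect_uri="http://localhost:8888/callback",
    )
    playback = Playback(client)
    if playback.state == State.PLAYING:
        print(f"Now playing: {playback.track}")
        cover = playback.get_cover()
        cover.thumbnail((64, 64))  # e.g. shrink towards an LED-matrix-friendly size
```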
{
"source": "johannesharmse/move_37_course",
"score": 3
} |
#### File: dynamic_programming/basic_scripts/value_iteration.py
```python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
from grid_world import standard_grid
from utils import print_values, print_policy
# SMALL_ENOUGH is referred to by the mathematical symbol theta in equations
SMALL_ENOUGH = 1e-3
GAMMA = 0.9
ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R')
def best_action_value(grid, V, s):
# finds the highest value action (max_a) from state s, returns the action and value
best_a = None
best_value = float('-inf')
grid.set_state(s)
# loop through all possible actions to find the best current action
for a in ALL_POSSIBLE_ACTIONS:
        transitions = grid.get_transition_probs(a)
        expected_v = 0
        expected_r = 0
        for (prob, r, state_prime) in transitions:
expected_r += prob * r
expected_v += prob * V[state_prime]
v = expected_r + GAMMA * expected_v
if v > best_value:
best_value = v
best_a = a
return best_a, best_value
def calculate_values(grid):
# initialize V(s)
V = {}
states = grid.all_states()
for s in states:
V[s] = 0
# repeat until convergence
# V[s] = max[a]{ sum[s',r] { p(s',r|s,a)[r + gamma*V[s']] } }
while True:
# biggest_change is referred to by the mathematical symbol delta in equations
biggest_change = 0
for s in grid.non_terminal_states():
old_v = V[s]
_, new_v = best_action_value(grid, V, s)
V[s] = new_v
biggest_change = max(biggest_change, np.abs(old_v - new_v))
if biggest_change < SMALL_ENOUGH:
break
return V
def initialize_random_policy(grid):
# policy is a lookup table for state -> action
# we'll randomly choose an action and update as we learn
policy = {}
for s in grid.non_terminal_states():
policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS)
return policy
def calculate_greedy_policy(grid, V):
    policy = initialize_random_policy(grid)
# find a policy that leads to optimal value function
for s in policy.keys():
grid.set_state(s)
# loop through all possible actions to find the best current action
best_a, _ = best_action_value(grid, V, s)
policy[s] = best_a
return policy
if __name__ == '__main__':
    # this grid gives you a reward of -0.5 for every non-terminal state (see step_cost below)
# we want to see if this will encourage finding a shorter path to the goal
grid = standard_grid(obey_prob=0.8, step_cost=-0.5)
# print rewards
print("rewards:")
print_values(grid.rewards, grid)
# calculate accurate values for each square
V = calculate_values(grid)
# calculate the optimum policy based on our values
policy = calculate_greedy_policy(grid, V)
# our goal here is to verify that we get the same answer as with policy iteration
print("values:")
print_values(V, grid)
print("policy:")
print_policy(policy, grid)
```
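As a standalone illustration of the backup performed inside `best_action_value` (this snippet is not part of the course code and uses a made-up two-state transition model): for each action, accumulate the expected immediate reward and the expected next-state value over the transition probabilities, then keep the action with the largest `r + gamma * V[s']`.
```python
# Illustrative only: a hand-rolled Bellman optimality backup for one state
# with two actions and a made-up transition model {action: [(prob, reward, s_prime), ...]}.
GAMMA = 0.9
V = {'s0': 0.0, 's1': 1.0}
transitions = {
    'L': [(0.8, 0.0, 's0'), (0.2, 0.0, 's1')],
    'R': [(0.8, 1.0, 's1'), (0.2, 0.0, 's0')],
}
best_a, best_value = None, float('-inf')
for a, outcomes in transitions.items():
    expected_r = sum(prob * r for prob, r, _ in outcomes)
    expected_v = sum(prob * V[s_prime] for prob, _, s_prime in outcomes)
    v = expected_r + GAMMA * expected_v
    if v > best_value:
        best_a, best_value = a, v
print(best_a, best_value)  # 'R', 0.8 + 0.9 * 0.8 = 1.52
```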
#### File: src/gradient_policy/lunar_lander.py
```python
import tensorflow as tf
import numpy as np
import gym
def discount_and_normalize_rewards(episode_rewards, gamma):
    """Calculate the discounted, normalized returns for one episode"""
discounted_episode_rewards = np.zeros_like(episode_rewards)
cumulative = 0.0
for i in reversed(range(len(episode_rewards))):
cumulative = cumulative * gamma + episode_rewards[i]
discounted_episode_rewards[i] = cumulative
mean = np.mean(discounted_episode_rewards)
std = np.std(discounted_episode_rewards)
discounted_episode_rewards = (discounted_episode_rewards - mean) / (std)
return discounted_episode_rewards
def reinforce(env, state_size, action_size,
max_episodes, learning_rate, gamma, render=False):
"""Training"""
### POLICY ESTIMATOR ###
with tf.name_scope("inputs"):
input_ = tf.placeholder(tf.float32, [None, state_size], name="input_")
actions = tf.placeholder(tf.int32, [None, action_size], name="actions")
discounted_episode_rewards_ = tf.placeholder(tf.float32, [None,], name="discounted_episode_rewards")
# Add this placeholder for having this variable in tensorboard
mean_reward_ = tf.placeholder(tf.float32 , name="mean_reward")
with tf.name_scope("fc1"):
fc1 = tf.contrib.layers.fully_connected(inputs = input_,
num_outputs = 10,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer())
with tf.name_scope("fc2"):
fc2 = tf.contrib.layers.fully_connected(inputs = fc1,
num_outputs = action_size,
activation_fn= tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer())
with tf.name_scope("fc3"):
fc3 = tf.contrib.layers.fully_connected(inputs = fc2,
num_outputs = action_size,
activation_fn= None,
weights_initializer=tf.contrib.layers.xavier_initializer())
with tf.name_scope("softmax"):
action_distribution = tf.nn.softmax(fc3)
with tf.name_scope("loss"):
# tf.nn.softmax_cross_entropy_with_logits computes the cross entropy of the result after applying the softmax function
# If you have single-class labels, where an object can only belong to one class, you might now consider using
# tf.nn.sparse_softmax_cross_entropy_with_logits so that you don't have to convert your labels to a dense one-hot array.
neg_log_prob = tf.nn.softmax_cross_entropy_with_logits_v2(logits = fc3, labels = actions)
loss = tf.reduce_mean(neg_log_prob * discounted_episode_rewards_)
with tf.name_scope("train"):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# tensorboard
# Setup TensorBoard Writer
# writer = tf.summary.FileWriter("./tensorboard/pg/1")
## Losses
# tf.summary.scalar("Loss", loss)
## Reward mean
# tf.summary.scalar("Reward_mean", mean_reward_)
# write_op = tf.summary.merge_all()
### TRAINING ###
allRewards = []
total_rewards = 0
maximumRewardRecorded = 0
episode = 0
episode_states, episode_actions, episode_rewards = [],[],[]
saver = tf.train.Saver()
with tf.Session() as sess:
# sess.run(tf.initialize_all_variables())
sess.run(tf.global_variables_initializer())
for episode in range(max_episodes):
episode_rewards_sum = 0
# Launch the game
state = env.reset()
while True:
                # Choose action a; remember we are not in a deterministic environment, we output probabilities.
action_probability_distribution = sess.run(action_distribution, feed_dict={input_: state.reshape([1,8])})
action = np.random.choice(range(action_probability_distribution.shape[1]), p=action_probability_distribution.ravel()) # select action w.r.t the actions prob
if render:
env.render()
# Perform a
new_state, reward, done, info = env.step(action)
# Store s, a, r
episode_states.append(state)
                # The network outputs a single action index, but the loss expects a one-hot vector
                # e.g. [0., 1.] if we take the second action, not just the index
action_ = np.zeros(action_size)
action_[action] = 1
episode_actions.append(action_)
episode_rewards.append(reward)
if done:
# Calculate sum reward
episode_rewards_sum = np.sum(episode_rewards)
# allRewards.append(episode_rewards_sum)
# total_rewards = np.sum(allRewards)
# # Mean reward
# mean_reward = np.divide(total_rewards, episode+1)
# maximumRewardRecorded = np.amax(allRewards)
print("==========================================")
print("Episode: ", episode)
print("Reward: ", episode_rewards_sum)
# print("Mean Reward", mean_reward)
# print("Max reward so far: ", maximumRewardRecorded)
# Calculate discounted reward
                    discounted_episode_rewards = discount_and_normalize_rewards(episode_rewards, gamma)
# Feedforward, gradient and backpropagation
loss_, _ = sess.run([loss, train_opt], feed_dict={input_: np.vstack(np.array(episode_states)),
actions: np.vstack(np.array(episode_actions)),
discounted_episode_rewards_: discounted_episode_rewards
})
# # Write TF Summaries
# summary = sess.run(write_op, feed_dict={input_: np.vstack(np.array(episode_states)),
# actions: np.vstack(np.array(episode_actions)),
# discounted_episode_rewards_: discounted_episode_rewards,
# mean_reward_: mean_reward
# })
# writer.add_summary(summary, episode)
# writer.flush()
# Reset the transition stores
episode_states, episode_actions, episode_rewards = [],[],[]
break
state = new_state
# Save Model
if episode % 100 == 0:
saver.save(sess, "./models/model.ckpt")
print("Model saved")
def predict(env, state_size, action_size, n_episodes=10, render=True):
### POLICY ESTIMATOR ###
with tf.name_scope("inputs"):
input_ = tf.placeholder(tf.float32, [None, state_size], name="input_")
actions = tf.placeholder(tf.int32, [None, action_size], name="actions")
discounted_episode_rewards_ = tf.placeholder(tf.float32, [None,], name="discounted_episode_rewards")
# Add this placeholder for having this variable in tensorboard
mean_reward_ = tf.placeholder(tf.float32 , name="mean_reward")
with tf.name_scope("fc1"):
fc1 = tf.contrib.layers.fully_connected(inputs = input_,
num_outputs = 10,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer())
with tf.name_scope("fc2"):
fc2 = tf.contrib.layers.fully_connected(inputs = fc1,
num_outputs = action_size,
activation_fn= tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer())
with tf.name_scope("fc3"):
fc3 = tf.contrib.layers.fully_connected(inputs = fc2,
num_outputs = action_size,
activation_fn= None,
weights_initializer=tf.contrib.layers.xavier_initializer())
with tf.name_scope("softmax"):
action_distribution = tf.nn.softmax(fc3)
with tf.name_scope("loss"):
# tf.nn.softmax_cross_entropy_with_logits computes the cross entropy of the result after applying the softmax function
# If you have single-class labels, where an object can only belong to one class, you might now consider using
# tf.nn.sparse_softmax_cross_entropy_with_logits so that you don't have to convert your labels to a dense one-hot array.
neg_log_prob = tf.nn.softmax_cross_entropy_with_logits_v2(logits = fc3, labels = actions)
loss = tf.reduce_mean(neg_log_prob * discounted_episode_rewards_)
with tf.name_scope("train"):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# tensorboard
# Setup TensorBoard Writer
# writer = tf.summary.FileWriter("./tensorboard/pg/1")
## Losses
# tf.summary.scalar("Loss", loss)
## Reward mean
# tf.summary.scalar("Reward_mean", mean_reward_)
# write_op = tf.summary.merge_all()
### TRAINING ###
allRewards = []
total_rewards = 0
maximumRewardRecorded = 0
episode = 0
episode_states, episode_actions, episode_rewards = [],[],[]
# saver = tf.train.Saver()
saver = tf.train.Saver()
with tf.Session() as sess:
env.reset()
rewards = []
# Load the model
saver.restore(sess, "./models/model.ckpt")
for episode in range(n_episodes):
state = env.reset()
step = 0
done = False
total_rewards = 0
print("****************************************************")
print("EPISODE ", episode)
while True:
                # Choose action a; remember we are not in a deterministic environment, we output probabilities.
action_probability_distribution = sess.run(action_distribution, feed_dict={input_: state.reshape([1,8])})
#print(action_probability_distribution)
action = np.random.choice(range(action_probability_distribution.shape[1]), p=action_probability_distribution.ravel()) # select action w.r.t the actions prob
if render:
env.render()
new_state, reward, done, info = env.step(action)
total_rewards += reward
if done:
rewards.append(total_rewards)
print ("Score", total_rewards)
break
state = new_state
env.close()
print ("Score over time: " + str(sum(rewards)/10))
if __name__ == "__main__":
env = gym.make('LunarLander-v2')
env = env.unwrapped
# n frames
state_size = 8
# possible actions
action_size = env.action_space.n
# max episodes for training
max_episodes = 1000
learning_rate = 0.01
# discount rate
gamma = 0.95
# reinforce(env, state_size, action_size, \
# max_episodes, learning_rate, gamma, render=False)
predict(env, state_size, action_size)
```
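To make the reward shaping in `discount_and_normalize_rewards` concrete, here is a small hand-checked illustration (not part of the original script):
```python
# Illustrative only: reward-to-go with gamma = 0.5, computed back-to-front.
# rewards            = [0.0, 0.0, 1.0]
# discounted returns = [0.25, 0.5, 1.0]
# The function then subtracts the mean and divides by the standard deviation, so
# timesteps whose return is above the episode mean get a positive weight in the loss.
returns = discount_and_normalize_rewards([0.0, 0.0, 1.0], gamma=0.5)
```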
#### File: src/monte_carlo/mc.py
```python
from copy import deepcopy
import numpy as np
class FiniteMCModel:
def __init__(self, state_space, action_space, gamma=1.0, epsilon=0.1):
"""MCModel takes in state_space and action_space (finite)
Arguments
---------
state_space: int OR list[observation], where observation is any hashable type from env's obs.
action_space: int OR list[action], where action is any hashable type from env's actions.
gamma: float, discounting factor.
epsilon: float, epsilon-greedy parameter.
If the parameter is an int, then we generate a list, and otherwise we generate a dictionary.
>>> m = FiniteMCModel(2,3,epsilon=0)
>>> m.Q
[[0, 0, 0], [0, 0, 0]]
>>> m.Q[0][1] = 1
>>> m.Q
[[0, 1, 0], [0, 0, 0]]
>>> m.pi(1, 0)
1
>>> m.pi(1, 1)
0
>>> d = m.generate_returns([(0,0,0), (0,1,1), (1,0,1)])
>>> assert(d == {(1, 0): 1, (0, 1): 2, (0, 0): 2})
>>> m.choose_action(m.pi, 1)
0
"""
self.gamma = gamma
self.epsilon = epsilon
self.Q = None
if isinstance(action_space, int):
self.action_space = np.arange(action_space)
actions = [0]*action_space
# Action representation
self._act_rep = "list"
else:
self.action_space = action_space
actions = {k:0 for k in action_space}
self._act_rep = "dict"
if isinstance(state_space, int):
self.state_space = np.arange(state_space)
self.Q = [deepcopy(actions) for _ in range(state_space)]
else:
self.state_space = state_space
self.Q = {k:deepcopy(actions) for k in state_space}
# Frequency of state/action.
self.Ql = deepcopy(self.Q)
def pi(self, action, state):
"""pi(a,s,A,V) := pi(a|s)
We take the argmax_a of Q(s,a).
q[s] = [q(s,0), q(s,1), ...]
"""
if self._act_rep == "list":
if action == np.argmax(self.Q[state]):
return 1
return 0
elif self._act_rep == "dict":
if action == max(self.Q[state], key=self.Q[state].get):
return 1
return 0
def b(self, action, state):
"""b(a,s,A) := b(a|s)
Sometimes you can only use a subset of the action space
given the state.
        Returns the epsilon-greedy probability of choosing `action` in `state`.
"""
return self.epsilon/len(self.action_space) + (1-self.epsilon) * self.pi(action, state)
def generate_returns(self, ep):
"""Backup on returns per time period in an epoch
Arguments
---------
ep: [(observation, action, reward)], an episode trajectory in chronological order.
"""
G = {} # return on state
C = 0 # cumulative reward
for tpl in reversed(ep):
observation, action, reward = tpl
G[(observation, action)] = C = reward + self.gamma*C
return G
def choose_action(self, policy, state):
"""Uses specified policy to select an action randomly given the state.
Arguments
---------
policy: function, can be self.pi, or self.b, or another custom policy.
state: observation of the environment.
"""
probs = [policy(a, state) for a in self.action_space]
return np.random.choice(self.action_space, p=probs)
def update_Q(self, ep):
"""Performs a action-value update.
Arguments
---------
ep: [(observation, action, reward)], an episode trajectory in chronological order.
"""
# Generate returns, return ratio
G = self.generate_returns(ep)
for s in G:
state, action = s
q = self.Q[state][action]
self.Ql[state][action] += 1
N = self.Ql[state][action]
self.Q[state][action] = q * N/(N+1) + G[s]/(N+1)
def score(self, env, policy, n_samples=1000):
"""Evaluates a specific policy with regards to the env.
Arguments
---------
env: an openai gym env, or anything that follows the api.
policy: a function, could be self.pi, self.b, etc.
"""
rewards = []
for _ in range(n_samples):
observation = env.reset()
cum_rewards = 0
while True:
action = self.choose_action(policy, observation)
observation, reward, done, _ = env.step(action)
cum_rewards += reward
if done:
rewards.append(cum_rewards)
break
return np.mean(rewards)
if __name__ == "__main__":
import doctest
doctest.testmod()
``` |
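A hedged sketch of how `FiniteMCModel` is typically driven (not part of the file above; it assumes the classic 4-tuple gym step API that `score` already relies on, and uses Blackjack because its observation space is small enough to enumerate):
```python
# Illustrative only: on-policy Monte Carlo control on Blackjack.
import gym
from itertools import product

env = gym.make("Blackjack-v0")
# Observations are (player_sum, dealer_card, usable_ace) tuples.
state_space = list(product(range(2, 32), range(1, 11), (True, False)))
model = FiniteMCModel(state_space, env.action_space.n, gamma=1.0, epsilon=0.1)

for _ in range(10000):
    episode = []
    observation = env.reset()
    while True:
        action = model.choose_action(model.b, observation)  # behave epsilon-greedily
        next_observation, reward, done, _ = env.step(action)
        episode.append((observation, action, reward))
        observation = next_observation
        if done:
            break
    model.update_Q(episode)

print(model.score(env, model.pi, n_samples=100))  # evaluate the greedy policy
```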
{
"source": "JohannesHaubner/TopOpt",
"score": 3
} |
#### File: topopt/quadrature/quadrature.py
```python
from scipy import integrate as integrate
import numpy as np
import pickle
from pathlib import Path
here = Path(__file__).parent
sigma = 7./32
def f(r):
return 2*r**3 - 3*r**2 +1
def squared_norm(x1, x2, y1, y2):
return np.power(x1 -y1, 2) + np.power(x2 -y2, 2)
def kappa(x1, x2, y1, y2):
if squared_norm(x1,x2,y1,y2)<=8:
return 1.
elif squared_norm(x1,x2,y1,y2) <= 25./2:
return f((squared_norm(x1, x2, y1,y2)-8)/(9./2))
else:
return 0.
def int_jk(x1, x2, y1, y2,sigma):
return np.power(squared_norm(x1,x2,y1,y2),-1-1*sigma)*kappa(x1,x2,y1,y2)
def tilde_h2(x,y,z, sigma):
return np.power(np.power(x+y,2) + np.power(z,2), -1-sigma)
def hat_h2(x,y,z, sigma):
return -1.0*z*np.power(np.power(x+y,2)+np.power(z,2),-1-sigma)
def h3(x1,x2,y1,y2, sigma):
return np.power(np.power(x1+y1,2) + np.power(x2+y2,2),-1-sigma)
def int_21(x,y,sigma):
return tilde_h2(1,x,y,sigma) + tilde_h2(x,1,y,sigma) + tilde_h2(x,y,1,sigma)
def int_22(x,y,sigma):
return hat_h2(1,x,y,sigma) + hat_h2(x,1,y,sigma) + hat_h2(x,y,1,sigma)
def int_3(x, y, z, sigma):
return h3(1,x,y,z, sigma)
def get_weights(sigma):
string = str(here) + "/integral_approximations_sigma_" + str(sigma) + ".pkl"
try:
file = open(string, "rb")
    except FileNotFoundError:
save_weights(sigma)
file = open(string, "rb")
return file
def save_weights(sigma):
sig = sigma
ints = {}
print('compute quadrature for ', 2)
ints["int2_1"] = integrate.nquad(int_21, [[0, 1], [0, 1]], args=(sig,))
ints["int2_2"] = integrate.nquad(int_22, [[0, 1], [0, 1]], args=(sig,))
print('compute quadrature for ', 3)
ints["int3"] = integrate.nquad(int_3, [[0, 1], [0, 1], [0, 1]], args=(sig,))
indexes = [[2, 0], [2, 1], [2, 2], [3, 0], [3, 1], [3, 2], [3, 3], [4, 0], [4, 1], [4, 2]]
for k in range(len(indexes)):
print('compute quadrature for ', k + 4)
ints["int" + str(4 + k)] = integrate.nquad(int_jk,
[[0, 1], [0, 1],
[indexes[k][0], indexes[k][0] + 1],
[indexes[k][1], indexes[k][1] + 1]],
args=(sig,))
string = str(here) + "/integral_approximations_sigma_" + str(sigma) + ".pkl"
file = open(string, "wb")
pickle.dump(ints, file)
file.close()
if __name__ == '__main__':
sigma = 7./16
string = "integral_approximations_sigma_" + str(sigma) + ".pkl"
try:
file = open(string, "rb")
    except FileNotFoundError:
save_weights(sigma)
file = open(string, "rb")
output = pickle.load(file)
print(output)
```
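A hedged sketch of how the cached weights are consumed elsewhere (keys follow the naming in `save_weights`; each value is the `(value, error_estimate)` pair returned by `scipy.integrate.nquad`):
```python
# Illustrative only: load (or lazily compute and cache) the quadrature weights
# using get_weights from the module above.
import pickle

weights_file = get_weights(7./32)   # opens the pickle file, computing it first if missing
weights = pickle.load(weights_file)
weights_file.close()
value, error = weights["int2_1"]    # nquad returns (integral value, error estimate)
```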
#### File: TopOpt/topopt/topopt.py
```python
from dolfin import *
from dolfin_adjoint import *
import numpy as np
from scipy import io
import ufl
set_log_level(LogLevel.ERROR)
from preprocessing import Preprocessing
from ipopt_solver import IPOPTSolver, IPOPTProblem
import Hs_regularization as Hs_reg
try:
from pyadjoint import ipopt # noqa: F401
except ImportError:
print("""This example depends on IPOPT and Python ipopt bindings. \
When compiling IPOPT, make sure to link against HSL, as it \
is a necessity for practical problems.""")
raise
# turn off redundant output in parallel
parameters["std_out_all_processes"] = False
mu = Constant(1.0) # viscosity
alphaunderbar = 2.5 * mu / (100**2) # parameter for \alpha
alphabar = 2.5 * mu / (0.01**2) # parameter for \alpha
q = Constant(0.01) # q value that controls difficulty/discrete-valuedness of solution
def alpha(rho):
"""Inverse permeability as a function of rho, equation (40)"""
return conditional(gt(rho, 1.0),0.0, conditional(gt(rho, -1.0),
alphabar*(-1.0/16*rho**4 + 3.0/8*rho**2 -0.5*rho + 3.0/16),
-1.0*alphabar*rho))
N = 40
delta = 1.5 # The aspect ratio of the domain, 1 high and \delta wide
V = 1.0/3 * delta # want the fluid to occupy 1/3 of the domain
mesh = Mesh(RectangleMesh(MPI.comm_world, Point(0.0, 0.0), Point(delta, 1.0), int(delta*N), N))
controls_file = File('../Output/final_controls_' + str(N) +'.pvd')
# test if alpha does the correct thing
#P_h = FiniteElement("CG", mesh.ufl_cell(), 1)
#P = FunctionSpace(mesh, P_h)
#c = interpolate(Expression("-4+8*x[0]", degree=1), P)
#testfile = File('./Output/c.pvd')
#v = TestFunction(P)
#vh = assemble(alpha(c)*v*dx)
#c.vector()[:] = vh[:]
#testfile << c
A = FunctionSpace(mesh, "CG", 1) # control function space
U_h = VectorElement("CG", mesh.ufl_cell(), 2)
P_h = FiniteElement("CG", mesh.ufl_cell(), 1)
W = FunctionSpace(mesh, U_h*P_h) # mixed Taylor-Hood function space
B = FunctionSpace(mesh, "DG", 0)
b = Function(B)
k = len(b.vector()[:])
b.vector()[:] = range(k)
#file = File("./Output/b_ved.pvd")
#file << b
# Define the boundary condition on velocity
class InflowOutflow(UserExpression):
def eval(self, values, x):
values[1] = 0.0
values[0] = 0.0
l = 1.0/6.0
gbar = 1.0
if x[0] == 0.0 or x[0] == delta:
if (1.0/4 - l/2) < x[1] < (1.0/4 + l/2):
t = x[1] - 1.0/4
values[0] = gbar*(1 - (2*t/l)**2)
if (3.0/4 - l/2) < x[1] < (3.0/4 + l/2):
t = x[1] - 3.0/4
values[0] = gbar*(1 - (2*t/l)**2)
def value_shape(self):
return (2,)
def forward(rho):
"""Solve the forward problem for a given fluid distribution rho(x)."""
w = Function(W)
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
F = (alpha(rho) * inner(u, v) * dx + inner(grad(u), grad(v)) * dx +
inner(grad(p), v) * dx + inner(div(u), q) * dx)
bc = DirichletBC(W.sub(0), InflowOutflow(degree=2), "on_boundary")
solve(lhs(F) == rhs(F), w, bcs=bc)
return w
def save_control(x0, controls_file, index=-1, J = None): #TODO
rho = preprocessing.dof_to_control(x0)
rho.rename("density", "density")
print('objective function value J', J(rho))
controls_file << rho
if index +1:
filename = '../Output/matlab_controls_' + str(N) + '_' + str(index +1) + '.mat'
io.savemat(filename, mdict={'data': x0})
pass
if __name__ == "__main__":
x0 = (2.*V/delta -1)*np.ones(int(k/2))
# preprocessing class which contains dof_to_control-mapping
weighting = 1. # consider L2-mass-matrix + weighting * Hs-matrix
sigma = 7./16
preprocessing = Preprocessing(N, B)
inner_product_matrix = Hs_reg.AssembleHs(N,delta,sigma).get_matrix(weighting)
rho = preprocessing.dof_to_control(x0)
# get reduced objective function: rho --> j(rho)
set_working_tape(Tape())
w = forward(rho)
(u, p) = split(w)
controls = File("../Output/control_iterations_guess" + str(N) +".pvd")
allctrls = File("../Output/allcontrols_" + str(N) + ".pvd")
rho_viz = Function(A, name="ControlVisualisation")
def eval_cb(j, rho):
rho_viz.assign(rho)
controls << rho_viz
allctrls << rho_viz
# objective function
J = assemble(0.5 * inner(alpha(rho) * u, u) * dx + 0.5 * mu * inner(grad(u), grad(u)) * dx)
# penalty term in objective function
J2 = assemble(ufl.Max(rho - 1.0, 0.0)**2 *dx + ufl.Max(-rho - 1.0, 0.0)**2 *dx)
m = Control(rho)
Jhat = [ReducedFunctional(J, m, eval_cb_post=eval_cb), ReducedFunctional(J2, m)]
# constraints
v = 1.0 /V * assemble((0.5 * (rho + 1)) * dx) - 1.0 # volume constraint
s = assemble( 1.0/delta*(rho*rho -1.0) *dx) # spherical constraint
constraints = [ReducedFunctional(v,m), ReducedFunctional(s,m)]
bounds = [[0.0, 0.0],[-1.0, 0.0]] # [[lower bound vc, upper bound vc],[lower bound sc, upper bound sc]]
# scaling
scaling_Jhat = [1.0, 0.0] # objective for optimization: scaling_Jhat[0]*Jhat[0]+scaling_Jhat[1]*Jhat[1]
scaling_constraints = [1.0, 1.0] # scaling of constraints for Ipopt
reg = 10.0 # regularization parameter
# problem
problem = IPOPTProblem(Jhat, scaling_Jhat, constraints, scaling_constraints, bounds,
preprocessing, inner_product_matrix, reg)
ipopt = IPOPTSolver(problem)
#ipopt.test_objective(len(x0))
#ipopt.test_constraints(len(x0), 1, option=1)
x0 = ipopt.solve(x0)
save_control(x0, controls_file, 0, J = Jhat[0])
# different weights for H_sigma matrix
weight = [0.01, 0.01, 0.01]
# different penalization parameters
eta = [40, 200, 1000]
# bounds for the constraints
bounds = [[0.0, 0.0], [0.0, 0.0]]
for j in range(len(eta)):
# update inner product
weighting = weight[j] # consider L2-mass-matrix + weighting * Hs-matrix
inner_product_matrix = Hs_reg.AssembleHs(N,delta,sigma).get_matrix(weighting)
scaling_Jhat = [1.0, eta[j]]
# move x0 onto sphere
x0 = preprocessing.move_onto_sphere(x0, V, delta)
# solve optimization problem
problem = IPOPTProblem(Jhat, scaling_Jhat, constraints, scaling_constraints, bounds, preprocessing,
inner_product_matrix, reg)
ipopt = IPOPTSolver(problem)
x0 = ipopt.solve(x0)
save_control(x0, controls_file, j+1, J = Jhat[0])
``` |
{
"source": "johannesheinz/morsecode",
"score": 4
} |
#### File: morsecode/src/clustering.py
```python
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from decoder import Signal
class Clustering:
"""
A machine learning algorithm that uses unsupervised learning processes in order to determine clusters.
"""
_kmeans: KMeans
_prediction: list
_n_clusters: int = 0
def __init__(self, number_of_clusters: int):
"""
Configures the algorithm
:param number_of_clusters: The number of clusters for the algorithm to form.
"""
self._n_clusters = number_of_clusters
def train(self, batch: list) -> list:
"""
Forms the configured number of clusters out of the given batch.
:param batch: Training data in the form of a list of tuples.
:return: Returns a list of labels for each data point
"""
data = np.array(batch)
n_samples = len(batch)
print("Samples: %d" % n_samples)
self._kmeans = KMeans(n_clusters=self._n_clusters, init='k-means++', random_state=150).fit(data)
self._prediction = self._kmeans.predict(data)
plt.figure(figsize=(12, 12))
plt.scatter(data[:, 0], data[:, 1], c=self._prediction)
plt.title("k-means++")
plt.show()
return self._prediction
def get_label_mapping(self) -> dict:
"""
Matches the internal labels to pre-defined enums that contain semantics.
:return: A mapping that contains a Signal for every internally used label.
"""
mapping = dict()
print()
model_long = ((0.30, 0.85), (0.39, 0.93), (0.25, 0.99))
self._map(mapping, model_long, Signal.LONG)
model_short = ((0.10, 0.85), (0.15, 0.92), (0.08, 0.99))
self._map(mapping, model_short, Signal.SHORT)
model_pause_short = ((0.10, 0.05), (0.15, 0.15), (0.08, 0.10))
self._map(mapping, model_pause_short, Signal.PAUSE_SHORT)
model_pause_medium = ((0.30, 0.05), (0.39, 0.15), (0.25, 0.10))
self._map(mapping, model_pause_medium, Signal.PAUSE_MEDIUM)
model_pause_long = ((1.70, 0.05), (2.81, 0.15), (1.65, 0.10))
self._map(mapping, model_pause_long, Signal.PAUSE_LONG)
# TODO : Plot test points
print()
return mapping
def _map(self, _map: dict, tuples: list, signal: Signal) -> None:
prediction = self._kmeans.predict(np.array(tuples))
_sum = 0.0
for label in prediction:
_sum += label
print("%20s -> %f" % (signal, (_sum / len(prediction))))
label = round(_sum / len(prediction))
_map[label] = signal
```
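A hedged usage sketch (the `(duration, loudness)` pairs below are made up; in the pipeline they come from the decoded audio, and five clusters match the five `Signal` types mapped in `get_label_mapping`):
```python
# Illustrative only: cluster made-up (duration, loudness) pairs into the five signal types.
clustering = Clustering(number_of_clusters=5)
batch = [
    (0.09, 0.95), (0.11, 0.90),   # short beeps
    (0.31, 0.93), (0.28, 0.97),   # long beeps
    (0.10, 0.08), (0.12, 0.05),   # short pauses
    (0.30, 0.07), (0.33, 0.12),   # medium pauses
    (1.90, 0.06), (2.40, 0.10),   # long pauses
]
labels = clustering.train(batch)
mapping = clustering.get_label_mapping()
signals = [mapping[label] for label in labels]
```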
#### File: morsecode/src/input.py
```python
import csv
import sys
import wave
from _datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pyaudio
from scipy.fftpack import fft
# Based on the source code of 'Rattlesnake', a script for active noise cancellation.
# > Source: https://github.com/loehnertz/rattlesnake
# > Author: <NAME>
# Also based on the source code of 'Audio-Spectrum-Analyzer-in-Python'
# > Source: https://github.com/markjay4k/Audio-Spectrum-Analyzer-in-Python
# > Author: <NAME>
class Input:
def __init__(self):
# stream constants
self.CHUNK = 256
self.FORMAT = pyaudio.paInt16
self.CHANNELS = 1
self.RATE = 8000
self.pause = False
# Loudness area in which the signal is thought to be the same
self.TOLERANCE = 0.48 # 0.225
# stream object
self.pa = pyaudio.PyAudio()
def _read_waveaudio(self, file):
"""
Reads in the given wave file and returns a new PyAudio stream object from it.
:param file: The path to the file to read in
:return (waveform, stream): (The actual audio data as a waveform, the PyAudio object for said data)
"""
# Open the waveform from the command argument
try:
waveform = wave.open(file, 'rb')
except wave.Error:
print('The program can only process wave audio files (.wav)')
sys.exit()
except FileNotFoundError:
print('The chosen file does not exist')
sys.exit()
print("Sample width: %d" % waveform.getsampwidth())
print("Format: %d" % self.pa.get_format_from_width(waveform.getsampwidth()))
print("Channels: %d" % waveform.getnchannels())
print("Framerate: %d" % waveform.getframerate())
# Load PyAudio and create a useable waveform object
self.stream = self.pa.open(
format=self.pa.get_format_from_width(waveform.getsampwidth()),
channels=waveform.getnchannels(),
rate=waveform.getframerate(),
input=True,
output=False,
frames_per_buffer=self.CHUNK,
)
# self.stream = self.pa.open(
# format=self.FORMAT,
# channels=self.CHANNELS,
# rate=self.RATE,
# input=True,
# output=True,
# frames_per_buffer=self.CHUNK,
# )
# Return the waveform as well as the generated PyAudio stream object
return waveform # , stream
def _export(self, tuples: list) -> str:
filename = 'waveaudio_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + '.csv'
print(" - Writing read audio wave data to '%s'." % filename)
with open(filename, 'w', newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['endtime'] + ['loudness'])
for endtime, loudness in tuples:
writer.writerow([round(endtime, 4)] + [round(loudness, 4)])
return filename
def _plot_wave(self, amplitudes):
# Find out max value for normalization
maxPCM = 0
for pcmMax in np.abs(amplitudes):
            if not np.isnan(pcmMax) and pcmMax > maxPCM:
maxPCM = pcmMax
counter = 1
previous = 0.0
x = list()
y = list()
average = [0.0]
for pcmMax in np.array(amplitudes):
normalized = abs(pcmMax / maxPCM)
if abs(normalized - previous) > self.TOLERANCE:
# Signal has changed
if previous < 0.4 and normalized > 0.6:
y.append(np.min(average))
elif previous > 0.6 and normalized < 0.4:
y.append(np.max(average))
else:
y.append(np.mean(average))
x.append(counter)
average.clear()
average.append(normalized)
previous = normalized
counter += 1
print("Length amplitudes: %d, Length y: %d, Max amplitude: %f" % (len(amplitudes), len(y), maxPCM))
# absys = np.abs(amplitudes)
# print(len(absys))
# print("#############################################")
# for p, absy in zip(y, absys):
# print('p: %f, abs: %f' % (p, absy))
# print("#############################################")
# Display the plotted graph
fig = plt.figure(figsize=(25, 5))
ax = fig.add_subplot(111)
ax.plot(x, y, 'b')
plt.show()
return list(zip(x, y))
def read_file(self, filename: str) -> list:
"""
Reads a sound file and extracts data.
:param filename:
:return:
"""
print("Opening sound file '%s' ..." % filename)
# Read in the given file
# (waveform, stream) = self._read_waveaudio(filename)
waveform = self._read_waveaudio(filename)
fmt = self.pa.get_format_from_width(waveform.getsampwidth())
originals = list()
fouriers = list()
# Counting the iterations of the while-loop
iteration = 0
# Read a first chunk and continue to do so for as long as there is a stream to read in
original = waveform.readframes(self.CHUNK)
threshold = 0.0
while original != b'':
try:
# Read byte array as signed 16 bit PCM data
_bytes = np.frombuffer(original, dtype=np.int16)
originals.extend(_bytes)
# Read as floats
# if len(original) % 4 == 0:
# format = int(len(original) / 4) * 'f'
# unpacked = struct.unpack(format, original)
# try:
# data_int = struct.unpack(str(2 * self.CHUNK) + 'B', original)
# except struct.error:
# break
_fft = fft(_bytes)
# Reduce FFT to relevant values (exclude edge cases)
lower_bound: int = 10
upper_bound: int = round(0.45 * len(_fft))
fourier = (np.abs(_fft[0:self.CHUNK]) / (128 * self.CHUNK))[lower_bound:upper_bound]
if len(fourier) > 0:
fourier_max = np.max(fourier)
fourier_min = np.min(fourier)
# Set threshold to 50%
if fourier_max > 2 * threshold:
threshold = fourier_max / 2
if fourier_max > threshold: # self.THRESHOLD_FOURIER:
fouriers.append(fourier_max)
else:
fouriers.append(fourier_min)
##################################################################################
# print(fourier)
# print(np.abs(fourier[0:self.CHUNK]) / (128 * self.CHUNK))
# fig = plt.figure(figsize=(7, 4))
# ax = fig.add_subplot(111)
# xf = np.linspace(0, self.RATE, self.CHUNK)
# line_fft, = ax.semilogx(xf, np.random.rand(self.CHUNK), '-', lw=2)
# line_fft.set_ydata((np.abs(fourier[0:self.CHUNK]) / (128 * self.CHUNK)))
# plt.show()
# print(np.max((np.abs(fourier[0:self.CHUNK]) / (128 * self.CHUNK))))
# originals.extend(np.array(data_int, dtype='b')[::2] + 128)
##################################################################################
# Read in the next chunk of data
original = waveform.readframes(self.CHUNK)
# Add up one to the iterations
iteration += 1
except (KeyboardInterrupt, SystemExit):
break
# Stop the stream after there is no more data to read
self.stream.stop_stream()
self.stream.close()
# Plot input stream and derived max/min FFT
_, (ax1, ax2) = plt.subplots(2, figsize=(20, 6))
ax1.plot(originals, 'g')
ax2.plot(fouriers, 'r')
plt.show()
# Terminate PyAudio as well as the program
self.pa.terminate()
# sys.exit()
tuples = self._plot_wave(fouriers)
samplewidth = waveform.getsampwidth()
framerate = int(waveform.getframerate())
seconds = (iteration * samplewidth * self.CHUNK) / (2 * framerate)
print("Estimated duration (s): %f" % seconds)
# print("LENGTHS: iterations: %d, originals: %d, fouriers: %d, tuples: %d" % (iteration, len(originals), len(fouriers), len(tuples)))
# Transform time unit to seconds
factor = seconds / iteration
tuples_in_seconds = list()
for endtime, loudness in tuples:
tuples_in_seconds.append((factor * endtime, loudness))
# TODO: Normalize durations to ~12 WPM
# Return filename of the exported file
return self._export(tuples_in_seconds)
def record_microphone(self, resolution: int) -> None:
print(resolution)
pass
if __name__ == "__main__":
_input = Input()
_input.read_file('testfile.wav')
``` |
{
"source": "johannesheinz/triangulation",
"score": 2
} |
#### File: triangulation/tests/test_triangulation.py
```python
import pytest
from triangulation import calculate as calc
@pytest.fixture(scope='function')
def setup():
return 3
def test_fail(setup):
assert setup == 2
def test_calculate():
assert calc.check_intersection()
def test_calculate_2():
assert calc.check_orientation()
```
#### File: triangulation/triangulation/input.py
```python
import json
def read_coordinates(filename='coordinates.json'):
"""
    Reads polygon coordinates from a JSON file.
    Parameters
    ----------
    filename : str
        Path to a JSON file containing a list of [x, y] coordinate pairs.
    Returns
    -------
    dict
        Mapping from 1-based point index to the corresponding coordinate pair.
"""
    with open(filename, 'r') as input_file:
        # Example plot: http://www.wolframalpha.com/input/?i=plot+%5B+%5B1,+1%5D,+%5B1,+6%5D,+%5B10,+6%5D,+%5B10,+1%5D,+%5B7,+1%5D,+%5B7,+4%5D,+%5B4,+4%5D,+%5B4,+1%5D+%5D
        coordinates_json = json.loads(input_file.read())
# Store in dictionary
coordinates = {}
for index, coordinate in enumerate(coordinates_json):
coordinates[index + 1] = coordinate
    return coordinates
``` |
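For reference, the Wolfram Alpha link in the comment corresponds to a `coordinates.json` containing `[[1, 1], [1, 6], [10, 6], [10, 1], [7, 1], [7, 4], [4, 4], [4, 1]]`; a sketch of what `read_coordinates` returns for it:
```python
# Illustrative only: the 1-indexed dictionary produced for the example polygon above.
coordinates = read_coordinates('coordinates.json')
# {1: [1, 1], 2: [1, 6], 3: [10, 6], 4: [10, 1],
#  5: [7, 1], 6: [7, 4], 7: [4, 4], 8: [4, 1]}
```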
{
"source": "johanneshiry/Weinretter",
"score": 3
} |
#### File: Weinretter/backend/validation.py
```python
import requests
from schematics.exceptions import ValidationError
def validate_captcha(token):
r = requests.post(
"https://www.google.com/recaptcha/api/siteverify",
{"response": token, "secret": "<KEY>"},
)
json = r.json()
if not json["success"] or json["score"] < 0.5:
raise ValidationError("Captcha invalid")
``` |
{
"source": "johanneshk/pyleus",
"score": 2
} |
#### File: tests/cli/topologies_test.py
```python
import pytest
from pyleus.configuration import Configuration, DEFAULTS
import pyleus.cli.topologies
from pyleus.cli.topologies import kill_topology
from pyleus.cli.topologies import list_topologies
from pyleus.cli.topologies import submit_topology
from pyleus.testing import mock
@pytest.fixture
def configs():
"""Create a mock Configuration object with mock.sentinel values
Eg.
Configuration(
base_jar=mock.sentinel.base_jar,
config_file=mock.sentinel.config_file,
...
)
"""
return Configuration(**dict(
(k, getattr(mock.sentinel, k))
for k in DEFAULTS._asdict().keys()
))
def test_submit_topology(configs):
mock_storm_cluster = mock.Mock()
with mock.patch.object(pyleus.cli.topologies, 'StormCluster',
return_value=mock_storm_cluster) as mock_ctr:
submit_topology(mock.sentinel.jar_path, configs)
mock_ctr.assert_called_once_with(
configs.storm_cmd_path,
configs.nimbus_host,
configs.nimbus_port,
configs.verbose,
configs.jvm_opts,
)
mock_storm_cluster.submit.assert_called_once_with(mock.sentinel.jar_path)
def test_kill_topology(configs):
mock_storm_cluster = mock.Mock()
with mock.patch.object(pyleus.cli.topologies, 'StormCluster',
return_value=mock_storm_cluster) as mock_ctr:
kill_topology(configs)
mock_ctr.assert_called_once_with(
configs.storm_cmd_path,
configs.nimbus_host,
configs.nimbus_port,
configs.verbose,
configs.jvm_opts,
)
mock_storm_cluster.kill.assert_called_once_with(configs.topology_name, configs.wait_time)
def test_list_topologies(configs):
mock_storm_cluster = mock.Mock()
with mock.patch.object(pyleus.cli.topologies, 'StormCluster',
return_value=mock_storm_cluster) as mock_ctr:
list_topologies(configs)
mock_ctr.assert_called_once_with(
configs.storm_cmd_path,
configs.nimbus_host,
configs.nimbus_port,
configs.verbose,
configs.jvm_opts,
)
mock_storm_cluster.list.assert_called_once_with()
``` |
{
"source": "johanneshugger/speedrun",
"score": 4
} |
#### File: speedrun/speedrun/resource.py
```python
import os
import time
class WaiterMixin(object):
"""
Class to wait on a process to finish before starting the computation.
"""
@staticmethod
def is_alive(pid):
"""Checks if a process with PID `pid` is alive."""
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def wait_for_pid(self, pid, timeout=1):
"""Wait till the process with PID `pid` is dead."""
while True:
if not self.is_alive(pid):
break
else:
time.sleep(timeout)
continue
# noinspection PyUnresolvedReferences
def wait(self):
"""
This function blocks until a specified process has terminated. This can be useful for
pipelining multiple experiments. You may call this function anytime after `auto_setup` is
called, e.g. in the `__init__` of your experiment or before the training training loop.
The commandline arguments this function listens to are:
`--wait.for`: specifies the PID of the process to wait for.
`--wait.check_interval`: Interval to query the status of the process being waited for.
`--wait.verbose`: Whether to print info.
Example
-------
This is assuming that your file calls this function somewhere.
$ python my_script.py TEST-0 --wait.for 1234 --wait.check_interval 10 --wait.verbose True
This will wait for the process with PID 1234. While doing so, it will check its status
every 10 seconds.
Warning
-------
May destroy friendships.
"""
pid_to_wait_for = self.get_arg('wait.for', None)
timeout = self.get_arg('wait.check_interval', 1)
verbose = self.get_arg('wait.verbose', True)
if pid_to_wait_for is None:
return
if verbose:
message = f"Waiting for PID {pid_to_wait_for} to finish (my PID is {os.getpid()})..."
(self.print if hasattr(self, 'print') else print)(message)
self.wait_for_pid(pid_to_wait_for, timeout)
if verbose:
message = f"Done waiting for PID {pid_to_wait_for}. It's showtime!"
(self.print if hasattr(self, 'print') else print)(message)
return True
``` |
{
"source": "JohannesIBK/plyoox-bot",
"score": 2
} |
#### File: plyoox-bot/src/main.py
```python
import asyncio
import json
import logging
import time
import traceback
import aiohttp
import asyncpg
import discord
from asyncpg.pool import Pool
from discord.ext import commands
from utils.db.cache import BotCache
from utils.ext.context import Context
logger = logging.getLogger(__name__)
available_langs = ["de", "en"]
loaded_langs = {}
for _lang in available_langs:
with open(f"utils/languages/{_lang}/commands_{_lang}.json", 'r') as f:
lang = dict(json.load(f))
loaded_langs.update({_lang: lang})
cogs = [
"plugins.Owner",
"plugins.Moderation",
"plugins.Administration",
"plugins.Help",
"plugins.Leveling",
"plugins.Utilities",
"plugins.Commands",
"plugins.Errors",
"plugins.Fun",
"plugins.Events",
"plugins.Infos",
"plugins.Logging",
"plugins.Timers",
'plugins.SupportServer'
]
intents = discord.Intents.none()
intents.guild_messages = True
intents.bans = True
intents.reactions = True
intents.guilds = True
intents.members = True
async def get_prefix(bot, msg: discord.Message):
config = await bot.cache.get(msg.guild.id)
if config is not None:
return config.prefix
return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> ']
async def set_game(bot):
while True:
await bot.change_presence(
activity=discord.Activity(
type=discord.ActivityType.listening,
name='plyoox.net | +help'),
status=discord.Status.online)
await asyncio.sleep(3600)
class Plyoox(commands.Bot):
def __init__(self):
super().__init__(
command_prefix=get_prefix,
case_insensitive=True,
max_messages=10000,
allowed_mentions=discord.AllowedMentions(everyone=False, roles=False),
intents=intents
)
self.startTime = time.time()
self.version = 'v3.0.0'
self.owner_id = 263347878150406144
self.commandsCount = {}
self.cache: BotCache = BotCache(self)
self._lang = loaded_langs
async def on_ready(self):
self.gamesLoop = asyncio.create_task(set_game(self))
self.session = aiohttp.ClientSession(loop=self.loop)
for cog in cogs:
try:
self.load_extension(cog)
except commands.ExtensionAlreadyLoaded:
self.reload_extension(cog)
if self.user.id == 505433541916622850:
self.load_extension('plugins.BotLists')
logger.info(time.strftime("Started at %d.%m.%Y %H:%M:%S"))
logger.info(f"Boot-Time: {round(time.time() - self.startTime, 2)}s")
logger.info(f'{len(cogs)} Plugins loaded.')
print(f"Boot-Time: {round(time.time() - self.startTime, 2)}s")
print(f'Server: {len(self.guilds)} [{self.shard_count}]')
print(f'{len(cogs)} Plugins loaded.')
async def get_context(self, message, *, cls=Context):
return await super().get_context(message, cls=cls)
async def process_commands(self, message: discord.Message):
ctx = await self.get_context(message)
if ctx.command is None:
return
try:
await self.invoke(ctx)
finally:
await ctx.release()
async def on_command(self, ctx):
command = ctx.command.parent or ctx.command
command_name = command.name.lower()
if command_name not in self.commandsCount:
self.commandsCount[command_name] = 1
else:
self.commandsCount[command_name] += 1
async def lang(self, guild_id, modul, utils=False):
cache = await self.cache.get(guild_id)
if not cache:
if utils:
                return {**self._lang["en"][modul.lower()], **self._lang["en"]["utils"]}
else:
return self._lang["en"][modul.lower()]
guild_lang = cache.lang
if utils:
return {**self._lang[guild_lang][modul.lower()], **self._lang[guild_lang]["utils"]}
else:
return self._lang[guild_lang][modul.lower()]
async def create_db_pool(self, port):
self.db: Pool = await asyncpg.create_pool(
database='discord',
user='plyoox',
password='1',
port=port,
host="localhost"
)
async def on_error(self, event_method, *args, **kwargs):
logger.error(traceback.format_exc())
```
#### File: src/plugins/Events.py
```python
import logging
import typing
import discord
from discord.ext import commands
import main
from other import db
from utils.ext.formatter import formatMessage
class Events(commands.Cog):
def __init__(self, bot: main.Plyoox):
self.bot = bot
async def checkGuilds(self):
await self.bot.wait_until_ready()
guilds = self.bot.guilds
        db_guilds = {entry['sid'] for entry in
                     await self.bot.db.fetch('SELECT sid FROM config.guild')}
for guild in guilds:
if guild.id not in db_guilds:
await db.gotAddet(self.bot, guild)
bots = len(list(filter(lambda m: m.bot, guild.members)))
embed = discord.Embed(color=discord.Color.green(), title="**__SERVER JOINED__**")
embed.add_field(name="Name", value=guild.name, inline=False)
embed.add_field(name="Member", value=f'User: {len(guild.members)}\nBots: {bots}',
inline=False)
embed.add_field(name="Owner", value=guild.owner, inline=False)
embed.add_field(name="Region", value=str(guild.region), inline=False)
embed.add_field(name="Stats",
value=f'__Rollen:__ {len(guild.roles)}'
f'\n__TextChannel:__ {len(guild.text_channels)}\n'
f'__VoiceChannels:__ {len(guild.voice_channels)}',
inline=False)
await self.bot.get_channel(715260033926955070).send(embed=embed)
@commands.Cog.listener()
async def on_guild_channel_create(self, channel: typing.Union[discord.TextChannel,
discord.VoiceChannel, discord.CategoryChannel]):
guild = channel.guild
if not guild.me.guild_permissions.manage_channels:
return
mute_role_id = await self.bot.db.fetchval(
'SELECT muterole from automod.config WHERE sid = $1', guild.id)
mute_role = guild.get_role(mute_role_id)
if mute_role is None:
return
if isinstance(channel, discord.TextChannel):
if channel.permissions_synced:
return
overwrite = discord.PermissionOverwrite.from_pair(
deny=discord.Permissions(permissions=2099776),
allow=discord.Permissions(permissions=0))
return await channel.set_permissions(mute_role, overwrite=overwrite)
if isinstance(channel, discord.VoiceChannel):
if channel.permissions_synced:
return
overwrite = discord.PermissionOverwrite.from_pair(
deny=discord.Permissions(permissions=2097664),
allow=discord.Permissions(permissions=0))
return await channel.set_permissions(mute_role, overwrite=overwrite)
if isinstance(channel, discord.CategoryChannel):
overwrite = discord.PermissionOverwrite.from_pair(
deny=discord.Permissions(permissions=2099776),
allow=discord.Permissions(permissions=0))
return await channel.set_permissions(mute_role, overwrite=overwrite)
@commands.Cog.listener()
async def on_guild_role_delete(self, role: discord.Role):
guild = role.guild
roles = await self.bot.db.fetchrow(
'SELECT welcomer.joinroles, config.modroles, config.muterole, leveling.noxprole, '
'leveling.roles, config.helperroles '
'FROM automod.config LEFT JOIN config.leveling ON config.sid = leveling.sid '
'LEFT JOIN config.welcomer ON config.sid = welcomer.sid WHERE config.sid = $1',
role.guild.id)
if roles is None:
return
if roles['noxprole'] == role.id:
            return await self.bot.db.execute(
                "UPDATE config.leveling SET noxprole = NULL WHERE sid = $1",
                guild.id)
if (levelRoles := roles['roles']) is not None:
if role.id in levelRoles:
for lvlRole in levelRoles:
if lvlRole[0] == role.id:
return await self.bot.db.execute(
"UPDATE config.leveling SET roles = array_remove(roles, $1) "
"WHERE sid = $2",
lvlRole, guild.id)
if (modRoles := roles['modroles']) is not None:
if role.id in modRoles:
for modRole in modRoles:
if role.id == modRole:
return await self.bot.db.execute(
"UPDATE automod.config SET modroles = array_remove(modroles, $1) "
"WHERE sid = $2",
role.id, guild.id)
if (helperRoles := roles['helperroles']) is not None:
if role.id in helperRoles:
for helperRole in helperRoles:
if role.id == helperRole:
return await self.bot.db.execute(
"UPDATE automod.config SET helperroles = array_remove(helperroles, $1) "
"WHERE sid = $2",
role.id, guild.id)
if (joinRoles := roles['joinroles']) is not None:
if role.id in joinRoles:
for joinRole in joinRoles:
if role.id == joinRole:
return await self.bot.db.execute(
"UPDATE config.welcomer SET joinroles = array_remove(joinroles, $1) "
"WHERE sid = $2",
role.id, guild.id)
if role.id == roles['muterole']:
return await self.bot.db.execute(
"UPDATE automod.config SET muterole = NULL WHERE sid = $1", guild.id)
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
guild = channel.guild
channels = await self.bot.db.fetchrow(
'SELECT welcomer.joinchannel, welcomer.leavechannel, leveling.channel AS lvlchannel, '
'leveling.noxpchannels, config.logchannel FROM automod.config '
'LEFT JOIN config.leveling ON config.sid = leveling.sid LEFT JOIN config.welcomer '
'ON config.sid = welcomer.sid WHERE config.sid = $1',
guild.id)
if channels is None:
return
if channel.id == channels['joinchannel']:
return await self.bot.db.execute(
"UPDATE config.welcomer SET joinchannel = NULL WHERE sid = $1", guild.id)
elif channel.id == channels['leavechannel']:
return await self.bot.db.execute(
"UPDATE config.welcomer SET leavechannel = NULL WHERE sid = $1", guild.id)
elif channel.id == channels['logchannel']:
return await self.bot.db.execute(
"UPDATE automod.config SET logchannel = NULL WHERE sid = $1", guild.id)
elif channel.id == channels['lvlchannel']:
return await self.bot.db.execute(
"UPDATE config.leveling SET channel = NULL WHERE sid = $1", guild.id)
if (noXpChannels := channels['noxpchannels']) is not None:
if channel.id in noXpChannels:
for noXpChannel in noXpChannels:
if channel.id == noXpChannel:
return await self.bot.db.execute(
"UPDATE config.leveling SET noxpchannels = "
"array_remove(noxpchannels, $1) WHERE sid = $2",
noXpChannel, guild.id)
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild):
await db.gotAddet(self.bot, guild)
bots = len(list(filter(lambda m: m.bot, guild.members)))
embed = discord.Embed(color=discord.Color.green(), title="**__SERVER JOINED__**")
embed.add_field(name="Name", value=guild.name, inline=False)
embed.add_field(name="Member", value=f'User: {len(guild.members)}\nBots: {bots}',
inline=False)
embed.add_field(name="Owner", value=guild.owner, inline=False)
embed.add_field(name="Region", value=str(guild.region), inline=False)
embed.add_field(name="Stats",
                        value=f'__Roles:__ {len(guild.roles)}'
f'\n__TextChannel:__ {len(guild.text_channels)}'
f'\n__VoiceChannels:__ {len(guild.voice_channels)}',
inline=False)
await self.bot.get_channel(715260033926955070).send(embed=embed)
@commands.Cog.listener()
async def on_guild_remove(self, guild):
await self.bot.db.execute("DELETE FROM config.guild WHERE sid = $1", guild.id)
await self.bot.db.execute("DELETE FROM automod.users WHERE sid = $1", guild.id)
await self.bot.db.execute("DELETE FROM extra.timers WHERE sid = $1", guild.id)
await self.bot.db.execute("DELETE FROM extra.commands WHERE sid = $1", guild.id)
await self.bot.cache.remove(guild.id)
bots = len(list(filter(lambda m: m.bot, guild.members)))
        embed = discord.Embed(color=discord.Color.red(), title="**__SERVER LEFT__**")
embed.add_field(name="Name", value=guild.name, inline=False)
embed.add_field(name="Member", value=f'User: {len(guild.members)}\nBots: {bots}',
inline=False)
embed.add_field(name="Owner", value=guild.owner, inline=False)
embed.add_field(name="Region", value=guild.region, inline=False)
embed.add_field(name="Stats",
                        value=f'__Roles:__ {len(guild.roles)}'
f'\n__TextChannel:__ {len(guild.text_channels)}'
f'\n__VoiceChannels:__ {len(guild.voice_channels)}',
inline=False)
await self.bot.get_channel(715260033926955070).send(embed=embed)
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
guild = member.guild
data = await self.bot.db.fetchrow(
'SELECT joinmessage, joinroles, joinchannel, joinstate, modules.welcomer '
'FROM config.welcomer '
'FULL OUTER JOIN config.modules ON welcomer.sid = modules.sid WHERE welcomer.sid = $1',
guild.id)
if not data or not data['welcomer']:
return
if data["joinstate"] != "o" and data["joinmessage"]:
msg = formatMessage(data["joinmessage"], member)
if msg is None:
return
if data['joinstate'] == "d":
await member.send(msg)
elif data["joinstate"] == "c":
channel = guild.get_channel(data["joinchannel"])
await channel.send(msg)
if data["joinroles"]:
roles = []
for _role in data["joinroles"]:
_role = guild.get_role(_role)
if _role is not None:
roles.append(_role)
try:
await member.add_roles(*roles)
except discord.Forbidden:
logging.info(f"Could not add role to {member.id}")
punish_data = await self.bot.db.fetchrow(
'SELECT timers.type, config.muterole FROM automod.config INNER JOIN extra.timers '
'ON config.sid = timers.sid WHERE config.sid = $1 AND timers.objid = $2 AND '
'timers.type = 1',
guild.id, member.id)
if punish_data:
muterole_id: int = punish_data['muterole']
muterole = guild.get_role(muterole_id)
if muterole is not None:
await member.add_roles(muterole)
@commands.Cog.listener()
async def on_member_remove(self, member: discord.Member):
data = await self.bot.db.fetchrow(
'SELECT leavechannel, leavemessage, leavestate, modules.welcomer FROM config.welcomer '
'INNER JOIN config.modules ON welcomer.sid = modules.sid WHERE welcomer.sid = $1',
member.guild.id)
if not data or not data['welcomer'] or data["leavestate"] == "o":
return
if data["leavemessage"] and data["leavechannel"]:
channel = member.guild.get_channel(data["leavechannel"])
msg = formatMessage(data['leavemessage'], member)
if msg is not None and channel is not None:
await channel.send(msg)
def setup(bot):
bot.add_cog(Events(bot))
```
#### File: src/utils/automod.py
```python
import datetime
import json
import re
import time
import discord
from utils.enums.Timer import TimerType
from utils.ext import checks
from utils.ext import logs
from utils.ext import standards as std
from utils.ext.context import Context
DISCORD_INVITE = r'(discord(app\.com\/invite|\.com(\/invite)?|\.gg)\/?[a-zA-Z0-9-]{2,32})'
EXTERNAL_LINK = r'((https?:\/\/(www\.)?|www\.)[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6})'
EVERYONE_MENTION = r'@(here|everyone)'
discordRegex = re.compile(DISCORD_INVITE, re.IGNORECASE)
linkRegex = re.compile(EXTERNAL_LINK, re.IGNORECASE)
everyoneRegex = re.compile(EVERYONE_MENTION)
def find_word(word):
return re.compile(r'\b({0})\b'.format(word), flags=re.IGNORECASE).search
async def manage_punishment(ctx: Context, punishment, reason):
try:
await ctx.message.delete()
except discord.NotFound:
return
lang = await ctx.lang(module=["automod", "moderation"], utils=True)
config = await ctx.bot.cache.get(ctx.guild.id)
if not config.automod:
return
config = config.automod.config
user = ctx.author
reason = lang["word.automod"] + ": " + reason
punishment_str = ''
date = None
if punishment == 1:
if checks.hasPermsByName(ctx, ctx.me, 'kick_members'):
punishment_str = "kick"
await ctx.guild.kick(user, reason=reason)
elif punishment == 2:
if checks.hasPermsByName(ctx, ctx.me, 'ban_members'):
punishment_str = "ban"
await ctx.guild.ban(user, reason=reason)
elif punishment == 3:
if checks.hasPermsByName(ctx, ctx.me, 'ban_members'):
punishment_str = "tempban"
date = datetime.timedelta(seconds=config.bantime)
await ctx.db.execute(
'INSERT INTO extra.timers (sid, objid, type, time, data) VALUES '
'($1, $2, $3, $4, $5)',
ctx.guild.id, user.id, TimerType.BAN.value, config.bantime, json.dumps({ 'reason': reason }))
await ctx.guild.ban(user, reason=reason)
elif punishment == 4:
if checks.hasPermsByName(ctx, ctx.me, 'manage_roles'):
if config.muterole is None:
return
punishment_str = "tempmute"
date = datetime.timedelta(seconds=config.mutetime)
await ctx.db.execute(
'INSERT INTO extra.timers (sid, objid, type, time, data) VALUES '
'($1, $2, $3, $4, $5)',
ctx.guild.id, user.id, TimerType.MUTE.value, config.mutetime,
json.dumps({ 'reason': reason }))
await user.add_roles(config.muterole, reason=reason)
mod_embed = std.automodLog(ctx, punishment_str, lang, date, reason)
user_embed = std.dmEmbed(lang, reason=reason, guildName=ctx.guild.name,
punishType=punishment_str, duration=date)
await logs.createLog(ctx, mEmbed=mod_embed, uEmbed=user_embed, automod=True, user=user)
async def add_points(ctx: Context, new_points, reason, user: discord.Member = None):
try:
await ctx.message.delete()
except discord.NotFound:
pass
lang = await ctx.lang(module=["automod", "moderation"], utils=True)
config = await ctx.bot.cache.get(ctx.guild.id)
if user is not None and reason is not None:
reason = reason.replace(str(ctx.author) + ": ", "")
reason = reason or lang["log.noreason"][:-1]
if not config.automod:
return
config = config.automod.config
if user is None:
punished_user = ctx.author
else:
punished_user = user
await ctx.bot.db.execute(
'INSERT INTO automod.users (uid, sid, points, time, reason) VALUES ($1, $2, $3, $4, $5)',
punished_user.id, ctx.guild.id, new_points, time.time(), reason)
points = await ctx.bot.db.fetchval(
'SELECT sum(points) FROM automod.users WHERE uid = $1 AND sid = $2 AND $3 - time < 2592000',
punished_user.id, ctx.guild.id, time.time())
action = config.action
max_points = config.maxpoints
unix_time_mute = unix_time_ban = time.time() + 86400
if config.mutetime:
unix_time_mute = time.time() + config.mutetime
if config.bantime:
unix_time_ban = time.time() + config.bantime
if points >= max_points and action is not None:
if action == 1:
if checks.hasPermsByName(ctx, ctx.me, 'kick_members'):
await createAutomodLog(ctx, reason, lang, points=f"{points}/{max_points}",
punished_user=punished_user, punishment="kick", user=user)
await ctx.guild.kick(punished_user, reason=lang["word.automod"] + ": " + reason)
await ctx.bot.db.execute(
"DELETE FROM automod.users WHERE uid = $1 AND sid = $2",
punished_user.id, ctx.guild.id)
elif action == 2:
if checks.hasPermsByName(ctx, ctx.me, 'kick_members'):
await createAutomodLog(ctx, reason, lang, points=f"{points}/{max_points}",
punished_user=punished_user, punishment="ban", user=user)
await ctx.guild.ban(punished_user, reason=lang["word.automod"] + ": " + reason)
await ctx.bot.db.execute(
"DELETE FROM automod.users WHERE uid = $1 AND sid = $2",
punished_user.id, ctx.guild.id)
elif action == 3:
if checks.hasPermsByName(ctx, ctx.me, 'ban_members'):
date = datetime.timedelta(seconds=unix_time_ban)
await createAutomodLog(ctx, reason, lang, points=f"{points}/{max_points}",
date=date, punished_user=punished_user, punishment="tempban", user=user)
await ctx.guild.ban(punished_user, reason=lang["word.automod"] + ": " + reason)
await ctx.db.execute(
'INSERT INTO extra.timers (sid, objid, type, time, data) VALUES'
' ($1, $2, $3, $4, $5)',
ctx.guild.id, punished_user.id, 0, unix_time_ban,
json.dumps({ 'reason': lang["word.automod"] + ": " + lang["word.tempban"] }))
elif action == 4:
if checks.hasPermsByName(ctx, ctx.me, 'manage_roles'):
if config.muterole is None:
return
date = datetime.timedelta(seconds=unix_time_ban)
await createAutomodLog(ctx, reason, lang, points=f"{points}/{max_points}",
date=date, punished_user=punished_user, punishment="tempmute", user=user)
await punished_user.add_roles(config.muterole,
reason=lang["word.automod"] + ": " + reason)
await ctx.bot.db.execute(
"DELETE FROM automod.users WHERE uid = $1 AND sid = $2",
punished_user.id, ctx.guild.id)
await ctx.db.execute(
'INSERT INTO extra.timers (sid, objid, type, time, data) VALUES '
'($1, $2, $3, $4, $5)',
ctx.guild.id, punished_user.id, 1, unix_time_mute,
json.dumps({ 'reason': lang["word.automod"] + ": " + reason }))
else:
await createAutomodLog(ctx, reason, lang, points=f"{points}/{max_points}",
punished_user=punished_user, punishment="log", user=user)
async def createAutomodLog(ctx, reason, lang, *, points, punishment, date=None, user, punished_user):
if user is None:
mod_embed = std.automodLog(ctx, punishment, lang, date, reason,
points)
else:
mod_embed = std.automodLog(ctx, punishment, lang, date, reason,
points, extra_user=punished_user, mod=ctx.author)
user_embed = std.automodUserEmbed(lang, reason, ctx.guild.name, punishment,
points, date)
await logs.createLog(ctx=ctx, mEmbed=mod_embed, uEmbed=user_embed,
user=punished_user, automod=True)
async def automod(ctx: Context):
bot = ctx.bot
guild = ctx.guild
msg = ctx.message
channel = ctx.channel
config = await ctx.cache.get(ctx.guild.id)
modules = config.modules
automod_cf = config.automod
lang = await ctx.lang(module=["automod", "moderation"], utils=True)
if not modules.automod or not config.automod:
return
if automod_cf.blacklist.state:
blacklist = automod_cf.blacklist
for word in blacklist.words:
if find_word(word)(msg.content.lower()):
if not await checks.ignoresAutomod(ctx):
if channel.id in blacklist.whitelist:
return
if blacklist.state == 5:
return await add_points(ctx, blacklist.points,
lang["reason.blacklistedword"])
else:
return await manage_punishment(ctx, blacklist.state,
lang["reason.blacklistedword"])
if discordRegex.findall(msg.content):
invites = automod_cf.invites
if await checks.ignoresAutomod(ctx):
return
if not invites.state:
return
if channel.id in invites.whitelist:
return
whitelisted_servers = [guild.id]
whitelisted_servers.extend([int(guildID) for guildID in invites.partner])
has_invite = False
for invite in discordRegex.findall(msg.content):
try:
invite = await bot.fetch_invite(invite[0])
except discord.NotFound:
continue
except discord.Forbidden:
if invites.state == 5:
return await add_points(ctx, invites.points, lang["reason.invite"])
else:
return await manage_punishment(ctx, invites.state, lang["reason.invite"])
if invite.guild.id not in whitelisted_servers:
has_invite = True
break
if has_invite:
if invites.state == 5:
return await add_points(ctx, invites.points, lang["reason.invite"])
else:
return await manage_punishment(ctx, invites.state, lang["reason.invite"])
elif linkRegex.findall(msg.content):
links = automod_cf.links
if await checks.ignoresAutomod(ctx):
return
if not links.state:
return
if channel.id in links.whitelist:
return
links_list = ['discord.gg', 'discord.com', 'plyoox.net', 'wiki.plyoox.net']
links_list.extend(links.links)
links_obj = linkRegex.findall(msg.content)
for linkObj in links_obj:
link = linkObj[0].replace(linkObj[1], '')
if links.iswhitelist:
if link not in links_list:
if links.state == 5:
return await add_points(ctx, links.points, lang["reason.link"])
else:
return await manage_punishment(ctx, links.state, lang["reason.link"])
else:
if link in links_list:
if links.state == 5:
return await add_points(ctx, links.points, lang["reason.link"])
else:
return await manage_punishment(ctx, links.state, lang["reason.link"])
if not msg.clean_content.islower() and len(msg.content) > 15:
caps = automod_cf.caps
if await checks.ignoresAutomod(ctx):
return
len_caps = len(re.findall(r'[A-ZÄÖÜ]', msg.clean_content))
percent = len_caps / len(msg.content)
if percent > 0.7:
if not caps.state:
return
if channel.id in caps.whitelist:
return
if caps.state == 5:
return await add_points(ctx, caps.points, lang["reason.caps"])
else:
return await manage_punishment(ctx, caps.state, lang["reason.caps"])
if len(msg.raw_mentions) + len(msg.raw_role_mentions) + \
len(everyoneRegex.findall(msg.content)) >= 3:
mentions = automod_cf.mentions
if await checks.ignoresAutomod(ctx):
return
len_mentions = sum(m != ctx.author.id for m in msg.raw_mentions) \
+ len(msg.raw_role_mentions)
if not mentions.state:
return
if channel.id in mentions.whitelist:
return
if mentions.everyone:
len_mentions += len(everyoneRegex.findall(msg.content))
if len_mentions >= mentions.count:
if mentions.state == 5:
return await add_points(ctx, mentions.points, lang["reason.mentions"])
else:
return await manage_punishment(ctx, mentions.state, lang["reason.mentions"])
```
#### File: utils/ext/checks.py
```python
import discord
from discord.ext import commands
from utils.ext import context
def isMod(*, helper: bool = False):
async def predicate(ctx: context.Context):
perms = ctx.author.permissions_in(ctx.channel)
        if ctx.message.author.id == ctx.bot.owner_id or perms.manage_guild:
return True
data = await ctx.cache.get(ctx.guild.id)
config = data.automod
if data is None or data.automod is None:
return
roles = []
roles.extend(config.config.modroles)
if helper:
roles.extend(config.config.helperroles)
user_roles = [role.id for role in ctx.author.roles]
if any(role in roles for role in user_roles):
return True
else:
if helper:
                raise commands.MissingPermissions(['You must be a moderator or helper to run this command'])
            raise commands.MissingPermissions(['You must be a moderator to run this command'])
return commands.check(predicate)
def isAdmin():
async def predicate(ctx):
user = ctx.author
if user.id == ctx.bot.owner_id or user.guild_permissions.administrator:
return True
else:
raise commands.MissingPermissions(['administrator'])
return commands.check(predicate)
def hasPerms(**perms):
async def predicate(ctx):
        if ctx.message.author.id == ctx.bot.owner_id:
return True
permissions = ctx.channel.permissions_for(ctx.author)
missing = [perm for perm, value in perms.items() if getattr(permissions, perm, None) != value]
if not missing:
return True
raise commands.MissingPermissions(missing)
return commands.check(predicate)
def isBriiaan():
async def predicate(ctx):
return ctx.guild.id == 665609018793787422
return commands.check(predicate)
def isActive(modul):
async def predicate(ctx: context.Context):
config = await ctx.cache.get(ctx.guild.id)
if config is None or not config.modules:
return False
if modul == 'fun' and config.modules.fun:
return True
if modul == 'leveling' and config.modules.leveling:
return True
if modul == 'timers' and config.modules.timers:
return True
else:
raise commands.DisabledCommand
return commands.check(predicate)
def hasPermsByName(ctx, member, permsSearch):
if not isinstance(member, discord.Member):
return False
perms = [perm for perm, value in member.permissions_in(ctx.channel) if value]
if permsSearch.lower() in perms:
return True
async def ignoresAutomod(ctx: context.Context):
if not ctx.me.top_role.position > ctx.message.author.top_role.position:
return True
if ctx.author.permissions_in(ctx.channel).manage_messages:
return True
data = await ctx.cache.get(ctx.guild.id)
if data is None or data.automod.config is None:
return False
roles = []
roles.extend(data.automod.config.modroles)
roles.extend(data.automod.config.helperroles)
author_roles = [role.id for role in ctx.author.roles]
if any(roleID in roles for roleID in author_roles):
return True
```
#### File: utils/ext/cmds.py
```python
from discord.ext import commands
class CommandsExtension(commands.Command):
def __init__(self, func, **kwargs):
super().__init__(func, **kwargs)
self.showHelp = kwargs.get('showHelp', True)
if not isinstance(self.showHelp, bool):
            raise TypeError(f'Expected type bool, got type {type(self.showHelp)}')
self.category = kwargs.get('category', None)
self.help = kwargs.get('help', None)
def cmd(*args, **kwargs):
return commands.command(*args, **kwargs, cls=CommandsExtension)
class GroupExtension(commands.Group):
def __init__(self, func, **kwargs):
super().__init__(func, **kwargs)
self.showHelp = kwargs.get('showHelp', True)
if not isinstance(self.showHelp, bool):
            raise TypeError(f'Expected type bool, got type {type(self.showHelp)}')
self.category = kwargs.get('category', None)
self.help = kwargs.get('help', None)
def grp(*args, **kwargs):
return commands.group(*args, **kwargs, cls=GroupExtension)
```
#### File: utils/ext/context.py
```python
import asyncio
import io
import discord
from asyncpg.pool import Pool
from discord.ext import commands
from utils.db.cache import BotCache
from utils.ext import standards as std
class Context(commands.Context):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pool = self.bot.db
self._db = None
async def safe_send(self, content, *, escape_mentions=True, **kwargs):
if escape_mentions:
content = discord.utils.escape_mentions(content)
            content = content.replace("`", "")
if len(content) > 2000:
fp = io.BytesIO(content.encode())
kwargs.pop('file', None)
            return await self.reply(file=discord.File(fp, filename='message_too_long.txt'), **kwargs)
else:
            return await self.reply(content, **kwargs)
@property
def db(self) -> Pool:
return self._db if self._db else self.pool
@property
def cache(self) -> BotCache:
return self.bot.cache
async def lang(self, utils=False, module=None):
if module is None:
module = self.cog.qualified_name
if isinstance(module, list):
data = {}
for _module in module:
data |= await self.bot.lang(self.guild.id, _module.lower(), utils)
else:
data = await self.bot.lang(self.guild.id, module.lower(), utils)
return data
async def release(self):
if self._db is not None:
await self.bot.pool.release(self._db)
self._db = None
async def error(self, message: str, **kwargs):
return await self.reply(embed=std.getErrorEmbed(message), **kwargs)
async def embed(self, message: str, signed=False, **kwargs):
embed = std.getEmbed(message)
if signed:
embed.set_footer(icon_url=self.author.avatar_url, text=f'Requested by {self.author}')
return await self.reply(embed=embed, **kwargs)
async def prompt(self, message, *, timeout=60.0, delete_after=True, reacquire=True,
author_id=None):
if not self.channel.permissions_for(self.me).add_reactions:
            raise RuntimeError('The bot cannot add reactions.')
        fmt = f'{message}\n\nReact with {std.yes_emoji} to confirm or {std.no_emoji} ' \
              f'to cancel.'
author_id = author_id or self.author.id
msg = await self.reply('Ping!', embed=discord.Embed(color=std.normal_color, description=fmt))
confirm = None
def check(payload):
nonlocal confirm
if payload.message_id != msg.id or payload.user_id != author_id:
return False
codepoint = str(payload.emoji)
if codepoint == std.yes_emoji:
confirm = True
return True
elif codepoint == std.no_emoji:
confirm = False
return True
return False
for emoji in (std.yes_emoji, std.no_emoji):
await msg.add_reaction(emoji)
if reacquire:
await self.release()
try:
await self.bot.wait_for('raw_reaction_add', check=check, timeout=timeout)
except asyncio.TimeoutError:
confirm = None
try:
if delete_after:
await msg.delete()
finally:
return confirm
class FakeContext:
def __init__(self, bot, guild):
self.bot = bot
self.guild = guild
@property
def cache(self):
return self.bot.cache
@property
def me(self):
return self.guild.me
```
#### File: utils/ext/converters.py
```python
import discord
from discord.ext import commands
from utils.ext.context import Context
class ActionReason(commands.Converter):
reason: str
raw_reason: str
async def convert(self, ctx: Context, argument):
lang = await ctx.lang()
if len(argument) > 510 - len(str(ctx.author)):
raise commands.BadArgument(lang["converters.error.reasontolong"].format(a=str(len(argument)), m=str(len(str(ctx.author)))))
reason = str(ctx.author) + ": " + argument
self.reason = reason
self.raw_reason = argument
return self
def __str__(self):
return self.raw_reason
class BannedMember(commands.Converter):
async def convert(self, ctx: Context, argument):
lang = await ctx.lang()
if argument.isdigit():
member_id = int(argument, base=10)
try:
return await ctx.guild.fetch_ban(discord.Object(id=member_id))
except discord.NotFound as e:
raise commands.BadArgument(lang["converters.notbanned"]) from e
ban_list = await ctx.guild.bans()
user = discord.utils.find(lambda u: str(u.user) == argument, ban_list)
if user is None:
raise commands.BadArgument(lang["converters.notbanned"])
return user
class AdvancedMember(commands.Converter):
async def convert(self, ctx, argument):
try:
return await commands.UserConverter().convert(ctx, argument)
except commands.BadArgument:
try:
user_id = int(argument, base=10)
return await ctx.bot.fetch_user(user_id)
except (ValueError, discord.NotFound):
raise commands.UserNotFound(argument)
```
#### File: utils/ext/errors.py
```python
class FakeArgument:
def __init__(self, name):
self.name = name
class ModeratorCommand(Exception):
pass
```
#### File: utils/ext/standards.py
```python
import datetime
import discord
yes_emoji = "<:yes:703900321465892914>"
no_emoji = "<:no:703900335327936602>"
ola_emoji = '<:ola:703928958063738910>'
info_emoji = '<:info:703900394580869140>'
error_emoji = '<:close:820400830480908428>'
law_emoji = '<:law:704432646356467814>'
level_emoji = '<:level:704442402718482432>'
question_emoji = '\u2754'
coin_emoji = '<:coin:718169101821804564>'
upvote_emoji = '+' # '\u2795'
downvote_emoji = '-' # '\u2796'
# Status
online_emoji = "<:online:703932456289435659>"
offline_emoji = "<:offline:703932515349430292>"
idle_emoji = "<:idle:703932474501103706>"
dnd_emoji = "<:dnd:703932490485727232>"
streaming_emoji = '<:streaming:703900783543975956>'
bots_emoji = "<:bot:703924061004234752>"
clyde_emoji = '<:clyde:703927804563030076>'
# Badges
balance_emoji = '<:balance:820728032775372821>'
brilliance_emoji = '<:brilliance:820728032590692373>'
bravery_emoji = '<:bravery:820728032817709107>'
booster_emoji = '<:booster:703906501382766612>'
booster2_emoji = '<:booster2:703906512246145044>'
booster3_emoji = '<:booster3:703906523092484177>'
booster4_emoji = '<:booster4:703906534534414346>'
supporter_emoji = '<:supporter:703906781859938364>'
partner_emoji = '<:partner:703906812348334130>'
staff_emoji = '<:staff:703906898851790859>'
botdev_emoji = '<:botdev:703907073402077205>'
bughunter_badge = '<:bughunter:703906553325158440>'
bughunter2_badge = '<:bughunter2:747904909378060319>'
nitro_emoji = '<:nitro:703907279795257444>'
hypesquad_emoji = '<:hypesquad:314068430854684672>'
# Guild
channel_emoji = '<:channel:703906598879494257>'
lockedchannel_emoji = '<:locked_channel:703906663140294666>'
lockedVoice_emoji = '<:locked_voice:703906683918745630>'
voice_emoji = '<:voice:703906627065085973>'
verified_emoji = '<:verified:703900283192868894>'
members_emoji = '<:members:703906700889161748>'
owner_emoji = '<:owner:703906757344493649>'
invite_emoji = '<:invite:703906645113045062>'
richPresence_emoji = '<:richpresence:703907208340963378>'
mention_emoji = '<:mention:703907048760279072>'
folder_emoji = '<:folder:703900379104149534>'
nametag_emoji = '<:nametag:703936161089060895>'
globe_emoji = '\U0001F310'
stats_emoji = '\U0001F4CA'
list_emoji = '\U0001F4C4'
lock_emoji = '\U0001F512'
date_emoji = '\U0001F4C5'
inbox_emoji = '\U0001F4E5'
outbox_emoji = '\U0001F4E4'
tropy_emoji = '\U0001F3C6'
arrow = '<:arrow:762598973886169099>'
# Colors
error_color = 0xff0000
normal_color = 0x7289DA
help_color = 0x38b3e8
tag_color = 0x7adeaa
plyoox_color = 0x24c689
avatar_url = 'https://cdn.discordapp.com' \
             '/avatars/505433541916622850/ccc8ba894dd4188ecf37de0a53430f22.webp?size=1024'
def quote(string, shorten=False):
if shorten and len(string) > 1018:
string = string[:1015] + "..."
return "```" + str(string) + "```"
def cut(string: str, max_len=1024):
if len(string) > max_len:
string = string[:1021] + "..."
return string
# Embeds
def getEmbed(description: str = None, signed: discord.Member = None) -> discord.Embed:
"""
:param description: Message to send
:param signed: User who requested the command
:return: discord.Embed
"""
embed = discord.Embed(color=normal_color)
if description is not None:
embed.description = description
if signed:
embed.set_footer(icon_url=signed.avatar_url, text=f'Requested by {signed}')
return embed
def getErrorEmbed(errorMessage: str) -> discord.Embed:
"""
:param errorMessage: Message to send
:return: discord.Embed
"""
embed = discord.Embed(
color=error_color,
title=f'{error_emoji} __**ERROR**__',
description=errorMessage)
return embed
def dmEmbed(lang, *, reason, guildName, punishType,
duration: datetime.timedelta = None) -> discord.Embed:
embed = discord.Embed(color=normal_color)
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=lang['log.embed.footer'], icon_url=avatar_url)
duration = fixTimeDelta(duration)
reason = reason or ''
if reason:
reason = lang['log.embed.reason'].format(r=reason)
if punishType in ["ban", "tempban"]:
if duration:
embed.description = lang['log.embed.ban.temp'] \
.format(n=guildName, r=reason, d=duration)
else:
embed.description = lang['log.embed.ban.perm'] \
.format(n=guildName, r=reason)
elif punishType == 'kick':
embed.description = lang["log.embed.kick"].format(n=guildName, r=reason)
elif punishType in ["tempmute", "mute"]:
if duration:
embed.description = lang['log.embed.mute.temp'] \
.format(n=guildName, r=reason, d=duration)
else:
embed.description = lang['log.embed.mute.perm'] \
.format(n=guildName, r=reason)
return embed
def automodUserEmbed(lang, reason, guildName, type: str, points=None, duration: datetime.timedelta = None):
embed = discord.Embed(color=normal_color)
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=lang['log.embed.footer'], icon_url=avatar_url)
if duration is not None:
duration = fixTimeDelta(duration)
if type.replace("temp", "") in ["ban", "mute"]:
if type.startswith("temp"):
message = lang["log.message.temp"].format(d=duration)
else:
message = lang["log.message.perm"]
message = message.format(t=lang["word." + type.replace("temp", "")])
else:
message = lang["log.message." + type]
if type == "log":
message = message.format(p=points, n=guildName, r=reason)
else:
message = message.format(n=guildName, r=reason)
embed.description = message
return embed
def cmdEmbed(action, reason, lang: dict[str, str], mod=None, user=None,
amount=None, duration: datetime.timedelta = None) -> discord.Embed:
current_user = mod or user
reason = reason or lang['log.noreason']
embed = discord.Embed(color=discord.Color.orange(), title=lang["word." + action].upper())
embed.set_footer(text="Plyoox", icon_url=avatar_url)
embed.set_author(name=str(current_user), icon_url=current_user.avatar_url)
if user is not None:
embed.add_field(name=arrow + lang["word.user"], value=f"```{user} [{user.id}]```")
embed.set_author(name=str(user), icon_url=user.avatar_url)
if mod is not None:
embed.add_field(name=arrow + lang["word.moderator"], value=quote(mod))
if reason is not None:
embed.add_field(name=arrow + lang["word.reason"], value=quote(reason))
if duration is not None:
embed.add_field(name=arrow + lang["word.duration"], value=quote(fixTimeDelta(duration)))
if amount is not None:
embed.add_field(name=arrow + lang["word.amount"], value=quote(amount))
return embed
def automodLog(ctx, action, lang: dict[str, str], duration: datetime.timedelta,
reason, points=None, extra_user: discord.Member = None, mod: discord.Member = None):
user = extra_user or ctx.author
embed = discord.Embed(
color=plyoox_color,
title=lang["word." + action],
timestamp=datetime.datetime.utcnow()
)
embed.set_author(name=lang["word.automod"], icon_url=user.avatar_url)
embed.set_footer(text=f'ID: {user.id}')
embed.description = lang[action.replace("temp", "") + ".embed.description"].format(
u=user,
c=ctx.channel,
r=reason.replace(lang["word.automod"] + ": ", "")
)
if mod:
embed.add_field(name=arrow + lang["word.moderator"], value=ctx.author.mention)
if duration is not None:
embed.add_field(name=arrow + lang["word.punishuntil"],
value=quote(fixTimeDelta(duration)))
if points is not None:
embed.add_field(name=arrow + lang["word.points"], value=quote(points))
if not mod:
embed.add_field(name=arrow + lang["word.message"], value=quote(ctx.message.content))
return embed
def fixTimeDelta(time: datetime.timedelta) -> datetime.timedelta:
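    # Rounds a timedelta up to the next full minute when it ends within five
    # seconds of it (e.g. 0:09:59 -> 0:10:00), so displayed durations do not
    # look off by one second.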
if time is None:
return
try:
time_str = str(time)[-2:]
time_seconds = int(time_str)
except (IndexError, ValueError):
return time
if time_seconds >= 55:
time += datetime.timedelta(seconds=60-time_seconds)
return time
```
#### File: src/utils/mee6level.py
```python
import json
import asyncio
import aiohttp
BASE = "https://mee6.xyz/api/plugins/levels/leaderboard/"
GUILD_ID = "YOUR_ID"
user_list = []
LEVEL_API_URL = BASE + str(GUILD_ID) + "?page="
async def get_level():
counter = 0
fetched_all = False
async with aiohttp.ClientSession() as session:
while True:
async with session.get(LEVEL_API_URL + str(counter)) as res:
data = await res.json()
users = data["players"]
counter += 1
for user in users:
if user["level"] == 0:
fetched_all = True
break
user_list.append({"uid": int(user["id"]), "xp": user["xp"]})
if fetched_all:
break
async def main():
await get_level()
print(len(user_list))
    with open("users.json", "w") as f:
        json.dump(user_list, fp=f)
asyncio.run(main())
``` |
{
"source": "johannesjh/fava",
"score": 3
} |
#### File: fava/ext/__init__.py
```python
import ast
import importlib
import inspect
import sys
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from fava.helpers import BeancountError
class FavaExtensionError(BeancountError):
"""Error in one of Fava's extensions."""
class FavaExtensionBase:
"""Base class for extensions for Fava.
Any extension should inherit from this class. :func:`find_extension` will
discover all subclasses of this class in the specified modules.
"""
report_title: Optional[str] = None
def __init__(self, ledger, config=None) -> None:
"""
Base init function.
Args:
ledger: Input ledger file.
config: Configuration options string passed from the
beancount file's 'fava-extension' line.
"""
self.ledger = ledger
try:
self.config = ast.literal_eval(config)
except ValueError:
self.config = None
self.name = self.__class__.__qualname__
def run_hook(self, event, *args) -> None:
"""Run a hook.
Args:
event: One of the possible events.
"""
try:
getattr(self, event)(*args)
except AttributeError:
pass
def find_extensions(
base_path: str, name: str
) -> Tuple[List[Type[FavaExtensionBase]], List[FavaExtensionError]]:
"""Find extensions in a module.
Args:
base_path: The module can be relative to this path.
name: The name of the module containing the extensions.
Returns:
A tuple (classes, errors) where classes is a list of subclasses of
:class:`FavaExtensionBase` found in ``name``.
"""
classes = []
sys.path.insert(0, base_path)
try:
module = importlib.import_module(name)
except ImportError:
error = FavaExtensionError(
None, f'Importing module "{name}" failed.', None
)
return (
[],
[error],
)
for _, obj in inspect.getmembers(module, inspect.isclass):
if issubclass(obj, FavaExtensionBase) and obj != FavaExtensionBase:
classes.append(obj)
sys.path.pop(0)
if not classes:
error = FavaExtensionError(
None,
f'Module "{name}" contains no extensions.',
None,
)
return (
[],
[error],
)
return classes, []
```
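The `find_extensions` helper above is how Fava discovers extension classes in a user-supplied module. A minimal usage sketch, assuming a module named `my_fava_extension` exists next to the ledger file (both the path and the module name here are illustrative assumptions, not taken from the source):
```python
# Illustrative only: the path and module name are assumptions.
from fava.ext import find_extensions

classes, errors = find_extensions("/path/to/ledger/dir", "my_fava_extension")
for cls in classes:
    print(cls.__qualname__, cls.report_title)
for err in errors:
    print(err.message)
```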
#### File: ext/portfolio_list/__init__.py
```python
import re
from beancount.core.data import Open
from beancount.core.number import Decimal
from beancount.core.number import ZERO
from fava.ext import FavaExtensionBase
from fava.helpers import FavaAPIException
from fava.template_filters import cost_or_value
class PortfolioList(FavaExtensionBase): # pragma: no cover
"""Sample Extension Report that just prints out an Portfolio List."""
report_title = "Portfolio List"
def portfolio_accounts(self):
"""An account tree based on matching regex patterns."""
tree = self.ledger.root_tree
portfolios = []
for option in self.config:
opt_key = option[0]
if opt_key == "account_name_pattern":
portfolio = self._account_name_pattern(tree, option[1])
elif opt_key == "account_open_metadata_pattern":
portfolio = self._account_metadata_pattern(
tree, option[1][0], option[1][1]
)
else:
raise FavaAPIException("Portfolio List: Invalid option.")
portfolios.append(portfolio)
return portfolios
def _account_name_pattern(self, tree, pattern):
"""
Returns portfolio info based on matching account name.
Args:
tree: Ledger root tree node.
pattern: Account name regex pattern.
Return:
Data structured for use with a querytable (types, rows).
"""
title = "Account names matching: '" + pattern + "'"
selected_accounts = []
regexer = re.compile(pattern)
for acct in tree.keys():
if (regexer.match(acct) is not None) and (
acct not in selected_accounts
):
selected_accounts.append(acct)
selected_nodes = [tree[x] for x in selected_accounts]
portfolio_data = self._portfolio_data(selected_nodes)
return title, portfolio_data
def _account_metadata_pattern(self, tree, metadata_key, pattern):
"""
Returns portfolio info based on matching account open metadata.
Args:
tree: Ledger root tree node.
metadata_key: Metadata key to match for in account open.
pattern: Metadata value's regex pattern to match for.
Return:
Data structured for use with a querytable - (types, rows).
"""
title = (
"Accounts with '"
+ metadata_key
+ "' metadata matching: '"
+ pattern
+ "'"
)
selected_accounts = []
regexer = re.compile(pattern)
for entry in self.ledger.all_entries_by_type[Open]:
if (metadata_key in entry.meta) and (
regexer.match(entry.meta[metadata_key]) is not None
):
selected_accounts.append(entry.account)
selected_nodes = [tree[x] for x in selected_accounts]
portfolio_data = self._portfolio_data(selected_nodes)
return title, portfolio_data
def _portfolio_data(self, nodes):
"""
Turn a portfolio of tree nodes into querytable-style data.
Args:
nodes: Account tree nodes.
Return:
types: Tuples of column names and types as strings.
rows: Dictionaries of row data by column names.
"""
operating_currency = self.ledger.options["operating_currency"][0]
acct_type = ("account", str(str))
bal_type = ("balance", str(Decimal))
alloc_type = ("allocation", str(Decimal))
types = [acct_type, bal_type, alloc_type]
rows = []
portfolio_total = ZERO
for node in nodes:
row = {}
row["account"] = node.name
balance = cost_or_value(node.balance)
if operating_currency in balance:
balance_dec = balance[operating_currency]
portfolio_total += balance_dec
row["balance"] = balance_dec
rows.append(row)
for row in rows:
if "balance" in row:
row["allocation"] = round(
(row["balance"] / portfolio_total) * 100, 2
)
return types, rows
```
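Because `self.config` is parsed with `ast.literal_eval` and then iterated as `(key, value)` pairs in `portfolio_accounts`, the configuration string passed on the ledger's `fava-extension` line has to evaluate to a Python literal such as a list of tuples. A hedged sketch of that literal (the regex patterns and the metadata key below are invented examples):
```python
# Example configuration literal for PortfolioList; the patterns and metadata
# key are illustrative assumptions, not taken from the source.
example_config = [
    ("account_name_pattern", "^Assets:(US|CA):"),
    ("account_open_metadata_pattern", ("portfolio", "true")),
]
# The first entry selects accounts whose names match the regex; the second
# selects accounts whose Open directive carries metadata portfolio: "true".
```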
#### File: src/fava/json_api.py
```python
import functools
import os
import shutil
from os import path
from os import remove
from typing import List
from flask import Blueprint
from flask import get_template_attribute
from flask import jsonify
from flask import render_template
from flask import request
from fava.context import g
from fava.core.documents import filepath_in_document_folder
from fava.core.documents import is_document_or_import_file
from fava.core.misc import align
from fava.helpers import FavaAPIException
from fava.serialisation import deserialise
from fava.serialisation import serialise
json_api = Blueprint("json_api", __name__) # pylint: disable=invalid-name
def get_api_endpoint(func):
"""Register a GET endpoint."""
@json_api.route(f"/{func.__name__}", methods=["GET"])
@functools.wraps(func)
def _wrapper(*args, **kwargs):
return jsonify({"success": True, "data": func(*args, **kwargs)})
return _wrapper
def put_api_endpoint(func):
"""Register a PUT endpoint."""
@json_api.route(f"/{func.__name__}", methods=["PUT"])
@functools.wraps(func)
def _wrapper(*args, **kwargs):
request_data = request.get_json()
if request_data is None:
raise FavaAPIException("Invalid JSON request.")
res = func(request_data, *args, **kwargs)
return jsonify({"success": True, "data": res})
return _wrapper
def delete_api_endpoint(func):
"""Register a DELETE endpoint."""
route = func.__name__.replace("delete_", "")
@json_api.route(f"/{route}", methods=["DELETE"])
@functools.wraps(func)
def _wrapper(*args, **kwargs):
return jsonify({"success": True, "data": func(*args, **kwargs)})
return _wrapper
def json_response(func):
"""Jsonify the response."""
@functools.wraps(func)
def _wrapper(*args, **kwargs):
json_data = func(*args, **kwargs)
if "success" not in json_data:
json_data["success"] = True
return jsonify(json_data)
return _wrapper
@json_api.errorhandler(FavaAPIException)
@json_response
def _json_api_exception(error):
return {"success": False, "error": error.message}
@json_api.errorhandler(OSError)
@json_response
def _json_api_oserror(error):
return {"success": False, "error": error.strerror}
@get_api_endpoint
def changed() -> bool:
"""Check for file changes."""
return g.ledger.changed()
@get_api_endpoint
def errors() -> int:
"""Number of errors."""
return len(g.ledger.errors)
@get_api_endpoint
def payee_accounts() -> List[str]:
"""Rank accounts for the given payee."""
payee = request.args.get("payee", "")
return g.ledger.attributes.payee_accounts(payee)
@get_api_endpoint
def query_result():
"""Render a query result to HTML."""
query = request.args.get("query_string", "")
table = get_template_attribute("_query_table.html", "querytable")
contents, types, rows = g.ledger.query_shell.execute_query(query)
if contents:
if "ERROR" in contents:
raise FavaAPIException(contents)
table = table(contents, types, rows)
if types and g.ledger.charts.can_plot_query(types):
return {
"chart": g.ledger.charts.query(types, rows),
"table": table,
}
return {"table": table}
@get_api_endpoint
def extract():
"""Extract entries using the ingest framework."""
entries = g.ledger.ingest.extract(
request.args.get("filename"), request.args.get("importer")
)
return list(map(serialise, entries))
@get_api_endpoint
def context():
"""Entry context."""
entry_hash = request.args.get("entry_hash")
entry, balances, slice_, sha256sum = g.ledger.context(entry_hash)
content = render_template("_context.html", entry=entry, balances=balances)
return {"content": content, "sha256sum": sha256sum, "slice": slice_}
@get_api_endpoint
def move() -> str:
"""Move a file."""
if not g.ledger.options["documents"]:
raise FavaAPIException("You need to set a documents folder.")
account = request.args.get("account")
new_name = request.args.get("newName")
filename = request.args.get("filename")
if not account:
raise FavaAPIException("No account specified.")
if not filename:
raise FavaAPIException("No filename specified.")
if not new_name:
raise FavaAPIException("No new filename given.")
new_path = filepath_in_document_folder(
g.ledger.options["documents"][0], account, new_name, g.ledger
)
if not path.isfile(filename):
raise FavaAPIException(f"Not a file: '{filename}'")
if path.exists(new_path):
raise FavaAPIException(f"Target file exists: '{new_path}'")
if not path.exists(path.dirname(new_path)):
os.makedirs(path.dirname(new_path), exist_ok=True)
shutil.move(filename, new_path)
return f"Moved {filename} to {new_path}."
@get_api_endpoint
def payee_transaction():
"""Last transaction for the given payee."""
entry = g.ledger.attributes.payee_transaction(request.args.get("payee"))
return serialise(entry)
@put_api_endpoint
def source(request_data) -> str:
"""Write one of the source files and return the updated sha256sum."""
return g.ledger.file.set_source(
request_data.get("file_path"),
request_data.get("source"),
request_data.get("sha256sum"),
)
@put_api_endpoint
def source_slice(request_data) -> str:
"""Write an entry source slice and return the updated sha256sum."""
return g.ledger.file.save_entry_slice(
request_data.get("entry_hash"),
request_data.get("source"),
request_data.get("sha256sum"),
)
@put_api_endpoint
def format_source(request_data) -> str:
"""Format beancount file."""
return align(
request_data["source"], g.ledger.fava_options["currency-column"]
)
@delete_api_endpoint
def delete_document() -> str:
"""Delete a document."""
filename = request.args.get("filename")
if not filename:
raise FavaAPIException("No filename specified.")
if not is_document_or_import_file(filename, g.ledger):
raise FavaAPIException("No valid document or import file.")
if not path.exists(filename):
raise FavaAPIException(f"{filename} does not exist.")
remove(filename)
return f"Deleted {filename}."
@json_api.route("/add_document", methods=["PUT"])
@json_response
def add_document():
"""Upload a document."""
if not g.ledger.options["documents"]:
raise FavaAPIException("You need to set a documents folder.")
upload = request.files["file"]
if not upload:
raise FavaAPIException("No file uploaded.")
filepath = filepath_in_document_folder(
request.form["folder"],
request.form["account"],
upload.filename,
g.ledger,
)
directory, filename = path.split(filepath)
if path.exists(filepath):
raise FavaAPIException(f"{filepath} already exists.")
if not path.exists(directory):
os.makedirs(directory, exist_ok=True)
upload.save(filepath)
if request.form.get("hash"):
g.ledger.file.insert_metadata(
request.form["hash"], "document", filename
)
return {"data": f"Uploaded to {filepath}"}
@put_api_endpoint
def attach_document(request_data):
"""Attach a document to an entry."""
filename = request_data["filename"]
entry_hash = request_data["entry_hash"]
g.ledger.file.insert_metadata(entry_hash, "document", filename)
return f"Attached '{filename}' to entry."
@put_api_endpoint
def add_entries(request_data):
"""Add multiple entries."""
try:
entries = [deserialise(entry) for entry in request_data["entries"]]
except KeyError as error:
raise FavaAPIException(f"KeyError: {error}") from error
g.ledger.file.insert_entries(entries)
return f"Stored {len(entries)} entries."
```
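The decorator factories at the top of this file register a view function on the blueprint under its own name and wrap the return value in the standard `{"success": ..., "data": ...}` envelope. As a hedged sketch, adding another read-only endpoint would look roughly like this (the endpoint name and payload are invented for illustration):
```python
# Hypothetical endpoint, for illustration only.
@get_api_endpoint
def ledger_title() -> str:
    """Return the ledger's title option."""
    return g.ledger.options["title"]
```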
#### File: fava/plugins/tag_discovered_documents.py
```python
from beancount.core.data import Document
__plugins__ = ["tag_discovered_documents"]
def tag_discovered_documents(entries, options_map):
"""Tag automatically added documents."""
errors = []
if "documents" not in options_map or not options_map["documents"]:
return entries, errors
for index, entry in enumerate(entries):
if isinstance(entry, Document) and entry.meta["lineno"] == 0:
tags = (
set(entry.tags).union(["discovered"])
if entry.tags
else {"discovered"}
)
entries[index] = entry._replace(tags=tags)
return entries, errors
```
#### File: fava/tests/test_core_inventory.py
```python
from beancount.core.amount import A
from fava.core.inventory import CounterInventory
def test_add():
inv = CounterInventory()
key = "KEY"
inv.add(key, 10)
assert len(inv) == 1
inv.add(key, -10)
assert inv.is_empty()
def test_add_amount():
inv = CounterInventory()
inv.add_amount(A("10 USD"))
inv.add_amount(A("30 USD"))
assert len(inv) == 1
inv.add_amount(A("-40 USD"))
assert inv.is_empty()
inv.add_amount(A("10 USD"))
inv.add_amount(A("20 CAD"))
inv.add_amount(A("10 USD"))
assert len(inv) == 2
inv.add_amount(A("-20 CAD"))
assert len(inv) == 1
def test_add_inventory():
inv = CounterInventory()
inv2 = CounterInventory()
inv3 = CounterInventory()
inv.add_amount(A("10 USD"))
inv2.add_amount(A("30 USD"))
inv3.add_amount(A("-40 USD"))
inv.add_inventory(inv2)
assert len(inv) == 1
inv.add_inventory(inv3)
assert inv.is_empty()
inv = CounterInventory()
inv.add_inventory(inv2)
assert len(inv) == 1
``` |
{
"source": "johannesjmeyer/pennylane-pyquest",
"score": 2
} |
#### File: pennylane-pyquest/pennylane_pyquest/pyquest_mixed.py
```python
import numpy as np
import pyquest_cffi as pqc
from .pyquest_device import PyquestDevice
from .utils import reorder_matrix, reorder_state
class DensityQuregContext:
def __init__(self, wires):
self.wires = wires
def __enter__(self):
self.env = pqc.utils.createQuestEnv()()
self.qureg = pqc.utils.createDensityQureg()(self.wires, env=self.env)
return self
def __exit__(self, etype, value, traceback):
pqc.utils.destroyQureg()(self.qureg, env=self.env)
pqc.utils.destroyQuestEnv()(self.env)
class PyquestMixed(PyquestDevice):
_capabilities = {"mixed_state": True}
operations = {
"BasisState",
"QubitStateVector",
"QubitUnitary",
"PauliX",
"PauliY",
"PauliZ",
"MultiRZ",
"PauliRot",
"Hadamard",
"S",
"T",
"CNOT",
"SWAP",
"CZ",
"PhaseShift",
"RX",
"RY",
"RZ",
"CRX",
"CRY",
"CRZ",
"MixDephasing",
"MixDepolarising",
"MixDamping",
"MixKrausMap",
}
def __init__(self, wires, *, shots=1000, analytic=True, error_model=None):
"""
Args:
error_model(operation->list[operation]): A function that is called for every operation in the
queue and returns a list of operations that represent additional errors.
"""
super().__init__(wires, shots=shots, analytic=analytic)
self.error_model = error_model
def reset(self):
super().reset()
self._density_matrix = None
self._probs = None
def _qureg_context(self):
return DensityQuregContext(self.num_wires)
def _init_state_vector(self, state, context):
state = reorder_state(state)
matrix = np.outer(state.conj(), state).ravel()
pqc.cheat.setDensityAmps()(
qureg=context.qureg,
startind=0,
reals=np.real(matrix),
imags=np.imag(matrix),
numamps=len(matrix),
)
def _preprocess_operations(self, operations):
if not self.error_model:
return operations
out = []
for op in operations:
out.append(op)
out = out + self.error_model(op)
return out
def _extract_information(self, context):
self._density_matrix = reorder_matrix(pqc.cheat.getDensityMatrix()(context.qureg))
self._probs = np.real(np.diag(self._density_matrix))
@property
def state(self):
return self._density_matrix
@property
def density_matrix(self):
return self._density_matrix
``` |
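The `error_model` keyword of `PyquestMixed` is a callable that receives each queued operation and returns a list of extra noise operations to insert after it. Below is a minimal sketch of such a model; the noise class name is a placeholder, since the concrete operation classes the plugin exposes for `MixDepolarising` and the other channels are not shown in this file:
```python
# Sketch of an error_model callable; DepolarisingChannel is a placeholder for
# whatever operation class the plugin exposes for "MixDepolarising".
def simple_error_model(op):
    # Leave state preparations untouched; add weak depolarising noise after
    # every other gate, acting on the same wires the gate acted on.
    if op.name in ("BasisState", "QubitStateVector"):
        return []
    return [DepolarisingChannel(0.01, wires=w) for w in op.wires]

# dev = PyquestMixed(wires=2, error_model=simple_error_model)
```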
{
"source": "johannesjmeyer/pennylane-sf",
"score": 2
} |
#### File: pennylane-sf/pennylane_sf/simulator.py
```python
import abc
import numpy as np
from pennylane import Device
import strawberryfields as sf
from ._version import __version__
class StrawberryFieldsSimulator(Device):
r"""Abstract StrawberryFields simulator device for PennyLane.
Args:
wires (int): the number of modes to initialize the device in
analytic (bool): indicates if the device should calculate expectations
and variances analytically
shots (int): Number of circuit evaluations/random samples used
to estimate expectation values of observables. If ``analytic=True``,
this setting is ignored.
hbar (float): the convention chosen in the canonical commutation
relation :math:`[x, p] = i \hbar`
"""
name = 'Strawberry Fields Simulator PennyLane plugin'
pennylane_requires = '>=0.6.0'
version = __version__
author = '<NAME>ac'
short_name = 'strawberryfields'
_operation_map = {}
_observable_map = {}
def __init__(self, wires, *, analytic=True, shots=1000, hbar=2):
super().__init__(wires, shots)
self.hbar = hbar
self.prog = None
self.eng = None
self.q = None
self.state = None
self.samples = None
self.analytic = analytic
def execution_context(self):
"""Initialize the engine"""
self.reset()
self.prog = sf.Program(self.num_wires)
self.q = self.prog.register
return self.prog
def apply(self, operation, wires, par):
"""Apply a quantum operation.
Args:
operation (str): name of the operation
wires (Sequence[int]): subsystems the operation is applied on
par (tuple): parameters for the operation
"""
# convert PennyLane parameter conventions to
# Strawberry Fields conventions
if operation == "DisplacedSqueezedState":
sf_par = (par[0]*np.exp(par[1]*1j), par[2], par[3])
elif operation == "CatState":
sf_par = (par[0]*np.exp(par[1]*1j), par[2])
else:
sf_par = par
op = self._operation_map[operation](*sf_par)
op | [self.q[i] for i in wires] #pylint: disable=pointless-statement
@abc.abstractmethod
def pre_measure(self):
"""Run the engine"""
raise NotImplementedError
def expval(self, observable, wires, par):
"""Evaluate the expectation of an observable.
Args:
observable (str): name of the observable
wires (Sequence[int]): subsystems the observable is evaluated on
par (tuple): parameters for the observable
Returns:
float: expectation value
"""
ex, var = self._observable_map[observable](self.state, wires, par)
if not self.analytic:
# estimate the expectation value
# use central limit theorem, sample normal distribution once, only ok
# if shots is large (see https://en.wikipedia.org/wiki/Berry%E2%80%93Esseen_theorem)
ex = np.random.normal(ex, np.sqrt(var / self.shots))
return ex
def var(self, observable, wires, par):
"""Evaluate the variance of an observable.
Args:
observable (str): name of the observable
wires (Sequence[int]): subsystems the observable is evaluated on
par (tuple): parameters for the observable
Returns:
float: variance value
"""
_, var = self._observable_map[observable](self.state, wires, par)
return var
def cov(self, observable1, wires1, par1, observable2, wires2, par2):
if observable1 != "NumberOperator" or observable2 != "NumberOperator":
raise Exception("Only NumberOperator supported so far.")
ev1 = self.expval(observable1, wires1, par1)
ev2 = self.expval(observable2, wires2, par2)
data = self.state.all_fock_probs()
photon_numbers = np.zeros_like(data, dtype=int)
wires = sorted([wires1[0], wires2[0]])
photon_numbers = np.moveaxis(photon_numbers, wires[0], 0)
photon_numbers = np.moveaxis(photon_numbers, wires[1], 1)
for i in range(data.shape[wires1[0]]):
for j in range(data.shape[wires2[0]]):
photon_numbers[i, j, ...] = i * j
photon_numbers = np.moveaxis(photon_numbers, 1, wires[1])
photon_numbers = np.moveaxis(photon_numbers, 0, wires[0])
# print("wires = ", wires)
# for i in range(photon_numbers.shape[0]):
# for j in range(photon_numbers.shape[1]):
# for k in range(photon_numbers.shape[2]):
# print("photon_numbers[{0}, {1}, {2}] = {3}".format(i, j, k, photon_numbers[i, j, k]))
ev12 = np.abs(np.sum(photon_numbers * data))
return ev12 - ev1 * ev2
def reset(self):
"""Reset the device"""
sf.hbar = self.hbar
if self.eng is not None:
self.eng.reset()
self.eng = None
if self.state is not None:
self.state = None
if self.q is not None:
self.q = None
if self.prog is not None:
self.prog = None
if self.samples is not None:
self.samples = None
@property
def operations(self):
"""Get the supported set of operations.
Returns:
set[str]: the set of PennyLane operation names the device supports
"""
return set(self._operation_map.keys())
@property
def observables(self):
"""Get the supported set of observables.
Returns:
set[str]: the set of PennyLane observable names the device supports
"""
return set(self._observable_map.keys())
``` |
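The non-analytic branch of `expval` above models shot noise by drawing a single normal sample around the analytic mean, with the variance scaled down by the number of shots. A standalone toy illustration of that estimator (the values are made up):
```python
# Toy illustration of the central-limit-theorem estimator used in expval.
import numpy as np

analytic_mean, analytic_var, shots = 0.3, 0.5, 1000
estimate = np.random.normal(analytic_mean, np.sqrt(analytic_var / shots))
```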
{
"source": "johannesjmeyer/qml",
"score": 3
} |
#### File: qml/demonstrations/qsim_beyond_classical.py
```python
import pennylane as qml
from pennylane_cirq import ops
import cirq
import numpy as np
######################################################################
# To start, we need to define the qubit grid that we will use for mimicking
# Google's Sycamore chip, although we will only use 12 qubits instead of
# the 54 that the actual chip has. This is so that you can run
# this demo without having access to a supercomputer!
#
# We define the 12 qubits in a rectangular grid, setting the coordinates for
# each qubit following the paper's supplementary dataset [#Martinis2020]_. We also create
# a mapping between the wire number and the Cirq qubit to more easily reference
# specific qubits later. Feel free to play around with different grids and
# number of qubits. Just keep in mind that the grid needs to stay
# connected. You could, for example, remove the final row (last four qubits
# in the list) to simulate an 8-qubit system.
#
qubits = sorted([
cirq.GridQubit(3, 3),
cirq.GridQubit(3, 4),
cirq.GridQubit(3, 5),
cirq.GridQubit(3, 6),
cirq.GridQubit(4, 3),
cirq.GridQubit(4, 4),
cirq.GridQubit(4, 5),
cirq.GridQubit(4, 6),
cirq.GridQubit(5, 3),
cirq.GridQubit(5, 4),
cirq.GridQubit(5, 5),
cirq.GridQubit(5, 6),
])
wires = len(qubits)
# create a mapping between wire number and Cirq qubit
qb2wire = {i: j for i, j in zip(qubits, range(wires))}
######################################################################
# Now let's create the ``qsim`` device, available via the Cirq plugin, making
# use of the ``wires`` and ``qubits`` keywords that we defined above.
# First, we need to define the number of 'shots' per circuit instance to
# be used---where the number of shots simply corresponds to the number
# of times that the circuit is sampled. This will also be needed later when
# calculating the cross-entropy benchmarking fidelity. The more shots, the
# more accurate the results will be. 500,000 shots will be used here---the same
# number of samples used in the paper---but feel free to
# change this (depending on your own computational restrictions).
#
shots = 500000
dev = qml.device('cirq.qsim', wires=wires, qubits=qubits, shots=shots)
######################################################################
# The next step would be to prepare the necessary gates. Some of these
# gates are not natively supported in PennyLane, but are accessible
# through the Cirq plugin. We can define the remaining gates by hand.
#
# For the single-qubit gates we need the :math:`\sqrt{X}` and
# :math:`\sqrt{Y}` gates, which can be written as :math:`RX(\pi/2)` and
# :math:`RY(\pi/2)` respectively, as well as the :math:`\sqrt{W}` gate,
# where :math:`W = \frac{X + Y}{\sqrt{2}}`. The latter is most easily defined by its
# unitary matrix
#
# .. math::
#
# \frac{1}{\sqrt{2}}
# \begin{bmatrix}
#       1 & -\sqrt{i} \\
# \sqrt{-i} & 1 \\
# \end{bmatrix}.
#
# The :math:`\sqrt{X}` gate is already implemented in PennyLane, while the
# two other gates can be implemented as follows:
sqrtYgate = lambda wires: qml.RY(np.pi / 2, wires=wires)
sqrtWgate = lambda wires: qml.QubitUnitary(
np.array([[1, -np.sqrt(1j)],
[np.sqrt(-1j), 1]]) / np.sqrt(2), wires=wires
)
single_qubit_gates = [qml.SX, sqrtYgate, sqrtWgate]
######################################################################
# For the two-qubit gates we need the iSWAP gate
#
# .. math::
#
# \begin{bmatrix}
# 1 & 0 & 0 & 0 \\
# 0 & 0 & i & 0 \\
# 0 & i & 0 & 0 \\
# 0 & 0 & 0 & 1
# \end{bmatrix},
#
# as well as the CPhase gate
#
# .. math::
#
# \begin{bmatrix}
# 1 & 0 & 0 & 0 \\
# 0 & 1 & 0 & 0 \\
# 0 & 0 & 1 & 0 \\
# 0 & 0 & 0 & e^{-i\phi}
# \end{bmatrix},
#
# both accessible via the Cirq plugin.
#
######################################################################
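# As a small numerical aside (not part of the original demo), the two matrices
# above can be written down explicitly with NumPy to see exactly which
# unitaries the Cirq operations apply.
iswap_matrix = np.array([
    [1, 0, 0, 0],
    [0, 0, 1j, 0],
    [0, 1j, 0, 0],
    [0, 0, 0, 1],
])
cphase_matrix = lambda phi: np.diag([1, 1, 1, np.exp(-1j * phi)])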
# Assembling the circuit
# ----------------------
#
# Here comes one of the tricky parts. To decide which qubits the
# two-qubit gates should be applied to, we have to look at how they are
# connected to each other. In an alternating pattern, each pair of
# neighbouring qubits gets labeled with a letter A-D, where A and B
# correspond to all horizontally neighbouring qubits (in a row), and C and
# D to the vertically neighbouring qubits (in a column). This is depicted
# in the figure below, where you can also see how the single-qubit gates
# are applied, as well as the cycles, each consisting of a layer of
# single-qubit gates and a pair of two-qubit gates. Note that each coloured
# two-qubit gate represented in the image is implemented as the two
# consecutive gates iSWAP and CPhase in this demo.
#
# .. figure:: ../demonstrations/qsim_beyond_classical/supremacy_circuit.png
# :align: center
# :width: 90%
#
# **Image taken from <NAME>., <NAME>., <NAME>. et al.** [#Arute2019]_
#
# The logic below iterates through all connections and returns a
# dictionary, ``gate_order``, where the keys are the connection labels
# between different qubits and the values are lists of all neighbouring
# qubit pairs. We will use this dictionary inside the circuit to iterate
# through the different pairs and apply the two two-qubit gates that we
# just defined above. The way we iterate through the dictionary will depend
# on a gate sequence defined in the next section.
#
from itertools import combinations
gate_order = {"A":[], "B":[], "C":[], "D":[]}
for i, j in combinations(qubits, 2):
wire_1 = qb2wire[i]
wire_2 = qb2wire[j]
if i in j.neighbors():
if i.row == j.row and i.col % 2 == 0:
gate_order["A"].append((wire_1, wire_2))
elif i.row == j.row and j.col % 2 == 0:
gate_order["B"].append((wire_1, wire_2))
elif i.col == j.col and i.row % 2 == 0:
gate_order["C"].append((wire_1, wire_2))
elif i.col == j.col and j.row % 2 == 0:
gate_order["D"].append((wire_1, wire_2))
######################################################################
# At this point we can define the gate sequence, which is the order the
# two-qubit gates are applied to the different qubit pairs. For example,
# ``["A", "B"]`` would mean that the two-qubit gates are first applied to
# all qubits connected with label A, and then, during the next full cycle,
# the two-qubit gates are applied to all qubits connected with label B.
# This would then correspond to a 2-cycle run (or a circuit with a depth of
# 2).
#
# While we can define any patterns we'd like, the two gate sequences below
# are the ones that are used in the paper. The shorter one is
# used for their classically verifiable benchmarking. The slightly
# longer sequence, which is much harder to simulate classically, is used
# for estimating the cross-entropy fidelity in what they call the "supremacy
# regime". We will use the shorter gate sequence for the following
# demonstration; feel free to play around with other combinations.
#
m = 14 # number of cycles
gate_sequence_longer = np.resize(["A", "B", "C", "D", "C", "D", "A", "B"], m)
gate_sequence = np.resize(["A", "B", "C", "D"], m)
######################################################################
# The single-qubit gates are randomly selected and applied to each qubit in
# the circuit, while avoiding the same gate being applied to the same wire
# twice in a row. We do this by creating a helper function ``generate_single_qubit_gate_list()`` that
# specifies the order in which the single-qubit
# gates should be applied. We can use this list within the
# circuit to know which gate to apply when.
#
def generate_single_qubit_gate_list():
# create the first list by randomly selecting indices
# from single_qubit_gates
g = [list(np.random.choice(range(len(single_qubit_gates)), size=wires))]
for cycle in range(len(gate_sequence)):
g.append([])
for w in range(wires):
# check which gate was applied to the wire previously
one_gate_removed = list(range(len(single_qubit_gates)))
bool_list = np.array(one_gate_removed) == g[cycle][w]
# and remove it from the choices of gates to be applied
pop_idx = np.where(bool_list)[0][0]
one_gate_removed.pop(pop_idx)
g[cycle + 1].append(np.random.choice(one_gate_removed))
return g
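# Quick illustrative check (not in the original demo): consecutive cycles never
# apply the same single-qubit gate to the same wire.
_schedule = generate_single_qubit_gate_list()
assert all(
    _schedule[cycle][w] != _schedule[cycle + 1][w]
    for cycle in range(len(_schedule) - 1)
    for w in range(wires)
)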
######################################################################
# Finally, we can define the circuit itself and create a QNode that we will
# use for circuit evaluation with the ``qsim`` device. The two-qubit gates
# are applied to the qubits connected by A, B, C, or D as defined above.
# The circuit ends with a half-cycle, consisting of only a layer of
# single-qubit gates.
#
# From the QNode, we need both the probabilities of the measurement
# results, as well as raw samples. To facilitate this, we add a keyword
# argument to our circuit allowing us to switch between the two returns. We
# sample from the Pauli-Z observable on all wires, which will give us the
# eigenvalues :math:`\pm 1` of the observable, corresponding to the states
# :math:`\left|0\right>` and :math:`\left|1\right>`.
#
@qml.qnode(dev)
def circuit(seed=42, return_probs=False):
np.random.seed(seed)
gate_idx = generate_single_qubit_gate_list()
# m full cycles - single-qubit gates & two-qubit gate
for i, gs in enumerate(gate_sequence):
for w in range(wires):
single_qubit_gates[gate_idx[i][w]](wires=w)
for qb_1, qb_2 in gate_order[gs]:
ops.ISWAP(wires=(qb_1, qb_2))
ops.CPhase(-np.pi/6, wires=(qb_1, qb_2))
# one half-cycle - single-qubit gates only
for w in range(wires):
single_qubit_gates[gate_idx[-1][w]](wires=w)
if return_probs:
return qml.probs(wires=range(wires))
else:
return [qml.sample(qml.PauliZ(i)) for i in range(wires)]
######################################################################
# The cross-entropy benchmarking fidelity
# ---------------------------------------
#
# The performance metric that is used in the experiment, and the one that we
# will use in this demo, is called the linear cross-entropy benchmarking
# fidelity. It's defined as
#
# .. math::
#
# F_{XEB} = 2^{n}\left<P(x_i)\right> - 1,
#
# where :math:`n` is the number of qubits, :math:`P(x_i)` is the
# probability of bitstring :math:`x_i` computed for the ideal quantum
# circuit, and the average is over the observed bitstrings.
#
# The idea behind using this fidelity is that it will be close to 1 for
# samples obtained from random quantum circuits, such as the one we defined
# above, and close to zero for a uniform probability distribution, which
# can be sampled efficiently classically. Sampling a bitstring from a
# random quantum circuit would follow the distribution
#
# .. math::
#
# Pr(p) = (N - 1)(1- p)^{N-2},
#
# where :math:`N = 2^n` is the number of possible bitstrings [#Boixo2018]_.
# This distribution is approximated well by the Porter-Thomas distribution,
# given by :math:`Pr(p) = Ne^{-Np}`, a characteristic property of chaotic quantum
# systems. From this we can then calculate the expectation value
# :math:`\left<P(x_i)\right>` as follows:
#
# .. math::
#
# \left<P(x_i)\right> = \int_0^1 p^2 N (N-1)(1-p)^{N-2}dp = \frac{2}{N+1},
#
# which leads to the theoretical fidelity
#
# .. math::
#
# F_{XEB} = 2^{n}\left<P(x_i)\right> - 1 = \frac{2N}{N+1} - 1.
#
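# As a concrete check, with the 12-qubit grid defined at the top of this demo
# (so :math:`N = 2^{12} = 4096`), this evaluates to
# :math:`2 \cdot 4096 / 4097 - 1 \approx 0.9995118`, matching the theoretical
# value printed further below.
#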
# We implement this fidelity as the function below, where ``samples`` is a
# list of sampled bitstrings, and ``probs`` is a list with corresponding
# sampling probabilities for the same noiseless circuit.
#
def fidelity_xeb(samples, probs):
sampled_probs = []
for bitstring in samples:
# convert each bitstring into an integer
bitstring_idx = int(bitstring, 2)
# retrieve the corresponding probability for the bitstring
sampled_probs.append(probs[bitstring_idx])
return 2 ** len(samples[0]) * np.mean(sampled_probs) - 1
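######################################################################
# As a quick, illustrative sanity check (not part of the original experiment),
# we can apply ``fidelity_xeb`` to a small hand-made example: bitstrings sampled
# from an exactly known two-qubit distribution. The probabilities below are
# assumed values chosen purely for illustration.
#
toy_probs = np.array([0.4, 0.3, 0.2, 0.1])
toy_samples = np.random.choice(len(toy_probs), size=1000, p=toy_probs)
toy_bitstrings = [format(int(i), "02b") for i in toy_samples]
# for this distribution the expected value is 4 * sum(p_i^2) - 1 = 0.2,
# so the printed estimate should be close to 0.2
print(fidelity_xeb(toy_bitstrings, toy_probs))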
######################################################################
# We set a random seed and use it to calculate the probabilities of all the
# possible bitstrings. We can then sample from exactly the same
# circuit by using the same seed. Before calculating the cross-entropy
# benchmarking fidelity, the Pauli-Z samples need to be converted into
# their corresponding bitstrings, since we need the samples to be in the
# computational basis.
#
# .. note::
#
# Every time the previously defined circuit is run using the ``qsim`` device, ``qsimcirq``
# will print a warning message because the circuit has no intermediate measurements.
# More information about this warning can be found in the `Measurement sampling
# section of the qsimcirq guide <https://quantumai.google/qsim/tutorials/qsimcirq#measurement_sampling>`__.
#
seed = np.random.randint(0, 42424242)
probs = circuit(seed=seed, return_probs=True)
# transpose the samples to get the shape (shots, wires)
circuit_samples = circuit(seed=seed).T
# take the eigenvalues and transform -1 to 1 and 1 to 0
bitstring_samples = []
for sam in circuit_samples:
bitstring_sample = -(sam - 1) // 2
bitstring_samples.append("".join(str(bs) for bs in bitstring_sample))
f_circuit = fidelity_xeb(bitstring_samples, probs)
######################################################################
# Similarly, we can sample random bitstrings from a uniform probability
# distribution by generating all basis states, along with their
# corresponding bitstrings, and sampling directly from them using NumPy.
#
basis_states = dev.generate_basis_states(wires)
random_integers = np.random.randint(0, len(basis_states), size=shots)
bitstring_samples = []
for i in random_integers:
bitstring_samples.append("".join(str(bs) for bs in basis_states[i]))
f_uniform = fidelity_xeb(bitstring_samples, probs)
######################################################################
# Finally, let's compare the two different values. Sampling from the
# circuit's probability distribution should give a fidelity close to 1,
# while sampling from a uniform distribution should give a fidelity
# close to 0.
#
# .. note::
#
# The cross-entropy benchmarking fidelity may output
# values that are negative or that are larger than 1, for any finite
# number of samples. This is due to the random nature of the sampling.
#    For an infinite number of samples, or circuit runs, the observed
# values will tend towards the theoretical ones, and will then always
# lie in the 0-to-1 interval.
#
print("Circuit's distribution:", f"{f_circuit:.7f}".rjust(12))
print("Uniform distribution:", f"{f_uniform:.7f}".rjust(14))
######################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Circuit's distribution: 1.0398803
# Uniform distribution: 0.0013487
#
######################################################################
# To show that the fidelity obtained from circuit sampling actually tends
# towards the theoretical value calculated above, we can run several
# different random circuits, calculate their respective cross-entropy
# benchmarking fidelities, and then take the mean fidelity over all the
# runs. The more evaluations we do, the closer to the theoretical value we
# should get.
#
# In the experiment, they typically calculate each of their
# presented fidelities over ten circuit instances, which only differ
# in the choices of single-qubit gates. In this demo, we use even more
# instances to demonstrate a value closer to the theoretically obtained
# one.
#
# .. note::
#
# The following mean fidelity calculations can be interesting to play
# around with. You can change the qubit grid at the top of this demo
# using, e.g., 8 or 4 qubits; change the number of shots used; as well
# as the number of circuit evaluations below. Running the following code
# snippet, the mean fidelity should still tend towards the theoretical
# value (which will be lower for fewer qubits).
#
N = 2 ** wires
theoretical_value = 2 * N / (N + 1) - 1
print("Theoretical:", f"{theoretical_value:.7f}".rjust(24))
f_circuit = []
num_of_evaluations = 100
for i in range(num_of_evaluations):
seed = np.random.randint(0, 42424242)
probs = circuit(seed=seed, return_probs=True)
samples = circuit(seed=seed).T
bitstring_samples = []
for sam in samples:
new_sam = -(sam - 1) // 2
bitstring_samples.append("".join(str(bs) for bs in new_sam))
f_circuit.append(fidelity_xeb(bitstring_samples, probs))
print(f"\r{i + 1:4d} / {num_of_evaluations:4d}{' ':17}{np.mean(f_circuit):.7f}", end="")
print("\rObserved:", f"{np.mean(f_circuit):.7f}".rjust(27))
##############################################################################
# .. rst-class:: sphx-glr-script-out
#
# Out:
#
# .. code-block:: none
#
# Theoretical: 0.9995118
# Observed: 0.9999512
#
######################################################################
# Classical hardness
# ------------------
#
# Why are we calculating this specific fidelity, and what does it actually
# mean if we get a cross-entropy benchmarking fidelity close to 1? This is
# an important question, and it contains one of the main arguments for why
# this experiment is used to demonstrate "quantum supremacy".
#
# Much of it is due to the Porter-Thomas probability distribution that the
# random quantum circuits follow, which is hard to sample from classically.
# A quantum device, on the other hand, running a circuit such as the one
# constructed above, should be able to sample from such a distribution
# without much overhead.
# a high enough fidelity value for a large enough circuit, "quantum
# supremacy" can be claimed. This is exactly what Google's experiment
# has done.
#
# There's still one issue that hasn't been touched on yet: the addition of
# noise in quantum hardware. Simply put, this noise lowers the
# cross-entropy benchmarking fidelity: the larger the
# circuit, the more noise there will be, and the fidelity
# approaches 0 as the noise increases.
# By calculating the specific single-qubit, two-qubit, and readout errors
# of the Sycamore chip, and using them to simulate a noisy circuit, the Google
# AI quantum team was able to compare the run-times with the output from
# their actual hardware device. This way, they managed to show that a
# significant speedup could be gained from using a quantum computer, and
# thus proclaimed "quantum supremacy" (see Fig. 4 in [#Arute2019]_).
#
# .. note::
#
# For more reading on this, the original paper [#Arute2019]_ is highly
#    recommended (along with the supplementary information [#Arute2019sup]_ if you want
# to dive deeper into the math and physics of the experiment). The blog
# post in [#Sohaib2019]_, along with the accompanying GitHub repo, also provides
# a nice introduction to the cross-entropy benchmarking fidelity, and
# includes calculations highlighting the effects of added noise models.
#
######################################################################
# References
# ----------
#
# .. [#Arute2019]
#
# <NAME>., <NAME>., <NAME>. et al. "Quantum supremacy using a programmable
# superconducting processor"
# `Nature 574, 505-510 (2019) <https://doi.org/10.1038/s41586-019-1666-5>`__.
#
# .. [#Arute2019sup]
#
# <NAME>., <NAME>., <NAME>. et al. Supplementary information for "Quantum
# supremacy using a programmable superconducting processor"
# `arXiv:1910.11333 (2019) <https://arxiv.org/abs/1910.11333>`__
#
# .. [#Martinis2020]
#
# <NAME>. et al. (2020), `Quantum supremacy using a programmable
# superconducting processor, Dryad, Dataset <https://doi.org/10.5061/dryad.k6t1rj8>`__
#
# .. [#Boixo2018]
#
# <NAME>., <NAME>., <NAME>. et al. Characterizing quantum supremacy
# in near-term devices.
# `Nature Phys 14, 595-600 (2018) <https://doi.org/10.1038/s41567-018-0124-x>`__
#
# .. [#Sohaib2019]
#
# <NAME>. and <NAME>., `Unpacking the Quantum Supremacy Benchmark with Python
# <https://medium.com/@sohaib.alam/unpacking-the-quantum-supremacy-benchmark-with-python-67a46709d>`__
#
``` |
{
"source": "JohannesJolkkonen/aws-twitter-analysis",
"score": 2
} |
#### File: aws-twitter-analysis/lamba/amazon-comprehend.py
```python
import boto3
import logging
import os
logger = logging.getLogger()
logger.setLevel(logging.INFO)
client = boto3.client('comprehend')
def start_topic_job(client, input, output):
client.start_topics_detection_job(
InputDataConfig={
'S3Uri': input,
'InputFormat': 'ONE_DOC_PER_LINE'
},
OutputDataConfig={
'S3Uri': output,
'KmsKeyId': ''
},
JobName='job-name-string',
NumberOfTopics=4,
VpcConfig={
'SecurityGroupIds': ['string'],
'Subnets': ['string']
}
)
def start_sentiment_job(client, input, output):
client.start_sentiment_detection_job(
InputDataConfig={
'S3Uri': input,
'InputFormat': 'ONE_DOC_PER_LINE'
},
OutputDataConfig={
'S3Uri': output
},
DataAccessRoleArn=os.getenv('COMPREHEND_IAM_ROLE'),
JobName='job-name-string',
LanguageCode='en'
)
def lambda_handler(event, context):
logger.info(f'Event: {event}')
bucket = event['Records'][0]['s3']['bucket']['name']
key = event['Records'][0]['s3']['object']['key'].replace('%3A', ':')
input = f's3://{bucket}/{key}'
output = f"s3://{os.getenv('OUTPUT_BUCKET')}"
start_sentiment_job(client, input, output)
```
#### File: aws-twitter-analysis/lamba/parse-comprehend-results.py
```python
import json
import random
import boto3
import shutil
import os
download_path = './download/output.tar.gz'
upload_path = './upload'
try:
os.mkdir('./download')
os.mkdir(upload_path)
except:
pass
uri = 'twitter-comprehend-output-bucket'
key = '902466892473-KP-04d2491a86f2290b69d20c43fa98eaac/output/output.tar.gz'
root = key.split('/')[0]
def s3_unpack(uri, key):
# Download compressed comprehend-output from s3 and unpack it.
s3_client = boto3.client('s3')
s3_client.download_file(uri, key, Filename=download_path)
shutil.unpack_archive(download_path, extract_dir=upload_path)
s3_unpack(uri, key)
def parse_phrases(infile_path):
# Read and write keyphrases from comprehend-output into json.
# Return random selection of phrases as a list.
data = []
with open(infile_path, 'r') as file:
for line in file.readlines():
            try:
                parsed = json.loads(line)
                obj = parsed['KeyPhrases']
                n = parsed['Line']
            except (ValueError, KeyError):
                # skip lines that are not valid key-phrase output records
                continue
for item in obj:
phrase = item['Text']
score = item['Score']
if len(phrase) > 16 and len(phrase) < 80 and '@' not in phrase and score > 0.99:
item = {}
item['id'] = n
item['score'] = float(score)
item['phrase'] = phrase
data.append(item)
# items = sorted(data.items(), key=lambda x: x[1], reverse=True)
with open('clean-json.json', 'w') as file:
for item in data:
file.write(json.dumps(item, indent=4) + '\n')
highlights = []
for i in range(15):
phrase = random.choice(data)
print((phrase)['phrase'])
highlights.append(phrase)
parse_phrases(upload_path+'/output')
``` |
{
"source": "johannesjung/uncertainty-adversarial-paper",
"score": 2
} |
#### File: johannesjung/uncertainty-adversarial-paper/cats_and_dogs.py
```python
import keras
import numpy as np
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Dropout, Dense
import src.utilities as U
import os
import h5py
from keras import backend as K
H5PATH = '/data-local/lsgs/cats_dogs.h5'
def load_or_create_dataset():
if not os.path.exists(H5PATH):
cats = U.load_jpgs('/data-local/lsgs/PetImages/Cat')
catlabel = np.zeros(cats.shape[0])
dogs = U.load_jpgs('/data-local/lsgs/PetImages/Dog')
doglabel = np.ones(dogs.shape[0])
data = np.concatenate([cats, dogs])
labels = np.concatenate([catlabel, doglabel])
inds = np.random.permutation(data.shape[0])
X = preprocess_input(data.astype(np.float))
Y = keras.utils.to_categorical(labels)
# shuffle data
X = X[inds]
Y = Y[inds]
N = X.shape[0]
split = int(0.8 * N)
X_train = X[:split]
Y_train = Y[:split]
X_test = X[split:]
Y_test = Y[split:]
# write to database file to avoid this crap later
with h5py.File(H5PATH, 'w') as f:
tr = f.create_group('train')
te = f.create_group('test')
tr.create_dataset('X', data=X_train)
tr.create_dataset('Y', data=Y_train)
te.create_dataset('X', data=X_test)
te.create_dataset('Y', data=Y_test)
return X_train, Y_train, X_test, Y_test
else:
with h5py.File(H5PATH, 'r') as f:
X_train = f['train']['X'].value
Y_train = f['train']['Y'].value
X_test = f['test']['X'].value
Y_test = f['test']['Y'].value
return X_train, Y_train, X_test, Y_test
def define_model_resnet():
K.set_learning_phase(True)
rn50 = ResNet50(weights='imagenet', include_top='False')
a = Dropout(rate=0.5)(rn50.output)
a = Dense(2, activation='softmax')(a)
model = keras.models.Model(inputs=rn50.input, outputs=a)
# freeze resnet layers
for layer in rn50.layers:
layer.trainable = False
return model
if __name__ == '__main__':
model = define_model_resnet()
wname = 'save/cats_dogs_rn50_w_run.h5'
model.compile(loss='categorical_crossentropy',
metrics=['accuracy'], optimizer='adam')
X_train, Y_train, X_test, Y_test = load_or_create_dataset()
model.fit(X_train, Y_train, epochs=15, validation_data=(X_test, Y_test), shuffle='batch')
name = U.gen_save_name(wname)
model.save_weights(name)
``` |
{
"source": "johanneskastl/opnsense-core",
"score": 2
} |
#### File: ipsec/vici/session.py
```python
import collections
import socket
from .exception import SessionException, CommandException, EventUnknownException
from .protocol import Transport, Packet, Message
class Session(object):
def __init__(self, sock=None):
if sock is None:
sock = socket.socket(socket.AF_UNIX)
sock.connect("/var/run/charon.vici")
self.handler = SessionHandler(Transport(sock))
def version(self):
"""Retrieve daemon and system specific version information.
:return: daemon and system specific version information
:rtype: dict
"""
return self.handler.request("version")
def stats(self):
"""Retrieve IKE daemon statistics and load information.
:return: IKE daemon statistics and load information
:rtype: dict
"""
return self.handler.request("stats")
def reload_settings(self):
"""Reload strongswan.conf settings and any plugins supporting reload.
"""
self.handler.request("reload-settings")
def initiate(self, sa):
"""Initiate an SA.
:param sa: the SA to initiate
:type sa: dict
:return: generator for logs emitted as dict
:rtype: generator
"""
return self.handler.streamed_request("initiate", "control-log", sa)
def terminate(self, sa):
"""Terminate an SA.
:param sa: the SA to terminate
:type sa: dict
:return: generator for logs emitted as dict
:rtype: generator
"""
return self.handler.streamed_request("terminate", "control-log", sa)
def redirect(self, sa):
"""Redirect an IKE_SA.
:param sa: the SA to redirect
:type sa: dict
"""
self.handler.request("redirect", sa)
def install(self, policy):
"""Install a trap, drop or bypass policy defined by a CHILD_SA config.
:param policy: policy to install
:type policy: dict
"""
self.handler.request("install", policy)
def uninstall(self, policy):
"""Uninstall a trap, drop or bypass policy defined by a CHILD_SA config.
:param policy: policy to uninstall
:type policy: dict
"""
self.handler.request("uninstall", policy)
def list_sas(self, filters=None):
"""Retrieve active IKE_SAs and associated CHILD_SAs.
:param filters: retrieve only matching IKE_SAs (optional)
:type filters: dict
:return: generator for active IKE_SAs and associated CHILD_SAs as dict
:rtype: generator
"""
return self.handler.streamed_request("list-sas", "list-sa", filters)
def list_policies(self, filters=None):
"""Retrieve installed trap, drop and bypass policies.
:param filters: retrieve only matching policies (optional)
:type filters: dict
:return: generator for installed trap, drop and bypass policies as dict
:rtype: generator
"""
return self.handler.streamed_request("list-policies", "list-policy",
filters)
def list_conns(self, filters=None):
"""Retrieve loaded connections.
:param filters: retrieve only matching configuration names (optional)
:type filters: dict
:return: generator for loaded connections as dict
:rtype: generator
"""
return self.handler.streamed_request("list-conns", "list-conn",
filters)
def get_conns(self):
"""Retrieve connection names loaded exclusively over vici.
:return: connection names
:rtype: dict
"""
return self.handler.request("get-conns")
def list_certs(self, filters=None):
"""Retrieve loaded certificates.
:param filters: retrieve only matching certificates (optional)
:type filters: dict
:return: generator for loaded certificates as dict
:rtype: generator
"""
return self.handler.streamed_request("list-certs", "list-cert", filters)
def load_conn(self, connection):
"""Load a connection definition into the daemon.
:param connection: connection definition
:type connection: dict
"""
self.handler.request("load-conn", connection)
def unload_conn(self, name):
"""Unload a connection definition.
:param name: connection definition name
:type name: dict
"""
self.handler.request("unload-conn", name)
def load_cert(self, certificate):
"""Load a certificate into the daemon.
:param certificate: PEM or DER encoded certificate
:type certificate: dict
"""
self.handler.request("load-cert", certificate)
def load_key(self, private_key):
"""Load a private key into the daemon.
:param private_key: PEM or DER encoded key
"""
self.handler.request("load-key", private_key)
def load_shared(self, secret):
"""Load a shared IKE PSK, EAP or XAuth secret into the daemon.
:param secret: shared IKE PSK, EAP or XAuth secret
:type secret: dict
"""
self.handler.request("load-shared", secret)
def flush_certs(self, filter=None):
"""Flush the volatile certificate cache.
        Flush the certificates stored temporarily in the cache. The filter
allows to flush only a certain type of certificates, e.g. CRLs.
:param filter: flush only certificates of a given type (optional)
:type filter: dict
"""
self.handler.request("flush-certs", filter)
def clear_creds(self):
"""Clear credentials loaded over vici.
Clear all loaded certificate, private key and shared key credentials.
This affects only credentials loaded over vici, but additionally
flushes the credential cache.
"""
self.handler.request("clear-creds")
def load_pool(self, pool):
"""Load a virtual IP pool.
Load an in-memory virtual IP and configuration attribute pool.
Existing pools with the same name get updated, if possible.
:param pool: virtual IP and configuration attribute pool
:type pool: dict
"""
return self.handler.request("load-pool", pool)
def unload_pool(self, pool_name):
"""Unload a virtual IP pool.
Unload a previously loaded virtual IP and configuration attribute pool.
Unloading fails for pools with leases currently online.
:param pool_name: pool by name
:type pool_name: dict
"""
self.handler.request("unload-pool", pool_name)
def get_pools(self, options):
"""Retrieve loaded pools.
:param options: filter by name and/or retrieve leases (optional)
:type options: dict
:return: loaded pools
:rtype: dict
"""
return self.handler.request("get-pools", options)
def listen(self, event_types):
"""Register and listen for the given events.
:param event_types: event types to register
:type event_types: list
:return: generator for streamed event responses as (event_type, dict)
:rtype: generator
"""
return self.handler.listen(event_types)
class SessionHandler(object):
"""Handles client command execution requests over vici."""
def __init__(self, transport):
self.transport = transport
def _communicate(self, packet):
"""Send packet over transport and parse response.
:param packet: packet to send
:type packet: :py:class:`vici.protocol.Packet`
:return: parsed packet in a tuple with message type and payload
:rtype: :py:class:`collections.namedtuple`
"""
self.transport.send(packet)
return Packet.parse(self.transport.receive())
def _register_unregister(self, event_type, register):
"""Register or unregister for the given event.
:param event_type: event to register
:type event_type: str
:param register: whether to register or unregister
:type register: bool
"""
if register:
packet = Packet.register_event(event_type)
else:
packet = Packet.unregister_event(event_type)
response = self._communicate(packet)
if response.response_type == Packet.EVENT_UNKNOWN:
raise EventUnknownException(
"Unknown event type '{event}'".format(event=event_type)
)
elif response.response_type != Packet.EVENT_CONFIRM:
raise SessionException(
"Unexpected response type {type}, "
"expected '{confirm}' (EVENT_CONFIRM)".format(
type=response.response_type,
confirm=Packet.EVENT_CONFIRM,
)
)
def request(self, command, message=None):
"""Send request with an optional message.
:param command: command to send
:type command: str
:param message: message (optional)
:type message: str
:return: command result
:rtype: dict
"""
if message is not None:
message = Message.serialize(message)
packet = Packet.request(command, message)
response = self._communicate(packet)
if response.response_type != Packet.CMD_RESPONSE:
raise SessionException(
"Unexpected response type {type}, "
"expected '{response}' (CMD_RESPONSE)".format(
type=response.response_type,
response=Packet.CMD_RESPONSE
)
)
command_response = Message.deserialize(response.payload)
if "success" in command_response:
if command_response["success"] != b"yes":
raise CommandException(
"Command failed: {errmsg}".format(
errmsg=command_response["errmsg"]
)
)
return command_response
def streamed_request(self, command, event_stream_type, message=None):
"""Send command request and collect and return all emitted events.
:param command: command to send
:type command: str
:param event_stream_type: event type emitted on command execution
:type event_stream_type: str
:param message: message (optional)
:type message: str
:return: generator for streamed event responses as dict
:rtype: generator
"""
if message is not None:
message = Message.serialize(message)
self._register_unregister(event_stream_type, True);
try:
packet = Packet.request(command, message)
self.transport.send(packet)
exited = False
while True:
response = Packet.parse(self.transport.receive())
if response.response_type == Packet.EVENT:
if not exited:
try:
yield Message.deserialize(response.payload)
except GeneratorExit:
exited = True
pass
else:
break
if response.response_type == Packet.CMD_RESPONSE:
command_response = Message.deserialize(response.payload)
else:
raise SessionException(
"Unexpected response type {type}, "
"expected '{response}' (CMD_RESPONSE)".format(
type=response.response_type,
response=Packet.CMD_RESPONSE
)
)
finally:
self._register_unregister(event_stream_type, False);
# evaluate command result, if any
if "success" in command_response:
if command_response["success"] != b"yes":
raise CommandException(
"Command failed: {errmsg}".format(
errmsg=command_response["errmsg"]
)
)
def listen(self, event_types):
"""Register and listen for the given events.
:param event_types: event types to register
:type event_types: list
:return: generator for streamed event responses as (event_type, dict)
:rtype: generator
"""
for event_type in event_types:
self._register_unregister(event_type, True)
try:
while True:
response = Packet.parse(self.transport.receive())
if response.response_type == Packet.EVENT:
try:
yield response.event_type, Message.deserialize(response.payload)
except GeneratorExit:
break
finally:
for event_type in event_types:
self._register_unregister(event_type, False)
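# Illustrative usage sketch (not part of the original module); assumes a running
# charon daemon with the vici socket at /var/run/charon.vici:
#
#   s = Session()
#   print(s.version())           # daemon and system version information
#   for ike_sa in s.list_sas():  # streamed request, yields one dict per IKE_SA
#       print(ike_sa)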
```
#### File: service/tests/core.py
```python
import unittest
import json
from modules import processhandler
class DummySocket(object):
""" Simple wrapper to simulate socket client for the processhandler
"""
def __init__(self):
""" init
:return:
"""
self._send_data = ''
self._receive_data = []
self._closed = False
def setTestData(self, data):
""" set data to send
:param data: text
:return:
"""
self._closed = False
self._receive_data = []
self._send_data = data
def recv(self, size):
""" implement sock.rec, flush to self._send_data
:param size:
:return:
"""
return self._send_data
def sendall(self, data):
""" send back to "client"
:param data: text
:return:
"""
self._receive_data.append(data)
def close(self):
""" close connection
:return:
"""
self._closed = True
def getReceived(self):
""" fetch received data
:return:
"""
return ''.join(self._receive_data)
class TestCoreMethods(unittest.TestCase):
def setUp(self):
""" setup test, load config
:return:
"""
self.config_path = '%s/../conf' % '/'.join(__file__.split('/')[:-1])
self.dummysock = DummySocket()
self.act_handler = processhandler.ActionHandler(config_path=self.config_path,
config_environment={})
def tearDown(self):
""" end test
:return:
"""
self.dummysock = None
def test_escape_sequence(self):
""" test if "end of data" is send correctly
:return:
"""
# send unknown command
self.dummysock.setTestData('xxxxxx\n')
cmd_thread = processhandler.HandlerClient(connection=self.dummysock,
client_address=None,
action_handler=self.act_handler,
simulation_mode=False)
cmd_thread.run()
self.assertEquals(self.dummysock.getReceived()[-4:], '\n%c%c%c' % (chr(0), chr(0), chr(0)), "Invalid sequence")
def test_command_unknown(self):
""" test invalid command
:return:
"""
self.dummysock.setTestData('xxxxxx\n')
cmd_thread = processhandler.HandlerClient(connection=self.dummysock,
client_address=None,
action_handler=self.act_handler,
simulation_mode=False)
cmd_thread.run()
self.assertEquals(self.dummysock.getReceived().split('\n')[0], 'Action not found', 'Invalid response')
def test_configd_actions(self):
""" request configd command list
:return:
"""
self.dummysock.setTestData('configd actions json\n')
cmd_thread = processhandler.HandlerClient(connection=self.dummysock,
client_address=None,
action_handler=self.act_handler,
simulation_mode=False)
cmd_thread.run()
response = json.loads(self.dummysock.getReceived()[:-4])
self.assertGreater(len(response), 10, 'number of configd commands very suspicious')
```
#### File: service/tests/template.py
```python
import os
import unittest
import collections
from modules import config
from modules import template
class TestConfigMethods(unittest.TestCase):
def setUp(self):
""" setup test, load config
:return:
"""
conf_path = '%s/config/config.xml' % '/'.join(__file__.split('/')[:-1])
self.conf = config.Config(conf_path)
def tearDown(self):
""" end test
:return:
"""
self.conf = None
def test_type(self):
""" test correct config type
:return:
"""
self.assertEquals(type(self.conf.get()), collections.OrderedDict)
def test_interface(self):
""" test existence of interface
:return:
"""
self.assertIn('interfaces', self.conf.get(), 'interfaces section missing')
self.assertIn('lan', self.conf.get()['interfaces'], 'lan section missing')
self.assertIn('ipaddr', self.conf.get()['interfaces']['lan'], 'lan address missing')
class TestTemplateMethods(unittest.TestCase):
def setUp(self):
""" setup test, load config create temp directory
:return:
"""
conf_path = '%s/config/config.xml' % '/'.join(__file__.split('/')[:-1])
self.output_path = '%s/output/' % '/'.join(__file__.split('/')[:-1])
self.conf = config.Config(conf_path)
self.tmpl = template.Template(target_root_directory=self.output_path)
self.tmpl.set_config(self.conf.get())
if not os.path.exists(self.output_path):
os.mkdir(self.output_path)
def tearDown(self):
""" end test, remove test data
:return:
"""
self.conf = None
if os.path.exists(self.output_path):
for root, dirs, files in os.walk(self.output_path, topdown=False):
for filename in files:
os.unlink('%s/%s' % (root, filename))
for dirname in dirs:
os.rmdir('%s/%s' % (root, dirname))
os.rmdir(self.output_path)
def test_sample(self):
""" test sample template
:return:
"""
generated_filenames = self.tmpl.generate('OPNsense.Sample')
self.assertEquals(len(generated_filenames), 3, 'number of output files not 3')
def test_all(self):
""" Test if all expected templates are created, can only find test for static defined cases.
Calls "generate *" and compares that to all defined templates in all +TARGET files
Fails on first missing case.
:return:
"""
self.expected_filenames = dict()
self.generated_filenames = list()
templates_path = '%s/../templates' % '/'.join(__file__.split('/')[:-1])
for root, dirs, files in os.walk(templates_path):
for filenm in files:
if filenm == '+TARGETS':
filename = '%s/%s' % (root, filenm)
for line in open(filename).read().split('\n'):
line = line.strip()
if len(line) > 1 and line[0] != '#' and line.find('[') == -1:
expected_filename = ('%s%s' % (self.output_path, line.split(':')[-1])).replace('//', '/')
self.expected_filenames[expected_filename] = {'src': filename}
for filename in self.tmpl.generate('*'):
self.generated_filenames.append(filename.replace('//', '/'))
for expected_filename in self.expected_filenames:
message = 'missing %s (%s' % (expected_filename, self.expected_filenames[expected_filename]['src'])
self.assertIn(expected_filename, self.generated_filenames, message)
``` |
{
"source": "johanneskoester/igv-reports",
"score": 2
} |
#### File: igv-reports/test/test_bam.py
```python
import unittest
import pathlib
from igv_reports import bam
class BAMTest(unittest.TestCase):
def test_bam(self):
region = {
"chr": "minigenome",
"start": 4000,
"end": 10000
}
bam_file_path = str((pathlib.Path(__file__).parent / "data/minigenome/alignments.bam").resolve())
data = bam.get_data(bam_file_path, region)
self.assertTrue(data)
``` |
{
"source": "johanneskoester/JUDI",
"score": 3
} |
#### File: JUDI/judi/paramdb.py
```python
import pandas as pd
class ParamDb(object):
"""Parameter database"""
def __init__(self, name=''):
self.name = name
self.df = pd.DataFrame({'JUDI': ['*']})
def add_param(self, param_info, name=None):
if isinstance(param_info, list):
param_info = {name: param_info}
if isinstance(param_info, dict):
param_info = pd.DataFrame(param_info)
if isinstance(param_info, pd.Series):
param_info = pd.DataFrame([param_info])
if not isinstance(param_info, pd.DataFrame):
print("Error! input data must be a list, series or dataframe!!!")
return 1
self.df = self.df.assign(key=1).merge(param_info.assign(key=1), on='key', how='outer').drop('key', 1)
def copy(self, name=''):
other = ParamDb(name)
other.df = self.df.copy()
return other
def mask(self, mask_cols):
self.df = self.df.drop(mask_cols, 1).drop_duplicates()
def show(self):
print(self.name, ':')
if 'JUDI' in self.df.columns:
print(self.df.drop('JUDI', 1))
else:
print(self.df)
JUDI_PARAM = ParamDb("global pdb")
def add_param(param_info, name = None):
"""Add a parameter or a group of parameters in the global parameter database
Args:
param_info (list/dict/Pandas Series/DataFrame): Information about the parameter or group of parameters.
If not already so, param_info is converted to a pandas DataFrame and then it is added to the global
parameter database via a Cartesian product.
Kwargs:
name (str): Used if param_info is a list and denotes the name of the parameter.
Returns:
int. The return code: 0 for success and 1 for error!
Raises:
None
"""
JUDI_PARAM.add_param(param_info, name)
return 0
def show_param_db():
"""Print the global parameter database
"""
JUDI_PARAM.show()
def copy_param_db():
return JUDI_PARAM.copy()
def mask_global_param_db(mask_cols):
param = JUDI_PARAM.copy()
masked = JUDI_PARAM.copy()
param_cols = list(set(param.columns) - set(mask_cols))
param = param.drop(mask_cols, 1).drop_duplicates()
masked = masked.drop(param_cols, 1).drop_duplicates()
return(param, masked)
def mask_param_db(param_db, mask_cols):
pdb = param_db.copy()
pdb = pdb.drop(mask_cols, 1).drop_duplicates()
return pdb
def param_diff(big, small):
diff_cols = list(set(big.param.columns) - set(small.param.columns))
return big.param[diff_cols].drop_duplicates()
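# Illustrative usage sketch (not part of the original module), showing how
# ``add_param`` builds the global parameter database as a Cartesian product;
# the parameter names and values below are hypothetical:
#
#   add_param(['hg19', 'hg38'], name='genome')
#   add_param({'threshold': [0.01, 0.05]})
#   show_param_db()   # 2 x 2 = 4 parameter combinations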
``` |
{
"source": "johanneskool/Ceres29",
"score": 3
} |
#### File: backend/orm/models.py
```python
__author__ = '<NAME>'
import os
import pickle
import secrets
from datetime import datetime
from backend import app
from backend import db
from backend.parsing import Network
class File(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True)
timestamp = db.Column(db.DateTime)
filename = db.Column(db.String)
filesize = db.Column(db.Integer)
hash = db.Column(db.String, unique=True)
def __init__(self, file, name):
self.timestamp = datetime.utcnow()
self.filename = file
self.filesize = os.path.getsize(os.path.join(app.config['UPLOAD_FOLDER'], file))
# create hash to save directory in
while True:
hash = secrets.token_urlsafe(64)
if hash not in [file.hash for file in File.query.all()]:
self.hash = hash
break
# check if name already present here
count = 1
actual_name = name
while name in [file.name for file in File.query.all()]:
# append (number) to end if already exists
name = actual_name + " (" + str(count) + ")"
count += 1
self.name = name
network = Network.TopNetwork(self.name, self.filename,
self.hash, self.filesize, self.timestamp) # converts to correct models and saves file in hash folder
with open(os.path.join(app.config['JSON_FOLDER'], self.hash, "network.p"), "wb") as f:
pickle.dump(network, f, protocol=pickle.HIGHEST_PROTOCOL)
@property
def location_path(self):
return os.path.join(app.config['JSON_FOLDER'], self.hash)
@property
def default(self):
return os.path.join(app.config["JSON_FOLDER"], self.location_path, Network.filenames['default'])
@property
def fiedler(self):
return os.path.join(app.config["JSON_FOLDER"], self.location_path, Network.filenames['fiedler'])
def get_pickle(self):
with open(os.path.join(self.location_path, "network.p"), 'rb') as f:
return pickle.load(f)
def __repr__(self):
return "<File {}>".format(self.name)
```
#### File: Ceres29/backend/views.py
```python
__author__ = '<NAME>, <NAME>, <NAME>, <NAME>'
import os
from flask import render_template, request, redirect, flash, url_for, send_from_directory, abort, jsonify
from backend import app
from backend.functions_file import get_available_files, handle_file_upload
from backend.orm.models import File
# Index page
@app.route('/', methods=['GET', 'POST'])
def index():
data_id = request.args.get('data')
if request.method == 'GET':
if app.config['DEVELOPMENT'] == True: flash(
'Flask is currently running development mode. This is an example to show how we handle messages in our layout. Possible types for flash are info, warning, danger, success, and Flask\'s default category message is also allowed. HTML is no longer allowed in the messages for safety reasons',
'info')
return render_template("index.html", data=data_id, title="Home")
if request.method == 'POST':
return handle_file_upload(request)
# Choose file page
@app.route('/upload', methods=['GET', 'POST'])
def upload():
data_id = request.args.get('data')
if request.method == 'GET':
return render_template("upload.html", files_available=get_available_files(), data=data_id,
title="Upload a file")
if request.method == 'POST':
return handle_file_upload(request)
# Visualization page
@app.route('/vis', methods=['GET'])
@app.route('/vis/<int:data_id>', methods=['GET'])
def vis(data_id=None):
if (data_id is None) and (request.args.get('data') is None):
flash('Please select a file before going to the visualization')
return redirect(url_for('upload'))
elif data_id is None:
data_id = request.args.get('data')
data_name = File.query.get(data_id).name
if request.method == 'GET':
return render_template("vis.html", files_available=get_available_files(), data=data_name, title=data_name,
data_id=data_id)
# Get data endpoint
@app.route('/data/<data_id>', methods=['GET'])
def data(data_id):
file = File.query.get(data_id)
if request.args.get('trace'):
trace = int(request.args.get('trace'))
network = file.get_pickle().get_subnetwork(trace)
return network.json_string
else:
clustertype = request.args.get('type')
# If unknown type do a 400 Bad Request; type does not exist
if clustertype not in ['pagerank', 'cluster', 'degrees', 'lexicographic', 'cluster_graph', 'betweenness',
'fiedler', 'default']: abort(400)
# load the graph either from an already generated json or create the json
graph_path = os.path.join(app.config["JSON_FOLDER"], file.hash)
filename = clustertype + ".json"
if os.path.exists(os.path.join(graph_path, filename)):
return send_from_directory(graph_path, filename)
else:
# get serialized data
network = file.get_pickle()
if clustertype == "cluster_graph":
cluster_network = network.get_cluster_graph()
cluster_network.save_as_json(os.path.join(graph_path, filename))
else:
network.reorder(clustertype)
network.save_as_json(os.path.join(graph_path, filename))
return send_from_directory(graph_path, filename)
# Block some requests to static
@app.before_request
def a_little_bit_of_security_is_allowed():
if '/static/uploads' in request.path \
and '/static/uploads/Quick_Test_10x10_sparse.csv' not in request.path:
abort(403)
if '/static/json' in request.path:
abort(403)
# # Endpoint for different visualizations
# @app.route('/subgraphs/<int:id>', methods=['GET'])
# def subgraph(id):
# if request.args.get('trace'):
# trace = [int(i) for i in request.args.get('trace').split(',')]
# file = File.query.get(id)
# network = file.get_pickle()
# for i in trace:
# network = network.get_subnetwork(i)
# return network.json_string
``` |
{
"source": "JohannesKreuzer/temp-probe-exporter",
"score": 3
} |
#### File: JohannesKreuzer/temp-probe-exporter/prometheus_temperature.py
```python
from __future__ import print_function
import re
import sys
import os
import time
import serial
import yaml
from prometheus_client import start_http_server, Gauge
def main():
"""Do things with stuff"""
if len(sys.argv) != 2:
print('Usage: {0} <config.yaml>'.format(sys.argv[0]))
sys.exit()
try:
conf_file = open(sys.argv[1], 'r')
conf = yaml.safe_load(conf_file)
except Exception as e:
print('Error loading config: {0}'.format(str(e)), file=sys.stderr)
sensor_mappings = conf.get('sensor_mappings')
prometheus_port = conf.get('exporter_port', 8104)
method = conf.get('method')
onewire_temperature_c = Gauge('onewire_temp', 'Temperature in C', ['name','id'])
# Start the prometheus HTTP server
start_http_server(prometheus_port)
if method == 'serial':
read_serial(onewire_temperature_c, sensor_mappings, conf.get('serial_port'))
elif method == 'w1':
read_w1(onewire_temperature_c, sensor_mappings)
else:
print('Invalid method specified: {0}'.format(method), file=sys.stderr)
sys.exit()
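# Example config.yaml (illustrative; the key names match those read in main(),
# the sensor id and name are made up):
#
#   method: serial            # or "w1"
#   serial_port: /dev/ttyUSB0
#   exporter_port: 8104
#   sensor_mappings:
#     "28FF4A2B00150123": living_room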
def read_serial(onewire_temperature_c, sensor_mappings, serial_port):
"""Read data from a serial port"""
ser = serial.Serial(serial_port, timeout=60)
while 1:
line = ser.readline()
m = re.match(r'(.*):(-?[0-9.]+)\n', line.decode('ascii'))
if m:
onewire_temperature_c.labels(name=sensor_mappings[m.group(1)], id=m.group(1)).set(m.group(2))
def read_w1(onewire_temperature_c, sensor_mappings):
"""Read data from /sys/bus/w1/drivers/w1_slave_driver/"""
base_dir = '/sys/bus/w1/drivers/w1_slave_driver/'
# Get our device:
path_mappings = {}
for (directory, dirs, files) in os.walk(base_dir):
for dev_dir in dirs:
try:
id_file = open('{0}/{1}/id'.format(base_dir, dev_dir), 'r')
id_val = id_file.read().encode('hex').upper()
id_file.close()
therm_file = open('{0}/{1}/w1_slave'.format(base_dir, dev_dir), 'r')
path_mappings[id_val] = therm_file
except (OSError, IOError) as e:
print('Skipping {0} due to error: {1}'.format(dev_dir, str(e)), file=sys.stderr)
break
while 1:
for device_id, therm_file in path_mappings.items():
therm_contents = therm_file.read()
therm_file.seek(0)
m = re.search(r't=(-?\d+)$', therm_contents)
if m:
temperature = (float(m.group(1)) / 1000)
# A reading of 85000 seems to mean "it's not working". If you actually want to
# measure things that are 85°C, then my apologies.
if temperature != 85:
onewire_temperature_c.labels(name=sensor_mappings[device_id],id=device_id).set(temperature)
time.sleep(1)
if __name__ == '__main__':
main()
``` |
{
"source": "johanneslanger/sagemaker-workshop-with-ground-truth",
"score": 3
} |
#### File: tf-2-workflow-smpipelines/train_model/model_def.py
```python
import tensorflow as tf
def get_model():
inputs = tf.keras.Input(shape=(13,))
hidden_1 = tf.keras.layers.Dense(13, activation='tanh')(inputs)
hidden_2 = tf.keras.layers.Dense(6, activation='sigmoid')(hidden_1)
outputs = tf.keras.layers.Dense(1)(hidden_2)
return tf.keras.Model(inputs=inputs, outputs=outputs)
``` |
{
"source": "JohannesLiu/Deep-Learning-Loss-Function-Collection-for-Imbalanced-Data",
"score": 2
} |
#### File: Deep-Learning-Loss-Function-Collection-for-Imbalanced-Data/losses/ClassBalancedLoss.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .LossFunc import focal_loss
import numpy as np
class ClassBalancedLoss(torch.nn.Module):
def __init__(self, samples_per_class=None, beta=0.9999, gamma=0.5, loss_type="focal"):
super(ClassBalancedLoss, self).__init__()
if loss_type not in ["focal", "sigmoid", "softmax"]:
loss_type = "focal"
if samples_per_class is None:
num_classes = 5000
samples_per_class = [1] * num_classes
effective_num = 1.0 - np.power(beta, samples_per_class)
weights = (1.0 - beta) / np.array(effective_num)
self.constant_sum = len(samples_per_class)
weights = (weights / np.sum(weights) * self.constant_sum).astype(np.float32)
self.class_weights = weights
self.beta = beta
self.gamma = gamma
self.loss_type = loss_type
def update(self, samples_per_class):
if samples_per_class is None:
return
effective_num = 1.0 - np.power(self.beta, samples_per_class)
weights = (1.0 - self.beta) / np.array(effective_num)
self.constant_sum = len(samples_per_class)
weights = (weights / np.sum(weights) * self.constant_sum).astype(np.float32)
self.class_weights = weights
def forward(self, x, y):
_, num_classes = x.shape
labels_one_hot = F.one_hot(y, num_classes).float()
weights = torch.tensor(self.class_weights, device=x.device).index_select(0, y)
weights = weights.unsqueeze(1)
if self.loss_type == "focal":
cb_loss = focal_loss(x, labels_one_hot, weights, self.gamma)
elif self.loss_type == "sigmoid":
cb_loss = F.binary_cross_entropy_with_logits(x, labels_one_hot, weights)
else: # softmax
pred = x.softmax(dim=1)
cb_loss = F.binary_cross_entropy(pred, labels_one_hot, weights)
return cb_loss
class ClassBalancedLossMod(nn.Module):
def __init__(self,beta = 0.9999,gamma = 0.5,epsilon=0.1, loss_type = 'softmax'):
super(ClassBalancedLossMod, self).__init__()
self.beta = beta
self.gamma = gamma
self.epsilon = epsilon
self.loss_type = loss_type
def forward(self,logits, labels, device="cuda"):
"""Compute the Class Balanced Loss between `logits` and the ground truth `labels`.
Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
where Loss is one of the standard losses used for Neural Networks.
Args:
labels: A int tensor of size [batch].
logits: A float tensor of size [batch, no_of_classes].
samples_per_cls: A python list of size [no_of_classes].
no_of_classes: total number of classes. int
loss_type: string. One of "sigmoid", "focal", "softmax".
beta: float. Hyperparameter for Class balanced loss.
gamma: float. Hyperparameter for Focal loss.
Returns:
cb_loss: A float tensor representing class balanced loss
"""
# self.epsilon = 0.1 #labelsmooth
beta = self.beta
gamma = self.gamma
no_of_classes = logits.shape[1]
samples_per_cls = torch.Tensor([sum(labels == i) for i in range(logits.shape[1])])
if torch.cuda.is_available():
samples_per_cls = samples_per_cls.cuda()
effective_num = 1.0 - torch.pow(beta, samples_per_cls)
weights = (1.0 - beta) / ((effective_num)+1e-8)
# print(weights)
weights = weights / torch.sum(weights) * no_of_classes
labels =labels.reshape(-1,1)
labels_one_hot = torch.zeros(len(labels), no_of_classes).to(device).scatter_(1, labels, 1)
weights = torch.tensor(weights).float()
if torch.cuda.is_available():
weights = weights.cuda()
labels_one_hot = torch.zeros(len(labels), no_of_classes).cuda().scatter_(1, labels, 1).cuda()
labels_one_hot = (1 - self.epsilon) * labels_one_hot + self.epsilon / no_of_classes
weights = weights.unsqueeze(0)
weights = weights.repeat(labels_one_hot.shape[0],1) * labels_one_hot
weights = weights.sum(1)
weights = weights.unsqueeze(1)
weights = weights.repeat(1,no_of_classes)
if self.loss_type == "focal":
cb_loss = focal_loss(labels_one_hot, logits, weights, gamma)
elif self.loss_type == "sigmoid":
cb_loss = F.binary_cross_entropy_with_logits(input = logits,target = labels_one_hot, pos_weight = weights)
elif self.loss_type == "softmax":
pred = logits.softmax(dim = 1)
cb_loss = F.binary_cross_entropy(input = pred, target = labels_one_hot, weight = weights)
return cb_loss
def test():
torch.manual_seed(123)
batch_size = 10
num_classes = 5
x = torch.rand(batch_size, num_classes)
y = torch.randint(0, 5, size=(batch_size,))
samples_per_class = [1, 2, 3, 4, 5]
loss_type = "focal"
loss_fn = ClassBalancedLoss(samples_per_class, loss_type=loss_type)
loss = loss_fn(x, y)
print(loss)
if __name__ == '__main__':
test()
```
#### File: Deep-Learning-Loss-Function-Collection-for-Imbalanced-Data/losses/CSCE.py
```python
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
class CSCE(nn.Module):
"""
CBCE(CSCE) with DRW
"""
def __init__(self, para_dict=None):
super(CSCE, self).__init__()
self.num_class_list = para_dict["num_class_list"]
self.device = para_dict["device"]
cfg = para_dict["cfg"]
scheduler = cfg.LOSS.CSCE.SCHEDULER
self.step_epoch = cfg.LOSS.CSCE.DRW_EPOCH
if scheduler == "drw":
self.betas = [0, 0.999999]
elif scheduler == "default":
self.betas = [0.999999, 0.999999]
self.weight = None
def update_weight(self, beta):
effective_num = 1.0 - np.power(beta, self.num_class_list)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(self.num_class_list)
self.weight = torch.FloatTensor(per_cls_weights).to(self.device)
def reset_epoch(self, epoch):
idx = (epoch-1) // self.step_epoch
beta = self.betas[idx]
self.update_weight(beta)
def forward(self, x, target, **kwargs):
return F.cross_entropy(x, target, weight= self.weight)
```
#### File: Deep-Learning-Loss-Function-Collection-for-Imbalanced-Data/losses/QualityFocalLoss.py
```python
import torch.nn as nn
import torch.nn.functional as F
from .utils import weighted_loss
from .utils import quality_focal_loss
class QualityFocalLoss(nn.Module):
r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
Learning Qualified and Distributed Bounding Boxes for Dense Object
Detection <https://arxiv.org/abs/2006.04388>`_.
Args:
use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
Defaults to True.
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
"""
def __init__(self,
use_sigmoid=True,
beta=2.0,
reduction='mean',
loss_weight=1.0):
super(QualityFocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
self.use_sigmoid = use_sigmoid
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): Predicted joint representation of
classification and quality (IoU) estimation with shape (N, C),
C is the number of classes.
target (tuple([torch.Tensor])): Target category label with shape
(N,) and target quality label with shape (N,).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
loss_cls = self.loss_weight * quality_focal_loss(
pred,
target,
beta=self.beta)
else:
raise NotImplementedError
return loss_cls
```
#### File: Deep-Learning-Loss-Function-Collection-for-Imbalanced-Data/losses/utils.py
```python
import functools
import torch.nn.functional as F
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Args:
pred (torch.Tensor): The prediction.
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
"""
eps = 1e-12
pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
return pos_loss + neg_loss
def quality_focal_loss(pred, target, beta=2.0):
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert len(target) == 2, """target for QFL must be a tuple of two elements,
including category label and quality label, respectively"""
# label denotes the category id, score denotes the quality score
label, score = target
# negatives are supervised by 0 quality score
pred_sigmoid = pred.sigmoid()
scale_factor = pred_sigmoid
zerolabel = scale_factor.new_zeros(pred.shape)
loss = F.binary_cross_entropy_with_logits(
pred, zerolabel, reduction='none') * scale_factor.pow(beta)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = pred.size(1)
pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
pos_label = label[pos].long()
# positives are supervised by bbox quality (IoU) score
scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
pred[pos, pos_label], score[pos],
reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(dim=1, keepdim=False)
return loss
def distribution_focal_loss(pred, label):
r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted general distribution of bounding boxes
(before softmax) with shape (N, n+1), n is the max value of the
integral set `{0, ..., n}` in paper.
label (torch.Tensor): Target distance label for bounding boxes with
shape (N,).
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
dis_left = label.long()
dis_right = dis_left + 1
weight_left = dis_right.float() - label
weight_right = label - dis_left.float()
loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
+ F.cross_entropy(pred, dis_right, reduction='none') * weight_right
return loss
def reduce_loss(loss, reduction):
"""Reduce losses as specified.
Args:
loss (Tensor): Elementwise losses tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced losses tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce losses.
Args:
loss (Tensor): Element-wise losses.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Avarage factor when computing the mean of losses.
Returns:
Tensor: Processed losses values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the losses
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the losses by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given losses function.
To use this decorator, the losses function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise losses without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise losses
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Avarage factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight.mean(dim=-1)
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
``` |
{
"source": "JohannesLiu/HNRE-Pytorch",
"score": 2
} |
#### File: kddirkit/config/args.py
```python
import datetime
import json
import os
import pickle
import sys
import time
import torch
import math
import argparse
class Parser(object):
def __init__(self, config_path, model , is_training = None):
self.config = json.loads(open(config_path,'r').read())
self.is_training = is_training
self.model = model
self._trainParser = argparse.ArgumentParser(description ="training-" + model)
self._testParser = argparse.ArgumentParser(description ="testing-" + model)
self._oneParser = argparse.ArgumentParser(description ="one-" + model)
if self.is_training == True:
self.reset_train_parser()
elif self.is_training == False:
self.reset_test_parser()
else :
self.reset_one_parser()
@property
def trainParser(self):
return self._trainParser
@property
def testParser(self):
return self._testParser
@property
def oneParser(self):
return self._oneParser
def reset_train_parser(self):
# training
self._trainParser.add_argument('--model', help='neural models to encode sentences', type=str,
default=self.model)
self._trainParser.add_argument('--use_baseline', help='baseline or hier', type=bool, default=False)
self._trainParser.add_argument('--mode', help='test mode', type=str, default='pr')
self._trainParser.add_argument('--gpu', help='gpu(s) to use', type=str, default='0')
self._trainParser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
self._trainParser.add_argument('--data_path', help ='path to load data', type=str, default='./data/')
self._trainParser.add_argument('--model_dir', help ='path to store model', type= str, default ='./outputs/ckpt/')
self._trainParser.add_argument('--summary_dir', help ='path to store summary_dir', type=str, default='./outputs/summary')
self._trainParser.add_argument('--batch_size', help ='entity numbers used each training time', type= int, default= 160)
self._trainParser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
self._trainParser.add_argument('--max_epoch', help='maximum of training epochs', type=int, default= 40)
self._trainParser.add_argument('--save_epoch', help='frequency of training epochs', type=int, default=2)
self._trainParser.add_argument('--restore_epoch', help='epoch to continue training', type=int, default=0)
self._trainParser.add_argument('--learning_rate', help='learning rate', type=float, default=0.2)
self._trainParser.add_argument('--weight_decay', help='weight_decay', type=float, default=0.00001)
self._trainParser.add_argument('--keep_prob', help='dropout rate', type=float, default=0.5)
        self._trainParser.add_argument('--word_size', help='word embedding size', type=int, default=self.config['word_size'])
self._trainParser.add_argument('--hidden_size', help='hidden feature size', type=int, default=230)
self._trainParser.add_argument('--pos_size', help='position embedding size', type=int, default=5)
# statistics
self._trainParser.add_argument('--max_length', help='maximum of number of words in one sentence', type=int,
default=self.config['fixlen'])
self._trainParser.add_argument('--pos_num', help='number of position embedding vectors', type=int,
default=self.config['maxlen']*2 +1)
self._trainParser.add_argument('--num_classes', help='maximum of relations', type=int,
default=len(self.config['relation2id']))
        self._trainParser.add_argument('--vocabulary_size', help='vocabulary size', type=int,
default=len(self.config['word2id']))
def reset_test_parser(self):
# test_settings
self._testParser.add_argument('--model', help='neural models to encode sentences', type=str, default=self.model)
self._testParser.add_argument('--use_baseline', help='baseline or hier', type=bool, default=False)
self._testParser.add_argument('--mode', help='test mode', type=str, default='pr')
self._testParser.add_argument('--gpu', help='gpu(s) to use', type=str, default='0')
self._testParser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
self._testParser.add_argument('--allow_growth', help='occupying gpu(s) gradually', type=bool, default=True)
self._testParser.add_argument('--checkpoint_path', help='path to store model', type=str, default='./outputs/ckpt/')
self._testParser.add_argument('--logits_path', help='path to store model', type=str, default='./outputs/logits/')
self._testParser.add_argument('--data_path', help='path to load data', type=str, default='./data/')
self._testParser.add_argument('--batch_size',
help='instance(entity pair) numbers to use each training(testing) time', type=int,
default=262)
self._testParser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# training settings
self._testParser.add_argument('--max_epoch', help='maximum of training epochs', type=int, default=30)
self._testParser.add_argument('--save_epoch', help='frequency of training epochs', type=int, default=2)
        self._testParser.add_argument('--learning_rate', help='learning rate', type=float,
default=0.2)
self._testParser.add_argument('--weight_decay', help='weight_decay', type=float, default=0.00001)
self._testParser.add_argument('--keep_prob', help='dropout rate', type=float, default=1.0)
# test_settings
self._testParser.add_argument('--test_single', help='only test one checkpoint', type=bool, default=True)
self._testParser.add_argument('--test_start_ckpt', help='first epoch to test', type=int, default=1)
self._testParser.add_argument('--test_end_ckpt', help='last epoch to test', type=int, default=30)
self._testParser.add_argument('--test_sleep', help='time units to sleep ', type=float, default=10)
self._testParser.add_argument('--test_use_step', help='test step instead of epoch', type=bool, default=False)
self._testParser.add_argument('--test_start_step', help='first step to test', type=int, default=0 * 1832)
self._testParser.add_argument('--test_end_step', help='last step to test', type=int, default=30 * 1832)
self._testParser.add_argument('--test_step', help='step to add per test', type=int, default=1832)
# parameters
# self._testParser.add_argument('--word_size', help='maximum of relations', type=int, default=self.config['word_size'])
        self._testParser.add_argument('--word_size', help='word embedding size', type=int, default=50)
self._testParser.add_argument('--hidden_size', help='hidden feature size', type=int, default=230)
self._testParser.add_argument('--pos_size', help='position embedding size', type=int, default=5)
# statistics
self._testParser.add_argument('--max_length', help='maximum of number of words in one sentence', type=int,
default=self.config['fixlen'])
self._testParser.add_argument('--pos_num', help='number of position embedding vectors', type=int,
default=self.config['maxlen']*2+1)
self._testParser.add_argument('--num_classes', help='maximum of relations', type=int,
default=len(self.config['relation2id']))
        self._testParser.add_argument('--vocabulary_size', help='vocabulary size', type=int,
default=len(self.config['word2id']))
def reset_one_parser(self):
        # training
# overall
self._oneParser.add_argument('--model', help='neural models to encode sentences', type=str,
default=self.model)
self._oneParser.add_argument('--use_baseline', help='baseline or hier', type=bool, default=False)
self._oneParser.add_argument('--mode', help='test mode', type=str, default='pr')
self._oneParser.add_argument('--gpu', help='gpu(s) to use', type=str, default='0')
self._oneParser.add_argument('--no_cuda', action='store_true', default=False,
help='disables CUDA training')
self._oneParser.add_argument('--allow_growth', help='occupying gpu(s) gradually', type=bool, default=True)
self._oneParser.add_argument('--data_path', help ='path to load data', type=str, default='./data/')
self._oneParser.add_argument('--model_dir', help ='path to store model', type= str, default ='./outputs/ckpt/')
self._oneParser.add_argument('--summary_dir', help ='path to store summary_dir', type=str, default='./outputs/summary')
self._oneParser.add_argument('--training_batch_size', help ='entity numbers used each training time', type= int, default= 160)
self._oneParser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
self._oneParser.add_argument('--layer_pattern', help='default, ag-0, ag-1, ag-2', type=str, default='default')
# training
self._oneParser.add_argument('--max_epoch', help='maximum of training epochs', type=int, default= 80)
self._oneParser.add_argument('--save_epoch', help='frequency of training epochs', type=int, default=2)
self._oneParser.add_argument('--restore_epoch', help='epoch to continue training', type=int, default=0)
self._oneParser.add_argument('--learning_rate', help='learning rate', type=float, default=0.2)
self._oneParser.add_argument('--weight_decay', help='weight_decay', type=float, default=0.00001)
self._oneParser.add_argument('--keep_prob', help='dropout rate', type=float, default=0.5)
# parameters
        self._oneParser.add_argument('--word_size', help='word embedding size', type=int, default=self.config['word_size'])
self._oneParser.add_argument('--hidden_size', help='hidden feature size', type=int, default=230)
self._oneParser.add_argument('--pos_size', help='position embedding size', type=int, default=5)
self._oneParser.add_argument('--losses', help='loss_function', type=str, default='cross_entropy')
# statistics
self._oneParser.add_argument('--max_length', help='maximum of number of words in one sentence', type=int,
default=self.config['fixlen'])
self._oneParser.add_argument('--pos_num', help='number of position embedding vectors', type=int,
default=self.config['maxlen']*2 +1)
self._oneParser.add_argument('--num_classes', help='maximum of relations', type=int,
default=len(self.config['relation2id']))
        self._oneParser.add_argument('--vocabulary_size', help='vocabulary size', type=int,
default=len(self.config['word2id']))
#testing
#overall
self._oneParser.add_argument('--checkpoint_path', help='path to store model', type=str, default='./outputs/ckpt/')
self._oneParser.add_argument('--logits_path', help='path to store model', type=str, default='./outputs/logits/')
self._oneParser.add_argument('--testing_batch_size',
help='instance(entity pair) numbers to use each training(testing) time', type=int,
default=262)
# test_settings
self._oneParser.add_argument('--test_single', help='only test one checkpoint', type=bool, default=True)
self._oneParser.add_argument('--test_start_ckpt', help='first epoch to test', type=int, default=1)
self._oneParser.add_argument('--test_end_ckpt', help='last epoch to test', type=int, default=30)
self._oneParser.add_argument('--test_sleep', help='time units to sleep ', type=float, default=10)
self._oneParser.add_argument('--test_use_step', help='test step instead of epoch', type=bool, default=False)
self._oneParser.add_argument('--test_start_step', help='first step to test', type=int, default=0 * 1832)
self._oneParser.add_argument('--test_end_step', help='last step to test', type=int, default=30 * 1832)
self._oneParser.add_argument('--test_step', help='step to add per test', type=int, default=1832)
if __name__=="__main__":
args = Parser("./data/config", "trials")
trainParser = args.trainParser
testParser = args.testParser
oneParser = args.oneParser
for key in args.__dict__:
print(f"{key}:{args.__dict__[key]}")
```
#### File: kddirkit/dataloaders/DealDataset.py
```python
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
from torch.autograd import Variable
import numpy as np
class DealDataset(Dataset):
"""
    Downloading and initializing the data can both be done here.
"""
def __init__(self):
        # xy = np.loadtxt('./dataset/diabetes.csv.gz', delimiter=',', dtype=np.float32)  # read the data with numpy
# self.x_data = torch.from_numpy(xy[:, 0:-1])
# self.y_data = torch.from_numpy(xy[:, [-1]])
# self.len = xy.shape[0]
        pass
def __getitem__(self, index):
# return self.x_data[index], self.y_data[index]
        raise NotImplementedError
def __len__(self):
# return self.len
        raise NotImplementedError
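# A concrete sketch of how the template above is meant to be filled in, based on
# the commented-out lines; the CSV path and column layout (features in every
# column but the last, label in the last) are assumptions (editor's example).
class ExampleCSVDataset(Dataset):
    def __init__(self, csv_path='./dataset/diabetes.csv.gz'):
        xy = np.loadtxt(csv_path, delimiter=',', dtype=np.float32)  # read the data with numpy
        self.x_data = torch.from_numpy(xy[:, 0:-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])
        self.len = xy.shape[0]
    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]
    def __len__(self):
        return self.len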
```
#### File: kddirkit/frameworks/Tester.py
```python
import torch
import numpy as np
import pandas as pd
class Tester(object):
def __init__(self):
super(Tester, self).__init__()
f = open("raw_data/relation2id.txt", "r")
content = f.readlines()[1:]
self.id2rel = {}
for i in content:
rel, rid = i.strip().split()
self.id2rel[(int)(rid)] = rel
f.close()
self.fewrel_100 = {}
f = open("data/rel100.txt", "r")
content = f.readlines()
for i in content:
self.fewrel_100[i.strip()] = 1
f.close()
self.fewrel_200 = {}
f = open("data/rel200.txt", "r")
content = f.readlines()
for i in content:
self.fewrel_200[i.strip()] = 1
f.close()
```
#### File: networks/embedders/RelQueryMatrix.py
```python
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
class RelQueryMatrix(nn.Module):
def __init__(self, hier, hidden_size, relation_level_layer, DEVICE = "cpu", weight_matrix =None):
nn.Module.__init__(self)
self.layer = relation_level_layer
self.hier = hier
self.hidden_size = hidden_size
self.weight_matrix = weight_matrix
        self._relation_matrixs = []
self.DEVICE= DEVICE
self.reset_parameter()
def reset_parameter(self):
        if self.weight_matrix is None:
            for i in range(self.hier):
                self._relation_matrixs.append(nn.Embedding(self.layer[i], self.hidden_size, _weight=nn.init.xavier_uniform_(
                    torch.Tensor(self.layer[i], self.hidden_size))).to(self.DEVICE))
        else:  # the per-level relation weights passed in via weight_matrix should be loaded here
            raise NotImplementedError
@property
def relation_matrixs(self):
        return self._relation_matrixs
```
#### File: kddirkit/plots/RelTree.py
```python
import dgl
import networkx as nx
import torch
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
class RelTree(object):
def __init__(self, nodes_data, edges_data):
super(RelTree, self).__init__()
self.nodes_data = nodes_data
self.edges_data = edges_data
        self.src = self.edges_data['src'].to_numpy()
        self.dst = self.edges_data['des'].to_numpy()
self.g = dgl.graph((self.dst, self.src))
self.savePath = "./data_analysis/tree.png"
self.rel2id_path = "D:/PycharmProjects/KGNS/raw_data/relation2id.csv"
self.rel2id_pd = pd.read_csv(self.rel2id_path)
self.virtual_weight = 570088
self.weight_rel2id_pd = pd.DataFrame([self.virtual_weight], columns=['Counts']).append(self.rel2id_pd[['Counts']]).fillna(0)
self.weight_rel2id_np = self.weight_rel2id_pd.to_numpy()
# weight_rel2id_np = weight_rel2id_np[np.isnan(weight_rel2id_np)] = 0
self.weight_rel2id_Tensor = torch.LongTensor(self.weight_rel2id_np)
def out_degree2color(self, out_degree):
        if out_degree == 1:
return 'g'
if out_degree>1 and out_degree<5 :
return 'y'
elif out_degree>=5 :
return 'r'
else:
return 'b'
def longTail2color(self, instanceCounts):
if instanceCounts>=1 and instanceCounts<=200 :
return 'g'
if instanceCounts>200 and instanceCounts<=10000 :
return 'y'
elif instanceCounts>10000:
return 'r'
else:
return 'b'
@property
def OutDegreeTree(self, savePath = None):
tree_g = self.g.to_networkx().to_directed()
fig = plt.figure(figsize=(16, 9))
pos = nx.drawing.nx_agraph.graphviz_layout(tree_g, prog='dot')
values = [self.out_degree2color(tree_g.out_degree(i)) for i in range(len(self.nodes_data))]
values[0] = 'purple'
pos[0] = [1800, 306]
nx.draw(tree_g, pos, with_labels=True, node_color=values, arrows=True, alpha=0.5)
        color = ['purple', 'red', 'yellow', 'green', 'blue']  # colors for the legend patches
        labels = ['virtual relation', 'counts of sub children $> 5$', 'counts of sub children $\geq 1$',
                  'counts of sub children $=1$', 'bottom relation']  # legend labels; the color list above supplies the matching colors
patches = [mpatches.Patch(color=color[i], label="{:s}".format(labels[i])) for i in range(len(color))]
plt.legend(handles=patches)
        if savePath is not None:
            fig.savefig(savePath)  # save the figure, e.g. ./data_analysis/tree.png
return fig
@property
def InstanceTree(self, savePath = None):
tree_g = self.g.to_networkx().to_directed()
# tree_g.add_node("root")
fig = plt.figure(figsize=(16, 9))
pos = nx.drawing.nx_agraph.graphviz_layout(tree_g, prog='dot')
values = [self.longTail2color(self.weight_rel2id_np[i]) for i in range(len(self.nodes_data))]
# tree_g.remove_node(1)
values[0] = 'purple'
pos[0] = [1800, 306]
nx.draw(tree_g, pos, with_labels=True, node_color=values, arrows=True, alpha=0.5)
        color = ['purple', 'red', 'yellow', 'green', 'blue']  # colors for the legend patches
        labels = ['virtual relation', 'counts of instances $> 10000$', 'counts of instances $> 200$',
                  'counts of instances $\geq 1$', 'counts of instances $=0$']  # legend labels; the color list above supplies the matching colors
patches = [mpatches.Patch(color=color[i], label="{:s}".format(labels[i])) for i in range(len(color))]
plt.legend(handles=patches)
        if savePath is not None:
            fig.savefig(savePath)  # save the figure
return fig
if __name__ == '__main__':
from kddirkit.dataloaders.LoadNYT import *
nyt_data = NYTDataLoader()
    rel_tree = RelTree(nyt_data.nodes_data, nyt_data.edges_data)
    fig = rel_tree.OutDegreeTree
plt.show()
``` |
{
"source": "johanneslotz/scripts",
"score": 3
} |
#### File: johanneslotz/scripts/histoapp_to_bigtiff_tiles.py
```python
import json
import numpy as np
import pyvips
import requests
import tqdm
from PIL import Image
import math
import shutil
import os
Image.MAX_IMAGE_PIXELS = None
# adapt as needed
baseurl="https://histoapp.mevis.fraunhofer.de"
patch_size = 8000
project="project"
image="image.sqreg"
level=4
z=1
userCredentials=('user','<PASSWORD>')
def setupBigTiff(project, imageName, level):
metadata = requests.get('{}/api/v1/projects/{}/images/{}'.format(baseurl, project, imageName), auth = userCredentials).json()
try:
serverLevel = len(metadata["voxelsizes"])-level-1
except KeyError:
if metadata['status'] == "unauthenticated":
raise Exception("Username or password seems to be wrong.")
extent = [math.ceil(d/(2**level)) for d in metadata["extent"]]
voxelsize = [metadata["voxelsizes"][serverLevel]['x'], metadata["voxelsizes"][serverLevel]['y']]
# imagefile = pyvips.Image.black(extent[0],extent[1],bands=3)
print("Downloading {} at resolution {}x{}...".format(imageName,extent[0],extent[1]))
return serverLevel, extent, voxelsize
def getPatch(project, image, level, z, startPx, endPx, patch_number):
url = '{}/api/v1/projects/{}/images/{}/region/{}/start/{}/{}/{}/size/{}/{}'.format(baseurl, project, image, level, startPx[0], startPx[1], z, endPx[0]-startPx[0], endPx[1]-startPx[1])
response = requests.get(url, auth = userCredentials)
filename = os.path.join("tmp","{:04d}.jpg".format(patch_number))
try:
os.mkdir("tmp")
except FileExistsError:
pass
except Exception as e:
raise(e)
try:
with open(filename, 'wb') as f:
# result.raw.decode_content = True
f.write(response.content)
except Exception as e:
print(url)
print(response)
print(response.content)
raise(e)
return filename
def main():
serverLevel, extent, voxelsize = setupBigTiff(project, image, level)
voxelsize = (1.0/(np.array(voxelsize)/1000000)).tolist() # µm/pixel to pixel/mm
patch_number = 0
tiles=[]
rows = math.ceil(extent[0]/ patch_size)
for y in tqdm.trange(0, extent[1], patch_size, desc="Rows "):
for x in tqdm.trange(0, extent[0], patch_size, desc="Columns", leave=False):
startPx=(x,y)
endPx=(extent[0] if x+patch_size > extent[0] else x+patch_size, extent[1] if y+patch_size > extent[1] else y+patch_size)
tile_filename = getPatch(project, image, level, z, startPx, endPx, patch_number)
tiles.append(tile_filename)
patch_number = patch_number + 1
# save tiles to file
vips_tiles = [pyvips.Image.new_from_file(f) for f in tiles]
im = pyvips.Image.arrayjoin(vips_tiles, across=rows)
im.tiffsave("{}_{}_{}.tif".format(image,level,z), xres=voxelsize[0], yres=voxelsize[1], tile=True, pyramid=True, compression="jpeg", bigtiff=True, rgbjpeg=False)
# im.write_to_file("{}_{}_{}.jpg".format(image,level,z))
if __name__ == "__main__":
main()
``` |
{
"source": "johannesmik/neurons",
"score": 3
} |
#### File: neurons/examples/jeffress_small.py
```python
import numpy as np
from neurons import spiking
from neurons import plotting
from neurons import tools
def test_jeffress():
neurons = 11
timesteps = 100
ax_delays = np.array([0, 5, 15, 25, 0, 25, 15, 5, 0, 0, 0])
model = spiking.SRM_X(neurons=neurons, threshold=np.array([1]*neurons), t_current=np.array([5]*neurons),
t_membrane=np.array([10]*neurons), eta_reset=np.array([2.0]*neurons), ax_delay=ax_delays)
weights = np.zeros((neurons, neurons))
# Connect input layer
weights[0, (1, 2, 3)] = 1
weights[4, (5, 6, 7)] = 1
# Connect to output layer
weights[(1, 5), 8] = 1.05
weights[(2, 6), 9] = 1.05
weights[(3, 7), 10] = 1.05
print(weights)
spiketrain = np.zeros((neurons, timesteps), dtype=bool)
manual_spiketrain = True
if manual_spiketrain:
spiketrain[0, (20, 25, 30)] = 1
spiketrain[4, (0, 5, 10)] = 1
else:
spiketrain[0,:] = tools.sound(timesteps, 85, 0.6, 4)
spiketrain[4,:] = tools.sound(timesteps, 60, 0.6, 4)
psth = plotting.PSTH(spiketrain, binsize=5)
curr = plotting.CurrentPlot(4)
for t in range(timesteps):
current = model.check_spikes(spiketrain, weights, t)
curr.add(current[[0, 3, 7, 10]])
psth.show_plot()
curr.show_plot()
if __name__ == "__main__":
test_jeffress()
plotting.show()
```
#### File: neurons/tests/test_tutorials.py
```python
__author__ = 'johannes'
import pytest
import numpy as np
from neurons import spiking, learning
class TestSRMNetwork:
" The first tutorial: SRM network "
def test_tutorial_works(self):
model = spiking.SRM(neurons=3, threshold=1, t_current=0.3, t_membrane=20, eta_reset=5)
weights = np.array([[0, 0, 1.], [0, 0, 1.], [0, 0, 0]])
spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
for time in range(10):
total_potential = model.check_spikes(spiketrain, weights, time)
print("Spiketrain:")
print(spiketrain)
expected_spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 1, 0]], dtype=bool)
assert np.array_equal(spiketrain, expected_spiketrain)
class TestLearning:
" The second tutorial: Learning "
def test_tutorial_works(self):
stdp_model = learning.STDP(eta=0.05, w_in=0.5, w_out=0.5, tau=10.0, window_size=5)
weights = np.array([[0, 0, 1.], [0, 0, 1.], [0, 0, 0]])
spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 1, 0]], dtype=bool)
for time in range(10):
stdp_model.weight_change(spiketrain, weights, time)
print("Weights after")
print(weights)
# That's the output that I got during my first run
expected_weights = np.array([[0, 0, 1.18586337],
[0, 0, 1.17766241],
[0, 0, 0]])
nullmatrix = np.zeros((3, 3))
assert np.array_equal(nullmatrix, np.around(expected_weights - weights, 5))
class TestSpikeAndLearn:
" The third tutorial: Spike and Learn "
def test_tutorial_works(self):
srm_model = spiking.SRM(neurons=3, threshold=1, t_current=0.3, t_membrane=20, eta_reset=5)
stdp_model = learning.STDP(eta=0.05, w_in=0.5, w_out=0.5, tau=10.0, window_size=5)
weights = np.array([[0, 0, 1.], [0, 0, 1.], [0, 0, 0]])
spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=bool)
for time in range(10):
srm_model.check_spikes(spiketrain, weights, time)
stdp_model.weight_change(spiketrain, weights, time)
# Output that I got during my first run. There's a possibility that this is wrong calculations.
expected_spiketrain = np.array([[0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 1, 0]], dtype=bool)
print(weights)
expected_weights = np.array([[0, 0, 1.18586337],
[0, 0, 1.17766241],
[0, 0, 0]])
assert np.array_equal(spiketrain, expected_spiketrain)
nullmatrix = np.zeros((3, 3))
assert np.array_equal(nullmatrix, np.around(expected_weights - weights, 5))
``` |
{
"source": "johannesmse/Euler",
"score": 4
} |
#### File: johannesmse/Euler/problem_007.py
```python
import math
def sieveOfEratosthenes(n) :
primes = [True for i in range(n + 1)]
i = 2
while i <= math.sqrt(n) :
if primes[i] :
j = i**2
k = 0
while j <= n :
primes[j] = False
k += 1
j = i**2 + k * i
i += 1
n_primes = []
for i in range(2, n+1) :
if primes[i] :
n_primes.append(i)
print(n_primes[10000])
sieveOfEratosthenes(150000)
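# 150000 comfortably exceeds the 10001st prime (104743), so n_primes[10000] exists.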
```
#### File: johannesmse/Euler/problem_011.py
```python
def file_to_grid(filename) :
grid = []
f = open(filename, "r")
for row in f :
numbers = row.split(" ")
grid.append(list(map(int, numbers)))
return grid
# Finds greatest product of four adjacent numbers in the rows of the grid
def greatest_row_product(grid) :
product = 0
for row in grid :
for i in range(len(row) - 3) :
product = max(product, row[i] * row[i+1] * row[i+2] * row[i+3])
return product
# Diagonal
def greatest_diagonal_product(grid) :
product = 0
for i in range(len(grid) - 3) :
# right-diagonal numbers
for j in range(len(grid) - 3) :
product = max(product, grid[i][j] * grid[i+1][j+1] * grid[i+2][j+2] * grid[i+3][j+3])
# left-diagonal numbers
for j in range(3, len(grid)) :
product = max(product, grid[i][j] * grid[i+1][j-1] * grid[i+2][j-2] * grid[i+3][j-3])
return product
grid = file_to_grid("problem_011.txt")
transpose = list(map(list, zip(*grid)))
product = max(greatest_row_product(grid), greatest_row_product(transpose), greatest_diagonal_product(grid))
print(product)
``` |
{
"source": "johannes-mueller/cython",
"score": 3
} |
#### File: cython/bin/cython-generate-lexicon.py
```python
import functools
import re
import os
import sys
# Make sure we import the right Cython
cythonpath, _ = os.path.split(os.path.realpath(__file__)) # bin directory
cythonpath, _ = os.path.split(cythonpath)
if os.path.exists(os.path.join(cythonpath, "Cython")):
sys.path.insert(0, cythonpath)
print("Found (and using) local cython directory")
# else we aren't in a development directory
from Cython.Compiler import Lexicon
def main():
arg = '--overwrite'
if len(sys.argv) == 2:
arg = sys.argv[1]
if len(sys.argv) > 2 or arg not in ['--overwrite','--here']:
print("""Call the script with either:
--overwrite to update the existing Lexicon.py file (default)
--here to create a version of Lexicon.py in the current directory
""")
return
generated_code = (
f"# generated with:\n"
f"# {sys.implementation.name} {sys.version.splitlines()[0].strip()}\n"
"\n"
f"{generate_character_sets()}\n"
)
print("Reading file", Lexicon.__file__)
with open(Lexicon.__file__, 'r') as f:
parts = re.split(r"(# (?:BEGIN|END) GENERATED CODE\n?)", f.read())
if len(parts) not in (4,5) or ' GENERATED CODE' not in parts[1] or ' GENERATED CODE' not in parts[3]:
print("Warning: generated code section not found - code not inserted")
return
parts[2] = generated_code
output = "".join(parts)
if arg == "--here":
outfile = "Lexicon.py"
else:
assert arg == "--overwrite"
outfile = Lexicon.__file__
print("Writing to file", outfile)
with open(outfile, 'w') as f:
f.write(output)
# The easiest way to generate an appropriate character set is just to use the str.isidentifier method
# An alternative approach for getting character sets is at https://stackoverflow.com/a/49332214/4657412
@functools.lru_cache()
def get_start_characters_as_number():
return [ i for i in range(sys.maxunicode) if str.isidentifier(chr(i)) ]
def get_continue_characters_as_number():
return [ i for i in range(sys.maxunicode) if str.isidentifier('a'+chr(i)) ]
def get_continue_not_start_as_number():
start = get_start_characters_as_number()
cont = get_continue_characters_as_number()
assert set(start) <= set(cont), \
"We assume that all identifier start characters are also continuation characters."
return sorted(set(cont).difference(start))
def to_ranges(char_num_list):
# Convert the large lists of character digits to
# list of characters
# a list pairs of characters representing closed ranges
char_num_list = sorted(char_num_list)
first_good_val = char_num_list[0]
single_chars = []
ranges = []
for n in range(1, len(char_num_list)):
if char_num_list[n]-1 != char_num_list[n-1]:
# discontinuous
if first_good_val == char_num_list[n-1]:
single_chars.append(chr(char_num_list[n-1]))
else:
ranges.append(chr(first_good_val) + chr(char_num_list[n-1]))
first_good_val = char_num_list[n]
return ''.join(single_chars), ''.join(ranges)
def make_split_strings(chars, splitby=60, indent=" "):
lines = [f'u"{chars[i:i+splitby]}"' for i in range(0, len(chars), splitby)]
return indent + f"\n{indent}".join(lines)
def generate_character_sets():
declarations = []
for char_type, char_generator in [
("unicode_start_ch", get_start_characters_as_number),
("unicode_continuation_ch", get_continue_not_start_as_number),
]:
for set_type, chars in zip(("any", "range"), to_ranges(char_generator())):
declarations.append(
f"{char_type}_{set_type} = (\n"
f"{make_split_strings(chars)}\n"
f")\n"
)
return "".join(declarations)
if __name__ == "__main__":
main()
```
#### File: Cython/Compiler/AnalysedTreeTransforms.py
```python
from __future__ import absolute_import
from .Visitor import ScopeTrackingTransform
from .Nodes import StatListNode, SingleAssignmentNode, CFuncDefNode, DefNode
from .ExprNodes import DictNode, DictItemNode, NameNode, UnicodeNode
from .PyrexTypes import py_object_type
from .StringEncoding import EncodedString
from . import Symtab
class AutoTestDictTransform(ScopeTrackingTransform):
# Handles autotestdict directive
excludelist = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__',
'__len__', '__contains__']
def visit_ModuleNode(self, node):
if node.is_pxd:
return node
self.scope_type = 'module'
self.scope_node = node
if not self.current_directives['autotestdict']:
return node
self.all_docstrings = self.current_directives['autotestdict.all']
self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef']
assert isinstance(node.body, StatListNode)
# First see if __test__ is already created
if u'__test__' in node.scope.entries:
# Do nothing
return node
pos = node.pos
self.tests = []
self.testspos = node.pos
test_dict_entry = node.scope.declare_var(EncodedString(u'__test__'),
py_object_type,
pos,
visibility='public')
create_test_dict_assignment = SingleAssignmentNode(pos,
lhs=NameNode(pos, name=EncodedString(u'__test__'),
entry=test_dict_entry),
rhs=DictNode(pos, key_value_pairs=self.tests))
self.visitchildren(node)
node.body.stats.append(create_test_dict_assignment)
return node
def add_test(self, testpos, path, doctest):
pos = self.testspos
keystr = u'%s (line %d)' % (path, testpos[1])
key = UnicodeNode(pos, value=EncodedString(keystr))
value = UnicodeNode(pos, value=doctest)
self.tests.append(DictItemNode(pos, key=key, value=value))
def visit_ExprNode(self, node):
# expressions cannot contain functions and lambda expressions
# do not have a docstring
return node
def visit_FuncDefNode(self, node):
if not node.doc or (isinstance(node, DefNode) and node.fused_py_func):
return node
if not self.cdef_docstrings:
if isinstance(node, CFuncDefNode) and not node.py_func:
return node
if not self.all_docstrings and '>>>' not in node.doc:
return node
pos = self.testspos
if self.scope_type == 'module':
path = node.entry.name
elif self.scope_type in ('pyclass', 'cclass'):
if isinstance(node, CFuncDefNode):
if node.py_func is not None:
name = node.py_func.name
else:
name = node.entry.name
else:
name = node.name
if self.scope_type == 'cclass' and name in self.excludelist:
return node
if self.scope_type == 'pyclass':
class_name = self.scope_node.name
else:
class_name = self.scope_node.class_name
if isinstance(node.entry.scope, Symtab.PropertyScope):
property_method_name = node.entry.scope.name
path = "%s.%s.%s" % (class_name, node.entry.scope.name,
node.entry.name)
else:
path = "%s.%s" % (class_name, node.entry.name)
else:
assert False
self.add_test(node.pos, path, node.doc)
return node
```
#### File: Compiler/Tests/TestParseTreeTransforms.py
```python
import os
from Cython.TestUtils import TransformTest
from Cython.Compiler.ParseTreeTransforms import *
from Cython.Compiler.Nodes import *
from Cython.Compiler import Main, Symtab, Options
class TestNormalizeTree(TransformTest):
def test_parserbehaviour_is_what_we_coded_for(self):
t = self.fragment(u"if x: y").root
self.assertLines(u"""
(root): StatListNode
stats[0]: IfStatNode
if_clauses[0]: IfClauseNode
condition: NameNode
body: ExprStatNode
expr: NameNode
""", self.treetypes(t))
def test_wrap_singlestat(self):
t = self.run_pipeline([NormalizeTree(None)], u"if x: y")
self.assertLines(u"""
(root): StatListNode
stats[0]: IfStatNode
if_clauses[0]: IfClauseNode
condition: NameNode
body: StatListNode
stats[0]: ExprStatNode
expr: NameNode
""", self.treetypes(t))
def test_wrap_multistat(self):
t = self.run_pipeline([NormalizeTree(None)], u"""
if z:
x
y
""")
self.assertLines(u"""
(root): StatListNode
stats[0]: IfStatNode
if_clauses[0]: IfClauseNode
condition: NameNode
body: StatListNode
stats[0]: ExprStatNode
expr: NameNode
stats[1]: ExprStatNode
expr: NameNode
""", self.treetypes(t))
def test_statinexpr(self):
t = self.run_pipeline([NormalizeTree(None)], u"""
a, b = x, y
""")
self.assertLines(u"""
(root): StatListNode
stats[0]: SingleAssignmentNode
lhs: TupleNode
args[0]: NameNode
args[1]: NameNode
rhs: TupleNode
args[0]: NameNode
args[1]: NameNode
""", self.treetypes(t))
def test_wrap_offagain(self):
t = self.run_pipeline([NormalizeTree(None)], u"""
x
y
if z:
x
""")
self.assertLines(u"""
(root): StatListNode
stats[0]: ExprStatNode
expr: NameNode
stats[1]: ExprStatNode
expr: NameNode
stats[2]: IfStatNode
if_clauses[0]: IfClauseNode
condition: NameNode
body: StatListNode
stats[0]: ExprStatNode
expr: NameNode
""", self.treetypes(t))
def test_pass_eliminated(self):
t = self.run_pipeline([NormalizeTree(None)], u"pass")
self.assertTrue(len(t.stats) == 0)
class TestWithTransform(object): # (TransformTest): # Disabled!
def test_simplified(self):
t = self.run_pipeline([WithTransform(None)], u"""
with x:
y = z ** 3
""")
self.assertCode(u"""
$0_0 = x
$0_2 = $0_0.__exit__
$0_0.__enter__()
$0_1 = True
try:
try:
$1_0 = None
y = z ** 3
except:
$0_1 = False
if (not $0_2($1_0)):
raise
finally:
if $0_1:
$0_2(None, None, None)
""", t)
def test_basic(self):
t = self.run_pipeline([WithTransform(None)], u"""
with x as y:
y = z ** 3
""")
self.assertCode(u"""
$0_0 = x
$0_2 = $0_0.__exit__
$0_3 = $0_0.__enter__()
$0_1 = True
try:
try:
$1_0 = None
y = $0_3
y = z ** 3
except:
$0_1 = False
if (not $0_2($1_0)):
raise
finally:
if $0_1:
$0_2(None, None, None)
""", t)
class TestInterpretCompilerDirectives(TransformTest):
"""
This class tests the parallel directives AST-rewriting and importing.
"""
# Test the parallel directives (c)importing
import_code = u"""
cimport cython.parallel
cimport cython.parallel as par
from cython cimport parallel as par2
from cython cimport parallel
from cython.parallel cimport threadid as tid
from cython.parallel cimport threadavailable as tavail
from cython.parallel cimport prange
"""
expected_directives_dict = {
u'cython.parallel': u'cython.parallel',
u'par': u'cython.parallel',
u'par2': u'cython.parallel',
u'parallel': u'cython.parallel',
u"tid": u"cython.parallel.threadid",
u"tavail": u"cython.parallel.threadavailable",
u"prange": u"cython.parallel.prange",
}
def setUp(self):
super(TestInterpretCompilerDirectives, self).setUp()
compilation_options = Options.CompilationOptions(Options.default_options)
ctx = Main.Context.from_options(compilation_options)
transform = InterpretCompilerDirectives(ctx, ctx.compiler_directives)
transform.module_scope = Symtab.ModuleScope('__main__', None, ctx)
self.pipeline = [transform]
self.debug_exception_on_error = DebugFlags.debug_exception_on_error
def tearDown(self):
DebugFlags.debug_exception_on_error = self.debug_exception_on_error
def test_parallel_directives_cimports(self):
self.run_pipeline(self.pipeline, self.import_code)
parallel_directives = self.pipeline[0].parallel_directives
self.assertEqual(parallel_directives, self.expected_directives_dict)
def test_parallel_directives_imports(self):
self.run_pipeline(self.pipeline,
self.import_code.replace(u'cimport', u'import'))
parallel_directives = self.pipeline[0].parallel_directives
self.assertEqual(parallel_directives, self.expected_directives_dict)
# TODO: Re-enable once they're more robust.
if False:
from Cython.Debugger import DebugWriter
from Cython.Debugger.Tests.TestLibCython import DebuggerTestCase
else:
# skip test, don't let it inherit unittest.TestCase
DebuggerTestCase = object
class TestDebugTransform(DebuggerTestCase):
def elem_hasattrs(self, elem, attrs):
return all(attr in elem.attrib for attr in attrs)
def test_debug_info(self):
try:
assert os.path.exists(self.debug_dest)
t = DebugWriter.etree.parse(self.debug_dest)
# the xpath of the standard ElementTree is primitive, don't use
# anything fancy
L = list(t.find('/Module/Globals'))
assert L
xml_globals = dict((e.attrib['name'], e.attrib['type']) for e in L)
self.assertEqual(len(L), len(xml_globals))
L = list(t.find('/Module/Functions'))
assert L
xml_funcs = dict((e.attrib['qualified_name'], e) for e in L)
self.assertEqual(len(L), len(xml_funcs))
# test globals
self.assertEqual('CObject', xml_globals.get('c_var'))
self.assertEqual('PythonObject', xml_globals.get('python_var'))
# test functions
funcnames = ('codefile.spam', 'codefile.ham', 'codefile.eggs',
'codefile.closure', 'codefile.inner')
required_xml_attrs = 'name', 'cname', 'qualified_name'
assert all(f in xml_funcs for f in funcnames)
spam, ham, eggs = [xml_funcs[funcname] for funcname in funcnames]
self.assertEqual(spam.attrib['name'], 'spam')
self.assertNotEqual('spam', spam.attrib['cname'])
assert self.elem_hasattrs(spam, required_xml_attrs)
# test locals of functions
spam_locals = list(spam.find('Locals'))
assert spam_locals
spam_locals.sort(key=lambda e: e.attrib['name'])
names = [e.attrib['name'] for e in spam_locals]
self.assertEqual(list('abcd'), names)
assert self.elem_hasattrs(spam_locals[0], required_xml_attrs)
# test arguments of functions
spam_arguments = list(spam.find('Arguments'))
assert spam_arguments
self.assertEqual(1, len(list(spam_arguments)))
# test step-into functions
step_into = spam.find('StepIntoFunctions')
spam_stepinto = [x.attrib['name'] for x in step_into]
assert spam_stepinto
self.assertEqual(2, len(spam_stepinto))
assert 'puts' in spam_stepinto
assert 'some_c_function' in spam_stepinto
except:
f = open(self.debug_dest)
try:
print(f.read())
finally:
f.close()
raise
if __name__ == "__main__":
import unittest
unittest.main()
```
#### File: Cython/Tests/TestCodeWriter.py
```python
from Cython.TestUtils import CythonTest
class TestCodeWriter(CythonTest):
# CythonTest uses the CodeWriter heavily, so do some checking by
# roundtripping Cython code through the test framework.
# Note that this test is dependent upon the normal Cython parser
# to generate the input trees to the CodeWriter. This save *a lot*
# of time; better to spend that time writing other tests than perfecting
# this one...
# Whitespace is very significant in this process:
# - always newline on new block (!)
# - indent 4 spaces
# - 1 space around every operator
def t(self, codestr):
self.assertCode(codestr, self.fragment(codestr).root)
def test_print(self):
self.t(u"""
print(x + y ** 2)
print(x, y, z)
print(x + y, x + y * z, x * (y + z))
""")
def test_if(self):
self.t(u"if x:\n pass")
def test_ifelifelse(self):
self.t(u"""
if x:
pass
elif y:
pass
elif z + 34 ** 34 - 2:
pass
else:
pass
""")
def test_def(self):
self.t(u"""
def f(x, y, z):
pass
def f(x = 34, y = 54, z):
pass
""")
def test_cdef(self):
self.t(u"""
cdef f(x, y, z):
pass
cdef public void (x = 34, y = 54, z):
pass
cdef f(int *x, void *y, Value *z):
pass
cdef f(int **x, void **y, Value **z):
pass
cdef inline f(int &x, Value &z):
pass
""")
def test_longness_and_signedness(self):
self.t(u"def f(unsigned long long long long long int y):\n pass")
def test_signed_short(self):
self.t(u"def f(signed short int y):\n pass")
def test_typed_args(self):
self.t(u"def f(int x, unsigned long int y):\n pass")
def test_cdef_var(self):
self.t(u"""
cdef int hello
cdef int hello = 4, x = 3, y, z
""")
def test_for_loop(self):
self.t(u"""
for x, y, z in f(g(h(34) * 2) + 23):
print(x, y, z)
else:
print(43)
""")
self.t(u"""
for abc in (1, 2, 3):
print(x, y, z)
else:
print(43)
""")
def test_while_loop(self):
self.t(u"""
while True:
while True:
while True:
continue
""")
def test_inplace_assignment(self):
self.t(u"x += 43")
def test_cascaded_assignment(self):
self.t(u"x = y = z = abc = 43")
def test_attribute(self):
self.t(u"a.x")
def test_return_none(self):
self.t(u"""
def f(x, y, z):
return
cdef f(x, y, z):
return
def f(x, y, z):
return None
cdef f(x, y, z):
return None
def f(x, y, z):
return 1234
cdef f(x, y, z):
return 1234
""")
if __name__ == "__main__":
import unittest
unittest.main()
```
#### File: Cython/Tests/TestTestUtils.py
```python
import os.path
import unittest
import tempfile
import textwrap
import shutil
from ..TestUtils import write_file, write_newer_file
class TestTestUtils(unittest.TestCase):
def setUp(self):
super(TestTestUtils, self).setUp()
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
if self.temp_dir and os.path.isdir(self.temp_dir):
shutil.rmtree(self.temp_dir)
super(TestTestUtils, self).tearDown()
def _test_path(self, filename):
return os.path.join(self.temp_dir, filename)
def _test_write_file(self, content, expected, **kwargs):
file_path = self._test_path("abcfile")
write_file(file_path, content, **kwargs)
assert os.path.isfile(file_path)
with open(file_path, 'rb') as f:
found = f.read()
assert found == expected, (repr(expected), repr(found))
def test_write_file_text(self):
text = u"abcüöä"
self._test_write_file(text, text.encode('utf8'))
def test_write_file_dedent(self):
text = u"""
A horse is a horse,
of course, of course,
And no one can talk to a horse
of course
"""
self._test_write_file(text, textwrap.dedent(text).encode('utf8'), dedent=True)
def test_write_file_bytes(self):
self._test_write_file(b"ab\0c", b"ab\0c")
def test_write_newer_file(self):
file_path_1 = self._test_path("abcfile1.txt")
file_path_2 = self._test_path("abcfile2.txt")
write_file(file_path_1, "abc")
assert os.path.isfile(file_path_1)
write_newer_file(file_path_2, file_path_1, "xyz")
assert os.path.isfile(file_path_2)
assert os.path.getmtime(file_path_2) > os.path.getmtime(file_path_1)
def test_write_newer_file_same(self):
file_path = self._test_path("abcfile.txt")
write_file(file_path, "abc")
mtime = os.path.getmtime(file_path)
write_newer_file(file_path, file_path, "xyz")
assert os.path.getmtime(file_path) > mtime
def test_write_newer_file_fresh(self):
file_path = self._test_path("abcfile.txt")
assert not os.path.exists(file_path)
write_newer_file(file_path, file_path, "xyz")
assert os.path.isfile(file_path)
```
#### File: tutorial/cdef_classes/sin_of_square.py
```python
from cython.cimports.libc.math import sin
@cython.cclass
class Function:
@cython.ccall
def evaluate(self, x: float) -> float:
return 0
@cython.cclass
class SinOfSquareFunction(Function):
@cython.ccall
def evaluate(self, x: float) -> float:
return sin(x ** 2)
```
#### File: tutorial/clibraries/queue3.py
```python
from cython.cimports import cqueue
from cython import cast
@cython.cclass
class Queue:
"""A queue class for C integer values.
>>> q = Queue()
>>> q.append(5)
>>> q.peek()
5
>>> q.pop()
5
"""
_c_queue = cython.declare(cython.pointer(cqueue.Queue))
def __cinit__(self):
self._c_queue = cqueue.queue_new()
if self._c_queue is cython.NULL:
raise MemoryError()
def __dealloc__(self):
if self._c_queue is not cython.NULL:
cqueue.queue_free(self._c_queue)
@cython.ccall
def append(self, value: cython.int):
if not cqueue.queue_push_tail(self._c_queue,
cast(cython.p_void, cast(cython.Py_ssize_t, value))):
raise MemoryError()
# The `cpdef` feature is obviously not available for the original "extend()"
# method, as the method signature is incompatible with Python argument
# types (Python does not have pointers). However, we can rename
# the C-ish "extend()" method to e.g. "extend_ints()", and write
# a new "extend()" method that provides a suitable Python interface by
# accepting an arbitrary Python iterable.
@cython.ccall
def extend(self, values):
for value in values:
self.append(value)
@cython.cfunc
def extend_ints(self, values: cython.p_int, count: cython.size_t):
value: cython.int
for value in values[:count]: # Slicing pointer to limit the iteration boundaries.
self.append(value)
@cython.ccall
@cython.exceptval(-1, check=True)
def peek(self) -> cython.int:
value: cython.int = cast(cython.Py_ssize_t, cqueue.queue_peek_head(self._c_queue))
if value == 0:
# this may mean that the queue is empty,
# or that it happens to contain a 0 value
if cqueue.queue_is_empty(self._c_queue):
raise IndexError("Queue is empty")
return value
@cython.ccall
@cython.exceptval(-1, check=True)
def pop(self) -> cython.int:
if cqueue.queue_is_empty(self._c_queue):
raise IndexError("Queue is empty")
return cast(cython.Py_ssize_t, cqueue.queue_pop_head(self._c_queue))
def __bool__(self):
return not cqueue.queue_is_empty(self._c_queue)
```
#### File: tutorial/clibraries/queue.py
```python
from cython.cimports import cqueue
@cython.cclass
class Queue:
_c_queue = cython.declare(cython.pointer(cqueue.Queue))
def __cinit__(self):
self._c_queue = cqueue.queue_new()
```
#### File: tutorial/cython_tutorial/primes_cpp.py
```python
import cython
from cython.cimports.libcpp.vector import vector
def primes(nb_primes: cython.uint):
i: cython.int
p: vector[cython.int]
p.reserve(nb_primes) # allocate memory for 'nb_primes' elements.
n: cython.int = 2
while p.size() < nb_primes: # size() for vectors is similar to len()
for i in p:
if n % i == 0:
break
else:
p.push_back(n) # push_back is similar to append()
n += 1
# If possible, C values and C++ objects are automatically
# converted to Python objects at need.
return p # so here, the vector will be copied into a Python list.
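# Illustrative usage (editor's sketch): the returned vector arrives in Python
# as a list, e.g. primes(5) -> [2, 3, 5, 7, 11].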
```
#### File: tutorial/external/keyword_args_call.py
```python
from cython.cimports.strstr import strstr
def main():
data: cython.p_char = "hfvcakdfagbcffvschvxcdfgccbcfhvgcsnfxjh"
pos = strstr(needle='akd', haystack=data)
print(pos is not cython.NULL)
```
#### File: userguide/extension_types/shrubbery.py
```python
from __future__ import print_function
@cython.cclass
class Shrubbery:
width: cython.int
height: cython.int
def __init__(self, w, h):
self.width = w
self.height = h
def describe(self):
print("This shrubbery is", self.width,
"by", self.height, "cubits.")
```
#### File: userguide/language_basics/optional_subclassing.py
```python
from __future__ import print_function
@cython.cclass
class A:
@cython.cfunc
def foo(self):
print("A")
@cython.cclass
class B(A):
@cython.cfunc
def foo(self, x=None):
print("B", x)
@cython.cclass
class C(B):
@cython.ccall
def foo(self, x=True, k:cython.int = 3):
print("C", x, k)
```
#### File: userguide/language_basics/parameter_refcount.py
```python
from __future__ import print_function
from cython.cimports.cpython.ref import PyObject
import sys
python_dict = {"abc": 123}
python_dict_refcount = sys.getrefcount(python_dict)
@cython.cfunc
def owned_reference(obj: object):
refcount = sys.getrefcount(python_dict)
print('Inside owned_reference: {refcount}'.format(refcount=refcount))
@cython.cfunc
def borrowed_reference(obj: cython.pointer(PyObject)):
refcount = obj.ob_refcnt
print('Inside borrowed_reference: {refcount}'.format(refcount=refcount))
def main():
print('Initial refcount: {refcount}'.format(refcount=python_dict_refcount))
owned_reference(python_dict)
borrowed_reference(cython.cast(cython.pointer(PyObject), python_dict))
```
#### File: tests/run/builtin_types_class.py
```python
from __future__ import print_function
import cython
# https://github.com/cython/cython/issues/3954
# calls to the __class__ attributes of builtin types were optimized to something invalid
@cython.locals(d=dict)
def test_dict(d):
"""
>>> test_dict({})
dict
{}
"""
print(d.__class__.__name__)
print(d.__class__())
@cython.locals(i=int)
def test_int(i):
"""
>>> test_int(0)
int
0
"""
print(i.__class__.__name__)
print(i.__class__())
@cython.cclass
class C:
def __str__(self):
return "I'm a C object"
@cython.locals(c=C)
def test_cdef_class(c):
"""
# This wasn't actually broken but is worth testing anyway
>>> test_cdef_class(C())
C
I'm a C object
"""
print(c.__class__.__name__)
print(c.__class__())
@cython.locals(d=object)
def test_object(o):
"""
>>> test_object({})
dict
{}
>>> test_object(1)
int
0
>>> test_object(C())
C
I'm a C object
"""
print(o.__class__.__name__)
print(o.__class__())
```
#### File: tests/run/pure_py_cimports.py
```python
from cython.cimports.libc import math
from cython.cimports.libc.math import ceil
def libc_math_ceil(x):
"""
>>> libc_math_ceil(1.5)
[2, 2]
"""
return [int(n) for n in [ceil(x), math.ceil(x)]]
```
#### File: cython/Tools/cevaltrace.py
```python
from __future__ import print_function, absolute_import
import re
import os.path
from dis import get_instructions # requires Python 3.4+
# collapse some really boring byte codes
_COLLAPSE = {'NOP', 'LOAD_CONST', 'POP_TOP', 'JUMP_FORWARD'}
#_COLLAPSE.clear()
_is_start = re.compile(r"\s* switch \s* \( opcode \)", re.VERBOSE).match
# Py3: TARGET(XX), Py2: case XX
_match_target = re.compile(r"\s* (?: TARGET \s* \( | case \s* ) \s* (\w+) \s* [:)]", re.VERBOSE).match
_ignored = re.compile(r"\s* PREDICTED[A-Z_]*\(", re.VERBOSE).match
_is_end = re.compile(r"\s* } \s* /\* \s* switch \s* \*/", re.VERBOSE).match
_find_pyversion = re.compile(r'\#define \s+ PY_VERSION \s+ "([^"]+)"', re.VERBOSE).findall
class ParseError(Exception):
def __init__(self, message="Failed to parse ceval.c"):
super(ParseError, self).__init__(message)
def parse_ceval(file_path):
snippets = {}
with open(file_path) as f:
lines = iter(f)
for line in lines:
if _is_start(line):
break
else:
raise ParseError()
targets = []
code_lines = []
for line in lines:
target_match = _match_target(line)
if target_match:
if code_lines:
code = ''.join(code_lines).rstrip()
for target in targets:
snippets[target] = code
del code_lines[:], targets[:]
targets.append(target_match.group(1))
elif _ignored(line):
pass
elif _is_end(line):
break
else:
code_lines.append(line)
else:
if not snippets:
raise ParseError()
return snippets
def translate(func, ceval_snippets):
start_offset = 0
code_obj = getattr(func, '__code__', None)
if code_obj and os.path.exists(code_obj.co_filename):
start_offset = code_obj.co_firstlineno
with open(code_obj.co_filename) as f:
code_line_at = {
i: line.strip()
for i, line in enumerate(f, 1)
if line.strip()
}.get
else:
code_line_at = lambda _: None
for instr in get_instructions(func):
code_line = code_line_at(instr.starts_line)
line_no = (instr.starts_line or start_offset) - start_offset
yield line_no, code_line, instr, ceval_snippets.get(instr.opname)
def main():
import sys
import importlib.util
if len(sys.argv) < 3:
print("Usage: %s path/to/Python/ceval.c script.py ..." % sys.argv[0], file=sys.stderr)
return
ceval_source_file = sys.argv[1]
version_header = os.path.join(os.path.dirname(ceval_source_file), '..', 'Include', 'patchlevel.h')
if os.path.exists(version_header):
with open(version_header) as f:
py_version = _find_pyversion(f.read())
if py_version:
py_version = py_version[0]
if not sys.version.startswith(py_version + ' '):
print("Warning: disassembling with Python %s, but ceval.c has version %s" % (
sys.version.split(None, 1)[0],
py_version,
), file=sys.stderr)
snippets = parse_ceval(ceval_source_file)
for code in _COLLAPSE:
if code in snippets:
snippets[code] = ''
for file_path in sys.argv[2:]:
module_name = os.path.basename(file_path)
print("/*######## MODULE %s ########*/" % module_name)
print('')
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
for func_name, item in sorted(vars(module).items()):
if not callable(item):
continue
print("/* FUNCTION %s */" % func_name)
print("static void") # assuming that it highlights in editors
print("%s() {" % func_name)
last_line = None
for line_no, code_line, instr, snippet in translate(item, snippets):
if last_line != line_no:
if code_line:
print('')
print('/*# %3d %s */' % (line_no, code_line))
print('')
last_line = line_no
print(" %s:%s {%s" % (
instr.opname,
' /* %s */' % instr.argrepr if instr.arg is not None else '',
' /* ??? */' if snippet is None else ' /* ... */ }' if snippet == '' else '',
))
print(snippet or '')
print("} /* FUNCTION %s */" % func_name)
if __name__ == '__main__':
main()
```
#### File: cython/Tools/dump_github_issues.py
```python
import configparser
import gzip
import json
import os.path
from datetime import datetime
from urllib.request import urlopen
GIT_CONFIG_FILE = ".git/config"
class RateLimitReached(Exception):
pass
def gen_urls(repo):
i = 0
while True:
yield f"https://api.github.com/repos/{repo}/issues?state=all&per_page=100&page={i}"
i += 1
def read_rate_limit():
with urlopen("https://api.github.com/rate_limit") as p:
return json.load(p)
def parse_rate_limit(limits):
limits = limits['resources']['core']
return limits['limit'], limits['remaining'], datetime.fromtimestamp(limits['reset'])
def load_url(url):
with urlopen(url) as p:
data = json.load(p)
if isinstance(data, dict) and 'rate limit' in data.get('message', ''):
raise RateLimitReached()
assert isinstance(data, list), type(data)
return data or None # None indicates empty last page
def join_list_data(lists):
result = []
for data in lists:
if not data:
break
result.extend(data)
return result
def output_filename(repo):
timestamp = datetime.now()
return f"github_issues_{repo.replace('/', '_')}_{timestamp.strftime('%Y%m%d_%H%M%S')}.json.gz"
def write_gzjson(file_name, data, indent=2):
with gzip.open(file_name, "wt", encoding='utf-8') as gz:
json.dump(data, gz, indent=indent)
def find_origin_url(git_config=GIT_CONFIG_FILE):
assert os.path.exists(git_config)
parser = configparser.ConfigParser()
parser.read(git_config)
return parser.get('remote "origin"', 'url')
def parse_repo_name(git_url):
if git_url.endswith('.git'):
git_url = git_url[:-4]
return '/'.join(git_url.split('/')[-2:])
def dump_issues(repo):
"""Main entry point."""
print(f"Reading issues from repo '{repo}'")
urls = gen_urls(repo)
try:
paged_data = map(load_url, urls)
issues = join_list_data(paged_data)
except RateLimitReached:
limit, remaining, reset_time = parse_rate_limit(read_rate_limit())
print(f"FAILURE: Rate limits ({limit}) reached, remaining: {remaining}, reset at {reset_time}")
return
filename = output_filename(repo)
print(f"Writing {len(issues)} to {filename}")
write_gzjson(filename, issues)
### TESTS
def test_join_list_data():
assert join_list_data([]) == []
assert join_list_data([[1,2]]) == [1,2]
assert join_list_data([[1,2], [3]]) == [1,2,3]
assert join_list_data([[0], [1,2], [3]]) == [0,1,2,3]
assert join_list_data([[0], [1,2], [[[]],[]]]) == [0,1,2,[[]],[]]
def test_output_filename():
filename = output_filename("re/po")
import re
assert re.match(r"github_issues_re_po_[0-9]{8}_[0-9]{6}\.json", filename)
def test_find_origin_url():
assert find_origin_url()
def test_parse_repo_name():
assert parse_repo_name("https://github.com/cython/cython") == "cython/cython"
assert parse_repo_name("git+ssh://[email protected]/cython/cython.git") == "cython/cython"
assert parse_repo_name("git+ssh://[email protected]/fork/cython.git") == "fork/cython"
def test_write_gzjson():
import tempfile
with tempfile.NamedTemporaryFile() as tmp:
write_gzjson(tmp.name, [{}])
# test JSON format
with gzip.open(tmp.name) as f:
assert json.load(f) == [{}]
# test indentation
with gzip.open(tmp.name) as f:
assert f.read() == b'[\n {}\n]'
### MAIN
if __name__ == '__main__':
repo_name = parse_repo_name(find_origin_url())
dump_issues(repo_name)
``` |
{
"source": "johannes-mueller/redshift",
"score": 2
} |
#### File: custom_components/redshift/__init__.py
```python
import logging
import datetime as DT
import homeassistant.core as HA
import homeassistant.helpers.event as EV
from homeassistant.helpers import entity_registry
from homeassistant.const import (
STATE_ON,
SERVICE_TURN_ON,
ATTR_AREA_ID,
ATTR_ENTITY_ID,
ATTR_DEVICE_ID
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP
)
from .calculator import RedshiftCalculator
DOMAIN = 'redshift'
_LOGGER = logging.getLogger('redshift')
async def async_setup(hass, config):
def inactive():
return hass.states.get(DOMAIN+'.active').state != 'True'
def fetch_light_states():
return {
lgt: hass.states.get(lgt) for lgt in hass.states.async_entity_ids('light')
if hass.states.get(lgt).state == STATE_ON
}
def current_target_color_temp():
return manual_color_temp or round(redshift_calculator.color_temp())
def color_temp_in_limits(lgt):
min_mired = hass.states.get(lgt).attributes['min_mireds']
max_mired = hass.states.get(lgt).attributes['max_mireds']
return min(max_mired, max(min_mired, current_target_color_temp()))
async def apply_new_color_temp(lgt):
color_temp = color_temp_in_limits(lgt)
current_color_temp = _color_temp_of_state(hass.states.get(lgt))
if color_temp == current_color_temp:
return
_LOGGER.debug("%s -> %s", lgt, color_temp)
attrs = {ATTR_ENTITY_ID: lgt, ATTR_COLOR_TEMP: color_temp}
known_states[lgt] = HA.State(lgt, STATE_ON, attrs)
await hass.services.async_call('light', SERVICE_TURN_ON, attrs)
def forget_off_lights(current_states):
return dict(
filter(lambda x: x[0] in current_states.keys(), known_states.items())
)
async def maybe_apply_new_color_temp(lgt, current_state):
if lgt in lights_not_to_touch:
return
known_state = known_states.get(lgt)
known_color_temp = _color_temp_of_state(known_state)
current_color_temp = _color_temp_of_state(current_state)
light_just_went_on = known_state is None
nobody_changed_color_temp_since_last_time = known_color_temp == current_color_temp
_LOGGER.debug("%s: %s %s" % (lgt, known_color_temp, current_color_temp))
if nobody_changed_color_temp_since_last_time or light_just_went_on:
await apply_new_color_temp(lgt)
async def timer_event(event):
nonlocal known_states
await hass.async_block_till_done()
current_states = fetch_light_states()
known_states = forget_off_lights(current_states)
if inactive():
return
for lgt, current_state in current_states.items():
await maybe_apply_new_color_temp(lgt, current_state)
def dont_touch(event):
for entity_id in entities_ids_from_event(event):
lights_not_to_touch.add(entity_id)
def handle_again(event):
for entity_id in entities_ids_from_event(event):
if entity_id not in lights_not_to_touch:
_LOGGER.warning("Unknown entity_id: %s" % entity_id)
continue
lights_not_to_touch.remove(entity_id)
def entities_ids_from_event(event):
entity_reg = entity_registry.async_get(hass)
device_id = event.data.get(ATTR_DEVICE_ID)
if device_id is not None:
for entry in entity_registry.async_entries_for_device(entity_reg, device_id):
yield entry.entity_id
area_id = event.data.get(ATTR_AREA_ID)
if area_id is not None:
for entry in entity_registry.async_entries_for_area(entity_reg, area_id):
yield entry.entity_id
entity_ids = event.data.get(ATTR_ENTITY_ID, [])
if isinstance(entity_ids, str):
entity_ids = [entity_ids]
for entity_id in entity_ids:
yield entity_id
async def deactivate(event):
nonlocal manual_color_temp
manual_color_temp = event.data.get('color_temp')
if manual_color_temp is not None:
manual_color_temp = round(_kelvin_to_mired(manual_color_temp))
await timer_event(None)
hass.states.async_set(DOMAIN+'.active', False)
async def activate(_):
nonlocal manual_color_temp
manual_color_temp = None
hass.states.async_set(DOMAIN+'.active', True)
await timer_event(None)
def finalized_config():
final_config = dict(
evening_time="17:00",
night_time="23:00",
morning_time="06:00",
day_color_temp=6250,
night_color_temp=2500
)
final_config.update(config[DOMAIN])
return final_config
def make_redshift_calculator():
return RedshiftCalculator(
final_config['evening_time'],
final_config['night_time'],
final_config['morning_time'],
_kelvin_to_mired(final_config['day_color_temp']),
_kelvin_to_mired(final_config['night_color_temp']),
)
final_config = finalized_config()
redshift_calculator = make_redshift_calculator()
known_states = {}
manual_color_temp = None
lights_not_to_touch = set()
hass.services.async_register(DOMAIN, 'dont_touch', dont_touch)
hass.services.async_register(DOMAIN, 'handle_again', handle_again)
hass.services.async_register(DOMAIN, 'activate', activate)
hass.services.async_register(DOMAIN, 'deactivate', deactivate)
hass.states.async_set(DOMAIN+'.active', True)
EV.async_track_time_interval(hass, timer_event, DT.timedelta(seconds=1))
return True
def _kelvin_to_mired(kelvin):
return 1e6/kelvin
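# Worked example (illustrative): mired is the reciprocal Kelvin temperature
# scaled by 1e6, so the default config values map as
#   _kelvin_to_mired(6250) -> 160.0   (day_color_temp)
#   _kelvin_to_mired(2500) -> 400.0   (night_color_temp)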
def _color_temp_of_state(state):
if state is None:
return None
return state.attributes.get(ATTR_COLOR_TEMP)
```
#### File: redshift/tests/conftest.py
```python
import pytest
import freezegun as FG
import homeassistant.core as HA
from homeassistant.const import (
STATE_ON,
SERVICE_TURN_ON,
ATTR_ENTITY_ID,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
)
from .common import (
make_lights,
)
from .const import (
MIN_MIRED,
MAX_MIRED,
)
@pytest.fixture
async def lights(hass):
"""Provide lights 'light.light_1' and 'light.light_2'."""
await make_lights(hass, ['light_1', 'light_2'], area_name="area_1")
@pytest.fixture
async def more_lights(hass, lights):
"""Provide lights 'light.light_3' and 'light.light_4'."""
await make_lights(hass, ['light_3', 'light_4'], area_name="area_2")
@pytest.fixture
async def turn_on_service(hass):
"""Mock SERVICE_TURN_ON for lights."""
calls = []
@HA.callback
def mock_service_log(call):
"""Mock service call."""
entity = call.data[ATTR_ENTITY_ID]
color_temp = min(MAX_MIRED, max(MIN_MIRED, round(call.data.get(ATTR_COLOR_TEMP))))
attrs = {
ATTR_COLOR_TEMP: color_temp,
'min_mireds': MIN_MIRED,
'max_mireds': MAX_MIRED
}
hass.states.async_set(entity, STATE_ON, attrs)
calls.append(call)
hass.services.async_register('light', SERVICE_TURN_ON, mock_service_log)
return calls
@pytest.fixture
def start_at_noon():
"""Fake noon time."""
with FG.freeze_time("2020-12-13 12:00:00") as frozen_time:
yield frozen_time
@pytest.fixture
def start_at_night():
"""Fake night time (3am)."""
with FG.freeze_time("2020-12-13 03:00:00") as frozen_time:
yield frozen_time
``` |
{
"source": "JohannesNakayama/CitationNetworks",
"score": 3
} |
#### File: JohannesNakayama/CitationNetworks/bibl_io.py
```python
import pandas as pd
import uuid
import re
import math
import networkx as nx
import json
#%%
class Bibliography:
"""
a bibliography containing bibliographic data
"""
def __init__(self, path_list, db_src):
self.bib_df = _read_bib(path_list, db_src)
self.bib_dict = _convert_bib_to_dict(self.bib_df, db_src)
self.cit_rel = _extract_cit_rel(self.bib_dict)
def export_bib_dict(self, path):
"""
write bibliographic dictionary to a json file
"""
with open(path, 'w') as json_file:
json.dump(self.bib_dict, json_file)
#%%
class CitationNetwork:
"""
a citation network in the form of a weighted DAG
"""
def __init__(self, BibObject, keywords, w_method):
self.keywords = keywords
self.cit_net = _create_citation_network(BibObject, keywords, w_method)
self.out_deg_dist = _extract_out_deg_dist(self.cit_net)
def add_enclosing(self):
"""
add sink and source to citation network
"""
self.cit_net.add_edges_from(_enclose_cit_rel(self.cit_net))
#%%
class MainPathNetwork:
"""
a network of main paths constructed from a citation network
"""
def __init__(self, CitNetObject, mc=1, iterations=1):
self.cit_net_raw = CitNetObject
self.main_paths, self.main_net = _main_path_analysis(CitNetObject, mc, iterations)
self.main_path_net = nx.DiGraph(kw=CitNetObject.keywords)
def create_network(self, mw=1):
"""
create a main path network
contains all edges that are part of a main path
network is pruned with some options
"""
self.main_path_net.add_weighted_edges_from(self.main_net)
_rm_list_e = []
for e in self.main_path_net.edges:
if self.main_path_net.get_edge_data(e[0], e[1])['weight'] < mw:
_rm_list_e.append(e)
self.main_path_net.remove_edges_from(_rm_list_e)
_rm_list_n = []
for n in self.main_path_net.nodes:
if (self.main_path_net.in_degree(n) == 0 and self.main_path_net.out_degree(n) == 0):
_rm_list_n.append(n)
self.main_path_net.remove_nodes_from(_rm_list_n)
#%%
def _read_bib(path_list, db_src):
"""
read output data from bibliographic database
args:
path_list: a list of paths to read from
db_src: database source of the bibliographic data
returns:
bib_df: a bibliographic dataframe
"""
# get database specifics
db_specs = _switch_case_db(db_src)
# read file(s)
if len(path_list) == 1:
bib_df = pd.read_csv(
path_list[0], usecols=db_specs['columns'],
dtype=db_specs['dtype'], sep=db_specs['sep'],
index_col=False
)
else:
bib_df = pd.concat(
[
pd.read_csv(
path, usecols=db_specs['columns'],
dtype=db_specs['dtype'], sep=db_specs['sep'],
index_col=False
) for path in path_list
],
ignore_index=True
)
# some more formatting
bib_df['db_src'] = db_src
bib_df = bib_df.rename(columns=db_specs['new_cols'])
return bib_df
#%%
def _convert_bib_to_dict(bib_df, db_src):
"""
convert bibliographic dataframe into python dictionary
args:
bib_df: a bibliographic dataframe
db_src: database source of the bibliographic data
returns:
bib_dict: a bibliographic dictionary
"""
# get database specifics
db_specs = _switch_case_db(db_src)
# extract and reformat data
keys = bib_df.columns
bib_dict = {}
# create a dictionary entries
for j in range(bib_df.shape[0]):
entry = {}
for key in keys:
entry[key] = bib_df.loc[j, key]
entry = _split_columns(entry, db_specs['to_split']) # split non-atomic columns
bib_dict[str(uuid.uuid4())] = entry
return bib_dict
#%%
# switch scopus specifics
def _switch_scopus():
"""
in case db_src='scopus', use these specifications
args:
None
returns:
        scopus: the scopus specifics
"""
# define scopus specifics
scopus = {
'columns': [
'Title', 'Authors', 'Author(s) ID', 'Source title',
'Year', 'Volume', 'Issue', 'Cited by',
'References', 'DOI', 'ISBN', 'ISSN',
'CODEN', 'PubMed ID', 'EID'
],
'dtype': {
'Title': 'str', 'Authors': 'str',
'Author(s) ID': 'str', 'Source title': 'str',
'Year': 'float64', 'Volume': 'str',
'Issue': 'str', 'Cited by': 'float64',
'References': 'str', 'DOI': 'str',
'ISBN': 'str', 'ISSN': 'str',
'CODEN': 'str', 'PubMed ID': 'str',
'EID': 'str'
},
'sep': ',',
'new_cols': {
'Title': 'title', 'Authors': 'authors',
'Author(s) ID': 'scopus_author_id', 'Source title': 'source',
'Year': 'year', 'Volume': 'vol',
'Issue': 'issue', 'Cited by': 'cit_count',
'References': 'refs', 'DOI': 'doi',
'ISBN': 'isbn', 'ISSN': 'issn',
'CODEN': 'coden', 'PubMed ID': 'pmid',
'EID': 'eid'
},
'to_split': {
'authors': ',', 'scopus_author_id': ';',
'refs': ';', 'isbn': ';'
}
}
return scopus
#%%
# switch web of science specifics
def _switch_wos():
"""
in case db_src='wos', use these specifications
args:
None
returns:
wos: the wos specifics
"""
# define web of science specifics
wos = {
'columns': [
'TI', 'AU', 'AF', 'RI', 'OI', 'SO', 'PY', 'VL',
'IS', 'TC', 'Z9', 'CR', 'U1', 'U2', 'DI', 'D2',
'BN', 'SN', 'PM', 'J9', 'JI', 'BP', 'EP'
],
'dtype': {
'TI': 'str', 'AU': 'str', 'AF': 'str', 'RI': 'str',
'OI': 'str', 'SO': 'str', 'PY': 'float64', 'VL': 'str',
'IS': 'str', 'TC': 'float64', 'Z9': 'float64', 'CR': 'str',
'U1': 'float64', 'U2': 'float64', 'DI': 'str', 'D2': 'str',
'BN': 'str', 'SN': 'str', 'PM': 'str', 'J9': 'str',
'JI': 'str', 'BP': 'str', 'EP': 'str'
},
'sep': '\t',
'new_cols': {
'TI': 'title', 'AU': 'authors', 'AF': 'authors_full',
'RI': 'researcher_id', 'OI': 'orcid', 'SO': 'source',
'PY': 'year', 'VL': 'vol', 'IS': 'issue',
'TC': 'cit_count', 'Z9': 'cit_count_z9', 'CR': 'refs',
'U1': 'usage_count_180d', 'U2': 'usage_count_2013', 'DI': 'doi',
'D2': 'book_doi', 'BN': 'isbn', 'SN': 'issn',
'PM': 'pmid', 'J9': 'src_abb_29', 'JI': 'src_abb_iso',
'BP': 'start_page', 'EP': 'end_page'
},
'to_split': {
'authors': ';', 'authors_full': ';', 'researcher_id': ';',
'orcid': ';', 'refs': ';', 'isbn': ';'
}
}
return wos
#%%
# switch case replacement
# adapted from: https://www.pydanny.com/why-doesnt-python-have-switch-case.html
def _switch_case_db(arg):
"""
replacement for switch case
args:
arg: the case to execute
returns:
func(): the switch function
"""
# dictionary referring to switch statements
switcher = {
'scopus': _switch_scopus,
'wos': _switch_wos
}
# print status if exception occurs
if arg not in ['scopus', 'wos']:
print(
            'Exception: Unknown db_source ' +
str(arg) +
'\nOutput might not be in proper format'
)
# switch function
func = switcher.get(arg, lambda: [[], {}])
return func()
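# Usage sketch (illustrative): the dispatcher maps the db_src string to the
# function that builds the matching spec dictionary, e.g.
#   _switch_case_db('wos')['sep']     # -> '\t'
#   _switch_case_db('scopus')['sep']  # -> ','
# Unknown sources print a note and fall back to the empty spec [[], {}].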
#%%
# split non-atomic columns
def _split_columns(entry, split_list):
"""
split pre-defined dictionary entries along pre-defined separators
args:
entry: a bibliography entry
split_list: a pre-defined list of columns to separate
returns:
entry: the split entry
"""
# function to strip trailing spaces
space_stripper = lambda x: x.rstrip().lstrip()
# iterate over pre-defined split_list
for label, separator in split_list.items():
try:
entry[label] = space_stripper(entry[label]).rstrip(';')
entry[label] = entry[label].split(separator)
entry[label] = list(
map(space_stripper, entry[label])
)
except:
continue
return entry
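# Example (illustrative): with split_list = {'refs': ';'} an entry such as
#   {'refs': ' ref A; ref B; '}
# becomes
#   {'refs': ['ref A', 'ref B']}
# (the trailing semicolon is stripped before splitting, each piece is trimmed).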
#%%
def _extract_cit_rel(bib_dict):
"""
extract the citation relation
args:
bib_dict: a bibliographic dictionary
returns:
        cit_rel: a list of tuples (x, y) where y cites x (x is the cited DOI, y the citing DOI)
"""
# initialize citation relation
cit_rel = []
# iterate over all bibliography entries
for key in bib_dict.keys():
doi_at_key = bib_dict[key]['doi']
# check if a doi is available, if not: continue
        if len(str(doi_at_key)) <= 8:
            continue
refs_at_key = bib_dict[key]['refs']
# try to extract doi and append to citation relation
try:
for ref_idx in range(len(refs_at_key)):
ref_doi = _extract_doi(
refs_at_key[ref_idx], bib_dict[key]['db_src']
)
if ref_doi == 'NO_DOI':
continue
if ref_doi != doi_at_key:
cit_rel.append((ref_doi, doi_at_key))
except:
continue
return cit_rel
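# Example (illustrative): a bibliography entry whose own 'doi' is '10.1000/b'
# and whose 'refs' contain a string ending in '10.1000/a' contributes the
# pair ('10.1000/a', '10.1000/b'), i.e. edges later point from the cited
# work to the citing work.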
#%%
# extract doi from reference string
# with a little help from: https://www.crossref.org/blog/dois-and-matching-regular-expressions/
def _extract_doi(ref_elem, db_src='wos'):
"""
extract doi from reference (CAUTION: only works for db_src == 'wos' so far!)
args:
ref_elem: a reference
db_src: database source of the bibliographic data
returns:
doi if doi is extractable
'NO_DOI' if no doi is extractable
"""
# currently: only works for web of science
if db_src == 'wos':
        regex_doi = re.compile(r'10\.\d{4,9}/[-._;()/:A-Za-z0-9]+$')  # regex for a DOI (raw string avoids invalid escape warnings)
# try searching for doi in reference string
try:
res = regex_doi.search(ref_elem)
doi = res.group(0)
return doi
# if not successful: return value for missing doi
except:
return 'NO_DOI'
# <CODE FOR SCOPUS HERE> -> elif db_src == 'scopus': <CODE>
else:
return 'NO_DOI'
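# Example (illustrative): a typical Web of Science reference string ends with
# the DOI, e.g.
#   _extract_doi('SMITH J, 2010, J EXAMPLE, V1, P1, DOI 10.1000/xyz123')
#   # -> '10.1000/xyz123'
# Strings without a trailing DOI (or non-'wos' sources) return 'NO_DOI'.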
#%%
def _create_citation_network(BibObject, keywords=['citation network'], w_method='nppc'):
"""
create a citation network
args:
BibObject: Bibliography object
keywords: list of keywords as graph attributes (for later reference)
w_method: weighting method (so far only 'nppc')
returns:
net: a citation network
"""
# create directed graph
net = nx.DiGraph(kw=keywords)
net.add_edges_from(BibObject.cit_rel)
net = _break_cycles(net)
net = _compute_edge_weights(net, w_method)
return net
#%%
def _compute_edge_weights(net, method='nppc'):
"""
compute the edge weights of a citation network
args:
net: a citation network
method: a weighting method (so far only 'nppc')
returns:
net: the net with weighted edges
"""
if method == 'nppc': # node pair projection count
# extract all pairs of connected nodes
con_nodes = _find_connected_nodes(net)
# compute nppc weights
for sub in [nx.all_simple_paths(
net, source=pair[0], target=pair[1]
) for pair in con_nodes]:
tmp = net.subgraph({node for path in sub for node in path})
for edge in tmp.edges:
try:
net[edge[0]][edge[1]]['weight'] += 1
except:
net[edge[0]][edge[1]]['weight'] = 1
else:
print('This does not seem to be a valid weighting method.')
return net
#%%
def _find_connected_nodes(net):
"""
find all connected nodes (a, b) where there is a path from a to b
args:
net: a citation network
returns:
con_nodes: a list of tuples with all connected nodes
"""
con_nodes = [
(struc[0], i)
for node in net.nodes
for struc in nx.bfs_successors(net, node)
for i in struc[1]
]
return(con_nodes)
#%%
def _extract_out_deg_dist(net):
"""
extract the distribution of out-degrees in the citation network for later pruning
args:
net: a citation graph
returns:
a pandas Dataframe with the frequency distribution of out-degrees
"""
out_deg_list = [net.out_degree(node) for node in net.nodes]
freq = [0] * (max(out_deg_list) + 1)
for i in out_deg_list:
freq[i] += 1
data = {
'out_degree': list(range(max(out_deg_list) + 1)),
'freq': freq
}
return(pd.DataFrame(data))
#%%
def _enclose_cit_rel(net):
"""
add sink and source to a citation network
args:
net: an unweighted citation network
return:
enclosing: list of edges to add sink and source
"""
try:
source_edges = [('source', node, {'weight': 1}) for node in net.nodes if net.in_degree(node) == 0]
sink_edges = [(node, 'sink', {'weight': 1}) for node in net.nodes if net.out_degree(node) == 0]
return(source_edges + sink_edges)
except:
return(False)
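# Example (illustrative): for a two-node chain a -> b the enclosing edges are
#   [('source', 'a', {'weight': 1}), ('b', 'sink', {'weight': 1})]
# so every maximal path in the DAG runs from the artificial 'source' to 'sink'.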
#%%
def _break_cycles(net):
"""
breaks potential cycles in a citation net
args:
net: a citation net
returns:
net: the acyclic citation net
"""
flag = True
counter = 0
while(flag):
try:
counter += 1
cycles = nx.find_cycle(net)
net.remove_edge(cycles[-1][0], cycles[-1][1])
except:
if counter == 1:
print('NOTE: {} edge was removed to break cycles'.format(counter))
else:
print('NOTE: {} edges were removed to break cycles'.format(counter))
flag = False
if counter >= 100:
print('NOTE: There are oddly many cycles. Please check your data to avoid problems in the further process.')
flag = False
return(net)
#%%
def _main_path_analysis(CitNetObject, mc, iterations):
"""
conduct main path analysis on a citation network
args:
CitNetObject: a CitationNetwork object
mc: minimum citations for start nodes
iterations: number of iterations to conduct
returns:
main_paths: a list of all mined main paths
main_net: the edges of all main paths in one list
"""
# setup
CitNetObject.cit_net.remove_nodes_from(['source', 'sink'])
print('NOTE: source and sink have been removed from your citation network')
init_net = CitNetObject.cit_net
# iterations
for i in range(iterations):
# initialize
start_nodes = _retrieve_start_nodes(init_net, mc)
main_paths = []
main_net = []
for node in start_nodes:
# loop setup
counter = 0 # if paths cross lengths of 100, the while loop is interrupted
flag = True
error = False
mp = []
cur_nodes = [node]
# find main path
while(flag):
# create list containing all lists of out_edges of the current nodes
candidates = [
[
(e[0], e[1], init_net.edges[e[0], e[1]]['weight'])
for e in init_net.out_edges(cur_node)
] for cur_node in cur_nodes
]
# extract weights from candidate edges
weights = [[t[2] for t in l] for l in candidates]
# determine maximum and prepare next iteration
mw = [max(w) for w in weights]
max_idx = [
[
i for i, j in enumerate(weights[m]) if j == mw[m]
] for m in range(len(mw))
]
# update current nodes and extend current main path
cur_nodes.clear()
for i, mi in enumerate(max_idx):
next_edges = [candidates[i][j] for j in mi]
mp.extend(next_edges)
cur_nodes.extend([e[1] for e in next_edges])
cur_nodes = list(dict.fromkeys(cur_nodes))
# remove node from current nodes if end of path is reached
rm_idx = []
for i in range(len(cur_nodes)):
if init_net.out_degree(cur_nodes[i]) == 0:
rm_idx.append(i)
for idx in sorted(rm_idx, reverse=True):
del cur_nodes[idx]
counter += 1
# stop criteria
if not cur_nodes:
flag = False
if counter >= 100:
print('This takes oddly long. Something must have gone wrong.')
error = True
flag = False
# append extracted main path to main path collection and extend main path network
mp = list(dict.fromkeys(mp))
main_paths.append(mp)
main_net.extend(mp)
# report potential error
if error:
print('An error occurred.')
break
init_net = nx.DiGraph()
init_net.add_weighted_edges_from(main_net)
return(main_paths, main_net)
#%%
def _retrieve_start_nodes(net, mc):
"""
retrieve nodes with in_degree == 0 as starting points for main path analysis
args:
net: a CitationNetwork object
mc: minimum citations
returns:
list of start nodes in net
"""
return [node for node in net.nodes if (net.in_degree(node) == 0 and net.out_degree(node) > mc)]
``` |
{
"source": "JohannesNE/parse-vital",
"score": 2
} |
#### File: JohannesNE/parse-vital/parse_vital.py
```python
import gzip
from construct import *
import warnings
import io
from pathlib import Path
import collections
import textwrap
import pandas as pd
# Import file construct obj
from vital_file_struct import body_str, header_str
class Track:
'''
Object which contains all packets from one track
'''
def __init__(self, vital_obj, trkid):
# Get rec from trkid
self.info, = (trk for trk in vital_obj.track_info if trk.trkid == trkid)
self.recs = [rec for rec in vital_obj.recs if rec.trkid == trkid]
self.devname = 'VITAL' if self.info.devid == 0 else \
[dev.devname for dev in vital_obj.dev_info if dev.devid == self.info.devid][-1]
# Convert values using adc_gain and adc_offset
for i, rec in enumerate(self.recs):
if self.info.rec_type == 1: # Waveform
self.recs[i]['values'].vals_real = [val * self.info.adc_gain + self.info.adc_offset for val in rec['values'].vals]
elif self.info.rec_type == 2: # Numeric
self.recs[i]['values'].vals_real = rec['values'].val[0] * self.info.adc_gain + self.info.adc_offset
elif self.info.rec_type == 5: # String (Annotation)
self.recs[i]['values'].vals_real = rec['values'].sval
self.recs[i]['values'].num = 1 # There is only one value (string) per rec
else:
raise Exception(f'Unknown rec_type: {self.info.rec_type}')
def __str__(self):
n_recs = [rec['values'].num for rec in self.recs]
return textwrap.dedent(f'''
======= TRACK INFO =======
name: {self.info.name}
unit: {self.info.unit}
starttime: {self.recs[0].dt.format()} ({self.recs[0].dt.humanize()})
measurements: {sum(n_recs)} in {len(n_recs)} blocks
--------------------------
''')
def to_pandas_ts(self, concat_list = True):
'''
Convert track to data frame with time and (real) value
'''
try:
            # In events srate is 0. As there is only one value per rec, freq can just be set to None
freq = f'{1000/self.info.srate}ms'
except ZeroDivisionError:
freq = None
pandas_ts = []
for rec in self.recs:
index = pd.date_range(start = rec.dt.datetime, freq = freq, periods = rec['values'].num)
pandas_ts.append(pd.Series(rec['values'].vals_real, index = index))
if concat_list:
pandas_ts = pd.concat(pandas_ts)
return pandas_ts
def save_to_file(self, folder_path = None, file_name = None, gzip = False):
'''
Save csv file containing track
'''
if file_name is None:
file_name = Path(self.info._io.name).stem + '_' + self.info.name + '_' + str(self.info.trkid) + ('.csv.gz' if gzip else '.csv')
if folder_path is None:
folder_path = 'converted'
folder_path = Path(folder_path)
#Create folder if it does not exist
folder_path.mkdir(parents=True, exist_ok=True)
file_path = folder_path / file_name
pandas_ts = self.to_pandas_ts()
pandas_ts.to_csv(file_path, header = False, compression='gzip' if gzip else 'infer')
print(f'Saved {file_path}')
class Vital:
'''
Class that holds an entire .vital file as a dict
'''
def __init__(self, path):
self.load_vital(path)
self.track_info = ListContainer([packet.data for packet in self.file.body if packet.type == 0])
self.dev_info = ListContainer([packet.data for packet in self.file.body if packet.type == 9])
# Event tracks may be duplicated in trackinfo.
        # Keep only one EVENT track (the last occurrence).
track_names = [trk.name for trk in self.track_info]
i_event = [i for i, x in enumerate(track_names) if x == "EVENT"]
if(len(i_event) > 1):
i_event.pop() # Keep one instance
for i in sorted(i_event, reverse=True):
del self.track_info[i]
self.recs = ListContainer([packet.data for packet in self.file.body if packet.type == 1])
def __str__(self):
'''
Human readable description when Vital object is printed
'''
return textwrap.dedent(f'''
======= VITAL FILE INFO =======
Path: {self.file.header._io.filename}
Size: {self.summed_datalen/1000.0} KB
Format Ver.: {self.file.header.format_ver}
Tracks (n): {len(self.track_info)}
----------- Tracks ------------
''') + \
pd.DataFrame(self.track_info)[['trkid', 'name', 'unit']].to_string(index = False) + \
textwrap.dedent('''
-------------------------------
''')
def get_track(self, trkid = None, name = None):
'''
Returns record. Can be called with either name or trkid.
If both are given, they are tested to match.
'''
if trkid is None and name is None:
            raise ValueError('get_track expected either trkid or name')
# Get trkid if name is given
if not name is None:
trkid_from_name, = (x.trkid for x in self.track_info if x.name == name)
if not trkid is None:
assert trkid == trkid_from_name
trkid = trkid_from_name
return Track(self, trkid)
def save_track_info(self, path):
'''
Save csv file with track info
'''
track_df = pd.DataFrame(self.track_info)[['trkid', 'name', 'unit', 'srate', 'devid']]
dev_df = pd.DataFrame(self.dev_info)[['devid', 'devname']]
track_df = pd.merge(track_df, dev_df, how = 'left', on = 'devid')
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
track_df.to_csv(path / Path('tracks.csv'), index=False)
print('Saved Track Info (tracks.csv)')
def save_tracks_to_file(self, trackids = None, names = None, path = None, save_all = False, gzip = False):
'''
Save tracks to individual csv files
'''
if path is None:
path = self.vital_filename
if save_all:
tracks = [self.get_track(trackid) for trackid in [track_info.trkid for track_info in self.track_info]]
self.save_track_info(path)
elif trackids is None and names is None:
raise ValueError('Expected either trkids, names or save_all')
else:
if names is not None:
tracks = [self.get_track(name = name) for name in names]
else:
tracks = [self.get_track(trackid = trackid) for trackid in trackids]
for track in tracks:
track.save_to_file(folder_path=path, gzip = gzip)
def load_vital(self, path):
with gzip.GzipFile(path, 'rb') as f:
            # the last 4 bytes of a gzip file store its unpacked size (mod 2**32)
total_file_size = f.seek(0, io.SEEK_END)
f.seek(0)
header = header_str.parse_stream(f)
# Loop until stream error
body = ListContainer()
completed = False
data_read = header.headerlen + 10
print('')
while not completed:
try:
body.append(body_str.parse_stream(f))
data_read = data_read + body[-1].datalen + 5
print(f'Data read: {round(data_read/1000)} of {total_file_size/1000} kB', end="\r", flush=True)
except StreamError:
#print("End of stream reached")
completed = True
print()
# Check that all packets have been parsed
self.summed_datalen = sum([x.datalen + 5 for x in body]) + header.headerlen + 10
#print("Total file size: " + str(total_file_size/1000) + "kB")
assert total_file_size == self.summed_datalen, "The summed datalen does not match the filesize"
self.vital_filename = Path(path).stem
self.file = Container(header=header, body=body)
# When run as __main__ (from command line)
def main(args):
try:
vitfile = Vital(args.vitalfile)
except FileNotFoundError as err:
print(err)
return
if args.info:
print(vitfile)
else:
#TODO output Save tracks
if args.trkid is not None:
try:
trkid_int = [int(id) for id in args.trkid]
except ValueError as err:
                print('Error: Expected --trkid as a list of integers')
print(err)
return
else:
trkid_int = None
vitfile.save_tracks_to_file(trackids = trkid_int, names = args.name, save_all = args.saveall, path=args.outdir, gzip=args.gzip)
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(description='''
Convert .Vital file to .csv files
    Output CSVs will be named <record name>_<track name>_<track id>.csv[.gz]
''')
parser.add_argument('vitalfile', type=str, help = 'Path to input file (.vital)')
    parser.add_argument('--outdir', '-o', type=str, help = 'Directory for csv files (default: a directory named after the input file)')
parser.add_argument('--info', '-I', action='store_true', help = 'Info about .vital file')
parser.add_argument('--trkid', '-t', nargs='+', help = 'Id(s) of track(s) to save')
parser.add_argument('--name', '-n', nargs='+', help = 'Name(s) of track(s) to save')
parser.add_argument('--saveall', action='store_true', help = 'Save all tracks')
parser.add_argument('--gzip', action='store_true', help = 'Compress all tracks with gzip')
main(parser.parse_args())
``` |
{
"source": "johannesnicolaus/celseq2",
"score": 3
} |
#### File: celseq2/celseq2/demultiplex.py
```python
from collections import Counter
import argparse
from celseq2.helper import filehandle_fastq_gz, print_logger
from celseq2.helper import join_path, mkfolder, base_name
import plotly.graph_objs as go
from plotly.offline import plot
import pandas as pd
def str2int(s):
"""
str('1-3,6,89-90,67') => [1,2,3,6,67,89,90]
"""
intervals = list(map(lambda x: x.strip().split('-'),
s.strip().split(',')))
out = []
for x in intervals:
try:
p, q = map(int, x)
except ValueError:
out += [int(x[0])]
continue
if p > q:
p, q = q, p
out += list(range(p, q + 1))
return(sorted(list(set(out))))
def bc_dict_seq2id(bc_index_fpath, col_seq=None):
""" dict[barcode_seq] = barcode_id """
if col_seq is None:
col_seq = 0
out = dict()
with open(bc_index_fpath, 'rt') as fin:
# next(fin) # remove header
# out = list(map(lambda row: row.strip().split(), fin))
# out = {row[1]: int(row[0]) for row in out}
row_num = 1
for row in fin:
if row.startswith('#'):
continue
row = row.strip().split()
# print('{}:{}'.format(row_num, row))
            row_seq = row[col_seq]
            row_id = row_num
            out[row_seq] = row_id  # map barcode sequence -> barcode id, as the docstring states
row_num += 1
return(out)
def bc_dict_id2seq(bc_index_fpath, col_seq=None):
""" dict[barcode_id] = barcode_seq"""
if col_seq is None:
col_seq = 0
out = dict()
with open(bc_index_fpath, 'rt') as fin:
# next(fin) # remove header
# out = map(lambda row: row.strip().split(), fin)
# out = {int(row[0]): row[1] for row in out}
row_num = 1
for row in fin:
if row.startswith('#'):
continue
row = row.strip().split()
# print('{}:{}'.format(row_num, row))
row_val = row[col_seq]
row_key = row_num
out[row_key] = row_val
row_num += 1
return(out)
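# Example (illustrative): for a hypothetical barcode index file containing
#   # id  sequence
#   1   AGACTC
#   2   CATGAC
# bc_dict_id2seq(path, col_seq=1) returns {1: 'AGACTC', 2: 'CATGAC'}: keys are
# 1-based counters over non-comment rows, values come from the chosen column.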
def demultiplexing(read1_fpath, read2_fpath, dict_bc_id2seq,
outdir,
start_umi=0, start_bc=6,
len_umi=6, len_bc=6, len_tx=35,
bc_qual_min=10,
is_gzip=True,
save_unknown_bc_fastq=False,
tagging_only=False,
tag_to='tagged.fastq',
do_bc_rev_complement=False,
do_tx_rev_complement=False,
verbose=False):
"""
Demultiplexing to fastq files based on barcode sequence.
"""
if is_gzip:
fh_umibc = filehandle_fastq_gz(read1_fpath)
fh_tx = filehandle_fastq_gz(read2_fpath)
else:
fh_umibc = open(read1_fpath, 'rt')
fh_tx = open(read2_fpath, 'rt')
sample_counter = Counter()
bc_fhout = dict()
for bc_id, bc_seq in dict_bc_id2seq.items():
# bc_id = '[{}]'.format('-'.join(map(str, bc_id)))
bc_fhout[bc_seq] = join_path(outdir,
'BC-{}-{}.fastq'.format(bc_id, bc_seq))
mkfolder(join_path(outdir, 'UNKNOWN'))
bc_fhout['UNKNOWNBC_R1'] = join_path(outdir, 'UNKNOWN',
'UNKNOWNBC_R1.fq')
bc_fhout['UNKNOWNBC_R2'] = join_path(outdir, 'UNKNOWN',
'UNKNOWNBC_R2.fq')
if tagging_only:
out_fpath_tagged_fq = join_path(outdir, tag_to)
out_fh_tagged_fq = open(out_fpath_tagged_fq, 'w')
for bc_seq, v in bc_fhout.items():
if bc_seq.startswith('UNKNOWN'):
bc_fhout[bc_seq] = open(v, 'w')
continue
if tagging_only:
bc_fhout[bc_seq] = out_fh_tagged_fq
else:
bc_fhout[bc_seq] = open(v, 'w')
i = 0
while(True):
if verbose and i % 1000000 == 0:
print_logger('Processing {:,} reads...'.format(i))
try:
umibc_name = next(fh_umibc).rstrip()
umibc_seq = next(fh_umibc).rstrip()
next(fh_umibc)
umibc_qualstr = next(fh_umibc).rstrip()
tx_name = next(fh_tx).rstrip()
tx_seq = next(fh_tx).rstrip()
next(fh_tx)
tx_qualstr = next(fh_tx).rstrip()
i += 1
except StopIteration:
break
# Quality check? or user should feed good files
# if not (umibc_name and umibc_seq and umibc_qualstr and tx_name and tx_seq and tx_qualstr):
# raise Exception('FastQError: Possible Broken Fastq. Check pair-{}.\n'.format(i+1))
# if len(umibc_seq) != len(umibc_qualstr) or len(tx_seq) != len(tx_qualstr):
# raise Exception('FastQError: Possible multi-line Fastq. Convert to 4-line please.\n')
# if umibc_name.split()[0] != tx_name.split()[0]:
# raise Exception('FastQError: Reads are not paired at pair-{}.\n'.format(i+1))
sample_counter['total'] += 1
umibc_idx = sorted(list(set(range(start_umi, start_umi + len_umi)) |
set(range(start_bc, start_bc + len_bc))))
if len(umibc_seq) < len(umibc_idx):
continue
umibc_min_qual = min((ord(umibc_qualstr[i]) - 33 for i in umibc_idx))
if umibc_min_qual < bc_qual_min:
continue
sample_counter['qualified'] += 1
umi = umibc_seq[start_umi:(start_umi + len_umi)]
cell_bc = umibc_seq[start_bc:(start_bc + len_bc)]
try:
fhout = bc_fhout[cell_bc]
except KeyError:
if save_unknown_bc_fastq:
fhout = bc_fhout['UNKNOWNBC_R1']
fhout.write('{}\n{}\n{}\n{}\n'.format(umibc_name, umibc_seq,
"+", umibc_qualstr))
fhout = bc_fhout['UNKNOWNBC_R2']
fhout.write('{}\n{}\n{}\n{}\n'.format(tx_name, tx_seq,
"+", tx_qualstr))
sample_counter['unknown'] += 1
continue
# if len(tx_seq) < len_tx:
# continue
if len(tx_seq) > len_tx:
tx_seq, tx_qualstr = tx_seq[:len_tx], tx_qualstr[:len_tx]
read_name = '@BC-{}_UMI-{}'.format(cell_bc, umi)
fhout.write('{}\n{}\n{}\n{}\n'.format(read_name, tx_seq,
"+", tx_qualstr))
sample_counter[cell_bc] += 1
sample_counter['saved'] += 1
sample_counter['unqualified'] = sample_counter['total'] - \
sample_counter['qualified']
for _, v in bc_fhout.items():
v.close()
fh_umibc.close()
fh_tx.close()
return(sample_counter)
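# Minimal usage sketch (illustrative; file names are hypothetical):
#
#     bc_dict = bc_dict_id2seq('barcodes.tsv', col_seq=1)  # id<TAB>sequence file
#     stats = demultiplexing('R1.fastq.gz', 'R2.fastq.gz', bc_dict,
#                            outdir='demultiplexed')
#
# Each saved R2 read is renamed '@BC-<cell_bc>_UMI-<umi>' and written either to
# a per-barcode 'BC-<id>-<seq>.fastq' file or, with tagging_only=True, to one
# tagged fastq; the returned Counter holds total/qualified/saved/unknown counts.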
def write_demultiplexing(stats, dict_bc_id2seq, stats_fpath):
if stats_fpath is None:
stats_fpath = 'demultiplexing.csv'
try:
fh_stats = open(stats_fpath, 'w')
except Exception as e:
raise Exception(e)
fh_stats.write('BC,Reads(#),Reads(%)\n')
for bc_id, bc_seq in dict_bc_id2seq.items():
# bc_id = '[{:04d}]'.format('-'.join(map(str, bc_id)))
formatter = '{:04d}-{},{},{:07.3f}\n'
fh_stats.write(formatter.format(bc_id, bc_seq, stats[bc_seq],
stats[bc_seq] / stats['total'] * 100))
formatter = '{},{},{:07.3f}\n'
fh_stats.write(formatter.format('saved', stats['saved'],
stats['saved'] / stats['total'] * 100))
fh_stats.write(formatter.format('unknown', stats['unknown'],
stats['unknown'] / stats['total'] * 100))
fh_stats.write(formatter.format('qualified', stats['qualified'],
stats['qualified'] / stats['total'] * 100))
fh_stats.write(formatter.format('unqualified', stats['unqualified'],
stats['unqualified'] / stats['total'] * 100))
fh_stats.write(formatter.format('total', stats['total'],
stats['total'] / stats['total'] * 100))
def plotly_demultiplexing_stats(fpaths=[], saveto='', fnames=[]):
'''
Save a plotly box graph with a list of demultiplexing stats files
Parameters
----------
fpaths : list
A list of file paths
saveto : str
File path to save the html file as the plotly box graph
fnames : list
A list of strings to label each ``fpaths``
Returns
-------
bool
True if saving successfully, False otherwise
'''
if not fnames:
fnames = [base_name(f) for f in fpaths]
if len(fnames) != len(fpaths):
fnames = [base_name(f) for f in fpaths]
num_reads_data = []
for i in range(len(fpaths)):
f = fpaths[i]
fname = fnames[i]
stats = pd.read_csv(f, index_col=0)
cell_stats = stats.iloc[:-5, :]
# tail 5 lines are fixed as the overall stats
overall_stats = stats.iloc[-5:, :]
num_reads_data.append(
go.Box(
y=cell_stats['Reads(#)'],
name='{} (#Saved={}/#Total={})'.format(
fname,
overall_stats.loc['saved', 'Reads(#)'],
overall_stats.loc['total', 'Reads(#)'])))
layout = go.Layout(
# legend=dict(x=-.1, y=-.2),
xaxis=dict(showticklabels=False),
title='Number of reads saved per BC per item')
fig = go.Figure(data=num_reads_data, layout=layout)
try:
plot(fig, filename=saveto, auto_open=False)
return(True)
except Exception as e:
print(e, flush=True)
return(False)
def main():
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('read1_fpath', type=str)
parser.add_argument('read2_fpath', type=str)
parser.add_argument('--bc-index', type=str, metavar='FILENAME',
help='File path to barcode dictionary')
parser.add_argument('--bc-seq-column', type=int, metavar='N',
default=0,
help=('Column of cell barcode dictionary file '
'which tells the actual sequences.'))
parser.add_argument('--bc-index-used', type=str, metavar='string',
default='1-96',
help='Index of used barcode IDs (default=1-96)')
parser.add_argument('--min-bc-quality', metavar='N', type=int, default=10,
help='Minimal quality for barcode reads (default=10)')
parser.add_argument('--out-dir', metavar='DIRNAME', type=str, default='.',
help='Output directory. Defaults to current directory')
parser.add_argument('--is-gzip', dest='is_gzip', action='store_true')
parser.add_argument('--not-gzip', dest='is_gzip', action='store_false')
parser.set_defaults(is_gzip=True)
parser.add_argument('--stats-file', metavar='STATFILE',
type=str, default='demultiplexing.log',
help='Statistics (default: demultiplexing.log)')
parser.add_argument('--umi-start-position',
metavar='N', type=int, default=0,
help=('Start index of UMI on R1. '
'Default: 0. (0-based).'))
parser.add_argument('--umi-length', metavar='N', type=int, default=6,
help='Length of UMI (default=6)')
parser.add_argument('--bc-start-position',
metavar='N', type=int, default=6,
help=('Start index of cell barcode on R1. '
'Default: 6. (0-based).'))
parser.add_argument('--bc-length', metavar='N', type=int, default=6,
help='Length of CELSeq barcode (default=6)')
parser.add_argument('--cut-length', metavar='N', type=int, default=35,
help='Length of read on R2 to be mapped. (default=35)')
parser.add_argument('--save-unknown-bc-fastq',
dest='save_unknown_bc_fastq', action='store_true')
parser.set_defaults(save_unknown_bc_fastq=False)
parser.add_argument('--tagging-only',
dest='tagging_only', action='store_true',
help=('Demultiplexed reads are merged to a file named'
' \"tagged.fastq\" under --out-dir.'))
parser.set_defaults(tagging_only=False)
parser.add_argument(
'--tag-to',
dest='tag_to', default='tagged.fastq',
help=('File base name to save the tagged fastq file. '
'Only used when tagging_only.'))
parser.add_argument('--verbose', dest='verbose', action='store_true')
parser.set_defaults(verbose=False)
args = parser.parse_args()
bc_dict = bc_dict_id2seq(args.bc_index, args.bc_seq_column)
bc_index_used = str2int(args.bc_index_used)
bc_dict = {x: bc_dict.get(x, None) for x in bc_index_used}
print_logger('Demultiplexing starts {}--{} ...'.format(args.read1_fpath,
args.read2_fpath))
out = demultiplexing(read1_fpath=args.read1_fpath,
read2_fpath=args.read2_fpath,
outdir=args.out_dir, dict_bc_id2seq=bc_dict,
start_umi=args.umi_start_position,
start_bc=args.bc_start_position,
len_umi=args.umi_length,
len_bc=args.bc_length,
len_tx=args.cut_length,
bc_qual_min=args.min_bc_quality,
is_gzip=args.is_gzip,
save_unknown_bc_fastq=args.save_unknown_bc_fastq,
tagging_only=args.tagging_only,
tag_to=args.tag_to,
do_bc_rev_complement=False,
do_tx_rev_complement=False,
verbose=args.verbose)
print_logger('Demultiplexing ends {}--{}.'.format(args.read1_fpath,
args.read2_fpath))
write_demultiplexing(out, bc_dict, args.stats_file)
if __name__ == "__main__":
main()
```
#### File: celseq2/celseq2/diagnose.py
```python
import argparse
from .helper import print_logger
from .helper import filehandle_fastq_gz
from collections import Counter
def get_dict_bc_has_reads(r1, bc_index, bc_seq_col):
print(r1)
with open(bc_index, 'rt') as fin:
# next(fin)
rows = map(lambda row: row.strip().split(), fin)
known_bc = set([row[bc_seq_col] for row in rows])
print_logger('There are {} different cell barcodes.'.format(len(known_bc)))
res = Counter({bc: 0 for bc in known_bc})
bc_len = len(next(iter(res)))
fh_r1 = filehandle_fastq_gz(r1) if r1.endswith('.gz') else open(r1, 'r')
i = 0
while True:
if i % 1000000 == 0:
print_logger('Processing {:,} reads...'.format(i))
try:
_ = next(fh_r1).rstrip()
r1_seq = next(fh_r1).rstrip()
_ = next(fh_r1).rstrip()
_ = next(fh_r1).rstrip()
i += 1
r1_bc = r1_seq[:bc_len]
if not r1_bc:
continue
if r1_bc in known_bc:
res[r1_bc] += 1
else:
res['unknown'] += 1
except StopIteration:
break
print_logger('Processed total {:,} reads...'.format(i))
fh_r1.close()
return res
def main():
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
'--bc-index', metavar='FILENAME', type=str,
help=('File path to cell barcode index.'))
parser.add_argument(
'--bc-seq-col', metavar='N', default=1, type=int,
        help=('Column index of cell barcode index file to find the sequence'
              ' of cell barcodes. Default: 1 (2nd column).'))
parser.add_argument(
'--r1', metavar='FILENAME', type=str,
help=('File path to R1.'))
parser.add_argument(
'-o', '--output',
metavar='FILENAME', type=str,
required=True,
help=('File path save output log.'))
args = parser.parse_args()
if args.r1 and args.bc_index:
counter_bc_size = get_dict_bc_has_reads(args.r1,
args.bc_index,
args.bc_seq_col)
fhout = open(args.output, 'w')
tot = sum([counter_bc_size[x] for x in counter_bc_size])
bc_size_max, bc_size_min = float('-inf'), float('inf')
for bc in counter_bc_size:
if bc != 'unknown' and counter_bc_size[bc] > bc_size_max:
bc_size_max = counter_bc_size[bc]
if bc != 'unknown' and counter_bc_size[bc] < bc_size_min:
bc_size_min = counter_bc_size[bc]
fhout.write('{:>{}}\t{:,}\t{:06.2f}\n'.format(
bc, 20,
counter_bc_size[bc], counter_bc_size[bc] * 100 / tot))
valid_bc_size_val = [counter_bc_size[x]
for x in counter_bc_size if x != 'unknown']
bc_size_avg = sum([x / len(valid_bc_size_val)
for x in valid_bc_size_val])
fhout.write('{:>{}}\t{:,}\t{:06.2f}\n'.format(
'bc_size_max', 20,
bc_size_max, bc_size_max * 100 / tot))
fhout.write('{:>{}}\t{:,}\t{:06.2f}\n'.format(
'bc_size_min', 20,
bc_size_min, bc_size_min * 100 / tot))
fhout.write('{:>{}}\t{:06.2f}\t{:06.2f}\n'.format(
'bc_size_avg', 20,
bc_size_avg, bc_size_avg * 100 / tot))
fhout.write('{:>{}}\t{:,}\t{:06.2f}\n'.format(
'total', 20,
tot, tot * 100 / tot))
fhout.close()
if __name__ == '__main__':
main()
```
#### File: celseq2/celseq2/dummy_species.py
```python
import random
import argparse
def gtf_attr_str(gene_id=None, gene_name=None, gene_biotype=None,
transcript_id=None, exon_num=None):
'''
Simple writer to fill 'attribute' column of GTF
>>> gtf_attr_str()
    >>> gtf_attr_str('g1', 'dummy_gene_1', transcript_id='g1_tx1', exon_num=1)
'gene_id "g1"; gene_name "dummy_gene_1"; transcript_id "g1_tx1"; exon_number "1";'
>>> gtf_attr_str('g1', 'dummy_gene_1')
'gene_id "g1"; gene_name "dummy_gene_1";'
'''
if not gene_id or not gene_name:
return(None)
out = "gene_id \"{gid}\"; gene_name \"{gname}\";".format(gid=gene_id,
gname=gene_name)
if gene_biotype:
out += " gene_biotype \"{biotype}\";".format(biotype=gene_biotype)
if transcript_id:
out += " transcript_id \"{txid}\";".format(txid=transcript_id)
if exon_num:
out += " exon_number \"{exidx}\";".format(exidx=exon_num)
return(out)
def gtf_str(chrm, src, feature, start, end, score, strand, frame, attr):
'''
Formatter to write single GTF line.
'''
out = ('{chrm}\t{src}\t{feature}\t{start}\t{end}\t{score}\t'
'{strand}\t{frame}\t{attr}').format(chrm=chrm,
src=src,
feature=feature,
start=start,
end=end,
score=score,
strand=strand,
frame=frame,
attr=attr)
return(out)
def dummy_gtf(saveto=None, len_exon=100, len_intron=200, len_intergenic=300):
'''
Create dummy GTF annations.
'''
fout = open(saveto, 'wt')
s_stream, e_stream = 1, 1
src = 'celseq2'
score = '.'
frame = '.'
out = []
# g0
g_attr = gtf_attr_str(gene_id='g0', gene_name='celseq2_gene-0', gene_biotype='protein_coding')
g = gtf_str(chrm='chr1', src=src,
feature='gene',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=g_attr)
fout.write('{}\n'.format(g))
out.append(g)
tx_attr = gtf_attr_str(gene_id='g0', gene_name='celseq2_gene-0', gene_biotype='protein_coding',
transcript_id='tx0')
tx = gtf_str(chrm='chr1', src=src,
feature='transcript',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
for i in range(2):
exon_attr = gtf_attr_str(gene_id='g0', gene_name='celseq2_gene-0', gene_biotype='protein_coding',
transcript_id='tx0', exon_num=i + 1)
s_stream, e_stream = s_stream, s_stream + len_exon - 1
exon = gtf_str(chrm='chr1', src=src,
feature='exon',
start=s_stream,
end=e_stream,
score=score,
strand='+',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
s_stream = e_stream + 1 + len_intron
s_stream, e_stream = e_stream - 2 * len_exon - 1 * len_intron + 1, e_stream
# g8
g_attr = gtf_attr_str(gene_id='g8', gene_name='celseq2_gene-8', gene_biotype='lincRNA')
g = gtf_str(chrm='chr2', src=src,
feature='gene',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=g_attr)
fout.write('{}\n'.format(g))
out.append(g)
tx_attr = gtf_attr_str(gene_id='g8', gene_name='celseq2_gene-8',
transcript_id='tx8', gene_biotype='lincRNA')
tx = gtf_str(chrm='chr2', src=src,
feature='transcript',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
for i in range(2):
exon_attr = gtf_attr_str(gene_id='g8', gene_name='celseq2_gene-8', gene_biotype='lincRNA',
transcript_id='tx8', exon_num=i + 1)
s_stream, e_stream = s_stream, s_stream + len_exon - 1
exon = gtf_str(chrm='chr2', src=src,
feature='exon',
start=s_stream,
end=e_stream,
score=score,
strand='+',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
s_stream = e_stream + 1 + len_intron
s_stream = e_stream + 1 + len_intergenic
# g1
g_attr = gtf_attr_str(gene_id='g1', gene_name='celseq2_gene-1', gene_biotype='miRNA')
g = gtf_str(chrm='chr1', src=src,
feature='gene',
start=s_stream,
end=s_stream + 3 * len_exon + 2 * len_intron - 1,
score=score,
strand='-',
frame=frame,
attr=g_attr)
fout.write('{}\n'.format(g))
out.append(g)
tx_attr = gtf_attr_str(gene_id='g1', gene_name='celseq2_gene-1', gene_biotype='miRNA',
transcript_id='tx1')
tx = gtf_str(chrm='chr1', src=src,
feature='transcript',
start=s_stream,
end=s_stream + 3 * len_exon + 2 * len_intron - 1,
score=score,
strand='-',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
for i in range(3, 0, -1):
exon_attr = gtf_attr_str(gene_id='g1', gene_name='celseq2_gene-1', gene_biotype='miRNA',
transcript_id='tx1', exon_num=i)
s_stream, e_stream = s_stream, s_stream + len_exon - 1
exon = gtf_str(chrm='chr1', src=src,
feature='exon',
start=s_stream,
end=e_stream,
score=score,
strand='-',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
s_stream = e_stream + 1 + len_intron
s_stream = e_stream + 1 + len_intergenic
# g2
g_attr = gtf_attr_str(gene_id='g2', gene_name='celseq2_gene-2', gene_biotype='protein_coding')
g = gtf_str(chrm='chr1', src=src,
feature='gene',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='-',
frame=frame,
attr=g_attr)
fout.write('{}\n'.format(g))
out.append(g)
tx_attr = gtf_attr_str(gene_id='g2', gene_name='celseq2_gene-2', gene_biotype='protein_coding',
transcript_id='tx2.1')
tx = gtf_str(chrm='chr1', src=src,
feature='transcript',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='-',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
for i in range(2, 0, -1):
exon_attr = gtf_attr_str(gene_id='g2', gene_name='celseq2_gene-2', gene_biotype='protein_coding',
transcript_id='tx2', exon_num=i)
s_stream, e_stream = s_stream, s_stream + len_exon - 1
exon = gtf_str(chrm='chr1', src=src,
feature='exon',
start=s_stream,
end=e_stream,
score=score,
strand='-',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
s_stream = e_stream + 1 + len_intron
tx_attr = gtf_attr_str(gene_id='g2', gene_name='celseq2_gene-2', gene_biotype='protein_coding',
transcript_id='tx2.2')
tx = gtf_str(chrm='chr1', src=src,
feature='transcript',
start=int(e_stream - 1.5 * len_exon),
end=int(e_stream + 1.5 * len_exon),
score=score,
strand='-',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
exon_attr = gtf_attr_str(gene_id='g2', gene_name='celseq2_gene-2', gene_biotype='protein_coding',
transcript_id='tx2.2', exon_num=1)
exon = gtf_str(chrm='chr1', src=src,
feature='exon',
start=int(e_stream - 1.5 * len_exon),
end=int(e_stream + 1.5 * len_exon),
score=score,
strand='-',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
s_stream = e_stream + 1 + len_intergenic
# g3
g_attr = gtf_attr_str(gene_id='g3', gene_name='celseq2_gene-3', gene_biotype='protein_coding')
g = gtf_str(chrm='chr1', src=src,
feature='gene',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=g_attr)
fout.write('{}\n'.format(g))
out.append(g)
tx_attr = gtf_attr_str(gene_id='g3', gene_name='celseq2_gene-3', gene_biotype='protein_coding',
transcript_id='tx3')
tx = gtf_str(chrm='chr1', src=src,
feature='transcript',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
for i in range(2):
exon_attr = gtf_attr_str(gene_id='g3', gene_name='celseq2_gene-3', gene_biotype='protein_coding',
transcript_id='tx3', exon_num=i + 1)
s_stream, e_stream = s_stream, s_stream + len_exon - 1
exon = gtf_str(chrm='chr1', src=src,
feature='exon',
start=s_stream,
end=e_stream,
score=score,
strand='+',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
s_stream = e_stream + 1 + len_intron
s_stream = e_stream + 1 + len_intergenic
# g4
g_attr = gtf_attr_str(gene_id='g4', gene_name='celseq2_gene-4', gene_biotype='protein_coding')
g = gtf_str(chrm='chr1', src=src,
feature='gene',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=g_attr)
fout.write('{}\n'.format(g))
out.append(g)
tx_attr = gtf_attr_str(gene_id='g4', gene_name='celseq2_gene-4', gene_biotype='protein_coding',
transcript_id='tx4')
tx = gtf_str(chrm='chr1', src=src,
feature='transcript',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
for i in range(2):
exon_attr = gtf_attr_str(gene_id='g4', gene_name='celseq2_gene-4', gene_biotype='protein_coding',
transcript_id='tx4', exon_num=i + 1)
s_stream, e_stream = s_stream, s_stream + len_exon - 1
exon = gtf_str(chrm='chr1', src=src,
feature='exon',
start=s_stream,
end=e_stream,
score=score,
strand='+',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
s_stream = e_stream + 1 + len_intron
# g5
s_stream, e_stream = e_stream - len_exon + 1, e_stream
g_attr = gtf_attr_str(gene_id='g5', gene_name='celseq2_gene-5', gene_biotype='protein_coding')
g = gtf_str(chrm='chr1', src=src,
feature='gene',
start=s_stream,
end=e_stream,
score=score,
strand='-',
frame=frame,
attr=g_attr)
fout.write('{}\n'.format(g))
out.append(g)
tx_attr = gtf_attr_str(gene_id='g5', gene_name='celseq2_gene-5', gene_biotype='protein_coding',
transcript_id='tx5')
tx = gtf_str(chrm='chr1', src=src,
feature='transcript',
start=s_stream,
end=e_stream,
score=score,
strand='-',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
for i in range(1, 0, -1):
exon_attr = gtf_attr_str(gene_id='g5', gene_name='celseq2_gene-5', gene_biotype='protein_coding',
transcript_id='tx5', exon_num=i)
s_stream, e_stream = s_stream, s_stream + len_exon - 1
exon = gtf_str(chrm='chr1', src=src,
feature='exon',
start=s_stream,
end=e_stream,
score=score,
strand='-',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
s_stream = e_stream + 1 + len_intron
s_stream = e_stream + 1 + len_intergenic
# g6
g_attr = gtf_attr_str(gene_id='g6', gene_name='celseq2_gene-6', gene_biotype='protein_coding')
g = gtf_str(chrm='chr1', src=src,
feature='gene',
start=s_stream,
end=s_stream + 1 * len_exon + 0 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=g_attr)
fout.write('{}\n'.format(g))
out.append(g)
tx_attr = gtf_attr_str(gene_id='g6', gene_name='celseq2_gene-6', gene_biotype='protein_coding',
transcript_id='tx6')
tx = gtf_str(chrm='chr1', src=src,
feature='transcript',
start=s_stream,
end=s_stream + 1 * len_exon + 0 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
exon_attr = gtf_attr_str(gene_id='g6', gene_name='celseq2_gene-6', gene_biotype='protein_coding',
transcript_id='tx6', exon_num=1)
exon = gtf_str(chrm='chr1', src=src,
feature='exon',
start=s_stream,
end=s_stream + 1 * len_exon + 0 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
# g7
g_attr = gtf_attr_str(gene_id='g7', gene_name='celseq2_gene-7', gene_biotype='protein_coding')
g = gtf_str(chrm='chr1', src=src,
feature='gene',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=g_attr)
fout.write('{}\n'.format(g))
out.append(g)
tx_attr = gtf_attr_str(gene_id='g7', gene_name='celseq2_gene-7', gene_biotype='protein_coding',
transcript_id='tx7')
tx = gtf_str(chrm='chr1', src=src,
feature='transcript',
start=s_stream,
end=s_stream + 2 * len_exon + 1 * len_intron - 1,
score=score,
strand='+',
frame=frame,
attr=tx_attr)
fout.write('{}\n'.format(tx))
out.append(tx)
for i in range(2):
exon_attr = gtf_attr_str(gene_id='g7', gene_name='celseq2_gene-7', gene_biotype='protein_coding',
transcript_id='tx7', exon_num=i + 1)
s_stream, e_stream = s_stream, s_stream + len_exon - 1
exon = gtf_str(chrm='chr1', src=src,
feature='exon',
start=s_stream,
end=e_stream,
score=score,
strand='+',
frame=frame,
attr=exon_attr)
fout.write('{}\n'.format(exon))
out.append(exon)
s_stream = e_stream + 1 + len_intron
e_total = e_stream
print('End of chr1 is {}'.format(e_total))
fout.close()
return(out)
def dummy_fasta(saveto, max_len=5000):
'''
Generate FASTA
chr1: total length 5K
chr2: same as very first 500bp
'''
fasta = ['A'] * int(max_len * 0.25) + \
['T'] * int(max_len * 0.25) + \
['G'] * int(max_len * 0.25) + \
['C'] * int(max_len * 0.25)
random.seed(42)
random.shuffle(fasta)
chr1 = fasta
chr2 = fasta[:500]
wrap_len = 60
with open(saveto, 'w') as fout:
fout.write('>chr1\n')
for i in range(0, len(chr1), wrap_len):
j = i + wrap_len if i + wrap_len < len(chr1) else len(chr1)
fout.write('{}\n'.format(
''.join(chr1[i:j])))
fout.write('>chr2\n')
for i in range(0, len(chr2), wrap_len):
j = i + wrap_len if i + wrap_len < len(chr2) else len(chr2)
fout.write('{}\n'.format(
''.join(chr2[i:j])))
def get_argument_parser():
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
'--gtf',
metavar="FILE",
help='Save dummy GTF to file.')
parser.add_argument(
'--fasta',
metavar="FILE",
help='Save dummy FASTA to file.')
parser.add_argument(
'--test',
action='store_true', default=False,
help='Perform doctest only.')
return(parser)
def main():
p = get_argument_parser()
args = p.parse_args()
if args.test:
import doctest
doctest.testmod()
else:
dummy_gtf(args.gtf)
dummy_fasta(args.fasta)
if __name__ == '__main__':
main()
```
#### File: celseq2/tests/test_demultiplexing.py
```python
import pytest
from celseq2.helper import md5sum
'''
Test demultiplexing.
Expectation is from the stats.log of demultiplexing.
'''
def test_demultiplexing(instance_demultiplex_stats):
calc = md5sum(instance_demultiplex_stats)
# e.g. 0027-GTGATC 18 003.333
assert calc == '7ed86eb8520bc17bd3f91c1e136cf2b1'
```
#### File: celseq2/tests/test_simulate_celseq2_data.py
```python
import pytest
import HTSeq
from celseq2.helper import md5sum
def test_simulate_celseq2(instance_celseq2_data):
r1_gz, r2_gz = instance_celseq2_data
assert md5sum(r1_gz) == '2a633d13fa343211a02bea6a4e14183b'
assert md5sum(r2_gz) == '97f490bf9929f1b64d7e0142ade72576'
``` |
{
"source": "johannesnicolaus/singlecell",
"score": 3
} |
#### File: singlecell/indrop/config.py
```python
import logging
from pkg_resources import resource_string, resource_filename
import yaml
#from jinja2 import Environment, PackageLoader, select_autoescape
_LOGGER = logging.getLogger(__name__)
#_TEMPLATE_ENV = Environment(
# loader=PackageLoader('singlecell',
# os.path.join('data', 'templates')),
# autoescape=select_autoescape(['html', 'xml'])
#)
def read_config(config_file):
"""Read the configuration file (in YAML format).
We rely on a simple configuration file that contains a set of
sections, with each section containing a list of options. With this simple
two-level hierarchy, it is straightforward to assign default values to
options based on the configuration file template included in the package.
A separate function checks whether mandatory options (e.g., input files)
are specified.
TODO: docstring"""
# read the configuration file template containing the default values for
# various options
with open(get_config_template_file(), 'rb') as fh:
        config = yaml.safe_load(fh)
# read the user-provided configuration file that can be used to override
# default option values
with open(config_file, 'rb') as fh:
        user_config = yaml.safe_load(fh)
errors = False
for section, entries in user_config.items():
if section not in config:
_LOGGER.warning(
'Ignoring invalid section "%s" in configuration file',
section)
continue
sec = config[section]
for option, value in entries.items():
if option not in sec:
_LOGGER.warning(
'Ignoring invalid option "%s" in section "%s" of '
'configuration file.', option, section)
elif value is not None:
if sec[option] is not None and not \
isinstance(value, type(sec[option])):
_LOGGER.error(
'Wrong data type for option "%s" in section "%s": '
'Should be "%s", but got "%s" (%s).',
option, section,
                        str(type(sec[option])), str(type(value)), str(value))
errors = True
sec[option] = value
return config, errors
def write_config(conf, output_file):
"""Write documentation to yaml file."""
with open(output_file, 'w') as ofh:
yaml.dump(conf, ofh, default_flow_style=False)
def get_config_template():
"""Create a configuration file template for use with the inDrop pipeline.
TODO: docstring
"""
return resource_string('singlecell', 'data/indrop/config_template.yaml')\
.decode('utf-8')
def get_config_template_file():
"""Get the path of the configuration file template.
TODO: docstring
"""
return resource_filename('singlecell', 'data/indrop/config_template.yaml')
```
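A minimal usage sketch of this configuration API (the YAML file names below are hypothetical placeholders, not part of the repository): defaults come from the packaged template, the user-provided file overrides them, and the resolved configuration can be written back out for provenance.
```python
# Hypothetical example; the import path follows the file layout shown above.
from singlecell.indrop.config import read_config, write_config

config, errors = read_config('my_indrop_run.yaml')
if errors:
    raise SystemExit('Configuration file contains options with invalid types.')
write_config(config, 'resolved_config.yaml')
```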
#### File: singlecell/indrop/quantify_transcript_expression.py
```python
import sys
import argparse
from genometools import misc
from .. import expression
_LOGGER = misc.get_logger()
def get_argument_parser():
desc = 'Quantify transcript expression based on aligned inDrop reads.'
parser = argparse.ArgumentParser(
description=desc, add_help=False)
g = parser.add_argument_group('Help')
g.add_argument('-h', '--help', action='help',
help='Show this help message and exit.')
g = parser.add_argument_group('Input and output files')
g.add_argument(
'-a', '--alignment-file', type=str, required=True,
help='.bam file containing the mapped reads.')
g.add_argument(
'-e', '--gene-expression-file', type=str, required=True,
help='.tsv file containing UMI-filtered (!) gene expression values.')
g.add_argument(
'-g', '--gene-file', type=str, required=True,
help='.tsv file containing list of protein-coding genes.')
g.add_argument(
'-n', '--genome-annotation-file', type=str, required=True,
help='.gtf file containing genome annotations.')
g.add_argument(
'-or', '--output-file', type=str, required=True,
help='Output file (.pickle format).')
g = parser.add_argument_group('Counting parameters')
g.add_argument(
'-m', '--max-genes', type=int, required=False, default=0,
        help=('Maximum number of genes to process. If 0, '
'process all genes. [0]')
)
return parser
def main(args=None):
"""Entry point for script."""
if args is None:
parser = get_argument_parser()
args = parser.parse_args()
alignment_file = args.alignment_file
gene_expression_file = args.gene_expression_file
gene_file = args.gene_file
genome_annotation_file = args.genome_annotation_file
output_file = args.output_file
max_genes = args.max_genes
if max_genes == 0:
max_genes = None
expression.quantify_transcript_expression(
alignment_file, gene_expression_file,
gene_file, genome_annotation_file,
output_file,
max_genes=max_genes)
return 0
if __name__ == '__main__':
return_code = main()
sys.exit(return_code)
```
#### File: singlecell/qc/general.py
```python
import numpy as np
import plotly.graph_objs as go
from plotly import tools
from genometools.expression import ExpMatrix
from .. import util
def plot_cell_transcript_distribution(
matrix, name='', color='rgb(31, 119, 180)',
width=1350, height=600,
font_size=16, font_family='serif'):
"""Plot the number of transcripts per cell.
"""
fig = tools.make_subplots(rows=1, cols=2, shared_yaxes=False)
hist_trace = go.Histogram(
x=matrix.sum(axis=0),
histnorm='percent',
marker=dict(
color=color,
)
)
assert isinstance(matrix, ExpMatrix)
fig.append_trace(hist_trace, 1, 1)
    num_transcripts = matrix.sum(axis=0)  # total transcripts per cell (sum over genes)
s = np.sort(num_transcripts)[::-1]
    step_size = max(1, int(matrix.shape[1] / 200.0))  # guard against a zero step for small matrices
x = s[::step_size]
y = np.arange(matrix.shape[1])[::step_size] + 1
scatter_trace = go.Scatter(
x=x,
y=y,
mode='lines',
line=dict(
color=color,
width=3.0,
),
)
fig.append_trace(scatter_trace, 1, 2)
layout = go.Layout(
title = '%s (n=%d)' % (name, matrix.n),
width=width,
height=height,
font=dict(
size=font_size,
family=font_family,
),
showlegend=False,
)
fig['layout'].update(layout)
xaxis_hist = dict(
title='Number of transcripts',
)
yaxis_hist = dict(
title='Fraction of cells (%)'
)
fig['layout'].xaxis1.update(xaxis_hist)
fig['layout'].yaxis1.update(yaxis_hist)
xaxis_scatter = dict(
title='Transcript threshold',
)
yaxis_scatter = dict(
title=('Number of cells above threshold'),
autorange=False,
range=[0, matrix.shape[1]*1.05],
)
fig['layout'].xaxis2.update(xaxis_scatter)
fig['layout'].yaxis2.update(yaxis_scatter)
return fig
def plot_transcriptome_components(
matrix, species='human', name='',
width=950, height=800, font_size=16, font_family='serif'):
"""Plots showing mitochondrial and ribosomal transcriptome components.
TODO: docstring
"""
def generate_scatter_trace(matrix, sel_genes, color):
transcripts = matrix.sum(axis=0)
sel_sum = matrix.loc[sel_genes].sum(axis=0)
sel_frac = sel_sum / matrix.sum(axis=0)
trace = go.Scatter(
x=transcripts,
y=100*sel_frac,
text=matrix.cells,
mode='markers',
marker=dict(
opacity=0.7,
color=color,
),
)
return trace
def generate_hist_trace(matrix, sel_genes, color):
transcripts = matrix.sum(axis=0)
sel_sum = matrix.loc[sel_genes].sum(axis=0)
sel_frac = sel_sum / matrix.sum(axis=0)
trace = go.Histogram(
y=100*sel_frac,
autobiny=False,
ybins=dict(
start=0,
end=100.1,
size=5.0001,
),
marker=dict(
color=color,
),
histnorm='percent',
)
return trace
fig = tools.make_subplots(rows=2, cols=2, shared_yaxes=True)
mito_genes = util.get_mitochondrial_genes(species=species)
ribo_genes = util.get_ribosomal_genes(species=species)
mito_color = 'rgb(255, 127, 14)'
ribo_color = 'rgb(31, 119, 180)'
try:
mito_trace1 = generate_hist_trace(matrix, mito_genes, mito_color)
mito_trace2 = generate_scatter_trace(
matrix, mito_genes, mito_color)
except KeyError:
pass
else:
fig.append_trace(mito_trace1, 1, 1)
fig.append_trace(mito_trace2, 1, 2)
try:
ribo_trace1 = generate_scatter_trace(
matrix, ribo_genes, ribo_color)
ribo_trace2 = generate_hist_trace(matrix, ribo_genes, ribo_color)
except KeyError:
pass
else:
fig.append_trace(ribo_trace1, 2, 1)
fig.append_trace(ribo_trace2, 2, 2)
if name:
name = name + ' '
title = '%s(n=%d)' % (name, matrix.shape[1])
layout = go.Layout(
title=title,
width=width,
height=height,
font=dict(
size=font_size,
            family=font_family,
),
showlegend=False,
bargap=0.1,
)
#fig['layout'].update(height=600, width=600, title='i <3 subplots')
xaxis_hist = dict(
title='Fraction of cells (%)'
)
xaxis_scatter = dict(
title='Number of transcripts'
)
yaxis_mito = dict(
title='Fraction of mitochondrial<br> transcripts (%)',
autorange=False,
range=[0, 100],
zeroline=True,
)
yaxis_ribo = dict(
title='Fraction of ribosomal<br> transcripts (%)',
autorange=False,
range=[0, 100],
zeroline=True,
)
fig['layout'].update(layout)
fig['layout'].xaxis1.update(xaxis_hist)
fig['layout']['xaxis1']['title'] = ''
fig['layout'].xaxis3.update(xaxis_hist)
#fig['layout'].xaxis.update(xaxis)
fig['layout'].xaxis2.update(xaxis_scatter)
fig['layout']['xaxis2']['title'] = ''
fig['layout'].xaxis4.update(xaxis_scatter)
fig['layout'].yaxis1.update(yaxis_mito)
fig['layout'].yaxis2.update(yaxis_ribo)
#fig['layout']['yaxis2'].update(yaxis_mito)
#fig['layout']['yaxis2']['title'] = ''
#fig['layout']['yaxis4'].update(yaxis_ribo)
#fig['layout']['yaxis4']['title'] = ''
return fig
```
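A usage sketch for the two QC plots above, assuming `matrix` is an already-loaded genes-by-cells `ExpMatrix` (its construction is outside this file) and that the import path mirrors the file layout:
```python
from singlecell.qc.general import (plot_cell_transcript_distribution,
                                   plot_transcriptome_components)

fig1 = plot_cell_transcript_distribution(matrix, name='Sample A')
fig2 = plot_transcriptome_components(matrix, species='human', name='Sample A')
```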
#### File: tests/indrop/test_reads.py
```python
import os
import logging
from pkg_resources import resource_filename
import pytest
from genometools import misc
from singlecell import indrop
_LOGGER = logging.getLogger(__name__)
@pytest.mark.online
def test_process_reads(my_barcode_read_file, my_mrna_read_file,
my_output_pypath):
"""Tests the read processing function."""
barcode1_file = resource_filename(
'singlecell', 'data/indrop/gel_barcode1_list.txt')
barcode2_file = resource_filename(
'singlecell', 'data/indrop/gel_barcode2_list.txt')
output_read_file = str(my_output_pypath.join('processed_reads.fastq'))
output_count_file = str(my_output_pypath.join('barcode_counts.tsv'))
output_log_file = str(my_output_pypath.join('read_processing_log.txt'))
indrop.reads.process_reads(
my_barcode_read_file, my_mrna_read_file,
barcode1_file, barcode2_file,
output_read_file, output_count_file, output_log_file)
md5sum = misc.get_file_md5sum(output_read_file)
assert md5sum == '4a21825d92ed776f126dd89843ba16d6'
md5sum = misc.get_file_md5sum(output_count_file)
assert md5sum == 'b2776081a49442bb01e62e1470574f18'
``` |
{
"source": "JohannesPertl/spotipy",
"score": 2
} |
#### File: tests/integration/test_user_endpoints.py
```python
from __future__ import print_function
import os
import sys
from spotipy import (
CLIENT_CREDS_ENV_VARS as CCEV,
prompt_for_user_token,
Spotify,
SpotifyException,
)
import unittest
import warnings
import requests
from pprint import pprint # noqa
class AuthTestSpotipy(unittest.TestCase):
"""
These tests require user authentication - provide client credentials using
the following environment variables
::
'SPOTIPY_CLIENT_USERNAME'
'SPOTIPY_CLIENT_ID'
'SPOTIPY_CLIENT_SECRET'
'SPOTIPY_REDIRECT_URI'
"""
playlist = "spotify:user:plamere:playlist:2oCEWyyAPbZp9xhVSxZavx"
playlist_new_id = "spotify:playlist:7GlxpQjjxRjmbb3RP2rDqI"
four_tracks = ["spotify:track:6RtPijgfPKROxEzTHNRiDp",
"spotify:track:7IHOIqZUUInxjVkko181PB",
"4VrWlk8IQxevMvERoX08iC",
"http://open.spotify.com/track/3cySlItpiPiIAzU3NyHCJf"]
two_tracks = ["spotify:track:6RtPijgfPKROxEzTHNRiDp",
"spotify:track:7IHOIqZUUInxjVkko181PB"]
other_tracks = ["spotify:track:2wySlB6vMzCbQrRnNGOYKa",
"spotify:track:29xKs5BAHlmlX1u4gzQAbJ",
"spotify:track:1PB7gRWcvefzu7t3LJLUlf"]
album_ids = ["spotify:album:6kL09DaURb7rAoqqaA51KU",
"spotify:album:6RTzC0rDbvagTSJLlY7AKl"]
bad_id = 'BAD_ID'
@classmethod
def setUpClass(self):
if sys.version_info >= (3, 2):
# >= Python3.2 only
warnings.filterwarnings(
"ignore",
category=ResourceWarning, # noqa
message="unclosed.*<ssl.SSLSocket.*>")
missing = list(filter(lambda var: not os.getenv(CCEV[var]), CCEV))
if missing:
raise Exception(
('Please set the client credentials for the test application'
' using the following environment variables: {}').format(
CCEV.values()))
self.username = os.getenv(CCEV['client_username'])
self.scope = (
'playlist-modify-public '
'user-library-read '
'user-follow-read '
'user-library-modify '
'user-read-private '
'user-top-read '
'user-follow-modify '
'user-read-recently-played '
'ugc-image-upload'
)
self.token = prompt_for_user_token(self.username, scope=self.scope)
self.spotify = Spotify(auth=self.token)
# Helper
def get_or_create_spotify_playlist(self, playlist_name):
playlists = self.spotify.user_playlists(self.username)
while playlists:
for item in playlists['items']:
if item['name'] == playlist_name:
return item
playlists = self.spotify.next(playlists)
return self.spotify.user_playlist_create(
self.username, playlist_name)
# Helper
def get_as_base64(self, url):
import base64
return base64.b64encode(requests.get(url).content).decode("utf-8")
def test_track_bad_id(self):
try:
self.spotify.track(self.bad_id)
self.assertTrue(False)
except SpotifyException:
self.assertTrue(True)
def test_basic_user_profile(self):
user = self.spotify.user(self.username)
self.assertTrue(user['id'] == self.username.lower())
def test_current_user(self):
user = self.spotify.current_user()
self.assertTrue(user['id'] == self.username.lower())
def test_me(self):
user = self.spotify.me()
self.assertTrue(user['id'] == self.username.lower())
def test_user_playlists(self):
playlists = self.spotify.user_playlists(self.username, limit=5)
self.assertTrue('items' in playlists)
self.assertTrue(len(playlists['items']) == 5)
def test_user_playlist_tracks(self):
playlists = self.spotify.user_playlists(self.username, limit=5)
self.assertTrue('items' in playlists)
for playlist in playlists['items']:
user = playlist['owner']['id']
pid = playlist['id']
results = self.spotify.user_playlist_tracks(user, pid)
self.assertTrue(len(results['items']) >= 0)
def test_current_user_saved_albums(self):
# List
albums = self.spotify.current_user_saved_albums()
self.assertTrue(len(albums['items']) > 1)
# Add
self.spotify.current_user_saved_albums_add(self.album_ids)
# Contains
self.assertTrue(
self.spotify.current_user_saved_albums_contains(
self.album_ids) == [
True, True])
# Remove
self.spotify.current_user_saved_albums_delete(self.album_ids)
albums = self.spotify.current_user_saved_albums()
self.assertTrue(len(albums['items']) > 1)
def test_current_user_playlists(self):
playlists = self.spotify.current_user_playlists(limit=10)
self.assertTrue('items' in playlists)
self.assertTrue(len(playlists['items']) == 10)
def test_user_playlist_follow(self):
self.spotify.user_playlist_follow_playlist(
'plamere', '4erXB04MxwRAVqcUEpu30O')
follows = self.spotify.user_playlist_is_following(
'plamere', '4erXB04MxwRAVqcUEpu30O', [
self.spotify.current_user()['id']])
self.assertTrue(len(follows) == 1, 'proper follows length')
self.assertTrue(follows[0], 'is following')
self.spotify.user_playlist_unfollow(
'plamere', '4erXB04MxwRAVqcUEpu30O')
follows = self.spotify.user_playlist_is_following(
'plamere', '4erXB04MxwRAVqcUEpu30O', [
self.spotify.current_user()['id']])
self.assertTrue(len(follows) == 1, 'proper follows length')
self.assertFalse(follows[0], 'is no longer following')
def test_current_user_saved_tracks(self):
tracks = self.spotify.current_user_saved_tracks()
self.assertTrue(len(tracks['items']) > 0)
def test_current_user_save_and_unsave_tracks(self):
tracks = self.spotify.current_user_saved_tracks()
total = tracks['total']
self.spotify.current_user_saved_tracks_add(self.four_tracks)
tracks = self.spotify.current_user_saved_tracks()
new_total = tracks['total']
self.assertTrue(new_total - total == len(self.four_tracks))
tracks = self.spotify.current_user_saved_tracks_delete(
self.four_tracks)
tracks = self.spotify.current_user_saved_tracks()
new_total = tracks['total']
self.assertTrue(new_total == total)
def test_categories(self):
response = self.spotify.categories()
self.assertTrue(len(response['categories']) > 0)
def test_category_playlists(self):
response = self.spotify.categories()
for cat in response['categories']['items']:
cat_id = cat['id']
response = self.spotify.category_playlists(category_id=cat_id)
if len(response['playlists']["items"]) > 0:
break
self.assertTrue(True)
def test_new_releases(self):
response = self.spotify.new_releases()
self.assertTrue(len(response['albums']) > 0)
def test_featured_releases(self):
response = self.spotify.featured_playlists()
self.assertTrue(len(response['playlists']) > 0)
def test_current_user_follows(self):
response = self.spotify.current_user_followed_artists()
artists = response['artists']
self.assertTrue(len(artists['items']) > 0)
def test_current_user_top_tracks(self):
response = self.spotify.current_user_top_tracks()
items = response['items']
self.assertTrue(len(items) > 0)
def test_current_user_top_artists(self):
response = self.spotify.current_user_top_artists()
items = response['items']
self.assertTrue(len(items) > 0)
def test_current_user_recently_played(self):
# No cursor
res = self.spotify.current_user_recently_played()
self.assertTrue(len(res['items']) <= 50)
played_at = res['items'][0]['played_at']
# Using `before` gives tracks played before
res = self.spotify.current_user_recently_played(
before=res['cursors']['after'])
self.assertTrue(len(res['items']) <= 50)
self.assertTrue(res['items'][0]['played_at'] < played_at)
played_at = res['items'][0]['played_at']
# Using `after` gives tracks played after
res = self.spotify.current_user_recently_played(
after=res['cursors']['before'])
self.assertTrue(len(res['items']) <= 50)
self.assertTrue(res['items'][0]['played_at'] > played_at)
def test_user_playlist_ops(self):
sp = self.spotify
# create empty playlist
playlist = self.get_or_create_spotify_playlist(
'spotipy-testing-playlist-1')
playlist_id = playlist['id']
# remove all tracks from it
sp.user_playlist_replace_tracks(
self.username, playlist_id, [])
playlist = sp.user_playlist(self.username, playlist_id)
self.assertTrue(playlist['tracks']['total'] == 0)
self.assertTrue(len(playlist['tracks']['items']) == 0)
# add tracks to it
sp.user_playlist_add_tracks(
self.username, playlist_id, self.four_tracks)
playlist = sp.user_playlist(self.username, playlist_id)
self.assertTrue(playlist['tracks']['total'] == 4)
self.assertTrue(len(playlist['tracks']['items']) == 4)
# remove two tracks from it
sp.user_playlist_remove_all_occurrences_of_tracks(self.username,
playlist_id,
self.two_tracks)
playlist = sp.user_playlist(self.username, playlist_id)
self.assertTrue(playlist['tracks']['total'] == 2)
self.assertTrue(len(playlist['tracks']['items']) == 2)
# replace with 3 other tracks
sp.user_playlist_replace_tracks(self.username,
playlist_id,
self.other_tracks)
playlist = sp.user_playlist(self.username, playlist_id)
self.assertTrue(playlist['tracks']['total'] == 3)
self.assertTrue(len(playlist['tracks']['items']) == 3)
def test_playlist(self):
# New playlist ID
pl = self.spotify.playlist(self.playlist_new_id)
self.assertTrue(pl["tracks"]["total"] > 0)
# Old playlist ID
pl = self.spotify.playlist(self.playlist)
self.assertTrue(pl["tracks"]["total"] > 0)
def test_playlist_tracks(self):
# New playlist ID
pl = self.spotify.playlist_tracks(self.playlist_new_id, limit=2)
self.assertTrue(len(pl["items"]) == 2)
self.assertTrue(pl["total"] > 0)
# Old playlist ID
pl = self.spotify.playlist_tracks(self.playlist, limit=2)
self.assertTrue(len(pl["items"]) == 2)
self.assertTrue(pl["total"] > 0)
def test_playlist_upload_cover_image(self):
pl1 = self.get_or_create_spotify_playlist('spotipy-testing-playlist-1')
plid = pl1['uri']
old_b64 = pl1['images'][0]['url']
# Upload random dog image
r = requests.get('https://dog.ceo/api/breeds/image/random')
dog_base64 = self.get_as_base64(r.json()['message'])
self.spotify.playlist_upload_cover_image(plid, dog_base64)
# Image must be different
pl1 = self.spotify.playlist(plid)
new_b64 = self.get_as_base64(pl1['images'][0]['url'])
self.assertTrue(old_b64 != new_b64)
def test_playlist_cover_image(self):
pl = self.get_or_create_spotify_playlist('spotipy-testing-playlist-1')
plid = pl['uri']
res = self.spotify.playlist_cover_image(plid)
self.assertTrue(len(res) > 0)
first_image = res[0]
self.assertTrue('width' in first_image)
self.assertTrue('height' in first_image)
self.assertTrue('url' in first_image)
def test_user_follows_and_unfollows_artist(self):
# Initially follows 1 artist
res = self.spotify.current_user_followed_artists()
self.assertTrue(res['artists']['total'] == 1)
# Follow 2 more artists
artists = ["6DPYiyq5kWVQS4RGwxzPC7", "0NbfKEOTQCcwd6o7wSDOHI"]
self.spotify.user_follow_artists(artists)
res = self.spotify.current_user_followed_artists()
self.assertTrue(res['artists']['total'] == 3)
# Unfollow these 2 artists
self.spotify.user_unfollow_artists(artists)
res = self.spotify.current_user_followed_artists()
self.assertTrue(res['artists']['total'] == 1)
def test_user_follows_and_unfollows_user(self):
# TODO improve after implementing `me/following/contains`
users = ["11111204", "xlqeojt6n7on0j7coh9go8ifd"]
# Follow 2 more users
self.spotify.user_follow_users(users)
# Unfollow these 2 users
self.spotify.user_unfollow_users(users)
def test_deprecated_starred(self):
pl = self.spotify.user_playlist(self.username)
self.assertTrue(pl["tracks"] is None)
self.assertTrue(pl["owner"] is None)
def test_deprecated_user_playlist(self):
# Test without user due to change from
# https://developer.spotify.com/community/news/2018/06/12/changes-to-playlist-uris/
pl = self.spotify.user_playlist(None, self.playlist)
self.assertTrue(pl["tracks"]["total"] > 0)
    def test_deprecated_user_playlist_tracks(self):
# Test without user due to change from
# https://developer.spotify.com/community/news/2018/06/12/changes-to-playlist-uris/
pl = self.spotify.user_playlist_tracks(None, self.playlist, limit=2)
self.assertTrue(len(pl["items"]) == 2)
self.assertTrue(pl["total"] > 0)
``` |
{
"source": "JohannesPertl/vreddit-download-bot",
"score": 3
} |
#### File: services/upload/upload.py
```python
import json
import logging
import os
import redis
import requests
import sys
import shared.util as util
from shared.exceptions import InvalidRequest, AlreadyProcessed
def main():
request_json = redis.spop(current_set)
if request_json:
request = json.loads(request_json.decode('utf-8'))
try:
upload_request(request)
except InvalidRequest as ie:
util.open_lock(redis, request['id'])
logging.info(f"Invalid upload request {request['id']} : {request['link']}: {ie}")
except AlreadyProcessed as ape:
util.open_lock(redis, request['id'])
logging.error(ape)
except Exception as e:
util.handle_failed_request(redis, request, current_set, e)
logging.error(
f"{type(e).__name__} occurred while uploading request {request['id']} : {request['link']} : {e}")
def upload_request(request):
# Check for duplicates
util.already_processed_check(redis, request)
# Create request
uploaded_link = upload(request['reddit_link'])
request.update(
uploaded_link=uploaded_link
)
request_json = json.dumps(request)
# Enqueue for replying
if uploaded_link:
success = redis.sadd(next_set, request_json)
logging.info(f"Uploaded request {request['id']} : {request['link']}.")
else:
raise Exception("Invalid upload link.")
def upload(link):
try:
response_json = upload_via_reddittube(link)
if response_json['status'] == 'ok':
return response_json['share_url']
else:
raise Exception(f"Invalid response status: {response_json['status']}")
except Exception as e:
raise Exception(f"Couldn't upload to reddittube: {e}")
def upload_via_reddittube(link):
site_url = "https://reddit.tube/parse"
response = requests.get(site_url, params={
'url': link
}, timeout=250)
return response.json()
if __name__ == '__main__':
util.log("upload")
config = util.load_configuration()
redis = redis.Redis(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'])
current_set = config['REDIS_REQUESTS_UPLOAD']
next_set = config['REDIS_REQUESTS_REPLY']
while True:
main()
```
#### File: vreddit-download-bot/shared/util.py
```python
import json
import logging
import os
import re
import sys
import urllib.parse
from urllib.error import HTTPError, URLError
from urllib.request import Request
import praw
import requests
import yaml
from shared.exceptions import AlreadyProcessed
def load_configuration():
conf_file = os.path.join(os.path.dirname(__file__), os.environ['CONFIG'])
with open(conf_file, encoding='utf8') as f:
config = yaml.safe_load(f)
# load dependent configuration
config['FOOTER'] = "\n\n *** \n" + config['INFO_LINK'] + " | " + config[
'CONTACT_LINK']
return config
CONFIG = load_configuration()
def authenticate():
"""Authenticate via praw.ini file, look at praw documentation for more info"""
authentication = praw.Reddit(site_name=CONFIG['BOT_NAME'])
logging.info(f'Authenticated as {authentication.user.me()}')
return authentication
def log(service, stdout=False):
if stdout:
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format=f'{service:<6}: %(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
else:
logging.basicConfig(
filename=f"shared/logs/bot.log",
level=logging.INFO,
format=f'{service:<6}: %(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
def get_reddit_item(reddit, request):
if request['type'] == "message":
return reddit.inbox.message(request['id'])
else:
return reddit.comment(request['id'])
def contains_link(string):
"""Returns link or empty string"""
match_link = re.search(
r"https?://(www\.)?[-a-zA-Z0-9@:%._+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_+.~#?&/=]*)", string)
return match_link[0] if match_link else ""
def contains_username(name, string):
"""Returns regex search"""
return re.search(r"(?i)u/" + name, string)
def get_lock(request_id):
return f"{CONFIG['REDIS_REQUESTS_LOCKED']}:{request_id}"
def open_lock(redis, request_id):
# Remove redundant lock to free up space
lock = get_lock(request_id)
redis.delete(lock)
def handle_failed_request(redis, request, current_set, exception):
if request['retries'] > 10:
open_lock(redis, request['id'])
request.update(
error=str(exception)
)
next_set = CONFIG['REDIS_REQUESTS_FAILED']
logging.error(f"Reached retry limit. Pushing request {request['id']} : {request['link']} to failed requests.")
else:
request['retries'] += 1
next_set = current_set
request_json = json.dumps(request)
redis.sadd(next_set, request_json)
def is_link_valid(link):
# Check if download is valid without downloading
if "reddit.tube" in link:
if requests.head(link, timeout=10).ok:
return True
return False
try:
status_code = urllib.request.urlopen(link, timeout=2).getcode()
return status_code == 200
except (HTTPError, URLError, ValueError):
return False
def already_processed_check(redis, request):
if redis.sismember(CONFIG['REDIS_REQUESTS_SUCCESS'], request['id']):
raise AlreadyProcessed(request['link'])
``` |
{
"source": "johannespetereit/eShopOnContainers",
"score": 3
} |
#### File: eShopOnContainers/locust/eshoptasks.py
```python
from locust import TaskSet, task, seq_task
class TrafficTasks(TaskSet):
def on_start(self):
pass
@task(70)
class Browse(TaskSet):
def on_start(self):
print(f"{self.locust.user_info['Email']} is browsing")
self.reload_shop()
@task(15)
def reload_shop(self):
self.locust.executor.show_index()
@task(30)
def update_product_list(self):
self.locust.executor.update_product_list()
@task(80)
def update_product_list_simple(self):
self.locust.executor.update_product_list_simple()
@task(7)
def stop_browsing(self):
self.interrupt()
@task(50)
class Shop(TaskSet):
def on_start(self):
if not self.locust.executor.is_logged_in:
print(f"{self.locust.user_info['Email']} cannot shop, not logged in")
self.interrupt()
else:
print(f"{self.locust.user_info['Email']} starts shopping")
@task(20)
def add_to_basket(self):
self.locust.executor.add_to_basket()
@task(30)
def reload_shop(self):
self.locust.executor.show_index()
@task(30)
def update_product_list(self):
self.locust.executor.update_product_list()
@task(80)
def update_product_list_simple(self):
self.locust.executor.update_product_list_simple()
@task(40)
def stop_shopping(self):
self.interrupt()
@task(20)
def show_basket(self):
self.locust.executor.show_basket()
@task(13)
def show_orders(self):
self.locust.executor.show_orders()
@task(12)
def show_order_detail(self):
self.locust.executor.show_order_detail()
@task(10)
def goto_checkout(self):
self.locust.executor.show_checkout()
@task(30)
class Checkout(TaskSet):
def on_start(self):
if not self.locust.executor.is_logged_in or not self.locust.executor.has_items_in_basket:
print(f"{self.locust.user_info['Email']} can't checkout, not logged in or nothing in basket")
self.interrupt()
else:
print(f"{self.locust.user_info['Email']} checking out")
@seq_task(1)
def show_basket(self):
self.locust.executor.show_basket()
@seq_task(2)
def goto_checkout(self):
self.locust.executor.show_checkout()
@seq_task(3)
def perform_checkout(self):
self.locust.executor.perform_checkout()
self.interrupt()
@task(40)
class Login(TaskSet):
def on_start(self):
if not self.locust.executor.can_log_in:
print(f"{self.locust.user_info['Email']} cannot login: No user available to login")
self.interrupt()
elif self.locust.executor.is_logged_in:
print(f"{self.locust.user_info['Email']} is already logged in")
self.interrupt()
else:
print(f"{self.locust.user_info['Email']} is logging in")
self.show_login()
def show_login(self):
self.locust.executor.show_login()
@task(80)
def perform_login(self):
self.locust.executor.perform_login()
self.interrupt()
@task(12)
def give_up_login(self):
self.locust.executor.give_up_login()
self.interrupt()
@task(10)
class Register(TaskSet):
def on_start(self):
self.show_register()
def show_register(self):
self.locust.executor.show_register()
@task(30)
def perform_register(self):
self.locust.executor.perform_register()
self.interrupt()
@task(70)
def give_up_register(self):
self.locust.executor.give_up_register()
self.interrupt()
``` |
{
"source": "johannespetrat/OptML",
"score": 3
} |
#### File: OptML/optml/models.py
```python
import abc
from sklearn.base import BaseEstimator
class Model(BaseEstimator):
def __init__(self):
raise NotImplementedError("You need to implement the initialisation function for this model!")
@abc.abstractmethod
def get_params(self):
raise NotImplementedError("You need to implement the 'get_params' function for this model!")
class KerasModel(Model):
__model_module__ = 'keras'
def __init__(self):
raise NotImplementedError("You need to implement the initialisation function for this model! " +
"It should at least specify 'batch_size' and the number of epochs.")
def fit(self, X, y, verbose=0):
return self.model.fit(X,y, epochs=self.epochs, batch_size=self.batch_size, verbose=verbose)
def predict(self, X):
return self.model.predict(X)
```
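A minimal sketch of a concrete `KerasModel` subclass (layer sizes, loss, and parameter names are assumptions, not taken from this repository): the initialiser must set `self.model`, `self.batch_size`, and `self.epochs` so the inherited `fit` and `predict` work, and `get_params` is needed for cloning.
```python
from keras.models import Sequential
from keras.layers import Dense
from optml.models import KerasModel

class SimpleKerasClassifier(KerasModel):
    def __init__(self, batch_size=32, epochs=10, n_features=10):
        self.batch_size = batch_size
        self.epochs = epochs
        self.n_features = n_features
        # Small binary classifier, used purely for illustration.
        self.model = Sequential([
            Dense(16, activation='relu', input_shape=(n_features,)),
            Dense(1, activation='sigmoid'),
        ])
        self.model.compile(optimizer='adam', loss='binary_crossentropy')

    def get_params(self, deep=True):
        return {'batch_size': self.batch_size,
                'epochs': self.epochs,
                'n_features': self.n_features}
```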
#### File: OptML/optml/optimizer_base.py
```python
import numpy as np
import abc
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
class Optimizer(object):
def __init__(self, model, hyperparams, eval_func):
"""
Keyword arguments:
model - a model as specified in the readme
hyperparams - a list of Parameter instances
eval_func - scoring function to be minimized. Takes input (y_true, y_predicted) where
y_true and y_predicted are numpy arrays
"""
self.model = model
self.hyperparam_history = []
self.hyperparams = hyperparams
self.eval_func = eval_func
self.model_module = self.infer_model_type(model)
self.param_dict = {p.name:p for p in hyperparams}
def infer_model_type(self, model):
if 'xgboost' in model.__module__.lower():
return 'xgboost'
elif 'pipeline' in model.__module__.lower():
return 'pipeline'
elif 'sklearn' in model.__module__.lower():
return 'sklearn'
elif (hasattr(model, '__model_module__')) and ('keras' in model.__model_module__.lower()):
return 'keras'
else:
raise NotImplementedError("{} not implemented for module '{}'".format(
str(type(self))[:-2].split('.')[-1], model.__module__))
def get_kfold_split(self, n_folds, X):
"""
Splits X into n_folds folds
Args:
n_folds: integer specifying number of folds
X: data to be split
Returns:
a generator with tuples of form (train_idxs, test_idxs)
"""
kf = KFold(n_splits=n_folds)
return kf.split(X)
@abc.abstractmethod
def get_next_hyperparameters(self):
raise NotImplementedError("This class needs a get_next_hyperparameters(...) function")
@abc.abstractmethod
def fit(self, X, y, params):
raise NotImplementedError("This class needs a self.fit(X, y, params) function")
def build_new_model(self, new_hyperparams):
if self.model_module == 'pipeline':
new_model = self.model.set_params(**new_hyperparams)
elif (self.model_module == 'sklearn') or (self.model_module == 'xgboost'):
new_model = self.model.__class__(**new_hyperparams)
elif self.model_module == 'statsmodels':
raise NotImplementedError("Not yet implemented for 'statsmodels'")
#new_model = self.model.__class__(**new_hyperparams)
#new_model = ModelConverter(new_model).convert()
elif self.model_module == 'keras':
new_model = self.model.__class__(**new_hyperparams)
else:
raise NotImplementedError("{} not implemented for module '{}'".format(
str(type(self))[:-2].split('.')[-1], self.model_module))
return new_model
def get_best_params_and_model(self):
"""
Returns the best parameters and model after optimization.
Keyword arguments:
None
"""
best_params_idx = np.argmax([score for score, params in self.hyperparam_history])
best_params = self.hyperparam_history[best_params_idx][1]
if isinstance(self.model, Pipeline):
all_params = self.model.get_params()
all_params.update(best_params)
best_model = self.model.set_params(**all_params)
else:
best_model = self.model.__class__(**dict(self.model.get_params(), **best_params))
return best_params, best_model
class MissingValueException(Exception):
pass
class Parameter(object):
def __init__(self, name, param_type, lower=None, upper=None, possible_values=None, distribution=None):
"""
Keywords:
name - String specifying the name of this parameter
param_type - 'categorical', 'continuous', 'integer', 'boolean', 'int_array' or 'continuous_array'
lower - lower bound of parameter (only applicable to numerical types)
upper - upper bound of parameter (only applicable to numerical types)
possible_values - list of possible values a parameter can take (only applicable to categorical type)
distribution - specifies a distribution to sample from (only applicable to continuous types); not actually implemented yet
"""
param_type = param_type.lower()
if not param_type in ['categorical', 'continuous', 'integer', 'boolean', 'int_array', 'continuous_array']:
raise ValueError("param_type needs to be 'categorical','continuous','integer', 'int_array', 'continuous_array' or 'boolean'")
if (param_type == 'categorical') and (possible_values is None):
raise MissingValueException("Need to provide possible values for categorical parameters.")
self.possible_values = possible_values
self.param_type = param_type.lower()
if (param_type in ['continuous', 'integer', 'int_array', 'continuous_array']) and (
(lower is None) or (upper is None)):
            raise MissingValueException("Need to provide 'lower' and 'upper' for parameters of type '{}'.".format(
param_type))
self.lower = lower
self.upper = upper
self.name = name
if distribution is not None:
self.distribution = distribution
if param_type.lower() in ['int_array', 'continuous_array']:
if len(lower)!=len(upper):
raise ValueError("'lower' and 'upper' must be of the same length.")
self.size = len(lower)
def random_sample(self):
"""
returns a uniformly random sample of the parameter
Keywords:
None
"""
if self.param_type == 'integer':
return np.random.choice(np.arange(self.lower, self.upper+1, 1))
elif self.param_type == 'categorical':
return str(np.random.choice(self.possible_values))
elif self.param_type == 'continuous':
return np.random.uniform(self.lower, self.upper)
elif self.param_type == 'boolean':
return np.random.choice([True, False])
elif self.param_type == 'continuous_array':
return [np.random.uniform(self.lower[i],self.upper[i]) for i in range(len(self.lower))]
elif self.param_type == 'int_array':
return [np.random.choice(np.arange(self.lower[i],self.upper[i]),1)[0] for i in range(len(self.lower))]
```
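A short usage sketch that mirrors the tests below: the search space is a list of `Parameter` objects, the scoring function takes `(y_true, y_pred)` numpy arrays, and a concrete optimizer such as `RandomSearchOptimizer` returns the best parameters and a refittable model.
```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from optml import Parameter
from optml.random_search import RandomSearchOptimizer

def clf_score(y_true, y_pred):
    return np.sum(y_true == y_pred) / float(len(y_true))

data, target = make_classification(n_samples=100, n_features=20, n_informative=10)
model = RandomForestClassifier(max_depth=5)
params = [Parameter('max_depth', 'integer', lower=1, upper=10)]
optimizer = RandomSearchOptimizer(model, params, clf_score)
best_params, best_model = optimizer.fit(X_train=data, y_train=target, n_iters=10)
best_model.fit(data, target)
```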
#### File: OptML/tests/test_hyperopt_optimizer.py
```python
import numpy as np
import unittest
from optml.hyperopt_optimizer import HyperoptOptimizer
from optml import Parameter
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
def clf_score(y_true,y_pred):
return np.sum(y_true==y_pred)/float(len(y_true))
class TestHyperoptOptimizer(unittest.TestCase):
def test_param_space(self):
interval = [0,10]
p1 = Parameter('test_integer', 'integer', lower=interval[0], upper=interval[1])
p2 = Parameter('test_categorical', 'categorical', possible_values=['A','B','C'])
p3 = Parameter('test_boolean', 'boolean')
p4 = Parameter('test_continuous', 'continuous', lower=interval[0], upper=interval[1])
p5 = Parameter('test_continuous_array', 'continuous_array', lower=[interval[0]], upper=[interval[1]])
model = RandomForestClassifier()
hyperopt = HyperoptOptimizer(model, [p1,p2,p3,p4],lambda x: x)
param_space = hyperopt.param_space
with self.assertRaises(ValueError):
hyperopt = HyperoptOptimizer(model, [p1,p2,p3,p4,p5],lambda x: x)
def test_improvement(self):
np.random.seed(4)
data, target = make_classification(n_samples=100,
n_features=45,
n_informative=15,
n_redundant=5,
class_sep=1,
n_clusters_per_class=4,
flip_y=0.4)
model = RandomForestClassifier(max_depth=5)
model.fit(data, target)
start_score = clf_score(target, model.predict(data))
p1 = Parameter('max_depth', 'integer', lower=1, upper=10)
hyperopt = HyperoptOptimizer(model, [p1], clf_score)
best_params, best_model = hyperopt.fit(X_train=data, y_train=target, n_iters=10)
best_model.fit(data, target)
final_score = clf_score(target, best_model.predict(data))
self.assertTrue(final_score>start_score)
for status in hyperopt.trials.statuses():
self.assertEqual(status, 'ok')
```
#### File: OptML/tests/test_random_search.py
```python
import numpy as np
import unittest
from optml.random_search import RandomSearchOptimizer
from optml import Parameter
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
def clf_score(y_true,y_pred):
return np.sum(y_true==y_pred)/float(len(y_true))
class TestRandomSearchOptimizer(unittest.TestCase):
def test_improvement(self):
np.random.seed(4)
data, target = make_classification(n_samples=100,
n_features=45,
n_informative=15,
n_redundant=5,
class_sep=1,
n_clusters_per_class=4,
flip_y=0.4)
model = RandomForestClassifier(max_depth=5)
model.fit(data, target)
start_score = clf_score(target, model.predict(data))
p1 = Parameter('max_depth', 'integer', lower=1, upper=10)
rand_search = RandomSearchOptimizer(model, [p1], clf_score)
best_params, best_model = rand_search.fit(X_train=data, y_train=target, n_iters=10)
best_model.fit(data, target)
final_score = clf_score(target, best_model.predict(data))
self.assertTrue(final_score>start_score)
``` |
{
"source": "johannespischinger/mlops_project",
"score": 3
} |
#### File: src/models/predict_model.py
```python
import argparse
import torch
from train_model import CNNModel
def loader():
parser = argparse.ArgumentParser(description="Evaluation arguments")
parser.add_argument("load_model_from", default="")
parser.add_argument("load_data_from", default="")
# add any additional argument that you want
args = parser.parse_args()
# Loading the model
checkpoint = torch.load(args.load_model_from)
model = CNNModel()
model.load_state_dict(checkpoint["state_dict"])
model.eval()
path = args.load_data_from
dataset = torch.load(path)
return model, dataset
def prediction(model, data):
testloader = torch.utils.data.DataLoader(data)
test_acc = []
with torch.no_grad():
test_acc = []
for images, labels in testloader:
prediction = torch.exp(model(images.float().unsqueeze(1)))
top_p, top_class = prediction.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
test_acc.append(torch.mean(equals.type(torch.FloatTensor)))
        print(f"Accuracy: {sum(test_acc) / len(test_acc)}")
if __name__ == "__main__":
model, dataset = loader()
prediction(model, dataset)
```
#### File: mlops_project/tests/test_data.py
```python
import os
import numpy as np
import pytest
import torch
from tests import _PROJECT_ROOT
@pytest.mark.skipif(
not os.path.exists(f"{_PROJECT_ROOT}/data/processed"),
reason="Data files not found",
)
def test_load():
test = torch.load(f"{_PROJECT_ROOT}/data/processed/test.pt")
train = torch.load(f"{_PROJECT_ROOT}/data/processed/train.pt")
# test, train = load()
assert (
len(train) == 25000
), "Dataset did not have the correct number of samples"
assert (
len(test) == 5000
), "Dataset did not have the correct number of samples"
assert [
list(image.shape) == [1, 28, 28] for image, _ in test
], "Shape of sample is not correct"
assert [
list(image.shape) == [1, 28, 28] for image, _ in train
], "Shape of sample is not correct"
train_labels = []
test_labels = []
for _, label in train:
train_labels.append(label)
for _, label in test:
test_labels.append(label)
assert (
len(torch.from_numpy(np.asarray(train_labels)).unique()) == 10
), "Not all labels are represented in the training dataset"
assert (
len(torch.from_numpy(np.asarray(test_labels)).unique()) == 10
), "Not all labels are represented in the test dataset"
``` |
{
"source": "johannespischinger/senti_anal",
"score": 3
} |
#### File: opensentiment/gcp/storage_utils.py
```python
from datetime import datetime
from google.cloud import storage
def save_to_model_gs(save_dir: str, model_name: str) -> None:
"""Function to upload model to google storage bucket"""
scheme = "gs://"
bucket_name = save_dir[len(scheme) :].split("/")[0]
prefix = "{}{}/".format(scheme, bucket_name)
bucket_path = save_dir[len(prefix) :].rstrip("/")
datetime_ = datetime.now().strftime("model_%Y%m%d_%H%M%S")
name = model_name.split("/")[-3]
if bucket_path:
model_path = "{}/{}/{}".format(bucket_path, datetime_, name)
else:
model_path = "{}/{}".format(datetime_, name)
bucket = storage.Client().bucket(bucket_name)
blob = bucket.blob(model_path)
blob.upload_from_filename(model_name)
```
#### File: opensentiment/models/predict_model.py
```python
import logging
import os
from pathlib import Path
import torch
from torch.utils.data import DataLoader
import wandb
from opensentiment.models.bert_model import SentimentClassifier
from opensentiment.utils import get_project_root
logger = logging.getLogger(__name__)
def predict(
model_name: str,
models_path: str,
batch_size: int = 64,
data_path: str = "tests/dummy_dataset",
) -> float:
wandb.init(
project="BERT",
entity="senti_anal",
name=os.getcwd().split("/")[-1],
job_type="test",
)
model = SentimentClassifier()
model.load_state_dict(torch.load(os.path.join(models_path, model_name)))
model.eval()
wandb.watch(model, log_freq=100)
test_set = torch.load(
os.path.join(get_project_root(), f"{data_path}/test_dataset.pt")
)
test_loader = DataLoader(test_set, batch_size=batch_size)
total_pred = 0
corr_pred = 0
with torch.no_grad():
for data in test_loader:
input_ids = data["input_id"]
attention_masks = data["attention_mask"]
targets = data["target"]
predictions = model(input_ids, attention_masks)
_, pred_class = torch.max(predictions, dim=1)
corr_pred += torch.sum(pred_class == targets)
total_pred += targets.shape[0]
wandb.log({"test_acc": corr_pred / total_pred})
logger.info(f"Final test accuracy: {corr_pred/total_pred:.4}")
return corr_pred / total_pred
```
#### File: opensentiment/models/train_model.py
```python
import logging
import os
from collections import defaultdict
from typing import Any, Dict, Tuple
import hydra
import numpy as np
import torch
import transformers
from omegaconf import DictConfig
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import wandb
from opensentiment.gcp.storage_utils import save_to_model_gs
from opensentiment.models.bert_model import SentimentClassifier
from opensentiment.utils import get_project_root
logger = logging.getLogger(__name__)
def train_model(
model: nn.Module,
data_loader: DataLoader,
criterion: Any,
optimizer: Any,
scheduler: Any,
max_norm: float = 1.0,
) -> [torch.Tensor, np.float64]:
model.train()
train_loss = []
correct_pred = 0
total_pred = 0
for d in tqdm(data_loader):
input_ids = d["input_id"]
attention_masks = d["attention_mask"]
targets = d["target"]
# forward prop
predictions = model(input_ids, attention_masks)
loss = criterion(predictions, targets)
_, pred_classes = torch.max(predictions, dim=1)
# backprop
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm=max_norm)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
        # training loss and number of correct predictions
train_loss.append(loss.item())
correct_pred += torch.sum(pred_classes == targets)
total_pred += targets.shape[0]
return correct_pred / total_pred, np.mean(train_loss)
def eval_model(
model: nn.Module,
data_loader: DataLoader,
criterion: Any,
) -> [torch.Tensor, float]:
model.eval()
eval_loss = []
correct_pred = 0
total_pred = 0
with torch.no_grad():
for d in tqdm(data_loader):
input_ids = d["input_id"]
attention_masks = d["attention_mask"]
targets = d["target"]
# forward prop
predictions = model(input_ids, attention_masks)
loss = criterion(predictions, targets)
_, pred_classes = torch.max(predictions, dim=1)
eval_loss.append(loss.item())
correct_pred += torch.sum(pred_classes == targets)
total_pred += targets.shape[0]
return correct_pred / total_pred, np.mean(eval_loss)
@hydra.main(config_path="config", config_name="default_config.yaml")
def train(cfg: DictConfig) -> Tuple[Dict, str]:
if cfg.wandb_key_api:
os.environ["WANDB_API_KEY"] = cfg.wandb_key_api
wandb.init(
project="BERT",
entity="senti_anal",
name=os.getcwd().split("/")[-1],
job_type="train",
)
config = cfg.experiments
torch.manual_seed(config.seed)
train_set = torch.load(
os.path.join(get_project_root(), f"{config.data_path}/train_dataset.pt")
)
val_set = torch.load(
os.path.join(get_project_root(), f"{config.data_path}/val_dataset.pt")
)
train_loader = DataLoader(train_set, batch_size=config.batch_size)
val_loader = DataLoader(val_set, batch_size=config.batch_size)
model = SentimentClassifier()
wandb.watch(model, log_freq=100)
total_steps = len(train_loader) * config.epochs
criterion = torch.nn.CrossEntropyLoss()
optimizer = transformers.AdamW(
params=model.parameters(), lr=config.learning_rate, correct_bias=False
)
scheduler = transformers.get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=config.num_warmup_steps,
num_training_steps=total_steps,
)
history = defaultdict(list)
best_accuracy = 0
best_model_name = "untrained_model.pt"
logger.info("Start training:")
for epoch in range(config.epochs):
# training part
print(f"epoch : {epoch + 1}/{config.epochs}")
train_acc, train_loss = train_model(
model,
train_loader,
criterion,
optimizer,
scheduler,
config.max_norm,
)
# validation part
val_acc, val_loss = eval_model(model, val_loader, criterion)
# saving training logs
history["train_acc"].append(train_acc)
history["train_loss"].append(train_loss)
history["val_acc"].append(val_acc)
history["val_loss"].append(val_loss)
wandb.log(
{
"train_loss": train_loss,
"train_acc": train_acc,
"val_loss": val_loss,
"val_acc": val_acc,
}
)
logger.info(
f"train_loss: {train_loss}, train_acc: {train_acc} ,val_loss: {val_loss}, val_acc: {val_acc}"
)
# saving model if performance improved
if val_acc > best_accuracy:
best_model_name = f"best_model_state_{val_acc:.2}.pt"
best_accuracy = val_acc
torch.save(model.state_dict(), os.path.join(os.getcwd(), best_model_name))
if cfg.job_dir_gs:
logger.info(f"Uploading model google bucket: {cfg.job_dir_gs}")
save_to_model_gs(cfg.job_dir_gs, best_model_name)
return history, best_model_name
if __name__ == "__main__":
train()
```
#### File: api/fast/test_serve_api.py
```python
import pytest
from fastapi.testclient import TestClient
import glob
import os
from opensentiment.utils import paths_to_file_ext
from opensentiment.api.fast.serve_api import app
client_not_loaded = TestClient(app)
def test_read_main():
response = client_not_loaded.get("/")
assert response.status_code == 200
assert response.json() == {"Model Serve API": "running"}
class TestAPP:
not_model_exists = not bool(
paths_to_file_ext(folder="model_store", file_ext="ckpt")[0]
)
response_missing = "dummy"
@pytest.mark.skipif(not_model_exists, reason="no model found")
@pytest.mark.parametrize(
"path,expected_status,expected_text,expected_sentiment",
[
(
"/api/v1/serve_single",
422,
response_missing,
response_missing,
),
(
"/api/v1/serve_single?query_text=a positive review. i like it",
200,
"a positive review. i like it",
"Positive",
),
(
"/api/v1/serve_single?query_text=a negative review. i hate it",
200,
"a negative review. i hate it",
"Negativ",
),
],
)
def test_serve_single(
self, path, expected_status, expected_text, expected_sentiment
):
with TestClient(app) as client_loaded:
response = client_loaded.get(path)
assert response.status_code == expected_status
r_json = response.json()
if expected_status == 200:
assert (
r_json["query_text"] == expected_text
), f"got {r_json}, expected {expected_text}"
assert (
r_json["prediction"][0][1] == expected_sentiment
), f"got {r_json}, expected_sentiment {expected_sentiment}"
@pytest.mark.skipif(not_model_exists, reason="no model found")
@pytest.mark.parametrize(
"path,expected_status,expected_text",
[
("/api/v1/serve_batch", 422, response_missing),
("/api/v1/serve_batch?q=foo&q=bar", 200, ["foo", "bar"]),
],
)
def test_serve_batch(self, path, expected_status, expected_text):
with TestClient(app) as client_loaded:
response = client_loaded.get(path)
assert response.status_code == expected_status
r_json = response.json()
if expected_status == 200:
assert (
r_json["query_list"] == expected_text
), f"got {response.json()}, expected {expected_text}"
```
#### File: tests/data/test_dataset_pl.py
```python
import os
from collections import abc
from typing import Dict, List, Tuple, Union
import hydra
import omegaconf
import pytest
import pytorch_lightning as pl
from opensentiment.utils import return_omegaconf_modified
@pytest.mark.parametrize(
"config,shapes_desired",
[
(
return_omegaconf_modified(
{
"data": {
"datamodule": {
"only_take_every_n_sample": 512,
"num_workers": {"train": 0},
}
},
"logging": {"wandb": {"mode": "offline"}},
}
),
[
("attention_mask", [32, 128]),
("input_ids", [32, 128]),
("labels", [32]),
],
),
(
return_omegaconf_modified(
{
"data": {
"datamodule": {
"only_take_every_n_sample": 512,
"max_seq_length": 32,
"batch_size": {
"train": 16,
"val": 16,
"test": 16,
},
}
},
"logging": {"wandb": {"mode": "offline"}},
}
),
[
("attention_mask", [16, 32]),
("input_ids", [16, 32]),
("labels", [16]),
],
),
],
)
def test_dataset(
config: omegaconf.OmegaConf, shapes_desired: List[Tuple[str, List[int]]]
):
dm: pl.LightningDataModule = hydra.utils.instantiate(
config.data.datamodule, _recursive_=False
)
dm.prepare_data()
dm.setup("fit")
sample = next(iter(dm.train_dataloader()))
shapes = [(x[0], x[1].shape) for x in sample.items()]
assert len(shapes) == len(sample)
for i in shapes_desired:
assert i[0] in sample, f"{i[0]} not in dataset {sample}"
assert i[1] == list(
sample[i[0]].shape
), f"shape {i[1]} {i[0]} not in expected but instead {list(sample[i[0]].shape)}"
```
#### File: tests/models/test_bert_model_pl.py
```python
import os
from collections import abc
from typing import List, Tuple
import omegaconf
import pytest
import pytorch_lightning as pl
from hydra import compose, initialize_config_dir
import hydra
from opensentiment.models import train_model_pl
from opensentiment.utils import get_project_root, return_omegaconf_modified
AVAIL_GPU = 0
@pytest.mark.parametrize(
"config",
[
(return_omegaconf_modified({"model": {"transformer_freeze": False}})),
(return_omegaconf_modified({"model": {"transformer_freeze": True}})),
(
return_omegaconf_modified(
{
"model": {"transformer_freeze": False},
"data": {
"datamodule": {
"model_name_or_path": "distilbert-base-uncased-finetuned-sst-2-english"
}
},
}
)
),
],
)
def test_model(
config: omegaconf.OmegaConf,
):
hydra.core.global_hydra.GlobalHydra.instance().clear()
hydra_dir = os.path.join(get_project_root(), ".cache", "Hydratest")
os.makedirs(hydra_dir, exist_ok=True)
model: pl.LightningModule = hydra.utils.instantiate(
config.model,
**{
"model_name_or_path": config.data.datamodule.model_name_or_path,
"train_batch_size": config.data.datamodule.batch_size.train,
},
optim=config.optim,
data=config.data,
logging=config.logging,
_recursive_=False,
)
for name, param in model.named_parameters():
if "classifier" in name or not config.model.transformer_freeze:
# trainable layers
assert param.requires_grad is True
else:
assert param.requires_grad is False
return model
``` |
{
"source": "johannespitz/MNF_VBNN",
"score": 2
} |
#### File: johannespitz/MNF_VBNN/mutual_information.py
```python
import sys
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
def _compute_mi(model, samples, num_runs):
predictions_list = []
for idx in tqdm(range(num_runs), file=sys.stdout):
predictions_list.append(model.predict(samples, batch_size=10000))
probs = np.asarray(predictions_list)
mean = probs.mean(axis=0)
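    # Mutual information (BALD-style) decomposition computed below:
    #   MI = H(E[p]) - E[H(p)]
    # where the expectation is over the `num_runs` stochastic forward passes,
    # i.e. predictive entropy minus the mean per-pass entropy.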
def _entropy(ps):
return -1. * (ps * np.log(ps + 1e-8)).sum(axis=-1)
mean_H = _entropy(mean)
indi_H = _entropy(probs)
return mean_H - indi_H.mean(axis=0), mean_H
def plot_mi(tests_dict, model, num_runs=20):
fig, axs = plt.subplots(2, 1, figsize=(20, 10))
for name, samples in tests_dict.items():
mi, ent = _compute_mi(model, samples, num_runs)
print(f'MI: {name:15s} min: {mi.min():.4f}, max: {mi.max():.4f}, mean: {mi.mean():.4f}')
print(f'PE: {name:15s} min: {ent.min():.4f}, max: {ent.max():.4f}, mean: {ent.mean():.4f}')
# with self.summary_writer.as_default():
# tf.summary.histogram('mi/' + name, mi, epoch)
# tf.summary.histogram('ent/' + name, ent, epoch)
sns.distplot(mi, label=name, ax=axs[0], kde=True, kde_kws=dict(gridsize=1000))
sns.distplot(ent, label=name, ax=axs[1], kde=True, kde_kws=dict(gridsize=1000))
plt.legend()
fig.tight_layout()
return plt
``` |
{
"source": "johannespitz/probability",
"score": 2
} |
#### File: experimental/mcmc/particle_filter.py
```python
from __future__ import print_function
import collections
import functools
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import exponential
from tensorflow_probability.python.distributions import uniform
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import docstring_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.util import SeedStream
__all__ = [
'ess_below_threshold',
'infer_trajectories',
'particle_filter',
'reconstruct_trajectories',
'resample_independent',
'resample_minimum_variance',
'resample_deterministic_minimum_error'
]
# TODO(b/153467570): Move SampleParticles into `tfp.distributions`.
class SampleParticles(distribution_lib.Distribution):
"""Like tfd.Sample, but inserts new rightmost batch (vs event) dim."""
def __init__(self, distribution, num_particles, name=None):
parameters = dict(locals())
with tf.name_scope(name or 'SampleParticles') as name:
self._distribution = distribution
self._num_particles = tensor_util.convert_nonref_to_tensor(
num_particles, dtype_hint=tf.int32, name='num_particles')
super(SampleParticles, self).__init__(
dtype=distribution.dtype,
reparameterization_type=distribution.reparameterization_type,
validate_args=distribution.validate_args,
allow_nan_stats=distribution.allow_nan_stats,
name=name)
self._parameters = self._no_dependency(parameters)
@property
def num_particles(self):
return self._num_particles
@property
def distribution(self):
return self._distribution
def _event_shape(self):
return self.distribution.event_shape
def _event_shape_tensor(self, **kwargs):
return self.distribution.event_shape_tensor(**kwargs)
def _batch_shape(self):
return tf.nest.map_structure(
lambda b: tensorshape_util.concatenate( # pylint: disable=g-long-lambda
[tf.get_static_value(self.num_particles)], b),
self.distribution.batch_shape)
def _batch_shape_tensor(self, **kwargs):
return tf.nest.map_structure(
lambda b: ps.concat([[self.num_particles], b], axis=0),
self.distribution.batch_shape_tensor(**kwargs))
def _log_prob(self, x, **kwargs):
return self._call_log_measure('_log_prob', x, kwargs)
def _log_cdf(self, x, **kwargs):
return self._call_log_measure('_log_cdf', x, kwargs)
def _log_sf(self, x, **kwargs):
return self._call_log_measure('_log_sf', x, kwargs)
def _call_log_measure(self, attr, x, kwargs):
return getattr(self.distribution, attr)(x, **kwargs)
# TODO(b/152797117): Override _sample_n, once it supports joint distributions.
def sample(self, sample_shape=(), seed=None, name=None):
with tf.name_scope(name or 'sample_particles'):
sample_shape = ps.concat([
dist_util.expand_to_vector(sample_shape),
[self.num_particles]], axis=0)
return self.distribution.sample(sample_shape, seed=seed)
# TODO(davmre): Replace this hack with a more efficient TF builtin.
def _batch_gather(params, indices, axis=0):
"""Gathers a batch of indices from `params` along the given axis.
Args:
params: `Tensor` of shape `[d[0], d[1], ..., d[N - 1]]`.
indices: int `Tensor` of shape broadcastable to that of `params`.
axis: int `Tensor` dimension of `params` (and of the broadcast indices) to
gather over.
Returns:
result: `Tensor` of the same type and shape as `params`.
"""
params_rank = ps.rank_from_shape(ps.shape(params))
indices_rank = ps.rank_from_shape(ps.shape(indices))
params_with_axis_on_right = dist_util.move_dimension(
params, source_idx=axis, dest_idx=-1)
indices_with_axis_on_right = ps.broadcast_to(
dist_util.move_dimension(indices,
source_idx=axis - (params_rank - indices_rank),
dest_idx=-1),
ps.shape(params_with_axis_on_right))
result = tf.gather(params_with_axis_on_right,
indices_with_axis_on_right,
axis=params_rank - 1,
batch_dims=params_rank - 1)
return dist_util.move_dimension(result, source_idx=-1, dest_idx=axis)
def _dummy_indices_like(indices):
"""Returns dummy indices ([0, 1, 2, ...]) with batch shape like `indices`."""
indices_shape = ps.shape(indices)
num_particles = indices_shape[0]
return tf.broadcast_to(
ps.reshape(
ps.range(num_particles),
ps.concat([[num_particles],
ps.ones([ps.rank_from_shape(indices_shape) - 1],
dtype=np.int32)],
axis=0)),
indices_shape)
def _gather_history(structure, step, num_steps):
"""Gather up to `num_steps` of history from a nested structure."""
initial_step = ps.maximum(0, step - num_steps)
return tf.nest.map_structure(
lambda x: tf.gather(x, ps.range(initial_step, step)),
structure)
def ess_below_threshold(unnormalized_log_weights, threshold=0.5):
"""Determines if the effective sample size is much less than num_particles."""
with tf.name_scope('ess_below_threshold'):
num_particles = ps.size0(unnormalized_log_weights)
log_weights = tf.math.log_softmax(unnormalized_log_weights, axis=0)
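# For normalized weights `w`, ESS = 1 / sum_i w_i**2, so
# log(ESS) = -logsumexp(2 * log_weights); the comparison below checks
# ESS < threshold * num_particles entirely in log space.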
log_ess = -tf.math.reduce_logsumexp(2 * log_weights, axis=0)
return log_ess < (ps.log(num_particles) +
ps.log(threshold))
ParticleFilterStepResults = collections.namedtuple(
'ParticleFilterStepResults',
['particles',
'log_weights',
'parent_indices',
'incremental_log_marginal_likelihood',
# Track both incremental and accumulated likelihoods because they're cheap,
# and this allows users to get the accumulated likelihood without needing
# to trace every step.
'accumulated_log_marginal_likelihood'
])
def _default_trace_fn(results):
return (results.particles,
results.log_weights,
results.parent_indices,
results.incremental_log_marginal_likelihood)
ParticleFilterLoopVariables = collections.namedtuple(
'ParticleFilterLoopVariables',
['step',
'previous_step_results',
'accumulated_traced_results',
'state_history', # Set to `tf.zeros([0])` if not tracked.
'num_steps_traced'
])
particle_filter_arg_str = """
Each latent state is a `Tensor` or nested structure of `Tensor`s, as defined
by the `initial_state_prior`.
Each of the `transition_fn`, `observation_fn`, and `proposal_fn` args,
if specified, takes arguments `(step, state)`, where `state` represents
the latent state at timestep `step`. These `fn`s may also, optionally, take
additional keyword arguments `state_history` and `observation_history`, which
will be passed if and only if the corresponding
`num_steps_state_history_to_pass` or `num_steps_observation_history_to_pass`
arguments are provided to this method. These are described further below.
Args:
observations: a (structure of) Tensors, each of shape
`concat([[num_observation_steps, b1, ..., bN], event_shape])` with
optional batch dimensions `b1, ..., bN`.
initial_state_prior: a (joint) distribution over the initial latent state,
with optional batch shape `[b1, ..., bN]`.
transition_fn: callable returning a (joint) distribution over the next
latent state.
observation_fn: callable returning a (joint) distribution over the current
observation.
num_particles: `int` `Tensor` number of particles.
initial_state_proposal: a (joint) distribution over the initial latent
state, with optional batch shape `[b1, ..., bN]`. If `None`, the initial
particles are proposed from the `initial_state_prior`.
Default value: `None`.
proposal_fn: callable returning a (joint) proposal distribution over the
next latent state. If `None`, the dynamics model is used (
`proposal_fn == transition_fn`).
Default value: `None`.
resample_criterion_fn: optional Python `callable` with signature
`do_resample = resample_criterion_fn(log_weights)`,
where `log_weights` is a float `Tensor` of shape
`[b1, ..., bN, num_particles]` containing log (unnormalized) weights for
all particles at the current step. The return value `do_resample`
determines whether particles are resampled at the current step. In the
case `resample_criterion_fn==None`, particles are resampled at every step.
The default behavior resamples particles when the current effective
sample size falls below half the total number of particles. Note that
the resampling criterion is not used at the final step---there, particles
are always resampled, so that we return unweighted values.
Default value: `tfp.experimental.mcmc.ess_below_threshold`.
rejuvenation_kernel_fn: optional Python `callable` with signature
`transition_kernel = rejuvenation_kernel_fn(target_log_prob_fn)`
where `target_log_prob_fn` is a provided callable evaluating
`p(x[t] | y[t], x[t-1])` at each step `t`, and `transition_kernel`
should be an instance of `tfp.mcmc.TransitionKernel`.
Default value: `None`. # TODO(davmre): not yet supported.
num_transitions_per_observation: scalar Tensor positive `int` number of
state transitions between regular observation points. A value of `1`
indicates that there is an observation at every timestep,
`2` that every other step is observed, and so on. Values greater than `1`
may be used with an appropriately-chosen transition function to
approximate continuous-time dynamics. The initial and final steps
(steps `0` and `num_timesteps - 1`) are always observed.
Default value: `1`.
num_steps_state_history_to_pass: scalar Python `int` number of steps to
include in the optional `state_history` argument to `transition_fn`,
`observation_fn`, and `proposal_fn`. If `None`, this argument
will not be passed.
Default value: `None`.
num_steps_observation_history_to_pass: scalar Python `int` number of steps
to include in the optional `observation_history` argument to
`transition_fn`, `observation_fn`, and `proposal_fn`. If `None`, this
argument will not be passed.
Default value: `None`.
"""
non_markovian_specification_str = """
#### Non-Markovian models (state and observation history).
Models that do not follow the [Markov property](
https://en.wikipedia.org/wiki/Markov_property), which requires that the
current state contains all information relevant to the future of the system,
are supported by specifying `num_steps_state_history_to_pass` and/or
`num_steps_observation_history_to_pass`. If these are specified, additional
keyword arguments `state_history` and/or `observation_history` (respectively)
will be passed to each of `transition_fn`, `observation_fn`, and
`proposal_fn`.
The `state_history`, if requested, is a structure of `Tensor`s like
the initial state, but with a batch dimension prefixed to every Tensor,
of size `num_steps_state_history_to_pass` , so that
`state_history[-1]` represents the previous state
(for `transition_fn` and `proposal_fn`, this will equal the `state` arg),
`state_history[-2]` the state before that, and so on.
The `observation_history` is a structure like `observations`, but with leading
dimension of `minimum(step, num_steps_observation_history_to_pass)`. At the
initial step, `observation_history=None` will be passed and should be
handled appropriately. At subsequent steps, `observation_history[-1]`
refers to the observation at the previous timestep, and so on.
"""
@docstring_util.expand_docstring(
particle_filter_arg_str=particle_filter_arg_str)
def infer_trajectories(observations,
initial_state_prior,
transition_fn,
observation_fn,
num_particles,
initial_state_proposal=None,
proposal_fn=None,
resample_criterion_fn=ess_below_threshold,
rejuvenation_kernel_fn=None,
num_transitions_per_observation=1,
num_steps_state_history_to_pass=None,
num_steps_observation_history_to_pass=None,
seed=None,
name=None): # pylint: disable=g-doc-args
"""Use particle filtering to sample from the posterior over trajectories.
${particle_filter_arg_str}
seed: Python `int` seed for random ops.
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'infer_trajectories'`).
Returns:
trajectories: a (structure of) Tensor(s) matching the latent state, each
of shape
`concat([[num_timesteps, num_particles, b1, ..., bN], event_shape])`,
representing unbiased samples from the posterior distribution
`p(latent_states | observations)`.
incremental_log_marginal_likelihoods: float `Tensor` of shape
`[num_observation_steps, b1, ..., bN]`,
giving the natural logarithm of an unbiased estimate of
`p(observations[t] | observations[:t])` at each timestep `t`. Note that
(by [Jensen's inequality](
https://en.wikipedia.org/wiki/Jensen%27s_inequality))
this is *smaller* in expectation than the true
`log p(observations[t] | observations[:t])`.
${non_markovian_specification_str}
#### Examples
**Tracking unknown position and velocity**: Let's consider tracking an object
moving in a one-dimensional space. We'll define a dynamical system
by specifying an `initial_state_prior`, a `transition_fn`,
and `observation_fn`.
The structure of the latent state space is determined by the prior
distribution. Here, we'll define a state space that includes the object's
current position and velocity:
```python
initial_state_prior = tfd.JointDistributionNamed({
'position': tfd.Normal(loc=0., scale=1.),
'velocity': tfd.Normal(loc=0., scale=0.1)})
```
The `transition_fn` specifies the evolution of the system. It should
return a distribution over latent states of the same structure as the prior.
Here, we'll assume that the position evolves according to the velocity,
with a small random drift, and the velocity also changes slowly, following
a random drift:
```python
def transition_fn(_, previous_state):
return tfd.JointDistributionNamed({
'position': tfd.Normal(
loc=previous_state['position'] + previous_state['velocity'],
scale=0.1),
'velocity': tfd.Normal(loc=previous_state['velocity'], scale=0.01)})
```
The `observation_fn` specifies the process by which the system is observed
at each time step. Let's suppose we observe only a noisy version of the
current position.
```python
def observation_fn(_, state):
return tfd.Normal(loc=state['position'], scale=0.1)
```
Now let's track our object. Suppose we've been given observations
corresponding to an initial position of `0.4` and constant velocity of `0.01`:
```python
# Generate simulated observations.
observed_positions = tfd.Normal(loc=tf.linspace(0.4, 0.8, 40),
scale=0.1).sample()
# Run particle filtering to sample plausible trajectories.
(trajectories, # {'position': [40, 1000], 'velocity': [40, 1000]}
lps) = tfp.experimental.mcmc.infer_trajectories(
observations=observed_positions,
initial_state_prior=initial_state_prior,
transition_fn=transition_fn,
observation_fn=observation_fn,
num_particles=1000)
```
For all `i`, `trajectories['position'][:, i]` is a sample from the
posterior over position sequences, given the observations:
`p(state[0:T] | observations[0:T])`. Often, the sampled trajectories
will be highly redundant in their earlier timesteps, because most
of the initial particles have been discarded through resampling
(this problem is known as 'particle degeneracy'; see section 3.5 of
[Doucet and Johansen][1]).
In such cases it may be useful to also consider the series of *filtering*
distributions `p(state[t] | observations[:t])`, in which each latent state
is inferred conditioned only on observations up to that point in time; these
may be computed using `tfp.mcmc.experimental.particle_filter`.
#### References
[1] Arnaud Doucet and Adam M. Johansen. A tutorial on particle
filtering and smoothing: Fifteen years later.
_Handbook of nonlinear filtering_, 12(656-704), 2009.
https://www.stats.ox.ac.uk/~doucet/doucet_johansen_tutorialPF2011.pdf
"""
with tf.name_scope(name or 'infer_trajectories') as name:
seed = SeedStream(seed, 'infer_trajectories')
(particles,
log_weights,
parent_indices,
incremental_log_marginal_likelihoods) = particle_filter(
observations=observations,
initial_state_prior=initial_state_prior,
transition_fn=transition_fn,
observation_fn=observation_fn,
num_particles=num_particles,
initial_state_proposal=initial_state_proposal,
proposal_fn=proposal_fn,
resample_criterion_fn=resample_criterion_fn,
rejuvenation_kernel_fn=rejuvenation_kernel_fn,
num_transitions_per_observation=num_transitions_per_observation,
num_steps_state_history_to_pass=num_steps_state_history_to_pass,
num_steps_observation_history_to_pass=(
num_steps_observation_history_to_pass),
trace_fn=_default_trace_fn,
seed=seed,
name=name)
weighted_trajectories = reconstruct_trajectories(particles, parent_indices)
# Resample all steps of the trajectories using the final weights.
resample_indices = categorical.Categorical(
dist_util.move_dimension(
log_weights[-1, ...],
source_idx=0,
dest_idx=-1)).sample(num_particles, seed=seed)
trajectories = tf.nest.map_structure(
lambda x: _batch_gather(x, resample_indices, axis=1),
weighted_trajectories)
return trajectories, incremental_log_marginal_likelihoods
@docstring_util.expand_docstring(
particle_filter_arg_str=particle_filter_arg_str,
non_markovian_specification_str=non_markovian_specification_str)
def particle_filter(observations,
initial_state_prior,
transition_fn,
observation_fn,
num_particles,
initial_state_proposal=None,
proposal_fn=None,
resample_criterion_fn=ess_below_threshold,
rejuvenation_kernel_fn=None, # TODO(davmre): not yet supported. pylint: disable=unused-argument
num_transitions_per_observation=1,
num_steps_state_history_to_pass=None,
num_steps_observation_history_to_pass=None,
trace_fn=_default_trace_fn,
step_indices_to_trace=None,
seed=None,
name=None): # pylint: disable=g-doc-args
"""Samples a series of particles representing filtered latent states.
The particle filter samples from the sequence of "filtering" distributions
`p(state[t] | observations[:t])` over latent
states: at each point in time, this is the distribution conditioned on all
observations *up to that time*. Because particles may be resampled, a particle
at time `t` may be different from the particle with the same index at time
`t + 1`. To reconstruct trajectories by tracing back through the resampling
process, see `tfp.mcmc.experimental.reconstruct_trajectories`.
${particle_filter_arg_str}
trace_fn: Python `callable` defining the values to be traced at each step.
It takes a `ParticleFilterStepResults` tuple and returns a structure of
`Tensor`s. The default function returns
`(particles, log_weights, parent_indices, step_log_likelihood)`.
step_indices_to_trace: optional `int` `Tensor` listing, in increasing order,
the indices of steps at which to record the values traced by `trace_fn`.
If `None`, the default behavior is to trace at every timestep,
equivalent to specifying `step_indices_to_trace=tf.range(num_timesteps)`.
seed: Python `int` seed for random ops.
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'particle_filter'`).
Returns:
particles: a (structure of) Tensor(s) matching the latent state, each
of shape
`concat([[num_timesteps, num_particles, b1, ..., bN], event_shape])`,
representing (possibly weighted) samples from the series of filtering
distributions `p(latent_states[t] | observations[:t])`.
log_weights: `float` `Tensor` of shape
`[num_timesteps, num_particles, b1, ..., bN]`, such that
`log_weights[t, :]` are the logarithms of normalized importance weights
(such that `exp(reduce_logsumexp(log_weights[t], axis=0)) == 1.`) of
the particles at time `t`. These may be used in conjunction with
`particles` to compute expectations under the series of filtering
distributions.
parent_indices: `int` `Tensor` of shape
`[num_timesteps, num_particles, b1, ..., bN]`,
such that `parent_indices[t, k]` gives the index of the particle at
time `t - 1` that the `k`th particle at time `t` is immediately descended
from. See also
`tfp.experimental.mcmc.reconstruct_trajectories`.
incremental_log_marginal_likelihoods: float `Tensor` of shape
`[num_observation_steps, b1, ..., bN]`,
giving the natural logarithm of an unbiased estimate of
`p(observations[t] | observations[:t])` at each observed timestep `t`.
Note that (by [Jensen's inequality](
https://en.wikipedia.org/wiki/Jensen%27s_inequality))
this is *smaller* in expectation than the true
`log p(observations[t] | observations[:t])`.
${non_markovian_specification_str}
"""
seed = SeedStream(seed, 'particle_filter')
with tf.name_scope(name or 'particle_filter'):
num_observation_steps = ps.size0(tf.nest.flatten(observations)[0])
num_timesteps = (
1 + num_transitions_per_observation * (num_observation_steps - 1))
# If no criterion is specified, default is to resample at every step.
if not resample_criterion_fn:
resample_criterion_fn = lambda _: True
# Canonicalize the list of steps to trace as a rank-1 tensor of (sorted)
# positive integers. E.g., `3` -> `[3]`, `[-2, -1]` -> `[N - 2, N - 1]`.
if step_indices_to_trace is not None:
(step_indices_to_trace,
traced_steps_have_rank_zero) = _canonicalize_steps_to_trace(
step_indices_to_trace, num_timesteps)
# Dress up the prior and prior proposal as a fake `transition_fn` and
# `proposal_fn` respectively.
prior_fn = lambda _1, _2: SampleParticles( # pylint: disable=g-long-lambda
initial_state_prior, num_particles)
prior_proposal_fn = (
None if initial_state_proposal is None
else lambda _1, _2: SampleParticles( # pylint: disable=g-long-lambda
initial_state_proposal, num_particles))
# Initially the particles all have the same weight, `1. / num_particles`.
broadcast_batch_shape = tf.convert_to_tensor(
functools.reduce(
ps.broadcast_shape,
tf.nest.flatten(initial_state_prior.batch_shape_tensor()),
[]), dtype=tf.int32)
log_uniform_weights = ps.zeros(
ps.concat([
[num_particles],
broadcast_batch_shape], axis=0),
dtype=tf.float32) - ps.log(num_particles)
# Initialize from the prior and incorporate the first observation.
dummy_previous_step = ParticleFilterStepResults(
particles=prior_fn(0, []).sample(),
log_weights=log_uniform_weights,
parent_indices=None,
incremental_log_marginal_likelihood=0.,
accumulated_log_marginal_likelihood=0.)
initial_step_results = _filter_one_step(
step=0,
# `previous_particles` at the first step is a dummy quantity, used only
# to convey state structure and num_particles to an optional
# proposal fn.
previous_step_results=dummy_previous_step,
observation=tf.nest.map_structure(
lambda x: tf.gather(x, 0), observations),
transition_fn=prior_fn,
observation_fn=observation_fn,
proposal_fn=prior_proposal_fn,
resample_criterion_fn=resample_criterion_fn,
seed=seed)
def _loop_body(step,
previous_step_results,
accumulated_traced_results,
state_history,
num_steps_traced):
"""Take one step in dynamics and accumulate marginal likelihood."""
step_has_observation = (
# The second of these conditions subsumes the first, but both are
# useful because the first can often be evaluated statically.
ps.equal(num_transitions_per_observation, 1) |
ps.equal(step % num_transitions_per_observation, 0))
observation_idx = step // num_transitions_per_observation
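# E.g., with `num_transitions_per_observation=2`, observations are
# incorporated at steps 0, 2, 4, ... (observation indices 0, 1, 2, ...);
# at intermediate steps the gathered observation is ignored because
# `step_has_observation` is False.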
current_observation = tf.nest.map_structure(
lambda x, step=step: tf.gather(x, observation_idx), observations)
history_to_pass_into_fns = {}
if num_steps_observation_history_to_pass:
history_to_pass_into_fns['observation_history'] = _gather_history(
observations,
observation_idx,
num_steps_observation_history_to_pass)
if num_steps_state_history_to_pass:
history_to_pass_into_fns['state_history'] = state_history
new_step_results = _filter_one_step(
step=step,
previous_step_results=previous_step_results,
observation=current_observation,
transition_fn=functools.partial(
transition_fn, **history_to_pass_into_fns),
observation_fn=functools.partial(
observation_fn, **history_to_pass_into_fns),
proposal_fn=(
None if proposal_fn is None else
functools.partial(proposal_fn, **history_to_pass_into_fns)),
resample_criterion_fn=resample_criterion_fn,
has_observation=step_has_observation,
seed=seed)
return _update_loop_variables(
step=step,
current_step_results=new_step_results,
accumulated_traced_results=accumulated_traced_results,
state_history=state_history,
trace_fn=trace_fn,
step_indices_to_trace=step_indices_to_trace,
num_steps_traced=num_steps_traced)
loop_results = tf.while_loop(
cond=lambda step, *_: step < num_timesteps,
body=_loop_body,
loop_vars=_initialize_loop_variables(
initial_step_results=initial_step_results,
num_timesteps=num_timesteps,
num_steps_state_history_to_pass=num_steps_state_history_to_pass,
trace_fn=trace_fn,
step_indices_to_trace=step_indices_to_trace))
results = tf.nest.map_structure(lambda ta: ta.stack(),
loop_results.accumulated_traced_results)
if step_indices_to_trace is not None:
# If we were passed a rank-0 (single scalar) step to trace, don't
# return a time axis in the returned results.
results = ps.cond(
traced_steps_have_rank_zero,
lambda: tf.nest.map_structure(lambda x: x[0, ...], results),
lambda: results)
return results
def _canonicalize_steps_to_trace(step_indices_to_trace, num_timesteps):
"""Canonicalizes `3` -> `[3]`, `[-2, -1]` -> `[N - 2, N - 1]`, etc."""
step_indices_to_trace = tf.convert_to_tensor(
step_indices_to_trace, dtype_hint=tf.int32)
traced_steps_have_rank_zero = ps.equal(
ps.rank_from_shape(ps.shape(step_indices_to_trace)), 0)
# Canonicalize negative step indices as positive.
step_indices_to_trace = ps.where(step_indices_to_trace < 0,
num_timesteps + step_indices_to_trace,
step_indices_to_trace)
# Canonicalize scalars as length-one vectors.
return (ps.reshape(step_indices_to_trace, [ps.size(step_indices_to_trace)]),
traced_steps_have_rank_zero)
def _initialize_loop_variables(initial_step_results,
num_timesteps,
num_steps_state_history_to_pass,
trace_fn,
step_indices_to_trace):
"""Initialize arrays and other quantities passed through the filter loop."""
# Create arrays to store traced values (particles, likelihoods, etc).
num_steps_to_trace = (num_timesteps
if step_indices_to_trace is None
else ps.size0(step_indices_to_trace))
traced_results = trace_fn(initial_step_results)
trace_arrays = tf.nest.map_structure(
lambda x: tf.TensorArray(dtype=x.dtype, size=num_steps_to_trace),
traced_results)
# If we are supposed to trace at step 0, write the traced values.
num_steps_traced, trace_arrays = ps.cond(
(True if step_indices_to_trace is None
else ps.equal(step_indices_to_trace[0], 0)),
lambda: (1, # pylint: disable=g-long-lambda
tf.nest.map_structure(
lambda ta, x: ta.write(0, x),
trace_arrays,
traced_results)),
lambda: (0, trace_arrays))
# Because `while_loop` requires Tensor values, we'll represent the lack of
# state history by a static-shape empty Tensor.
# This can be detected elsewhere by branching on
# `tf.is_tensor(state_history) and state_history.shape[0] == 0`.
state_history = tf.zeros([0])
if num_steps_state_history_to_pass:
# Repeat the initial state, so that `state_history` always has length
# `num_steps_state_history_to_pass`.
state_history = tf.nest.map_structure(
lambda x: tf.broadcast_to( # pylint: disable=g-long-lambda
x[tf.newaxis, ...],
ps.concat([[num_steps_state_history_to_pass],
ps.shape(x)], axis=0)),
initial_step_results.particles)
return ParticleFilterLoopVariables(
step=1,
previous_step_results=initial_step_results,
accumulated_traced_results=trace_arrays,
state_history=state_history,
num_steps_traced=num_steps_traced)
def _update_loop_variables(step,
current_step_results,
accumulated_traced_results,
state_history,
trace_fn,
step_indices_to_trace,
num_steps_traced):
"""Update the loop state to reflect a step of filtering."""
# Write particles, indices, and likelihoods to their respective arrays.
trace_this_step = True
if step_indices_to_trace is not None:
trace_this_step = ps.equal(
step_indices_to_trace[ps.minimum(
num_steps_traced,
ps.cast(ps.size0(step_indices_to_trace) - 1, dtype=np.int32))],
step)
num_steps_traced, accumulated_traced_results = ps.cond(
trace_this_step,
lambda: (num_steps_traced + 1, # pylint: disable=g-long-lambda
tf.nest.map_structure(
lambda x, y: x.write(num_steps_traced, y),
accumulated_traced_results,
trace_fn(current_step_results))),
lambda: (num_steps_traced, accumulated_traced_results))
history_is_empty = (tf.is_tensor(state_history) and
state_history.shape[0] == 0)
if not history_is_empty:
# Permute the particles from previous steps to match the current resampled
# indices, so that the state history reflects coherent trajectories.
resampled_state_history = tf.nest.map_structure(
lambda x: _batch_gather(x[1:], # pylint: disable=g-long-lambda
current_step_results.parent_indices,
axis=1),
state_history)
# Update the history by concat'ing the carried-forward elements with the
# most recent state.
state_history = tf.nest.map_structure(
lambda h, s: tf.concat([h, s[tf.newaxis, ...]], axis=0),
resampled_state_history,
current_step_results.particles)
return ParticleFilterLoopVariables(
step=step + 1,
previous_step_results=current_step_results,
accumulated_traced_results=accumulated_traced_results,
state_history=state_history,
num_steps_traced=num_steps_traced)
def _filter_one_step(step,
observation,
previous_step_results,
transition_fn,
observation_fn,
proposal_fn,
resample_criterion_fn,
has_observation=True,
seed=None):
"""Advances the particle filter by a single time step."""
with tf.name_scope('filter_one_step'):
seed = SeedStream(seed, 'filter_one_step')
num_particles = ps.size0(previous_step_results.log_weights)
proposed_particles, proposal_log_weights = _propose_with_log_weights(
step=step - 1,
particles=previous_step_results.particles,
transition_fn=transition_fn,
proposal_fn=proposal_fn,
seed=seed)
log_weights = tf.nn.log_softmax(
proposal_log_weights + previous_step_results.log_weights, axis=-1)
# If this step has an observation, compute its weights and marginal
# likelihood (and otherwise, leave weights unchanged).
observation_log_weights = ps.cond(
has_observation,
lambda: ps.broadcast_to( # pylint: disable=g-long-lambda
_compute_observation_log_weights(
step, proposed_particles, observation, observation_fn),
ps.shape(log_weights)),
lambda: tf.zeros_like(log_weights))
unnormalized_log_weights = log_weights + observation_log_weights
log_weights = tf.nn.log_softmax(unnormalized_log_weights, axis=0)
# Every entry of `log_weights` differs from `unnormalized_log_weights`
# by the same normalizing constant. We extract that constant by examining
# an arbitrary entry.
incremental_log_marginal_likelihood = (
unnormalized_log_weights[0] - log_weights[0])
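# Equivalently, `incremental_log_marginal_likelihood` equals
# `reduce_logsumexp(unnormalized_log_weights, axis=0)`, i.e.
# log sum_i w_{t-1}^i p(y_t | x_t^i), the standard SMC estimate of
# p(y_t | y_{1:t-1}).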
# Adaptive resampling: resample particles iff the specified criterion is met.
do_resample = resample_criterion_fn(unnormalized_log_weights)
# Some batch elements may require resampling and others not, so
# we first do the resampling for all elements, then select whether to use
# the resampled values for each batch element according to
# `do_resample`. If there were no batching, we might prefer to use
# `tf.cond` to avoid the resampling computation on steps where it's not
# needed---but we're ultimately interested in adaptive resampling
# for statistical (not computational) purposes, so this isn't a dealbreaker.
resampled_particles, resample_indices = _resample(
proposed_particles, log_weights, resample_independent, seed=seed)
uniform_weights = (ps.zeros_like(log_weights) -
ps.log(num_particles))
(resampled_particles,
resample_indices,
log_weights) = tf.nest.map_structure(
lambda r, p: ps.where(do_resample, r, p),
(resampled_particles, resample_indices, uniform_weights),
(proposed_particles,
_dummy_indices_like(resample_indices),
log_weights))
return ParticleFilterStepResults(
particles=resampled_particles,
log_weights=log_weights,
parent_indices=resample_indices,
incremental_log_marginal_likelihood=incremental_log_marginal_likelihood,
accumulated_log_marginal_likelihood=(
previous_step_results.accumulated_log_marginal_likelihood +
incremental_log_marginal_likelihood))
def _propose_with_log_weights(step,
particles,
transition_fn,
proposal_fn=None,
seed=None):
"""Proposes a new batch of particles with importance weights.
Args:
step: int `Tensor` current step.
particles: Nested structure of `Tensor`s each of shape
`[b1, ..., bN, num_particles, latent_part_event_shape]`, where
`b1, ..., bN` are optional batch dimensions.
transition_fn: callable, producing a distribution over `particles`
at the next step.
proposal_fn: callable, producing a distribution over `particles`
at the next step.
Default value: `None`.
seed: Python `int` random seed.
Default value: `None`.
Returns:
proposed_particles: Nested structure of `Tensor`s, matching `particles`.
proposal_log_weights: `Tensor` of shape
`concat([[b1, ..., bN], [num_particles]])`.
"""
with tf.name_scope('propose_with_log_weights'):
transition_dist = transition_fn(step, particles)
# If no proposal was specified, use the dynamics.
if proposal_fn is None:
return transition_dist.sample(seed=seed), 0.0
proposal_dist = proposal_fn(step, particles)
proposed_particles = proposal_dist.sample(seed=seed)
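# Importance-correct for sampling from `proposal_dist` rather than the
# dynamics: log w = log p(x_t | x_{t-1}) - log q(x_t | x_{t-1}).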
proposal_log_weights = (
transition_dist.log_prob(proposed_particles) -
proposal_dist.log_prob(proposed_particles))
return proposed_particles, proposal_log_weights
def _compute_observation_log_weights(step,
particles,
observation,
observation_fn):
"""Computes particle importance weights from an observation step.
Args:
step: int `Tensor` current step.
particles: Nested structure of `Tensor`s, each of shape
`concat([[num_particles, b1, ..., bN], latent_part_event_shape])`, where
`b1, ..., bN` are optional batch dimensions.
observation: Nested structure of `Tensor`s, each of shape
`concat([[b1, ..., bN], observation_part_event_shape])` where
`b1, ..., bN` are optional batch dimensions.
observation_fn: callable, producing a distribution over `observation`s.
Returns:
log_weights: `Tensor` of shape `concat([num_particles, b1, ..., bN])`.
"""
with tf.name_scope('compute_observation_log_weights'):
observation_dist = observation_fn(step, particles)
return observation_dist.log_prob(observation)
def _resample(particles, log_weights, resample_fn, seed=None):
"""Resamples the current particles according to provided weights.
Args:
particles: Nested structure of `Tensor`s each of shape
`[num_particles, b1, ..., bN, ...]`, where
`b1, ..., bN` are optional batch dimensions.
log_weights: float `Tensor` of shape `[num_particles, b1, ..., bN]`, where
`b1, ..., bN` are optional batch dimensions.
resample_fn: choose the function used for resampling.
Use 'resample_minimum_variance' for minimum variance resampling.
Use 'resample_deterministic_minimum_error' for minimum error resampling.
Use 'resample_independent' for independent resampling.
seed: Python `int` random seed.
Returns:
resampled_particles: Nested structure of `Tensor`s, matching `particles`.
resample_indices: int `Tensor` of shape `[num_particles, b1, ..., bN]`.
"""
with tf.name_scope('resample'):
weights_shape = ps.shape(log_weights)
num_particles = weights_shape[0]
log_probs = tf.math.log_softmax(log_weights, axis=0)
resampled_indices = resample_fn(log_probs, num_particles, (), seed=seed)
resampled_particles = tf.nest.map_structure(
lambda x: _batch_gather(x, resampled_indices, axis=0),
particles)
return resampled_particles, resampled_indices
def reconstruct_trajectories(particles, parent_indices, name=None):
"""Reconstructs the ancestor trajectory that generated each final particle."""
with tf.name_scope(name or 'reconstruct_trajectories'):
# Walk backwards to compute the ancestor of each final particle at time t.
final_indices = _dummy_indices_like(parent_indices[-1])
ancestor_indices = tf.scan(
fn=lambda ancestor, parent: _batch_gather(parent, ancestor, axis=0),
elems=parent_indices[1:],
initializer=final_indices,
reverse=True)
ancestor_indices = tf.concat([ancestor_indices, [final_indices]], axis=0)
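# Worked example (illustrative): with 2 particles and
# parent_indices = [[0, 1], [1, 0], [0, 0]] (the step-0 entries are unused),
# final_indices = [0, 1] and the backwards scan followed by the concat above
# gives ancestor_indices = [[1, 1], [0, 0], [0, 1]]: the trajectory ending in
# final particle 0 visits particle indices 1, 0, 0 over time, and the one
# ending in final particle 1 visits 1, 0, 1.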
return tf.nest.map_structure(
lambda part: _batch_gather(part, ancestor_indices, axis=1), particles)
# TODO(b/153689734): rewrite so as not to use `move_dimension`.
def resample_independent(log_probs, event_size, sample_shape,
seed=None, name=None):
"""Categorical resampler for sequential Monte Carlo.
This function is based on Algorithm #1 in the paper
[Maskell et al. (2006)][1].
Args:
log_probs: A tensor-valued batch of discrete log probability distributions.
event_size: the dimension of the vector considered a single draw.
sample_shape: the `sample_shape` determining the number of draws.
seed: Python `int` used to seed calls to `tf.random.*`.
Default value: None (i.e. no seed).
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'resample_independent'`).
Returns:
resampled_indices: The result is similar to sampling with
```python
expanded_sample_shape = tf.concat([[event_size], sample_shape], axis=-1)
tfd.Categorical(logits=log_probs).sample(expanded_sample_shape)
```
but with values sorted along the first axis. It can be considered to be
sampling events made up of a length-`event_size` vector of draws from
the `Categorical` distribution. For large input values this function should
give better performance than using `Categorical`.
The sortedness is an unintended side effect of the algorithm that is
harmless in the context of simple SMC algorithms.
#### References
[1]: <NAME>, <NAME> and <NAME>. A Single Instruction Multiple
Data Particle Filter.
In 2006 IEEE Nonlinear Statistical Signal Processing Workshop.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
with tf.name_scope(name or 'resample_independent') as name:
log_probs = tf.convert_to_tensor(log_probs, dtype_hint=tf.float32)
log_probs = dist_util.move_dimension(log_probs, source_idx=0, dest_idx=-1)
batch_shape = ps.shape(log_probs)[:-1]
num_markers = ps.shape(log_probs)[-1]
# `working_shape` specifies the total number of events
# we will be generating.
working_shape = ps.concat([sample_shape, batch_shape], axis=0)
# `points_shape` is the shape of the final result.
points_shape = ps.concat([working_shape, [event_size]], axis=0)
# `markers_shape` is the shape of the markers we temporarily insert.
markers_shape = ps.concat([working_shape, [num_markers]], axis=0)
# Generate one real point for each particle.
log_points = -exponential.Exponential(
rate=tf.constant(1.0, dtype=log_probs.dtype)).sample(
points_shape, seed=seed)
# We divide up the unit interval [0, 1] according to the provided
# probability distributions using `cumsum`.
# At the end of each division we place a 'marker'.
# We generate random points on the unit interval.
# We sort the combination of points and markers. The number
# of points between the markers defining a division gives the number
# of samples we require in that division.
# For example, suppose `probs` is `[0.2, 0.3, 0.5]`.
# We divide up `[0, 1]` using 3 markers:
#
# | | |
# 0. 0.2 0.5 1.0 <- markers
#
# Suppose we generate four points: [0.1, 0.25, 0.9, 0.75]
# After sorting the combination we get:
#
# 0.1 0.25 0.75 0.9 <- points
# * | * | * *|
# 0. 0.2 0.5 1.0 <- markers
#
# We have one sample in the first category, one in the second and
# two in the last.
#
# All of these computations are carried out in batched form.
markers = ps.concat(
[tf.zeros(points_shape, dtype=tf.int32),
tf.ones(markers_shape, dtype=tf.int32)],
axis=-1)
log_marker_positions = tf.broadcast_to(
tf.math.cumulative_logsumexp(log_probs, axis=-1),
markers_shape)
log_points_and_markers = ps.concat(
[log_points, log_marker_positions], axis=-1)
indices = tf.argsort(log_points_and_markers, axis=-1, stable=False)
sorted_markers = tf.gather_nd(
markers,
indices[..., tf.newaxis],
batch_dims=(
ps.rank_from_shape(sample_shape) +
ps.rank_from_shape(batch_shape)))
markers_and_samples = ps.cast(
tf.cumsum(sorted_markers, axis=-1), dtype=tf.int32)
markers_and_samples = tf.minimum(markers_and_samples, num_markers - 1)
# Collect up samples, omitting markers.
resampled = tf.reshape(markers_and_samples[tf.equal(sorted_markers, 0)],
points_shape)
return dist_util.move_dimension(resampled, source_idx=-1, dest_idx=0)
# TODO(b/153199903): replace this function with `tf.scatter_nd` when
# it supports `batch_dims`.
def _scatter_nd_batch(indices, updates, shape, batch_dims=0):
"""A partial implementation of `scatter_nd` supporting `batch_dims`."""
# `tf.scatter_nd` does not support a `batch_dims` argument.
# Instead we use the gradient of `tf.gather_nd`.
# From a purely mathematical perspective this works because
# (if `tf.scatter_nd` supported `batch_dims`)
# `gather_nd` and `scatter_nd` (with matching `indices`) are
# adjoint linear operators and
# the gradient w.r.t `x` of `dot(y, A(x))` is `adjoint(A)(y)`.
#
# Another perspective: back propagating through a "neural" network
# containing a gather operation carries derivatives backwards through the
# network, accumulating the derivatives in the locations that
# were gathered from, ie. they are scattered.
# If the network multiplies each gathered element by
# some quantity, then the backwardly propagating derivatives are scaled
# by this quantity before being scattered.
# Combining this with the fact that `GradientTape.gradient`
# starts back-propagation with derivatives equal to `1`, this allows us
# to use the multipliers to determine the quantities scattered.
#
# However, derivatives are only supported for floating point types
# so we 'tunnel' our types through the `float64` type.
# So the implementation is "partial" in the sense that it supports
# data that can be losslessly converted to `tf.float64` and back.
dtype = updates.dtype
internal_dtype = tf.float64
multipliers = ps.cast(updates, internal_dtype)
with tf.GradientTape() as tape:
zeros = tf.zeros(shape, dtype=internal_dtype)
tape.watch(zeros)
weighted_gathered = multipliers * tf.gather_nd(
zeros,
indices,
batch_dims=batch_dims)
grad = tape.gradient(weighted_gathered, zeros)
return ps.cast(grad, dtype=dtype)
# TODO(b/153689734): rewrite so as not to use `move_dimension`.
def resample_minimum_variance(
log_probs, event_size, sample_shape, seed=None, name=None):
"""Minimum variance resampler for sequential Monte Carlo.
This function is based on Algorithm #2 in [Maskell et al. (2006)][1].
Args:
log_probs: A tensor-valued batch of discrete log probability distributions.
event_size: the dimension of the vector considered a single draw.
sample_shape: the `sample_shape` determining the number of draws.
seed: Python `int` used to seed calls to `tf.random.*`.
Default value: None (i.e. no seed).
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'resample_minimum_variance'`).
Returns:
resampled_indices: The result is similar to sampling with
```python
expanded_sample_shape = tf.concat([[event_size], sample_shape], axis=-1)
tfd.Categorical(logits=log_probs).sample(expanded_sample_shape)
```
but with values sorted along the first axis. It can be considered to be
sampling events made up of a length-`event_size` vector of draws from
the `Categorical` distribution. However, although the elements of
this event have the appropriate marginal distribution, they are not
independent of each other. Instead they have been chosen so as to form
a good representative sample, suitable for use with Sequential Monte
Carlo algorithms.
The sortedness is an unintended side effect of the algorithm that is
harmless in the context of simple SMC algorithms.
#### References
[1]: <NAME>, <NAME> and <NAME>. A Single Instruction Multiple
Data Particle Filter.
In 2006 IEEE Nonlinear Statistical Signal Processing Workshop.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
with tf.name_scope(name or 'resample_minimum_variance') as name:
log_probs = tf.convert_to_tensor(log_probs, dtype_hint=tf.float32)
log_probs = dist_util.move_dimension(log_probs, source_idx=0, dest_idx=-1)
batch_shape = ps.shape(log_probs)[:-1]
working_shape = ps.concat([sample_shape, batch_shape], axis=-1)
log_cdf = tf.math.cumulative_logsumexp(log_probs[..., :-1],
axis=-1)
# Each resampling requires a single uniform random variable
offset = uniform.Uniform(
low=tf.constant(0., log_cdf.dtype),
high=tf.constant(1., log_cdf.dtype)).sample(
working_shape, seed=seed)[..., tf.newaxis]
# It is possible for numerical error to result in a cumulative
# sum that exceeds 1 so we need to clip.
markers = ps.cast(
tf.floor(event_size * tf.math.exp(log_cdf) + offset), tf.int32)
indices = markers[..., tf.newaxis]
updates = tf.ones(ps.shape(indices)[:-1], dtype=tf.int32)
scatter_shape = ps.concat(
[working_shape, [event_size + 1]], axis=-1)
batch_dims = (ps.rank_from_shape(sample_shape) +
ps.rank_from_shape(batch_shape))
x = _scatter_nd_batch(indices, updates, scatter_shape,
batch_dims=batch_dims)
resampled = tf.cumsum(x, axis=-1)[..., :-1]
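# Worked example (illustrative): for probs = [0.2, 0.3, 0.5], event_size = 4,
# and offset u = 0.3: exp(log_cdf) = [0.2, 0.5], markers = floor([1.1, 2.3])
# = [1, 2], the scattered array is [0, 1, 1, 0, 0], and its cumulative sum
# (dropping the last entry) yields resampled indices [0, 1, 2, 2].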
return dist_util.move_dimension(resampled, source_idx=-1, dest_idx=0)
def _finite_differences(sums):
"""The inverse of `tf.cumsum` with `axis=-1`."""
return ps.concat(
[sums[..., :1], sums[..., 1:] - sums[..., :-1]], axis=-1)
def _samples_from_counts(values, counts, total_number):
"""Construct sequences of values from tabulated numbers of counts."""
extended_result_shape = ps.concat(
[ps.shape(counts)[:-1],
[total_number + 1]], axis=0)
padded_counts = ps.concat(
[ps.zeros_like(counts[..., :1]),
counts[..., :-1]], axis=-1)
edge_positions = ps.cumsum(padded_counts, axis=-1)
# We need to scatter `values` into an array according to
# the given `counts`.
# Because the final result typically consists of sequences of samples
# that are constant in blocks, we can scatter just the finite
# differences of the values (which become the 'edges' of the blocks)
# and then cumulatively sum them back up
# at the end. (Reminiscent of run length encoding.)
# Eg. suppose we have values = `[0, 2, 1]`
# and counts = `[2, 3, 4]`
# Then the output we require is `[0, 0, 2, 2, 2, 1, 1, 1, 1]`.
# The finite differences of the input are:
# `[0, 2, -1]`.
# The finite differences of the output are:
# `[0, 0, 2, 0, 0, -1, 0, 0, 0]`.
# The latter is the former scattered into a larger array.
#
# So the algorithm is essentially
# compute finite differences -> scatter -> undo finite differences
edge_heights = _finite_differences(values)
edges = _scatter_nd_batch(
edge_positions[..., tf.newaxis],
edge_heights,
extended_result_shape,
batch_dims=ps.rank_from_shape(ps.shape(counts)) - 1)
result = tf.cumsum(edges, axis=-1)[..., :-1]
return result
# TODO(b/153689734): rewrite so as not to use `move_dimension`.
def resample_deterministic_minimum_error(
log_probs, event_size, sample_shape,
seed=None, name='resample_deterministic_minimum_error'):
"""Deterministic minimum error resampler for sequential Monte Carlo.
This function is based on Algorithm #3 in [Maskell et al. (2006)][1].
Args:
log_probs: A tensor-valued batch of discrete log probability distributions.
event_size: the dimension of the vector considered a single draw.
sample_shape: the `sample_shape` determining the number of draws. Because
this resampler is deterministic it simply replicates the draw you
would get for `sample_shape=[1]`.
seed: This argument is unused but is present so that this function shares
its interface with the other resampling functions.
Default value: None
name: Python `str` name for ops created by this method.
Default value: `None` (i.e., `'resample_deterministic_minimum_error'`).
Returns:
resampled_indices: The result is similar to sampling with
```python
expanded_sample_shape = tf.concat([sample_shape, [event_size]], axis=-1)
tfd.Categorical(logits=log_probs).sample(expanded_sample_shape)
```
but with values chosen deterministically so that the empirical distribution
is as close as possible to the specified distribution.
It is intended to provide a good representative sample, suitable for use
with Sequential Monte Carlo algorithms.
#### References
[1]: <NAME>, <NAME> and <NAME>. A Single Instruction Multiple
Data Particle Filter.
In 2006 IEEE Nonlinear Statistical Signal Processing Workshop.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
del seed
with tf.name_scope(name or 'resample_deterministic_minimum_error'):
sample_shape = tf.convert_to_tensor(sample_shape, dtype_hint=tf.int32)
log_probs = dist_util.move_dimension(
log_probs, source_idx=0, dest_idx=-1)
probs = tf.math.exp(log_probs)
prob_shape = ps.shape(probs)
pdf_size = prob_shape[-1]
# If we could draw fractional numbers of samples we would
# choose `ideal_numbers` for the number of each element.
ideal_numbers = event_size * probs
# We approximate the ideal numbers by truncating to integers
# and then repair the counts starting with the one with the
# largest fractional error and working our way down.
first_approximation = tf.floor(ideal_numbers)
missing_fractions = ideal_numbers - first_approximation
first_approximation = ps.cast(
first_approximation, dtype=tf.int32)
fraction_order = tf.argsort(missing_fractions, axis=-1)
# We sort the integer parts and fractional parts together.
batch_dims = ps.rank_from_shape(prob_shape) - 1
first_approximation = tf.gather_nd(
first_approximation,
fraction_order[..., tf.newaxis],
batch_dims=batch_dims)
missing_fractions = tf.gather_nd(
missing_fractions,
fraction_order[..., tf.newaxis],
batch_dims=batch_dims)
sample_defect = event_size - tf.reduce_sum(
first_approximation, axis=-1, keepdims=True)
unpermuted = tf.broadcast_to(
tf.range(pdf_size),
prob_shape)
increments = tf.cast(
unpermuted >= pdf_size - sample_defect,
dtype=first_approximation.dtype)
counts = first_approximation + increments
samples = _samples_from_counts(fraction_order, counts, event_size)
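# Worked example (illustrative): for probs = [0.25, 0.35, 0.4] and
# event_size = 4: ideal_numbers = [1.0, 1.4, 1.6], first_approximation =
# [1, 1, 1], sample_defect = 1, so the entry with the largest missing
# fraction (original index 2) receives the extra draw, giving counts
# [1, 1, 2] and samples = [0, 1, 2, 2].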
result_shape = tf.concat([sample_shape,
prob_shape[:-1],
[event_size]], axis=0)
# Replicate sample up to batch size.
# TODO(dpiponi): rather than replicating, spread the "error" over
# multiple samples with a minimum-discrepancy sequence.
resampled = tf.broadcast_to(samples, result_shape)
return dist_util.move_dimension(resampled, source_idx=-1, dest_idx=0)
``` |
{
"source": "johannespitz/pyro",
"score": 2
} |
#### File: contrib/epidemiology/sir.py
```python
import torch
from torch.nn.functional import pad
import pyro
import pyro.distributions as dist
from .compartmental import CompartmentalModel
from .distributions import infection_dist
class SimpleSIRModel(CompartmentalModel):
"""
Susceptible-Infected-Recovered model.
To customize this model we recommend forking and editing this class.
This is a stochastic discrete-time discrete-state model with three
compartments: "S" for susceptible, "I" for infected, and "R" for
recovered individuals (the recovered individuals are implicit: ``R =
population - S - I``) with transitions ``S -> I -> R``.
:param int population: Total ``population = S + I + R``.
:param float recovery_time: Mean recovery time (duration in state
``I``). Must be greater than 1.
:param iterable data: Time series of new observed infections. Each time
step is Binomial distributed between 0 and the number of ``S -> I``
transitions. This allows false negative but no false positives.
"""
def __init__(self, population, recovery_time, data):
compartments = ("S", "I") # R is implicit.
duration = len(data)
super().__init__(compartments, duration, population)
assert isinstance(recovery_time, float)
assert recovery_time > 1
self.recovery_time = recovery_time
self.data = data
series = ("S2I", "I2R", "obs")
full_mass = [("R0", "rho")]
def global_model(self):
tau = self.recovery_time
R0 = pyro.sample("R0", dist.LogNormal(0., 1.))
rho = pyro.sample("rho", dist.Beta(2, 2))
return R0, tau, rho
def initialize(self, params):
# Start with a single infection.
return {"S": self.population - 1, "I": 1}
def transition_fwd(self, params, state, t):
R0, tau, rho = params
# Sample flows between compartments.
S2I = pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0 / tau,
num_susceptible=state["S"],
num_infectious=state["I"],
population=self.population))
I2R = pyro.sample("I2R_{}".format(t),
dist.Binomial(state["I"], 1 / tau))
# Update compartments with flows.
state["S"] = state["S"] - S2I
state["I"] = state["I"] + S2I - I2R
# Condition on observations.
pyro.sample("obs_{}".format(t),
dist.ExtendedBinomial(S2I, rho),
obs=self.data[t] if t < self.duration else None)
def transition_bwd(self, params, prev, curr, t):
R0, tau, rho = params
# Reverse the flow computation.
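# The forward updates were S' = S - S2I and I' = I + S2I - I2R, so the flows
# are recovered as S2I = S - S' and I2R = I - I' + S2I.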
S2I = prev["S"] - curr["S"]
I2R = prev["I"] - curr["I"] + S2I
# Condition on flows between compartments.
pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0 / tau,
num_susceptible=prev["S"],
num_infectious=prev["I"],
population=self.population),
obs=S2I)
pyro.sample("I2R_{}".format(t),
dist.ExtendedBinomial(prev["I"], 1 / tau),
obs=I2R)
# Condition on observations.
pyro.sample("obs_{}".format(t),
dist.ExtendedBinomial(S2I, rho),
obs=self.data[t])
class OverdispersedSIRModel(CompartmentalModel):
"""
Overdispersed Susceptible-Infected-Recovered model.
To customize this model we recommend forking and editing this class.
This is a stochastic discrete-time discrete-state model with three
compartments: "S" for susceptible, "I" for infected, and "R" for
recovered individuals (the recovered individuals are implicit: ``R =
population - S - I``) with transitions ``S -> I -> R``.
This model accounts for superspreading (overdispersed individual
reproductive number) by assuming each infected individual infects
BetaBinomial-many susceptible individuals, where the BetaBinomial
distribution acts as an overdispersed Binomial distribution, adapting the
more standard NegativeBinomial distribution that acts as an overdispersed
Poisson distribution [1,2] to the setting of finite populations. To
preserve Markov structure, we follow [2] and assume all infections by a
single individual occur on the single time step where that individual makes
an ``I -> R`` transition. That is, whereas the :class:`SimpleSIRModel`
assumes infected individuals infect `Binomial(S,R/tau)`-many susceptible
individuals during each infected time step (over `tau`-many steps on
average), this model assumes they infect `BetaBinomial(k,...,S)`-many
susceptible individuals but only on the final time step before recovering.
**References**
[1] <NAME>, <NAME>, <NAME>, <NAME> (2005)
"Superspreading and the effect of individual variation on disease
emergence"
https://www.nature.com/articles/nature04153.pdf
[2] <NAME>, <NAME>, <NAME> (2017)
"Quantifying Transmission Heterogeneity Using Both Pathogen Phylogenies
and Incidence Time Series"
https://academic.oup.com/mbe/article/34/11/2982/3952784
:param int population: Total ``population = S + I + R``.
:param float recovery_time: Mean recovery time (duration in state
``I``). Must be greater than 1.
:param iterable data: Time series of new observed infections. Each time
step is Binomial distributed between 0 and the number of ``S -> I``
transitions. This allows false negative but no false positives.
"""
def __init__(self, population, recovery_time, data):
compartments = ("S", "I") # R is implicit.
duration = len(data)
super().__init__(compartments, duration, population)
assert isinstance(recovery_time, float)
assert recovery_time > 1
self.recovery_time = recovery_time
self.data = data
series = ("S2I", "I2R", "obs")
full_mass = [("R0", "rho", "k")]
def global_model(self):
tau = self.recovery_time
R0 = pyro.sample("R0", dist.LogNormal(0., 1.))
k = pyro.sample("k", dist.Exponential(1.))
rho = pyro.sample("rho", dist.Beta(2, 2))
return R0, k, tau, rho
def initialize(self, params):
# Start with a single infection.
return {"S": self.population - 1, "I": 1}
def transition_fwd(self, params, state, t):
R0, k, tau, rho = params
# Sample flows between compartments.
I2R = pyro.sample("I2R_{}".format(t),
dist.Binomial(state["I"], 1 / tau))
S2I = pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0,
num_susceptible=state["S"],
num_infectious=state["I"],
population=self.population,
concentration=k))
# Update compartments with flows.
state["S"] = state["S"] - S2I
state["I"] = state["I"] + S2I - I2R
# Condition on observations.
pyro.sample("obs_{}".format(t),
dist.ExtendedBinomial(S2I, rho),
obs=self.data[t] if t < self.duration else None)
def transition_bwd(self, params, prev, curr, t):
R0, k, tau, rho = params
# Reverse the flow computation.
S2I = prev["S"] - curr["S"]
I2R = prev["I"] - curr["I"] + S2I
# Condition on flows between compartments.
pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0,
num_susceptible=prev["S"],
num_infectious=prev["I"],
population=self.population,
concentration=k),
obs=S2I)
pyro.sample("I2R_{}".format(t),
dist.ExtendedBinomial(prev["I"], 1 / tau),
obs=I2R)
# Condition on observations.
pyro.sample("obs_{}".format(t),
dist.ExtendedBinomial(S2I, rho),
obs=self.data[t])
class SparseSIRModel(CompartmentalModel):
"""
Susceptible-Infected-Recovered model with sparsely observed infections.
To customize this model we recommend forking and editing this class.
This is a stochastic discrete-time discrete-state model with four
compartments: "S" for susceptible, "I" for infected, "O" for the cumulative
number of observed infections (an auxiliary compartment described below),
and "R" for recovered individuals (the recovered individuals are implicit:
``R = population - S - I``) with transitions ``S -> I -> R``.
This model allows observations of **cumulative** infections at uneven time
intervals. To preserve Markov structure (and hence tractable inference)
this model adds an auxiliary compartment ``O`` denoting the fully-observed
cumulative number of observations at each time point. At observed times
(when ``mask[t] == True``) ``O`` must exactly match the provided data;
between observed times ``O`` stochastically imputes the provided data.
:param int population: Total ``population = S + I + R``.
:param float recovery_time: Mean recovery time (duration in state
``I``). Must be greater than 1.
:param iterable data: Time series of **cumulative** observed infections.
Whenever ``mask[t] == True``, ``data[t]`` corresponds to an
observation; otherwise ``data[t]`` can be arbitrary, e.g. NAN.
:param iterable mask: Boolean time series denoting whether an observation
is made at each time step. Should satisfy ``len(mask) == len(data)``.
"""
def __init__(self, population, recovery_time, data, mask):
assert len(data) == len(mask)
duration = len(data)
compartments = ("S", "I", "O") # O is auxiliary, R is implicit.
super().__init__(compartments, duration, population)
assert isinstance(recovery_time, float)
assert recovery_time > 1
self.recovery_time = recovery_time
self.data = data
self.mask = mask
series = ("S2I", "I2R", "S2O", "obs")
full_mass = [("R0", "rho")]
def global_model(self):
tau = self.recovery_time
R0 = pyro.sample("R0", dist.LogNormal(0., 1.))
rho = pyro.sample("rho", dist.Beta(2, 2))
return R0, tau, rho
def initialize(self, params):
# Start with a single infection.
return {"S": self.population - 1, "I": 1, "O": 0}
def transition_fwd(self, params, state, t):
R0, tau, rho = params
# Sample flows between compartments.
S2I = pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0 / tau,
num_susceptible=state["S"],
num_infectious=state["I"],
population=self.population))
I2R = pyro.sample("I2R_{}".format(t),
dist.Binomial(state["I"], 1 / tau))
S2O = pyro.sample("S2O_{}".format(t),
dist.ExtendedBinomial(S2I, rho))
# Update compartments with flows.
state["S"] = state["S"] - S2I
state["I"] = state["I"] + S2I - I2R
state["O"] = state["O"] + S2O
# Condition on cumulative observations.
mask_t = self.mask[t] if t < self.duration else False
data_t = self.data[t] if t < self.duration else None
pyro.sample("obs_{}".format(t),
dist.Delta(state["O"]).mask(mask_t),
obs=data_t)
def transition_bwd(self, params, prev, curr, t):
R0, tau, rho = params
# Reverse the flow computation.
S2I = prev["S"] - curr["S"]
I2R = prev["I"] - curr["I"] + S2I
S2O = curr["O"] - prev["O"]
# Condition on flows between compartments.
pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0 / tau,
num_susceptible=prev["S"],
num_infectious=prev["I"],
population=self.population),
obs=S2I)
pyro.sample("I2R_{}".format(t),
dist.ExtendedBinomial(prev["I"], 1 / tau),
obs=I2R)
pyro.sample("S2O_{}".format(t),
dist.ExtendedBinomial(S2I, rho),
obs=S2O)
# Condition on cumulative observations.
pyro.sample("obs_{}".format(t),
dist.Delta(curr["O"]).mask(self.mask[t]),
obs=self.data[t])
class UnknownStartSIRModel(CompartmentalModel):
"""
Susceptible-Infected-Recovered model with unknown date of first infection.
To customize this model we recommend forking and editing this class.
This is a stochastic discrete-time discrete-state model with three
compartments: "S" for susceptible, "I" for infected, and "R" for
recovered individuals (the recovered individuals are implicit: ``R =
population - S - I``) with transitions ``S -> I -> R``.
This model demonstrates:
1. How to incorporate spontaneous infections from external sources;
2. How to incorporate time-varying piecewise ``rho`` by supporting
forecasting in :meth:`transition_fwd`.
3. How to override the :meth:`predict` method to compute extra
statistics.
:param int population: Total ``population = S + I + R``.
:param float recovery_time: Mean recovery time (duration in state
``I``). Must be greater than 1.
:param int pre_obs_window: Number of time steps before beginning ``data``
where the initial infection may have occurred. Must be positive.
:param iterable data: Time series of new observed infections. Each time
step is Binomial distributed between 0 and the number of ``S -> I``
transitions. This allows false negative but no false positives.
"""
def __init__(self, population, recovery_time, pre_obs_window, data):
compartments = ("S", "I") # R is implicit.
duration = pre_obs_window + len(data)
super().__init__(compartments, duration, population)
assert isinstance(recovery_time, float)
assert recovery_time > 1
self.recovery_time = recovery_time
assert isinstance(pre_obs_window, int) and pre_obs_window > 0
self.pre_obs_window = pre_obs_window
self.post_obs_window = len(data)
        # We set a small time-constant external infection rate such that on
# average there is a single external infection during the
# pre_obs_window. This allows unknown time of initial infection
# without introducing long-range coupling across time.
self.external_rate = 1 / pre_obs_window
# Prepend data with zeros.
if isinstance(data, list):
data = [0.] * self.pre_obs_window + data
else:
data = pad(data, (self.pre_obs_window, 0), value=0.)
self.data = data
series = ("S2I", "I2R", "obs")
full_mass = [("R0", "rho0", "rho1")]
def global_model(self):
tau = self.recovery_time
R0 = pyro.sample("R0", dist.LogNormal(0., 1.))
# Assume two different response rates: rho0 before any observations
# were made (in pre_obs_window), followed by a higher response rate rho1
# after observations were made (in post_obs_window).
rho0 = pyro.sample("rho0", dist.Beta(2, 2))
rho1 = pyro.sample("rho1", dist.Beta(2, 2))
# Whereas each of rho0,rho1 are scalars (possibly batched over samples),
# we construct a time series rho with an extra time dim on the right.
rho = torch.cat([
rho0.unsqueeze(-1).expand(rho0.shape + (self.pre_obs_window,)),
rho1.unsqueeze(-1).expand(rho1.shape + (self.post_obs_window,)),
], dim=-1)
# Model external infections as an infectious pseudo-individual added
# to num_infectious when sampling S2I below.
X = self.external_rate * tau / R0
return R0, X, tau, rho
def initialize(self, params):
# Start with no internal infections.
return {"S": self.population, "I": 0}
def transition_fwd(self, params, state, t):
R0, X, tau, rho = params
# Sample flows between compartments.
S2I = pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0 / tau,
num_susceptible=state["S"],
num_infectious=state["I"] + X,
population=self.population))
I2R = pyro.sample("I2R_{}".format(t),
dist.Binomial(state["I"], 1 / tau))
# Update compartments with flows.
state["S"] = state["S"] - S2I
state["I"] = state["I"] + S2I - I2R
# In .transition_fwd() t will always be an integer but may lie outside
# of [0,self.duration) when forecasting.
rho_t = rho[..., t] if t < self.duration else rho[..., -1]
data_t = self.data[t] if t < self.duration else None
# Condition on observations.
pyro.sample("obs_{}".format(t),
dist.ExtendedBinomial(S2I, rho_t),
obs=data_t)
def transition_bwd(self, params, prev, curr, t):
R0, X, tau, rho = params
# Reverse the flow computation.
S2I = prev["S"] - curr["S"]
I2R = prev["I"] - curr["I"] + S2I
# Condition on flows between compartments.
pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0 / tau,
num_susceptible=prev["S"],
num_infectious=prev["I"] + X,
population=self.population),
obs=S2I)
pyro.sample("I2R_{}".format(t),
dist.ExtendedBinomial(prev["I"], 1 / tau),
obs=I2R)
# Condition on observations.
pyro.sample("obs_{}".format(t),
dist.ExtendedBinomial(S2I, rho[..., t]),
obs=self.data[t])
def predict(self, forecast=0):
"""
Augments
:meth:`~pyro.contrib.epidemiology.compartmental.Compartmental.predict`
with samples of ``first_infection`` i.e. the first time index at which
the infection ``I`` becomes nonzero. Note this is measured from the
beginning of ``pre_obs_window``, not the beginning of data.
:param int forecast: The number of time steps to forecast forward.
:returns: A dictionary mapping sample site name (or compartment name)
to a tensor whose first dimension corresponds to sample batching.
:rtype: dict
"""
samples = super().predict(forecast)
# Extract the time index of the first infection (samples["I"] > 0)
# for each sample trajectory in the samples["I"] tensor.
samples["first_infection"] = samples["I"].cumsum(-1).eq(0).sum(-1)
return samples
class RegionalSIRModel(CompartmentalModel):
r"""
Susceptible-Infected-Recovered model with coupling across regions.
To customize this model we recommend forking and editing this class.
This is a stochastic discrete-time discrete-state model with three
compartments in each region: "S" for susceptible, "I" for infected, and "R"
for recovered individuals (the recovered individuals are implicit: ``R =
population - S - I``) with transitions ``S -> I -> R``.
Regions are coupled by a ``coupling`` matrix with entries in ``[0,1]``.
The all ones matrix is equivalent to a single region. The identity matrix
is equivalent to a set of independent regions. This need not be symmetric,
but symmetric matrices are probably more physically plausible. The expected
number of new infections each time step ``S2I`` is Binomial distributed
with mean::
E[S2I] = S (1 - (1 - R0 / (population @ coupling)) ** (I @ coupling))
≈ R0 S (I @ coupling) / (population @ coupling) # for small I
Thus in a nearly entirely susceptible population, a single infected
individual infects approximately ``R0`` new individuals on average,
independent of ``coupling``.
This model demonstrates:
1. How to create a regional model with a ``population`` vector.
2. How to model both homogeneous parameters (here ``R0``) and
heterogeneous parameters with hierarchical structure (here ``rho``)
using ``self.region_plate``.
3. How to approximately couple regions in :meth:`transition_bwd` using
``prev["I_approx"]``.
:param torch.Tensor population: Tensor of per-region populations, defining
``population = S + I + R``.
:param torch.Tensor coupling: Pairwise coupling matrix. Entries should be
in ``[0,1]``.
:param float recovery_time: Mean recovery time (duration in state ``I``).
Must be greater than 1.
:param iterable data: Time x Region sized tensor of new observed
infections. Each time step is vector of Binomials distributed between
0 and the number of ``S -> I`` transitions. This allows false negative
but no false positives.
"""
def __init__(self, population, coupling, recovery_time, data):
duration = len(data)
num_regions, = population.shape
assert coupling.shape == (num_regions, num_regions)
assert (0 <= coupling).all()
assert (coupling <= 1).all()
assert isinstance(recovery_time, float)
assert recovery_time > 1
if isinstance(data, torch.Tensor):
# Data tensors should be oriented as (time, region).
assert data.shape == (duration, num_regions)
compartments = ("S", "I") # R is implicit.
# We create a regional model by passing a vector of populations.
super().__init__(compartments, duration, population, approximate=("I",))
self.coupling = coupling
self.recovery_time = recovery_time
self.data = data
series = ("S2I", "I2R", "obs")
full_mass = [("R0", "rho")]
def global_model(self):
# Assume recovery time is a known constant.
tau = self.recovery_time
# Assume reproductive number is unknown but homogeneous.
R0 = pyro.sample("R0", dist.LogNormal(0., 1.))
# Assume response rate is heterogeneous and model it with a
# hierarchical Gamma-Beta prior.
rho_c1 = pyro.sample("rho_c1", dist.Gamma(2, 1))
rho_c0 = pyro.sample("rho_c0", dist.Gamma(2, 1))
with self.region_plate:
rho = pyro.sample("rho", dist.Beta(rho_c1, rho_c0))
return R0, tau, rho
def initialize(self, params):
# Start with a single infection in region 0.
I = torch.zeros_like(self.population)
I[0] += 1
S = self.population - I
return {"S": S, "I": I}
def transition_fwd(self, params, state, t):
R0, tau, rho = params
# Account for infections from all regions.
I_coupled = state["I"] @ self.coupling
pop_coupled = self.population @ self.coupling
with self.region_plate:
# Sample flows between compartments.
S2I = pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0 / tau,
num_susceptible=state["S"],
num_infectious=I_coupled,
population=pop_coupled))
I2R = pyro.sample("I2R_{}".format(t),
dist.Binomial(state["I"], 1 / tau))
# Update compartments with flows.
state["S"] = state["S"] - S2I
state["I"] = state["I"] + S2I - I2R
# Condition on observations.
pyro.sample("obs_{}".format(t),
dist.ExtendedBinomial(S2I, rho),
obs=self.data[t] if t < self.duration else None)
def transition_bwd(self, params, prev, curr, t):
R0, tau, rho = params
# Account for infections from all regions. This uses approximate (point
# estimate) counts I_approx for infection from other regions, but uses
# the exact (enumerated) count I for infections from one's own region.
I_coupled = prev["I_approx"] @ self.coupling
I_coupled = I_coupled + (prev["I"] - prev["I_approx"]) * self.coupling.diag()
I_coupled = I_coupled.clamp(min=0) # In case I_approx is negative.
pop_coupled = self.population @ self.coupling
# Reverse the flow computation.
S2I = prev["S"] - curr["S"]
I2R = prev["I"] - curr["I"] + S2I
with self.region_plate:
# Condition on flows between compartments.
pyro.sample("S2I_{}".format(t),
infection_dist(individual_rate=R0 / tau,
num_susceptible=prev["S"],
num_infectious=I_coupled,
population=pop_coupled),
obs=S2I)
pyro.sample("I2R_{}".format(t),
dist.ExtendedBinomial(prev["I"], 1 / tau),
obs=I2R)
# Condition on observations.
pyro.sample("obs_{}".format(t),
dist.ExtendedBinomial(S2I, rho),
obs=self.data[t])
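# Illustrative sketch of the coupling-matrix semantics described in the
# RegionalSIRModel docstring (values below are made up for illustration):
#   torch.ones(3, 3)  -> behaves like a single, fully mixed region
#   torch.eye(3)      -> three completely independent regions
#   population = torch.tensor([10000., 5000., 2000.])
#   coupling = torch.eye(3) * 0.9 + 0.05   # strong local mixing, weak cross-region contact
#   model = RegionalSIRModel(population, coupling, recovery_time=7.0, data=obs)  # obs: (T, 3) tensor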
```
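The model classes above are only exercised indirectly by the SEIR smoke tests below, so here is a minimal usage sketch for `UnknownStartSIRModel`, mirroring the `fit`/`predict` calls used in those tests. The import path, the toy case counts, and the tiny MCMC settings are illustrative assumptions rather than values taken from the original sources.
```python
# Assumed import path; if the class is not re-exported by the package,
# import it from the module defined above instead.
from pyro.contrib.epidemiology import UnknownStartSIRModel

population = 1000
recovery_time = 7.0
pre_obs_window = 14                        # days before the data in which the outbreak may have started
data = [0., 0., 1., 2., 3., 5., 8., 12.]   # toy observed case counts

model = UnknownStartSIRModel(population, recovery_time, pre_obs_window, data)
model.fit(warmup_steps=50, num_samples=100, max_tree_depth=5)  # tiny settings, illustration only

samples = model.predict(forecast=7)
# first_infection is counted from the start of pre_obs_window (see predict() above).
print(samples["first_infection"].float().mean())
```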
#### File: contrib/epidemiology/test_seir.py
```python
import pytest
import torch
import pyro.distributions as dist
from pyro.contrib.epidemiology import OverdispersedSEIRModel, SimpleSEIRModel
@pytest.mark.parametrize("duration", [3, 7])
@pytest.mark.parametrize("forecast", [0, 7])
@pytest.mark.parametrize("options", [
{},
{"dct": 1.},
{"num_quant_bins": 8},
], ids=str)
def test_simple_smoke(duration, forecast, options):
population = 100
incubation_time = 2.0
recovery_time = 7.0
# Generate data.
model = SimpleSEIRModel(population, incubation_time, recovery_time,
[None] * duration)
for attempt in range(100):
data = model.generate({"R0": 1.5, "rho": 0.5})["obs"]
if data.sum():
break
assert data.sum() > 0, "failed to generate positive data"
# Infer.
model = SimpleSEIRModel(population, incubation_time, recovery_time, data)
num_samples = 5
model.fit(warmup_steps=2, num_samples=num_samples, max_tree_depth=2,
**options)
# Predict and forecast.
samples = model.predict(forecast=forecast)
assert samples["S"].shape == (num_samples, duration + forecast)
assert samples["E"].shape == (num_samples, duration + forecast)
assert samples["I"].shape == (num_samples, duration + forecast)
@pytest.mark.parametrize("duration", [3, 7])
@pytest.mark.parametrize("forecast", [0, 7])
@pytest.mark.parametrize("options", [
{},
{"dct": 1.},
{"num_quant_bins": 8},
], ids=str)
def test_overdispersed_smoke(duration, forecast, options):
population = 100
incubation_time = 2.0
recovery_time = 7.0
# Generate data.
model = OverdispersedSEIRModel(
population, incubation_time, recovery_time, [None] * duration)
for attempt in range(100):
data = model.generate({"R0": 1.5, "rho": 0.5, "k": 1.0})["obs"]
if data.sum():
break
assert data.sum() > 0, "failed to generate positive data"
# Infer.
model = OverdispersedSEIRModel(
population, incubation_time, recovery_time, data)
num_samples = 5
model.fit(warmup_steps=2, num_samples=num_samples, max_tree_depth=2,
**options)
# Predict and forecast.
samples = model.predict(forecast=forecast)
assert samples["S"].shape == (num_samples, duration + forecast)
assert samples["E"].shape == (num_samples, duration + forecast)
assert samples["I"].shape == (num_samples, duration + forecast)
@pytest.mark.parametrize("duration", [3, 7])
@pytest.mark.parametrize("forecast", [0, 7])
def test_coalescent_likelihood_smoke(duration, forecast):
population = 100
incubation_time = 2.0
recovery_time = 7.0
# Generate data.
model = OverdispersedSEIRModel(
population, incubation_time, recovery_time, [None] * duration)
for attempt in range(100):
data = model.generate({"R0": 1.5, "rho": 0.5, "k": 1.0})["obs"]
if data.sum():
break
assert data.sum() > 0, "failed to generate positive data"
leaf_times = torch.rand(5).pow(0.5) * duration
coal_times = dist.CoalescentTimes(leaf_times).sample()
coal_times = coal_times[..., torch.randperm(coal_times.size(-1))]
# Infer.
model = OverdispersedSEIRModel(
population, incubation_time, recovery_time, data,
leaf_times=leaf_times, coal_times=coal_times)
num_samples = 5
model.fit(warmup_steps=2, num_samples=num_samples, max_tree_depth=2)
# Predict and forecast.
samples = model.predict(forecast=forecast)
assert samples["S"].shape == (num_samples, duration + forecast)
assert samples["E"].shape == (num_samples, duration + forecast)
assert samples["I"].shape == (num_samples, duration + forecast)
``` |
{
"source": "johannespitz/softlearning",
"score": 2
} |
#### File: examples/development/simulate_policy.py
```python
import argparse
from distutils.util import strtobool
import json
import os
from pathlib import Path
import pickle
import tensorflow as tf
import pandas as pd
from softlearning.environments.utils import (
get_environment_from_params, get_environment)
from softlearning.policies.utils import get_policy_from_variant
from softlearning.samplers import rollouts
from softlearning.utils.video import save_video
DEFAULT_RENDER_KWARGS = {
'mode': 'human',
}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('checkpoint_path',
type=str,
help='Path to the checkpoint.')
parser.add_argument('--max-path-length', '-l', type=int, default=1000)
parser.add_argument('--num-rollouts', '-n', type=int, default=10)
parser.add_argument('--render-kwargs', '-r',
type=json.loads,
default='{}',
help="Kwargs for rollouts renderer.")
parser.add_argument('--video-save-path',
type=Path,
default=None)
parser.add_argument('--deterministic', '-d',
type=lambda x: bool(strtobool(x)),
nargs='?',
const=True,
default=True,
help="Evaluate policy deterministically.")
args = parser.parse_args()
return args
def load_checkpoint(checkpoint_path, session=None):
session = session or tf.keras.backend.get_session()
checkpoint_path = checkpoint_path.rstrip('/')
trial_path = os.path.dirname(checkpoint_path)
variant_path = os.path.join(trial_path, 'params.pkl')
with open(variant_path, 'rb') as f:
variant = pickle.load(f)
metadata_path = os.path.join(checkpoint_path, ".tune_metadata")
if os.path.exists(metadata_path):
with open(metadata_path, "rb") as f:
metadata = pickle.load(f)
else:
metadata = None
with session.as_default():
pickle_path = os.path.join(checkpoint_path, 'checkpoint.pkl')
with open(pickle_path, 'rb') as f:
picklable = pickle.load(f)
progress_path = os.path.join(trial_path, 'progress.csv')
progress = pd.read_csv(progress_path)
return picklable, variant, progress, metadata
def load_policy_and_environment(picklable, variant):
environment_params = (
        variant['environment_params']['evaluation']
if 'evaluation' in variant['environment_params']
else variant['environment_params']['training'])
environment = get_environment_from_params(environment_params)
policy = get_policy_from_variant(variant, environment)
policy.set_weights(picklable['policy_weights'])
return policy, environment
def simulate_policy(checkpoint_path,
deterministic,
num_rollouts,
max_path_length,
render_kwargs,
video_save_path=None,
evaluation_environment_params=None):
checkpoint_path = checkpoint_path.rstrip('/')
picklable, variant, progress, metadata = load_checkpoint(checkpoint_path)
policy, environment = load_policy_and_environment(picklable, variant)
render_kwargs = {**DEFAULT_RENDER_KWARGS, **render_kwargs}
with policy.set_deterministic(deterministic):
paths = rollouts(num_rollouts,
environment,
policy,
path_length=max_path_length,
render_kwargs=render_kwargs)
    if video_save_path and render_kwargs.get('mode') == 'rgb_array':
        fps = 1 // getattr(environment, 'dt', 1/30)
        # Save each episode under the directory given by `video_save_path`.
        video_save_dir = os.path.expanduser(str(video_save_path))
        os.makedirs(video_save_dir, exist_ok=True)
        for i, path in enumerate(paths):
            video_path = os.path.join(video_save_dir, f'episode_{i}.mp4')
            save_video(path['images'], video_path, fps=fps)
return paths
if __name__ == '__main__':
gpu_options = tf.GPUOptions(allow_growth=True)
session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
tf.keras.backend.set_session(session)
args = parse_args()
simulate_policy(**vars(args))
```
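`simulate_policy` can also be invoked programmatically instead of through the CLI entry point above. A minimal sketch, assuming a hypothetical checkpoint directory from a ray/tune run and the same TF1-style session setup as in `__main__`:
```python
import tensorflow as tf

# Assumed module path for the script above; adjust to your checkout layout.
from examples.development.simulate_policy import simulate_policy

# Hypothetical checkpoint directory written by a softlearning experiment.
CHECKPOINT_PATH = '/tmp/ray_results/my-experiment/checkpoint_100'

gpu_options = tf.GPUOptions(allow_growth=True)
session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
tf.keras.backend.set_session(session)

paths = simulate_policy(
    checkpoint_path=CHECKPOINT_PATH,
    deterministic=True,
    num_rollouts=3,
    max_path_length=200,
    render_kwargs={'mode': 'rgb_array'})
print(f'collected {len(paths)} rollouts')
```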
#### File: softlearning/policies/utils.py
```python
from collections import OrderedDict
from copy import deepcopy
from softlearning.preprocessors.utils import get_preprocessor_from_params
def get_gaussian_policy(*args, **kwargs):
from .gaussian_policy import FeedforwardGaussianPolicy
policy = FeedforwardGaussianPolicy(*args, **kwargs)
return policy
def get_uniform_policy(*args, **kwargs):
from .uniform_policy import ContinuousUniformPolicy
policy = ContinuousUniformPolicy(*args, **kwargs)
return policy
POLICY_FUNCTIONS = {
'GaussianPolicy': get_gaussian_policy,
'UniformPolicy': get_uniform_policy,
}
def get_policy(policy_type, *args, **kwargs):
return POLICY_FUNCTIONS[policy_type](*args, **kwargs)
def get_policy_from_params(policy_params, env, *args, **kwargs):
policy_type = policy_params['type']
policy_kwargs = deepcopy(policy_params.get('kwargs', {}))
observation_preprocessors_params = policy_kwargs.pop(
'observation_preprocessors_params', {})
observation_keys = policy_kwargs.pop(
'observation_keys', None) or env.observation_keys
observation_shapes = OrderedDict((
(key, value) for key, value in env.observation_shape.items()
if key in observation_keys
))
observation_preprocessors = OrderedDict()
for name, observation_shape in observation_shapes.items():
preprocessor_params = observation_preprocessors_params.get(name, None)
if not preprocessor_params:
observation_preprocessors[name] = None
continue
observation_preprocessors[name] = get_preprocessor_from_params(
env, preprocessor_params)
action_range = (env.action_space.low, env.action_space.high)
policy = POLICY_FUNCTIONS[policy_type](
input_shapes=observation_shapes,
output_shape=env.action_shape,
action_range=action_range,
observation_keys=observation_keys,
*args,
preprocessors=observation_preprocessors,
**policy_kwargs,
**kwargs)
return policy
def get_policy_from_variant(variant, *args, **kwargs):
policy_params = variant['policy_params']
return get_policy_from_params(policy_params, *args, **kwargs)
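# Illustrative sketch of a policy_params dict as consumed by get_policy_from_params();
# the 'hidden_layer_sizes' kwarg is assumed to be accepted by FeedforwardGaussianPolicy:
#   policy_params = {
#       'type': 'GaussianPolicy',
#       'kwargs': {'hidden_layer_sizes': (256, 256)},
#   }
#   policy = get_policy_from_params(policy_params, env)  # env: a softlearning environment adapter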
``` |
{
"source": "johannes-qvarford/jq-todo-backend-py",
"score": 2
} |
#### File: jq-todo-backend-py/jqtodobackend/backend.py
```python
from typing import List
from uuid import UUID
from fastapi import FastAPI, Depends
from jqtodobackend.models import Todo, CreatedTodo, TodoChanges
from jqtodobackend.repository import TodoRepository
app = FastAPI()
@app.get("/", response_model=List[CreatedTodo])
def get_all(repo: TodoRepository = Depends(TodoRepository)):
ts = repo.all()
return ts
@app.delete("/")
def delete_all(repo: TodoRepository = Depends(TodoRepository)):
repo.clear()
@app.post("/", response_model=CreatedTodo)
def post(todo: Todo, repo: TodoRepository = Depends(TodoRepository)):
created = CreatedTodo.from_todo(todo)
repo.insert(created)
return created
@app.get(
"/{_id}",
response_model=CreatedTodo,
responses={404: {"description": "Todo could not be found"}},
)
def get(_id: UUID, repo: TodoRepository = Depends(TodoRepository)):
return repo.find(_id).as_http_response()
@app.patch("/{_id}")
def patch(
_id: UUID, todo_changes: TodoChanges, repo: TodoRepository = Depends(TodoRepository)
):
repo.patch(_id, todo_changes)
@app.delete("/{_id}")
def delete(_id: UUID, repo: TodoRepository = Depends(TodoRepository)):
repo.delete(_id)
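# Illustrative sketch of exercising the API with FastAPI's TestClient; the "title"
# field and the exact shape of CreatedTodo are assumptions based on the models above:
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   created = client.post("/", json={"title": "write docs"}).json()
#   print(client.get("/").json())
#   client.delete("/")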
``` |
{
"source": "JohannesRanderath/randydata",
"score": 2
} |
#### File: JohannesRanderath/randydata/setup.py
```python
from setuptools import setup
def readme():
with open("README.rst") as f:
return f.read()
setup(
name="randydata",
version="0.3.1.9",
    description="Tools for handling observational data in SQL or Excel.",
long_description=readme(),
keywords="excel sql data handling",
url="https://github.com/JohannesRanderath/randydata",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
packages=["randydata"],
install_requires=["pandas>=1.4.1", "mysql>=0.0.3", "sqlalchemy>=1.4.32", "IPython>=8.1.1",
"greenlet>=1.1.2", "numpy>=1.22.3", "python-dateutil>=2.8.2", "pytz>=2021.3", "six>=1.16.0",
"appnope>=0.1.2", "backcall>=0.2.0", "decorator>=5.1.1", "jedi>=0.18.1", "parso>=0.8.3",
"matplotlib-inline>=0.1.3", "traitlets>=5.1.1", "pexpect>=4.8.0", "ptyprocess>=0.7.0",
"pickleshare>=0.7.5", "prompt-toolkit>=3.0.28", "wcwidth>=0.2.5", "pygments>=2.11.2",
"setuptools>=58.1.0", "stack-data>=0.2.0", "asttokens>=2.0.5", "executing>=0.8.3",
"pure-eval>=0.2.2", "protobuf>=3.19.4", "openpyxl>=3.0.9"],
include_package_data=True
)
``` |
{
"source": "JohannesRanderath/StudyProblems",
"score": 3
} |
#### File: JohannesRanderath/StudyProblems/app.py
```python
from flask import Flask, render_template, redirect, request, flash, send_from_directory, jsonify, url_for
from flask_session import Session
from session_handling import is_logged_in, current_user, logout_from_session, login_to_session
from patterns_and_types import check_password_requirements, is_email
from security import generate_password_hash, verify_password, decrypt_token, generate_password_reset_link, \
generate_change_email_link, generate_email_confirmation_link
from mail import html_confirmation_email, html_change_mail_email, html_reset_password_mail, html_friend_request_mail, \
html_accepted_friend_mail, html_new_question_mail, html_question_answered, send_email, send_user_email
from database import create_new_user, update_user_hash, update_user_email, update_email_confirmed, get_user_email,\
get_username_by_email, close_connection, get_usernames_starting_with, user_exists, add_friend_request, add_message,\
get_user_messages, confirm_friend, delete_message, get_friends, exists_friend_or_request, delete_friends_from_db, \
get_questions_from_user, get_questions_for_user, add_question, get_question, question_exists, add_answer, \
delete_all_messages_asked_question, delete_all_messages_answered_question, get_email_preferences_not, \
update_email_preferences
app = Flask(__name__)
app.config.from_object("config.Config")
Session(app)
def flash_form_data(request_form):
try:
form_data = {key: request_form.get(key) for key in request_form.keys()}
form_data_strings = [key + ":" + form_data[key] for key in form_data.keys()]
flash(",".join(form_data_strings), "form-data")
return True
except Exception as e:
print("In app.flash_form_data: ", e)
return False
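# Example (illustrative): a submitted form with fields {"username": "alice",
# "email": "alice@example.com"} is flashed as the single string
# "username:alice,email:alice@example.com" under the "form-data" category, which the
# templates can presumably read back to re-populate the form inputs.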
def render_my_template(template: str, **kwargs):
"""
For the badges indicating the number of messages and questions to update with every refresh, we need to calculate
them every time we render a template. This function does these calculations and passes them as parameters to the
desired template.
:param template: html template the route function wants to render
:param kwargs: parameters given by the route function
:return: rendered template from flask.render_template with all required parameters
"""
user = current_user()
if user:
# If the user is logged in, we can calculate the values by getting the values from the database
num_of_messages = len(get_user_messages(user))
questions_to_me = get_questions_for_user(user)
num_of_questions_to_me = len(questions_to_me)
num_of_unanswered_questions_to_me = len([question for question in questions_to_me
if not question["answer"]])
questions_from_me = get_questions_from_user(user)
num_of_questions_from_me = len(questions_from_me)
num_of_unanswered_questions_from_me = len([question for question in questions_from_me
if not question["answer"]])
else:
# If no user is logged in, we don't need the values
num_of_messages = None
num_of_questions_to_me = None
num_of_unanswered_questions_to_me = None
num_of_questions_from_me = None
num_of_unanswered_questions_from_me = None
return render_template(template, num_of_messages=num_of_messages, num_of_questions_to_me=num_of_questions_to_me,
num_of_questions_from_me=num_of_questions_from_me,
num_of_unanswered_questions_to_me=num_of_unanswered_questions_to_me,
num_of_unanswered_questions_from_me=num_of_unanswered_questions_from_me, **kwargs)
@app.route("/robots.txt")
@app.route("/styles.css")
@app.route("/favicon.ico")
def get_static_from_root():
"""
Make robots.txt available to crawlers and styles.css as well as favicon.ico to browsers.
:return: desired static file
"""
return send_from_directory(app.static_folder, request.path[1:])
@app.route("/")
@is_logged_in
def home():
"""
Home page displaying the messages.
:return: index.html
"""
messages = get_user_messages(current_user())
# Display messages oldest to newest
messages = list(reversed(messages))
return render_my_template("index.html", messages=messages)
@app.route("/my_questions")
@is_logged_in
def my_questions():
"""
Overview of all questions the user asked and their answers if available.
:return: my_questions.html
"""
questions = get_questions_from_user(current_user())
# Display questions oldest to newest
if questions:
questions = list(reversed(questions))
message_answered_question()
return render_my_template("my_questions.html", questions=questions)
@app.route("/to_answer", methods=["POST", "GET"])
@is_logged_in
def to_answer():
"""
Overview of all questions assigned to the user, grouped by questions they haven't answered and those,
they already answered.
From this view they can choose which question to answer next and do so.
:return: to_answer.html
"""
questions = get_questions_for_user(current_user())
# Display in oldest to newest
if questions:
questions = list(reversed(questions))
unanswered_questions = [question for question in questions if not question["answer"]]
answered_questions = [question for question in questions if question not in unanswered_questions]
message_asked_question()
return render_my_template("to_answer.html", unanswered_questions=unanswered_questions,
answered_questions=answered_questions)
@app.route("/manage_friends")
@is_logged_in
def manage_friends():
"""
Overview of friends, functionality to remove some of them, ask them questions and send new friend requests.
:return: friends.html
"""
friends = get_friends(current_user())
num_of_friends = len(friends)
return render_my_template("friends.html", friends=friends, numOfFriends=num_of_friends)
@app.route("/get_usernames_list")
@is_logged_in
def get_usernames_list():
"""
NO VIEW
AJAX call to get usernames starting with a given substring that are not friends with the current user.
Used in user search to send friend requests
:return: json list of usernames starting with substring <startswith> or an empty list if no substring given
"""
startswith = request.args.get("startswith")
if not startswith:
# Do not expose all usernames. Return an empty list when no substring given
return jsonify([])
else:
usernames = get_usernames_starting_with(startswith)
usernames = [username[0] for username in usernames]
if current_user() in usernames:
usernames.remove(current_user())
friends = get_friends(current_user())
usernames = [username for username in usernames if username not in friends]
return jsonify(usernames)
@app.route("/add_friend", methods=["POST"])
@is_logged_in
def add_friend():
"""
NO VIEW
Logic function to add friend request to library and send notifications to other user.
:return: redirect back to /manage_friends
"""
username = request.form.get("username").lower()
user = current_user()
# other user has to be given, must exist and can't be the same as the user logged in.
if not username or username == user or not user_exists(username):
flash("User not found", "danger")
return redirect(url_for("manage_friends"))
# There can be no more than one friend request / friend relationship between the same users
if exists_friend_or_request(username, user):
flash("You already are friends with that user or it exists a friend request.", "warning")
return redirect(url_for("manage_friends"))
if not add_friend_request(user, username):
flash("An unexpected error occurred. Try again later.")
return redirect(url_for("manage_friends"))
# Send email notification if user wishes so and has a confirmed email.
if "friend_request" not in get_email_preferences_not(username):
send_user_email(username, "New friend request", html_friend_request_mail(user))
if not add_message(user, username, "friend_request"):
flash("An unexpected error occurred. Try again later.", "danger")
return redirect(url_for("manage_friends"))
flash("Friend request sent", "success")
return redirect(url_for("manage_friends"))
@app.route("/accept_friend_request", methods=["POST"])
@is_logged_in
def accept_friend_request():
"""
NO VIEW
Logic function to confirm friend request in database and send notifications.
:return: redirect to home (/)
"""
username = current_user()
if not confirm_friend(request.form.get("username").lower(), username) \
or not delete_message(request.form.get("message_id")):
flash("An error occurred. Please try again later", "danger")
return redirect(url_for("home"))
add_message(username, request.form.get("username").lower(), "accepted_friend_request")
# if they didn't opt out and confirmed their email, send them an email.
if "accepted_friend" not in get_email_preferences_not(request.form.get("username").lower()):
send_user_email(request.form.get("username").lower(), "New friend", html_accepted_friend_mail(username))
flash("Friend request accepted.", "success")
return redirect(url_for("home"))
@app.route("/decline_friend_request", methods=["POST"])
@is_logged_in
def decline_friend_request():
"""
NO VIEW
Logic function to remove friend request and message from database and notify user.
:return: redirect back to home (/)
"""
if not delete_message(request.form.get("message_id")) \
or not delete_friends_from_db(current_user(), request.form.get("username").lower()):
flash("An error occurred. Please try again later", "danger")
return redirect(url_for("home"))
add_message(current_user(), request.form.get("username").lower(), "declined_friend_request")
# No email is sent for declined requests
flash("Friend request declined.", "important")
return redirect(url_for("home"))
@app.route("/remove_friend", methods=["POST"])
@is_logged_in
def remove_friend():
"""
NO VIEW
Logic function to remove friendship from database and notify former friend.
:return: redirect back to /manage_friends
"""
username = request.form.get("username").lower()
if not username or not delete_friends_from_db(current_user(), username) \
or not delete_message(request.form.get("message_id")):
flash("An error occurred, please try again later", "danger")
return redirect(url_for("manage_friends"))
add_message(current_user(), username, "removed_friend")
# No email is sent for removed friends
flash("Friend removed", "success")
return redirect(url_for("manage_friends"))
@app.route("/discard_message", methods=["POST"])
@is_logged_in
def discard_message():
"""
NO VIEW
Logic function to remove read messages from database
:return: redirect back to home (/)
"""
if not delete_message(request.form.get("message_id")):
flash("An error occurred, please try again.", "danger")
return redirect(url_for("home"))
flash("Message deleted.", "success")
return redirect(url_for("home"))
@app.route("/ask_question", methods=["POST", "GET"])
@is_logged_in
def ask_question():
"""
View to create new question and assign it to a friend.
Adds question to database and notifies assigned friend.
:return: redirect to all asked questions (/my_questions) if successful and back to ask_question if not.
"""
if request.method == "POST":
friend = request.form.get("friend")
question = request.form.get("question")
if not friend or not question:
flash("Please supply all required parameters", "danger")
flash_form_data(request.form)
return redirect(url_for("ask_question"))
if not user_exists(friend):
flash("User does not exist", "danger")
flash_form_data(request.form)
return redirect(url_for("ask_question"))
if not add_question(current_user(), friend, question):
flash("An error occurred, please try again later", "danger")
flash_form_data(request.form)
return redirect(url_for("ask_question"))
add_message(current_user(), friend, "asked_question")
if "new_question" not in get_email_preferences_not(friend):
send_user_email(friend, "New question", html_new_question_mail(current_user()))
flash("Question asked", "success")
return redirect(url_for("my_questions"))
else:
# Get friends for dropdown with all friends to choose one to assign the question to
# If clicked to ask a specific friend, preselect them.
friends = get_friends(current_user())
friend = request.args.get("friend")
return render_my_template("ask_question.html", friends=friends, ask_friend=friend)
@app.route("/answer_question", methods=["POST", "GET"])
@is_logged_in
def answer_question():
"""
View to answer question the user was assigned to by a friend.
Adds answer to database and notifies friend
:return: answer_question.html from get
:return: redirect to /to_answer from post
"""
if request.method == "POST":
question_id = request.form.get("id")
question = get_question(question_id)
answer = request.form.get("answer")
if not question_id or not answer:
flash("Please supply all required parameters", "danger")
flash_form_data(request.form)
return redirect(url_for("to_answer"))
if not question_exists(question_id) or not question:
flash("Question not found", "danger")
flash_form_data(request.form)
return redirect(url_for("to_answer"))
if not add_answer(question_id, answer):
flash("An error occurred, please try again.", "danger")
flash_form_data(request.form)
            return redirect(url_for("to_answer"))
add_message(current_user(), question["sender"], "answered_question")
# send email if user didn't opt out and confirmed their email.
if "question_answered" not in get_email_preferences_not(question["sender"]):
send_user_email(question["sender"], "Question answered", html_question_answered(current_user()))
flash("Question answered", "success")
return redirect(url_for("to_answer"))
else:
question_id = request.args.get("id")
if not question_id:
flash("Illegal parameters", "danger")
return redirect(url_for("to_answer"))
question = get_question(int(question_id))
if not question:
flash("Question not found", "danger")
return redirect(url_for("to_answer"))
if question["answer"]:
flash("Question already answered.", "warning")
return redirect(url_for("to_answer"))
return render_my_template("answer_question.html", question=question)
@app.route("/message_asked_question", methods=["POST"])
@is_logged_in
def message_asked_question():
"""
    Triggered when the user clicks a message informing them about a newly assigned question:
    shows all questions assigned to them and deletes the "asked question" messages,
    since the user has now seen them.
:return: redirect to /to_answer
"""
delete_all_messages_asked_question(current_user())
return redirect(url_for("to_answer"))
@app.route("/message_answered_question", methods=["POST"])
@is_logged_in
def message_answered_question():
"""
    Triggered when the user clicks a message informing them about a new answer to one of
    their questions: shows all the questions they asked and deletes the "answered question"
    messages, since the user has now seen them.
:return: redirect to /my_questions
"""
delete_all_messages_answered_question(current_user())
return redirect(url_for("my_questions"))
@app.route("/account", methods=["POST", "GET"])
@is_logged_in
def account():
"""
View to manage account related details
1. Change password
2. Change email or add new email if the user didn't add one before
3. Update preferences for email notifications
:return: account.html
"""
if request.method == "POST":
# 1. Change password
if request.form.get("type") == "change_password":
old_password = request.form.get("old_password")
new_password = request.form.get("new_password")
confirmation = request.form.get("confirmation")
# Check given data and perform password change
if not old_password:
flash("Old password required", "warning")
return redirect(url_for("account"))
if not new_password:
flash("New password required", "warning")
return redirect(url_for("account"))
if not confirmation:
flash("Please confirm password", "warning")
return redirect(url_for("account"))
if not verify_password(current_user(), old_password):
flash("Wrong password", "warning")
return redirect(url_for("account"))
if not new_password == confirmation:
flash("Passwords do not match", "warning")
return redirect(url_for("account"))
if not update_user_hash(generate_password_hash(new_password), current_user()):
flash("An unexpected error occurred. Please try again later", "danger")
return redirect(url_for("account"))
flash("Password changed", "success")
return redirect(url_for("account"))
# 2. Change account email
if request.form.get("type") == "change_email":
new_email = request.form.get("new_email")
if not new_email:
flash("new email required", "warning")
return redirect(url_for("account"))
old_email = get_user_email(current_user())
# if the user never had an email in their account, we confirm as in the registration process
if not old_email:
if not update_user_email(current_user(), new_email):
flash("An unexpected error occurred. Please try again later", "danger")
return redirect(url_for("account"))
if not send_email(new_email, "Please confirm your email",
html_confirmation_email(generate_email_confirmation_link(
new_email, app.config["EMAIL_CONFIRMATION_SALT"]))):
flash("An error occurred. Please try again later.", "danger")
return redirect(url_for("home"))
flash("Email set", "success")
return redirect(url_for("account"))
# if there is already an email assigned to their account, we don't update this before they
# didn't confirm the email
else:
if not send_email(new_email, "Confirm new email",
html_change_mail_email(generate_change_email_link(old_email, new_email,
app.config["CHANGE_EMAIL_SALT"]))):
flash("An error occurred. Please try again later.", "danger")
return redirect(url_for("home"))
flash("Confirmation link sent", "success")
return redirect(url_for("account"))
# 3. Update email preferences
if request.form.get("type") == "email_preferences":
# Save preferences as comma separated list in string to database
email_preferences_not = []
if not request.form.get("friend_request"):
email_preferences_not.append("friend_request")
if not request.form.get("accepted_friend"):
email_preferences_not.append("accepted_friend")
if not request.form.get("new_question"):
email_preferences_not.append("new_question")
if not request.form.get("question_answered"):
email_preferences_not.append("question_answered")
if not update_email_preferences(current_user(), email_preferences_not):
flash("An error occurred, please try again later.", "danger")
return redirect(url_for("home"))
flash("Email preferences updated", "success")
return redirect(url_for("account"))
else:
# Show logged in user and email preferences in account view.
username = current_user()
email_preferences_not = get_email_preferences_not(username)
return render_my_template("account.html", username=username, email_preferences_not=email_preferences_not)
@app.route("/logout")
@is_logged_in
def logout():
"""
NO VIEW
Logic function to log user out
Removes session and redirects to / (which is redirected to /login as they are no longer logged in)
:return: redirect to home (/)
"""
if not logout_from_session():
flash("An error occurred, please try again", "danger")
return redirect(url_for("home"))
flash("You are now logged out", "success")
return redirect(url_for("home"))
@app.route("/login", methods=["POST", "GET"])
def login():
"""
View for user to log in.
:return: login.html from get
:return: redirect to home (/) if successful
:return: redirect back to /login if unsuccessful
"""
if request.method == "POST":
username = request.form.get("username").lower()
if not username:
flash("Username required", "warning")
return redirect(url_for("login"))
if not request.form.get("password"):
flash("Password required", "warning")
flash_form_data(request.form)
return redirect(url_for("login"))
if not verify_password(username, request.form.get("password")):
flash("Wrong username or password", "danger")
flash_form_data(request.form)
return redirect(url_for("login"))
if not login_to_session(username):
flash("An error occurred, please try again.", "danger")
flash_form_data(request.form)
return redirect(url_for("login"))
flash("Login successful", "success")
return redirect(url_for("home"))
else:
return render_my_template("login.html")
@app.route("/register", methods=["POST", "GET"])
def register():
"""
View to register user.
Logs user in automatically when successful.
:return: register.html from get
:return: redirect to home (/) when successful
:return: redirect back to /register when unsuccessful
"""
if request.method == "POST":
username = request.form.get("username").lower()
password = request.form.get("password")
email = request.form.get("email")
# Check given data
if not username:
flash("Username required", "warning")
flash_form_data(request.form)
return redirect(url_for("register"))
if not password:
flash("Password required", "warning")
flash_form_data(request.form)
return redirect(url_for("register"))
if not request.form.get("confirmation"):
flash("Please confirm password", "warning")
flash_form_data(request.form)
return redirect(url_for("register"))
if user_exists(username):
flash("Username already exists", "danger")
flash_form_data(request.form)
return redirect(url_for("register"))
if not password == request.form.get("confirmation"):
flash("Passwords do not match", "warning")
flash_form_data(request.form)
return redirect(url_for("register"))
if not check_password_requirements(password):
flash("Password does not meet requirements")
flash_form_data(request.form)
return redirect(url_for("register"))
# Register user
if not create_new_user(username, generate_password_hash(password)):
flash("An unexpected error occurred. Please try again later", "danger")
flash_form_data(request.form)
return redirect(url_for("register"))
# email is optional
if email and is_email(email):
update_user_email(username, email)
if not send_email(email, "Please confirm your email",
html_confirmation_email(generate_email_confirmation_link(
email, app.config["EMAIL_CONFIRMATION_SALT"]))):
flash("An error occurred. Please try again later.", "danger")
flash_form_data(request.form)
return redirect(url_for("register"))
# Log in automatically
if not login_to_session(username):
flash("An error occurred, please try again", "danger")
flash_form_data(request.form)
            return redirect(url_for("register"))
flash("You are successfully registered", "success")
return redirect(url_for("home"))
else:
return render_my_template("register.html")
@app.route("/confirm/<token>")
def confirm(token):
"""
Confirm email with link given in email
Update email confirmed in database
:param token: Token generated by itsdangerous
:return: redirect to home (/)
:return: bad_confirmation_link.html when unsuccessful
"""
try:
email = decrypt_token(token, app.config["EMAIL_CONFIRMATION_SALT"])
except Exception as e:
print("In app.confirm: ", e)
return render_my_template("bad_confirmation_link.html")
if not update_email_confirmed(email):
flash("An error occurred, please try again", "danger")
return redirect(url_for("home"))
flash("Email confirmed. Thank you", "success")
return redirect(url_for("home"))
@app.route("/change_email/<token>")
def change_email(token):
"""
Confirm email change in account that already had an email assigned to it.
Updates email in database
:param token: Token generated by itsdangerous
:return: redirect to home (/) when successful
:return: bad_change_email_link.html when unsuccessful
"""
try:
data = decrypt_token(token, app.config["CHANGE_EMAIL_SALT"])
old_email = data["old_email"]
new_email = data["new_email"]
if not update_user_email(get_username_by_email(old_email), new_email):
flash("An error occurred, please try again", "danger")
return redirect(url_for("home"))
flash("Email updated!", "success")
return redirect(url_for("home"))
except Exception as e:
print("In app.change_email: ", e)
return render_my_template("bad_change_email_link.html")
@app.route("/reset_password/<token>", methods=["POST", "GET"])
def reset_password(token):
"""
Reset password with password reset link.
:param token: token generated by itsdangerous
:return: reset_password.html from get if link valid
:return: bad_password_reset_link.html if link not valid
:return: redirect to /login if successful
:return: redirect back to reset_password if unsuccessful
"""
if request.method == "POST":
username = request.form.get("username").lower()
        password = request.form.get("password")
confirmation = request.form.get("confirmation")
if not username:
flash("Username required", "warning")
return render_my_template("reset_password.html", username=username)
if not password:
flash("Password required")
return render_my_template("reset_password.html", username=username)
if not confirmation:
flash("Please confirm password", "warning")
return render_my_template("reset_password.html", username=username)
if not password == confirmation:
flash("Passwords do not match", "warning")
return render_my_template("reset_password.html", username=username)
if not check_password_requirements(password):
flash("Password does not meet criteria", "warning")
return render_my_template("reset_password.html", username=username)
if not update_user_hash(generate_password_hash(password), username):
flash("An error occurred, please try again", "danger")
return render_my_template("reset_password.html", username=username)
flash("Password reset successfully", "success")
return redirect(url_for("login"))
else:
try:
username = decrypt_token(token, app.config["RESET_PASSWORD_SALT"])
return render_my_template("reset_password.html", username=username)
except Exception as e:
print("In app.reset_password: ", e)
return render_my_template("bad_password_reset_link.html")
@app.route("/request_password_reset", methods=["POST", "GET"])
def request_password_reset():
"""
View to enter username from forgot password? link.
Sends password reset mail to user if there is an email associated with their account.
:return: request_password_reset.html
"""
if request.method == "POST":
username = request.form.get("username").lower()
if not username:
flash("Username required", "warning")
return render_my_template("request_password_reset.html")
if not send_user_email(username, "Reset password",
html_reset_password_mail(generate_password_reset_link(
username, app.config["RESET_PASSWORD_SALT"]))):
flash("An error occurred. Probably the username doesn't exist or no email is associated with it.", "danger")
return render_my_template("request_password_reset.html")
flash("Reset link sent", "success")
return render_my_template("request_password_reset.html")
else:
return render_my_template("request_password_reset.html")
@app.teardown_appcontext
def close_db(exception):
"""
Close database on app closing.
:param exception: From app.teardown_appcontext, is passed to database.py
:return: None
"""
close_connection(exception)
if __name__ == '__main__':
app.run()
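# Illustrative sketch of driving the app with Flask's test client; it assumes config.Config
# and the database helpers are importable and set up, and uses the form field names
# handled by register() above:
#   client = app.test_client()
#   client.post("/register", data={"username": "alice", "password": "a-Strong-passw0rd",
#                                  "confirmation": "a-Strong-passw0rd"}, follow_redirects=True)
#   print(client.get("/").status_code)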
``` |
{
"source": "johannesrave/GH_CPython",
"score": 4
} |
#### File: GH_CPython/GrasshopperSyntax/curve.py
```python
import Grasshopper02 as gh
def AddArc(plane, radius, angle_degrees):
"""Adds an arc curve to the document
Parameters:
plane = plane on which the arc will lie. The origin of the plane will be
the center point of the arc. x-axis of the plane defines the 0 angle
direction.
radius = radius of the arc
angle_degrees = interval of arc
Returns:
id of the new curve object
"""
if not isinstance(plane, gh.Plane):
raise Exception("plane should be an instance of Plane")
elif not isinstance(radius, float):
raise Exception("radius should be an instance of float")
elif not isinstance(angle_degrees, float):
raise Exception("angle_degrees should be an instance of float")
else:
rc = gh.Curve('<Curve>','AddArc', plane, radius, angle_degrees, '</Curve>')
return rc
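# Illustrative sketch (argument values are made up): assuming the Grasshopper02 wrapper
# exposes a gh.Plane constructor,
#   arc = AddArc(gh.Plane(), 5.0, 90.0)
# returns a gh.Curve wrapping the tagged '<Curve>...AddArc...</Curve>' payload, which the
# GH_CPython component presumably resolves to an actual curve on the Grasshopper side.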
def AddArc3Pt(start, end, point_on_arc):
"""Adds a 3-point arc curve to the document
Parameters:
start, end = endpoints of the arc
point_on_arc = a point on the arc
Returns:
id of the new curve object
"""
if not (isinstance(start, gh.Point)):
raise Exception("start should be an instance of a Point")
elif not isinstance(end, gh.Point):
raise Exception("end should be an instance of a Point")
elif not isinstance(point_on_arc, gh.Point):
raise Exception("point_on_arc should be an instance of a Point")
else:
rc = gh.Curve('<Curve>','AddArc3Pt', start, end, point_on_arc, '</Curve>')
return rc
def AddArcPtTanPt(start, direction, end):
"""Adds an arc curve, created from a start point, a start direction, and an
end point, to the document
Returns:
id of the new curve object
"""
if not (isinstance(start, gh.Point)):
raise Exception("start should be an instance of a gh.Point")
elif not isinstance(end, gh.Point):
raise Exception("end should be an instance of a gh.Point")
elif not isinstance(direction, gh.Vector):
raise Exception("direction should be an instance of a gh.Vector")
else:
rc = gh.Curve('<Curve>','AddArcPtTanPt', start, direction, end, '</Curve>')
return rc
def AddBlendCurve(curves, parameters, reverses, continuities):
"""Makes a curve blend between two curves
Parameters:
curves = two curves
parameters = two curve parameters defining the blend end points
reverses = two boolean values specifying to use the natural or opposite direction of the curve
continuities = two numbers specifying continuity at end points
0 = position, 1 = tangency, 2 = curvature
Returns:
identifier of new curve on success
"""
if not isinstance(curves, list) \
or len(curves) != 2 \
or not isinstance(curves[0], gh.Curve) \
or not isinstance(curves[1], gh.Curve):
raise Exception("curves should be a list of two curves")
elif not isinstance(parameters, list) \
or len(parameters)!=2 \
or not isinstance(parameters[0], float) \
or not isinstance(parameters[1], float):
raise Exception("parameters should be a list of two floats defining the blend end points ")
elif not isinstance(reverses, list) \
or len(reverses)!= 2 \
or not isinstance(reverses[0], bool) \
or not isinstance(reverses[1], bool):
raise Exception("reverses should be a list of two boolean values specifying to use the natural or opposite direction of the curve")
elif not isinstance(continuities, list) \
or len(continuities)!= 2 \
or not isinstance(continuities[0],int) \
or not isinstance(continuities[1], int)\
or continuities[0]>2 or continuities[1]>2\
or continuities[0]<0 or continuities[1]<0 :
raise Exception("continuities should be a list of two numbers specifying continuity at end points 0 = position, 1 = tangency, 2 = curvature")
else:
rc = gh.Curve('<Curve>','AddBlendCurve', curves, parameters, reverses, continuities, '</Curve>')
return rc
def AddCircle(plane_or_center, radius):
"""Adds a circle curve to the document
Parameters:
plane_or_center = plane on which the circle will lie. If a point is
passed, this will be the center of the circle on the active
construction plane
radius = the radius of the circle
Returns:
id of the new curve object
"""
    if not isinstance(plane_or_center, gh.Plane) and not isinstance(plane_or_center, gh.Point):
        raise Exception("plane_or_center should be an instance of Plane or Point")
elif not isinstance(radius, float):
raise Exception("radius should be an instance of float")
else:
rc = gh.Curve('<Curve>','AddCircle', plane_or_center, radius,'</Curve>')
return rc
def AddCircle3Pt(first, second, third):
"""Adds a 3-point circle curve to the document
Parameters:
first, second, third = points on the circle
Returns:
id of the new curve object
"""
if not (isinstance(first, gh.Point)):
raise Exception("first should be an instance of a Point")
elif not isinstance(second, gh.Point):
raise Exception("second should be an instance of a Point")
elif not isinstance(third, gh.Point):
raise Exception("third should be an instance of a Point")
else:
rc = gh.Curve('<Curve>','AddCircle3Pt', first, second, third, '</Curve>')
return rc
def AddCurve(points, degree=3):
"""Adds a control points curve object to the document
Parameters:
points = a list of points
degree[opt] = degree of the curve
Returns:
id of the new curve object
"""
if not isinstance(points, list) or not len(points)>1:
raise Exception("points should be a list of more than one point")
elif not isinstance(degree, int):
raise Exception("degree should be an int representing the degree of the curve")
else:
for i in points:
if not isinstance(i, gh.Point):
raise Exception("points should be list of points")
rc = gh.Curve('<Curve>','AddCurve', points, degree, '</Curve>')
return rc
def AddEllipse(plane, radiusX, radiusY):
"""Adds an elliptical curve to the document
Parameters:
plane = the plane on which the ellipse will lie. The origin of
the plane will be the center of the ellipse
radiusX, radiusY = radius in the X and Y axis directions
Returns:
id of the new curve object if successful
"""
if not isinstance(plane, gh.Plane):
raise Exception("plane should be an instance of Plane")
elif not isinstance(radiusX, float) or not isinstance(radiusY, float):
raise Exception("radiusX, radiusY should be floats representing radius in the X and Y axis directions")
else:
rc = gh.Curve('<Curve>','AddEllipse', plane, radiusX, radiusY, '</Curve>')
return rc
def AddEllipse3Pt(center, second, third):
"""Adds a 3-point elliptical curve to the document
Parameters:
center = center point of the ellipse
second = end point of the x axis
third = end point of the y axis
Returns:
id of the new curve object if successful
"""
if not isinstance(center, gh.Point)or not isinstance(second, gh.Point) or not isinstance(third, gh.Point):
raise Exception("center, second and third should be instances of gh.Point")
else:
rc = gh.Curve('<Curve>','AddEllipse3Pt', center, second, third, '</Curve>')
return rc
def AddFilletCurve(curve0id, curve1id, radius=1.0, base_point0=None, base_point1=None):
"""Adds a fillet curve between two curve objects
Parameters:
curve0id = identifier of the first curve object
curve1id = identifier of the second curve object
radius [opt] = fillet radius
base_point0 [opt] = base point of the first curve. If omitted,
starting point of the curve is used
base_point1 [opt] = base point of the second curve. If omitted,
starting point of the curve is used
Returns:
id of the new curve object if successful
"""
    if not isinstance(curve0id, gh.Curve) or not isinstance(curve1id, gh.Curve):
        raise Exception("curve0id and curve1id should be instances of gh.Curve")
    if not isinstance(radius, float):
        raise Exception("radius should be a float number")
    if base_point0 is not None and not isinstance(base_point0, gh.Point):
        raise Exception("base_point0 should be an instance of gh.Point or None")
    if base_point1 is not None and not isinstance(base_point1, gh.Point):
        raise Exception("base_point1 should be an instance of gh.Point or None")
    rc = gh.Curve('<Curve>','AddFilletCurve', curve0id, curve1id, radius, base_point0, base_point1, '</Curve>')
    return rc
def AddInterpCrvOnSrf(surface_id, points):
"""Adds an interpolated curve object that lies on a specified
surface. Note, this function will not create periodic curves,
but it will create closed curves.
Parameters:
surface_id = identifier of the surface to create the curve on
points = list of 3D points that lie on the specified surface.
The list must contain at least 2 points
Returns:
id of the new curve object if successful
"""
    # Assumed: same tag-based gh.Curve call pattern as the functions above.
    rc = gh.Curve('<Curve>','AddInterpCrvOnSrf', surface_id, points, '</Curve>')
    return rc
def AddInterpCrvOnSrfUV(surface_id, points):
"""Adds an interpolated curve object based on surface parameters,
that lies on a specified surface. Note, this function will not
create periodic curves, but it will create closed curves.
Parameters:
surface_id = identifier of the surface to create the curve on
points = list of 2D surface parameters. The list must contain
at least 2 sets of parameters
Returns:
id of the new curve object if successful
"""
    # Assumed: same tag-based gh.Curve call pattern as the functions above.
    rc = gh.Curve('<Curve>','AddInterpCrvOnSrfUV', surface_id, points, '</Curve>')
    return rc
def AddInterpCurve(points, degree=3, knotstyle=0, start_tangent=None, end_tangent=None):
"""Adds an interpolated curve object to the document. Options exist to make
a periodic curve or to specify the tangent at the endpoints. The resulting
curve is a non-rational NURBS curve of the specified degree.
Parameters:
points = list containing 3D points to interpolate. For periodic curves,
if the final point is a duplicate of the initial point, it is
ignored. The number of control points must be >= (degree+1).
degree[opt] = The degree of the curve (must be >=1).
Periodic curves must have a degree >= 2. For knotstyle = 1 or 2,
the degree must be 3. For knotstyle = 4 or 5, the degree must be odd
knotstyle[opt]
0 Uniform knots. Parameter spacing between consecutive knots is 1.0.
1 Chord length spacing. Requires degree = 3 with arrCV1 and arrCVn1 specified.
2 Sqrt (chord length). Requires degree = 3 with arrCV1 and arrCVn1 specified.
3 Periodic with uniform spacing.
4 Periodic with chord length spacing. Requires an odd degree value.
5 Periodic with sqrt (chord length) spacing. Requires an odd degree value.
start_tangent [opt] = 3d vector that specifies a tangency condition at the
beginning of the curve. If the curve is periodic, this argument must be omitted.
end_tangent [opt] = 3d vector that specifies a tangency condition at the
end of the curve. If the curve is periodic, this argument must be omitted.
Returns:
id of the new curve object if successful
"""
    # Assumed: same tag-based gh.Curve call pattern as the functions above.
    rc = gh.Curve('<Curve>','AddInterpCurve', points, degree, knotstyle, start_tangent, end_tangent, '</Curve>')
    return rc
def AddLine(start, end):
"""Adds a line curve to the current model.
Parameters:
start, end = end points of the line
Returns:
id of the new curve object
"""
    # Assumed: same tag-based gh.Curve call pattern as the functions above.
    rc = gh.Curve('<Curve>','AddLine', start, end, '</Curve>')
    return rc
def AddNurbsCurve(points, knots, degree, weights=None):
"""Adds a NURBS curve object to the document
Parameters:
points = list containing 3D control points
knots = Knot values for the curve. The number of elements in knots must
equal the number of elements in points plus degree minus 1
degree = degree of the curve. Must be greater than or equal to 1
weights[opt] = weight values for the curve. Number of elements should
equal the number of elements in points. Values must be greater than 0
Returns:
id of the new curve object if successful
"""
return rc
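# Worked example of the knot-count rule above (editor's sketch, not from the
# original source): for a degree-3 curve with 4 control points, the knot vector
# needs len(points) + degree - 1 = 4 + 3 - 1 = 6 values, e.g.
#
#   points = [(0, 0, 0), (1, 2, 0), (3, 2, 0), (4, 0, 0)]
#   knots = [0, 0, 0, 1, 1, 1]      # clamped, non-periodic knot vector
#   curve_id = AddNurbsCurve(points, knots, degree=3)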
def AddPolyline(points, replace_id=None):
"""Adds a polyline curve to the current model
Parameters:
points = list of 3D points. Duplicate, consecutive points will be
removed. The list must contain at least two points. If the
list contains less than four points, then the first point and
last point must be different.
replace_id[opt] = If set to the id of an existing object, the object
will be replaced by this polyline
Returns:
id of the new curve object if successful
"""
return rc
def AddRectangle(plane, width, height):
"""Add a rectangular curve to the document
Parameters:
plane = plane on which the rectangle will lie
width, height = width and height of rectangle as measured along the plane's
x and y axes
Returns:
id of new rectangle
"""
return rc
def AddSpiral(point0, point1, pitch, turns, radius0, radius1=None):
"""Adds a spiral or helical curve to the document
Parameters:
point0 = helix axis start point or center of spiral
point1 = helix axis end point or point normal on spiral plane
pitch = distance between turns. If 0, then a spiral. If > 0 then the
distance between helix "threads"
turns = number of turns
radius0, radius1 = starting and ending radius
Returns:
id of new curve on success
"""
return rc
def AddSubCrv(curve_id, param0, param1):
"""Add a curve object based on a portion, or interval of an existing curve
object. Similar in operation to Rhino's SubCrv command
Parameters:
curve_id = identifier of a closed planar curve object
param0, param1 = first and second parameters on the source curve
Returns:
id of the new curve object if successful
"""
return rc
def ArcAngle(curve_id, segment_index=-1):
"""Returns the angle of an arc curve object.
Parameters:
curve_id = identifier of a curve object
segment_index [opt] = identifies the curve segment if
curve_id identifies a polycurve
Returns:
The angle in degrees if successful.
"""
return arc.AngleDegrees
def ArcCenterPoint(curve_id, segment_index=-1):
"""Returns the center point of an arc curve object
Parameters:
curve_id = identifier of a curve object
segment_index [opt] = identifies the curve segment if
curve_id identifies a polycurve
Returns:
The 3D center point of the arc if successful.
"""
return arc.Center
def ArcMidPoint(curve_id, segment_index=-1):
"""Returns the mid point of an arc curve object
Parameters:
curve_id = identifier of a curve object
segment_index [opt] = identifies the curve segment if
curve_id identifies a polycurve
Returns:
The 3D mid point of the arc if successful.
"""
return arc.MidPoint
def ArcRadius(curve_id, segment_index=-1):
"""Returns the radius of an arc curve object
Parameters:
curve_id = identifier of a curve object
segment_index [opt] = identifies the curve segment if
curve_id identifies a polycurve
Returns:
The radius of the arc if successful.
"""
return arc.Radius
#Point
def CircleCenterPoint(curve_id, segment_index=-1, return_plane=False):
"""Returns the center point of a circle curve object
Parameters:
curve_id = identifier of a curve object
segment_index [opt] = identifies the curve segment if
curve_id identifies a polycurve
return_plane [opt] = if True, the circle's plane is returned
Returns:
The 3D center point of the circle if successful.
The plane of the circle if return_plane is True
"""
return circle.Center
def CircleCircumference(curve_id, segment_index=-1):
"""Returns the circumference of a circle curve object
Parameters:
curve_id = identifier of a curve object
segment_index [opt] = identifies the curve segment if
curve_id identifies a polycurve
Returns:
The circumference of the circle if successful.
"""
return circle.Circumference
def CircleRadius(curve_id, segment_index=-1):
"""Returns the radius of a circle curve object
Parameters:
curve_id = identifier of a curve object
segment_index [opt] = identifies the curve segment if
curve_id identifies a polycurve
Returns:
The radius of the circle if successful.
"""
return circle.Radius
def CloseCurve(curve_id, tolerance=-1.0):
"""Closes an open curve object by making adjustments to the end points so
they meet at a point
Parameters:
curve_id = identifier of a curve object
tolerance[opt] = maximum allowable distance between start and end
point. If omitted, the current absolute tolerance is used
Returns:
id of the new curve object if successful
"""
return rc
def ClosedCurveOrientation(curve_id, direction=(0, 0, 1)):
"""Determine the orientation (counter-clockwise or clockwise) of a closed,
planar curve
Parameters:
curve_id = identifier of a curve object
direction[opt] = 3d vector that identifies up, or Z axis, direction of
the plane to test against
Returns:
1 if the curve's orientation is clockwise
-1 if the curve's orientation is counter-clockwise
0 if unable to compute the curve's orientation
"""
return int(orientation)
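# Illustrative usage (editor's sketch, not part of the original source): the
# return codes above can be mapped to a readable label, assuming curve_id
# identifies an existing closed, planar curve:
#
#   orientation = ClosedCurveOrientation(curve_id)   # +Z is the default "up"
#   label = {1: "clockwise", -1: "counter-clockwise", 0: "undetermined"}[orientation]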
def ConvertCurveToPolyline(curve_id, angle_tolerance=5.0, tolerance=0.01, delete_input=False, min_edge_length=0,
max_edge_length=0):
"""Convert curve to a polyline curve
Parameters:
curve_id = identifier of a curve object
angle_tolerance [opt] = The maximum angle between curve tangents at line
endpoints. If omitted, the angle tolerance is set to 5.0.
tolerance[opt] = The distance tolerance at segment midpoints. If omitted,
the tolerance is set to 0.01.
delete_input[opt] = Delete the curve object specified by curve_id. If
omitted, curve_id will not be deleted.
min_edge_length[opt] = Minimum segment length
max_edge_length[opt] = Maximum segment length
Returns:
The new curve if successful.
"""
return id
#point
def CurveArcLengthPoint(curve_id, length, from_start=True):
"""Returns the point on the curve that is a specified arc length
from the start of the curve.
Parameters:
curve_id = identifier of a curve object
length = The arc length from the start of the curve to evaluate.
from_start[opt] = If not specified or True, then the arc length point is
calculated from the start of the curve. If False, the arc length
point is calculated from the end of the curve.
Returns:
Point3d if successful
"""
def CurveArea(curve_id):
"""Returns area of closed planar curves. The results are based on the
current drawing units.
Parameters:
curve_id = The identifier of a closed, planar curve object.
Returns:
List of area information. The list will contain the following information:
Element Description
0 The area. If more than one curve was specified, the
value will be the cumulative area.
1 The absolute (+/-) error bound for the area.
"""
return mp.Area, mp.AreaError
def CurveAreaCentroid(curve_id):
"""Returns area centroid of closed, planar curves. The results are based
on the current drawing units.
Parameters:
curve_id = The identifier of a closed, planar curve object.
Returns:
Tuple of area centroid information containing the following information:
Element Description
0 The 3d centroid point. If more than one curve was specified,
the value will be the cumulative area.
1 A 3d vector with the absolute (+/-) error bound for the area
centroid.
"""
return mp.Centroid, mp.CentroidError
def CurveArrows(curve_id, arrow_style=None):
"""Enables or disables a curve object's annotation arrows
Parameters:
curve_id = identifier of a curve
arrow_style[opt] = the style of annotation arrow to be displayed
0 = no arrows
1 = display arrow at start of curve
2 = display arrow at end of curve
3 = display arrow at both start and end of curve
Returns:
if arrow_style is not specified, the current annotation arrow style
if arrow_style is specified, the previous arrow style
"""
def CurveBooleanDifference(curve_id_0, curve_id_1):
"""Calculates the difference between two closed, planar curves and
adds the results to the document. Note, curves must be coplanar.
Parameters:
curve_id_0 = identifier of the first curve object.
curve_id_1 = identifier of the second curve object.
Returns:
The identifiers of the new objects if successful, None on error.
"""
return curves
def CurveBooleanIntersection(curve_id_0, curve_id_1):
"""Calculates the intersection of two closed, planar curves and adds
the results to the document. Note, curves must be coplanar.
Parameters:
curve_id_0 = identifier of the first curve object.
curve_id_1 = identifier of the second curve object.
Returns:
The identifiers of the new objects.
"""
return curves
def CurveBooleanUnion(curve_id):
"""Calculate the union of two or more closed, planar curves and
add the results to the document. Note, curves must be coplanar.
Parameters:
curve_id = list of identifiers of two or more closed, planar curves
Returns:
The identifiers of the new objects.
"""
return curves
def CurveBrepIntersect(curve_id, brep_id, tolerance=None):
"""Intersects a curve object with a brep object. Note, unlike the
CurveSurfaceIntersection function, this function works on trimmed surfaces.
Parameters:
curve_id = identifier of a curve object
brep_id = identifier of a brep object
tolerance [opt] = distance tolerance at segment midpoints.
If omitted, the current absolute tolerance is used.
Returns:
List of identifiers for the newly created intersection curve and
point objects if successful. None on error.
"""
return curves, points
def CurveClosestObject(curve_id, object_ids):
"""Returns the 3D point locations on two objects where they are closest to
each other. Note, this function provides similar functionality to that of
Rhino's ClosestPt command.
Parameters:
curve_id = identifier of the curve object to test
object_ids = list of identifiers of point cloud, curve, surface, or
polysurface to test against
Returns:
Tuple containing the results of the closest point calculation.
The elements are as follows:
0 The identifier of the closest object.
1 The 3-D point that is closest to the closest object.
2 The 3-D point that is closest to the test curve.
"""
def CurveClosestPoint(curve_id, test_point, segment_index=-1):
"""Returns parameter of the point on a curve that is closest to a test point.
Parameters:
curve_id = identifier of a curve object
point = sampling point
segment_index [opt] = curve segment if curve_id identifies a polycurve
Returns:
The parameter of the closest point on the curve
"""
return t
def CurveContourPoints(curve_id, start_point, end_point, interval=None):
"""Returns the 3D point locations calculated by contouring a curve object.
Parameters:
curve_id = identifier of a curve object.
start_point = 3D starting point of a center line.
end_point = 3D ending point of a center line.
interval [opt] = The distance between contour curves. If omitted,
the interval will be equal to the diagonal distance of the object's
bounding box divided by 50.
Returns:
A list of 3D points, one for each contour
"""
return list(rc)
def CurveCurvature(curve_id, parameter):
"""Returns the curvature of a curve at a parameter. See the Rhino help for
details on curve curvature
Parameters:
curve_id = identifier of the curve
parameter = parameter to evaluate
Returns:
Tuple of curvature information on success
element 0 = point at specified parameter
element 1 = tangent vector
element 2 = center of radius of curvature
element 3 = radius of curvature
element 4 = curvature vector
None on failure
"""
return point, tangent, center, radius, cv
def CurveCurveIntersection(curveA, curveB=None, tolerance=-1):
"""Calculates intersection of two curve objects.
Parameters:
curveA = identifier of the first curve object.
curveB = identifier of the second curve object. If omitted, then a
self-intersection test will be performed on curveA.
tolerance [opt] = absolute tolerance in drawing units. If omitted,
the document's current absolute tolerance is used.
Returns:
List of tuples of intersection information if successful.
The list will contain one or more of the following elements:
Element Type Description
[n][0] Number The intersection event type, either Point (1) or Overlap (2).
[n][1] Point3d If the event type is Point (1), then the intersection point
on the first curve. If the event type is Overlap (2), then
intersection start point on the first curve.
[n][2] Point3d If the event type is Point (1), then the intersection point
on the first curve. If the event type is Overlap (2), then
intersection end point on the first curve.
[n][3] Point3d If the event type is Point (1), then the intersection point
on the second curve. If the event type is Overlap (2), then
intersection start point on the second curve.
[n][4] Point3d If the event type is Point (1), then the intersection point
on the second curve. If the event type is Overlap (2), then
intersection end point on the second curve.
[n][5] Number If the event type is Point (1), then the first curve parameter.
If the event type is Overlap (2), then the start value of the
first curve parameter range.
[n][6] Number If the event type is Point (1), then the first curve parameter.
If the event type is Overlap (2), then the end value of the
first curve parameter range.
[n][7] Number If the event type is Point (1), then the second curve parameter.
If the event type is Overlap (2), then the start value of the
second curve parameter range.
[n][8] Number If the event type is Point (1), then the second curve parameter.
If the event type is Overlap (2), then the end value of the
second curve parameter range.
"""
def CurveDegree(curve_id, segment_index=-1):
"""Returns the degree of a curve object.
Parameters:
curve_id = identifier of a curve object.
segment_index [opt] = the curve segment if curve_id identifies a polycurve.
Returns:
The degree of the curve if successful. None on error.
"""
return curve.Degree
def CurveDeviation(curve_a, curve_b):
"""Returns the minimum and maximum deviation between two curve objects
Parameters:
curve_a, curve_b = identifiers of two curves
Returns:
tuple of deviation information on success
element 0 = curve_a parameter at maximum overlap distance point
element 1 = curve_b parameter at maximum overlap distance point
element 2 = maximum overlap distance
element 3 = curve_a parameter at minimum overlap distance point
element 4 = curve_b parameter at minimum overlap distance point
element 5 = minimum distance between curves
None on error
"""
return maxa, maxb, maxd, mina, minb, mind
def CurveDim(curve_id, segment_index=-1):
"""Returns the dimension of a curve object
Parameters:
curve_id = identifier of a curve object.
segment_index [opt] = the curve segment if curve_id identifies a polycurve.
Returns:
The dimension of the curve if successful. None on error.
"""
return curve.Dimension
def CurveDirectionsMatch(curve_id_0, curve_id_1):
"""Tests if two curve objects are generally in the same direction or if they
would be more in the same direction if one of them were flipped. When testing
curve directions, both curves must be either open or closed - you cannot test
one open curve and one closed curve.
Parameters:
curve_id_0 = identifier of first curve object
curve_id_1 = identifier of second curve object
Returns:
True if the curve directions match, otherwise False.
"""
return Rhino.Geometry.Curve.DoDirectionsMatch(curve0, curve1)
def CurveDiscontinuity(curve_id, style):
"""Search for a derivatitive, tangent, or curvature discontinuity in
a curve object.
Parameters:
curve_id = identifier of curve object
style = The type of continuity to test for. The types of
continuity are as follows:
Value Description
1 C0 - Continuous function
2 C1 - Continuous first derivative
3 C2 - Continuous first and second derivative
4 G1 - Continuous unit tangent
5 G2 - Continuous unit tangent and curvature
Returns:
List 3D points where the curve is discontinuous
"""
return points
def CurveDomain(curve_id, segment_index=-1):
"""Returns the domain of a curve object.
Parameters:
curve_id = identifier of the curve object
segment_index[opt] = the curve segment if curve_id identifies a polycurve.
Returns:
list containing the domain's minimum and maximum values if successful
"""
return [dom.Min, dom.Max]
def CurveEditPoints(curve_id, return_parameters=False, segment_index=-1):
"""Returns the edit, or Greville, points of a curve object.
For each curve control point, there is a corresponding edit point.
Parameters:
curve_id = identifier of the curve object
return_parameters[opt] = if True, return as a list of curve parameters.
If False, return as a list of 3d points
segment_index[opt] = the curve segment if curve_id identifies a polycurve
Returns:
curve parameters or 3d points on success
None on error
"""
return nc.GrevillePoints()
def CurveEndPoint(curve_id, segment_index=-1):
"""Returns the end point of a curve object
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
The 3-D end point of the curve if successful.
"""
return curve.PointAtEnd
def CurveFilletPoints(curve_id_0, curve_id_1, radius=1.0, base_point_0=None, base_point_1=None, return_points=True):
"""Find points at which to cut a pair of curves so that a fillet of a
specified radius fits. A fillet point is a pair of points (point0, point1)
such that there is a circle of radius tangent to curve curve0 at point0 and
tangent to curve curve1 at point1. Of all possible fillet points, this
function returns the one which is the closest to the base point base_point_0,
base_point_1. Distance from the base point is measured by the sum of arc
lengths along the two curves.
Parameters:
curve_id_0 = identifier of the first curve object.
curve_id_1 = identifier of the second curve object.
radius [opt] = The fillet radius. If omitted, a radius
of 1.0 is specified.
base_point_0 [opt] = The base point on the first curve.
If omitted, the starting point of the curve is used.
base_point_1 [opt] = The base point on the second curve. If omitted,
the starting point of the curve is used.
return_points [opt] = If True (Default), then fillet points are
returned. Otherwise, a fillet curve is created and
its identifier is returned.
Returns:
If return_points is True, then a list of point and vector values
if successful. The list elements are as follows:
0 A point on the first curve at which to cut (arrPoint0).
1 A point on the second curve at which to cut (arrPoint1).
2 The fillet plane's origin (3-D point). This point is also
the center point of the fillet
3 The fillet plane's X axis (3-D vector).
4 The fillet plane's Y axis (3-D vector).
5 The fillet plane's Z axis (3-D vector).
If return_points is False, then the identifier of the fillet curve
if successful.
None if not successful, or on error.
"""
return scriptcontext.errorhandler()
def CurveFrame(curve_id, parameter, segment_index=-1):
"""Returns the plane at a parameter of a curve. The plane is based on the
tangent and curvature vectors at a parameter.
Parameters:
curve_id = identifier of the curve object.
parameter = parameter to evaluate.
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
The plane at the specified parameter if successful.
None if not successful, or on error.
"""
return scriptcontext.errorhandler()
def CurveKnotCount(curve_id, segment_index=-1):
"""Returns the knot count of a curve object.
Parameters:
curve_id = identifier of the curve object.
segment_index [opt] = the curve segment if curve_id identifies a polycurve.
Returns:
The number of knots if successful.
None if not successful or on error.
"""
return nc.Knots.Count
def CurveKnots(curve_id, segment_index=-1):
"""Returns the knots, or knot vector, of a curve object
Parameters:
curve_id = identifier of the curve object.
segment_index [opt] = the curve segment if curve_id identifies a polycurve.
Returns:
knot values if successful.
None if not successful or on error.
"""
return rc
def CurveLength(curve_id, segment_index=-1, sub_domain=None):
"""Returns the length of a curve object.
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
sub_domain [opt] = list of two numbers identifying the sub-domain of the
curve on which the calculation will be performed. The two parameters
(sub-domain) must be non-decreasing. If omitted, the length of the
entire curve is returned.
Returns:
The length of the curve if successful.
None if not successful, or on error.
"""
return curve.GetLength()
def CurveMidPoint(curve_id, segment_index=-1):
"""Returns the mid point of a curve object.
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
The 3D mid point of the curve if successful.
None if not successful, or on error.
"""
return scriptcontext.errorhandler()
def CurveNormal(curve_id, segment_index=-1):
"""Returns the normal direction of the plane in which a planar curve object lies.
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
The 3D normal vector if successful.
None if not successful, or on error.
"""
return scriptcontext.errorhandler()
def CurveNormalizedParameter(curve_id, parameter):
"""Converts a curve parameter to a normalized curve parameter;
one that ranges between 0-1
Parameters:
curve_id = identifier of the curve object
parameter = the curve parameter to convert
Returns:
normalized curve parameter
"""
return curve.Domain.NormalizedParameterAt(parameter)
def CurveParameter(curve_id, parameter):
"""Converts a normalized curve parameter to a curve parameter;
one within the curve's domain
Parameters:
curve_id = identifier of the curve object
parameter = the normalized curve parameter to convert
Returns:
curve parameter
"""
return curve.Domain.ParameterAt(parameter)
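# Note on the two conversions above (editor's sketch, not part of the original
# source): CurveNormalizedParameter and CurveParameter are inverses of each
# other, assuming curve_id identifies an existing curve:
#
#   t_mid = CurveParameter(curve_id, 0.5)   # domain parameter at the curve's middle
#   assert abs(CurveNormalizedParameter(curve_id, t_mid) - 0.5) < 1e-9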
def CurvePerpFrame(curve_id, parameter):
"""Returns the perpendicular plane at a parameter of a curve. The result
is relatively parallel (zero-twisting) plane
Parameters:
curve_id = identifier of the curve object
parameter = parameter to evaluate
Returns:
Plane on success
None on error
"""
if rc: return plane
def CurvePlane(curve_id, segment_index=-1):
"""Returns the plane in which a planar curve lies. Note, this function works
only on planar curves.
Parameters:
curve_id = identifier of the curve object
segment_index[opt] = the curve segment if curve_id identifies a polycurve
Returns:
The plane in which the curve lies if successful.
None if not successful, or on error.
"""
return scriptcontext.errorhandler()
def CurvePointCount(curve_id, segment_index=-1):
"""Returns the control points count of a curve object.
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
Number of control points if successful.
None if not successful
"""
def CurvePoints(curve_id, segment_index=-1):
"""Returns the control points, or control vertices, of a curve object.
If the curve is a rational NURBS curve, the euclidean control vertices
are returned.
"""
def CurveRadius(curve_id, test_point, segment_index=-1):
"""Returns the radius of curvature at a point on a curve.
Parameters:
curve_id = identifier of the curve object
test_point = sampling point
segment_index[opt] = the curve segment if curve_id identifies a polycurve
Returns:
The radius of curvature at the point on the curve if successful.
None if not successful, or on error.
"""
def CurveSeam(curve_id, parameter):
"""Adjusts the seam, or start/end, point of a closed curve.
Parameters:
curve_id = identifier of the curve object
parameter = The parameter of the new start/end point.
Note, if successful, the resulting curve's
domain will start at dblParameter.
Returns:
True or False indicating success or failure.
"""
def CurveStartPoint(curve_id, segment_index=-1, point=None):
"""Returns the start point of a curve object
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
point [opt] = new start point
Returns:
The 3D starting point of the curve if successful.
"""
def CurveSurfaceIntersection(curve_id, surface_id, tolerance=-1, angle_tolerance=-1):
"""Calculates intersection of a curve object with a surface object.
Note, this function works on the untrimmed portion of the surface.
Parameters:
curve_id = The identifier of the first curve object.
surface_id = The identifier of the surface object to intersect with the curve.
tolerance [opt] = The absolute tolerance in drawing units. If omitted,
the document's current absolute tolerance is used.
angle_tolerance [opt] = angle tolerance in degrees. The angle
tolerance is used to determine when the curve is tangent to the
surface. If omitted, the document's current angle tolerance is used.
Returns:
Two-dimensional list of intersection information if successful.
The list will contain one or more of the following elements:
Element Type Description
(n, 0) Number The intersection event type, either Point(1) or Overlap(2).
(n, 1) Point3d If the event type is Point(1), then the intersection point
on the first curve. If the event type is Overlap(2), then
intersection start point on the first curve.
(n, 2) Point3d If the event type is Point(1), then the intersection point
on the first curve. If the event type is Overlap(2), then
intersection end point on the first curve.
(n, 3) Point3d If the event type is Point(1), then the intersection point
on the surface. If the event type is Overlap(2), then
intersection start point on the surface.
(n, 4) Point3d If the event type is Point(1), then the intersection point
on the surface. If the event type is Overlap(2), then
intersection end point on the surface.
(n, 5) Number If the event type is Point(1), then the first curve parameter.
If the event type is Overlap(2), then the start value of the
first curve parameter range.
(n, 6) Number If the event type is Point(1), then the first curve parameter.
If the event type is Overlap(2), then the end value of the
curve parameter range.
(n, 7) Number If the event type is Point(1), then the U surface parameter.
If the event type is Overlap(2), then the U surface parameter
for curve at (n, 5).
(n, 8) Number If the event type is Point(1), then the V surface parameter.
If the event type is Overlap(2), then the V surface parameter
for curve at (n, 5).
(n, 9) Number If the event type is Point(1), then the U surface parameter.
If the event type is Overlap(2), then the U surface parameter
for curve at (n, 6).
(n, 10) Number If the event type is Point(1), then the V surface parameter.
If the event type is Overlap(2), then the V surface parameter
for curve at (n, 6).
"""
def CurveTangent(curve_id, parameter, segment_index=-1):
"""Returns a 3D vector that is the tangent to a curve at a parameter.
Parameters:
curve_id = identifier of the curve object
parameter = parameter to evaluate
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
A 3D vector if successful.
None on error.
"""
def CurveWeights(curve_id, segment_index=-1):
"""Returns list of weights that are assigned to the control points of a curve
Parameters:
curve_id = identifier of the curve object
segment_index[opt] = the curve segment if curve_id identifies a polycurve
Returns:
The weight values of the curve if successful.
None if not successful, or on error.
"""
def DivideCurve(curve_id, segments, create_points=False, return_points=True):
"""Divides a curve object into a specified number of segments.
Parameters:
curve_id = identifier of the curve object
segments = The number of segments.
create_points [opt] = Create the division points. If omitted or False,
points are not created.
return_points [opt] = If omitted or True, points are returned.
If False, then a list of curve parameters are returned.
Returns:
If return_points is not specified or True, then a list containing 3D
division points.
If return_points is False, then an array containing division curve
parameters.
None if not successful, or on error.
"""
def DivideCurveEquidistant(curve_id, distance, create_points=False, return_points=True):
"""Divides a curve such that the linear distance between the points is equal.
Parameters:
curve_id = the object's identifier
distance = linear distance between division points
create_points[opt] = create the division points
return_points[opt] = If True, return a list of points.
If False, return a list of curve parameters
Returns:
A list of points or curve parameters based on the value of return_points
None on error
"""
def DivideCurveLength(curve_id, length, create_points=False, return_points=True):
"""Divides a curve object into segments of a specified length.
Parameters:
curve_id = identifier of the curve object
length = The length of each segment.
create_points [opt] = Create the division points. If omitted or False,
points are not created.
return_points [opt] = If omitted or True, points are returned.
If False, then a list of curve parameters are returned.
Returns:
If return_points is not specified or True, then a list containing 3D
division points if successful.
If return_points is False, then an array containing division curve
parameters if successful.
None if not successful, or on error.
"""
def EllipseCenterPoint(curve_id):
"""Returns the center point of an elliptical-shaped curve object.
Parameters:
curve_id = identifier of the curve object.
Returns:
The 3D center point of the ellipse if successful.
"""
def EllipseQuadPoints(curve_id):
"""Returns the quadrant points of an elliptical-shaped curve object.
Parameters:
curve_id = identifier of the curve object.
Returns:
Four 3D points identifying the quadrants of the ellipse
"""
def EvaluateCurve(curve_id, t, segment_index=-1):
"""Evaluates a curve at a parameter and returns a 3D point
Parameters:
curve_id = identifier of the curve object
t = the parameter to evaluate
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
a 3-D point if successful
"""
def ExplodeCurves(curve_ids, delete_input=False):
"""Explodes, or un-joins, one curves. Polycurves will be exploded into curve
segments. Polylines will be exploded into line segments. ExplodeCurves will
return the curves in topological order.
Parameters:
curve_ids = the curve object(s) to explode.
delete_input[opt] = Delete input objects after exploding.
Returns:
List identifying the newly created curve objects
"""
def ExtendCurve(curve_id, extension_type, side, boundary_object_ids):
"""Extends a non-closed curve object by a line, arc, or smooth extension
until it intersects a collection of objects.
Parameters:
curve_id: identifier of curve to extend
extension_type: 0 = line, 1 = arc, 2 = smooth
side: 0=extend from the start of the curve, 1=extend from the end of the curve
boundary_object_ids: curve, surface, and polysurface objects to extend to
Returns:
The identifier of the new object if successful.
None if not successful
"""
def ExtendCurveLength(curve_id, extension_type, side, length):
"""Extends a non-closed curve by a line, arc, or smooth extension for a
specified distance
Parameters:
curve_id: curve to extend
extension_type: 0 = line, 1 = arc, 2 = smooth
side: 0=extend from start of the curve, 1=extend from end of the curve
length: distance to extend
Returns:
The identifier of the new object
None if not successful
"""
def ExtendCurvePoint(curve_id, side, point):
"""Extends a non-closed curve by smooth extension to a point
Parameters:
curve_id: curve to extend
side: 0=extend from start of the curve, 1=extend from end of the curve
point: point to extend to
Returns:
The identifier of the new object if successful.
None if not successful, or on error.
"""
def FairCurve(curve_id, tolerance=1.0):
"""Fairs a curve. Fair works best on degree 3 (cubic) curves. Fair attempts
to remove large curvature variations while limiting the geometry changes to
be no more than the specified tolerance. Sometimes several applications of
this method are necessary to remove nasty curvature problems.
Parameters:
curve_id = curve to fair
tolerance[opt] = fairing tolerance
Returns:
True or False indicating success or failure
"""
def FitCurve(curve_id, degree=3, distance_tolerance=-1, angle_tolerance=-1):
"""Reduces number of curve control points while maintaining the curve's same
general shape. Use this function for replacing curves with many control
points. For more information, see the Rhino help for the FitCrv command.
Parameters:
curve_id = Identifier of the curve object
degree [opt] = The curve degree, which must be greater than 1.
The default is 3.
distance_tolerance [opt] = The fitting tolerance. If distance_tolerance
is not specified or <= 0.0, the document absolute tolerance is used.
angle_tolerance [opt] = The kink smoothing tolerance in degrees. If
angle_tolerance is 0.0, all kinks are smoothed. If angle_tolerance
is > 0.0, kinks smaller than angle_tolerance are smoothed. If
angle_tolerance is not specified or < 0.0, the document angle
tolerance is used for the kink smoothing.
Returns:
The identifier of the new object
None if not successful, or on error.
"""
def InsertCurveKnot(curve_id, parameter, symmetrical=False):
"""Inserts a knot into a curve object
Parameters:
curve_id = identifier of the curve object
parameter = parameter on the curve
symmetrical[opt] = if True, then knots are added on both sides of
the center of the curve
Returns:
True or False indicating success or failure
"""
def IsArc(curve_id, segment_index=-1):
"""Verifies an object is an arc curve
Parameters:
curve_id = Identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False
"""
def IsCircle(curve_id, tolerance=None):
"""Verifies an object is a circle curve
Parameters:
curve_id = Identifier of the curve object
tolerance [opt] = If the curve is not a circle, then the tolerance used
to determine whether or not the NURBS form of the curve has the
properties of a circle. If omitted, Rhino's internal zero tolerance is used
Returns:
True or False
"""
def IsCurve(object_id):
"Verifies an object is a curve"
def IsCurveClosable(curve_id, tolerance=None):
"""Decide if it makes sense to close off the curve by moving the end point
to the start point based on start-end gap size and length of curve as
approximated by chord defined by 6 points
Parameters:
curve_id = identifier of the curve object
tolerance[opt] = maximum allowable distance between start point and end
point. If omitted, the document's current absolute tolerance is used
Returns:
True or False
"""
def IsCurveClosed(object_id):
"""Verifies an object is a closed curve object
Parameters:
object_id = identifier of the curve object
Returns:
True or False
"""
def IsCurveInPlane(object_id, plane=None):
"""Test a curve to see if it lies in a specific plane
Parameters:
object_id = the object's identifier
plane[opt] = plane to test. If omitted, the active construction plane is used
Returns:
True or False
"""
def IsCurveLinear(object_id, segment_index=-1):
"""Verifies an object is a linear curve
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False indicating success or failure
"""
def IsCurvePeriodic(curve_id, segment_index=-1):
"""Verifies an object is a periodic curve object
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False
"""
def IsCurvePlanar(curve_id, segment_index=-1):
"""Verifies an object is a planar curve
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False indicating success or failure
"""
def IsCurveRational(curve_id, segment_index=-1):
"""Verifies an object is a rational NURBS curve
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False indicating success or failure
"""
def IsEllipse(object_id, segment_index=-1):
"""Verifies an object is an elliptical-shaped curve
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False indicating success or failure
"""
def IsLine(object_id, segment_index=-1):
"""Verifies an object is a line curve
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False indicating success or failure
"""
def IsPointOnCurve(object_id, point, segment_index=-1):
"""Verifies that a point is on a curve
Parameters:
curve_id = identifier of the curve object
point = the test point
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False indicating success or failure
"""
def IsPolyCurve(object_id, segment_index=-1):
"""Verifies an object is a PolyCurve curve
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False
"""
def IsPolyline(object_id, segment_index=-1):
"""Verifies an object is a Polyline curve object
Parameters:
curve_id = identifier of the curve object
segment_index [opt] = the curve segment if curve_id identifies a polycurve
Returns:
True or False
"""
def JoinCurves(object_ids, delete_input=False, tolerance=None):
"""Joins multiple curves together to form one or more curves or polycurves
Parameters:
object_ids = list of multiple curves
delete_input[opt] = delete input objects after joining
tolerance[opt] = join tolerance. If omitted, 2.1 * document absolute
tolerance is used
Returns:
List of Guids representing the new curves
"""
def LineFitFromPoints(points):
"""Returns a line that was fit through an array of 3D points
Parameters:
points = a list of at least two 3D points
Returns:
line on success
"""
def MakeCurveNonPeriodic(curve_id, delete_input=False):
"""Makes a periodic curve non-periodic. Non-periodic curves can develop
kinks when deformed
Parameters:
curve_id = identifier of the curve object
delete_input[opt] = delete the input curve
Returns:
id of the new or modified curve if successful
None on error
"""
def MeanCurve(curve0, curve1, tolerance=None):
"""Creates an average curve from two curves
Parameters:
curve0, curve1 = identifiers of two curves
tolerance[opt] = angle tolerance used to match kinks between curves
Returns:
id of the new or modified curve if successful
None on error
"""
def MeshPolyline(polyline_id):
"""Creates a polygon mesh object based on a closed polyline curve object.
The created mesh object is added to the document
Parameters:
polyline_id = identifier of the polyline curve object
Returns:
identifier of the new mesh object
None on error
"""
def OffsetCurve(object_id, direction, distance, normal=None, style=1):
"""Offsets a curve by a distance. The offset curve will be added to Rhino
Parameters:
object_id = identifier of a curve object
direction = point describing direction of the offset
distance = distance of the offset
normal[opt] = normal of the plane in which the offset will occur.
If omitted, the normal of the active construction plane will be used
style[opt] = the corner style
0 = None, 1 = Sharp, 2 = Round, 3 = Smooth, 4 = Chamfer
Returns:
List of ids for the new curves on success
None on error
"""
def OffsetCurveOnSurface(curve_id, surface_id, distance_or_parameter):
"""Offset a curve on a surface. The source curve must lie on the surface.
The offset curve or curves will be added to Rhino
Parameters:
curve_id, surface_id = curve and surface identifiers
distance_or_parameter = If a single number is passed, then this is the
distance of the offset. Based on the curve's direction, a positive value
will offset to the left and a negative value will offset to the right.
If a tuple of two values is passed, this is interpreted as the surface
U,V parameter that the curve will be offset through
Returns:
Identifiers of the new curves if successful
None on error
"""
def PlanarClosedCurveContainment(curve_a, curve_b, plane=None, tolerance=None):
"""Determines the relationship between the regions bounded by two coplanar
simple closed curves
Parameters:
curve_a, curve_b = identifiers of two planar, closed curves
plane[opt] = test plane. If omitted, the currently active construction
plane is used
tolerance[opt] = if omitted, the document absolute tolerance is used
Returns:
a number identifying the relationship if successful
0 = the regions bounded by the curves are disjoint
1 = the two curves intersect
2 = the region bounded by curve_a is inside of curve_b
3 = the region bounded by curve_b is inside of curve_a
None if not successful
"""
def PlanarCurveCollision(curve_a, curve_b, plane=None, tolerance=None):
"""Determines if two coplanar curves intersect
Parameters:
curve_a, curve_b = identifiers of two planar curves
plane[opt] = test plane. If omitted, the currently active construction
plane is used
tolerance[opt] = if omitted, the document absolute tolerance is used
Returns:
True if the curves intersect; otherwise False
"""
def PointInPlanarClosedCurve(point, curve, plane=None, tolerance=None):
"""Determines if a point is inside of a closed curve, on a closed curve, or
outside of a closed curve
Parameters:
point = test point
curve = identifier of a curve object
plane[opt] = plane containing the closed curve and point. If omitted,
the currently active construction plane is used
tolerance[opt] = if omitted, the document absolute tolerance is used
Returns:
number identifying the result if successful
0 = point is outside of the curve
1 = point is inside of the curve
2 = point is on the curve
"""
def PolyCurveCount(curve_id, segment_index=-1):
"""Returns the number of curve segments that make up a polycurve"""
def PolylineVertices(curve_id, segment_index=-1):
"Returns the vertices of a polyline curve on success"
def ProjectCurveToMesh(curve_ids, mesh_ids, direction):
"""Projects one or more curves onto one or more surfaces or meshes
Parameters:
curve_ids = identifiers of curves to project
mesh_ids = identifiers of meshes to project onto
direction = projection direction
Returns:
list of identifiers
"""
def ProjectCurveToSurface(curve_ids, surface_ids, direction):
"""Projects one or more curves onto one or more surfaces or polysurfaces
Parameters:
curve_ids = identifiers of curves to project
surface_ids = identifiers of surfaces to project onto
direction = projection direction
Returns:
list of identifiers
"""
def RebuildCurve(curve_id, degree=3, point_count=10):
"""Rebuilds a curve to a given degree and control point count. For more
information, see the Rhino help for the Rebuild command.
Parameters:
curve_id = identifier of the curve object
degree[opt] = new degree (must be greater than 0)
point_count [opt] = new point count, which must be bigger than degree.
Returns:
True of False indicating success or failure
"""
def ReverseCurve(curve_id):
"""Reverses the direction of a curve object. Same as Rhino's Dir command
Parameters:
curve_id = identifier of the curve object
Returns:
True or False indicating success or failure
"""
def SimplifyCurve(curve_id, flags=0):
"Replace a curve with a geometrically equivalent polycurve"
def SplitCurve(curve_id, parameter, delete_input=True):
"""Splits, or divides, a curve at a specified parameter. The parameter must
be in the interior of the curve's domain
Parameters:
curve_id = the curve to split
parameter = one or more parameters to split the curve at
delete_input[opt] = delete the input curve
Returns:
list of new curves on success
None on error
"""
def TrimCurve(curve_id, interval, delete_input=True):
"""Trims a curve by removing portions of the curve outside a specified interval
Parameters:
curve_id = the curve to trim
interval = two numbers identifying the interval to keep. Portions of
the curve before domain[0] and after domain[1] will be removed. If the
input curve is open, the interval must be increasing. If the input
curve is closed and the interval is decreasing, then the portion of
the curve across the start and end of the curve is returned
delete_input[opt] = delete the input curve
Returns:
identifier of the new curve on success
None on failure
"""
```
#### File: GH_CPython/GrasshopperSyntax/Grasshopper.py
```python
import sys
version = sys.version_info[0]
class Line:
def __init__(self, *args):
"""Adds new line using two input points, or two input lists or 6 input doubles
:param args:
"""
if len(args) == 2:
if isinstance(args[0], Point) and isinstance(args[1], Point):
self.X1 = args[0].X
self.Y1 = args[0].Y
self.Z1 = args[0].Z
self.X2 = args[1].X
self.Y2 = args[1].Y
self.Z2 = args[1].Z
elif isinstance(args[0], list) and isinstance(args[1], list):
self.X1 = args[0][0]
self.Y1 = args[0][1]
self.Z1 = args[0][2]
self.X2 = args[1][0]
self.Y2 = args[1][1]
self.Z2 = args[1][2]
elif version == 2:
if isinstance(args[0], basestring) and isinstance(args[1], basestring):
pointa = Point(args[0])
pointb = Point(args[1])
self.X1 = pointa.X
self.Y1 = pointa.Y
self.Z1 = pointa.Z
self.X2 = pointb.X
self.Y2 = pointb.Y
self.Z2 = pointb.Z
elif version == 3:
if isinstance(args[0], str) and isinstance(args[1], str):
pointa = Point(args[0])
pointb = Point(args[1])
self.X1 = pointa.X
self.Y1 = pointa.Y
self.Z1 = pointa.Z
self.X2 = pointb.X
self.Y2 = pointb.Y
self.Z2 = pointb.Z
elif len(args) == 6:
self.X1 = args[0]
self.Y1 = args[1]
self.Z1 = args[2]
self.X2 = args[3]
self.Y2 = args[4]
self.Z2 = args[5]
'''def addLine(self):
return "gCPy.Line(" + str(self.X1) + ", " \
+ str(self.Y1) + ", " \
+ str(self.Z1) + ", " \
+ str(self.X2) + ", " \
+ str(self.Y2) + ", " \
+ str(self.Z2) + ")"'''
def addLine(self):
return ["<Line>", self.X1, self.Y1, self.Z1, self.X2, self.Y2, self.Z2]
def __repr__(self):
return str(["<Line>",self.X1,self.Y1,self.Z1,self.X2,self.Y2,self.Z2,"</line>"])
def length(self):
return ((self.X2 - self.X1) ** 2 + (self.Y2 - self.Y1) ** 2 + (self.Z2 - self.Z1) ** 2) ** 0.5
def pointOnLine(self, parameter=0.5):
return Point((self.X2 - self.X1) * parameter + self.X1, \
(self.Y2 - self.Y1) * parameter + self.Y1, \
(self.Z2 - self.Z1) * parameter + self.Z1)
def __str__(self):
return str(["<Line>", self.X1, self.Y1, self.Z1, self.X2, self.Y2, self.Z2, "</Line>"])
allLines = []
for i in range(200):
l = Line(i, i+1, i+2, i-1, i-2, i-5)
allLines.append(l)
print(allLines[5])
class Point:
"""Adds new point in cartesian coordinates using 3 doubles x, y, z as input
:param:
x (double): is the x coordinate
y (double): is the y coordinate
z (double): is the z coordinate
:return:
representation of 3d point
:Example:
import Grasshopper as gh
point1 = gh.Point(0, 0, 0)
point2 = gh.Point(1., 1., 0.)
"""
def __init__(self, x=0., y=0., z=0.):
if version == 2:
if isinstance(x, list):
self.X = x[0]
self.Y = x[1]
self.Z = x[2]
elif isinstance(x, basestring):
new_vars = []
x = x.replace("gCPy.Point(", "").replace(")", "").lstrip().rstrip()
variables = x.split(",")
for i in variables:
new_vars.append(float(i))
self.X = new_vars[0]
self.Y = new_vars[1]
self.Z = new_vars[2]
else:
self.X = x
self.Y = y
self.Z = z
elif version == 3:
if isinstance(x, list):
self.X = x[0]
self.Y = x[1]
self.Z = x[2]
elif isinstance(x, str):
new_vars = []
x = x.replace("gCPy.Point(", "").replace(")", "").lstrip().rstrip()
variables = x.split(",")
for i in variables:
new_vars.append(float(i))
self.X = new_vars[0]
self.Y = new_vars[1]
self.Z = new_vars[2]
else:
self.X = x
self.Y = y
self.Z = z
self.addPoint = "gCPy.Point(" + str(x) + "," + str(y) + "," + str(z) + ")"
def __repr__(self):
return "gCPy.Point(" + str(self.X) + "," + str(self.Y) + "," + str(self.Z) + ")"
def __str__(self):
return "gCPy.Point(" + str(self.X) + "," + str(self.Y) + "," + str(self.Z) + ")"
class Surface:
def __init__(self, *args):
if len(args) == 4:
if isinstance(args[0], Point) and isinstance(args[1], Point) and isinstance(args[2], Point) and isinstance(args[3], Point):
self.P1 = args[0]
self.P2 = args[1]
self.P3 = args[2]
self.P4 = args[3]
self.addSurface = "gCPy.Surface("+ str(args[0].X) + "," \
+ str(args[0].Y) + "," \
+ str(args[0].Z) + "," \
+ str(args[1].X) + "," \
+ str(args[1].Y) + "," \
+ str(args[1].Z) + "," \
+ str(args[2].X) + "," \
+ str(args[2].Y) + "," \
+ str(args[2].Z) + "," \
+ str(args[3].X) + "," \
+ str(args[3].Y) + "," \
+ str(args[3].Z) + "," \
+ ")"
elif len(args) == 2:
if isinstance(args[0],Line) and isinstance(args[1], Line):
pass
else:
print("you have to create surface from 4 points")
########################### DEFINE METHODS ################################
def addLine(*args):
"""Adds new line between two points
:param: args
pointa, pointb (Point3): instance of points
[x1, y1, z1], [x2, y2, z2]: two points as lists
x1, y1, z1, x2, y2, z2 : 6 doubles represent coordinates of line end points.
:return:
"""
if len(args) == 2:
return Line(args[0], args[1])
elif len(args) == 6:
return Line(args[0], args[1], args[2], args[3], args[4], args[5])
def addPoint(*args):
"""
:param args:
:return:
"""
if len(args) == 1:
return Point(args[0])
elif len(args) == 3:
return Point(args[0], args[1], args[2])
def addSurface(*args):
return Surface(args[0], args[1], args[2], args[3]).addSurface
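# Illustrative usage of the helper functions above (editor's sketch, not part of
# the original source):
#
#   p0 = addPoint(0, 0, 0)
#   p1 = addPoint(10, 0, 0)
#   ln = addLine(p0, p1)             # Line between two Point instances
#   print(ln.length())               # -> 10.0
#   srf = addSurface(p0, p1, addPoint(10, 10, 0), addPoint(0, 10, 0))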
if __name__ == '__main__':
print(__name__)
```
#### File: GH_CPython/GrasshopperSyntax/plane.py
```python
def DistanceToPlane(plane, point):
"""Returns the distance from a 3D point to a plane
Parameters:
plane (plane): the plane
point (point): List of 3 numbers or Point3d
Returns:
number: The distance if successful, otherwise None
Example:
import rhinoscriptsyntax as rs
point = rs.GetPoint("Point to test")
if point:
plane = rs.ViewCPlane()
if plane:
distance = rs.DistanceToPlane(plane, point)
if distance is not None:
print("Distance to plane: ", distance)
See Also:
Distance
PlaneClosestPoint
"""
def EvaluatePlane(plane, parameter):
"""Evaluates a plane at a U,V parameter
Parameters:
plane (plane): the plane to evaluate
parameter ([number, number]): list of two numbers defining the U,V parameter to evaluate
Returns:
point: Point3d on success
Example:
import rhinoscriptsyntax as rs
view = rs.CurrentView()
plane = rs.ViewCPlane(view)
point = rs.EvaluatePlane(plane, (5,5))
rs.AddPoint( point )
See Also:
PlaneClosestPoint
"""
def IntersectPlanes(plane1, plane2, plane3):
"""Calculates the intersection of three planes
Parameters:
plane1 (plane): the 1st plane to intersect
plane2 (plane): the 2nd plane to intersect
plane3 (plane): the 3rd plane to intersect
Returns:
point: the intersection point between the 3 planes on success
None: on error
Example:
import rhinoscriptsyntax as rs
plane1 = rs.WorldXYPlane()
plane2 = rs.WorldYZPlane()
plane3 = rs.WorldZXPlane()
point = rs.IntersectPlanes(plane1, plane2, plane3)
if point: rs.AddPoint(point)
See Also:
LineLineIntersection
LinePlaneIntersection
PlanePlaneIntersection
"""
def MovePlane(plane, origin):
"""Moves the origin of a plane
Parameters:
plane (plane): Plane or ConstructionPlane
origin (point): Point3d or list of three numbers
Returns:
plane: moved plane
Example:
import rhinoscriptsyntax as rs
origin = rs.GetPoint("CPlane origin")
if origin:
plane = rs.ViewCPlane()
plane = rs.MovePlane(plane,origin)
rs.ViewCPlane(plane)
See Also:
PlaneFromFrame
PlaneFromNormal
RotatePlane
"""
def PlaneClosestPoint(plane, point, return_point=True):
"""Returns the point on a plane that is closest to a test point.
Parameters:
plane (plane): The plane
point (point): The 3-D point to test.
return_point (bool, optional): If omitted or True, then the point on the plane
that is closest to the test point is returned. If False, then the
parameter of the point on the plane that is closest to the test
point is returned.
Returns:
point: If return_point is omitted or True, then the 3-D point
point: If return_point is False, then an array containing the U,V parameters
of the point
None: if not successful, or on error.
Example:
import rhinoscriptsyntax as rs
point = rs.GetPoint("Point to test")
if point:
plane = rs.ViewCPlane()
if plane:
print(rs.PlaneClosestPoint(plane, point))
See Also:
DistanceToPlane
EvaluatePlane
"""
def PlaneCurveIntersection(plane, curve, tolerance=None):
"""Intersect an infinite plane and a curve object
Parameters:
plane (plane): The plane to intersect.
curve (guid): The identifier of the curve object
tolerance (number, optional): The intersection tolerance. If omitted, the document's absolute tolerance is used.
Returns:
A list of intersection information tuple if successful. The list will contain one or more of the following tuple:
Element Type Description
[0] Number The intersection event type, either Point (1) or Overlap (2).
[1] Point3d If the event type is Point (1), then the intersection point on the curve.
If the event type is Overlap (2), then intersection start point on the curve.
[2] Point3d If the event type is Point (1), then the intersection point on the curve.
If the event type is Overlap (2), then intersection end point on the curve.
[3] Point3d If the event type is Point (1), then the intersection point on the plane.
If the event type is Overlap (2), then intersection start point on the plane.
[4] Point3d If the event type is Point (1), then the intersection point on the plane.
If the event type is Overlap (2), then intersection end point on the plane.
[5] Number If the event type is Point (1), then the curve parameter.
If the event type is Overlap (2), then the start value of the curve parameter range.
[6] Number If the event type is Point (1), then the curve parameter.
If the event type is Overlap (2), then the end value of the curve parameter range.
[7] Number If the event type is Point (1), then the U plane parameter.
If the event type is Overlap (2), then the U plane parameter for curve at (n, 5).
[8] Number If the event type is Point (1), then the V plane parameter.
If the event type is Overlap (2), then the V plane parameter for curve at (n, 5).
[9] Number If the event type is Point (1), then the U plane parameter.
If the event type is Overlap (2), then the U plane parameter for curve at (n, 6).
[10] Number If the event type is Point (1), then the V plane parameter.
If the event type is Overlap (2), then the V plane parameter for curve at (n, 6).
None: on error
Example:
import rhinoscriptsyntax as rs
curve = rs.GetObject("Select curve", rs.filter.curve)
if curve:
plane = rs.WorldXYPlane()
intersections = rs.PlaneCurveIntersection(plane, curve)
if intersections:
for intersection in intersections:
rs.AddPoint(intersection[1])
See Also:
IntersectPlanes
PlanePlaneIntersection
PlaneSphereIntersection
"""
def PlaneEquation(plane):
"""Returns the equation of a plane as a tuple of four numbers. The standard
equation of a plane with a non-zero vector is Ax+By+Cz+D=0
Parameters:
plane (plane): the plane to deconstruct
Returns:
tuple(number, number, number, number): containing four numbers that represent the coefficients of the equation (A, B, C, D) if successful
None: if not successful
Example:
import rhinoscriptsyntax as rs
plane = rs.ViewCPlane()
equation = rs.PlaneEquation(plane)
print("A =", equation[0])
print("B =", equation[1])
print("C =", equation[2])
print("D =", equation[3])
See Also:
PlaneFromFrame
PlaneFromNormal
PlaneFromPoints
"""
def PlaneFitFromPoints(points):
"""Returns a plane that was fit through an array of 3D points.
Parameters:
points (point): An array of 3D points.
Returns:
plane: The plane if successful
None: if not successful
Example:
import rhinoscriptsyntax as rs
points = rs.GetPoints()
if points:
plane = rs.PlaneFitFromPoints(points)
if plane:
magX = plane.XAxis.Length
magY = plane.YAxis.Length
rs.AddPlaneSurface( plane, magX, magY )
See Also:
PlaneFromFrame
PlaneFromNormal
PlaneFromPoints
"""
def PlaneFromFrame(origin, x_axis, y_axis):
"""Construct a plane from a point, and two vectors in the plane.
Parameters:
origin (point): A 3D point identifying the origin of the plane.
x_axis (vector): A non-zero 3D vector in the plane that determines the X axis
direction.
y_axis (vector): A non-zero 3D vector not parallel to x_axis that is used
to determine the Y axis direction. Note, y_axis does not
have to be perpendicular to x_axis.
Returns:
plane: The plane if successful.
Example:
import rhinoscriptsyntax as rs
origin = rs.GetPoint("CPlane origin")
if origin:
xaxis = (1,0,0)
yaxis = (0,0,1)
plane = rs.PlaneFromFrame( origin, xaxis, yaxis )
rs.ViewCPlane(None, plane)
See Also:
MovePlane
PlaneFromNormal
PlaneFromPoints
RotatePlane
"""
def PlaneFromNormal(origin, normal, xaxis=None):
"""Creates a plane from an origin point and a normal direction vector.
Parameters:
origin (point): A 3D point identifying the origin of the plane.
normal (vector): A 3D vector identifying the normal direction of the plane.
xaxis (vector, optional): optional vector defining the plane's x-axis
Returns:
plane: The plane if successful.
Example:
import rhinoscriptsyntax as rs
origin = rs.GetPoint("CPlane origin")
if origin:
direction = rs.GetPoint("CPlane direction")
if direction:
normal = direction - origin
normal = rs.VectorUnitize(normal)
rs.ViewCPlane( None, rs.PlaneFromNormal(origin, normal) )
See Also:
MovePlane
PlaneFromFrame
PlaneFromPoints
RotatePlane
"""
def PlaneFromPoints(origin, x, y):
"""Creates a plane from three non-colinear points
Parameters:
origin (point): origin point of the plane
x, y (point): points on the plane's x and y axes
Returns:
plane: The plane if successful, otherwise None
Example:
import rhinoscriptsyntax as rs
corners = rs.GetRectangle()
if corners:
rs.ViewCPlane( rs.PlaneFromPoints(corners[0], corners[1], corners[3]))
See Also:
PlaneFromFrame
PlaneFromNormal
"""
def PlanePlaneIntersection(plane1, plane2):
"""Calculates the intersection of two planes
Parameters:
plane1 (plane): the 1st plane to intersect
plane2 (plane): the 2nd plane to intersect
Returns:
line: a line with two 3d points identifying the starting/ending points of the intersection
None: on error
Example:
import rhinoscriptsyntax as rs
plane1 = rs.WorldXYPlane()
plane2 = rs.WorldYZPlane()
line = rs.PlanePlaneIntersection(plane1, plane2)
if line: rs.AddLine(line[0], line[1])
See Also:
IntersectPlanes
LineLineIntersection
LinePlaneIntersection
"""
def PlaneSphereIntersection(plane, sphere_plane, sphere_radius):
"""Calculates the intersection of a plane and a sphere
Parameters:
plane (plane): the plane to intersect
sphere_plane (plane): equatorial plane of the sphere. origin of the plane is
the center of the sphere
sphere_radius (number): radius of the sphere
Returns:
list(number, point|plane, number): of intersection results
Element Type Description
[0] number The type of intersection, where 0 = point and 1 = circle.
[1] point or plane If a point intersection, then a Point3d identifying the 3-D intersection location.
If a circle intersection, then the circle's plane. The origin of the plane will be the center point of the circle.
[2] number If a circle intersection, then the radius of the circle.
None: on error
Example:
import rhinoscriptsyntax as rs
plane = rs.WorldXYPlane()
radius = 10
results = rs.PlaneSphereIntersection(plane, plane, radius)
if results:
if results[0]==0:
rs.AddPoint(results[1])
else:
rs.AddCircle(results[1], results[2])
See Also:
IntersectPlanes
LinePlaneIntersection
PlanePlaneIntersection
"""
def PlaneTransform(plane, xform):
"""Transforms a plane
Parameters:
plane (plane): Plane to transform
xform (transform): Transformation to apply
Returns:
plane:the resulting plane if successful
None: if not successful
Example:
import rhinoscriptsyntax as rs
plane = rs.ViewCPlane()
xform = rs.XformRotation2(45.0, plane.ZAxis, plane.Origin)
plane = rs.PlaneTransform(plane, xform)
rs.ViewCPlane(None, plane)
See Also:
PlaneFromFrame
PlaneFromNormal
PlaneFromPoints
"""
def RotatePlane(plane, angle_degrees, axis):
"""Rotates a plane
Parameters:
plane (plane): Plane to rotate
angle_degrees (number): rotation angle in degrees
axis (vector): Axis of rotation or list of three numbers
Returns:
plane: rotated plane on success
Example:
import rhinoscriptsyntax as rs
plane = rs.ViewCPlane()
rotated = rs.RotatePlane(plane, 45.0, plane.XAxis)
rs.ViewCPlane( None, rotated )
See Also:
MovePlane
PlaneFromFrame
PlaneFromNormal
"""
def WorldXYPlane():
"""Returns Rhino's world XY plane
Returns:
plane: Rhino's world XY plane
Example:
import rhinoscriptsyntax as rs
view = rs.CurrentView()
rs.ViewCPlane( view, rs.WorldXYPlane() )
See Also:
WorldYZPlane
WorldZXPlane
"""
def WorldYZPlane():
"""Returns Rhino's world YZ plane
Returns:
plane: Rhino's world YZ plane
Example:
import rhinoscriptsyntax as rs
view = rs.CurrentView()
rs.ViewCPlane( view, rs.WorldYZPlane() )
See Also:
WorldXYPlane
WorldZXPlane
"""
def WorldZXPlane():
"""Returns Rhino's world ZX plane
Returns:
plane: Rhino's world ZX plane
Example:
import rhinoscriptsyntax as rs
view = rs.CurrentView()
rs.ViewCPlane( view, rs.WorldZXPlane() )
See Also:
WorldXYPlane
WorldYZPlane
"""
``` |
{
"source": "johannesreichard/pi-433-remote",
"score": 2
} |
#### File: johannesreichard/pi-433-remote/pi-433-remote.py
```python
from flask import Flask, render_template
from subprocess import call
app = Flask(__name__)
SENDER_CONFIG = '10101'
SEND_CONFIG = ['-u', '-s']
RECEIVER = {
'A': '1',
'B': '2',
'C': '3',
'D': '4',
}
STATES = {
'on':'1',
'off':'0'
}
def send_signal(receiver, state):
call(['./send', SENDER_CONFIG, *SEND_CONFIG, receiver, state])
@app.route('/')
def index():
return render_template('index.html')
@app.route('/on/', methods=['POST'])
def on_all():
for _, value in RECEIVER.items():
send_signal(value, STATES['on'])
return 'all on'
@app.route('/off/', methods=['POST'])
def off_all():
for _, value in RECEIVER.items():
send_signal(value, STATES['off'])
return 'all off'
@app.route('/receiver/<receiver_id>/<state_id>/', methods=['POST'])
def switch(receiver_id, state_id):
if receiver_id not in RECEIVER.keys():
return '{} not in {}'.format(receiver_id, RECEIVER.keys()), 400
elif state_id not in STATES.keys():
return '{} not in {}'.format(state_id, STATES.keys()), 400
else:
send_signal(RECEIVER[receiver_id], STATES[state_id])
return '{} {}'.format(receiver_id, state_id)
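# Usage sketch (hedged; assumes the compiled `send` binary of a 433 MHz sender project
# sits next to this script and is executable by the Raspberry Pi user running Flask):
#   FLASK_APP=pi-433-remote.py flask run --host=0.0.0.0
#   curl -X POST http://<pi-address>:5000/receiver/A/on/   # switch socket A on
#   curl -X POST http://<pi-address>:5000/off/             # switch all sockets off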
``` |
{
"source": "Johannes-R-Schmid/eo-learn",
"score": 3
} |
#### File: eolearn/geometry/utilities.py
```python
import numpy as np
from rasterio import features, transform
from shapely.geometry import Polygon
from eolearn.core import EOTask
class VectorToRaster(EOTask):
"""
Task burns into one of the EOPatch's features geo-referenced shapes given in provided Geopandas DataFrame.
:param feature: A tuple of feature type and feature name, e.g. (FeatureType.MASK, 'cloud_mask')
:type feature: (FeatureType, str)
:param vector_data: Vector data
:type vector_data: geopandas.GeoDataFrame
:param raster_value: Value of raster pixels which are contained inside of vector polygons
:type raster_value: int or float
:param raster_shape: Can be a tuple in form of (height, width) of an existing feature from which the shape will be
taken e.g. (FeatureType.MASK, 'IS_DATA')
:type raster_shape: (int, int) or (FeatureType, str)
:param raster_dtype: `numpy` data type of the obtained raster array
:type raster_dtype: numpy.dtype
:param no_data_value: Value of raster pixels which are outside of vector polygons
:type no_data_value: int or float
"""
def __init__(self, feature, vector_data, raster_value, raster_shape, raster_dtype=np.uint8, no_data_value=0):
self.feature_type, self.feature_name = next(iter(self._parse_features(feature)))
self.vector_data = vector_data
self.raster_value = raster_value
self.raster_shape = raster_shape
self.raster_dtype = raster_dtype
self.no_data_value = no_data_value
def _get_submap(self, eopatch):
"""
Returns a new geopandas dataframe with same structure as original one (columns) except that
it contains only polygons that are contained within the given bbox.
:param eopatch: input EOPatch
:type eopatch: EOPatch
:return: GeoDataFrame containing only the polygons clipped to the EOPatch bounding box
:rtype: geopandas.GeoDataFrame
"""
bbox = Polygon(eopatch.bbox.get_polygon())
filtered_data = self.vector_data[self.vector_data.geometry.intersects(bbox)].copy(deep=True)
filtered_data.geometry = filtered_data.geometry.intersection(bbox)
return filtered_data
def _get_shape(self, eopatch):
if isinstance(self.raster_shape, (tuple, list)) and len(self.raster_shape) == 2:
if isinstance(self.raster_shape[0], int) and isinstance(self.raster_shape[1], int):
return self.raster_shape
feature_type, feature_name = next(self._parse_features(self.raster_shape)(eopatch))
return eopatch.get_spatial_dimension(feature_type, feature_name)
raise ValueError('Could not determine shape of the raster image')
def execute(self, eopatch):
""" Execute function which adds new vector layer to the EOPatch
:param eopatch: input EOPatch
:type eopatch: EOPatch
:return: New EOPatch with added vector layer
:rtype: EOPatch
"""
bbox_map = self._get_submap(eopatch)
height, width = self._get_shape(eopatch)
dst_transform = transform.from_bounds(*eopatch.bbox, width=width, height=height)
if self.feature_name in eopatch[self.feature_type]:
raster = eopatch[self.feature_type][self.feature_name].squeeze()
else:
raster = np.ones((height, width), dtype=self.raster_dtype) * self.no_data_value
if not bbox_map.empty:
features.rasterize([(bbox_map.cascaded_union.buffer(0), self.raster_value)], out=raster,
transform=dst_transform, dtype=self.raster_dtype)
eopatch[self.feature_type][self.feature_name] = raster[..., np.newaxis]
return eopatch
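# Hedged usage sketch (the feature names and the GeoDataFrame `gdf` are illustrative,
# not taken from this repository): burn polygons into a new timeless mask, taking the
# raster shape from an existing mask feature of the patch.
#   from eolearn.core import FeatureType
#   task = VectorToRaster((FeatureType.MASK_TIMELESS, 'RASTERIZED_GDF'), gdf,
#                         raster_value=1, raster_shape=(FeatureType.MASK, 'IS_DATA'))
#   eopatch = task.execute(eopatch)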
```
#### File: Johannes-R-Schmid/eo-learn/install_all.py
```python
import sys
import subprocess
SUBPACKAGE_LIST = ['core',
'coregistration',
'features',
'geometry',
'io',
'mask',
'ml_tools']
def pip_command(name, args):
subprocess.check_call([sys.executable, '-m', 'pip', 'install'] + args + ['./{}'.format(name)])
if __name__ == '__main__':
for subpackage in SUBPACKAGE_LIST:
pip_command(subpackage, sys.argv[1:])
``` |
{
"source": "johannesruetten/ChargingEnvironment",
"score": 2
} |
#### File: envs/custom_env_dir/conv_optim.py
```python
import cvxpy as cvx
import numpy as np
import mosek
from envs.custom_env_dir.data_handler import DataHandler
import gym
import os
''' CALCULATE THEORETICAL OPTIMUM BY MEANS OF CONVEX OPTIMIZATION ASSUMING COMPLETE KNOWLEDGE OF FUTURE DATA '''
class ConvOptim():
def run_optimizer(self, store_dir, benchmark, supervised_training_set, game_collection, supervised_test_set, development):
# Initialize charging environment with given EV data
env = gym.make('ChargingEnv-v0', game_collection=game_collection,
battery_capacity=24, charging_rate=6,
penalty_coefficient=12, obs='benchmark')
# Sample each day 10 times for benchmark and test set
if benchmark:
n_games = len(game_collection)*10
test = True
if development:
filename = 'Theoretical_limit_DEV'
else:
filename = 'Theoretical_limit_TEST'
elif supervised_test_set:
test = True
n_games = len(game_collection)*10
filename = 'Supervised_test'
elif supervised_training_set:
test = False
n_games = 50000  # increased for the imitation learning data set
filename = 'Supervised_train'
# Create lists to store optimization results
price_list, input_price_list, arr_list, soc_list, action_list, dates, day_cats, \
nextday_cats, starts, ends, scores, avg_scores, eps_history, pen_history, steps_array, \
final_soc, t_step, t_sin, t_cos, t_saw1, t_saw2 , t0, t1, t2, t3, t4, t5, t6, t7, t8, \
t9, t10, t11, t12, t13, t14, t15, t16, t17, t18, t19, t20, t21, t22, t23, d1, d2, d3, d4 \
= [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], \
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
for i in range(n_games):
# Create lists to store optimization results for each episode/game
episode_prices, episode_input_prices, episode_soc, episode_actions, episode_day_cats, \
episode_nextday_cats, calc_actions, episode_t_step, episode_t_sin, episode_t_cos, \
episode_t_saw1, episode_t_saw2 , episode_t0, episode_t1, episode_t2, episode_t3, \
episode_t4, episode_t5, episode_t6, episode_t7, episode_t8, episode_t9, episode_t10, \
episode_t11, episode_t12, episode_t13, episode_t14, episode_t15, episode_t16, episode_t17, \
episode_t18, episode_t19, episode_t20, episode_t21, episode_t22, episode_t23, \
episode_d1, episode_d2, episode_d3, episode_d4 \
= [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], \
[], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
# Get initial data from the environment
reward=0
observation = env.reset(test, i)
score = 0
initial_soc = int(round(env.soc*100,0))
steps = env.n_steps
prices = env.hourly_prices['Spot'][env.start_ind+168:env.end_ind+168].to_list()
# Define objective function
action = cvx.Variable(steps, integer=True)
soc = cvx.Variable(steps, integer=True)
objective = cvx.Minimize(prices@action)
# Define constraints
constraints = [action >=-25,
action <=25,
soc == initial_soc + cvx.cumsum(action),
soc>=0,
soc<=100,
soc[steps-1]==100]
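# Reading of the constraints (units inferred from the 24 kWh battery and 6 kW charging
# rate passed to the environment above, so this interpretation is an assumption):
# `soc` and `action` are expressed in percent of battery capacity, one hour of full
# charging or discharging moves the state of charge by at most 25 points, and the last
# constraint forces a fully charged battery at departure.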
# Define problem
prob = cvx.Problem(objective, constraints)
# Solve problem with the selected solver
prob.solve(solver=cvx.MOSEK)
'''
_____________________________________________________
Store all necessary data in a CSV file for evaluation
_____________________________________________________
'''
episode_prices = env.hourly_prices['Spot'][168:192].to_list()
episode_day_cats = env.hourly_prices['day_cat'][168]
episode_nextday_cats = env.hourly_prices['day_cat'][191]
episode_input_prices = env.hourly_prices['Spot'].to_list()
episode_soc.append(initial_soc/100)
episode_t_step = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]
episode_t_sin = env.hourly_prices['t_sin'][168:192].to_list()
episode_t_cos = env.hourly_prices['t_cos'][168:192].to_list()
episode_t_saw1 = env.hourly_prices['t_saw1'][168:192].to_list()
episode_t_saw2 = env.hourly_prices['t_saw2'][168:192].to_list()
episode_t0 = env.hourly_prices['t0'][168:192].to_list()
episode_t1 = env.hourly_prices['t1'][168:192].to_list()
episode_t2 = env.hourly_prices['t2'][168:192].to_list()
episode_t3 = env.hourly_prices['t3'][168:192].to_list()
episode_t4 = env.hourly_prices['t4'][168:192].to_list()
episode_t5 = env.hourly_prices['t5'][168:192].to_list()
episode_t6 = env.hourly_prices['t6'][168:192].to_list()
episode_t7 = env.hourly_prices['t7'][168:192].to_list()
episode_t8 = env.hourly_prices['t8'][168:192].to_list()
episode_t9 = env.hourly_prices['t9'][168:192].to_list()
episode_t10 = env.hourly_prices['t10'][168:192].to_list()
episode_t11 = env.hourly_prices['t11'][168:192].to_list()
episode_t12 = env.hourly_prices['t12'][168:192].to_list()
episode_t13 = env.hourly_prices['t13'][168:192].to_list()
episode_t14 = env.hourly_prices['t14'][168:192].to_list()
episode_t15 = env.hourly_prices['t15'][168:192].to_list()
episode_t16 = env.hourly_prices['t16'][168:192].to_list()
episode_t17 = env.hourly_prices['t17'][168:192].to_list()
episode_t18 = env.hourly_prices['t18'][168:192].to_list()
episode_t19 = env.hourly_prices['t19'][168:192].to_list()
episode_t20 = env.hourly_prices['t20'][168:192].to_list()
episode_t21 = env.hourly_prices['t21'][168:192].to_list()
episode_t22 = env.hourly_prices['t22'][168:192].to_list()
episode_t23 = env.hourly_prices['t23'][168:192].to_list()
episode_d1 = env.hourly_prices['d1'][168:192].to_list()
episode_d2 = env.hourly_prices['d2'][168:192].to_list()
episode_d3 = env.hourly_prices['d3'][168:192].to_list()
episode_d4 = env.hourly_prices['d4'][168:192].to_list()
for j in range(0,env.start_ind):
episode_soc.append(initial_soc/100)
episode_actions.append('-')
calc_actions.append(0)
for j in range(0,(env.end_ind-env.start_ind)):
episode_soc.append(soc[j].value/100)
episode_actions.append((action[j].value/100)*24)
calc_actions.append((action[j].value/100)*24)
for j in range(24-env.end_ind):
episode_soc.append(soc[(env.end_ind-env.start_ind-1)].value/100)
episode_actions.append('-')
calc_actions.append(0)
price_array = np.array(episode_prices)/10
action_array = np.array(calc_actions)
episode_rewards = price_array*action_array*(-1)
if i%100==0:
print('episode: ', i, ' reward: ', sum(episode_rewards))
price_list.append(episode_prices)
arr_list.append(env.start_ind)
input_price_list.append(episode_input_prices)
soc_list.append(episode_soc)
action_list.append(episode_actions)
dates.append(env.game_date)
day_cats.append(episode_day_cats)
nextday_cats.append(episode_nextday_cats)
starts.append(env.start_time)
ends.append(env.end_time)
final_soc.append(1)
scores.append(sum(episode_rewards))
eps_history.append('-')
pen_history.append('-')
avg_scores.append('-')
t_step.append(episode_t_step)
t_sin.append(episode_t_sin)
t_cos.append(episode_t_cos)
t_saw1.append(episode_t_saw1)
t_saw2.append(episode_t_saw2)
t0.append(episode_t0)
t1.append(episode_t1)
t2.append(episode_t2)
t3.append(episode_t3)
t4.append(episode_t4)
t5.append(episode_t5)
t6.append(episode_t6)
t7.append(episode_t7)
t8.append(episode_t8)
t9.append(episode_t9)
t10.append(episode_t10)
t11.append(episode_t11)
t12.append(episode_t12)
t13.append(episode_t13)
t14.append(episode_t14)
t15.append(episode_t15)
t16.append(episode_t16)
t17.append(episode_t17)
t18.append(episode_t18)
t19.append(episode_t19)
t20.append(episode_t20)
t21.append(episode_t21)
t22.append(episode_t22)
t23.append(episode_t23)
d1.append(episode_d1)
d2.append(episode_d2)
d3.append(episode_d3)
d4.append(episode_d4)
# benchmark is stored with one entire episode summarized in one row
if benchmark:
DataHandler().store_benchmark_results(price_list, soc_list, action_list, dates, day_cats, starts, ends, scores, avg_scores, \
final_soc, eps_history, pen_history, filename, store_dir)
# labeled dataset is stored with single charging/discarging decisions in one row
elif supervised_training_set or supervised_test_set:
DataHandler().store_supervised_dataset(price_list, input_price_list, arr_list, soc_list, action_list, dates, \
day_cats, nextday_cats, starts, ends, scores, \
avg_scores, final_soc, eps_history, pen_history, filename, store_dir, \
t_step, t_sin, t_cos, t_saw1, t_saw2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, \
t13, t14, t15, t16, t17, t18, t19, t20, t21, t22, t23, d1, d2, d3, d4)
else:
print('nothing stored...')
```
#### File: envs/custom_env_dir/dqn_agent.py
```python
from envs.custom_env_dir.custom_dqn import DeepQNetwork
from envs.custom_env_dir.replay_memory import ReplayBuffer
import numpy as np
import torch as T
from datetime import datetime
import os
class DQNAgent(object):
def __init__(self, gamma, epsilon, lr, n_actions, input_dims, optimizer, fc1_dims, fc2_dims,
mem_size, batch_size, chkpt_dir, replace, eps_min, eps_dec,
algo, env_name):
self.gamma = gamma
self.epsilon = epsilon
self.lr = lr
self.n_actions = n_actions
self.input_dims = input_dims
self.batch_size = batch_size
self.eps_min = eps_min
self.eps_dec = eps_dec
self.replace_target_cnt = replace
self.algo = algo
self.env_name = env_name
self.chkpt_dir = chkpt_dir
cwd = os.getcwd()
self.action_space = [i for i in range(n_actions)]
self.learn_step_counter = 0
now = datetime.now().strftime('%Y%m%d_%H%M')
# Replay memory
self.memory = ReplayBuffer(mem_size, input_dims, n_actions)
# Q-network / online network
self.q_eval = DeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=optimizer+'_gamma'+str(gamma)+'_lr'+\
(('%.15f' % lr).rstrip('0').rstrip('.'))+\
'_replace'+str(replace)+'_HL['+str(fc1_dims)+\
' ,'+str(fc2_dims)+']_q_eval.pt',chkpt_dir=self.chkpt_dir,\
fc1_dims=fc1_dims, fc2_dims=fc2_dims, seed=1, optimizer=optimizer)
# Target network
self.q_next = DeepQNetwork(self.lr, self.n_actions,
input_dims=self.input_dims,
name=optimizer+'_gamma'+str(gamma)+'_lr'+\
(('%.15f' % lr).rstrip('0').rstrip('.'))+\
'_replace'+str(replace)+'_HL['+str(fc1_dims)+\
' ,'+str(fc2_dims)+']_q_next.pt',chkpt_dir=self.chkpt_dir,\
fc1_dims=fc1_dims, fc2_dims=fc2_dims, seed=1, optimizer=optimizer)
# Function for action selection
def choose_action(self, observation):
# Apply e-greedy action selection
if np.random.random() > self.epsilon:
state = T.tensor([observation],dtype=T.float).to(self.q_eval.device)
actions = self.q_eval.forward(state)
action = T.argmax(actions).item()
else:
action = np.random.choice(self.action_space)
return action
# Store state transitions in replay memory
def store_transition(self, state, action, reward, state_):
self.memory.store_transition(state, action, reward, state_)
# Function samples mini-batch of transitions from replay memory
def sample_memory(self):
state, action, reward, new_state = \
self.memory.sample_buffer(self.batch_size)
states = T.tensor(state).to(self.q_eval.device)
rewards = T.tensor(reward).to(self.q_eval.device)
actions = T.tensor(action).to(self.q_eval.device)
states_ = T.tensor(new_state).to(self.q_eval.device)
return states, actions, rewards, states_
# Function updates weights of the target network every C steps
def replace_target_network(self):
if self.learn_step_counter % self.replace_target_cnt == 0:
self.q_next.load_state_dict(self.q_eval.state_dict())
# Function decrements epsilon during training process
def decrement_epsilon(self):
self.epsilon = self.epsilon - self.eps_dec \
if self.epsilon > self.eps_min else self.eps_min
# Function stores current model parameters
def save_models(self):
self.q_eval.save_checkpoint()
self.q_next.save_checkpoint()
# Function stores final agent at the end of each training run
def save_models_final(self):
self.q_eval.save_checkpoint_final()
self.q_next.save_checkpoint_final()
def load_models(self):
self.q_eval.load_checkpoint()
self.q_next.load_checkpoint()
def load_models_final(self):
self.q_eval.load_checkpoint_final()
self.q_next.load_checkpoint_final()
def learn(self):
if self.memory.mem_cntr < self.batch_size:
return
# sets gradient of optimizer object to zero
self.q_eval.optimizer.zero_grad()
self.replace_target_network()
states, actions, rewards, states_ = self.sample_memory()
indices = np.arange(self.batch_size)
# q_eval is the main DQN (online network)
q_pred = self.q_eval.forward(states)[indices, actions]
# q_next is the target network
q_next = self.q_next.forward(states_).max(dim=1)[0]
q_target = rewards + self.gamma*q_next
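# This is the plain DQN target y = r + gamma * max_a' Q_target(s', a'); no terminal-state
# masking is applied here, presumably because every charging episode has a fixed horizon.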
loss = self.q_eval.loss(q_target, q_pred).to(self.q_eval.device)
loss.backward()
self.q_eval.optimizer.step()
self.learn_step_counter += 1
self.decrement_epsilon()
``` |
{
"source": "Johannes-Sahlmann/jwst-visit-parser",
"score": 3
} |
#### File: jwst-visit-parser/visitparser/parser.py
```python
from collections import OrderedDict
import copy
import re
from astropy.table import Table, join
import numpy as np
iswhitespace = lambda x: re.fullmatch(r"\s+", x) is not None
# set up regular expressions for parsing
rx_dict = {
'template': re.compile(r'# (?P<apt_templates>.*)'),
}
def _parse_line(line, rx_dict):
"""Do a regex search against all defined regexes.
Return the key and match result of the first matching regex.
See https://www.vipinajayakumar.com/parsing-text-with-python/
"""
for key, rx in rx_dict.items():
match = rx.search(line)
if match:
return key, match
# if there are no matches
return None, None
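# Illustrative call (hypothetical input line): _parse_line('# NRC Wavefront Sensing', rx_dict)
# returns ('template', <re.Match object>) because the line matches the 'template' regex;
# a line that matches none of the regexes returns (None, None).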
# Some lightweight classes for parsed information
class Statement(object):
"""Capture visit file statement delimited by a semicolon."""
def __init__(self, cmdstring, verbose=False):
cmdparts = cmdstring.split(' ,')
self.name = cmdparts[0]
self.args = cmdparts[1:]
try:
self.scriptname = cmdparts[2]
except IndexError:
self.scriptname = 'NONE'
if verbose:
for part in cmdparts:
print(" " + part)
def __repr__(self):
return ("<Statement {} >".format(self.name))
class Aux(Statement):
"""Capture statement identified by AUX keyword."""
def __init__(self, cmdstring, verbose=False):
super().__init__(cmdstring, verbose=verbose)
for param in self.args:
if '=' in param:
key, value = param.split('=')
try:
value = float(value)
except:
pass
self.__dict__[key] = value
class Dither(Statement):
"""Capture statement identified by DITHER keyword."""
def __init__(self, cmdstring, verbose=False):
super().__init__(cmdstring, verbose=verbose)
self.id = self.args[0].split('=')[1]
for param in self.args:
if '=' in param:
key, value = param.split('=')
try:
value = float(value)
except:
pass
self.__dict__[key] = value
class Momentum(Statement):
"""Capture statement identified by MOMENTUM keyword."""
def __init__(self, cmdstring, verbose=False):
super().__init__(cmdstring, verbose=verbose)
for param in self.args:
if '=' in param:
key, value = param.split('=')
try:
value = float(value)
except:
pass
self.__dict__[key] = value
class SlewOrAct(Statement):
"""Capture statement identified by SLEW or ACT keyword."""
def __init__(self, cmdstring, group=None, sequence=None, verbose=False):
super().__init__(cmdstring, verbose=verbose)
self.group = group
self.sequence = sequence
try:
self.activity = int(self.args[0], base=16) # note, these are base 16 hex numbers
except ValueError as e:
print('Activity number parsing raises ValueError:\n{}\nSetting to 99'.format(e))
self.activity = 99
for param in self.args[2:]:
if '=' in param:
key, value = param.split('=')
try:
value = float(value)
except:
pass
self.__dict__[key] = value
@property
def gsa(self):
"Group, sequence, activity"
return "{:02d}{:1d}{:02d}".format(self.group, self.sequence, self.activity)
class VisitDescription(Statement):
"""Capture statement identified by VISIT keyword."""
def __init__(self, cmdstring, verbose=False):
super().__init__(cmdstring, verbose=verbose)
self.id = self.args[0]
for param in self.args[1:]:
if '=' in param:
key, value = param.split('=')
try:
value = float(value)
except:
pass
self.__dict__[key] = value
class Activity(SlewOrAct):
"""Capture information related to activity."""
def __init__(self, cmdstring, *args, **kwargs):
super().__init__(cmdstring, *args, **kwargs)
# self.scriptname = self.args[1]
def __repr__(self):
return ("Activity {}: {}".format(self.gsa, self.describe()))
def describe(self):
if self.scriptname == 'NRCWFSCMAIN':
description = """{s.scriptname} {s.CONFIG} WFCGROUP={s.WFCGROUP}
Readout={s.NGROUPS:.0f} groups, {s.NINTS:.0f} ints
SW={s.FILTSHORTA}, LW={s.FILTLONGA}"""
elif self.scriptname == 'NRCMAIN':
description = """{s.scriptname} {s.CONFIG}
Readout={s.NGROUPS:.0f} groups, {s.NINTS:.0f} ints
SW={s.FILTSHORTA}, LW={s.FILTLONGA}"""
elif self.scriptname == 'NRCWFCPMAIN':
mod = self.CONFIG[3]  # module A or B
description = "{s.scriptname} {s.FILTSHORT" + mod + "}+{s.PUPILSHORT" + mod + \
"} Readout={s.NGROUPS:.0f} groups, {s.NINTS:.0f} ints"
elif self.scriptname == 'SCSAMMAIN':
description = """SCSAMMAIN dx={s.DELTAX}, dy={s.DELTAY}, dpa={s.DELTAPA}"""
elif self.scriptname == 'NRCSUBMAIN':
description = """NRCSUBMAIN subarray={s.SUBARRAY}"""
else:
description = """{s.scriptname}"""
try:
return description.format(s=self)
except AttributeError as e:
print('Activity {} raised AttributeError:\n{}'.format(self.scriptname, e))
class Guide(SlewOrAct):
"""Expand Guide statement."""
def describe(self):
if self.args[1] == 'FGSVERMAIN':
return ("Verification")
else:
detnum = self.DETECTOR[-1]
return """FGS{detnum}""".format(detnum=detnum, s=self)
def __repr__(self):
return ("Guide {}: {}".format(self.gsa, self.describe()))
class Slew(SlewOrAct):
"""Expand statement identified by SLEW keyword."""
def __repr__(self):
return (
"Slew {}: for {} on GS at ({}, {}) with PA={}".format(self.gsa, 'N/A', self.GSRA,
self.GSDEC, self.GSPA))
# "Slew {}: for {} on GS at ({}, {}) with PA={}".format(self.gsa, self.GUIDEMODE, self.GSRA,
# self.GSDEC, self.GSPA))
class Visit():
"""Class for JWST visit file information"""
def __init__(self, templates, statements, groups):
self.templates = templates
self.groups = groups
self.statements = statements
# store non-exposure related information in class attributes
dithers = OrderedDict()
for statement in self.statements:
if statement.name == 'VISIT':
self.id = statement.id
self.visit_parameters = statement
elif statement.name == 'MOMENTUM':
self.momentum = statement
elif statement.name == 'AUX':
self.aux = statement
elif statement.name == 'DITHER':
dithers['{}'.format(statement.id)] = statement
self.dithers = dithers
# construct astropy table with basic content
script_statements = []
self.table = Table(names=('GROUP_ID', 'SEQ_ID', 'ACT_ID', 'GSA', 'TYPE', 'SCRIPT'),
dtype=(int, int, int, object, object, object))
for group_id, group in self.groups.items():
for seq_id, seq in group.items():
for statement in seq:
self.table.add_row((int(group_id.split('_')[1]), int(seq_id.split('_')[1]),
statement.activity, statement.gsa, statement.name, statement.scriptname))
script_statements.append(statement)
for colname in 'TYPE SCRIPT GSA'.split():
self.table[colname] = np.array(self.table[colname]).astype(str)
self.table.meta['comments']=['{}'.format(self.id)]
self.number_of_statements = len(self.table)
self.script_statements = np.array(script_statements)
def __repr__(self):
return (
"Visit {}: {:>2} dithers, {:>2} groups, {:>3} observation statements. Uses {}".format(self.id, len(self.dithers),
len(self.groups), self.number_of_statements, self.templates))
def overview_table(self, instrument=None):
"""Return an astropy table with specific information, one row per exposure/activity.
Parameters
----------
instrument : str
JWST instrument name, case insensitive
Returns
-------
table : astropy.table
Table with additional information extracted from the statements.
"""
table = copy.deepcopy(self.table)
if instrument.lower() == 'niriss':
remove_index = np.array([i for i, script in enumerate(self.table['SCRIPT']) if script[0:3] not in ['NIS', 'NRC']])
table.remove_rows(remove_index)
column_names = 'GSA OPMODE TARGTYPE DITHERID PATTERN NINTS NGROUPS PUPIL FILTER SUBARRAY'.split()
instrument_table = Table(names=tuple(column_names), dtype=tuple([object]*10))
for statement in self.script_statements:
if statement.gsa in table['GSA']:
row = [statement.gsa]
for colname in instrument_table.colnames[1:]:
try:
value = str(getattr(statement, colname))
except AttributeError:
value = 'NONE'
row.append(value)
instrument_table.add_row(vals=row)
for colname in instrument_table.colnames:
instrument_table[colname] = np.array(instrument_table[colname]).astype(str)
table = join(table, instrument_table, keys='GSA')
return table
def parse_visit_file(filename, verbose=False):
"""Read and parse the visit file line-by-line.
Parameters
----------
filename : str
Name fo file to parse
verbose : bool
verbosity
Returns
-------
visit : Visit object
Object containing all information extracted from the file.
"""
with open(filename) as file:
lines = file.readlines()
key, match = _parse_line(lines[0], rx_dict)
if key == 'template':
apt_templates = match.group('apt_templates').split(',')
# Simple parsing that ignores commands and newlines, but respects the fact that
# OSS parameters are separated by the exact string " ," with the comma necessarily after whitespace.
nocomments = [l.strip() for l in lines if not (l.startswith("#") or iswhitespace(l))]
for i in range(len(nocomments)):
if nocomments[i].startswith(','):
nocomments[i] = ' ' + nocomments[i]
merged = ''.join(nocomments)
commands = merged.split(';')
if verbose:
print(commands)
# Now iterate through the statements
groups = OrderedDict()
commands = np.array(commands)
# process initial statements
statements = []
for index,cmd in enumerate(commands):
if cmd == '':
continue
if verbose:
print(cmd)
print('*'*50)
parsedcmd = Statement(cmd)
if parsedcmd.name == 'GROUP':
break
elif parsedcmd.name == 'VISIT':
parsedcmd = VisitDescription(cmd)
elif parsedcmd.name == 'MOMENTUM':
parsedcmd = Momentum(cmd)
elif parsedcmd.name == 'AUX':
parsedcmd = Aux(cmd)
elif parsedcmd.name == 'DITHER':
parsedcmd = Dither(cmd)
statements.append(parsedcmd)
# process groups and sequences
for ii, cmd in enumerate(commands[index:]):
if cmd == '':
continue
parsedcmd = Statement(cmd)
if parsedcmd.name == 'GROUP':
ct_group = int(parsedcmd.args[0].split()[0])
groups['GROUP_{:02d}'.format(ct_group)] = OrderedDict()
continue
elif parsedcmd.name == 'SEQ':
ct_seq = int(parsedcmd.args[0].split()[0])
seq_statements = []
groups['GROUP_{:02d}'.format(ct_group)]['SEQ_{:02d}'.format(ct_seq)] = seq_statements
continue
elif parsedcmd.name == 'SLEW':
parsedcmd = Slew(cmd, group=ct_group, sequence=ct_seq)
elif parsedcmd.name == 'ACT':
if parsedcmd.args[1] == 'FGSMAIN' or parsedcmd.args[1] == 'FGSVERMAIN':
parsedcmd = Guide(cmd, group=ct_group, sequence=ct_seq)
else:
parsedcmd = Activity(cmd, group=ct_group, sequence=ct_seq)
seq_statements.append(parsedcmd)
return Visit(apt_templates, statements, groups)
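# Hedged usage sketch (the file name is a placeholder):
#   visit = parse_visit_file('V01234005001.vst')
#   print(visit)                                    # one-line summary of dithers/groups/statements
#   niriss_table = visit.overview_table(instrument='niriss')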
def crosscheck_wfsc_visit_file(filename):
"""
TODO
----
update Perrin's function to updated code.
Parameters
----------
filename
Returns
-------
"""
statements, apt_template = parse_visit_file(filename)
print("==WFSC crosscheck for file {}==".format(filename))
print("From APT template: {}".format(apt_template))
# Check slew statements
slews = [s for s in statements if (isinstance(s, Slew) or isinstance(s, Guide))]
# Check activity statements
acts = [s for s in statements if isinstance(s, Activity)]
is_wfsc_visit = any(['WFSC' in a.scriptname for a in acts])
print("\nContains {} slew or guide statement(s):".format(len(slews)))
for s in slews:
print(" " + repr(s))
print("\nContains {} activity statement(s):".format(len(acts)))
act_by_num = dict()
for s in acts:
print(" " + repr(s))
act_by_num[s.gsa] = s
if is_wfsc_visit:
aux = [s for s in statements if s.name == 'AUX']
if len(aux) == 0:
raise RuntimeError("WFSC VISIT BUT NO AUX STATEMENT FOUND!")
# Check for presence of AUX statement
``` |
{
"source": "Johannes-Sahlmann/pystrometry",
"score": 3
} |
#### File: pystrometry/utils/archives.py
```python
import os
from astropy.table import Table
from astroquery.gaia import Gaia, TapPlus
import pandas as pd
def get_gaiadr_data(analysis_dataset_name, data_dir, source_id_array=None, gaia_data_release='dr3int5',
overwrite_query=False, gaia_table_name='gaia_source', shared_user_name='dr3int5'):
"""Query a Gaia archive table by source_id. Only data corresponding to source_id_array are returned.
Parameters
----------
analysis_dataset_name
data_dir
source_id_array
gaia_data_release
overwrite_query
gaia_table_name
Returns
-------
"""
# retrieve Gaia DR data by submitting list of source_id to GACS
output_file = os.path.join(data_dir, '{}_{}_sources.parquet'.format(gaia_data_release, analysis_dataset_name))
if (not os.path.isfile(output_file)) or (overwrite_query):
if 'int' in gaia_data_release:
gaia = TapPlus(url="http://geapre.esac.esa.int/tap-server/tap")
if getattr(gaia, '_TapPlus__isLoggedIn') is False:
gaia.login()
table_name = 'user_{}'.format(shared_user_name)
else:
gaia = Gaia
table_name = '{}'.format(gaia_data_release)
if source_id_array is not None:
input_table_name = '{}_source_id'.format(analysis_dataset_name)
input_table = os.path.join(data_dir, '%s.vot' % input_table_name)
source_id_column_name = 'source_id'
Table([source_id_array], names=['source_id']).write(input_table, format='votable', overwrite=True)
query = '''
SELECT * FROM
(select * FROM tap_upload.{0}) AS input
INNER JOIN
(select * FROM {1}.{3}) AS gdr
ON (input.{2} = gdr.source_id)
;'''.format(input_table_name, table_name, source_id_column_name, gaia_table_name)
job = gaia.launch_job_async(query=query, upload_resource=input_table,
upload_table_name=input_table_name, verbose=True)
# dump_to_file=True, output_file=output_file)
else:
query = 'select * FROM {}.{};'.format(table_name, gaia_table_name)
job = gaia.launch_job_async(query=query, verbose=True)
table = job.get_results()
df = table.to_pandas()
df.to_parquet(output_file)
else:
df = pd.read_parquet(output_file)
print('Retrieved {} rows from {}.{}'.format(len(df), gaia_data_release, gaia_table_name))
return df
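# Hedged usage sketch (the source id is a placeholder, not a real Gaia source):
#   df = get_gaiadr_data('my_sample', '/tmp', source_id_array=[1234567890123456789],
#                        gaia_data_release='gaiadr3', gaia_table_name='gaia_source')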
```
#### File: pystrometry/utils/du437_tools.py
```python
import copy
import os
from astropy.table import Table
from astropy.time import Time
import numpy as np
import pylab as pl
from .. import gaia_astrometry, pystrometry
try:
from helpers.table_helpers import plot_columns_simple
except ImportError:
print('universal_helpers not available')
def apply_elimination_cuts(table, selection_cuts, parameter_mapping):
"""Eliminate rows in astropy table based on input parameters.
Parameters
----------
table
selection_cuts
parameter_mapping
Returns
-------
Examples
--------
selection_cuts = OrderedDict({'period_1': {'operator': '<', 'threshold': 1000.},
'period_2': {'operator': '>', 'threshold': 50.},
'm2sini': {'operator': '>', 'threshold': 10.},
})
parameter_mapping = {'period': 'PER',
'ecc': 'ECC',
'm2sini': 'MSINI',
'omega': 'OM',
'plx': 'PAR',
}
"""
string_repr = ''
for field, parameters in selection_cuts.items():
if parameters['operator'] == '>':
remove_index = np.where(table[parameter_mapping[field]] > parameters['threshold'])[0]
elif parameters['operator'] == '<':
remove_index = np.where(table[parameter_mapping[field]] < parameters['threshold'])[0]
table.remove_rows(remove_index)
string_repr += '{:>10} {} {:>6}\n'.format(field, parameters['operator'],
parameters['threshold'])
return table, string_repr
def apply_selection_cuts(table, selection_cuts, parameter_mapping):
"""
Parameters
----------
table
selection_cuts
parameter_mapping
Returns
-------
"""
string_repr = ''
for field, parameters in selection_cuts.items():
field = field.split('_')[0]
if parameters['operator'] == '>':
remove_index = np.where(table[parameter_mapping[field]] < parameters['threshold'])[0]
elif parameters['operator'] == '<':
remove_index = np.where(table[parameter_mapping[field]] > parameters['threshold'])[0]
table.remove_rows(remove_index)
string_repr += '{:>10} {} {:>6}\n'.format(field, parameters['operator'],
parameters['threshold'])
return table, string_repr
def period_phase_error(period_day_fit, period_day_truth, time_span_day):
"""Return the period phase error as defined in BH-011."""
return np.abs((period_day_fit - period_day_truth)/period_day_truth * time_span_day/period_day_truth)
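# Worked example: a fitted period of 365 d against a true period of 360 d, observed over a
# 1000 d time span, gives |(5/360) * (1000/360)| ~ 0.039, i.e. roughly 4% of an orbit of
# accumulated phase error.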
def make_comparison_figures(table, parameter_mapping, mapping_dr3id_to_starname,
highlight_index=None, description_str='',
save_plot=True, plot_dir=os.getcwd(), time_span_day=1000.,
period_phase_error_threshold=0.2):
"""
Parameters
----------
table
parameter_mapping
highlight_index
description_str
save_plot
plot_dir
Returns
-------
"""
# also save table with discrepancies
discrepancy_table = Table()
discrepancy_table['sourceId'] = table['sourceId']
discrepancy_table['Name'] = table['Name']
discrepancy_table['Name_dedreq'] = table['Name_dedreq-695']
discrepancy_table['m2_mjup'] = table['{}_m2_mjup'.format('p1')]
for miks_name, mapped_name in parameter_mapping.items():
if miks_name not in 'plx'.split():
miks_field = 'p1_{}'.format(miks_name)
else:
miks_field = '{}'.format(miks_name)
if miks_field not in table.colnames:
continue
pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k')
pl.plot(table[mapped_name], table[miks_field], 'bo')
discrepancy_table[miks_field] = table[miks_field]
discrepancy_table[mapped_name] = table[mapped_name]
discrepancy_table['{}_discrepancy'.format(miks_field)] = table[mapped_name] - table[miks_field]
# discrepancy in percent
# discrepancy_table['{}_discr_rel'.format(miks_field)] = 100.*np.abs(table[mapped_name] - table[miks_field])/np.abs(table[miks_field])
discrepancy_table['{}_discr_rel'.format(miks_field)] = 100.*np.abs(table[mapped_name] - table[miks_field])/np.abs(table[mapped_name])
if highlight_index is not None:
pl.plot(table[mapped_name][highlight_index],
table[miks_field][highlight_index], 'ro', ms=15, mfc='none')
pl.axis('equal')
xymax = np.max(np.array([pl.xlim()[1], pl.ylim()[1]]))
pl.plot([0, xymax], [0, xymax], 'k--')
pl.xlabel('{} ({})'.format(mapped_name, table.meta['comparison_to']))
pl.ylabel('{} (DU437)'.format(miks_field))
pl.title('{} sources from {}'.format(len(table), table.meta['comparison_to']))
pl.text(0.01, 0.99, description_str, horizontalalignment='left', verticalalignment='top',
transform=pl.gca().transAxes)
pl.show()
if save_plot:
figure_name = os.path.join(plot_dir, '{}_comparison_to_{}.pdf'.format(miks_field, table.meta['comparison_to']))
pl.savefig(figure_name, transparent=True, bbox_inches='tight', pad_inches=0)
# period phase error:
miks_name = 'period_day'
miks_field = 'p1_{}'.format(miks_name)
mapped_name = parameter_mapping[miks_name]
period_day_fit = table[miks_field]
period_day_truth = table[mapped_name]
discrepancy_table['period_phase_error'] = period_phase_error(period_day_fit, period_day_truth, time_span_day)
n_period_recovered = len(np.where(np.abs(discrepancy_table['period_phase_error'])<period_phase_error_threshold)[0])
pl.figure(figsize=(8, 4), facecolor='w', edgecolor='k')
pl.plot(period_day_truth, discrepancy_table['period_phase_error'], 'k.')
pl.ylim((-1,1))
pl.fill_between(pl.xlim(), period_phase_error_threshold, y2=-period_phase_error_threshold, color='g', alpha=0.5)
pl.xlabel('Truth period (day)')
pl.ylabel('Period phase error')
description_str_2 = '{}/{} = {:2.1f}% within +/- {:2.1f}\n'.format(n_period_recovered, len(discrepancy_table), n_period_recovered/len(discrepancy_table)*100, period_phase_error_threshold)+description_str
pl.text(0.01, 0.99, description_str_2, horizontalalignment='left', verticalalignment='top',
transform=pl.gca().transAxes)
pl.show()
if save_plot:
figure_name = os.path.join(plot_dir, 'period_phase_error_{}.pdf'.format(table.meta['comparison_to']))
pl.savefig(figure_name, transparent=True, bbox_inches='tight', pad_inches=0)
# pl.close('all')
threshold = {'delta_chi2': {'value': 1000, 'operator': '>'},
'f_test_probability': {'value': 1e-100, 'operator': '<'}
}
for miks_field in ['meritFunction', 'chi2WithPlanet', 'chi2SingleStar', 'delta_chi2', 'f_test_probability', 'p1_estSNratio', 'p1_period_snr']:
pl.figure(figsize=(8, 4), facecolor='w', edgecolor='k')
index = np.where(discrepancy_table['period_phase_error'] < 100)[0]
pl.loglog(discrepancy_table['period_phase_error'][index], table[miks_field][index], 'bo', alpha=0.7)
# pl.ylim((-1,1))
# pl.fill_between(pl.xlim(), period_phase_error_threshold, y2=-period_phase_error_threshold, color='g', alpha=0.5)
pl.xlabel('Period phase error')
pl.ylabel(miks_field)
n_passed_threshold = None
if miks_field in ['delta_chi2', 'f_test_probability']:
value = threshold[miks_field]['value']
operator = threshold[miks_field]['operator']
if operator == '>':
n_passed_threshold = len(np.where(table[miks_field] > value)[0])
pl.fill_between(pl.xlim(), value, y2=pl.ylim()[1], color='g', alpha=0.5)
elif operator == '<':
n_passed_threshold = len(np.where(table[miks_field] < value)[0])
pl.fill_between(pl.xlim(), value, y2=pl.ylim()[0], color='g', alpha=0.5)
pl.title('{} of {} systems shown. {} pass threshold'.format(len(index), len(table), n_passed_threshold))
pl.text(0.01, 0.99, description_str, horizontalalignment='left', verticalalignment='top',
transform=pl.gca().transAxes)
pl.show()
if save_plot:
figure_name = os.path.join(plot_dir, 'period_phase_error_vs_{}_{}.pdf'.format(miks_field, table.meta['comparison_to']))
pl.savefig(figure_name, transparent=True, bbox_inches='tight', pad_inches=0)
if 1:
formats = {}
for key in discrepancy_table.colnames:#['angular_distance', 'phot_g_mean_mag', 'parallax', 'pmra', 'pmdec']:
if 'discr' in key:
formats[key] = '%>2.1f'
elif 'm2' in key:
formats[key] = '%2.1f'
# else:
# formats[key] = '%2.3f'
discrepancy_table_file = os.path.join(plot_dir, 'comparison_to_{}.csv'.format(table.meta['comparison_to']))
if 'p1_period_discr_rel' in discrepancy_table.colnames:
discrepancy_table.sort('p1_period_discr_rel')
discrepancy_table.write(discrepancy_table_file, format='ascii.fixed_width', delimiter=',',
bookend=False, overwrite=True, formats=formats)
try:
pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k')
# pl.plot(table['p1_a1'], np.abs(table['p1_period'] - table[parameter_mapping['period']]), 'bo')
# pl.xlabel('Fitted semimajor axis (mas)')
pl.plot(table['a1_mas_minimum'], np.abs(table['p1_period'] - table[parameter_mapping['period']]), 'bo')
pl.xlabel('Expected semimajor axis (mas)')
pl.ylabel('Period error (day)')
pl.show()
if save_plot:
figure_name = os.path.join(plot_dir, 'period_error_vs_a1.pdf')
pl.savefig(figure_name, transparent=True, bbox_inches='tight', pad_inches=0)
except KeyError:
pass
return discrepancy_table
def get_gaia_iad(source_id, t_ref_jd, epoch_data_dir, verbose=False):
"""Return Gaia Epoch Astrometry Data.
Parameters
----------
selected_systems
index
epoch_data_dir
Returns
-------
"""
t_ref_mjd = Time(t_ref_jd, format='jd').mjd
iad = gaia_astrometry.GaiaIad(source_id, epoch_data_dir)
iad.load_data()
iad_mjd = Time(iad.epoch_data[iad.time_column] * 365.25 + t_ref_jd, format='jd').mjd
iad.epoch_data['MJD'] = iad_mjd
iad.epoch_data_for_prototype = Table()
iad.epoch_data_for_prototype['t-t_ref'] = iad.epoch_data[iad.time_column]
for key in ['spsi_obs', 'cpsi_obs', 'ppfact_obs', 'da_mas_obs', 'errda_mas_obs', 'transitId',
'direction_AL0_AC1', 'OB']:
iad.epoch_data_for_prototype[key] = iad.epoch_data[key]
if key in ['spsi_obs', 'cpsi_obs']:
iad.epoch_data_for_prototype['t{}'.format(key)] = iad.epoch_data_for_prototype[
't-t_ref'] * \
iad.epoch_data_for_prototype[key]
iad.epoch_data = copy.deepcopy(iad.epoch_data_for_prototype)
iad.time_column = 't-t_ref'
iad.epoch_data['MJD'] = iad_mjd
iad.t_ref_mjd = t_ref_mjd
iad.scan_angle_definition = 'gaia'
if verbose:
iad.epoch_data.pprint()
return iad
def make_orbit_system(selected_systems, index, scan_angle_definition, t_ref_mjd,
m1_MS=1., degenerate_orbit=False,
verbose=False):
"""Return an OrbitSystem for the specified input table row.
Parameters
----------
selected_systems
index
epoch_data_dir
mapping_dr3id_to_starname
plot_dir
m1_MS
rv
show_plot
degenerate_orbit
Returns
-------
"""
alpha_mas = selected_systems['p1_a1_mas'][index]
absolute_parallax_mas = selected_systems['plx_mas'][index]
a_m = pystrometry.convert_from_angular_to_linear(alpha_mas, absolute_parallax_mas)
P_day = selected_systems['p1_period_day'][index]
m2_kg = pystrometry.pjGet_m2(m1_MS*pystrometry.MS_kg, a_m, P_day)
m2_MJ = m2_kg/pystrometry.MJ_kg
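# The two calls above convert the fitted photocentre semimajor axis (mas) into a linear
# semimajor axis via the parallax and then, presumably through Kepler's third law, solve
# for the companion mass given the assumed primary mass m1_MS (in solar masses).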
attribute_dict = {
'offset_alphastar_mas': selected_systems['alphaStarOffset_mas'][index],
'offset_delta_mas': selected_systems['deltaOffset_mas'][index],
# 'RA_deg': 0.,
# 'DE_deg': 0.,
'RA_deg': selected_systems['alpha0_deg'][index],
'DE_deg': selected_systems['delta0_deg'][index],
# 'plx_mas': selected_systems['plx'][index],
'absolute_plx_mas': selected_systems['plx_mas'][index],
'muRA_mas': selected_systems['muAlphaStar_masPyr'][index],
'muDE_mas': selected_systems['muDelta_masPyr'][index],
'P_day': selected_systems['p1_period_day'][index],
'ecc': selected_systems['p1_ecc'][index],
'omega_deg': selected_systems['p1_omega_deg'][index],
'OMEGA_deg': selected_systems['p1_OMEGA_deg'][index],
'i_deg': selected_systems['p1_incl_deg'][index],
'a_mas': selected_systems['p1_a1_mas'][index],
'Tp_day': t_ref_mjd + selected_systems['p1_Tp_day-T0'][index],
'm1_MS': m1_MS,
'm2_MJ': m2_MJ,
'Tref_MJD': t_ref_mjd,
'scan_angle_definition': scan_angle_definition,
}
if degenerate_orbit:
attribute_dict['omega_deg'] += 180.
attribute_dict['OMEGA_deg'] += 180.
orbit = pystrometry.OrbitSystem(attribute_dict=attribute_dict)
if verbose:
print(orbit)
return orbit
def make_astrometric_orbit_plotter(selected_systems, index, epoch_data_dir, degenerate_orbit=False,
verbose=False, m1_MS=1.):
"""Return AstrometricOrbitPlotter object
Parameters
----------
selected_systems
index
epoch_data_dir
degenerate_orbit
verbose
m1_MS
Returns
-------
"""
source_id = selected_systems['sourceId'][index]
t_ref_jd = selected_systems['T0_JD'][index]
iad = get_gaia_iad(source_id, t_ref_jd, epoch_data_dir, verbose=verbose)
orbit = make_orbit_system(selected_systems, index, iad.scan_angle_definition, iad.t_ref_mjd, m1_MS=m1_MS,
degenerate_orbit=degenerate_orbit, verbose=verbose)
# set coeffMatrix in orbit object
ppm_signal_mas = orbit.ppm(iad.epoch_data['MJD'], psi_deg=np.rad2deg(
np.arctan2(iad.epoch_data['spsi_obs'], iad.epoch_data['cpsi_obs'])),
offsetRA_mas=selected_systems['alphaStarOffset_mas'][index],
offsetDE_mas=selected_systems['deltaOffset_mas'][index],
externalParallaxFactors=iad.epoch_data['ppfact_obs'], verbose=True)
# 1/0
plot_dict = {}
plot_dict['model_parameters'] = {0: orbit.attribute_dict}
plot_dict['linear_coefficients'] = {'matrix': orbit.coeffMatrix} # dict ('matrix', 'table')
plot_dict['data_type'] = '1d'
if hasattr(iad, 'xi'):
plot_dict['data_type'] = 'gaia_2d'
else:
plot_dict['data_type'] = '1d'
plot_dict['scan_angle_definition'] = iad.scan_angle_definition
for key in iad.epoch_data.colnames:
if '_obs' in key:
new_key = key.replace('_obs', '')
if new_key == 'errda_mas':
new_key = 'sigma_da_mas'
iad.epoch_data[new_key] = iad.epoch_data[key]
plot_dict['data'] = iad
if verbose:
iad.epoch_data.pprint()
axp = pystrometry.AstrometricOrbitPlotter(plot_dict)
# axp.print_residual_statistics()
return axp
def make_orbit_figures(selected_systems, index, epoch_data_dir, mapping_dr3id_to_starname=None,
plot_dir=os.path.expanduser('~'),
m1_MS=1., rv=None, show_plot=True, degenerate_orbit=False, verbose=False):
axp = make_astrometric_orbit_plotter(selected_systems, index, epoch_data_dir,
degenerate_orbit=degenerate_orbit, verbose=verbose, m1_MS=m1_MS)
iad = axp.data
source_id = selected_systems['sourceId'][index]
n_curve = 1500
timestamps_curve_2d = np.linspace(np.min(iad.epoch_data['MJD']), np.max(iad.epoch_data['MJD']), n_curve)
axp.t_curve_MJD = timestamps_curve_2d
if 'phot_g_mean_mag' in selected_systems.colnames:
mag_str = ' $G$={:2.1f}'.format(selected_systems['phot_g_mean_mag'][index])
else:
mag_str = ''
if mapping_dr3id_to_starname is not None:
axp.title = 'Gaia DR3 {} ({}{})'.format(source_id, mapping_dr3id_to_starname[source_id], mag_str)
name_seed = 'DR3_{}_{}'.format(source_id, mapping_dr3id_to_starname[source_id])
else:
name_seed = 'DR3_{}'.format(source_id)
argument_dict = {'plot_dir': plot_dir, 'ppm_panel': True, 'frame_residual_panel': True,
'orbit_only_panel': False, 'ppm_description': 'default', 'epoch_omc_description': 'default',
'orbit_description': 'default', 'arrow_offset_x': +100, 'arrow_offset_y': +100,
'name_seed': name_seed, 'scan_angle_definition': iad.scan_angle_definition}
argument_dict['save_plot'] = True
argument_dict['omc_panel'] = True
argument_dict['orbit_only_panel'] = False
# argument_dict['make_condensed_summary_figure'] = True
# argument_dict['make_xy_residual_figure'] = True
argument_dict['make_condensed_summary_figure'] = False
argument_dict['make_xy_residual_figure'] = False
argument_dict['make_1d_overview_figure'] = True
argument_dict['excess_noise'] = selected_systems['excessNoise'][index]
argument_dict['merit_function'] = selected_systems['meritFunction'][index]
if show_plot:
axp.plot(argument_dict=argument_dict)
if rv is not None:
from ..pystrometry import plot_rv_data
my_orbit = copy.deepcopy(orbit)
# my_orbit.m2_MJ = orbit.m2_MJ/10.
plot_rv_data(rv, orbit_system=my_orbit, n_orbit=np.ceil(np.ptp(rv['MJD'])/orbit.P_day)+1)
pl.show()
return axp
def make_orbit_figure(selected_systems, index, epoch_data_dir, mapping_dr3id_to_starname=None,
plot_dir=os.path.expanduser('~'),
m1_MS=1., rv=None, show_plot=True, degenerate_orbit=False, epoch_data_suffix=None):
source_id = selected_systems['sourceId'][index]
t_ref_jd = selected_systems['T0_JD'][index]
t_ref_mjd = Time(t_ref_jd, format='jd').mjd
if epoch_data_suffix is None:
iad = gaia_astrometry.GaiaIad(source_id, epoch_data_dir)
else:
iad = gaia_astrometry.GaiaIad(source_id, epoch_data_dir, epoch_data_suffix=epoch_data_suffix)
iad.load_data(filter_on_frame_uncertainty=True)
# pl.close('all')
# pl.figure()
# pl.hist(iad.epoch_data['errda_mas_obs'])
# # pl.show()
# pl.savefig('test.png')
iad_mjd = Time(iad.epoch_data[iad.time_column]*365.25+t_ref_jd, format='jd').mjd
iad.epoch_data['MJD'] = iad_mjd
iad.epoch_data_for_prototype = Table()
iad.epoch_data_for_prototype['t-t_ref'] = iad.epoch_data[iad.time_column]
for key in ['spsi_obs', 'cpsi_obs', 'ppfact_obs', 'da_mas_obs', 'errda_mas_obs', 'transitId',
'direction_AL0_AC1', 'OB']:
iad.epoch_data_for_prototype[key] = iad.epoch_data[key]
if key in ['spsi_obs', 'cpsi_obs']:
iad.epoch_data_for_prototype['t{}'.format(key)] = iad.epoch_data_for_prototype['t-t_ref'] \
* iad.epoch_data_for_prototype[key]
iad.epoch_data = copy.deepcopy(iad.epoch_data_for_prototype)
iad.time_column = 't-t_ref'
iad.epoch_data['MJD'] = iad_mjd
iad.t_ref_mjd = t_ref_mjd
scan_angle_definition = 'gaia'
# loop over every companion in system
from collections import OrderedDict
model_parameters = OrderedDict()
orbit_description = OrderedDict()
# for planet_index in np.arange(1, selected_systems['Nplanets'][index]+1):
for planet_index in np.arange(selected_systems['Nplanets'][index]):
planet_number = planet_index + 1
alpha_mas = selected_systems['p{}_a1_mas'.format(planet_number)][index]
absolute_parallax_mas = selected_systems['plx_mas'][index]
a_m = pystrometry.convert_from_angular_to_linear(alpha_mas, absolute_parallax_mas)
P_day = selected_systems['p{}_period_day'.format(planet_number)][index]
m2_kg = pystrometry.pjGet_m2(m1_MS*pystrometry.MS_kg, a_m, P_day)
m2_MJ = m2_kg/pystrometry.MJ_kg
attribute_dict = {
'offset_alphastar_mas': selected_systems['alphaStarOffset_mas'][index],
'offset_delta_mas': selected_systems['deltaOffset_mas'][index],
# 'RA_deg': 0.,
# 'DE_deg': 0.,
'RA_deg': selected_systems['alpha0_deg'][index],
'DE_deg': selected_systems['delta0_deg'][index],
# 'plx_mas': selected_systems['plx'][index],
'absolute_plx_mas': selected_systems['plx_mas'][index],
'muRA_mas': selected_systems['muAlphaStar_masPyr'][index],
'muDE_mas': selected_systems['muDelta_masPyr'][index],
'P_day': selected_systems['p{}_period_day'.format(planet_number)][index],
'ecc': selected_systems['p{}_ecc'.format(planet_number)][index],
'omega_deg': selected_systems['p{}_omega_deg'.format(planet_number)][index],
'OMEGA_deg': selected_systems['p{}_OMEGA_deg'.format(planet_number)][index],
'i_deg': selected_systems['p{}_incl_deg'.format(planet_number)][index],
'a_mas': selected_systems['p{}_a1_mas'.format(planet_number)][index],
'Tp_day': iad.t_ref_mjd + selected_systems['p{}_Tp_day-T0'.format(planet_number)][index],
'm1_MS': m1_MS,
'm2_MJ': m2_MJ,
'Tref_MJD': iad.t_ref_mjd,
'scan_angle_definition': scan_angle_definition,
}
if degenerate_orbit:
attribute_dict['omega_deg'] += 180.
attribute_dict['OMEGA_deg'] += 180.
# print(attribute_dict)
# print(pystrometry.geometric_elements(np.array([selected_systems['p1_{}'.format(key)][index] for key in 'A B F G'.split()])))
# print(pystrometry.mean_anomaly(iad.t_ref_mjd, attribute_dict['Tp_day'], attribute_dict['P_day']))
if planet_index == 0:
orbit = pystrometry.OrbitSystem(attribute_dict=attribute_dict)
# print(orbit)
# set coeffMatrix in orbit object
ppm_signal_mas = orbit.ppm(iad.epoch_data['MJD'], psi_deg=np.rad2deg(
np.arctan2(iad.epoch_data['spsi_obs'], iad.epoch_data['cpsi_obs'])),
offsetRA_mas=selected_systems['alphaStarOffset_mas'][index], offsetDE_mas=selected_systems['deltaOffset_mas'][index],
externalParallaxFactors=iad.epoch_data['ppfact_obs'], verbose=True)
model_parameters[planet_index] = attribute_dict
# display additional info on orbit panel
# if 'P1_sigma_a1_mas' in selected_systems.columns:
# p1_a1_div_sigma_a1_mas
# if ('sigma_p1_a1_mas' in selected_systems.columns) and (selected_systems['Nplanets'][index]==1):
if ('sigma_p1_a1_mas' in selected_systems.columns) and (selected_systems['Nplanets'][index]==1):
# temporary: only for single-companion solutions
orbit_descr = '$\\alpha={0[p1_a1_mas]:2.{prec}f}\\pm{0[sigma_p1_a1_mas]:2.{prec}f}$ mas (ratio={0[p1_a1_div_sigma_a1_mas]:2.1f})\n'.format(dict(selected_systems[index]), prec=3)
orbit_descr += '$P={0[p1_period_day]:2.{prec}f}\\pm{0[sigma_p1_period_day]:2.{prec}f}$ d\n'.format(dict(selected_systems[index]), prec=1)
orbit_descr += '$e={0[p1_ecc]:2.{prec}f}\\pm{0[sigma_p1_ecc]:2.{prec}f}$\n'.format(dict(selected_systems[index]), prec=3)
orbit_descr += '$i={0[p1_incl_deg]:2.{prec}f}$ deg\n'.format(dict(selected_systems[index]), prec=2)
orbit_descr += '$\\omega={0[p1_omega_deg]:2.{prec}f}$ deg\n'.format(dict(selected_systems[index]), prec=2)
orbit_descr += '$\\Omega={0[p1_OMEGA_deg]:2.{prec}f}$ deg\n'.format(dict(selected_systems[index]), prec=2)
orbit_descr += '$M_1={0:2.{prec}f}$ Msun\n'.format(m1_MS, prec=2)
orbit_descr += '$M_2={0:2.{prec}f}$ Mjup\n'.format(m2_MJ, prec=2)
else:
orbit_descr = 'default'
orbit_description[planet_index] = orbit_descr
plot_dict = {}
# plot_dict['model_parameters'] = {0: attribute_dict}
plot_dict['model_parameters'] = model_parameters
plot_dict['linear_coefficients'] = {'matrix': orbit.coeffMatrix} # dict ('matrix', 'table')
plot_dict['data_type'] = '1d'
if hasattr(iad, 'xi'):
plot_dict['data_type'] = 'gaia_2d'
else:
plot_dict['data_type'] = '1d'
plot_dict['scan_angle_definition'] = scan_angle_definition
for key in iad.epoch_data.colnames:
if '_obs' in key:
new_key = key.replace('_obs', '')
if new_key == 'errda_mas':
new_key = 'sigma_da_mas'
iad.epoch_data[new_key] = iad.epoch_data[key]
plot_dict['data'] = iad
# iad.epoch_data.pprint()
axp = pystrometry.AstrometricOrbitPlotter(plot_dict)
axp.print_residual_statistics()
n_curve = 1500
timestamps_curve_2d = np.linspace(np.min(iad.epoch_data['MJD']), np.max(iad.epoch_data['MJD']), n_curve)
axp.t_curve_MJD = timestamps_curve_2d
if 'phot_g_mean_mag' in selected_systems.colnames:
mag_str = ' $G$={:2.1f}'.format(selected_systems['phot_g_mean_mag'][index])
else:
mag_str = ''
if mapping_dr3id_to_starname is not None:
axp.title = 'Gaia DR3 {} ({}{})'.format(source_id, mapping_dr3id_to_starname[source_id], mag_str)
name_seed = 'DR3_{}_{}'.format(source_id, mapping_dr3id_to_starname[source_id].replace('/', '-'))
else:
name_seed = 'DR3_{}'.format(source_id)
argument_dict = {'plot_dir': plot_dir, 'ppm_panel': True, 'frame_residual_panel': True,
'orbit_only_panel': True, 'ppm_description': 'default', 'epoch_omc_description': 'default',
'orbit_description': orbit_description, 'arrow_offset_x': +100, 'arrow_offset_y': +100,
'name_seed': name_seed, 'scan_angle_definition': scan_angle_definition}
argument_dict['save_plot'] = True
argument_dict['omc_panel'] = True
argument_dict['orbit_only_panel'] = False
# argument_dict['orbit_only_panel'] = True
# argument_dict['make_condensed_summary_figure'] = True
# argument_dict['make_xy_residual_figure'] = True
argument_dict['make_condensed_summary_figure'] = False
argument_dict['make_xy_residual_figure'] = False
argument_dict['make_1d_overview_figure'] = True
argument_dict['excess_noise'] = selected_systems['excessNoise_mas'][index]
argument_dict['merit_function'] = selected_systems['meritFunction'][index]
if show_plot:
axp.plot(argument_dict=argument_dict)
if rv is not None:
from ..pystrometry import plot_rv_data
my_orbit = copy.deepcopy(orbit)
# my_orbit.m2_MJ = orbit.m2_MJ/10.
plot_rv_data(rv, orbit_system=my_orbit, n_orbit=np.ceil(np.ptp(rv['MJD'])/orbit.P_day)+1)
pl.show()
return axp
def show_best_solution(file, out_dir):
"""Make figures showing the MIKS best solution.
Parameters
----------
file : str
csv file containing solutions
Returns
-------
"""
data = Table.read(file, format='csv', data_start=2)
units_table = Table.read(file, format='csv', data_start=1)
units = {key: str(value) for key, value in zip(units_table[0].colnames, [units_table[0][c] for c in units_table.colnames])}
# apply cuts on data
threshold = 1e7
for colname in data.colnames:
if colname != 'sourceId':
if np.any(np.abs(data[colname]) > threshold):
data.remove_rows(np.where(np.abs(data[colname]) > threshold)[0])
print('Eliminated {} of {} rows with values > {:1.0e}'.format(len(units_table)-len(data), len(data), threshold))
plot_columns_simple(data, os.path.join(out_dir, 'best_solution_simple_plots'),
name_seed='best_solution', units=units)
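# Hedged usage sketch (added for illustration; the file name and output directory below are
# assumptions, not part of the original module):
#
#     show_best_solution('best_solutions.csv', '/tmp/miks_figures')
#
# The csv is expected to carry a units row as its second line (hence data_start=2 above), which
# is why the same file is read twice: once for the values and once for the units.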
``` |
{
"source": "Johannes-Sahlmann/uhelpers",
"score": 2
} |
#### File: uhelpers/uhelpers/table_helpers.py
```python
import os
import numpy as np
import pylab as pl
def print_column_statistics(t):
"""
print basic statistics of columns in astropy table
"""
for colnam in t.colnames:
try:
print('Mean %s : %3.3f median %3.3f std %3.3f' % (colnam, np.mean(t[colnam]), np.median(t[colnam]), np.std(t[colnam])))
        except Exception:
            # columns that are not numeric are silently skipped
            pass
def plot_columns_simple(t, plot_dir, save_plot=True, name_seed='table', selected_columns=None,
overwrite=False, show_plot=False, highlight_index=None, units=None):
"""Plot astropy column values versus row number.
:param t:
:param plot_dir:
:return:
"""
for colnam in t.colnames:
fig_name = os.path.join(plot_dir, '%s_%s.pdf' % (name_seed, colnam))
if (not os.path.isfile(fig_name)) | (overwrite) | (show_plot):
if selected_columns is None:
pass
elif (selected_columns is not None) & (colnam not in selected_columns):
continue
try:
                if t[colnam].dtype == 'object':
                    coord = np.array(t[colnam]).astype(float)
else:
coord = np.array(t[colnam])
if units is None:
unit = t[colnam].unit
else:
unit = units[colnam]
                pl.figure(figsize=(6, 6), facecolor='w', edgecolor='k')
                pl.clf()
pl.plot(coord, 'bo')
if highlight_index is not None:
pl.plot(highlight_index, coord[highlight_index], 'go', ms=20)
pl.title('%s (%s)' % (colnam, unit))
pl.ylabel('%s (%s)' % (colnam, unit))
if show_plot:
pl.show()
if save_plot:
if not os.path.isdir(plot_dir):
os.makedirs(plot_dir)
fig_name = os.path.join(plot_dir, '%s_%s.pdf' % (name_seed, colnam))
                    # matplotlib's savefig has no 'overwrite' keyword; overwriting is already
                    # governed by the os.path.isfile check above
                    pl.savefig(fig_name, transparent=True, bbox_inches='tight', pad_inches=0.05)
            except Exception as exception:
                print('Exception occurred: {}'.format(exception))
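# Hedged usage sketch (added for illustration; the table contents and output directory are assumed):
#
#     from astropy.table import Table
#     t = Table({'ra_deg': [10.1, 10.2, 10.3], 'parallax_mas': [5.0, 5.1, 4.9]})
#     plot_columns_simple(t, '/tmp/table_plots', name_seed='demo',
#                         units={'ra_deg': 'deg', 'parallax_mas': 'mas'})
#
# One PDF per column is written to plot_dir; pass selected_columns to restrict the output.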
```
#### File: uhelpers/tests/test_archive_helpers.py
```python
import netrc
import os
from astropy.table import Table
import pytest
from ..archive_helpers import get_exoplanet_orbit_database, gacs_list_query
local_dir = os.path.dirname(os.path.abspath(__file__))
ON_TRAVIS = os.environ.get('TRAVIS') == 'true'
@pytest.mark.skipif(ON_TRAVIS, reason='timeout issue.')
def test_eod():
"""Test the access to the exoplanet orbit database."""
catalog = get_exoplanet_orbit_database(local_dir, verbose=False)
assert len(catalog) > 100
@pytest.mark.skipif(ON_TRAVIS, reason='Requires access to .netrc file.')
def test_gacs_list_query():
# print('test gacs list query')
# Define which host in the .netrc file to use
HOST = 'http://gea.esac.esa.int'
# Read from the .netrc file in your home directory
secrets = netrc.netrc()
username, account, password = secrets.authenticators(HOST)
out_dir = os.path.dirname(__file__)
T = Table()
id_str_input_table = 'ID_HIP'
T[id_str_input_table] = [1, 2, 3, 4, 5, 6, 7]
gacs_table_name = 'tgas_source'
id_str_gacs_table = 'hip'
input_table_name = 'hip_star_list'
input_table = os.path.join(out_dir, 'hip_star_list.vot')
T[[id_str_input_table]].write(input_table, format='votable', overwrite=1)
T_out = gacs_list_query(username, password, out_dir, input_table, input_table_name, gacs_table_name,
id_str_gacs_table, id_str_input_table)
T_out.pprint()
``` |
{
"source": "johannessarpola/django-pastebin",
"score": 2
} |
#### File: pastebin/core/app_messages.py
```python
import logging
from django.contrib import messages
class Messenger:
"""
    Simple wrapper for the django messages
"""
def __init__(self, domain):
super().__init__()
self.domain = domain
        self.logger = logging.getLogger(__name__)
def add_info_to_req(self, request, message):
messages.add_message(request, messages.INFO, message)
def add_warn_to_req(self, request, message):
messages.add_message(request, messages.WARNING, message)
def add_err_to_req(self, request, message):
messages.add_message(request, messages.ERROR, message)
self.logger.warning("Err message raised in domain {} with message: {}".format(self.domain, message))
```
#### File: pastebin/forms/user_forms.py
```python
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.contrib.auth.models import User
from django.forms import ModelForm
from pastebin.models import UserExtraInfo
class RegistrationForm(UserCreationForm):
username = forms.CharField(max_length=30)
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=30)
email = forms.EmailField(max_length=75)
class Meta:
model = User
fields = ("username", "first_name", "last_name", "email",)
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.username = self.cleaned_data["username"]
user.first_name = self.cleaned_data["first_name"]
user.last_name = self.cleaned_data["last_name"]
user.email = self.cleaned_data["email"]
if commit:
            user.save()
return user
class ExtraEditForm(ModelForm):
class Meta:
model = UserExtraInfo
fields = ['bio']
labels = {
'bio': ('Your bio')
}
help_texts = {
'bio': 'Let us know more about you'
}
def update(self, user):
from pastebin.core.user_retriever import UserRetriever
e_info = UserRetriever().get_user_extra_info_if_exists(user)
if e_info is None:
e_info = UserExtraInfo()
e_info.paste_count = 0
e_info.bio = self.cleaned_data['bio']
e_info.save()
def save(self, commit=True):
from pastebin.core.user_retriever import UserRetriever
e_info = UserRetriever().get_user_extra_info_if_exists(self.user)
if e_info is None:
e_info = UserExtraInfo()
e_info.paste_count = 0
e_info.bio = self.cleaned_data['bio']
e_info.save(commit)
class UserEditForm(ModelForm):
class Meta:
model = User
fields = ['email', 'first_name', 'last_name']
labels = {
'email': ('Your email'),
'first_name': ('Your first name'),
'last_name': ('Your last name'),
}
def update_with_user(self, user:User):
self.user = User.objects.get_or_create(id=user.id)[0]
self.update()
def update(self, commit=True):
user = self.user
user.first_name = self.cleaned_data["first_name"]
user.last_name = self.cleaned_data["last_name"]
user.email = self.cleaned_data["email"]
user.save(commit)
def create_user_edit_form_with_initial(user):
u = UserEditForm(
initial=
{'email': user.email,
'first_name': user.first_name,
'last_name': user.last_name}
)
return u
def create_user_extra_edit_form_with_initial(user):
from pastebin.core.user_retriever import UserRetriever
retriever = UserRetriever()
e_info = retriever.get_user_extra_info_if_exists(user)
return ExtraEditForm(initial={'bio': e_info.bio})
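# Hedged usage sketch (added for illustration; the view and template names are assumptions):
#
#     def edit_profile(request):
#         user_form = create_user_edit_form_with_initial(request.user)
#         extra_form = create_user_extra_edit_form_with_initial(request.user)
#         return render(request, 'profile_edit.html',
#                       {'user_form': user_form, 'extra_form': extra_form})
#
# On POST, UserEditForm.update_with_user(request.user) and ExtraEditForm.update(request.user)
# persist the edited fields.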
``` |
{
"source": "johannes-scharlach/pod-control",
"score": 2
} |
#### File: pod-control/src/script.py
```python
from __future__ import division, print_function
import math
import numpy as np
from scipy import linalg
from matplotlib.pyplot import plot, subplot, legend, figure
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import example2sys as e2s
import analysis
def optionPricingScript():
N = 1000
option = "call"
r = 0.05
T = 1
K = 10.5
L = 30
integrator = "dopri5"
integrator_options = {"nsteps" : 2000}
sys = e2s.optionPricing(N=N, option=option,
r=r, T=T, K=K, L=L)
sys.integrator = integrator
sys.integrator_options = integrator_options
timeSteps = list(np.linspace(0, T, 10))
res = sys(timeSteps)
fig = figure()
ax = fig.add_subplot(111, projection='3d')
N2 = int(1.5*K*N/L)
X, Y, Z = [], [], []
for i in range(len(timeSteps)):
X.append([timeSteps[i] for _ in range(N2)])
Y.append([j*L/N for j in range(N2)])
Z.append(list(res[i])[:N2])
ax.plot_surface(X, Y, Z)
def controllableHeatSystemPlot():
X = [[0.0, 0.0, 0.0, 0.0, 0.0],
[0.042646464646464648, 0.042646464646464648, 0.042646464646464648, 0.042646464646464648, 0.042646464646464648],
[0.085292929292929295, 0.085292929292929295, 0.085292929292929295, 0.085292929292929295, 0.085292929292929295],
[0.12793939393939394, 0.12793939393939394, 0.12793939393939394, 0.12793939393939394, 0.12793939393939394],
[0.17058585858585859, 0.17058585858585859, 0.17058585858585859, 0.17058585858585859, 0.17058585858585859],
[0.21323232323232325, 0.21323232323232325, 0.21323232323232325, 0.21323232323232325, 0.21323232323232325],
[0.25587878787878787, 0.25587878787878787, 0.25587878787878787, 0.25587878787878787, 0.25587878787878787],
[0.29852525252525253, 0.29852525252525253, 0.29852525252525253, 0.29852525252525253, 0.29852525252525253],
[0.34117171717171718, 0.34117171717171718, 0.34117171717171718, 0.34117171717171718, 0.34117171717171718],
[0.38381818181818184, 0.38381818181818184, 0.38381818181818184, 0.38381818181818184, 0.38381818181818184],
[0.42646464646464649, 0.42646464646464649, 0.42646464646464649, 0.42646464646464649, 0.42646464646464649],
[0.46911111111111115, 0.46911111111111115, 0.46911111111111115, 0.46911111111111115, 0.46911111111111115],
[0.51175757575757574, 0.51175757575757574, 0.51175757575757574, 0.51175757575757574, 0.51175757575757574],
[0.55440404040404045, 0.55440404040404045, 0.55440404040404045, 0.55440404040404045, 0.55440404040404045],
[0.59705050505050505, 0.59705050505050505, 0.59705050505050505, 0.59705050505050505, 0.59705050505050505],
[0.63969696969696976, 0.63969696969696976, 0.63969696969696976, 0.63969696969696976, 0.63969696969696976],
[0.68234343434343436, 0.68234343434343436, 0.68234343434343436, 0.68234343434343436, 0.68234343434343436],
[0.72498989898989896, 0.72498989898989896, 0.72498989898989896, 0.72498989898989896, 0.72498989898989896],
[0.76763636363636367, 0.76763636363636367, 0.76763636363636367, 0.76763636363636367, 0.76763636363636367],
[0.81028282828282827, 0.81028282828282827, 0.81028282828282827, 0.81028282828282827, 0.81028282828282827],
[0.85292929292929298, 0.85292929292929298, 0.85292929292929298, 0.85292929292929298, 0.85292929292929298],
[0.89557575757575758, 0.89557575757575758, 0.89557575757575758, 0.89557575757575758, 0.89557575757575758],
[0.93822222222222229, 0.93822222222222229, 0.93822222222222229, 0.93822222222222229, 0.93822222222222229],
[0.98086868686868689, 0.98086868686868689, 0.98086868686868689, 0.98086868686868689, 0.98086868686868689],
[1.0235151515151515, 1.0235151515151515, 1.0235151515151515, 1.0235151515151515, 1.0235151515151515],
[1.0661616161616161, 1.0661616161616161, 1.0661616161616161, 1.0661616161616161, 1.0661616161616161],
[1.1088080808080809, 1.1088080808080809, 1.1088080808080809, 1.1088080808080809, 1.1088080808080809],
[1.1514545454545455, 1.1514545454545455, 1.1514545454545455, 1.1514545454545455, 1.1514545454545455],
[1.1941010101010101, 1.1941010101010101, 1.1941010101010101, 1.1941010101010101, 1.1941010101010101],
[1.2367474747474747, 1.2367474747474747, 1.2367474747474747, 1.2367474747474747, 1.2367474747474747],
[1.2793939393939395, 1.2793939393939395, 1.2793939393939395, 1.2793939393939395, 1.2793939393939395],
[1.3220404040404041, 1.3220404040404041, 1.3220404040404041, 1.3220404040404041, 1.3220404040404041],
[1.3646868686868687, 1.3646868686868687, 1.3646868686868687, 1.3646868686868687, 1.3646868686868687],
[1.4073333333333333, 1.4073333333333333, 1.4073333333333333, 1.4073333333333333, 1.4073333333333333],
[1.4499797979797979, 1.4499797979797979, 1.4499797979797979, 1.4499797979797979, 1.4499797979797979],
[1.4926262626262627, 1.4926262626262627, 1.4926262626262627, 1.4926262626262627, 1.4926262626262627],
[1.5352727272727273, 1.5352727272727273, 1.5352727272727273, 1.5352727272727273, 1.5352727272727273],
[1.5779191919191919, 1.5779191919191919, 1.5779191919191919, 1.5779191919191919, 1.5779191919191919],
[1.6205656565656565, 1.6205656565656565, 1.6205656565656565, 1.6205656565656565, 1.6205656565656565],
[1.6632121212121214, 1.6632121212121214, 1.6632121212121214, 1.6632121212121214, 1.6632121212121214],
[1.705858585858586, 1.705858585858586, 1.705858585858586, 1.705858585858586, 1.705858585858586],
[1.7485050505050506, 1.7485050505050506, 1.7485050505050506, 1.7485050505050506, 1.7485050505050506],
[1.7911515151515152, 1.7911515151515152, 1.7911515151515152, 1.7911515151515152, 1.7911515151515152],
[1.8337979797979798, 1.8337979797979798, 1.8337979797979798, 1.8337979797979798, 1.8337979797979798],
[1.8764444444444446, 1.8764444444444446, 1.8764444444444446, 1.8764444444444446, 1.8764444444444446],
[1.9190909090909092, 1.9190909090909092, 1.9190909090909092, 1.9190909090909092, 1.9190909090909092],
[1.9617373737373738, 1.9617373737373738, 1.9617373737373738, 1.9617373737373738, 1.9617373737373738],
[2.0043838383838386, 2.0043838383838386, 2.0043838383838386, 2.0043838383838386, 2.0043838383838386],
[2.047030303030303, 2.047030303030303, 2.047030303030303, 2.047030303030303, 2.047030303030303],
[2.0896767676767678, 2.0896767676767678, 2.0896767676767678, 2.0896767676767678, 2.0896767676767678],
[2.1323232323232322, 2.1323232323232322, 2.1323232323232322, 2.1323232323232322, 2.1323232323232322],
[2.174969696969697, 2.174969696969697, 2.174969696969697, 2.174969696969697, 2.174969696969697],
[2.2176161616161618, 2.2176161616161618, 2.2176161616161618, 2.2176161616161618, 2.2176161616161618],
[2.2602626262626262, 2.2602626262626262, 2.2602626262626262, 2.2602626262626262, 2.2602626262626262],
[2.302909090909091, 2.302909090909091, 2.302909090909091, 2.302909090909091, 2.302909090909091],
[2.3455555555555558, 2.3455555555555558, 2.3455555555555558, 2.3455555555555558, 2.3455555555555558],
[2.3882020202020202, 2.3882020202020202, 2.3882020202020202, 2.3882020202020202, 2.3882020202020202],
[2.430848484848485, 2.430848484848485, 2.430848484848485, 2.430848484848485, 2.430848484848485],
[2.4734949494949494, 2.4734949494949494, 2.4734949494949494, 2.4734949494949494, 2.4734949494949494],
[2.5161414141414142, 2.5161414141414142, 2.5161414141414142, 2.5161414141414142, 2.5161414141414142],
[2.5587878787878791, 2.5587878787878791, 2.5587878787878791, 2.5587878787878791, 2.5587878787878791],
[2.6014343434343434, 2.6014343434343434, 2.6014343434343434, 2.6014343434343434, 2.6014343434343434],
[2.6440808080808083, 2.6440808080808083, 2.6440808080808083, 2.6440808080808083, 2.6440808080808083],
[2.6867272727272726, 2.6867272727272726, 2.6867272727272726, 2.6867272727272726, 2.6867272727272726],
[2.7293737373737375, 2.7293737373737375, 2.7293737373737375, 2.7293737373737375, 2.7293737373737375],
[2.7720202020202023, 2.7720202020202023, 2.7720202020202023, 2.7720202020202023, 2.7720202020202023],
[2.8146666666666667, 2.8146666666666667, 2.8146666666666667, 2.8146666666666667, 2.8146666666666667],
[2.8573131313131315, 2.8573131313131315, 2.8573131313131315, 2.8573131313131315, 2.8573131313131315],
[2.8999595959595958, 2.8999595959595958, 2.8999595959595958, 2.8999595959595958, 2.8999595959595958],
[2.9426060606060607, 2.9426060606060607, 2.9426060606060607, 2.9426060606060607, 2.9426060606060607],
[2.9852525252525255, 2.9852525252525255, 2.9852525252525255, 2.9852525252525255, 2.9852525252525255],
[3.0278989898989899, 3.0278989898989899, 3.0278989898989899, 3.0278989898989899, 3.0278989898989899],
[3.0705454545454547, 3.0705454545454547, 3.0705454545454547, 3.0705454545454547, 3.0705454545454547],
[3.1131919191919191, 3.1131919191919191, 3.1131919191919191, 3.1131919191919191, 3.1131919191919191],
[3.1558383838383839, 3.1558383838383839, 3.1558383838383839, 3.1558383838383839, 3.1558383838383839],
[3.1984848484848487, 3.1984848484848487, 3.1984848484848487, 3.1984848484848487, 3.1984848484848487],
[3.2411313131313131, 3.2411313131313131, 3.2411313131313131, 3.2411313131313131, 3.2411313131313131],
[3.2837777777777779, 3.2837777777777779, 3.2837777777777779, 3.2837777777777779, 3.2837777777777779],
[3.3264242424242427, 3.3264242424242427, 3.3264242424242427, 3.3264242424242427, 3.3264242424242427],
[3.3690707070707071, 3.3690707070707071, 3.3690707070707071, 3.3690707070707071, 3.3690707070707071],
[3.4117171717171719, 3.4117171717171719, 3.4117171717171719, 3.4117171717171719, 3.4117171717171719],
[3.4543636363636363, 3.4543636363636363, 3.4543636363636363, 3.4543636363636363, 3.4543636363636363],
[3.4970101010101011, 3.4970101010101011, 3.4970101010101011, 3.4970101010101011, 3.4970101010101011],
[3.5396565656565659, 3.5396565656565659, 3.5396565656565659, 3.5396565656565659, 3.5396565656565659],
[3.5823030303030303, 3.5823030303030303, 3.5823030303030303, 3.5823030303030303, 3.5823030303030303],
[3.6249494949494951, 3.6249494949494951, 3.6249494949494951, 3.6249494949494951, 3.6249494949494951],
[3.6675959595959595, 3.6675959595959595, 3.6675959595959595, 3.6675959595959595, 3.6675959595959595],
[3.7102424242424243, 3.7102424242424243, 3.7102424242424243, 3.7102424242424243, 3.7102424242424243],
[3.7528888888888892, 3.7528888888888892, 3.7528888888888892, 3.7528888888888892, 3.7528888888888892],
[3.7955353535353535, 3.7955353535353535, 3.7955353535353535, 3.7955353535353535, 3.7955353535353535],
[3.8381818181818184, 3.8381818181818184, 3.8381818181818184, 3.8381818181818184, 3.8381818181818184],
[3.8808282828282827, 3.8808282828282827, 3.8808282828282827, 3.8808282828282827, 3.8808282828282827],
[3.9234747474747476, 3.9234747474747476, 3.9234747474747476, 3.9234747474747476, 3.9234747474747476],
[3.9661212121212124, 3.9661212121212124, 3.9661212121212124, 3.9661212121212124, 3.9661212121212124],
[4.0087676767676772, 4.0087676767676772, 4.0087676767676772, 4.0087676767676772, 4.0087676767676772],
[4.0514141414141411, 4.0514141414141411, 4.0514141414141411, 4.0514141414141411, 4.0514141414141411],
[4.094060606060606, 4.094060606060606, 4.094060606060606, 4.094060606060606, 4.094060606060606],
[4.1367070707070708, 4.1367070707070708, 4.1367070707070708, 4.1367070707070708, 4.1367070707070708],
[4.1793535353535356, 4.1793535353535356, 4.1793535353535356, 4.1793535353535356, 4.1793535353535356],
[4.2220000000000004, 4.2220000000000004, 4.2220000000000004, 4.2220000000000004, 4.2220000000000004]]
Y = [[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0],
[0.0, 0.25, 0.5, 0.75, 1.0]]
Z = [[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0015430421821912409, 0.010984840303416558, 0.0020332564133142194, 0.0],
[0.0, 0.0077968031683554803, 0.031022894076792776, 0.0095067953629537523, 0.0],
[0.0, 0.017571100997872374, 0.056329350774577791, 0.020894697941237512, 0.0],
[0.0, 0.029644523981234506, 0.085018102982247934, 0.034833660319807953, 0.0],
[0.0, 0.043195094325510421, 0.11584040207148256, 0.050409700797255916, 0.0],
[0.0, 0.057672581359243587, 0.14795469568571279, 0.067010746357283935, 0.0],
[0.0, 0.07270548263204496, 0.18078672764167972, 0.084223268287766273, 0.0],
[0.0, 0.088039792052027444, 0.21393804243083081, 0.10176421384888192, 0.0],
[0.0, 0.10349876380927669, 0.24712588654189302, 0.11943636499456661, 0.0],
[0.0, 0.11895653221336827, 0.28014381808580069, 0.13709901474700226, 0.0],
[0.0, 0.13432079015019452, 0.31283585331932157, 0.1546487476266834, 0.0],
[0.0, 0.14952144059037062, 0.34507953948481418, 0.17200682793649588, 0.0],
[0.0, 0.16450315589995371, 0.37677486699430063, 0.18911094315514743, 0.0],
[0.0, 0.17922051143527856, 0.40783702823439999, 0.20590979768631218, 0.0],
[0.0, 0.19363480456208157, 0.43819169441897449, 0.22235958331588901, 0.0],
[0.0, 0.20771198277894445, 0.46777194921102894, 0.23842167868156311, 0.0],
[0.0, 0.22142129827777562, 0.49651630713174599, 0.25406115700727655, 0.0],
[0.0, 0.23473443992589826, 0.52436744457774742, 0.26924582319099677, 0.0],
[0.0, 0.24762497783822562, 0.55127139704663042, 0.28394559846777639, 0.0],
[0.0, 0.26006801296359128, 0.57717706175773409, 0.29813213246815362, 0.0],
[0.0, 0.27203996063490388, 0.60203589943386715, 0.31177856414052302, 0.0],
[0.0, 0.28351842160372109, 0.6258017657374888, 0.32485937970945039, 0.0],
[0.0, 0.29448210990189883, 0.64843082649974049, 0.33735033372936629, 0.0],
[0.0, 0.30491081743362214, 0.66988152666691536, 0.34922841085301332, 0.0],
[0.0, 0.31478540204695837, 0.69011459312122514, 0.36047181362743042, 0.0],
[0.0, 0.32408779037917307, 0.7090930583279289, 0.37105996663018903, 0.0],
[0.0, 0.33280098973038819, 0.72678229618706036, 0.3809735305719873, 0.0],
[0.0, 0.34090910517899503, 0.74315006439792497, 0.39019442215392058, 0.0],
[0.0, 0.34839735943285438, 0.75816654956068796, 0.39870583689695943, 0.0],
[0.0, 0.35525211375601395, 0.77180441250577947, 0.40649227309736202, 0.0],
[0.0, 0.36146088886589928, 0.78403883217408843, 0.41353955567970335, 0.0],
[0.0, 0.36701238506234735, 0.79484754692107229, 0.4198348591260308, 0.0],
[0.0, 0.37189650109177119, 0.80421089248205735, 0.42536672892845612, 0.0],
[0.0, 0.37610435141009496, 0.81211183607827331, 0.43012510119049502, 0.0],
[0.0, 0.37962828161446155, 0.81853600630478551, 0.43410132012134051, 0.0],
[0.0, 0.38246188188553465, 0.82347171855153034, 0.43728815324631459, 0.0],
[0.0, 0.384599998330243, 0.82690999578312274, 0.43967980421096403, 0.0],
[0.0, 0.38603874214821143, 0.8288445845557818, 0.44127192309301566, 0.0],
[0.0, 0.38677549656824151, 0.82927196618698118, 0.44206161416253364, 0.0],
[0.0, 0.3868089215149445, 0.82819136301673379, 0.44204744105263316, 0.0],
[0.0, 0.3861389559948239, 0.82560473974593651, 0.441229429298447, 0.0],
[0.0, 0.38476681816655461, 0.82151679980284376, 0.43960906625087359, 0.0],
[0.0, 0.38269500309466992, 0.81593497674358417, 0.43718929835072184, 0.0],
[0.0, 0.37992727820182209, 0.80886942071885992, 0.43397452574590184, 0.0],
[0.0, 0.37646867640443799, 0.80033297999623865, 0.42997059428092405, 0.0],
[0.0, 0.37232548695489098, 0.79034117758772138, 0.42518478485712707, 0.0],
[0.0, 0.36750524399946177, 0.7789121830139476, 0.41962580018835527, 0.0],
[0.0, 0.36201671287520065, 0.76606677925987521, 0.41330374897173755, 0.0],
[0.0, 0.35586987416843641, 0.75182832497876328, 0.40623012750354842, 0.0],
[0.0, 0.34907590556441043, 0.73622271201392342, 0.39841779877223438, 0.0],
[0.0, 0.34164716152033325, 0.71927831831442013, 0.38988096906688058, 0.0],
[0.0, 0.3335971507989488, 0.70102595633053044, 0.38063516214316911, 0.0],
[0.0, 0.32494051190318696, 0.68149881698238857, 0.37069719099389575, 0.0],
[0.0, 0.3156929864566223, 0.66073240930377652, 0.36008512727516379, 0.0],
[0.0, 0.30587139057801288, 0.63876449587062045, 0.34881826844390262, 0.0],
[0.0, 0.29549358430201472, 0.61563502413167159, 0.33691710266636998, 0.0],
[0.0, 0.28457843910160513, 0.5913860537661354, 0.32440327156147158, 0.0],
[0.0, 0.27314580357130036, 0.56606168020038372, 0.31129953084660689, 0.0],
[0.0, 0.26121646733355131, 0.53970795442280095, 0.29762970895759955, 0.0],
[0.0, 0.24881212323390947, 0.51237279924251911, 0.28341866371798785, 0.0],
[0.0, 0.2359553278937834, 0.48410592214442127, 0.26869223713639512, 0.0],
[0.0, 0.22266946069243612, 0.45495872489876998, 0.25347720841425392, 0.0],
[0.0, 0.20897868125286265, 0.4249842100898979, 0.23780124524927942, 0.0],
[0.0, 0.19490788550882757, 0.39423688473389085, 0.22169285352327664, 0.0],
[0.0, 0.18048266043297559, 0.3627726611605665, 0.20518132546573875, 0.0],
[0.0, 0.16572923750832091, 0.33064875533995636, 0.18829668638753025, 0.0],
[0.0, 0.15067444502773722, 0.29792358283819992, 0.17106964008149153, 0.0],
[0.0, 0.13534565930818188, 0.26465665259203697, 0.15353151298925896, 0.0],
[0.0, 0.11977075490838322, 0.230908458695081, 0.13571419723581657, 0.0],
[0.0, 0.1039780539405448, 0.19674037039269046, 0.11765009263534411, 0.0],
[0.0, 0.087996274568271249, 0.16221452048551094, 0.099372047773770608, 0.0],
[0.0, 0.07185447878459332, 0.12739369234491038, 0.080913300274946837, 0.0],
[0.0, 0.055582019565875364, 0.092341205746871796, 0.062307416358138933, 0.0],
[0.0, 0.039208487502702991, 0.057120801738880921, 0.043588229791245978, 0.0],
[0.0, 0.022763657064014849, 0.021796526833578658, 0.024789780285256892, 0.0],
[0.0, 0.0062774318518430709, -0.013567384422752614, 0.0059462522661637473, 0.0],
[0.0, -0.010220215191933337, -0.04890663319809832, -0.012908081383800884, 0.0],
[0.0, -0.02669927968060793, -0.084156952846915462, -0.031738939613953576, 0.0],
[0.0, -0.043129799752941407, -0.11925424693220481, -0.050512074899474908, 0.0],
[0.0, -0.059481879250624203, -0.15413466706163412, -0.069193369367463953, 0.0],
[0.0, -0.075725804199668997, -0.18873481636687092, -0.087748828393692913, 0.0],
[0.0, -0.091832018127376069, -0.22299175029602458, -0.10614472960964851, 0.0],
[0.0, -0.10777124506704452, -0.25684319255271576, -0.12434760743161492, 0.0],
[0.0, -0.12351449346645146, -0.29022757594174492, -0.14232436861789335, 0.0],
[0.0, -0.13903313884811175, -0.32308419831745361, -0.16004231924217194, 0.0],
[0.0, -0.15429895935836982, -0.35535330869787463, -0.17746924244904311, 0.0],
[0.0, -0.16928419603115316, -0.38697622908127116, -0.19457344711834335, 0.0],
[0.0, -0.18396159844962998, -0.41789545405677297, -0.21132383083627407, 0.0],
[0.0, -0.19830447686780406, -0.44805475915804754, -0.22768993360463352, 0.0],
[0.0, -0.21228674938348249, -0.47739930109904799, -0.24364199474027248, 0.0],
[0.0, -0.22588299008318258, -0.50587571855951297, -0.25915100619854875, 0.0],
[0.0, -0.23906847490250127, -0.5334322286663381, -0.27418876573917694, 0.0],
[0.0, -0.25181922678107144, -0.56001872144634102, -0.28872792799346603, 0.0],
[0.0, -0.26411205916198383, -0.58558685079873019, -0.30274205430303469, 0.0],
[0.0, -0.27592461820777736, -0.61009012248729777, -0.31620566073881917, 0.0],
[0.0, -0.28723542342261288, -0.63348397864773032, -0.32909426447222562, 0.0],
[0.0, -0.29802390672727019, -0.65572587883413891, -0.34138442828040072, 0.0],
[0.0, -0.30827044985396185, -0.67677537736604243, -0.35305380317343249, 0.0],
[0.0, -0.31795642002470548, -0.69659419688197033, -0.36408116903074589, 0.0]]
fig = figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X[:15], Y[:15], Z[:15], rstride=1, cstride=1)
plt.show()
analysis.controllableHeatSystemComparison(control="sin", T=4.)
analysis.controllableHeatSystemComparison(control="hat", T=1.)
analysis.controllableHeatSystemComparison(control="hat", T=4.)
analysis.controllableHeatSystemComparison(control="BLaC", T=1.)
analysis.controllableHeatSystemComparison(control="BLaC", T=4.)
analysis.controllableHeatSystemComparison(control="one", T=.1)
analysis.controllableHeatSystemComparison(control="identity", T=.1)
analysis.controllableHeatSystemComparison(control="identity", T=4.)
```
#### File: pod-control/src/tests.py
```python
from __future__ import division, print_function
import numpy as np
import math
from pod import *
from example2sys import *
import unittest
from numpy.testing import assert_array_equal, assert_array_almost_equal
def _number_to_array(s):
return np.array([s])
class testPod(unittest.TestCase):
"""test lss functionalities"""
def test_abcd_normalize(self):
A, B = np.zeros((5, 5)), np.ones((5, 1))
C, D = np.ones((1, 5)), np.zeros((1, 1))
sys_without_D = lss(A, B, C, None)
sys_without_A = lss(None, B, C, D)
assert_array_equal(sys_without_A.A, A)
assert_array_equal(sys_without_D.D, D)
def test_zero_control(self):
A, B = np.zeros((5, 5)), np.ones((5, 1))
C, D = np.ones((1, 5)), np.zeros((1, 1))
sys = lss(A, B, C, D)
sys.x0 = np.ones((5, 1))
sys(2.0)
assert_array_equal(sys.x, np.ones((5, 1)))
# def testIdentity(self):
# N = 5
# T = range(N)
# U = [0., 1., 0., 2., -1.]
# U = map(_number_to_array, U)
# R = [0., 5., 5., 15., 10.]
# R = map(_number_to_array, R)
# A, B = None, np.ones((5, 1))
# C, D = np.ones((1, 5)), None
# sys = lss(A,B,C,D)
# for i in range(1, N):
# self.assertAlmostEqual(sys(T[i], U[i]), R[i])
# assert sys.t == T[i]
# sys.setupODE()
# timeWithSteps = [list(np.linspace(t-1, t, 2**t)) for t in T]
# for i in range(1, N):
# results = sys(timeWithSteps[i], U[i])
# self.assertAlmostEqual(results[-1], R[i])
# assert sys.t == timeWithSteps[i][-1]
# R = [r+u for r, u in zip(R, U)]
# R = map(np.array, R)
# D = np.ones((1, 1))
# sys = lss(A, B, C, D)
# for i in range(1, N):
# self.assertAlmostEqual(sys(T[i], U[i]), R[i])
def test_f(self):
A = [[1., 1.],
[0., 1.]]
B = [[1.],
[1.]]
C = [[1., 1.]]
D = [[0.]]
x = [0., 0.]
u = [1.]
sys = lss(A, B, C, D)
assert_array_equal(sys.f(0., np.array(x), u), np.array([1., 1.]))
x = [1., 1.]
u = [0.]
assert_array_equal(sys.f(0., np.array(x), u), np.array([2., 1.]))
x = [10., 2.]
u = [-3]
assert_array_equal(sys.f(0., np.array(x), u),
np.array([10+2-3., 2-3.]))
def test_truncation_functions(self):
"""Reduce system of order 3 and check truncation matrices.
Matrix `A` is in real schur form with all eigenvalues in the left half
of the complex plane. The system is reduced from order 3 to orders 1
and 2. Order, number of inputs and outputs and the pseudo inverse
property of T and Ti of the systems are checked.
"""
A = np.array([[-6, -3, 1],
[0, -2.2, 6],
[0, 0, -0.5]])
B = np.array([[1.],
[1.],
[1.]])
C = np.array([[2., 1., 0.002]])
D = None
sys = lss(A, B, C, D)
sys.x0 = np.ones((3,))
for reduction in lss.reduction_functions:
            if reduction == "inoptimal_truncation_square_root":
continue
for k in [1, 2, 3]:
rsys = lss(sys, reduction=reduction, k=k)
assert rsys.order == k
assert rsys.inputs == 1
assert rsys.outputs == 1
if hasattr(rsys, 'T'):
assert rsys.T.shape == (3, k)
assert rsys.Ti.shape == (k, 3)
assert_array_almost_equal(np.dot(rsys.Ti, rsys.T),
np.eye(k))
assert_array_almost_equal(np.dot(rsys.Ti, sys.x0),
rsys.x0)
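# --- Illustrative sketch (added for clarity; not part of the original test suite) ---
# The "pseudo inverse property" asserted above means the truncation matrices satisfy
# Ti @ T = I_k, so mapping a reduced state up to the full space and back is lossless.
# Minimal standalone demonstration with plain NumPy:
def _pseudo_inverse_property_example(n=3, k=2):
    """Build an orthonormal basis T of a k-dim subspace of R^n and check Ti @ T == I_k."""
    rng = np.random.RandomState(0)
    T, _ = np.linalg.qr(rng.randn(n, k))  # columns of T are orthonormal
    Ti = T.T                              # hence T.T is a left inverse of T
    return np.allclose(Ti.dot(T), np.eye(k))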
class testExample2sys(unittest.TestCase):
"""Test the example system generator"""
def test_rcLadder(self):
resistors = [1.0, 1.0, 1.0]
capacitors = [1.0, 1.0, 1.0]
sys = rcLadder(resistors, capacitors)
sys2 = rcLadder(resistors + [np.inf], capacitors)
assert sys.inputs == 1
assert sys.outputs == 1
assert sys.order == 3
for matrix in ['A', 'B', 'C', 'D']:
assert_array_equal(getattr(sys, matrix), getattr(sys2, matrix))
def test_thermalRCNetwork(self):
u = lambda t, x=None: np.array([1.])
C0, sys = thermalRCNetwork(1e90, 1e87, 100, 3, u)
assert sys.inputs == 1
assert sys.outputs == 1
assert sys.order == 99
self.assertAlmostEqual(sys.control(0.), np.array([1.0]))
self.assertAlmostEqual(sys.control(math.pi), np.array([1.0]))
self.assertAlmostEqual(sys.control(.5*math.pi), np.array([1.0]))
u = lambda t, x=None: np.array([math.sin(t)])
C0, sys = thermalRCNetwork(1e90, 1e87, 100, 3, u)
assert sys.inputs == 1
assert sys.outputs == 1
assert sys.order == 99
self.assertAlmostEqual(sys.control(0.), np.array([.0]))
self.assertAlmostEqual(sys.control(math.pi), np.array([.0]))
self.assertAlmostEqual(sys.control(.5*math.pi), np.array([1.0]))
def test__thermalRCNetworkCapacitors(self):
capacitors = [.000455, .00388, .0115, .0481, .0316, 2.79]
C, r, n = 1.0, 3, 6
def test__thermalRCNetworkResistors(self):
resistors = [5.52, 17.2, 55.2, 46.8, 56.7, 27.9]
R, r, n = 1.0, 3, 6
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johannesschmude/ibmpairs",
"score": 2
} |
#### File: ibmpairs/tests/test_paw.py
```python
import sys, os, time, glob
# Python unit testing
import unittest
# compare files
import filecmp
# requests module
import requests
# requests module mocking
import responses
# regular expressions
import re
# handle json
import json
# handle timestamps
import datetime
# get PAW to test
sys.path.append('.')
from ibmpairs import paw
# message logging tool
import logging
# handle ZIP files
import zipfile
# handling scientific data
import numpy
import pandas
# string type compatibility with Python 2 and 3
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
string_type = basestring
else:
string_type = str
#}}}
# fold: parameter settings{{{
# define global test parameters
TEST_DATA_DIR = 'tests/data'
PAIRS_SERVER = 'pairs.res.ibm.com'
PAIRS_BASE_URI = '/'
QUERY_ENDPOINT = 'v2/query'
STATUS_ENDPOINT = 'v2/queryjobs/'
DOWNLOAD_ENDPOINT = 'v2/queryjobs/download/'
REAL_CONNECT = False
PAIRS_USER = 'fakeUser'
PAIRS_PASSWORD = '<PASSWORD>'
PAIRS_PASSWORD_FILE_NAME = 'ibmpairspass.txt'
# read/overwrite parameters from environment
for var in (
'REAL_CONNECT',
'PAIRS_SERVER',
'PAIRS_USER',
):
if 'PAW_TESTS_'+var in os.environ:
exec(
"%s = os.environ['PAW_TESTS_%s']" % (var, var)
)
# convert types read in from environment
REAL_CONNECT = REAL_CONNECT == 'true'
# set credentials
if os.path.exists(os.path.expanduser(PAIRS_PASSWORD_FILE_NAME)):
try:
PAIRS_PASSWORD = paw.get_pairs_api_password(PAIRS_SERVER, PAIRS_USER)
except:
        PAIRS_PASSWORD = '<PASSWORD>'
PAIRS_CREDENTIALS = (PAIRS_USER, PAIRS_PASSWORD)
# }}}
# fold: test PAIRS point queries{{{
class TestPointQuery(unittest.TestCase):
"""
Test cases for querying point data from PAIRS.
"""
# fold: setup mocked environment#{{{
@classmethod
def setUpClass(cls):
# define and start PAIRS mock server
cls.pairsServerMock = responses.RequestsMock()
## define endpoint processing
def point_data_endpoint(request):
respCode = 400
payload = json.loads(request.body)
# perform some tests on payload sent
if payload['spatial']['type'] == 'point' \
and len(payload['spatial']['coordinates']) > 1 \
and len(payload['spatial']['coordinates']) % 2 == 0:
respCode = 200
# check whether a raster or vector point query is performed
if re.match('^P', payload['layers'][0]['id']) is not None:
# so far, vector queries can take a single point only
if 2==len(payload['spatial']['coordinates']):
response_body = json.load(
open(os.path.join(TEST_DATA_DIR, 'point-data-sample-response-vector.json'))
)
else:
respCode = 400
else:
# generate response body
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'point-data-sample-response-raster.json'))
)
headers = {}
return respCode, headers, json.dumps(response_body)
## add endpoint
cls.pairsServerMock.add_callback(
responses.POST,
'https://'+PAIRS_SERVER+PAIRS_BASE_URI+QUERY_ENDPOINT,
callback=point_data_endpoint,
content_type='application/json',
)
cls.pairsServerMock.start()
@classmethod
def tearDownClass(cls):
cls.pairsServerMock.stop()
#}}}
def test_from_point_query_raster(self):
"""
Test querying raster point data.
"""
# query mocked data
logging.info("TEST: Query (mocked) point data.")
# define point query
testPointQuery = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'point-data-sample-request-raster.json'))),
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
# submit point query
testPointQuery.submit()
        # for compliance with the general PAW query scheme, perform fake poll and download
testPointQuery.poll_till_finished()
testPointQuery.download()
testPointQuery.create_layers()
# try to split property string column (although having no effect, it should run through)
colsBeforeSplit = len(testPointQuery.vdf.columns)
testPointQuery.split_property_string_column()
colsAfterSplit = len(testPointQuery.vdf.columns)
self.assertEqual(colsBeforeSplit, colsAfterSplit)
# check vector data frame
## number of data points is correct
logging.info("TEST: Perform vector data frame tests.")
self.assertEqual(
2, len(testPointQuery.vdf)
)
## column names agree with data response
self.assertListEqual(
sorted(
list(testPointQuery.querySubmit.json()['data'][0].keys()) \
+ [paw.PAIRS_VECTOR_GEOMETRY_COLUMN_NAME]
),
sorted(testPointQuery.vdf.columns),
)
## check (some) data types from response
self.assertIsInstance(
testPointQuery.vdf.longitude[0],
float,
)
self.assertIsInstance(
testPointQuery.vdf.timestamp[0],
datetime.datetime,
)
self.assertIsInstance(
testPointQuery.vdf.value[0],
string_type,
)
def test_from_point_query_vector(self):
"""
Test querying vector point data.
"""
# query mocked data
logging.info("TEST: Query (mocked) point data.")
# define point query
testPointQuery = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'point-data-sample-request-vector.json'))),
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
# submit point query
testPointQuery.submit()
        # for compliance with the general PAW query scheme, perform fake poll and download
testPointQuery.poll_till_finished()
testPointQuery.download()
testPointQuery.create_layers()
# check vector data frame
## number of data points is correct
logging.info("TEST: Perform vector data frame tests.")
self.assertEqual(
2, len(testPointQuery.vdf)
)
## column names agree with data response
self.assertListEqual(
sorted(
list(testPointQuery.querySubmit.json()['data'][0].keys())
),
sorted(testPointQuery.vdf.columns),
)
## check (some) data types from response
self.assertIsInstance(
testPointQuery.vdf.timestamp[0],
datetime.datetime,
)
self.assertIsInstance(
testPointQuery.vdf.value[0],
string_type,
)
# check property string column splitting
colsBeforeSplit = len(testPointQuery.vdf.columns)
testPointQuery.split_property_string_column()
colsAfterSplit = len(testPointQuery.vdf.columns)
if paw.PROPERTY_STRING_COL_NAME_POINT in testPointQuery.vdf.columns:
self.assertLess(colsBeforeSplit, colsAfterSplit)
else:
self.assertEqual(colsBeforeSplit, colsAfterSplit)
# run twice to double-check it is not increasing the number of columns
testPointQuery.split_property_string_column()
colsAfter2ndSplit = len(testPointQuery.vdf.columns)
self.assertEqual(colsAfterSplit, colsAfter2ndSplit)
@unittest.skipIf(
not REAL_CONNECT,
"Skip checking mock against real service."
)
def test_mock_from_point_query(self):
"""
Checks the real PAIRS point query service against the mock used.
"""
# get real data
self.pairsServerMock.stop()
testPointQueryRasterReal = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'point-data-sample-request-raster.json'))),
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
testPointQueryVectorReal = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'point-data-sample-request-vector.json'))),
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
self.pairsServerMock.start()
# get mock data
testPointQueryRasterMock = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'point-data-sample-request-raster.json'))),
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
testPointQueryVectorMock = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'point-data-sample-request-vector.json'))),
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
# compare data entry keys
self.assertListEqual(
sorted(testPointQueryRasterReal.querySubmit.json()['data'][0].keys()),
sorted(testPointQueryRasterMock.querySubmit.json()['data'][0].keys()),
)
self.assertListEqual(
sorted(testPointQueryVectorReal.querySubmit.json()['data'][0].keys()),
sorted(testPointQueryVectorMock.querySubmit.json()['data'][0].keys()),
)
def test_dataframe_generation(self):
"""
Tests functions that massage the received data to the *unified* PAW dataframe.
"""
# query mocked data
logging.info("TEST: Generation of unified PAW dataframe for point data.")
testPointQuery = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'point-data-sample-request-raster.json'))),
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
# submit query
testPointQuery.submit()
# set timestamp column
testPointQuery.set_timestamp_column('timestamp')
# set point coordinate columns
testPointQuery.set_lat_lon_columns('latitude', 'longitude', 'geometry')
#}}}
# fold: test PAIRS raster and vector queries{{{
class TestPollQuery(unittest.TestCase):
"""
Test cases for poll-queries of raster and vector data from PAIRS.
"""
# fold: general class parameters#{{{
## time to simulate raster query generation
RASTER_QUERY_TIME_SEC = 2
## relative deviation of file size for comparing downloaded data with mock
REL_FILESIZE_DEV = .1
## local sample data
PAIRS_RASTER_ZIP_PATH = os.path.join(TEST_DATA_DIR,'12_07_2018T18_39_36-1544202000_23976938.zip')
PAIRS_AGG_RASTER_ZIP_PATH = os.path.join(TEST_DATA_DIR,'12_07_2018T19_10_50-1544202000_25850895.zip')
PAIRS_VECTOR_ZIP_PATH = os.path.join(TEST_DATA_DIR,'04_10_2019T21_45_15-1554912000_35115995.zip')
#}}}
# fold: test environment setup#{{{
@classmethod
def setUpClass(cls):
# mock polls till finished
cls.pollsTillRasterFinished = 2
cls.pollsTillAggFinished = 2
# define time of last server call (default UNIX epoch time 0)
cls.lastCallTime = datetime.datetime.fromtimestamp(0)
# define and start PAIRS mock server
cls.pairsServerMock = responses.RequestsMock()
# define query submit endpoint processings
def submit_query_endpoint(request):
respCode = 400
payload = json.loads(request.body)
# perform some tests on payload sent
if payload['spatial']['type'] == 'square' \
and len(payload['spatial']['coordinates']) == 4:
respCode = 200
# generate response body (depending on various scenarios)
if "aggregation" in payload["spatial"].keys():
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'aggregation-data-sample-response.json'))
)
elif re.match('^P', payload['layers'][0]['id']) is not None:
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'vector-data-sample-response.json'))
)
else:
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'raster-data-sample-response.json'))
)
headers = {}
return respCode, headers, json.dumps(response_body)
# define query status endpoint processings
def poll_query_status_endpoint(request):
# extract PAIRS query ID from query URL
pairsQueryID = request.url.split('/')[-1]
# generate response body
headers = {}
if pairsQueryID == os.path.splitext(
os.path.basename(cls.PAIRS_RASTER_ZIP_PATH)
)[0].split('-')[-1]:
cls.pollsTillRasterFinished -= 1
if cls.pollsTillRasterFinished > 0:
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'raster-data-sample-status-running-response.json'))
)
else:
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'raster-data-sample-status-finished-response.json'))
)
elif pairsQueryID == os.path.splitext(
os.path.basename(cls.PAIRS_AGG_RASTER_ZIP_PATH)
)[0].split('-')[-1]:
cls.pollsTillAggFinished -= 1
if cls.pollsTillAggFinished > 0:
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'aggregation-data-sample-status-running-response.json'))
)
else:
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'aggregation-data-sample-status-finished-response.json'))
)
elif pairsQueryID == os.path.splitext(
os.path.basename(cls.PAIRS_VECTOR_ZIP_PATH)
)[0].split('-')[-1]:
cls.pollsTillAggFinished -= 1
if cls.pollsTillAggFinished > 0:
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'vector-data-sample-status-running-response.json'))
)
else:
response_body = json.load(
open(os.path.join(TEST_DATA_DIR,'vector-data-sample-status-finished-response.json'))
)
else:
return 404, headers, json.dumps(
{
"message": "mocked test server does not have data for ID '{}'".format(pairsQueryID)
}
)
# simulate query processing time
time.sleep(cls.RASTER_QUERY_TIME_SEC)
# result return
return 200, headers, json.dumps(response_body)
## add endpoints
### query submit
cls.pairsServerMock.add_callback(
responses.POST,
'https://'+PAIRS_SERVER+PAIRS_BASE_URI+QUERY_ENDPOINT,
callback=submit_query_endpoint,
content_type='application/json',
)
### query downloads
for queryZIPPath in [
cls.PAIRS_RASTER_ZIP_PATH,
cls.PAIRS_AGG_RASTER_ZIP_PATH,
cls.PAIRS_VECTOR_ZIP_PATH
]:
queryID = os.path.splitext(
os.path.basename(queryZIPPath)
)[0].split('-')[-1]
with open(queryZIPPath, 'rb') as queryData:
cls.pairsServerMock.add(
responses.GET,
r'https://{}{}{}{}'.format(PAIRS_SERVER, PAIRS_BASE_URI, DOWNLOAD_ENDPOINT, queryID),
body = queryData.read(),
status = 200,
content_type='application/zip',
stream=True,
)
### query status
cls.pairsServerMock.add_callback(
responses.GET,
re.compile(
r'https://{}{}{}[0-9]+_[0-9]+'.format(PAIRS_SERVER, PAIRS_BASE_URI, STATUS_ENDPOINT)
),
callback=poll_query_status_endpoint,
content_type='application/json',
)
### start the mocked server
cls.pairsServerMock.start()
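        # Summary note (added for clarity): the callbacks and responses registered above mock
        # three PAIRS endpoints -- POST v2/query (submission), GET v2/queryjobs/<id> (status
        # polling, finishing after a fixed number of polls) and GET v2/queryjobs/download/<id>
        # (ZIP download served from the local sample archives in tests/data).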
@classmethod
def tearDownClass(cls):
cls.pairsServerMock.stop()
#}}}
# fold: test ordinary raster queries#{{{
def raster_query(self, useLocalZip=False):
"""
Query raster data in various ways.
"""
# query mocked data
logging.info("TEST: Query (mocked) data.")
testRasterQuery = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'raster-data-sample-request.json'))) \
if not useLocalZip else self.PAIRS_RASTER_ZIP_PATH,
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
# check that query got submitted
testRasterQuery.submit()
if not useLocalZip:
self.assertTrue(testRasterQuery.querySubmit.ok)
# poll and check that data status is finished
testRasterQuery.poll_till_finished(printStatus=True)
if not useLocalZip:
self.assertTrue(testRasterQuery.queryStatus.ok)
# check that certain files exist
testRasterQuery.download()
self.assertTrue(
os.path.exists(testRasterQuery.zipFilePath)
)
logging.info("TEST: Check files downloaded.")
with zipfile.ZipFile(testRasterQuery.zipFilePath) as zf:
# test the existence of the basic meta file
for fileName in ['output.info', ]:
self.assertTrue(
fileName in zf.namelist()
)
            # check that for each GeoTiff file there exists a corresponding JSON meta file
for rasterFilePath in zf.namelist():
# find all PAIRS GeoTiff files
if rasterFilePath.endswith('.tiff'):
# check a corresponding JSON file exists
self.assertTrue(
rasterFilePath+'.json' in zf.namelist()
)
# try to temporarily open the JSON file
json.loads(zf.read(rasterFilePath+'.json'))
# load raster meta data
logging.info("TEST: Load raster meta data.")
testRasterQuery.list_layers()
# check that 'details' of raster data have been successfully loaded by
# getting the spatial reference information
self.assertIsInstance(
list(testRasterQuery.metadata.values())[0]["details"]["spatialRef"],
string_type
)
# check that all data are listed as type raster
self.assertTrue(
all([
'raster' == meta['layerType']
for meta in testRasterQuery.metadata.values()
])
)
logging.info("TEST: Create NumPy arrays from raster data.")
# load the raster data into a NumPy array
testRasterQuery.create_layers()
# access the numpy array
for name, meta in testRasterQuery.metadata.items():
if meta['layerType'] == 'raster':
self.assertIsInstance(
testRasterQuery.data[name],
numpy.ndarray
)
# check that the data acknowledgement statement is not empty
self.assertIsNotNone(testRasterQuery.dataAcknowledgeText)
def test_raster_query(self):
"""
Test querying raster data.
"""
self.raster_query()
def test_raster_query_cached(self):
"""
Test querying raster data from local PAIRS ZIP file.
"""
self.raster_query(useLocalZip=True)
#}}}
# fold: test raster aggregation queries#{{{
def raster_aggregation_query(self, useLocalZip=False):
"""
Query aggregated raster data.
"""
# query mocked data
logging.info("TEST: Query (mocked) aggregation data.")
testRasterAggQuery = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'aggregation-data-sample-request.json'))) \
if not useLocalZip else self.PAIRS_AGG_RASTER_ZIP_PATH,
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
# check that query got submitted
testRasterAggQuery.submit()
if not useLocalZip:
self.assertTrue(testRasterAggQuery.querySubmit.ok)
# poll and check that data status is finished
testRasterAggQuery.poll_till_finished(printStatus=True)
if not useLocalZip:
self.assertTrue(testRasterAggQuery.queryStatus.ok)
# check that certain files exist
testRasterAggQuery.download()
self.assertTrue(
os.path.exists(testRasterAggQuery.zipFilePath)
)
logging.info("TEST: Check files downloaded.")
with zipfile.ZipFile(testRasterAggQuery.zipFilePath) as zf:
# test the existence of the basic meta file
for fileName in ['output.info', ]:
self.assertTrue(
fileName in zf.namelist()
)
            # check that for each aggregated CSV file there exists a corresponding JSON meta file
for rasterFilePath in zf.namelist():
# find all PAIRS GeoTiff files
if rasterFilePath.endswith('.csv'):
# check a corresponding JSON file exists
self.assertTrue(
rasterFilePath+'.json' in zf.namelist()
)
# try to temporarily open the JSON file
json.loads(zf.read(rasterFilePath+'.json'))
# load aggregated raster meta data (which are actually vector-type data!)
logging.info("TEST: Load aggregated raster meta data.")
testRasterAggQuery.list_layers()
# check that 'details' of raster data have been successfully loaded by
# getting the spatial reference information
self.assertIsInstance(
list(testRasterAggQuery.metadata.values())[0]["details"]["spatialRef"],
string_type
)
# check that all data are listed as type vector
self.assertTrue(
all([
'vector' == meta['layerType']
for meta in testRasterAggQuery.metadata.values()
])
)
logging.info("TEST: Create Pandas dataframes from aggregated raster data.")
# load the aggregated raster data as vector data into Pandas dataframes
testRasterAggQuery.create_layers()
# access the numpy array
for name, meta in testRasterAggQuery.metadata.items():
if meta['layerType'] == 'vector':
self.assertIsInstance(
testRasterAggQuery.data[name],
pandas.DataFrame,
)
# check that the data acknowledgement statement is not empty
self.assertIsNotNone(testRasterAggQuery.dataAcknowledgeText)
def test_raster_aggregation_query(self):
"""
Test querying aggregated raster data.
"""
self.raster_aggregation_query()
def test_raster_aggregation_query_cached(self):
"""
Test querying aggregated raster data from local PAIRS ZIP file.
"""
self.raster_aggregation_query(useLocalZip=True)
#}}}
# fold: test vector queries #{{{
def vector_query(self, useLocalZip=False):
"""
Query vector data in various ways.
"""
# query mocked data
logging.info("TEST: Query (mocked) data.")
testVectorQuery = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'vector-data-sample-request.json'))) \
if not useLocalZip else self.PAIRS_VECTOR_ZIP_PATH,
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
# check that query got submitted
testVectorQuery.submit()
if not useLocalZip:
self.assertTrue(testVectorQuery.querySubmit.ok)
# poll and check that data status is finished
testVectorQuery.poll_till_finished(printStatus=True)
if not useLocalZip:
self.assertTrue(testVectorQuery.queryStatus.ok)
# check that certain files exist
testVectorQuery.download()
self.assertTrue(
os.path.exists(testVectorQuery.zipFilePath)
)
logging.info("TEST: Check files downloaded.")
with zipfile.ZipFile(testVectorQuery.zipFilePath) as zf:
pass
# test the existence of the basic meta file
# ATTENTION: disabled for now, because it needs to be implemented
#for fileName in ['output.info', ]:
# self.assertTrue(
# fileName in zf.namelist()
# )
# load raster meta data
logging.info("TEST: Load vector meta data.")
testVectorQuery.list_layers()
# check that all data are listed as type vector
self.assertTrue(
all([
'vector' == meta['layerType']
for meta in testVectorQuery.metadata.values()
])
)
logging.info("TEST: Create dataframe from raster data.")
# load the raster data into a NumPy array
testVectorQuery.create_layers()
# access the vector dataframe
for name, meta in testVectorQuery.metadata.items():
if meta['layerType'] == 'vector':
self.assertIsInstance(
testVectorQuery.data[name],
pandas.DataFrame,
)
# try to split property string column (if any)
testVectorQuery.vdf = testVectorQuery.data[name]
# check property string column splitting
colsBeforeSplit = len(testVectorQuery.vdf.columns)
testVectorQuery.split_property_string_column()
colsAfterSplit = len(testVectorQuery.vdf.columns)
if paw.PROPERTY_STRING_COL_NAME in testVectorQuery.vdf.columns:
self.assertLess(colsBeforeSplit, colsAfterSplit)
else:
self.assertEqual(colsBeforeSplit, colsAfterSplit)
# run twice to double-check it is not increasing the number of columns
testVectorQuery.split_property_string_column()
colsAfter2ndSplit = len(testVectorQuery.vdf.columns)
self.assertEqual(colsAfterSplit, colsAfter2ndSplit)
# check that the data acknowledgement statement is not empty
self.assertIsNotNone(testVectorQuery.dataAcknowledgeText)
def test_vector_query(self):
"""
Test querying vector data.
"""
self.vector_query()
def test_vector_query_cached(self):
"""
Test querying vector data from local PAIRS ZIP file.
"""
self.vector_query(useLocalZip=True)
#}}}
def TO_BE_IMPLEMENTED_test_dataframe_generation(self):
"""
Tests functions that massage the received data to the *unified* PAW dataframe.
"""
# query mocked data
logging.info("TEST: Generation of unified PAW dataframe for raster data.")
testRasterQuery = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'raster-data-sample-request.json'))),
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
testRasterQuery.submit()
testRasterQuery.poll_till_finished(printStatus=True)
testRasterQuery.download()
# create dataframe from ratser data
testRasterQuery.create_dataframe()
# check that the dataset and datalayer column names have been added
self.assertIn(
'layerName',
testRasterQuery.dataframe[list(testRasterQuery.metadata.keys())[0]].columns
)
# fold: test cached mock data to simulate PAIRS server against real service#{{{
@unittest.skipIf(
not REAL_CONNECT,
"Skip checking mock against real service."
)
def test_mock_raster_query(self):
"""
Checks the real PAIRS raster query service against the mock used.
"""
# get real data
# prevent the responses module to complain about unused URL endponts of the mock
try:
self.pairsServerMock.stop()
except Exception as e:
# catch not all requests called error
logging.warning(
'Stopping the mocked PAIRS server caused (potentially irrelevant) trouble: {}'.format(e)
)
# check query submit
logging.info("TEST: Perform query to real PAIRS server.")
subResp = requests.post(
'https://'+PAIRS_SERVER+PAIRS_BASE_URI+QUERY_ENDPOINT,
json = json.load(open(os.path.join(TEST_DATA_DIR,'raster-data-sample-request.json'))),
auth = PAIRS_CREDENTIALS,
).json()
self.assertIn(
'id',
subResp.keys()
)
self.assertIsInstance(
subResp['id'],
string_type
)
# check query poll
while True:
statResp = requests.get(
'https://'+PAIRS_SERVER+PAIRS_BASE_URI+STATUS_ENDPOINT+subResp['id'],
auth = PAIRS_CREDENTIALS,
).json()
assert set(['id', 'rtStatus', 'statusCode']) <= set(statResp.keys())
self.assertIsInstance(
statResp['statusCode'],
int
)
if statResp['statusCode'] >= 20:
break
# check query result
downloadResp = requests.get(
'https://'+PAIRS_SERVER+PAIRS_BASE_URI+DOWNLOAD_ENDPOINT+subResp['id'],
auth = PAIRS_CREDENTIALS,
stream = True,
)
pairsDataZip = '/tmp/pairs-test-raster-download-{}.zip'.format(subResp['id'])
with open(pairsDataZip, 'wb') as f:
for chunk in downloadResp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
self.pairsServerMock.start()
# basic test of real data
self.assertTrue(
zipfile.is_zipfile(pairsDataZip)
)
# get mock data
testRasterQuery = paw.PAIRSQuery(
json.load(open(os.path.join(TEST_DATA_DIR,'raster-data-sample-request.json'))),
'https://'+PAIRS_SERVER,
auth = PAIRS_CREDENTIALS,
baseURI = PAIRS_BASE_URI,
)
testRasterQuery.submit()
testRasterQuery.poll_till_finished(printStatus=True)
testRasterQuery.download()
pairsMockZip = testRasterQuery.queryDir+'.zip'
# make sure that files in mock are available in real download
# and that the size of the data and the mock are approximately the same
logging.info("TEST: Check that all files from the mock exist in the real data queried.")
with zipfile.ZipFile(pairsMockZip, 'r') as mock, \
zipfile.ZipFile(pairsDataZip, 'r') as real:
# generate info dictionaries
mockInfo = {
f.filename: f.file_size
for f in mock.infolist()
}
realInfo = {
f.filename: f.file_size
for f in real.infolist()
}
# check that files in mock are contained in real data (in terms of names)
assert set(mockInfo.keys()) <= set(realInfo.keys())
# check that file sizes are approximately the same
for key in mockInfo.keys():
self.assertAlmostEqual(
mockInfo[key], realInfo[key],
delta = self.REL_FILESIZE_DEV * realInfo[key]
)
#}}}
# }}}
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JohannesSchuele/unet-keras",
"score": 3
} |
#### File: unet-keras/model/vgg16_graphnet.py
```python
from __future__ import print_function
import numpy as np
import warnings
from keras.models import Model
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalMaxPooling1D
from keras.layers import GlobalMaxPooling2D
from keras.optimizers import *
from keras.layers import GlobalAveragePooling2D
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import preprocess_input
from keras.engine.topology import get_source_inputs
from model.utils import callback
class GraphNet_vgg16(Model):
"""Instantiates the VGG16 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one specified in your Keras config file.
"""
def __init__(self,
input_size= [256, 256,2],
pretrained_weights = None,
max_nr_nodes =128
):
self.input_size = input_size
self.pretrained_weights = pretrained_weights
self.max_nr_nodes = max_nr_nodes
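# adj_dim = number of entries in the strict upper triangle of an N x N
# adjacency matrix, i.e. N*(N-1)/2 possible undirected edges between nodes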
adj_dim = int((max_nr_nodes * max_nr_nodes - max_nr_nodes) / 2)
# Block 1
input = Input(shape=[input_size[0], input_size[1], input_size[2]], name="input_image")
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(input)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(adj_dim, (3, 3), activation='sigmoid', padding='same', name='block5_conv3')(x)
#x = MaxPooling2D(,(2, 2), strides=(2, 2), name='block5_pool')(x)
output = GlobalMaxPooling2D(data_format="channels_last")(x)
#model = Model(inputs, x, name='graph_vgg16')
# initialize Keras Model with defined above input and output layers
super(GraphNet_vgg16, self).__init__(inputs=input, outputs=output)
# load pretrained weights
if pretrained_weights:
self.load_weights(pretrained_weights)
def build(self):
self.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["accuracy"])
self.summary()
def save_model(self, name):
self.save_weights(name)
@staticmethod
def checkpoint(checkpoint_callback_name):
return callback(checkpoint_callback_name)
# TODO: FIX SAVING MODEL: AT THIS POINT, ONLY SAVING MODEL WEIGHTS IS AVAILABLE
# SINCE SUBCLASSING FROM KERAS.MODEL RESTRICTS SAVING MODEL AS AN HDF5 FILE
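# Minimal usage sketch (illustrative only; the file name below is hypothetical):
# model = GraphNet_vgg16(input_size=[256, 256, 2], max_nr_nodes=128)
# model.build()  # compiles with Adam + binary cross-entropy and prints the summary
# model.save_model('graphnet_vgg16_weights.h5')  # weights only, see the TODO above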
``` |
{
"source": "johannes-schuetze/ALTO-framework-sim",
"score": 3
} |
#### File: pyparsing-2.0.1/examples/searchparser.py
```python
from pyparsing import Word, alphanums, Keyword, Group, Combine, Forward, Suppress, Optional, OneOrMore, oneOf
from sets import Set
class SearchQueryParser:
def __init__(self):
self._methods = {
'and': self.evaluateAnd,
'or': self.evaluateOr,
'not': self.evaluateNot,
'parenthesis': self.evaluateParenthesis,
'quotes': self.evaluateQuotes,
'word': self.evaluateWord,
'wordwildcard': self.evaluateWordWildcard,
}
self._parser = self.parser()
def parser(self):
"""
This function returns a parser.
The grammar should be like most full text search engines (Google, Tsearch, Lucene).
Grammar:
- a query consists of alphanumeric words, with an optional '*' wildcard
at the end of a word
- a sequence of words between quotes is a literal string
- words can be used together by using operators ('and' or 'or')
- words with operators can be grouped with parenthesis
- a word or group of words can be preceded by a 'not' operator
- the 'and' operator precedes an 'or' operator
- if an operator is missing, use an 'and' operator
"""
operatorOr = Forward()
operatorWord = Group(Combine(Word(alphanums) + Suppress('*'))).setResultsName('wordwildcard') | \
Group(Word(alphanums)).setResultsName('word')
operatorQuotesContent = Forward()
operatorQuotesContent << (
(operatorWord + operatorQuotesContent) | operatorWord
)
operatorQuotes = Group(
Suppress('"') + operatorQuotesContent + Suppress('"')
).setResultsName("quotes") | operatorWord
operatorParenthesis = Group(
(Suppress("(") + operatorOr + Suppress(")"))
).setResultsName("parenthesis") | operatorQuotes
operatorNot = Forward()
operatorNot << (Group(
Suppress(Keyword("not", caseless=True)) + operatorNot
).setResultsName("not") | operatorParenthesis)
operatorAnd = Forward()
operatorAnd << (Group(
operatorNot + Suppress(Keyword("and", caseless=True)) + operatorAnd
).setResultsName("and") | Group(
operatorNot + OneOrMore(~oneOf("and or") + operatorAnd)
).setResultsName("and") | operatorNot)
operatorOr << (Group(
operatorAnd + Suppress(Keyword("or", caseless=True)) + operatorOr
).setResultsName("or") | operatorAnd)
return operatorOr.parseString
def evaluateAnd(self, argument):
return self.evaluate(argument[0]).intersection(self.evaluate(argument[1]))
def evaluateOr(self, argument):
return self.evaluate(argument[0]).union(self.evaluate(argument[1]))
def evaluateNot(self, argument):
return self.GetNot(self.evaluate(argument[0]))
def evaluateParenthesis(self, argument):
return self.evaluate(argument[0])
def evaluateQuotes(self, argument):
"""Evaluate quoted strings
First it does an 'and' on the individual search terms, then it asks the
function GetQuotes to only return the subset of IDs that contain the
literal string.
"""
r = Set()
search_terms = []
for item in argument:
search_terms.append(item[0])
if len(r) == 0:
r = self.evaluate(item)
else:
r = r.intersection(self.evaluate(item))
return self.GetQuotes(' '.join(search_terms), r)
def evaluateWord(self, argument):
return self.GetWord(argument[0])
def evaluateWordWildcard(self, argument):
return self.GetWordWildcard(argument[0])
def evaluate(self, argument):
return self._methods[argument.getName()](argument)
def Parse(self, query):
#print self._parser(query)[0]
return self.evaluate(self._parser(query)[0])
def GetWord(self, word):
return Set()
def GetWordWildcard(self, word):
return Set()
def GetQuotes(self, search_string, tmp_result):
return Set()
def GetNot(self, not_set):
return Set().difference(not_set)
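# GetWord, GetWordWildcard, GetQuotes and GetNot are stubs meant to be
# overridden by a subclass that knows about the actual document index;
# ParserTest below is one such subclass.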
class ParserTest(SearchQueryParser):
"""Tests the parser with some search queries
tests contains a dictionary with tests and expected results.
"""
tests = {
'help': Set([1, 2, 4, 5]),
'help or hulp': Set([1, 2, 3, 4, 5]),
'help and hulp': Set([2]),
'help hulp': Set([2]),
'help and hulp or hilp': Set([2, 3, 4]),
'help or hulp and hilp': Set([1, 2, 3, 4, 5]),
'help or hulp or hilp or halp': Set([1, 2, 3, 4, 5, 6]),
'(help or hulp) and (hilp or halp)': Set([3, 4, 5]),
'help and (hilp or halp)': Set([4, 5]),
'(help and (hilp or halp)) or hulp': Set([2, 3, 4, 5]),
'not help': Set([3, 6, 7, 8]),
'not hulp and halp': Set([5, 6]),
'not (help and halp)': Set([1, 2, 3, 4, 6, 7, 8]),
'"help me please"': Set([2]),
'"help me please" or hulp': Set([2, 3]),
'"help me please" or (hulp and halp)': Set([2]),
'help*': Set([1, 2, 4, 5, 8]),
'help or hulp*': Set([1, 2, 3, 4, 5]),
'help* and hulp': Set([2]),
'help and hulp* or hilp': Set([2, 3, 4]),
'help* or hulp or hilp or halp': Set([1, 2, 3, 4, 5, 6, 8]),
'(help or hulp*) and (hilp* or halp)': Set([3, 4, 5]),
'help* and (hilp* or halp*)': Set([4, 5]),
'(help and (hilp* or halp)) or hulp*': Set([2, 3, 4, 5]),
'not help* and halp': Set([6]),
'not (help* and helpe*)': Set([1, 2, 3, 4, 5, 6, 7]),
'"help* me please"': Set([2]),
'"help* me* please" or hulp*': Set([2, 3]),
'"help me please*" or (hulp and halp)': Set([2]),
'"help me please" not (hulp and halp)': Set([2]),
'"help me please" hulp': Set([2]),
'help and hilp and not holp': Set([4]),
'help hilp not holp': Set([4]),
'help hilp and not holp': Set([4]),
}
docs = {
1: 'help',
2: 'help me please hulp',
3: 'hulp hilp',
4: 'help hilp',
5: 'halp thinks he needs help',
6: 'he needs halp',
7: 'nothing',
8: 'helper',
}
index = {
'help': Set((1, 2, 4, 5)),
'me': Set((2,)),
'please': Set((2,)),
'hulp': Set((2, 3,)),
'hilp': Set((3, 4,)),
'halp': Set((5, 6,)),
'thinks': Set((5,)),
'he': Set((5, 6,)),
'needs': Set((5, 6,)),
'nothing': Set((7,)),
'helper': Set((8,)),
}
def GetWord(self, word):
if (word in self.index):
return self.index[word]
else:
return Set()
def GetWordWildcard(self, word):
result = Set()
for item in list(self.index.keys()):
if word == item[0:len(word)]:
result = result.union(self.index[item])
return result
def GetQuotes(self, search_string, tmp_result):
result = Set()
for item in tmp_result:
if self.docs[item].count(search_string):
result.add(item)
return result
def GetNot(self, not_set):
all = Set(list(self.docs.keys()))
return all.difference(not_set)
def Test(self):
all_ok = True
for item in list(self.tests.keys()):
print(item)
r = self.Parse(item)
e = self.tests[item]
print('Result: %s' % r)
print('Expect: %s' % e)
if e == r:
print('Test OK')
else:
all_ok = False
print('>>>>>>>>>>>>>>>>>>>>>>Test ERROR<<<<<<<<<<<<<<<<<<<<<')
print('')
return all_ok
if __name__=='__main__':
if ParserTest().Test():
print('All tests OK')
else:
print('One or more tests FAILED')
```
#### File: ALTO-framework-sim/utilities/add_Interfaces.py
```python
import sys
def read_in():
lines = sys.stdin.readlines()
for i in range(len(lines)):
lines[i] = lines[i].replace('\n','')
#print lines
return lines
output = ""
try:
file_handle = open('Conf/networkGen_config.txt', 'r')
except (OSError, IOError) as e:
print e
print "Goodbye"
altoCount = 0 #will be increased at every city, to roughly group cities into one PID
data = read_in()
for line in data:
#print "HELLO"
#if its not a edge but a city name
if '//' in line:
altoCount +=1 #increase by one once new City is worked with.
output = "\t" + line
print output
#if there is a empty line
elif len(line) < 3:
continue
#else its a edge and meta needs to be added
else:
igp_pos = line.find("=")
output = "\t" + line[:igp_pos] + "\t[label=%s,headlabel=\"%s-%s\" ,alias=1,latency=4];"%(str(line[igp_pos+1:]),str(line[7:igp_pos]),str(line[:3]))
print output
sys.stdout.flush()
```
#### File: ALTO-framework-sim/utilities/detVPnumb_.py
```python
import random
import datetime
from utilities import file_interfaces_
from Views import traceroute_
def genVpStatistics(nodeList, shortestPathsDict, graphName):
print "generating statistics about the optimal number of Vantage Points for the graph\n"
#print shortestPathsDict
HASH_MULTIPLIER = 100000
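#a directed edge (src, dst) is encoded as src*HASH_MULTIPLIER + dst, which stays
#unique as long as node ids are below HASH_MULTIPLIER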
#string containing all latency for write to file
text_total_links = "Graph: "+graphName+"\nDate: " + str(datetime.datetime.now())+"\nNum VPs\tNum Links\tNum Nodes\n0\t0\n1\t0\n"
vPs = traceroute_.getVantagePoints(nodeList) #the list of vantage points from where we collect interfaces(headlabels)
tmpPathAverage = 0
tmpNodeAverage = 0
averageLinksFound = 0
averageNodesFound = 0
#print vPs
#TODO remove?
#hiddenNodes = traceroute_.getHiddenNodes(nodeList) #the list of nodes that will not appear in the trace
#starredNodes = traceroute_.getStarredNodes(nodeList)#the list of nodes that will appear as * in the trace
numbVPs = 2 # determines how many Vantage points are to be used
#run as many times as there are vantage points
while(numbVPs<=len(vPs)):
#print "New Round, Number of VPs: ", numbVPs
tmpPathAverage = 0
tmpNodeAverage = 0
linksFound = list()
nodesFound = list()
#links found during iteration
#loop number of samples required times
for numb in range(0,1000):
#print "Iteration Nr: ", numb
linksFound = list()
nodesFound = list()
vantagePoint = list()
#generate list of random vantage points to run traces to/from
#if(numbVPs != len(vPs)):
#vantagePoints = getRandVantagePoints(vPs, numbVPs)
#else:
#vantagePoints = vPs
tmp = shuffleVPs(vPs)
vantagePoints = tmp[:numbVPs]
#targets = vantagePoints
#loop list to get all possible starting points
for src in vantagePoints:
#loop list to get all possible destinations
for target in vantagePoints:
#print "From %s to %s"%(src, target)
#skipp if I'm tracing to myself
if src == target:
continue
#extract the path
temp = shortestPathsDict[src]
tempVPs = temp[target]
#if length is 1 it's trying to trace to itself (should not be possible)
#if len(tempVPs) == 1:
#continue
#linksFound.append((src*HASH_MULTIPLIER)+target) #has the src and dest to represent the directed edge
#hash the found edge and append to list of links found
for key in range(len(tempVPs)-1):
#print "Here: ",tempVPs
first=tempVPs[key]
second = tempVPs[key+1]
#Adding up the edges between src and target.
linksFound.append((first*HASH_MULTIPLIER)+second)
nodesFound.append(first)
nodesFound.append(second)
tmpPathAverage = tmpPathAverage + len(set(linksFound))
tmpNodeAverage = tmpNodeAverage + len(set(nodesFound))
#print "VPs: %s: number of edges: %d"%(str(vantagePoints),len(set(linksFound)))
#print "set of links found: ", len(set(linksFound))
averageLinksFound = averageLinksFound + float(float(tmpPathAverage)/1000)
averageNodesFound = averageNodesFound + float(float(tmpNodeAverage)/1000)
#print "Average : ", averageLinksFound
text_total_links = text_total_links + "" + str(numbVPs) + "\t" + str(averageLinksFound) + "\t\t" + str(averageNodesFound)+"\n"
averageLinksFound = 0
averageNodesFound = 0
numbVPs = numbVPs + 1
file_interfaces_.writeLinkCount(text_total_links,graphName)
print "done"
return text_total_links
#takes a list of all available vantage points and returns `number` of them at random
def getRandVantagePoints(vpList, number):
randVPs = []
count = 0
while(count <= number):
tmp = random.choice(vpList)
randVPs.append(tmp)
count = count + 1
return randVPs
# 2nd approach, shuffle list and return
def shuffleVPs(array):
random.shuffle(array)
return array
```
#### File: ALTO-framework-sim/utilities/drawGraph_.py
```python
import pydot
from PIL import Image
#function transforms
#def transformGraphReady():
def drawGraph(hoodDict, grName):
graph = pydot.Dot(label = grName, graph_type='digraph', overlap ='scale', splines ='false')
graph.set_edge_defaults(len='4')
#first we generate the nodes
for key in hoodDict.keys():
node_a = pydot.Node(str(key), shape="box")
graph.add_node(node_a)
for node in hoodDict.keys():
for entry, value in hoodDict[node].items():
edge = pydot.Edge(str(node), str(entry), label = str(value), )
graph.add_edge(edge)
graph.write_jpeg('Output/'+grName+'.jpeg', prog='neato')
graph.write_dot('Output/DOT/'+grName+'.dot')
#Image.open('Output/'+grName+'.jpeg').show()
def drawNetworkMap(netmapList, grName):
graph = pydot.Dot(label = grName, graph_type='digraph', overlap ='scale', splines ='false')
for key, val in netmapList.iteritems():
name = ""+key+" |{ "
#print key
#print val[0]
#converting the set val to a list to apply list functions to it
iterVal = list(val)
for n in range(len(val)):
if len (iterVal) == 1:
name = name + str(iterVal.pop(0)) +" }"
else:
name = name + str(iterVal.pop(0)) +" | "
#print name
node = pydot.Node(label = name, shape = "record")
node.set_name(key)
graph.add_node(node)
name = ""
graph.write_jpeg('Output/'+grName+'.jpeg', prog='neato')
graph.write_dot('Output/DOT/'+grName+'.dot')
#struct3 [shape=record,label="hello\nworld |{ b |{c|<here> d|e}| f}| g | h"]
#method to write a traceRoute View into a dot file and jpeg
#@param nodes list of all found nodes
#@param edges list of all found edges
#@param grName string containing output name of graph
def drawTracerouteView(traceView, grName):
sd_pairs = ()
total_Edges = []
nodes = []
#FORMATTING***********************************************************************
#FIRST all edges have to be determined, SECOND if path is discovered, no additional entry needed!!!
#iterating the tree to get the subtree out, i.e. the sub list (3rd level)
for outta_key, subtree in traceView.iteritems():
for inna_key, pathList in subtree.iteritems():
#print "DRAWING"
#print "outter : %d, inner: %d"%(outta_key,inna_key)
#print pathList
#if the list has one element
if len(pathList) == 1:
#if the one element is the same as the outta_key then no trace happened (src=dst)
if pathList[0]==outta_key:
#print "self discovered: %d and %d"%(outta_key,pathList[0])
#build the list of nodes. Every node has a trace to itself, so best way to build node List
nodes.append(pathList[0])
#continue
#the trace went to a neighbor node (one hop)
else:
if (isInListOfTuples(total_Edges, outta_key, pathList[0])==0):
#print "Adding Real neighbors: %d and %d"%(outta_key,pathList[0])
sd_pairs = outta_key, pathList[0]
total_Edges.append(sd_pairs)
#there is a list with min 2 entries and n length. divide into edge pairs starting with src node (outta_key)!
else:
if (isInListOfTuples(total_Edges, outta_key, pathList[0])==0):
sd_pairs = outta_key, pathList[0]
total_Edges.append(sd_pairs)
#print "ADDING pairs: ", sd_pairs
for x in range(0,len(pathList)-1):
#print "add: %d | %d"%(pathList[x],pathList[x+1])
if (isInListOfTuples(total_Edges, pathList[x], pathList[x+1])==0):
sd_pairs = pathList[x],pathList[x+1]
total_Edges.append(sd_pairs)
#print "THE TOTAL EDGES"
#print total_Edges
#print "THE TOTAL NODES"
#print nodes
#DRAWING*************************************************************************
graph = pydot.Dot(label = grName, graph_type='digraph', overlap ='scale', splines ='false')
graph.set_edge_defaults(len='2')
for n in nodes:
node_a = pydot.Node(str(n), shape="box", style="filled", color="red", fillcolor="yellow")
graph.add_node(node_a)
for src, dest in total_Edges:
edge = pydot.Edge(str(src), str(dest), label = str(1), )
graph.add_edge(edge)
graph.write_jpeg('Output/'+grName+'.jpeg', prog='neato')
graph.write_dot('Output/DOT/'+grName+'.dot')
#draw the graph that results from the merge process. Note: Every object is one cluster
def drawMerge(pidDict, grName):
#print "IN DRAW Func"
graph = pydot.Dot(label = grName, graph_type='digraph', overlap ='scale', splines ='false', compound='true')
#graph.set_edge_defaults(len='2')
for pid, pidObj in pidDict.iteritems():
#print "currently dealing with: ", pid
#pidObj.printME()
#generate new cluster
cluster = pydot.Cluster(graph_name = pid, label = pid, style='filled', color='lightgrey', labelloc = "t")
#populate cluster:
#nodes first
for node in set(pidObj.nodes):
#while len(node) > 0:
#print "Drawing: ", node
this_node = pydot.Node(name = str(node), shape="box")
cluster.add_node(this_node)
for node in set(pidObj.s_nodes):
#print "drawing Starred: ", node
this_node = pydot.Node(name = str(node), shape="box", style = 'filled', color = 'yellow')
cluster.add_node(this_node)
for node in set(pidObj.a_nodes):
#print "alias Starred: ", node
this_node = pydot.Node(name = str(node), shape="record")
cluster.add_node(this_node)
for edge, weight in pidObj.in_edges.iteritems():
src, dst = edge
#print "Edge: ", src, dst, weight
this_edge = pydot.Edge(str(src), str(dst), label = str(weight))
cluster.add_edge(this_edge)
#for edge, weight in pidObj.out_edges.iteritems():
# src, dst = edge
# print "Edge: ", src, dst, weight
# this_edge = pydot.Edge(str(src), str(dst), label = str(weight))
# cluster.add_edge(this_edge)
cluster.set_parent_graph(graph)
graph.add_subgraph(cluster)
#edges connecting PIDs have to be added at the end since they don't belong into a subgraph.
for pid, pidObj in pidDict.iteritems():
for edge, weight in pidObj.out_edges.iteritems():
src, dst = edge
#print "Edge: ", src, dst, weight
this_edge = pydot.Edge(str(src), str(dst), label = str(weight))
graph.add_edge(this_edge)
for dstPID, weight in pidObj.foundNeighborPIDs.iteritems(): #TODO remove if practice proves unnecessary!
this_edge = pydot.Edge(str(pid), str(dstPID), label = str(weight))
graph.add_edge(this_edge)
#graph.write_jpg('Output/FIN_'+grName+'.jpg', prog='dot')#TODO neato might not render the graph right!!!
graph.write_raw('Output/RESULTS/'+grName+'_Graph.dot')#this is what i really want, a dot file to adjust in yED graphically and to manually apply learned ALTO knowledge to it.
def makeNodeStarred(n):
return "*"+str(n)+"*"
def makeNodeAlias(interface, src ):
return (str(interface)+"-"+str(src))
#function returns 1 if it found the supplied pair in the supplied list, 0 if it's not in yet.
def isInListOfTuples(ListOfTuples, src, dest):
for i, v in enumerate(ListOfTuples):
if (v[0] == src):
if(v[1] == dest):
#if edge (src --> dest) exists in tuple, return 1
return 1
else:
continue
return 0
``` |
{
"source": "JohannesSeidel/pyNastran",
"score": 3
} |
#### File: bdf/bdf_interface/replication.py
```python
from typing import List, Optional
from pyNastran.bdf.bdf_interface.utils import expand_tabs
from pyNastran.bdf.cards.utils import wipe_empty_fields
from pyNastran.bdf.errors import ReplicationError
def to_fields_replication(card_lines):
# type: (List[str]) -> List[Optional[str]]
"""
Converts a series of lines in a card into string versions of the field.
Handles large, small, and CSV formatted cards. Same as to_fields, but
uses a different method to determine if it's large or small field format.
Parameters
----------
lines : List[str]
the lines of the BDF card object
Returns
-------
fields : List[str]
the string formatted fields of the card
.. warning:: this function is used by the reader and isn't intended
to be called by a separate process
.. code-block:: python
>>> card_lines = ['GRID,1,,1.0,2.0,3.0']
>>> card_name = 'GRID'
>>> fields = to_fields_replication(card_lines)
>>> fields
['GRID', '1', '', '1.0', '2.0', '3.0']
"""
#print('to_fields_replicationA =', card_lines)
#line0 = card_lines[0]
fields = []
for iline, line in enumerate(card_lines):
line = line.rstrip()
if '\t' in line:
line = expand_tabs(line)
#print(' line = %r' % line)
if ',' in line:
assert '\t' not in line, '%r' % line
sline = line.split(',')
#print('sline = ', iline, sline)
if iline == 0:
card_name = sline[0]
#assert card_name != '=', card_lines
fields.append(card_name)
if '*' in sline[0]:
fields.extend(sline[1:5])
for unused_i in range(5 - len(sline)):
fields.append('')
else:
fields.extend(sline[1:9])
for unused_i in range(9 - len(sline)):
fields.append('')
else:
assert '\t' not in line, line
if iline == 0:
card_name = line[0:8]
assert card_name != '=', card_lines
fields.append(card_name)
if '*' in card_name:
fields.extend([line[8:24], line[24:40], line[40:56],
line[56:72]])
else:
fields.extend([line[8:16], line[16:24], line[24:32],
line[32:40], line[40:48], line[48:56], line[56:64],
line[64:72]])
if '*' in card_name:
raise ReplicationError('* found in unexpected position; %r\nlines = %s' % (card_name, card_lines))
wiped_fields = wipe_empty_fields(fields)
for field in fields:
sfield = field.strip()
#while '= ' in sfield:
#sfield = field.replace('= ','=')
#print('sfield=%r' % sfield)
if ' ' in sfield:
raise RuntimeError('field=%r has embedded blanks\nfields=%s' % (sfield, fields))
return wiped_fields
def get_nrepeats(field, old_card, new_card):
"""=4, =(11)"""
msg = 'field=%r; expected =(1), =2, ...\nold_card=%s\nnew_card=%s' % (
field, old_card, new_card)
assert field[0] == '=', msg
assert '.' not in field, msg
assert '*' not in field, msg
if '(' in field or ')' in field:
assert field[1] == '(', msg
assert field[-1] == ')', msg
fieldi = field[2:-1]
assert '(' not in fieldi, msg
assert ')' not in fieldi, msg
else:
assert '(' not in field, msg
assert ')' not in field, msg
fieldi = field[1:]
nrepeats = int(fieldi)
return nrepeats
def float_replication(field, old_field):
"""*4., *(11.5)"""
msg = 'field=%r; expected *(1.), *2., ..., *11.' % field
assert field[0] == '*', msg
assert '.' in field, msg
assert len(field) >= 3, msg
if '(' in field:
assert field[1] == '(', msg
assert field[-1] == ')', msg
fieldi = field[2:-1]
assert '(' not in fieldi, msg
assert ')' not in fieldi, msg
else:
assert '(' not in field, msg
assert ')' not in field, msg
fieldi = field[1:]
nfloat = float(fieldi)
field2 = nfloat + float(old_field)
return field2
def int_replication(field, old_field):
# type: (str, str) -> int
"""*4, *(11)"""
msg = 'field=%r; expected *(1), *2, ..., *11' % field
assert field[0] == '*', msg
assert '.' not in field, msg
if '(' in field:
assert field[1] == '(', msg
assert field[-1] == ')', msg
fieldi = field[2:-1]
assert '(' not in fieldi, msg
assert ')' not in fieldi, msg
else:
assert '(' not in field, msg
assert ')' not in field, msg
fieldi = field[1:]
assert '*' not in fieldi, msg
assert len(field) >= 2, msg
#assert len(field) == 2, 'field=%r; expected *1, *2, *3, ..., *9' % field
# integer
nint = int(fieldi)
field2 = nint + int(old_field)
return field2
def _field(old_card, ifield):
"""helper for replication"""
#if isinstance(old_card, list):
#print(old_card, ifield)
field2 = old_card[ifield]
#else:
#field2 = old_card.field(ifield)
return field2
def repeat_cards(old_card, new_card):
"""helper for replication"""
card = []
cards = []
#print('*old_card = %s' % old_card)
#print('*new_card = %s' % new_card)
assert old_card != new_card
for ifield, field in enumerate(new_card):
if field is None:
field2 = _field(old_card, ifield)
#field2 = old_card.field(ifield)
#print(' %i: %r -> %r' % (ifield, field, field2))
#assert field2 is None, 'field=%s field2=%s' % (field, field2)
card.append(field2)
continue
if field == '':
field2 = field
elif field == '=':
field2 = _field(old_card, ifield)
elif field == '==':
# just append the remaining fields
#print(' %s : extending %s' % (ifield, old_card[ifield:]))
card.extend(old_card[ifield:])
break
elif '*' in field:
# this is an increment, not multiplication...
old_field = _field(old_card, ifield)
#old_field = old_card.field(ifield)
if '.' in field:
field2 = float_replication(field, old_field)
else:
field2 = int_replication(field, old_field)
else:
msg = 'field=%r\nold_card=%s\nnew_card=%s' % (field, old_card, new_card)
assert '(' not in field, msg
assert '*' not in field, msg
assert '=' not in field, msg
field2 = field
#print(' %i: %r -> %r' % (ifield, field, field2))
card.append(field2)
#print(' appending %s' % card)
cards.append(card)
return cards
```
#### File: bdf/bdf_interface/stats.py
```python
from typing import List, Set, Dict, Any, Union
def get_bdf_stats(model, return_type='string', word=''):
# type: (Any, str, str) -> Union[str, List[str]]
"""
Print statistics for the BDF
Parameters
----------
return_type : str (default='string')
the output type ('list', 'string')
'list' : list of strings
'string' : single, joined string
word : str; default=''
model flag
Returns
-------
return_data : str, optional
the output data
.. note:: if a card is not supported and not added to the proper
lists, this method will fail
.. todo:: RBE3s from OP2s can show up as ???s
"""
card_dict_groups = [
'params', 'nodes', 'spoints', 'epoints', 'points', 'gridb',
'elements', 'ao_element_flags', 'normals', 'rigid_elements', 'plotels',
'properties', 'pbusht', 'pdampt', 'pelast',
'properties_mass', 'masses',
'materials', 'creep_materials', 'hyperelastic_materials',
'MATT1', 'MATT2', 'MATT3', 'MATT4', 'MATT5', 'MATT8', 'MATT9',
'MATS1', 'MATS3', 'MATS8', 'MATT8',
'coords', 'mpcs',
# axisysmmetric
# dynamic cards
'dareas', 'delays', 'dphases', 'nlparms', 'nlpcis',
'tsteps', 'tstepnls',
'rotors',
# direct matrix input - DMIG - dict
'dmi', 'dmig', 'dmij', 'dmiji', 'dmik', 'dmiax',
'dequations',
'transfer_functions',
'tics',
# frequencies - dict[List[FREQ]]
'frequencies',
# optimization - dict
'dconadds', 'dconstrs', 'desvars', 'topvar', 'ddvals', 'dlinks', 'dresps',
'dvcrels', 'dvmrels', 'dvprels', 'dvgrids',
# SESETx - dict
'suport1',
# tables
'tables', 'tables_d', 'tables_m', 'random_tables', 'tables_sdamping',
# methods
'methods', 'cMethods',
# aero
'caeros', 'paeros', 'aecomps', 'aefacts', 'aelinks',
'aelists', 'aeparams', 'aesurf', 'aesurfs', 'aestats', 'gusts', 'flfacts',
'flutters', 'splines', 'trims', 'divergs', 'csschds',
# thermal
'bcs', 'thermal_materials', 'phbdys', 'views', 'view3ds',
'convection_properties',
# contact
'bsurf', 'bsurfs', 'blseg',
'bconp', 'bcrparas', 'bctadds', 'bctparas', 'bctsets',
# sets
'sets', 'usets',
# superelements
'csuper', 'csupext',
'sebulk', 'sebndry', 'seconct', 'seelt', 'seexcld',
'selabel', 'seloc', 'seload', 'sempln', 'senqset',
'setree',
'se_sets', 'se_usets',
# ???
'dscreen', 'dti', 'nxstrats', 'radcavs', 'radmtx', 'ringaxs', 'ringfl',
'tempds', 'spcoffs',
]
scalar_attrs = [
'aero', 'aeros', 'grdset', # handled below
# not handled
'axic', 'axif',
'baror', 'beamor', 'doptprm', 'dtable',
'zona',
]
list_attrs = [
'asets', 'bsets', 'csets', 'omits', 'qsets',
'se_bsets', 'se_csets', 'se_qsets',
'suport', 'se_suport',
'monitor_points',
]
skip_attrs = [
'active_filename', 'active_filenames', 'debug', 'log', 'reject_lines',
'is_nx', 'is_msc', 'is_bdf_vectorized', 'dumplines', 'values_to_skip',
'system_command_lines', 'executive_control_lines', 'case_control_lines',
'case_control_deck',
'is_superelements', 'special_cards', 'units',
'sol', 'sol_iline', 'sol_method', 'cards_to_read', 'card_count',
'superelement_models', 'wtmass', 'echo', 'force_echo_off',
'read_includes', 'reject_cards', 'reject_count', 'punch',
'include_dir', 'include_filenames', 'save_file_structure',
'rsolmap_to_str', 'nastran_format', 'nid_map', 'bdf_filename',
'radset', 'is_zona',
# handled below
'mpcadds', 'mpcs', 'spcadds', 'spcs',
'loads', 'load_combinations',
'dloads', 'dload_entries',
'aero', 'aeros', 'mkaeros',
'nsmadds', 'nsms',
'seqgp',
] + list_attrs + card_dict_groups + scalar_attrs
#missed_attrs = []
#for attr in model.object_attributes():
#if attr in skip_attrs:
#continue
#missed_attrs.append(attr)
#assert missed_attrs == [], missed_attrs
# These are ignored because they're lists
#ignored_types = set([
#'spoints', 'spointi', # singleton
#'grdset', # singleton
#'spcs',
#'suport', 'se_suport', # suport, suport1 - list
#'doptprm', # singleton
## SETx - list
#'sets', 'asets', 'bsets', 'csets', 'qsets',
#'se_bsets', 'se_csets', 'se_qsets',
#])
## TODO: why are some of these ignored?
#ignored_types2 = set([
#'case_control_deck', 'caseControlDeck',
## done
#'sol', 'loads', 'mkaeros',
#'reject_lines', 'reject_cards',
## not cards
#'debug', 'executive_control_lines',
#'case_control_lines', 'cards_to_read', 'card_count',
#'is_structured', 'uniqueBulkDataCards',
#'model_type', 'include_dir',
#'sol_method', 'log',
#'sol_iline',
#'reject_count', '_relpath',
#'special_cards',])
#unsupported_types = ignored_types.union(ignored_types2)
#all_params = object_attributes(model, keys_to_skip=unsupported_types)
msg = ['---BDF Statistics%s---' % word]
# sol
if 'Superelement' not in word:
msg.append('SOL %s\n' % model.sol)
msg.extend(_get_bdf_stats_loads(model))
# load_combinations / loads: handled below
# dloads
for (lid, loads) in sorted(model.dloads.items()):
msg.append('bdf.dloads[%s]' % lid)
groups_dict = {} # type: Dict[str, Any]
for loadi in loads:
groups_dict[loadi.type] = groups_dict.get(loadi.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (lid, loads) in sorted(model.dload_entries.items()):
msg.append('bdf.dload_entries[%s]' % lid)
groups_dict = {}
for loadi in loads:
groups_dict[loadi.type] = groups_dict.get(loadi.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
# spcs
for (spc_id, spcadds) in sorted(model.spcadds.items()):
msg.append('bdf.spcadds[%s]' % spc_id)
groups_dict = {}
for spcadd in spcadds:
groups_dict[spcadd.type] = groups_dict.get(spcadd.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (spc_id, spcs) in sorted(model.spcs.items()):
msg.append('bdf.spcs[%s]' % spc_id)
groups_dict = {}
for spc in spcs:
groups_dict[spc.type] = groups_dict.get(spc.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
# mpcs
for (mpc_id, mpcadds) in sorted(model.mpcadds.items()):
msg.append('bdf.mpcadds[%s]' % mpc_id)
groups_dict = {}
for mpcadd in mpcadds:
groups_dict[mpcadd.type] = groups_dict.get(mpcadd.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (mpc_id, mpcs) in sorted(model.mpcs.items()):
msg.append('bdf.mpcs[%s]' % mpc_id)
groups_dict = {}
for mpc in mpcs:
groups_dict[mpc.type] = groups_dict.get(mpc.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
# nsms
for (nsm_id, nsmadds) in sorted(model.nsmadds.items()):
msg.append('bdf.nsmadds[%s]' % nsm_id)
groups_dict = {}
for nsmadd in nsmadds:
groups_dict[nsmadd.type] = groups_dict.get(nsmadd.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (mpc_id, nsms) in sorted(model.nsms.items()):
msg.append('bdf.nsms[%s]' % mpc_id)
groups_dict = {}
for nsm in nsms:
groups_dict[nsm.type] = groups_dict.get(nsm.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
# aero
if model.aero:
msg.append('bdf.aero')
msg.append(' %-8s 1' % ('AERO:'))
# aeros
if model.aeros:
msg.append('bdf:aeros')
msg.append(' %-8s 1' % ('AEROS:'))
#mkaeros
if model.mkaeros:
msg.append('bdf:mkaeros')
msg.append(' %-8s %s' % ('MKAERO:', len(model.mkaeros)))
# radset
if model.radset:
msg.append('bdf:radset')
msg.append(' %-8s 1' % ('RADSET:'))
#mkaeros
if model.seqgp:
msg.append('bdf:seqgp')
msg.append(' %-8s 1' % ('SEQGP:'))
for card_group_name in card_dict_groups:
try:
card_group = getattr(model, card_group_name)
except AttributeError:
msgi = 'cant find card_group_name=%r' % card_group_name
raise AttributeError(msgi)
groups = set() # type: Set[str]
if not isinstance(card_group, dict):
msgi = '%s is a %s; not dictionary, which is required by get_bdf_stats()' % (
card_group_name, type(card_group))
model.log.error(msgi)
continue
#raise RuntimeError(msg)
for card in card_group.values():
if isinstance(card, list):
for card2 in card:
groups.add(card2.type)
else:
groups.add(card.type)
group_msg = []
for card_name in sorted(groups):
try:
ncards = model.card_count[card_name]
group_msg.append(' %-8s : %s' % (card_name, ncards))
except KeyError:
# we get in here because we used add_grid or similar method, which
# doesn't increase the card_count, so instead we'll use _type_to_id_map
counter = '???'
if card_name in model._type_to_id_map:
counter = len(model._type_to_id_map[card_name])
if card_name == 'CORD2R' and counter == '???':
# there is always 1 CORD2R that isn't added to card_count/_type_to_id_map
continue
group_msg.append(' %-8s : %s' % (card_name, counter))
#assert card_name == 'CORD2R', model.card_count
if group_msg:
msg.append('bdf.%s' % card_group_name)
msg.append('\n'.join(group_msg))
msg.append('')
if model.reject_lines: # List[card]; card = List[str]
msg.append('Rejected Cards')
for name, counter in sorted(model.card_count.items()):
if name not in model.cards_to_read:
msg.append(' %-8s %s' % (name + ':', counter))
msg.append('')
for super_id, superelement in model.superelement_models.items():
msg += get_bdf_stats(superelement, return_type='list', word=' (Superelement %i)' % super_id)
if return_type == 'string':
return '\n'.join(msg)
return msg
def _get_bdf_stats_loads(model):
# type: (Any) -> List[str]
"""helper for ``get_bdf_stats(...)``"""
# loads
msg = []
if model.is_bdf_vectorized:
## kind of hackish
for (lid, load_combination) in sorted(model.load_combinations.items()):
msg.append('bdf.load_combinations[%s]' % lid)
msg.append('')
if len(model.loads):
msg.append('bdf.loads[%s] : ???')
else:
for (lid, load_combinations) in sorted(model.load_combinations.items()):
msg.append('bdf.load_combinations[%s]' % lid)
groups_dict = {} # type: Dict[str, int]
for load_combination in load_combinations:
groups_dict[load_combination.type] = groups_dict.get(load_combination.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
for (lid, loads) in sorted(model.loads.items()):
msg.append('bdf.loads[%s]' % lid)
groups_dict = {}
for loadi in loads:
groups_dict[loadi.type] = groups_dict.get(loadi.type, 0) + 1
for name, count_name in sorted(groups_dict.items()):
msg.append(' %-8s %s' % (name + ':', count_name))
msg.append('')
return msg
```
#### File: bdf/bdf_interface/uncross_reference.py
```python
from typing import List, Dict, Any
from pyNastran.bdf.bdf_interface.safe_cross_reference import SafeXrefMesh
class UnXrefMesh(SafeXrefMesh):
"""
Unlinks up the various cards in the BDF.
"""
def __init__(self) -> None:
"""
The main BDF class defines all the parameters that are used.
"""
SafeXrefMesh.__init__(self)
def uncross_reference(self, word: str='') -> None:
"""uncross references the model"""
self.log.debug("Uncross Referencing%s..." % word)
self._uncross_reference_nodes()
self._uncross_reference_coords()
self._uncross_reference_elements()
self._uncross_reference_properties()
self._uncross_reference_materials()
self._uncross_reference_masses()
self._uncross_reference_aero()
self._uncross_reference_constraints()
self._uncross_reference_loads()
self._uncross_reference_sets()
self._uncross_reference_optimization()
self._uncross_reference_superelements()
for super_id, superelement in sorted(self.superelement_models.items()):
superelement.uncross_reference(word=' (Superelement %i)' % super_id)
def _uncross_reference_nodes(self) -> None:
"""uncross references the GRID objects"""
for node in self.nodes.values():
node.uncross_reference()
for point in self.points.values():
point.uncross_reference()
def _uncross_reference_coords(self) -> None:
"""uncross references the CORDx objects"""
for cid, coord in self.coords.items():
if cid == 0:
continue
coord.uncross_reference()
def _uncross_reference_elements(self) -> None:
"""uncross references the element objects"""
for element in self.elements.values():
try:
element.uncross_reference()
except TypeError:
raise NotImplementedError('%s.uncross_reference' % element.type)
except AttributeError:
print(element)
raise
for element in self.masses.values():
element.uncross_reference()
for element in self.rigid_elements.values():
element.uncross_reference()
for element in self.plotels.values():
element.uncross_reference()
def _uncross_reference_properties(self) -> None:
"""uncross references the property objects"""
for prop in self.properties.values():
try:
prop.uncross_reference()
#except TypeError:
#raise NotImplementedError('%s.uncross_reference' % prop.type)
except AttributeError:
print(prop)
print('%s.uncross_reference error' % prop.type)
raise
def _uncross_reference_materials(self) -> None:
"""uncross references the material objects"""
try:
for material in self.materials.values():
material.uncross_reference()
except AttributeError:
print(material)
raise
try:
for material in self.creep_materials.values():
material.uncross_reference()
except AttributeError:
print(material)
raise
data = [self.MATS1, self.MATS3, self.MATS8,
self.MATT1, self.MATT2, self.MATT3, self.MATT4, self.MATT5,
self.MATT8, self.MATT9]
for material_deps in data:
for mat in material_deps.values():
try:
mat.uncross_reference()
except AttributeError:
print(mat)
raise
def _uncross_reference_masses(self) -> None:
"""uncross references the mass objects"""
for mass in self.masses.values():
mass.uncross_reference()
for prop in self.properties_mass.values():
prop.uncross_reference()
def _uncross_reference_aero(self) -> None:
"""uncross references the aero objects"""
for caero in self.caeros.values():
caero.uncross_reference()
for paero in self.paeros.values():
paero.uncross_reference()
for trim in self.trims.values():
trim.uncross_reference()
for csschd in self.csschds.values():
csschd.uncross_reference()
for spline in self.splines.values():
spline.uncross_reference()
for aecomp in self.aecomps.values():
aecomp.uncross_reference()
for aelist in self.aelists.values():
aelist.uncross_reference()
for aeparam in self.aeparams.values():
aeparam.uncross_reference()
for trim in self.trims.values():
trim.uncross_reference()
for csschd in self.csschds.values():
csschd.uncross_reference()
#for aestat in self.aestats.values():
#aestat.uncross_reference()
for aesurf in self.aesurf.values():
aesurf.uncross_reference()
for aesurfs in self.aesurfs.values():
aesurfs.uncross_reference()
for flutter in self.flutters.values():
flutter.uncross_reference()
for monitor_point in self.monitor_points:
monitor_point.uncross_reference()
if self.aero:
self.aero.uncross_reference()
if self.aeros:
self.aeros.uncross_reference()
def _uncross_reference_constraints(self) -> None:
"""
Unlinks the SPCADD, SPC, SPCAX, SPCD, MPCADD, MPC, SUPORT,
SUPORT1, SESUPORT cards.
"""
for spcadds in self.spcadds.values():
for spcadd in spcadds:
spcadd.uncross_reference()
for spc in self.spcs.values():
for spci in spc:
spci.uncross_reference()
for spcoffs in self.spcoffs.values():
for spcoff in spcoffs:
spcoff.uncross_reference()
for mpcadds in self.mpcadds.values():
for mpcadd in mpcadds:
mpcadd.uncross_reference()
for mpc in self.mpcs.values():
for mpci in mpc:
mpci.uncross_reference()
for suport in self.suport:
suport.uncross_reference()
for suport1 in self.suport1.values():
suport1.uncross_reference()
for se_suport in self.se_suport:
se_suport.uncross_reference()
def _uncross_reference_loads(self) -> None:
"""
Unlinks the LOAD
PLOAD1, PLOAD2, PLOAD4
FORCE, FORCE1, FORCE2
MOMENT, MOMENT1, MOMENT2
DLOAD, ACSRCE, RLOAD1, RLOAD2, TLOAD1, TLOAD2
DPHASE, DAREA
TEMP
"""
for (unused_lid, load_combinations) in self.load_combinations.items():
for load_combination in load_combinations:
load_combination.uncross_reference()
for (unused_lid, loads) in self.loads.items():
for load in loads:
load.uncross_reference()
for (unused_lid, dloads) in self.dloads.items():
for dload in dloads:
dload.uncross_reference()
for (unused_lid, dload_entries) in self.dload_entries.items():
for dload_entry in dload_entries:
dload_entry.uncross_reference()
for unused_key, darea in self.dareas.items():
darea.uncross_reference()
for unused_key, dphase in self.dphases.items():
dphase.uncross_reference()
for unused_key, tic in self.tics.items():
tic.uncross_reference()
def _uncross_reference_sets(self) -> None:
"""uncross references the set objects"""
for set_obj in self.asets:
set_obj.uncross_reference()
for set_obj in self.omits:
set_obj.uncross_reference()
for set_obj in self.bsets:
set_obj.uncross_reference()
for set_obj in self.csets:
set_obj.uncross_reference()
for set_obj in self.qsets:
set_obj.uncross_reference()
for unused_name, set_objs in self.usets.items():
for set_obj in set_objs:
set_obj.uncross_reference()
# superelements
for unused_key, set_obj in self.se_sets.items():
set_obj.uncross_reference()
for set_obj in self.se_bsets:
set_obj.uncross_reference()
for set_obj in self.se_csets:
set_obj.uncross_reference()
for set_obj in self.se_qsets:
set_obj.uncross_reference()
for set_obj in self.se_usets:
set_obj.uncross_reference()
def _uncross_reference_optimization(self) -> None:
"""uncross references the optimization objects"""
for unused_key, deqatn in self.dequations.items():
deqatn.uncross_reference()
for unused_key, dresp in self.dresps.items():
dresp.uncross_reference()
for unused_key, dconstrs in self.dconstrs.items():
for dconstr in dconstrs:
dconstr.uncross_reference()
for unused_key, dvcrel in self.dvcrels.items():
dvcrel.uncross_reference()
for unused_key, dvmrel in self.dvmrels.items():
dvmrel.uncross_reference()
for unused_key, dvprel in self.dvprels.items():
dvprel.uncross_reference()
for unused_key, desvar in self.desvars.items():
desvar.uncross_reference()
for unused_key, desvar in self.topvar.items():
desvar.uncross_reference()
```
#### File: cards/elements/rods.py
```python
from numpy.linalg import norm # type: ignore
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import Element #, Mid
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double_or_blank)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
class RodElement(Element): # CROD, CONROD, CTUBE
def __init__(self):
Element.__init__(self)
@property
def node_ids(self):
return self._node_ids(nodes=self.nodes_ref, allow_empty_nodes=False)
def get_edge_ids(self):
return [tuple(sorted(self.node_ids))]
def Mass(self):
r"""
get the mass of the element.
.. math:: m = \left( \rho A + nsm \right) L
"""
L = self.Length()
mass = (self.Rho() * self.Area() + self.Nsm()) * L
return mass
class CROD(RodElement):
"""
+------+-----+-----+----+----+
| 1 | 2 | 3 | 4 | 5 |
+======+=====+=====+====+====+
| CROD | EID | PID | N1 | N2 |
+------+-----+-----+----+----+
"""
type = 'CROD'
_field_map = {
1: 'eid', 2:'pid',
}
def _update_field_helper(self, n, value):
if n == 3:
self.nodes[0] = value
elif n == 4:
self.nodes[1] = value
else:
raise KeyError('Field %r=%r is an invalid %s entry.' % (n, value, self.type))
@classmethod
def export_to_hdf5(cls, h5_file, model, eids):
"""exports the elements in a vectorized way"""
#comments = []
pids = []
nodes = []
for eid in eids:
element = model.elements[eid]
#comments.append(element.comment)
pids.append(element.pid)
nodes.append(element.nodes)
#h5_file.create_dataset('_comment', data=comments)
h5_file.create_dataset('eid', data=eids)
h5_file.create_dataset('pid', data=pids)
h5_file.create_dataset('nodes', data=nodes)
def __init__(self, eid, pid, nids, comment=''):
"""
Creates a CROD card
Parameters
----------
eid : int
element id
pid : int
property id (PROD)
nids : List[int, int]
node ids
comment : str; default=''
a comment for the card
"""
RodElement.__init__(self)
if comment:
self.comment = comment
self.eid = eid
self.pid = pid
self.prepare_node_ids(nids)
assert len(self.nodes) == 2
self.nodes_ref = None
self.pid_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a CROD card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
eid = integer(card, 1, 'eid')
pid = integer_or_blank(card, 2, 'pid', eid)
nids = [integer(card, 3, 'n1'),
integer(card, 4, 'n2')]
assert len(card) == 5, 'len(CROD card) = %i\ncard=%s' % (len(card), str(card))
return CROD(eid, pid, nids, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a CROD card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
eid = data[0]
pid = data[1]
nids = data[2:4]
return CROD(eid, pid, nids, comment=comment)
def cross_reference(self, model):
msg = ', which is required by CROD eid=%s' % (self.eid)
self.nodes_ref = model.Nodes(self.nodes, msg=msg)
self.pid_ref = model.Property(self.pid, msg=msg)
def safe_cross_reference(self, model, xref_errors):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by CROD eid=%s' % self.eid
self.nodes_ref = model.Nodes(self.node_ids, msg=msg)
self.pid_ref = model.safe_property(self.pid, self.eid, xref_errors, msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.nodes = self.node_ids
self.pid = self.Pid()
self.nodes_ref = None
self.pid_ref = None
def _verify(self, xref):
eid = self.eid
pid = self.Pid()
unused_edges = self.get_edge_ids()
assert isinstance(eid, int), 'eid=%r' % eid
assert isinstance(pid, int), 'pid=%r' % pid
if xref: # True
mid = self.Mid()
L = self.Length()
A = self.Area()
nsm = self.Nsm()
mpa = self.MassPerLength()
mass = self.Mass()
assert isinstance(mid, integer_types), 'mid=%r' % mid
assert isinstance(L, float), 'L=%r' % L
assert isinstance(A, float), 'A=%r' % A
assert isinstance(nsm, float), 'nsm=%r' % nsm
assert isinstance(mpa, float), 'mass_per_length=%r' % mpa
assert isinstance(mass, float), 'mass=%r' % mass
c = self.Centroid()
for i in range(3):
assert isinstance(c[i], float), 'centroid[%i]=%r' % (i, c[i])
def Length(self):
r"""
Gets the length of the element.
.. math:: L = \sqrt{ (n_{x2}-n_{x1})^2+(n_{y2}-n_{y1})^2+(n_{z2}-n_{z1})^2 }
"""
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
L = norm(self.nodes_ref[1].get_position() - self.nodes_ref[0].get_position())
return L
def Rho(self):
r"""returns the material density \f$ \rho \f$"""
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.mid_ref.rho
def Centroid(self):
return (self.nodes_ref[0].get_position() + self.nodes_ref[1].get_position()) / 2.
def center_of_mass(self):
return self.Centroid()
def Mid(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.Mid()
def Area(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.A
def Nsm(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.nsm
def E(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.mid_ref.E()
def G(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.mid_ref.G()
def J(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.J()
def C(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.c
def MassPerLength(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
mass_per_length = self.pid_ref.mid_ref.rho * self.pid_ref.A + self.pid_ref.nsm
return mass_per_length
def raw_fields(self):
list_fields = ['CROD', self.eid, self.Pid()] + self.node_ids
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.raw_fields()
return self.comment + print_card_8(card)
def write_card_16(self, is_double=False):
card = self.raw_fields()
return self.comment + print_card_16(card)
class CTUBE(RodElement):
"""
+-------+-----+-----+----+----+
| 1 | 2 | 3 | 4 | 5 |
+=======+=====+=====+====+====+
| CTUBE | EID | PID | N1 | N2 |
+-------+-----+-----+----+----+
"""
type = 'CTUBE'
_field_map = {
1: 'eid', 2:'pid',
}
def _update_field_helper(self, n, value):
if n == 3:
self.nodes[0] = value
elif n == 4:
self.nodes[1] = value
else:
raise KeyError('Field %r=%r is an invalid %s entry.' % (n, value, self.type))
@classmethod
def export_to_hdf5(cls, h5_file, model, eids):
"""exports the elements in a vectorized way"""
#comments = []
pids = []
nodes = []
for eid in eids:
element = model.elements[eid]
#comments.append(element.comment)
pids.append(element.pid)
nodes.append(element.nodes)
#h5_file.create_dataset('_comment', data=comments)
h5_file.create_dataset('eid', data=eids)
h5_file.create_dataset('pid', data=pids)
h5_file.create_dataset('nodes', data=nodes)
def __init__(self, eid, pid, nids, comment=''):
"""
Creates a CTUBE card
Parameters
----------
eid : int
element id
pid : int
property id
nids : List[int, int]
node ids
comment : str; default=''
a comment for the card
"""
RodElement.__init__(self)
if comment:
self.comment = comment
self.eid = eid
self.pid = pid
self.prepare_node_ids(nids)
assert len(self.nodes) == 2
self.nodes_ref = None
self.pid_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a CTUBE card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
eid = integer(card, 1, 'eid')
pid = integer_or_blank(card, 2, 'pid', eid)
nids = [integer(card, 3, 'n1'),
integer(card, 4, 'n2')]
assert len(card) == 5, 'len(CTUBE card) = %i\ncard=%s' % (len(card), card)
return CTUBE(eid, pid, nids, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a CTUBE card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
eid = data[0]
pid = data[1]
nids = data[2:4]
return CTUBE(eid, pid, nids, comment=comment)
def cross_reference(self, model):
msg = ', which is required by CTUBE eid=%s' % (self.eid)
self.nodes_ref = model.Nodes(self.nodes, msg=msg)
self.pid_ref = model.Property(self.pid, msg=msg)
def safe_cross_reference(self, model, xref_errors):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by CTUBE eid=%s' % self.eid
self.nodes_ref = model.Nodes(self.node_ids, msg=msg)
self.pid_ref = model.safe_property(self.pid, self.eid, xref_errors, msg=msg)
## TODO: xref coord
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.nodes = self.node_ids
self.pid = self.Pid()
self.nodes_ref = None
self.pid_ref = None
def _verify(self, xref):
pid = self.Pid()
unused_edges = self.get_edge_ids()
assert isinstance(pid, int), 'pid=%r' % pid
if xref:
A = self.Area()
L = self.Length()
nsm = self.Nsm()
assert isinstance(A, float), 'A=%r' % A
assert isinstance(L, float), 'L=%r' % L
assert isinstance(nsm, float), 'nsm=%r' % nsm
if self.pid_ref.mid_ref.type == 'MAT1':
mpa = self.pid_ref.MassPerLength()
mass = self.Mass()
assert isinstance(mpa, float), 'mass_per_length=%r' % mpa
assert isinstance(mass, float), 'mass=%r' % mass
elif self.pid_ref.mid_ref.type == 'MAT4':
pass
else:
msg = '_verify does not support self.pid_ref.mid_ref.type=%s' % (
self.pid_ref.mid_ref.type)
raise NotImplementedError(msg)
c = self.Centroid()
for i in range(3):
assert isinstance(c[i], float), 'centroid[%i]=%r' % (i, c[i])
def Length(self):
r"""
Gets the length of the element.
.. math:: L = \sqrt{ (n_{x2}-n_{x1})^2+(n_{y2}-n_{y1})^2+(n_{z2}-n_{z1})^2 }
"""
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
L = norm(self.nodes_ref[1].get_position() - self.nodes_ref[0].get_position())
return L
def Rho(self):
r"""returns the material density \f$ \rho \f$"""
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.mid_ref.rho
def Mid(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.Mid()
def Mass(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.MassPerLength() * self.Length()
def Nsm(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.Nsm()
def Area(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.Area()
def E(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.mid_ref.E()
def G(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.mid_ref.G()
def J(self):
if self.pid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.pid_ref.J()
def Centroid(self):
return (self.nodes_ref[0].get_position() + self.nodes_ref[1].get_position()) / 2.
def center_of_mass(self):
return self.Centroid()
def raw_fields(self):
list_fields = ['CTUBE', self.eid, self.Pid()] + self.node_ids
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
return self.comment + print_card_8(card)
class CONROD(RodElement):
"""
+--------+-----+-----+----+-----+---+---+---+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+=====+=====+====+=====+===+===+===+=====+
| CONROD | EID | N1 | N2 | MID | A | J | C | NSM |
+--------+-----+-----+----+-----+---+---+---+-----+
"""
type = 'CONROD'
pid = -10 # 10 is the element type per DMAP
_field_map = {
1: 'eid', 4:'mid', 5:'A', 6:'j', 7:'c', 8:'nsm',
}
def _update_field_helper(self, n, value):
if n == 2:
self.nodes[0] = value
elif n == 3:
self.nodes[1] = value
else:
raise KeyError('Field %r=%r is an invalid %s entry.' % (n, value, self.type))
@classmethod
def export_to_hdf5(cls, h5_file, model, eids):
"""exports the elements in a vectorized way"""
#comments = []
nodes = []
mids = []
A = []
J = []
c = []
nsm = []
for eid in eids:
element = model.elements[eid]
#comments.append(element.comment)
mids.append(element.mid)
nodes.append(element.nodes)
A.append(element.A)
J.append(element.j)
c.append(element.c)
nsm.append(element.nsm)
#h5_file.create_dataset('_comment', data=comments)
h5_file.create_dataset('eid', data=eids)
h5_file.create_dataset('nodes', data=nodes)
h5_file.create_dataset('mid', data=mids)
h5_file.create_dataset('A', data=A)
h5_file.create_dataset('J', data=J)
h5_file.create_dataset('c', data=c)
h5_file.create_dataset('nsm', data=nsm)
def __init__(self, eid, mid, nids, A=0.0, j=0.0, c=0.0, nsm=0.0, comment=''):
"""
Creates a CONROD card
Parameters
----------
eid : int
element id
mid : int
material id
nids : List[int, int]
node ids
A : float
area
j : float; default=0.
polar moment of inertia
c : float; default=0.
stress factor
nsm : float; default=0.
non-structural mass per unit length
comment : str; default=''
a comment for the card
"""
RodElement.__init__(self)
if comment:
self.comment = comment
self.eid = eid
self.mid = mid
self.A = A
self.j = j
self.c = c
self.nsm = nsm
self.prepare_node_ids(nids)
assert len(self.nodes) == 2
self.nodes_ref = None
self.mid_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a CONROD card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
eid = integer(card, 1, 'eid')
nids = [integer(card, 2, 'n1'),
integer(card, 3, 'n2')]
mid = integer(card, 4, 'mid')
A = double_or_blank(card, 5, 'A', 0.0)
j = double_or_blank(card, 6, 'j', 0.0)
c = double_or_blank(card, 7, 'c', 0.0)
nsm = double_or_blank(card, 8, 'nsm', 0.0)
assert len(card) <= 9, 'len(CONROD card) = %i\ncard=%s' % (len(card), str(card))
return CONROD(eid, mid, nids, A, j, c, nsm, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a CONROD card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
eid = data[0]
nids = data[1:3]
mid = data[3]
A = data[4]
j = data[5]
c = data[6]
nsm = data[7]
return CONROD(eid, mid, nids, A, j, c, nsm, comment=comment)
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by CONROD eid=%s' % (self.eid)
self.nodes_ref = model.Nodes(self.nodes, msg=msg)
self.mid_ref = model.Material(self.mid, msg=msg)
def safe_cross_reference(self, model, xref_errors):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by CONROD eid=%s' % self.eid
self.nodes_ref = model.Nodes(self.node_ids, msg=msg)
self.mid_ref = model.safe_material(self.mid, self.eid, xref_errors, msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.nodes = self.node_ids
self.mid = self.Mid()
self.nodes_ref = None
self.mid_ref = None
def _verify(self, xref):
pid = self.Pid()
assert pid == -10, 'pid=%r' % pid
unused_edges = self.get_edge_ids()
if xref: # True
mid = self.Mid()
L = self.Length()
A = self.Area()
nsm = self.Nsm()
mpa = self.MassPerLength()
mass = self.Mass()
assert isinstance(mid, integer_types), 'mid=%r' % mid
assert isinstance(L, float), 'L=%r' % L
assert isinstance(A, float), 'A=%r' % A
assert isinstance(nsm, float), 'nsm=%r' % nsm
assert isinstance(mpa, float), 'mass_per_length=%r' % mpa
assert isinstance(mass, float), 'mass=%r' % mass
c = self.Centroid()
for i in range(3):
assert isinstance(c[i], float), 'centroid[%i]=%r' % (i, c[i])
def Length(self):
r"""
Gets the length of the element.
.. math:: L = \sqrt{ (n_{x2}-n_{x1})^2+(n_{y2}-n_{y1})^2+(n_{z2}-n_{z1})^2 }
"""
if self.mid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
L = norm(self.nodes_ref[1].get_position() - self.nodes_ref[0].get_position())
return L
def Rho(self):
r"""returns the material density \f$ \rho \f$"""
if self.mid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.mid_ref.rho
def Centroid(self):
"""Get the centroid of the element (save as the center of mass for the CONROD)"""
if self.mid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return (self.nodes_ref[0].get_position() + self.nodes_ref[1].get_position()) / 2.
def center_of_mass(self):
"""Get the center of mass of the element (save as the centroid for the CONROD)"""
return self.Centroid()
def Mid(self):
if self.mid_ref is None:
return self.mid
#elif self.mid is None:
#print ("No material defined for element ", self.eid)
#return None
return self.mid_ref.mid
def Pid(self):
"""Spoofs the property id for the CONROD"""
return self.pid
def MassPerLength(self):
"""Gets the mass per length of the CONROD"""
if self.mid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
massPerLength = self.mid_ref.rho * self.A + self.nsm
return massPerLength
def C(self):
"""torsional constant"""
return self.c
def Area(self):
return self.A
def J(self):
r"""returns the Polar Moment of Inertia, :math:`J`"""
return self.j
def Nsm(self):
"""Placeholder method for the non-structural mass"""
return self.nsm
def E(self):
r"""returns the Young's Modulus, :math:`E`$"""
if self.mid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.mid_ref.E()
def G(self):
r"""returns the Shear Modulus, :math:`G`"""
if self.mid_ref is None:
msg = 'Element eid=%i has not been cross referenced.\n%s' % (self.eid, str(self))
raise RuntimeError(msg)
return self.mid_ref.G()
#def write_code_aster(self):
#msg = ''
#msg += " POUTRE=_F(GROUP_MA='CONROD_%s',\n" % self.eid
#msg += " SECTION='CERCLE', # circular section\n"
#if self.Thickness():
#msg += " CARA=('R','EP'), # radius, thickness\n"
#msg += " VALE=(%g,%g),\n" % (
#self.Radius(), self.Thickness())
#else:
#msg += " CARA=('R') # radius\n"
#msg += " VALE=(%g),\n" % self.Radius()
#return msg
def raw_fields(self):
list_fields = [
'CONROD', self.eid] + self.node_ids + [
self.Mid(), self.A, self.j, self.c, self.nsm]
return list_fields
def repr_fields(self):
j = set_blank_if_default(self.j, 0.0)
c = set_blank_if_default(self.c, 0.0)
nsm = set_blank_if_default(self.nsm, 0.0)
list_fields = [
'CONROD', self.eid] + self.node_ids + [self.Mid(), self.A, j, c, nsm]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
```
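A minimal usage sketch for the rod elements above (illustrative, not part of the source file; it assumes the usual `BDF.add_grid`/`add_mat1`/`add_conrod` helpers): after `cross_reference()`, `Mass()` is simply `MassPerLength() * Length()`.

```python
from pyNastran.bdf.bdf import BDF

model = BDF()
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [10., 0., 0.])
model.add_mat1(mid=100, E=3.0e7, G=None, nu=0.3, rho=0.1)  # rho = mass / length^3
model.add_conrod(eid=10, mid=100, nids=[1, 2], A=2.0)      # A = cross-sectional area
model.cross_reference()

elem = model.elements[10]
print(elem.Length())         # 10.0
print(elem.MassPerLength())  # rho*A + nsm = 0.1*2.0 + 0.0 = 0.2
print(elem.Mass())           # 0.2 * 10.0 = 2.0
```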
#### File: bdf/cards/expand_card.py
```python
from typing import List, Union
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.bdf_interface.assign_type import interpret_value
def expand_thru(fields, set_fields=True, sort_fields=False):
# type: (List[str], bool, bool) -> List[int]
"""
Expands a list of values of the form [1,5,THRU,9,13]
to be [1,5,6,7,8,9,13]
Parameters
----------
fields : List[int/str]
the fields to expand
set_fields : bool; default=True
Should the fields be converted to a set and then back to a list?
This is useful for [2, 'THRU' 5, 1]
sort_fields : bool; default=False
Should the fields be sorted at the end?
"""
# ..todo: should this be removed...is the field capitalized when read in?
if isinstance(fields, integer_types):
return [fields]
#elif isinstance(fields[0], integer_types): # don't use this [1, 'THRU', 10]
#return fields
elif len(fields) == 1:
return [int(fields[0])]
fields = [field.upper()
if isinstance(field, str) else field for field in fields]
out = []
nfields = len(fields)
i = 0
while i < nfields:
if isinstance(fields[i], str) and fields[i] == 'THRU':
istart = int(fields[i - 1])
iend = int(fields[i + 1])
# adding 1 to iend for the range offset
for j in range(istart+1, iend + 1):
out.append(j)
i += 2
else:
out.append(int(fields[i]))
i += 1
if set_fields:
out = list(set(out))
if sort_fields:
out.sort()
return out
def expand_thru_by(fields: List[str], set_fields: bool=True, sort_fields: bool=True,
require_int: bool=True, allow_blanks: bool=False) -> List[int]:
"""
Expands a list of values of the form [1,5,THRU,9,BY,2,13]
to be [1,5,7,9,13]
Parameters
----------
fields : List[int/str]
the fields to expand
set_fields : bool; default=True
Should the fields be converted to a set and then back to a list
to remove duplicates?
This is useful for [2, 'THRU' 5, 1]
sort_fields : bool; default=False
Should the fields be sorted at the end?
require_int : bool; default=True
True : all data must be integers
False : floats are allowed (e.g., DDVAL)
    allow_blanks : bool; default=False
True : blank/Nones are ignored (e.g., NSM1/NSML1)
False : crash
.. todo:: not tested
Notes
-----
used for QBDY3 and what else ???
"""
if require_int:
func = int
else:
func = interpret_value
# ..todo: should this be removed...is the field capitalized when read in?
fields = [field.upper()
if isinstance(field, str) else field for field in fields]
if len(fields) == 1:
return [func(fields[0])]
out = []
nfields = len(fields)
i = 0
by = 1
while i < nfields:
#print('fields[i]=%r' % fields[i])
is_blank = (
allow_blanks and (
(isinstance(fields[i], str) and fields[i].strip() == '') or
fields[i] is None)
)
if is_blank:
#print('blank=%s' % fields[i])
i += 1
continue
if fields[i] == 'THRU':
by = 1
by_case = False
if i + 2 < nfields and fields[i + 2] == 'BY':
by = func(fields[i + 3])
else:
by = 1
by_case = True
min_value = func(fields[i - 1])
max_value = func(fields[i + 1])
max_range = int((max_value - min_value) // by + 1) # max range value
for j in range(0, max_range): # +1 is to include final point
value = min_value + by * j
out.append(value)
out.append(max_value)
if by_case: # null/standard case
# A thru B
i += 2
else: # BY case
# A thru B by C
i += 4
else:
out.append(func(fields[i]))
i += 1
if set_fields:
out = list(set(out))
if sort_fields:
out.sort()
return out
```
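A quick illustrative sketch of the two expanders above (the import path assumes the file header `bdf/cards/expand_card.py` lives under the `pyNastran` package):

```python
from pyNastran.bdf.cards.expand_card import expand_thru, expand_thru_by

# THRU fills in every integer between its neighbours
print(expand_thru([1, 5, 'THRU', 9, 13], sort_fields=True))
# -> [1, 5, 6, 7, 8, 9, 13]

# THRU ... BY steps by the increment and always keeps the end value
print(expand_thru_by([1, 'THRU', 9, 'BY', 2]))
# -> [1, 3, 5, 7, 9]   (set_fields and sort_fields default to True)
```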
#### File: bdf/cards/material_deps.py
```python
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, double_or_blank, string)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
class MaterialDependence(BaseCard):
def __init__(self):
self.mid = None
def Mid(self):
if self.mid_ref is None:
return self.mid
return self.mid_ref.mid # TODO: is this something that should be supported?
def _get_table(self, key):
"""internal method for accessing tables"""
table = getattr(self, key)
table_ref = getattr(self, key + '_ref')
if table_ref is not None:
return table_ref.tid
return table
class MaterialDependenceThermal(MaterialDependence):
def __init__(self):
MaterialDependence.__init__(self)
def _xref_table(self, model, key, msg):
slot = getattr(self, key)
if slot is not None:
setattr(self, key + '_ref', model.TableM(slot, msg))
class MATS1(MaterialDependence):
"""
Specifies stress-dependent material properties for use in applications
involving nonlinear materials. This entry is used if a MAT1, MAT2 or MAT9
entry is specified with the same MID in a nonlinear solution sequence
(SOLs 106 and 129).
"""
type = 'MATS1'
def __init__(self, mid, tid, Type, h, hr, yf, limit1, limit2, comment=''):
MaterialDependence.__init__(self)
if comment:
self.comment = comment
#: Identification number of a MAT1, MAT2, or MAT9 entry.
self.mid = mid
#: Identification number of a TABLES1 or TABLEST entry. If H is
#: given, then this field must be blank.
self.tid = tid
#: Type of material nonlinearity. ('NLELAST' for nonlinear elastic
#: or 'PLASTIC' for elastoplastic.)
self.Type = Type
#: Work hardening slope (slope of stress versus plastic strain)
#: in units of stress. For elastic-perfectly plastic cases,
#: H=0.0. For more than a single slope in the plastic range,
#: the stress-strain data must be supplied on a TABLES1 entry
#: referenced by TID, and this field must be blank
self.h = h
#: Hardening Rule, selected by one of the following values
#: (Integer): (1) Isotropic (Default) (2) Kinematic
#: (3) Combined isotropic and kinematic hardening
self.hr = hr
#: Yield function criterion, selected by one of the following
        #: values (1) von Mises (2) Tresca (3) Mohr-Coulomb
#: (4) Drucker-Prager
self.yf = yf
#: Initial yield point
self.limit1 = limit1
#: Internal friction angle, measured in degrees, for the
#: Mohr-Coulomb and Drucker-Prager yield criteria
self.limit2 = limit2
self.tid_ref = None
self.mid_ref = None
@classmethod
def _init_from_empty(cls):
mid = 1
tid = 1
Type = None
h = None
hr = None
yf = None
limit1 = None
limit2 = None
return MATS1(mid, tid, Type, h, hr, yf, limit1, limit2, comment='')
def validate(self):
if self.Type not in ['NLELAST', 'PLASTIC']:
raise ValueError('MATS1 Type must be [NLELAST, PLASTIC]; Type=%r' % self.Type)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATS1 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
tid = integer_or_blank(card, 2, 'tid')
Type = string(card, 3, 'Type')
if Type not in ['NLELAST', 'PLASTIC', 'PLSTRN']:
raise ValueError('MATS1 Type must be [NLELAST, PLASTIC, PLSTRN]; Type=%r' % Type)
if Type == 'NLELAST':
# should we even read these?
h = None
hr = None
yf = None
limit1 = None
limit2 = None
#h = blank(card, 4, 'h')
#hr = blank(card, 6, 'hr')
#yf = blank(card, 5, 'yf')
#limit1 = blank(card, 7, 'yf')
#limit2 = blank(card, 8, 'yf')
else:
h = double_or_blank(card, 4, 'H')
yf = integer_or_blank(card, 5, 'yf', 1)
hr = integer_or_blank(card, 6, 'hr', 1)
limit1 = double(card, 7, 'limit1')
if yf in [3, 4]:
limit2 = double(card, 8, 'limit2')
else:
#limit2 = blank(card, 8, 'limit2')
limit2 = None
assert len(card) <= 9, 'len(MATS1 card) = %i\ncard=%s' % (len(card), card)
return MATS1(mid, tid, Type, h, hr, yf, limit1, limit2, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a MATS1 card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(mid, tid, Type, h, yf, hr, limit1, limit2) = data
if Type == 1:
Type = 'NLELAST'
elif Type == 2:
Type = 'PLASTIC'
elif Type == 3:
Type = 'PLSTRN'
else: # pragma: no cover
raise RuntimeError(f'Invalid Type: mid={mid}; Type={Type}; must be 1=NLELAST, '
'2=PLASTIC, or 3=PLSTRN')
return MATS1(mid, tid, Type, h, hr, yf, limit1, limit2, comment=comment)
def Yf(self):
d = {1: 'VonMises', 2: 'Tresca', 3: 'MohrCoulomb', 4: 'Drucker-Prager'}
return d[self.yf]
def Hf(self):
d = {1: 'Isotropic', 2: 'Kinematic', 3: 'Combined'}
return d[self.hr]
def E(self, strain):
"""
Gets E (Young's Modulus) for a given strain.
Parameters
----------
strain : float / None
the strain (None -> linear E value)
Returns
-------
E : float
Young's Modulus
"""
msg = "E (Young's Modulus) not implemented for MATS1"
raise NotImplementedError(msg)
#if self.tid:
#E = self.tid_ref.Value(strain)
#return E
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by MATS1 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
if self.tid: # then self.h is used
self.tid_ref = model.Table(self.tid, msg=msg) # TABLES1 or TABLEST
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.mid = self.Mid()
if self.tid:
self.tid = self.Tid()
self.tid_ref = None
self.mid_ref = None
def Tid(self):
if self.tid_ref is None:
return self.tid
return self.tid_ref.tid
def raw_fields(self):
list_fields = ['MATS1', self.Mid(), self.Tid(), self.Type,
self.h, self.yf, self.hr, self.limit1, self.limit2]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
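# Editorial note (not part of the original source): the integer hardening-rule (hr)
# and yield-function (yf) codes stored on a MATS1 card are decoded by Hf()/Yf() above.
# For example, a hypothetical MATS1(mid=1, tid=None, Type='PLASTIC', h=0.0, hr=3,
# yf=2, limit1=30000., limit2=None) reports Yf() == 'Tresca' and Hf() == 'Combined'.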
class MATT1(MaterialDependenceThermal):
"""
Specifies temperature-dependent material properties on MAT1 entry
fields via TABLEMi entries.
+-------+-------+-------+-------+-------+--------+------+------+-------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=======+=======+=======+=======+========+======+======+=======+
| MATT1 | MID | T(E) | T(G) | T(NU) | T(RHO) | T(A) | | T(GE) |
+-------+-------+-------+-------+-------+--------+------+------+-------+
| | T(ST) | T(SC) | T(SS) | | | | | |
+-------+-------+-------+-------+-------+--------+------+------+-------+
"""
type = 'MATT1'
@classmethod
def _init_from_empty(cls):
mid = 1
return MATT1(mid, e_table=None, g_table=None, nu_table=None, rho_table=None,
a_table=None, ge_table=None, st_table=None,
sc_table=None, ss_table=None, comment='')
def __init__(self, mid, e_table=None, g_table=None, nu_table=None,
rho_table=None, a_table=None, ge_table=None, st_table=None,
sc_table=None, ss_table=None, comment=''):
MaterialDependenceThermal.__init__(self)
if comment:
self.comment = comment
self.mid = mid
if e_table == 0:
e_table = None
if g_table == 0:
g_table = None
if nu_table == 0:
nu_table = None
if rho_table == 0:
rho_table = None
if a_table == 0:
a_table = None
if ge_table == 0:
ge_table = None
if st_table == 0:
st_table = None
if sc_table == 0:
sc_table = None
if ss_table == 0:
ss_table = None
self.e_table = e_table
self.g_table = g_table
self.nu_table = nu_table
self.rho_table = rho_table
self.a_table = a_table
self.ge_table = ge_table
self.st_table = st_table
self.sc_table = sc_table
self.ss_table = ss_table
self.mid_ref = None
self.e_table_ref = None
self.g_table_ref = None
self.nu_table_ref = None
self.rho_table_ref = None
self.a_table_ref = None
self.ge_table_ref = None
self.st_table_ref = None
self.sc_table_ref = None
self.ss_table_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT1 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
e_table = integer_or_blank(card, 2, 'T(E)')
g_table = integer_or_blank(card, 3, 'T(G)')
nu_table = integer_or_blank(card, 4, 'T(nu)')
rho_table = integer_or_blank(card, 5, 'T(rho)')
a_table = integer_or_blank(card, 6, 'T(A)')
ge_table = integer_or_blank(card, 8, 'T(ge)')
st_table = integer_or_blank(card, 9, 'T(st)')
sc_table = integer_or_blank(card, 10, 'T(sc)')
ss_table = integer_or_blank(card, 11, 'T(ss)')
assert len(card) <= 12, 'len(MATT1 card) = %i\ncard=%s' % (len(card), card)
return MATT1(mid, e_table, g_table, nu_table, rho_table, a_table,
ge_table, st_table, sc_table, ss_table, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a MATT1 card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(mid, E_table, G_table, nu_table, rho_table, A_table, dunno_a, ge_table,
st_table, sc_table, ss_table, dunno_b) = data
if E_table == 0:
E_table = None
elif E_table > 100000000:
E_table = -(E_table - 100000000)
if G_table == 0:
G_table = None
elif G_table > 100000000:
G_table = -(G_table - 100000000)
if nu_table == 0:
nu_table = None
elif nu_table > 100000000:
nu_table = -(nu_table - 100000000)
if rho_table == 0:
rho_table = None
elif rho_table > 100000000:
rho_table = -(rho_table - 100000000)
if E_table == 0:
E_table = None
if A_table > 100000000:
A_table = -(A_table - 100000000)
mat = MATT1(mid, E_table, G_table, nu_table, rho_table, A_table,
ge_table, st_table, sc_table, ss_table, comment=comment)
assert dunno_a == 0, '%s; dunno_a=%s\n%s' % (data, dunno_a, str(mat))
assert dunno_b == 0, '%s; dunno_b=%s\n%s' % (data, dunno_b, str(mat))
return mat
def E(self, temperature):
"""
Gets E (Young's Modulus) for a given temperature.
Parameters
----------
temperature : float; default=None
the temperature (None -> linear E value)
Returns
-------
E : float
Young's Modulus
"""
E = None
        if self.e_table_ref is not None:
            E = self.e_table_ref.Value(temperature)
return E
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by MATT1 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
self._xref_table(model, 'e_table', msg=msg)
self._xref_table(model, 'g_table', msg=msg)
self._xref_table(model, 'nu_table', msg=msg)
self._xref_table(model, 'rho_table', msg=msg)
self._xref_table(model, 'a_table', msg=msg)
self._xref_table(model, 'ge_table', msg=msg)
self._xref_table(model, 'st_table', msg=msg)
self._xref_table(model, 'sc_table', msg=msg)
self._xref_table(model, 'ss_table', msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.mid = self.Mid()
## TODO: remove refs
self.e_table = self.E_table()
self.g_table = self.G_table()
self.nu_table = self.Nu_table()
self.rho_table = self.Rho_table()
self.a_table = self.A_table()
self.ge_table = self.Ge_table()
self.st_table = self.St_table()
self.sc_table = self.Sc_table()
self.ss_table = self.Ss_table()
self.mid_ref = None
self.e_table_ref = None
self.g_table_ref = None
self.nu_table_ref = None
self.rho_table_ref = None
self.a_table_ref = None
self.ge_table_ref = None
self.st_table_ref = None
self.sc_table_ref = None
self.ss_table_ref = None
def E_table(self):
return self._get_table('e_table')
def G_table(self):
return self._get_table('g_table')
def Nu_table(self):
return self._get_table('nu_table')
def Rho_table(self):
return self._get_table('rho_table')
def A_table(self):
return self._get_table('a_table')
def Ge_table(self):
return self._get_table('ge_table')
def St_table(self):
return self._get_table('st_table')
def Sc_table(self):
return self._get_table('sc_table')
def Ss_table(self):
return self._get_table('ss_table')
def raw_fields(self):
list_fields = [
'MATT1', self.Mid(), self.E_table(), self.G_table(),
self.Nu_table(), self.Rho_table(), self.A_table(), self.Ge_table(),
self.St_table(), self.Sc_table(), self.Ss_table(),
]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
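# Editorial note (not part of the original source): once cross_reference() has resolved
# e_table into e_table_ref (a TABLEMi object), E(temperature) just evaluates that table,
# i.e. matt1.E(300.0) == matt1.e_table_ref.Value(300.0); with no table it returns None.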
class MATT2(MaterialDependenceThermal):
"""
Specifies temperature-dependent material properties on MAT2 entry
fields via TABLEMi entries.
+-------+-------+--------+--------+--------+--------+--------+--------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=======+========+========+========+========+========+========+========+
    | MATT2 |  MID  | T(G11) | T(G12) | T(G13) | T(G22) | T(G23) | T(G33) | T(RHO) |
+-------+-------+--------+--------+--------+--------+--------+--------+--------+
| | T(A1) | T(A2) | T(A3) | | T(GE) | T(ST) | T(SC) | T(SS) |
+-------+-------+--------+--------+--------+--------+--------+--------+--------+
"""
type = 'MATT2'
@classmethod
def _init_from_empty(cls):
mid = 1
return MATT2(mid, g11_table=None, g12_table=None, g13_table=None, g22_table=None,
g23_table=None, g33_table=None, rho_table=None,
a1_table=None, a2_table=None, a3_table=None, ge_table=None,
st_table=None, sc_table=None, ss_table=None, comment='')
def __init__(self, mid, g11_table=None, g12_table=None, g13_table=None,
g22_table=None, g23_table=None, g33_table=None, rho_table=None,
a1_table=None, a2_table=None, a3_table=None,
ge_table=None, st_table=None, sc_table=None, ss_table=None, comment=''):
MaterialDependenceThermal.__init__(self)
if comment:
self.comment = comment
self.mid = mid
self.g11_table = g11_table
self.g12_table = g12_table
self.g13_table = g13_table
self.g22_table = g22_table
self.g23_table = g23_table
self.g33_table = g33_table
self.rho_table = rho_table
self.a1_table = a1_table
self.a2_table = a2_table
self.a3_table = a3_table
self.ge_table = ge_table
self.st_table = st_table
self.sc_table = sc_table
self.ss_table = ss_table
self.mid_ref = None
self.g11_table_ref = None
self.g12_table_ref = None
self.g13_table_ref = None
self.g22_table_ref = None
self.g23_table_ref = None
self.g33_table_ref = None
self.rho_table_ref = None
self.a1_table_ref = None
self.a2_table_ref = None
self.a3_table_ref = None
self.ge_table_ref = None
self.st_table_ref = None
self.sc_table_ref = None
self.ss_table_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT2 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
g11_table = integer_or_blank(card, 2, 'T(G11)')
g12_table = integer_or_blank(card, 3, 'T(G12)')
g13_table = integer_or_blank(card, 4, 'T(G13)')
g22_table = integer_or_blank(card, 5, 'T(G22)')
g23_table = integer_or_blank(card, 6, 'T(G23)')
g33_table = integer_or_blank(card, 7, 'T(G33)')
rho_table = integer_or_blank(card, 8, 'T(rho)')
a1_table = integer_or_blank(card, 9, 'T(A1)')
a2_table = integer_or_blank(card, 10, 'T(A2)')
a3_table = integer_or_blank(card, 11, 'T(A3)')
ge_table = integer_or_blank(card, 13, 'T(ge)')
st_table = integer_or_blank(card, 14, 'T(st)')
sc_table = integer_or_blank(card, 15, 'T(sc)')
ss_table = integer_or_blank(card, 16, 'T(ss)')
assert len(card) <= 17, 'len(MATT2 card) = %i\ncard=%s' % (len(card), card)
return MATT2(mid, g11_table, g12_table, g13_table, g22_table, g23_table,
g33_table, rho_table, a1_table,
a2_table, a3_table, ge_table,
st_table, sc_table, ss_table,
comment=comment)
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by MATT2 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
self._xref_table(model, 'g11_table', msg=msg)
self._xref_table(model, 'g12_table', msg=msg)
self._xref_table(model, 'g13_table', msg=msg)
self._xref_table(model, 'g22_table', msg=msg)
self._xref_table(model, 'g23_table', msg=msg)
self._xref_table(model, 'g33_table', msg=msg)
self._xref_table(model, 'rho_table', msg=msg)
self._xref_table(model, 'a1_table', msg=msg)
self._xref_table(model, 'a2_table', msg=msg)
self._xref_table(model, 'a3_table', msg=msg)
self._xref_table(model, 'ge_table', msg=msg)
self._xref_table(model, 'st_table', msg=msg)
self._xref_table(model, 'sc_table', msg=msg)
self._xref_table(model, 'ss_table', msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.mid = self.Mid()
self.g11_table = self.G11_table()
self.g12_table = self.G12_table()
self.g13_table = self.G13_table()
self.g22_table = self.G22_table()
self.g23_table = self.G23_table()
self.g33_table = self.G33_table()
self.rho_table = self.Rho_table()
self.a1_table = self.A1_table()
self.a2_table = self.A2_table()
self.a3_table = self.A3_table()
self.ge_table = self.Ge_table()
self.st_table = self.St_table()
self.sc_table = self.Sc_table()
self.ss_table = self.Ss_table()
self.mid_ref = None
def G11_table(self):
return self._get_table('g11_table')
def G12_table(self):
return self._get_table('g12_table')
def G13_table(self):
return self._get_table('g13_table')
def G22_table(self):
return self._get_table('g22_table')
def G23_table(self):
return self._get_table('g23_table')
def G33_table(self):
return self._get_table('g33_table')
def Rho_table(self):
return self._get_table('rho_table')
def A1_table(self):
return self._get_table('a1_table')
def A2_table(self):
return self._get_table('a2_table')
def A3_table(self):
return self._get_table('a3_table')
def Ge_table(self):
return self._get_table('ge_table')
def St_table(self):
return self._get_table('st_table')
def Sc_table(self):
return self._get_table('sc_table')
def Ss_table(self):
return self._get_table('ss_table')
def raw_fields(self):
list_fields = [
'MATT2', self.Mid(), self.G11_table(), self.G12_table(),
self.G13_table(), self.G22_table(), self.G23_table(),
self.G33_table(), self.Rho_table(), self.A1_table(),
self.A2_table(), self.A3_table(), None, self.Ge_table(),
self.St_table(), self.Sc_table(), self.Ss_table()
]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
#MATT3 - CTRIAX6 only
class MATT3(MaterialDependenceThermal):
"""
Specifies temperature-dependent material properties on MAT3 entry fields via
TABLEMi entries that are temperature dependent.
+--------+-------+-------+--------+-------+----------+----------+---------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+=======+=======+========+=======+==========+==========+=========+========+
| MATT3 | MID | T(EX) | T(ETH) | T(EZ) | T(NUXTH) | T(NUTHZ) | T(NUZX) | T(RHO) |
+--------+-------+-------+--------+-------+----------+----------+---------+--------+
| | | | T(GZX) | T(AX) | T(ATH) | T(AZ) | | T(GE) |
+--------+-------+-------+--------+-------+----------+----------+---------+--------+
"""
type = 'MATT3'
@classmethod
def _init_from_empty(cls):
mid = 1
return MATT3(mid, ex_table=None, eth_table=None, ez_table=None, nuth_table=None,
nuxz_table=None, rho_table=None, gzx_table=None,
ax_table=None, ath_table=None, az_table=None, ge_table=None, comment='')
def __init__(self, mid, ex_table=None, eth_table=None, ez_table=None,
nuth_table=None, nuxz_table=None, rho_table=None,
gzx_table=None, ax_table=None, ath_table=None, az_table=None,
ge_table=None, comment=''):
"""
Creates a MATT3 card
"""
MaterialDependenceThermal.__init__(self)
if comment:
self.comment = comment
self.mid = mid
self.ex_table = ex_table
self.eth_table = eth_table
self.ez_table = ez_table
self.nuth_table = nuth_table
self.nuxz_table = nuxz_table
self.rho_table = rho_table
self.gzx_table = gzx_table
self.ax_table = ax_table
self.ath_table = ath_table
self.az_table = az_table
self.ge_table = ge_table
self.ex_table_ref = None
self.eth_table_ref = None
self.ez_table_ref = None
self.nuth_table_ref = None
self.nuxz_table_ref = None
self.rho_table_ref = None
self.gzx_table_ref = None
self.ax_table_ref = None
self.ath_table_ref = None
self.az_table_ref = None
self.ge_table_ref = None
self.mid_ref = None
def cross_reference(self, model):
msg = ', which is required by MATT3 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
#self._get_table('ex_table')
if self.ex_table is not None:
self.ex_table_ref = model.TableM(self.ex_table)
if self.eth_table is not None:
self.eth_table_ref = model.TableM(self.eth_table)
if self.ez_table is not None:
self.ez_table_ref = model.TableM(self.ez_table)
if self.nuth_table is not None:
self.nuth_table_ref = model.TableM(self.nuth_table)
if self.nuxz_table is not None:
self.nuxz_table_ref = model.TableM(self.nuxz_table)
if self.rho_table is not None:
self.rho_table_ref = model.TableM(self.rho_table)
if self.gzx_table is not None:
self.gzx_table_ref = model.TableM(self.gzx_table)
if self.ax_table is not None:
self.ax_table_ref = model.TableM(self.ax_table)
if self.ath_table is not None:
self.ath_table_ref = model.TableM(self.ath_table)
if self.az_table is not None:
self.az_table_ref = model.TableM(self.az_table)
if self.ge_table is not None:
self.ge_table_ref = model.TableM(self.ge_table)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.mid = self.Mid()
self.mid_ref = None
self.ex_table = self.Ex_table()
self.eth_table = self.Eth_table()
self.ez_table = self.Ez_table()
self.nuth_table = self.Nuth_table()
self.nuxz_table = self.Nuxz_table()
self.rho_table = self.Rho_table()
self.gzx_table = self.Gzx_table()
self.ax_table = self.Ax_table()
self.ath_table = self.Ath_table()
self.az_table = self.Az_table()
self.ge_table = self.Ge_table()
self.ex_table_ref = None
self.eth_table_ref = None
self.ez_table_ref = None
self.nuth_table_ref = None
self.nuxz_table_ref = None
self.rho_table_ref = None
self.gzx_table_ref = None
self.ax_table_ref = None
self.ath_table_ref = None
self.az_table_ref = None
self.ge_table_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT3 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
ex_table = integer_or_blank(card, 2, 'T(EX)')
eth_table = integer_or_blank(card, 3, 'T(ETH)')
ez_table = integer_or_blank(card, 5, 'T(EZ)')
nuth_table = integer_or_blank(card, 6, 'T(NUTH)')
nuxz_table = integer_or_blank(card, 7, 'T(NUXZ)')
rho_table = integer_or_blank(card, 8, 'T(RHO)')
gzx_table = integer_or_blank(card, 11, 'T(GZX)')
ax_table = integer_or_blank(card, 12, 'T(AX)')
ath_table = integer_or_blank(card, 13, 'T(ATH)')
az_table = integer_or_blank(card, 14, 'T(AZ)')
ge_table = integer_or_blank(card, 16, 'T(GE)')
        assert len(card) <= 17, 'len(MATT3 card) = %i\ncard=%s' % (len(card), card)
return MATT3(mid, ex_table, eth_table, ez_table,
nuth_table, nuxz_table, rho_table, gzx_table,
ax_table, ath_table, az_table, ge_table, comment=comment)
def Ex_table(self):
if self.ex_table_ref is not None:
return self.ex_table_ref.tid
return self.ex_table
def Eth_table(self):
if self.eth_table_ref is not None:
return self.eth_table_ref.tid
return self.eth_table
def Ez_table(self):
if self.ez_table_ref is not None:
return self.ez_table_ref.tid
        return self.ez_table
def Nuth_table(self):
if self.nuth_table_ref is not None:
return self.nuth_table_ref.tid
return self.nuth_table
def Nuxz_table(self):
if self.nuxz_table_ref is not None:
return self.nuxz_table_ref.tid
return self.nuxz_table
def Rho_table(self):
if self.rho_table_ref is not None:
return self.rho_table_ref.tid
return self.rho_table
def Gzx_table(self):
if self.gzx_table_ref is not None:
return self.gzx_table_ref.tid
return self.gzx_table
def Ax_table(self):
if self.ax_table_ref is not None:
return self.ax_table_ref.tid
return self.ax_table
def Ath_table(self):
if self.ath_table_ref is not None:
return self.ath_table_ref.tid
return self.ath_table
def Az_table(self):
if self.az_table_ref is not None:
return self.az_table_ref.tid
return self.az_table
def Ge_table(self):
if self.ge_table_ref is not None:
return self.ge_table_ref.tid
return self.ge_table
def raw_fields(self):
list_fields = [
'MATT3', self.Mid(), self.Ex_table(), self.Eth_table(), self.Ez_table(),
self.Nuth_table(), self.Nuxz_table(), self.Rho_table(), None, None,
self.Gzx_table(), self.Ax_table(), self.Ath_table(), self.Az_table(),
None, self.Ge_table(),
]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class MATT4(MaterialDependenceThermal):
"""
    Specifies temperature-dependent material properties on MAT4 entry
fields via TABLEMi entries.
+-------+-------+-------+-------+--------+-------+-------+---------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+=======+=======+=======+=======+========+=======+=======+=========+
| MATT4 | MID | T(K) | T(CP) | | T(H) | T(mu) | T(HGEN) |
+-------+-------+-------+-------+--------+-------+-------+---------+
"""
type = 'MATT4'
@classmethod
def _init_from_empty(cls):
mid = 1
return MATT4(mid, k_table=None, cp_table=None, h_table=None,
mu_table=None, hgen_table=None, comment='')
def __init__(self, mid, k_table=None, cp_table=None, h_table=None,
mu_table=None, hgen_table=None, comment=''):
MaterialDependenceThermal.__init__(self)
if comment:
self.comment = comment
if k_table == 0:
k_table = None
if cp_table == 0:
cp_table = None
if h_table == 0:
h_table = None
if mu_table == 0:
mu_table = None
if hgen_table == 0:
hgen_table = None
self.mid = mid
self.k_table = k_table
self.cp_table = cp_table
self.h_table = h_table
self.mu_table = mu_table
self.hgen_table = hgen_table
self.mid_ref = None
self.k_table_ref = None
self.cp_table_ref = None
self.h_table_ref = None
self.mu_table_ref = None
self.hgen_table_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT4 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
k_table = integer_or_blank(card, 2, 'T(K)')
cp_table = integer_or_blank(card, 3, 'T(CP)')
h_table = integer_or_blank(card, 5, 'T(H)')
mu_table = integer_or_blank(card, 6, 'T(mu)')
hgen_table = integer_or_blank(card, 7, 'T(HGEN)')
assert len(card) <= 8, 'len(MATT4 card) = %i\ncard=%s' % (len(card), card)
return MATT4(mid, k_table, cp_table, h_table, mu_table,
hgen_table, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a MATT4 card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(mid, k_table, cp_table, null, h_table, mu_table, hgen_table) = data
assert null == 0, data
return MATT4(mid, k_table, cp_table, h_table, mu_table,
hgen_table, comment=comment)
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by MATT4 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
self._xref_table(model, 'k_table', msg=msg)
self._xref_table(model, 'cp_table', msg=msg)
self._xref_table(model, 'h_table', msg=msg)
self._xref_table(model, 'mu_table', msg=msg)
self._xref_table(model, 'hgen_table', msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.mid = self.Mid()
self.k_table = self.K_table()
self.cp_table = self.Cp_table()
self.h_table = self.H_table()
self.mu_table = self.Mu_table()
self.hgen_table = self.Hgen_table()
self.mid_ref = None
self.k_table_ref = None
self.cp_table_ref = None
self.h_table_ref = None
self.mu_table_ref = None
self.hgen_table_ref = None
def K_table(self):
return self._get_table('k_table')
def Cp_table(self):
return self._get_table('cp_table')
def H_table(self):
return self._get_table('h_table')
def Mu_table(self):
return self._get_table('mu_table')
def Hgen_table(self):
return self._get_table('hgen_table')
def raw_fields(self):
list_fields = [
'MATT4', self.Mid(), self.K_table(), self.Cp_table(),
None,
self.H_table(), self.Mu_table(), self.Hgen_table()
]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class MATT5(MaterialDependenceThermal):
"""
    Specifies temperature-dependent material properties on MAT5 entry
fields via TABLEMi entries.
+-------+---------+---------+--------+--------+--------+--------+--------+-------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=========+=========+========+========+========+========+========+=======+
| MATT5 | MID | T(Kxx) | T(Kxy) | T(Kxz) | T(Kyy) | T(Kyz) | T(Kzz) | T(CP) |
+-------+---------+---------+--------+--------+--------+--------+--------+-------+
| | | T(HGEN) | | | | | | |
+-------+---------+---------+--------+--------+--------+--------+--------+-------+
"""
type = 'MATT5'
@classmethod
def _init_from_empty(cls):
mid = 1
return MATT5(mid, kxx_table=None, kxy_table=None, kxz_table=None, kyy_table=None,
kyz_table=None, kzz_table=None, cp_table=None, hgen_table=None, comment='')
def __init__(self, mid, kxx_table=None, kxy_table=None, kxz_table=None,
kyy_table=None, kyz_table=None, kzz_table=None,
cp_table=None, hgen_table=None, comment=''):
MaterialDependenceThermal.__init__(self)
if comment:
self.comment = comment
self.mid = mid
self.kxx_table = kxx_table
self.kxy_table = kxy_table
self.kxz_table = kxz_table
self.kyy_table = kyy_table
self.kyz_table = kyz_table
self.kzz_table = kzz_table
self.cp_table = cp_table
self.hgen_table = hgen_table
self.mid_ref = None
self.kxx_table_ref = None
self.kxy_table_ref = None
self.kxz_table_ref = None
self.kyy_table_ref = None
self.kyz_table_ref = None
self.kzz_table_ref = None
self.cp_table_ref = None
self.hgen_table_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT5 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
kxx_table = integer_or_blank(card, 2, 'T(Kxx)')
kxy_table = integer_or_blank(card, 3, 'T(Kxy)')
kxz_table = integer_or_blank(card, 5, 'T(Kxz)')
kyy_table = integer_or_blank(card, 6, 'T(Kyy)')
kyz_table = integer_or_blank(card, 7, 'T(Kyz)')
        kzz_table = integer_or_blank(card, 8, 'T(Kzz)')
        cp_table = integer_or_blank(card, 9, 'T(CP)')
hgen_table = integer_or_blank(card, 11, 'T(HGEN)')
assert len(card) <= 12, 'len(MATT5 card) = %i\ncard=%s' % (len(card), card)
return MATT5(mid, kxx_table, kxy_table, kxz_table, kyy_table,
kyz_table, kzz_table, cp_table, hgen_table,
comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a MATT5 card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
(mid, kxx_table, kxy_table, kxz_table, kyy_table, kyz_table, kzz_table,
cp_table, null, hgen_table) = data
if kxx_table == 0:
kxx_table = None
if kxy_table == 0:
kxy_table = None
if kxz_table == 0:
kxz_table = None
if kyy_table == 0:
kyy_table = None
if kyz_table == 0:
kyz_table = None
if kzz_table == 0:
kzz_table = None
if cp_table == 0:
cp_table = None
if hgen_table == 0:
hgen_table = None
assert null == 0, data
return MATT5(mid, kxx_table, kxy_table, kxz_table, kyy_table,
kyz_table, kzz_table, cp_table, hgen_table,
comment=comment)
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by MATT5 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
self._xref_table(model, 'kxx_table', msg=msg)
self._xref_table(model, 'kxy_table', msg=msg)
self._xref_table(model, 'kxz_table', msg=msg)
self._xref_table(model, 'kyy_table', msg=msg)
self._xref_table(model, 'kyz_table', msg=msg)
self._xref_table(model, 'kzz_table', msg=msg)
self._xref_table(model, 'cp_table', msg=msg)
self._xref_table(model, 'hgen_table', msg=msg)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.mid = self.Mid()
self.kxx_table = self.Kxx_table()
self.kxy_table = self.Kxy_table()
self.kxz_table = self.Kxz_table()
self.kyy_table = self.Kyy_table()
self.kyz_table = self.Kyz_table()
self.kzz_table = self.Kzz_table()
self.cp_table = self.Cp_table()
self.hgen_table = self.Hgen_table()
self.mid_ref = None
self.kxx_table_ref = None
self.kxy_table_ref = None
self.kxz_table_ref = None
self.kyy_table_ref = None
self.kyz_table_ref = None
self.kzz_table_ref = None
self.cp_table_ref = None
self.hgen_table_ref = None
def Kxx_table(self):
return self._get_table('kxx_table')
def Kxy_table(self):
return self._get_table('kxy_table')
def Kxz_table(self):
return self._get_table('kxz_table')
def Kyy_table(self):
return self._get_table('kyy_table')
def Kyz_table(self):
return self._get_table('kyz_table')
def Kzz_table(self):
return self._get_table('kzz_table')
def Cp_table(self):
return self._get_table('cp_table')
def Hgen_table(self):
return self._get_table('hgen_table')
def raw_fields(self):
list_fields = ['MATT5', self.Mid(),
self.Kxx_table(), self.Kxy_table(), self.Kxz_table(),
self.Kyy_table(), self.Kyz_table(), self.Kzz_table(),
self.Cp_table(), None, self.Hgen_table()]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class MATT8(MaterialDependenceThermal):
"""
    Specifies temperature-dependent material properties on MAT8 entry
fields via TABLEMi entries.
+-------+--------+--------+-------+---------+--------+--------+--------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+========+========+=======+=========+========+========+========+========+
| MATT8 | MID | T(E1) | T(E2) | T(Nu12) | T(G12) | T(G1z) | T(G2z) | T(RHO) |
+-------+--------+--------+-------+---------+--------+--------+--------+--------+
| | T(A1) | T(A2) | | T(Xt) | T(Xc) | T(Yt) | T(Yc) | T(S) |
+-------+--------+--------+-------+---------+--------+--------+--------+--------+
| | T(GE) | T(F12) | | | | | | |
+-------+--------+--------+-------+---------+--------+--------+--------+--------+
"""
type = 'MATT8'
@classmethod
def _init_from_empty(cls):
mid = 1
return MATT8(mid, e1_table=None, e2_table=None, nu12_table=None, g12_table=None,
g1z_table=None, g2z_table=None, rho_table=None,
a1_table=None, a2_table=None, xt_table=None, xc_table=None,
yt_table=None, yc_table=None, s_table=None, ge_table=None,
f12_table=None, comment='')
def __init__(self, mid, e1_table=None, e2_table=None, nu12_table=None,
g12_table=None, g1z_table=None, g2z_table=None, rho_table=None,
a1_table=None, a2_table=None,
xt_table=None, xc_table=None, yt_table=None, yc_table=None,
s_table=None, ge_table=None, f12_table=None, comment=''):
MaterialDependenceThermal.__init__(self)
if comment:
self.comment = comment
self.mid = mid
self.e1_table = e1_table
self.e2_table = e2_table
self.nu12_table = nu12_table
self.g12_table = g12_table
self.g1z_table = g1z_table
self.g2z_table = g2z_table
self.rho_table = rho_table
self.a1_table = a1_table
self.a2_table = a2_table
self.xt_table = xt_table
self.xc_table = xc_table
self.yt_table = yt_table
self.yc_table = yc_table
self.s_table = s_table
self.ge_table = ge_table
self.f12_table = f12_table
self.mid_ref = None
self.e1_table_ref = None
self.e2_table_ref = None
self.nu12_table_ref = None
self.g12_table_ref = None
self.g1z_table_ref = None
self.g2z_table_ref = None
self.rho_table_ref = None
self.a1_table_ref = None
self.a2_table_ref = None
self.xt_table_ref = None
self.xc_table_ref = None
self.yt_table_ref = None
self.yc_table_ref = None
self.s_table_ref = None
self.ge_table_ref = None
self.f12_table_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT8 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
e1_table = integer_or_blank(card, 2, 'T(E1)')
e2_table = integer_or_blank(card, 3, 'T(E2)')
        nu12_table = integer_or_blank(card, 4, 'T(Nu12)')
g12_table = integer_or_blank(card, 5, 'T(G12)')
g1z_table = integer_or_blank(card, 6, 'T(G1z)')
g2z_table = integer_or_blank(card, 7, 'T(G2z)')
rho_table = integer_or_blank(card, 8, 'T(Rho)')
a1_table = integer_or_blank(card, 9, 'T(A1)')
a2_table = integer_or_blank(card, 10, 'T(A2)')
xt_table = integer_or_blank(card, 12, 'T(Xt)')
xc_table = integer_or_blank(card, 13, 'T(Xc)')
yt_table = integer_or_blank(card, 14, 'T(Yt)')
yc_table = integer_or_blank(card, 15, 'T(Yc)')
s_table = integer_or_blank(card, 16, 'T(S)')
ge_table = integer_or_blank(card, 17, 'T(GE)')
f12_table = integer_or_blank(card, 18, 'T(F12)')
assert len(card) <= 19, 'len(MATT8 card) = %i\ncard=%s' % (len(card), card)
return MATT8(mid, e1_table, e2_table, nu12_table, g12_table,
g1z_table, g2z_table, rho_table,
a1_table, a2_table, xt_table,
xc_table, yt_table, yc_table,
s_table, ge_table, f12_table,
comment=comment)
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
        msg = ', which is required by MATT8 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
if self.e1_table is not None:
self.e1_table_ref = model.TableM(self.e1_table)
if self.e2_table is not None:
self.e2_table_ref = model.TableM(self.e2_table)
if self.nu12_table is not None:
self.nu12_table_ref = model.TableM(self.nu12_table)
if self.g12_table is not None:
self.g12_table_ref = model.TableM(self.g12_table)
if self.g1z_table is not None:
self.g1z_table_ref = model.TableM(self.g1z_table)
if self.g2z_table is not None:
self.g2z_table_ref = model.TableM(self.g2z_table)
if self.rho_table is not None:
self.rho_table_ref = model.TableM(self.rho_table)
if self.a1_table is not None:
self.a1_table_ref = model.TableM(self.a1_table)
if self.a2_table is not None:
self.a2_table_ref = model.TableM(self.a2_table)
        if self.xt_table is not None:
            self.xt_table_ref = model.TableM(self.xt_table)
        if self.xc_table is not None:
            self.xc_table_ref = model.TableM(self.xc_table)
        if self.yt_table is not None:
            self.yt_table_ref = model.TableM(self.yt_table)
        if self.yc_table is not None:
            self.yc_table_ref = model.TableM(self.yc_table)
        if self.s_table is not None:
            self.s_table_ref = model.TableM(self.s_table)
        if self.ge_table is not None:
            self.ge_table_ref = model.TableM(self.ge_table)
        if self.f12_table is not None:
            self.f12_table_ref = model.TableM(self.f12_table)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
self.e1_table = self.E1_table()
self.e2_table = self.E2_table()
self.nu12_table = self.Nu12_table()
self.g12_table = self.G12_table()
self.g1z_table = self.G1z_table()
self.g2z_table = self.G2z_table()
self.rho_table = self.Rho_table()
self.a1_table = self.A1_table()
self.a2_table = self.A2_table()
self.xt_table = self.Xt_table()
self.xc_table = self.Xc_table()
self.yt_table = self.Yt_table()
self.yc_table = self.Yc_table()
self.s_table = self.S_table()
self.ge_table = self.Ge_table()
self.f12_table = self.F12_table()
self.e1_table_ref = None
self.e2_table_ref = None
self.nu12_table_ref = None
self.g12_table_ref = None
self.g1z_table_ref = None
self.g2z_table_ref = None
self.rho_table_ref = None
self.a1_table_ref = None
self.a2_table_ref = None
self.xt_table_ref = None
self.xc_table_ref = None
self.yt_table_ref = None
self.yc_table_ref = None
self.s_table_ref = None
self.ge_table_ref = None
self.f12_table_ref = None
def E1_table(self):
if self.e1_table_ref is not None:
return self.e1_table_ref.tid
return self.e1_table
def E2_table(self):
if self.e2_table_ref is not None:
return self.e2_table_ref.tid
return self.e2_table
def Nu12_table(self):
if self.nu12_table_ref is not None:
return self.nu12_table_ref.tid
return self.nu12_table
def G12_table(self):
if self.g12_table_ref is not None:
return self.g12_table_ref.tid
return self.g12_table
def G1z_table(self):
if self.g1z_table_ref is not None:
return self.g1z_table_ref.tid
return self.g1z_table
def G2z_table(self):
if self.g2z_table_ref is not None:
return self.g2z_table_ref.tid
return self.g2z_table
def Rho_table(self):
if self.rho_table_ref is not None:
return self.rho_table_ref.tid
return self.rho_table
def A1_table(self):
if self.a1_table_ref is not None:
return self.a1_table_ref.tid
return self.a1_table
def A2_table(self):
if self.a2_table_ref is not None:
return self.a2_table_ref.tid
return self.a2_table
def S_table(self):
if self.s_table_ref is not None:
return self.s_table_ref.tid
return self.s_table
def Ge_table(self):
if self.ge_table_ref is not None:
return self.ge_table_ref.tid
return self.ge_table
def F12_table(self):
if self.f12_table_ref is not None:
return self.f12_table_ref.tid
return self.f12_table
def Xt_table(self):
if self.xt_table_ref is not None:
return self.xt_table_ref.tid
return self.xt_table
def Xc_table(self):
if self.xc_table_ref is not None:
return self.xc_table_ref.tid
return self.xc_table
def Yt_table(self):
if self.yt_table_ref is not None:
return self.yt_table_ref.tid
return self.yt_table
def Yc_table(self):
if self.yc_table_ref is not None:
return self.yc_table_ref.tid
return self.yc_table
def raw_fields(self):
        list_fields = ['MATT8', self.mid, self.E1_table(), self.E2_table(), self.Nu12_table(),
                       self.G12_table(), self.G1z_table(), self.G2z_table(), self.Rho_table(),
                       self.A1_table(), self.A2_table(), None,
                       self.Xt_table(), self.Xc_table(), self.Yt_table(), self.Yc_table(),
                       self.S_table(), self.Ge_table(), self.F12_table()]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
"""
+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+========+========+========+========+========+========+========+========+
| MATT9 | MID | T(G11) | T(G12) | T(G13) | T(G14) | T(G15) | T(G16) | T(G22) |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| | T(G23) | T(G24) | T(G25) | T(G26) | T(G33) | T(G34) | T(G35) | T(G36) |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| | T(G44) | T(G45) | T(G46) | T(G55) | T(G56) | T(G66) | T(RHO) | T(A1) |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+
| | T(A2) | T(A3) | T(A4) | T(A5) | T(A6) | | T(GE) | |
+--------+--------+--------+--------+--------+--------+--------+--------+--------+
"""
list_fields = self.raw_fields()
return self.comment + print_card_8(list_fields)
class MATT9(MaterialDependenceThermal):
    """
    Specifies temperature-dependent material properties on MAT9 entry
    fields via TABLEMi entries.
    +--------+--------+--------+--------+--------+--------+--------+--------+--------+
    |   1    |   2    |   3    |   4    |   5    |   6    |   7    |   8    |   9    |
    +========+========+========+========+========+========+========+========+========+
    | MATT9  |  MID   | T(G11) | T(G12) | T(G13) | T(G14) | T(G15) | T(G16) | T(G22) |
    +--------+--------+--------+--------+--------+--------+--------+--------+--------+
    |        | T(G23) | T(G24) | T(G25) | T(G26) | T(G33) | T(G34) | T(G35) | T(G36) |
    +--------+--------+--------+--------+--------+--------+--------+--------+--------+
    |        | T(G44) | T(G45) | T(G46) | T(G55) | T(G56) | T(G66) | T(RHO) | T(A1)  |
    +--------+--------+--------+--------+--------+--------+--------+--------+--------+
    |        | T(A2)  | T(A3)  | T(A4)  | T(A5)  | T(A6)  |        | T(GE)  |        |
    +--------+--------+--------+--------+--------+--------+--------+--------+--------+
    """
    type = 'MATT9'
@classmethod
def _init_from_empty(cls):
mid = 1
return MATT9(mid, g11_table=None, g12_table=None, g13_table=None, g14_table=None,
g15_table=None, g16_table=None, g22_table=None, g23_table=None,
g24_table=None, g25_table=None, g26_table=None, g33_table=None,
g34_table=None, g35_table=None, g36_table=None, g44_table=None,
g45_table=None, g46_table=None, g55_table=None, g56_table=None,
g66_table=None, rho_table=None,
a1_table=None, a2_table=None, a3_table=None,
a4_table=None, a5_table=None, a6_table=None, ge_table=None, comment='')
def __init__(self, mid,
g11_table=None, g12_table=None, g13_table=None, g14_table=None,
g15_table=None, g16_table=None,
g22_table=None, g23_table=None, g24_table=None,
g25_table=None, g26_table=None,
g33_table=None, g34_table=None, g35_table=None, g36_table=None,
g44_table=None, g45_table=None, g46_table=None,
g55_table=None, g56_table=None,
g66_table=None,
rho_table=None,
a1_table=None, a2_table=None, a3_table=None,
a4_table=None, a5_table=None, a6_table=None,
ge_table=None,
comment=''):
MaterialDependenceThermal.__init__(self)
if comment:
self.comment = comment
self.mid = mid
self.g11_table = g11_table
self.g12_table = g12_table
self.g13_table = g13_table
self.g14_table = g14_table
self.g15_table = g15_table
self.g16_table = g16_table
self.g22_table = g22_table
self.g23_table = g23_table
self.g24_table = g24_table
self.g25_table = g25_table
self.g26_table = g26_table
self.g33_table = g33_table
self.g34_table = g34_table
self.g35_table = g35_table
self.g36_table = g36_table
self.g44_table = g44_table
self.g45_table = g45_table
self.g46_table = g46_table
self.g55_table = g55_table
self.g56_table = g56_table
self.g66_table = g66_table
self.rho_table = rho_table
self.a1_table = a1_table
self.a2_table = a2_table
self.a3_table = a3_table
self.a4_table = a4_table
self.a5_table = a5_table
self.a6_table = a6_table
self.ge_table = ge_table
self.mid_ref = None
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a MATT9 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
mid = integer(card, 1, 'mid')
g11_table = integer_or_blank(card, 2, 'T(G11)')
g12_table = integer_or_blank(card, 3, 'T(G12)')
g13_table = integer_or_blank(card, 4, 'T(G13)')
g14_table = integer_or_blank(card, 5, 'T(G14)')
g15_table = integer_or_blank(card, 6, 'T(G15)')
g16_table = integer_or_blank(card, 7, 'T(G16)')
g22_table = integer_or_blank(card, 8, 'T(G22)')
g23_table = integer_or_blank(card, 9, 'T(G23)')
g24_table = integer_or_blank(card, 10, 'T(G24)')
g25_table = integer_or_blank(card, 11, 'T(G25)')
g26_table = integer_or_blank(card, 12, 'T(G26)')
g33_table = integer_or_blank(card, 13, 'T(G33)')
g34_table = integer_or_blank(card, 14, 'T(G34)')
g35_table = integer_or_blank(card, 15, 'T(G35)')
g36_table = integer_or_blank(card, 16, 'T(G36)')
g44_table = integer_or_blank(card, 17, 'T(G44)')
g45_table = integer_or_blank(card, 18, 'T(G45)')
g46_table = integer_or_blank(card, 19, 'T(G46)')
g55_table = integer_or_blank(card, 20, 'T(G55)')
g56_table = integer_or_blank(card, 21, 'T(G56)')
g66_table = integer_or_blank(card, 22, 'T(G66)')
rho_table = integer_or_blank(card, 23, 'T(RHO)')
a1_table = integer_or_blank(card, 24, 'T(A1)')
a2_table = integer_or_blank(card, 25, 'T(A2)')
a3_table = integer_or_blank(card, 26, 'T(A3)')
a4_table = integer_or_blank(card, 27, 'T(A4)')
a5_table = integer_or_blank(card, 28, 'T(A5)')
a6_table = integer_or_blank(card, 29, 'T(A6)')
ge_table = integer_or_blank(card, 31, 'T(GE)')
assert len(card) <= 32, 'len(MATT9 card) = %i\ncard=%s' % (len(card), card)
return MATT9(mid, g11_table, g12_table, g13_table, g14_table, g15_table, g16_table,
g22_table, g23_table, g24_table, g25_table, g26_table,
g33_table, g34_table, g35_table, g36_table,
g44_table, g45_table, g46_table,
g55_table, g56_table, g66_table,
rho_table,
a1_table, a2_table, a3_table, a4_table, a5_table, a6_table,
ge_table, comment=comment)
def cross_reference(self, model):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by MATT9 mid=%s' % self.mid
self.mid_ref = model.Material(self.mid, msg=msg)
#if self.e1_table is not None:
#self.e1_table_ref = model.TableM(self.e1_table)
#if self.e2_table is not None:
#self.e2_table_ref = model.TableM(self.e2_table)
def uncross_reference(self) -> None:
"""Removes cross-reference links"""
pass
#self.e1_table = self.E1_table()
#self.e2_table = self.E2_table()
#self.e1_table_ref = None
#self.e2_table_ref = None
#def E1_table(self):
#if self.e1_table_ref is not None:
#return self.e1_table_ref.tid
#return self.e1_table
def raw_fields(self):
list_fields = [
'MATT9', self.mid,
self.g11_table,
self.g12_table,
self.g13_table,
self.g14_table,
self.g15_table,
self.g16_table,
self.g22_table,
self.g23_table,
self.g24_table,
self.g25_table,
self.g26_table,
self.g33_table,
self.g34_table,
self.g35_table,
self.g36_table,
self.g44_table,
self.g45_table,
self.g46_table,
self.g55_table,
self.g56_table,
self.g66_table,
self.rho_table,
self.a1_table,
self.a2_table,
self.a3_table,
self.a4_table,
self.a5_table,
self.a6_table,
self.ge_table,
]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
list_fields = self.raw_fields()
return self.comment + print_card_8(list_fields)
```
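A minimal sketch of exercising the MATT9 class above on its own, assuming only the constructor, `raw_fields`, and `write_card` defined in this file; the table ids below are made-up placeholders, not values from any real deck:
```python
# Hypothetical TABLEMi ids; only a few of the optional temperature tables are set.
matt9 = MATT9(mid=7, g11_table=101, g12_table=102, rho_table=201,
              a1_table=301, ge_table=401)
fields = matt9.raw_fields()    # ['MATT9', 7, 101, 102, None, None, ...]
print(matt9.write_card())      # renders a small-field MATT9 card via print_card_8
```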
#### File: cards/test/test_contact.py
```python
import copy
import unittest
from pyNastran.bdf.bdf import BDF
from pyNastran.bdf.cards.test.utils import save_load_deck
#from pyNastran.bdf.field_writer_8 import print_card
class TestContact(unittest.TestCase):
def test_contact_01(self):
"""checks the BSURF cards"""
model = BDF(debug=False)
lines = [
'BSURF 3 1 2 3 4 5 6 7',
' 8 9 10 11 12 13 14 15',
' 16 17 18 19 20 21 22 23',
]
unused_card = model.add_card(copy.deepcopy(lines), 'BSURF', is_list=False)
out = model.bsurf[3].write_card(8, None)
lines2 = out.split('\n')
for line, line2 in zip(lines, lines2):
self.assertEqual(line, line2)
def test_contact_2(self):
sid = 42
eids = [1, 2, 3]
model = BDF(debug=False)
sid = 42
bsurf = model.add_bsurf(sid, eids, comment='bsurf')
bsurf.raw_fields()
sid = 43
g1s = [10, 11, 12]
g2s = [20, 21, 22]
g3s = [30, 31, 32]
bsurfs = model.add_bsurfs(sid, eids, g1s, g2s, g3s, comment='bsurfs')
bsurfs.raw_fields()
contact_set_id = 44
source_ids = [37, 38]
target_ids = [47, 48]
frictions = [0.11, 0.22]
min_distances = [0.001, 0.001]
max_distances = [0.1, 0.2]
bctset = model.add_bctset(contact_set_id, source_ids, target_ids, frictions,
min_distances, max_distances,
comment='bctset')
bctset.raw_fields()
contact_region = 100
surface = 'BOT'
contact_type = 'RIGID'
offset = .1012
master_grid_point = 101
bcrpara = model.add_bcrpara(contact_region, surface, offset, contact_type,
master_grid_point, comment='bcrpara')
bcrpara.raw_fields()
model.validate()
contact_region = 102
params = {'cat' : 111, 'dog' : 222, 'frog' : 0.}
bctpara = model.add_bctpara(contact_region, params, comment='bctpara')
bctpara.raw_fields()
str(bctpara)
contact_region = 300
contact_sets = [301, 302]
bctadd = model.add_bctadd(contact_region, contact_sets, comment='bctadd')
bctadd.raw_fields()
save_load_deck(model)
if __name__ == '__main__': # pragma: no cover
unittest.main()
```
#### File: bdf/mesh_utils/mesh.py
```python
import numpy as np
from pyNastran.bdf.cards.aero.utils import (
points_elements_from_quad_points, create_axisymmetric_body)
def create_structured_chexas(model, pid,
x, y, z, nx, ny, nz, eid=1, nid=1):
nid0 = nid
nnodes = nx * ny * nz
nelements = (nx - 1) * (ny - 1) * (nz - 1)
assert nelements > 0, f'nx={nx} ny={ny} nz={nz} nelements={nelements}'
#points, elements = points_elements_from_cube_points(
#p1, p2, p3, p4,
#p5, p6, p7, p8,
#x, y, z, dtype='int32')
xv, yv, zv = np.meshgrid(x, y, z, indexing='ij')  # 'ij' so the ravel order matches the node_ids reshape below
for point in zip(xv.ravel(), yv.ravel(), zv.ravel()):
model.add_grid(nid, point)
nid += 1
node_ids = np.arange(nnodes, dtype='int32').reshape(nx, ny, nz)
#print(node_ids)
p1 = node_ids[:nx-1, :ny-1, :nz-1].ravel()
p2 = node_ids[1:, :ny-1, :nz-1].ravel()
p3 = node_ids[1:, 1:, :nz-1].ravel()
p4 = node_ids[:nx-1, 1:, :nz-1].ravel()
p5 = node_ids[:nx-1, :ny-1, 1:].ravel()
p6 = node_ids[1:, :ny-1, 1:].ravel()
p7 = node_ids[1:, 1:, 1:].ravel()
p8 = node_ids[:nx-1, 1:, 1:].ravel()
elements = np.vstack([p1, p2, p3, p4, p5, p6, p7, p8]).T
#print(elements)
#print(elements.shape)
for node_ids in (elements + nid0).tolist():
#print(node_ids, type(node_ids))
model.add_chexa(eid, pid, node_ids)
eid += 1
return nid, eid
#def points_elements_from_cube_points(p1, p2, p3, p4,
#p5, p6, p7, p8,
#x, y, z, dtype='int32'):
#"""
#Creates nodes and elements in a structured grid given 8 points.
#Parameters
#----------
#p1-p8 : (3, ) float ndarray
#corner point locations
#x, y, z : (n, ) float ndarray
#percentage in x, y, and z directions
#dtype : str; default='int32'
#the type of elements
#Returns
#-------
#points (nx, ny, nz, 3) float ndarray
#the points
#elements (nquads, 8) int ndarray
#series of hexa elements
#nhexas = (nx-1) * (ny-1) * (nz-1)
#"""
#nx = x.shape[0]
#ny = y.shape[0]
#nz = z.shape[0]
#elements = elements_from_cube(nx, ny, nz, dtype=dtype)
#npoints = nx * ny
## shape the vectors so we can multiply them
#x = x.reshape((1, nx))
#y = y.reshape((1, ny))
#z = y.reshape((1, nz))
#p1 = np.asarray(p1).reshape(1, 3)
#p2 = np.asarray(p2).reshape(1, 3)
#p3 = np.asarray(p3).reshape(1, 3)
#p4 = np.asarray(p4).reshape(1, 3)
#p5 = np.asarray(p5).reshape(1, 3)
#p6 = np.asarray(p6).reshape(1, 3)
#p7 = np.asarray(p7).reshape(1, 3)
#p8 = np.asarray(p8).reshape(1, 3)
## x repeats ny times and varies slowly
## y repeats nx times and varies quickly
#xv = np.repeat(x, ny, axis=1).reshape(npoints, 1)
#yv = np.repeat(y, nx, axis=0).reshape(npoints, 1)
## calculate the points a and b xv% along the chord
#a = xv * p2 + (1 - xv) * p1
#b = xv * p3 + (1 - xv) * p4
## calculate the point yv% along the span
#points = yv * b + (1 - yv) * a
#assert points.shape == (npoints, 3), 'npoints=%s shape=%s' % (npoints, str(points.shape))
## create a matrix with the point counter
##ipoints = np.arange(npoints, dtype='int32').reshape((nx, ny))
#return points, elements
def create_structured_cquad4s(model, pid,
p1, p2, p3, p4,
nx, ny, nid=1, eid=1, theta_mcid=0.):
"""
Parameters
----------
p1 / p2 / p3 / p4 : (3, ) float ndarray
points defining the quad
nx : int
number of elements in the p1-p2 direction
ny : int
number of elements in the p1-p4 direction
nid / eid : int
node / element id offset
Returns
-------
nid : int
the next unused node id
eid : int
the next unused element id
"""
nid0 = nid
x = np.linspace(0., 1., nx + 1)
y = np.linspace(0., 1., ny + 1)
points, elements = points_elements_from_quad_points(
p1, p2, p3, p4, x, y, dtype='int32')
for point in points:
model.add_grid(nid, point)
nid += 1
for node_ids in elements + nid0:
model.add_cquad4(eid, pid, node_ids, theta_mcid=theta_mcid)
eid += 1
return nid, eid
#def cone3d(model, pid,
#xs, radius, nx=5, ntheta=10, endpoint=True):
#"""
#create a cone by segments in 3d
#xs = [0., 1., 2.]
#radius = [1., 2., 4.]
#>>> cone(model, pid, xs, radius1, radius2)
#"""
#xstation = np.asarray(xstation)
#ystation = np.zeros(xstation.shape)
#zstation = np.zeros(xstation.shape)
#aspect_ratio = 1.0
#xyz_elems = create_axisymmetric_body(
#nx, aspect_ratio,
#xstation, ystation, zstation, radii,
#p1, dy, dz)
#return
```
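The corner-node slicing in `create_structured_chexas` above is easiest to see on a tiny grid. A standalone sketch, assuming `nx`, `ny`, `nz` count grid points per direction (numpy only, no BDF model needed):
```python
import numpy as np

nx, ny, nz = 2, 2, 2  # 2 points per direction -> exactly one hexa
node_ids = np.arange(nx * ny * nz, dtype='int32').reshape(nx, ny, nz)

# the 8 corner slices, one per hexa corner, mirroring create_structured_chexas
p1 = node_ids[:-1, :-1, :-1].ravel()
p2 = node_ids[1:, :-1, :-1].ravel()
p3 = node_ids[1:, 1:, :-1].ravel()
p4 = node_ids[:-1, 1:, :-1].ravel()
p5 = node_ids[:-1, :-1, 1:].ravel()
p6 = node_ids[1:, :-1, 1:].ravel()
p7 = node_ids[1:, 1:, 1:].ravel()
p8 = node_ids[:-1, 1:, 1:].ravel()
elements = np.vstack([p1, p2, p3, p4, p5, p6, p7, p8]).T
print(elements)  # [[0 4 6 2 1 5 7 3]] -> one CHEXA connectivity row (0-based ids)
```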
#### File: mesh_utils/test/test_mesh_utils.py
```python
import os
import unittest
from io import StringIO
from docopt import DocoptExit
import numpy as np
from cpylog import SimpleLogger
#import pyNastran
#from pyNastran.bdf.bdf import BDF
#root_path = pyNastran.__path__[0]
#test_path = os.path.join(root_path, 'bdf', 'test', 'unit')
import pyNastran
from pyNastran.bdf.bdf import BDF, read_bdf
from pyNastran.bdf.mesh_utils.bdf_equivalence import bdf_equivalence_nodes
from pyNastran.bdf.mesh_utils.export_mcids import export_mcids
from pyNastran.bdf.mesh_utils.split_cbars_by_pin_flag import split_cbars_by_pin_flag
from pyNastran.bdf.mesh_utils.split_elements import split_line_elements
from pyNastran.bdf.mesh_utils.pierce_shells import (
pierce_shell_model) #, quad_intersection, triangle_intersection)
from pyNastran.bdf.mesh_utils.mirror_mesh import (
write_bdf_symmetric, bdf_mirror, bdf_mirror_plane)
from pyNastran.bdf.mesh_utils.mass_properties import (
mass_properties, mass_properties_nsm) #mass_properties_breakdown
from pyNastran.bdf.mesh_utils.make_half_model import make_symmetric_model
from pyNastran.bdf.mesh_utils.bdf_merge import bdf_merge
from pyNastran.bdf.mesh_utils.utils import cmd_line
# not tested
from pyNastran.bdf.mesh_utils.mesh import create_structured_cquad4s, create_structured_chexas
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.abspath(os.path.join(PKG_PATH, '..', 'models'))
np.set_printoptions(edgeitems=3, infstr='inf',
linewidth=75, nanstr='nan', precision=3,
suppress=True, threshold=1000, formatter=None)
class TestMeshUtils(unittest.TestCase):
"""various mesh_utils tests"""
def test_free_faces(self):
"""CTETRA10"""
#bdf free_faces [-d | -l] [-f] [--encoding ENCODE] BDF_FILENAME SKIN_FILENAME
#with self.assertRaises(SystemExit):
#cmd_line(argv=['bdf', 'free_faces'])
bdf_filename = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending.bdf')
#log = get_logger(log=None, level='info', encoding='utf-8')
cmd_line(argv=['bdf', 'free_faces', bdf_filename, 'skin.bdf'], quiet=True)
os.remove('skin.bdf')
def test_structured_cquads(self):
"""tests create_structured_cquad4s"""
pid = 42
p1 = [0., 0., 0.]
p2 = [1., 0., 0.]
p3 = [1., 1., 0.]
p4 = [0., 1., 0.]
model = BDF()
nx = 10
ny = 20
create_structured_cquad4s(model, pid, p1, p2, p3, p4, nx, ny, nid=1, eid=1, theta_mcid=0.)
def test_structured_chexas(self):
"""tests test_structured_chexas"""
#1U CubeSat is 10 cm, 10 cm, 11.35 cm.
#2U CubeSat is 10 cm, 10 cm, 22.70 cm.
#6U CubeSat is 20 cm, 10 cm, 34.05 cm.
model = BDF()
pid = 1
i = 20.
j = 10.
k = 5.
p1 = [0., 0., 0.]
p2 = [i, 0., 0.]
p3 = [i, j, 0.]
p4 = [0., j, 0.]
p5 = [0., 0., k]
p6 = [i, 0., k]
p7 = [i, j, k]
p8 = [0., j, k]
nx = 2
ny = 2
nz = 2
x = np.linspace(0., i, nx + 1)
y = np.linspace(0., j, ny + 1)
z = np.linspace(0., k, nz + 1)
create_structured_chexas(model, pid,
x, y, z, nx, ny, nz, eid=1)
model.write_bdf('test_structured_chexas.bdf')
def test_eq1(self):
"""Collapse nodes 2 and 3; consider 1-3"""
log = SimpleLogger(level='error')
msg = (
'CEND\n'
'BEGIN BULK\n'
'GRID,1,,0.,0.,0.\n'
'GRID,2,,0.,0.,0.5\n'
'GRID,3,,0.,0.,0.51\n'
'GRID,10,,0.,0.,1.\n'
'GRID,11,,0.,0.,1.\n'
'CTRIA3,1,1,1,2,11\n'
'CTRIA3,3,1,2,3,11\n'
'CTRIA3,4,1,1,2,10\n'
'PSHELL,1,1,0.1\n'
'MAT1,1,3.0,, 0.3\n'
'ENDDATA'
)
bdf_filename = 'nonunique.bdf'
bdf_filename_out = 'unique.bdf'
with open(bdf_filename, 'w') as bdf_file:
bdf_file.write(msg)
tol = 0.2
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=None, crash_on_collapse=False,
log=log, debug=False)
# model = BDF(debug=False)
# model.read_bdf(bdf_filename_out)
# assert len(model.nodes) == 3, len(model.nodes)
os.remove(bdf_filename)
os.remove(bdf_filename_out)
def test_eq2(self):
r"""
5
6 *-------* 40
| \ |
| \ |
| \ |
*-------* 3
1 20
"""
log = SimpleLogger(level='error')
msg = (
'CEND\n'
'BEGIN BULK\n'
'GRID,1, , 0., 0., 0.\n'
'GRID,20,, 1., 0., 0.\n'
'GRID,3, , 1.01, 0., 0.\n'
'GRID,40,, 1., 1., 0.\n'
'GRID,5, , 0., 1., 0.\n'
'GRID,6, , 0., 1.01, 0.\n'
'CTRIA3,1, 100,1,20,6\n'
'CTRIA3,10,100,3,40,5\n'
'PSHELL,100,1000,0.1\n'
'MAT1,1000,3.0,, 0.3\n'
'ENDDATA'
)
bdf_filename = 'nonunique.bdf'
bdf_filename_out = 'unique.bdf'
with open(bdf_filename, 'w') as bdf_file:
bdf_file.write(msg)
tol = 0.2
# Collapse 5/6 and 20/3; Put a 40 and 20 to test non-sequential IDs
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=None, crash_on_collapse=False,
log=log, debug=False)
model = BDF(log=log, debug=False)
model.read_bdf(bdf_filename_out)
msg = 'nnodes=%s\n' % len(model.nodes)
for nid, node in sorted(model.nodes.items()):
msg += 'nid=%s xyz=%s\n' % (nid, node.xyz)
assert len(model.nodes) == 4, msg
#os.remove(bdf_filename)
os.remove(bdf_filename_out)
tol = 0.009
# Don't collapse anything because the tolerance is too small
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=None, crash_on_collapse=False,
log=log, debug=False)
model = BDF(log=log, debug=False)
model.read_bdf(bdf_filename_out)
assert len(model.nodes) == 6, len(model.nodes)
os.remove(bdf_filename_out)
tol = 0.2
node_set = [2, 3]
# Node 2 is not defined, so crash
with self.assertRaises(RuntimeError):
# node 2 is not defined because it should be node 20
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=node_set, crash_on_collapse=False,
log=log, debug=False)
tol = 0.2
node_list = [20, 3]
# Only collapse 2 nodes
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=node_list, crash_on_collapse=False,
log=log, debug=False)
model = BDF(log=log, debug=False)
model.read_bdf(bdf_filename_out)
assert len(model.nodes) == 5, len(model.nodes)
os.remove(bdf_filename_out)
tol = 0.2
node_set = {20, 3}
# Only collapse 2 nodes
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=node_set, crash_on_collapse=False,
log=log, debug=False)
model = BDF(log=log, debug=False)
model.read_bdf(bdf_filename_out)
assert len(model.nodes) == 5, len(model.nodes)
os.remove(bdf_filename_out)
tol = 0.2
aset = np.array([20, 3, 4], dtype='int32')
bset = np.array([20, 3], dtype='int32')
node_set = np.intersect1d(aset, bset)
assert len(node_set) > 0, node_set
# Only collapse 2 nodes
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=node_set, crash_on_collapse=False, debug=False)
model = BDF(debug=False)
model.read_bdf(bdf_filename_out)
assert len(model.nodes) == 5, len(model.nodes)
os.remove(bdf_filename_out)
def test_eq3(self):
"""node_set=None"""
log = SimpleLogger(level='error')
lines = [
'$pyNastran: version=msc',
'$pyNastran: punch=True',
'$pyNastran: encoding=ascii',
'$NODES',
'$ Nodes to merge:',
'$ 5987 10478',
'$ GRID 5987 35.46 -6. 0.',
'$ GRID 10478 35.46 -6. 0.',
'$ 5971 10479',
'$ GRID 5971 34.92 -6. 0.',
'$ GRID 10479 34.92 -6. 0.',
'$ 6003 10477',
'$ GRID 6003 36. -6. 0.',
'$ GRID 10477 36. -6. 0.',
'GRID 5971 34.92 -6. 0.',
'GRID 5972 34.92-5.73333 0.',
'GRID 5973 34.92-5.46667 0.',
'GRID 5987 35.46 -6. 0.',
'GRID 5988 35.46-5.73333 0.',
'GRID 5989 35.46-5.46667 0.',
'GRID 6003 36. -6. 0.',
'GRID 6004 36.-5.73333 0.',
'GRID 6005 36.-5.46667 0.',
'GRID 10476 36. -6. -1.5',
'GRID 10477 36. -6. 0.',
'GRID 10478 35.46 -6. 0.',
'GRID 10479 34.92 -6. 0.',
'GRID 10561 34.92 -6. -.54',
'$ELEMENTS_WITH_PROPERTIES',
'PSHELL 1 1 .1',
'CQUAD4 5471 1 5971 5987 5988 5972',
'CQUAD4 5472 1 5972 5988 5989 5973',
'CQUAD4 5486 1 5987 6003 6004 5988',
'CQUAD4 5487 1 5988 6004 6005 5989',
'PSHELL 11 1 .1',
'CTRIA3 9429 11 10561 10476 10478',
'CTRIA3 9439 11 10478 10479 10561',
'CTRIA3 9466 11 10476 10477 10478',
'$MATERIALS',
'MAT1 1 3. .3',
]
bdf_filename = 'nonunique2.bdf'
bdf_filename_out = 'unique2.bdf'
with open(bdf_filename, 'w') as bdf_file:
bdf_file.write('\n'.join(lines))
tol = 0.01
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=None, crash_on_collapse=False,
log=log, debug=False)
model = BDF(debug=False)
model.read_bdf(bdf_filename_out)
assert len(model.nodes) == 11, len(model.nodes)
os.remove(bdf_filename)
os.remove(bdf_filename_out)
def test_eq4(self):
r"""
5
6 *-------* 40
| \ |
| \ |
| \ |
*-------* 3
1 20
"""
log = SimpleLogger(level='error')
msg = 'CEND\n'
msg += 'BEGIN BULK\n'
msg += 'GRID,1, , 0., 0., 0.\n'
msg += 'GRID,20,, 1., 0., 0.\n'
msg += 'GRID,3, , 1.01, 0., 0.\n'
msg += 'GRID,41,, 1., 1., 0.\n' # eq
msg += 'GRID,4,, 1., 1., 0.\n' # eq
msg += 'GRID,40,, 1., 1., 0.\n' # eq
msg += 'GRID,4,, 1., 1., 0.\n' # eq
msg += 'GRID,5, , 0., 1., 0.\n'
msg += 'GRID,6, , 0., 1.01, 0.\n'
msg += 'CTRIA3,1, 100,1,20,6\n'
msg += 'CTRIA3,10,100,3,40,5\n'
msg += 'PSHELL,100,1000,0.1\n'
msg += 'MAT1,1000,3.0,, 0.3\n'
msg += 'ENDDATA'
bdf_filename = 'nonunique.bdf'
bdf_filename_out = 'unique.bdf'
with open(bdf_filename, 'w') as bdf_file:
bdf_file.write(msg)
tol = 0.2
node_set = [4, 40, 41]
# Collapse 5/6 and 20/3; Put a 40 and 20 to test non-sequential IDs
bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
renumber_nodes=False, neq_max=4, xref=True,
node_set=node_set, crash_on_collapse=False,
log=log, debug=False)
model = BDF(log=log, debug=False)
model.read_bdf(bdf_filename_out)
nids = model.nodes.keys()
assert len(model.nodes) == 6, 'nnodes=%s nodes=%s' % (len(model.nodes), nids)
assert 1 in nids, nids
assert 20 in nids, nids
assert 3 in nids, nids
assert 4 in nids, nids
assert 5 in nids, nids
assert 6 in nids, nids
assert 40 not in nids, nids
assert 41 not in nids, nids
#print(nids)
os.remove(bdf_filename)
os.remove(bdf_filename_out)
def test_merge_01(self):
"""merges multiple bdfs into a single deck"""
log = SimpleLogger(level='error')
bdf_filename1 = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero.bdf')
bdf_filename2 = os.path.join(MODEL_PATH, 'sol_101_elements', 'static_solid_shell_bar.bdf')
bdf_filename3 = os.path.join(MODEL_PATH, 'solid_bending', 'solid_bending.bdf')
bdf_filename4 = os.path.join(MODEL_PATH, 'iSat', 'ISat_Dploy_Sm.dat')
bdf_filename_out1 = os.path.join(MODEL_PATH, 'bwb', 'BWBsaero_staticbar_8.out')
bdf_filename_out2 = os.path.join(MODEL_PATH, 'bwb', 'BWBsaero_static_bar_16.out')
bdf_filename_out3 = os.path.join(MODEL_PATH, 'bwb', 'BWBsaero_staticbar_isat.out')
bdf_filenames1 = [bdf_filename1, bdf_filename2]
bdf_filenames2 = [bdf_filename1, bdf_filename2, bdf_filename3, bdf_filename4]
bdf_merge(bdf_filenames1, bdf_filename_out=bdf_filename_out1,
renumber=True, encoding=None, size=8, is_double=False,
cards_to_skip=None, log=log)
bdf_merge(bdf_filenames1, bdf_filename_out=bdf_filename_out2,
renumber=False, encoding=None, size=16, is_double=False,
cards_to_skip=None, log=log)
bdf_merge(bdf_filenames2, bdf_filename_out=bdf_filename_out3,
renumber=False, encoding=None, size=16, is_double=False,
cards_to_skip=None, log=log)
read_bdf(bdf_filename_out1, log=log)
read_bdf(bdf_filename_out2, log=log)
read_bdf(bdf_filename_out3, log=log)
def test_exit(self):
"""tests totally failing to run"""
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'export_caero_mesh'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'convert'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'scale'])
#with self.assertRaises(SystemExit):
#cmd_line(argv=['bdf', 'bin'])
#with self.assertRaises(SystemExit):
#cmd_line(argv=['bdf', 'filter'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'mirror'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'renumber'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'equivalence'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'free_faces'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'merge'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'export_caero_mesh'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'transform'])
with self.assertRaises(SystemExit):
cmd_line(argv=['bdf', 'export_mcids'])
def test_export_caero_mesh(self):
"""tests multiple ``bdf`` tools"""
bdf_filename = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero.bdf')
argv = ['bdf', 'export_caero_mesh', bdf_filename, '-o', 'caero_no_sub.bdf']
with self.assertRaises(SystemExit):
cmd_line(argv=argv[:1])
with self.assertRaises(SystemExit):
cmd_line(argv=argv[:2])
cmd_line(argv=argv, quiet=True)
argv = ['bdf', 'export_caero_mesh', bdf_filename, '-o', 'caero_aesurf.bdf', '--subpanels',
'--pid', 'aesurf']
cmd_line(argv=argv, quiet=True)
argv = ['bdf', 'export_caero_mesh', bdf_filename, '-o', 'caero_caero.bdf', '--subpanels',
'--pid', 'caero']
cmd_line(argv=argv, quiet=True)
argv = ['bdf', 'export_caero_mesh', bdf_filename, '-o', 'caero_paero.bdf', '--subpanels',
'--pid', 'paero']
cmd_line(argv=argv, quiet=True)
argv = ['bdf', 'export_caero_mesh', bdf_filename, '-o', 'caero.bdf', '--subpanels']
cmd_line(argv=argv, quiet=True)
#bdf mirror IN_BDF_FILENAME [-o OUT_BDF_FILENAME] [--plane PLANE] [--tol TOL]
argv = ['bdf', 'mirror', 'caero.bdf', '-o', 'caero2.bdf', '--plane', 'xz', '--tol', '1e-5']
cmd_line(argv=argv, quiet=True)
argv = ['bdf', 'equivalence', 'caero2.bdf', '0.001', '-o', 'caero3.bdf']
cmd_line(argv=argv, quiet=True)
argv = ['bdf', 'merge', 'caero2.bdf', 'caero2.bdf', '-o', 'caero3_merged.bdf']
cmd_line(argv=argv, quiet=True)
argv = ['bdf', 'renumber', 'caero3.bdf', 'caero4.bdf', '--size', '8']
cmd_line(argv=argv, quiet=True)
#bdf transform IN_BDF_FILENAME [-o OUT_CAERO_BDF_FILENAME] [--shift XYZ]
argv = ['bdf', 'transform', 'caero4.bdf', '-o', 'caero5.bdf', '--shift', '0,0,20.']
cmd_line(argv=argv, quiet=True)
#' bdf convert IN_BDF_FILENAME [-o OUT_BDF_FILENAME] [--in_units IN_UNITS] [--out_units OUT_UNITS]\n'
argv = ['bdf', 'convert', 'caero5.bdf',
'-o', 'caero6.bdf',
'--in_units', 'in,lbm', '--out_units', 'ft,lbm']
cmd_line(argv=argv, quiet=True)
argv = ['bdf', 'scale', 'caero6.bdf',
#'-o', 'caero6.bdf',
'--length', '0.5', '--time', '1.', '--mass', str(0.5**3.)]
cmd_line(argv=argv, quiet=True)
os.remove('caero.bdf')
os.remove('caero2.bdf')
os.remove('caero3.bdf')
os.remove('caero3_merged.bdf')
os.remove('caero4.bdf')
os.remove('caero5.bdf')
os.remove('caero6.bdf')
#os.remove('caero5.scaled.bdf')
os.remove('caero6.scaled.bdf')
os.remove('caero_aesurf.bdf')
os.remove('caero_caero.bdf')
os.remove('caero_paero.bdf')
os.remove('caero_no_sub.bdf')
def test_export_mcids(self):
"""creates material coordinate systems"""
log = SimpleLogger(level='error')
bdf_filename = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero.bdf')
csv_filename = os.path.join(MODEL_PATH, 'bwb', 'mcids.csv')
export_mcids(bdf_filename, csv_filename,
export_xaxis=True, export_yaxis=True,
iply=9, log=log, debug=False)
model = read_bdf(bdf_filename, xref=False, debug=False)
model.safe_cross_reference()
#os.remove('mcids.csv')
argv = ['bdf', 'export_mcids', bdf_filename, '-o', csv_filename,
'--iplies', '0,1,2,3,4,5,6,7,8,9,10', '--no_x', '--no_y']
with self.assertRaises(DocoptExit):
# can't define both --no_x and --no_y
cmd_line(argv=argv, quiet=True)
argv = ['bdf', 'export_mcids', bdf_filename, '-o', csv_filename,
'--iplies', '0,1,2,3,4,5,6,7,8,9', '--no_x']
cmd_line(argv=argv, quiet=True)
eids = [1204, 1211]
export_mcids(model, csv_filename=None, eids=eids,
export_xaxis=True, export_yaxis=True,
iply=9, log=log, debug=False)
export_mcids(model, csv_filename=None, eids=eids,
export_xaxis=True, export_yaxis=False,
iply=9, log=log, debug=False)
export_mcids(model, csv_filename=None, eids=eids,
export_xaxis=False, export_yaxis=True,
iply=9, log=log, debug=False)
with self.assertRaises(AssertionError):
# export_xaxis and export_yaxis can't both be False
export_mcids(model, csv_filename=None, eids=eids,
export_xaxis=False, export_yaxis=False,
iply=9)
with self.assertRaises(RuntimeError):
# no iply=10
export_mcids(model, csv_filename, eids=eids,
export_xaxis=True, export_yaxis=True,
iply=10)
#os.remove('mcids.csv')
def test_split_cbars_by_pin_flag_1(self):
"""null pin flag test"""
bdf_filename = os.path.join(MODEL_PATH, 'sol_101_elements', 'static_solid_shell_bar.bdf')
split_cbars_by_pin_flag(bdf_filename, pin_flags_filename='pin_flags.csv',
bdf_filename_out='pin_flags.bdf', debug=False)
os.remove('pin_flags.csv')
os.remove('pin_flags.bdf')
argv = ['bdf', 'split_cbars_by_pin_flags', bdf_filename,
'-o', 'pin_flags.bdf', '-p', 'pin_flags.csv']
cmd_line(argv=argv, quiet=True)
os.remove('pin_flags.csv')
os.remove('pin_flags.bdf')
def test_split_cbars_by_pin_flag_2(self):
"""real pin flag test"""
lines = [
'SOL 101\n',
'CEND\n',
'SUBCASE 10\n',
' LOAD = 10\n',
' SPC = 123456\n',
' DISP(PLOT) = ALL\n',
' STRESS(PLOT) = ALL\n',
'BEGIN BULK\n',
'ENDDATA',
]
model = BDF(debug=False)
with self.assertRaises(NotImplementedError):
model.read_bdf(bdf_filename=lines, validate=True, xref=True,
punch=False, read_includes=True,
encoding=None)
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [1., 0., 0.])
model.add_grid(3, [2., 0., 0.])
model.add_grid(4, [3., 0., 0.])
pid = 1000
mid = 1000
Type = 'BAR'
dim = [1., 2.]
model.add_pbarl(pid, mid, Type, dim)
E = 3.0e7
G = 3.0e6
nu = None
model.add_mat1(mid, E, G, nu)
x = [0., 1., 0.]
g0 = None
model.add_cbar(1, pid, [1, 2], x, g0, offt='GGG', pa=0, pb=0,
wa=None, wb=None, comment='reaction')
model.add_cbar(2, pid, [2, 3], x, g0, offt='GGG', pa=0, pb=456,
wa=None, wb=None, comment='End B')
model.add_cbar(3, pid, [3, 4], x, g0, offt='GGG', pa=456, pb=0,
wa=None, wb=None, comment='End A')
sid = 10
node = 4
mag = 1.
xyz = [1., 1., 0.]
model.add_force(sid, node, mag, xyz)
model.add_spc1(123456, '123456', 1)
model.validate()
bdf_file = StringIO()
bdf_file.writelines(lines)
bdf_file.seek(0)
model.read_bdf(bdf_filename=bdf_file, validate=True, xref=False,
punch=False, read_includes=True,
encoding=None)
#model.write_bdf('spike.bdf')
split_cbars_by_pin_flag(model, pin_flags_filename='pin_flags.csv',
bdf_filename_out='pin_flags.bdf', debug=False)
os.remove('pin_flags.csv')
os.remove('pin_flags.bdf')
def test_split_line_elements(self):
"""tests split_line_elements"""
model = BDF(debug=False)
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [1., 0., 0.])
pid = 1000
mid = 1000
Type = 'BAR'
dim = [1., 2.]
model.add_pbarl(pid, mid, Type, dim)
E = 3.0e7
G = 3.0e6
nu = None
model.add_mat1(mid, E, G, nu)
x = [0., 1., 0.]
g0 = None
#model.add_cbar(1, pid, 1, 2, x, g0, offt='GGG', pa=0, pb=0,
#wa=None, wb=None, comment='reaction')
#model.add_cbar(2, pid, 2, 3, x, g0, offt='GGG', pa=0, pb=456,
#wa=None, wb=None, comment='End B')
#model.add_cbar(3, pid, 3, 4, x, g0, offt='GGG', pa=456, pb=0,
#wa=None, wb=None, comment='End A')
#eids = [1, 2, 3]
nids = [1, 2]
model.add_cbar(1, pid, nids, x, g0, offt='GGG', pa=456, pb=5,
wa=None, wb=None, comment='End A')
model.add_cbeam(2, 2000, nids, x, g0, offt='GGG', bit=None, pa=456,
pb=5, wa=None, wb=None, sa=0,
sb=0, comment='')
A = 42.
model.add_conrod(3, mid, nids, A)
model.add_prod(4000, mid, A)
model.add_crod(4, 4000, nids)
Type = 'ROD'
xxb = [0.]
dims = [[1.]]
model.add_pbeaml(2000, mid, Type, xxb, dims)
eids = [1, 2, 3, 4]
split_line_elements(model, eids, neids=10,
eid_start=101, nid_start=101)
bdf_file = StringIO()
model.write_bdf(bdf_file, close=False)
#print(bdf_file.getvalue())
def test_shells_add(self):
"""
tests differential mass and material coordinate systems
on CQUAD4/CTRIA3 elements
"""
pid = 10
mid1 = 100
model = BDF(debug=False)
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [1., 0., 0.])
model.add_grid(3, [1., 1., 0.])
model.add_grid(4, [0., 1., 0.])
model.add_cquad4(10, pid, [1, 2, 3, 4])
model.add_ctria3(11, pid, [1, 2, 3])
mids = [100, 100, 100]
thicknesses = [0.1, 0.1, 0.1]
model.add_pcomp(pid, mids, thicknesses, thetas=[0., 45., 90.], souts=None,
nsm=0., sb=0., ft=None,
tref=0., ge=0., lam=None,
z0=None, comment='')
pid = 11
model.add_ctria3(12, pid, [1, 2, 3], theta_mcid=45., zoffset=0.,
tflag=0, T1=0.1, T2=0.1, T3=0.1, # absolute - mass=0.1*0.5=0.05
comment='')
model.add_ctria3(13, pid, [1, 2, 3], theta_mcid=1, zoffset=0.,
tflag=0, T1=0.1, T2=0.1, T3=0.1, # absolute
comment='')
model.add_cquad4(14, pid, [1, 2, 3, 4], theta_mcid=45., zoffset=0.,
tflag=0, T1=0.1, T2=0.1, T3=0.1, T4=0.1, # absolute
comment='')
model.add_cquad4(15, pid, [1, 2, 3, 4], theta_mcid=1, zoffset=0.,
tflag=1, T1=0.1, T2=0.1, T3=0.1, T4=0.1, # relative
comment='')
origin = [0., 0., 0.]
zaxis = [0., 0., 1.]
xzplane = [1., 0., 0.]
model.add_cord2r(1, origin, zaxis, xzplane, rid=0)
model.add_pshell(pid, mid1=mid1, t=2.)
e11 = 1.0
e22 = 2.0
nu12 = 0.3
model.add_mat8(mid1, e11, e22, nu12, rho=1.0)
model.validate()
model.cross_reference()
model.pop_xref_errors()
mass = mass_properties(model, element_ids=13)[0]
bdf_file = StringIO()
model.write_bdf(bdf_file)
model.uncross_reference()
model.cross_reference()
model.pop_xref_errors()
assert np.allclose(mass, 0.05), mass # t=0.1; A=0.5; nsm=0.; mass=0.05
mass = mass_properties(model, element_ids=14)[0]
bdf_file = StringIO()
model.write_bdf(bdf_file, close=False)
bdf_file.seek(0)
assert np.allclose(mass, 0.1), mass # t=0.1; A=1.0; nsm=0.; mass=0.1
csv_filename = 'mcids.csv'
export_mcids(model, csv_filename=csv_filename, eids=[12, 13],
export_xaxis=True, export_yaxis=True,
iply=0)
#with open(csv_filename, 'r') as csv_file:
#lines = csv_file.readlines()
#assert len(lines) > 0, 'lines=%s' % lines
#for line in lines:
#print(line.rstrip())
#print('-------------')
export_mcids(model, csv_filename=csv_filename, eids=[14, 15],
export_xaxis=True, export_yaxis=True,
iply=0)
model.uncross_reference()
model.safe_cross_reference()
model.uncross_reference()
os.remove(csv_filename)
#bdf_file = model.write_bdf(bdf_file)
model2 = BDF(debug=False)
model2.read_bdf(bdf_file, punch=True)
#with open(csv_filename, 'r') as csv_file:
#lines = csv_file.readlines()
#assert len(lines) > 0, 'lines=%s' % lines
#for line in lines:
#print(line.rstrip())
def test_mirror(self):
"""tests bdf mirroring"""
log = SimpleLogger(level='error')
pid_pshell = 10
pid_psolid = 11
mid1 = 100
model = BDF(log=log) # (log=log)
model.add_grid(1, [10., 10., 10.])
model.add_grid(2, [11., 10., 10.])
model.add_grid(3, [11., 11., 10.])
model.add_grid(4, [10., 11., 10.])
model.add_grid(5, [10., 10., 11.])
model.add_grid(6, [11., 10., 11.])
model.add_grid(7, [11., 11., 11.])
model.add_grid(8, [10., 11., 11.])
model.add_cquad4(1, pid_pshell, [1, 2, 3, 4]) # mass=1
model.add_ctria3(2, pid_pshell, [1, 2, 3]) # mass=0.5
model.add_conrod(3, mid1, [1, 3], A=1.0, j=0.0, c=0.0, nsm=0.0)
#model.add_ctetra(4, pid_psolid, [1, 2, 3, 5])
# penta
# pyram
#model.add_chexa(7, pid_psolid, [1, 2, 3, 4, 5, 6, 7, 8])
model.add_pshell(pid_pshell, mid1=mid1, t=1.)
model.add_psolid(pid_psolid, mid1)
E = 1.0
G = None
nu = 0.3
model.add_mat1(mid1, E, G, nu, rho=1.0)
model.validate()
model.cross_reference()
mass1, unused_cg1, unused_inertia1 = mass_properties(model)
# mirror_model=None -> new model
#
# just a cord2r
# y+ right
plane = np.array([
[0., 0., 0.],
[0., 0., 1.],
[1., 0., 0.],
])
model, unused_mirror_model, unused_nid_offset, unused_eid_offset = bdf_mirror_plane(
model, plane, mirror_model=None, log=None, debug=True,
use_nid_offset=False)
#for nid, node in sorted(mirror_model.nodes.items()):
#print(nid, node.xyz)
out_filename = 'sym.bdf'
write_bdf_symmetric(model, out_filename=out_filename, encoding=None, size=8,
is_double=False,
enddata=None,
close=True, plane='xz') # +y/-y
model2 = read_bdf(out_filename, log=log)
assert len(model2.nodes) == 16, model2.nodes
mass2, cg2, unused_inertia2 = mass_properties(model2)
#print('cg1=%s cg2=%s' % (cg1, cg2))
assert np.allclose(mass1*2, mass2), 'mass1=%s mass2=%s' % (mass1, mass2)
assert np.allclose(cg2[1], 0.), 'cg2=%s stats=%s' % (cg2, model2.get_bdf_stats())
os.remove('sym.bdf')
def test_mirror2(self):
"""mirrors the BDF (we care about the aero cards)"""
log = SimpleLogger(level='warning')
bdf_filename = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero.bdf')
model = bdf_mirror(bdf_filename, plane='xz', log=log)[0]
model.uncross_reference()
model.cross_reference()
make_symmetric_model(model, plane='xz', zero_tol=1e-12)
#model.validate()
def test_pierce_model(self):
"""tests pierce_shell_model"""
log = SimpleLogger(level='error')
pid = 10
mid1 = 100
model = BDF(log=log)
# intersects (min)
model.add_grid(1, [0., 0., 0.])
model.add_grid(2, [1., 0., 0.])
model.add_grid(3, [1., 1., 0.])
model.add_grid(4, [0., 1., 0.])
model.add_cquad4(1, pid, [1, 2, 3, 4])
# intersects (max)
model.add_grid(5, [0., 0., 1.])
model.add_grid(6, [1., 0., 1.])
model.add_grid(7, [1., 1., 1.])
model.add_grid(8, [0., 1., 1.])
model.add_cquad4(2, pid, [5, 6, 7, 8])
# intersects (mid)
model.add_grid(9, [0., 0., 0.5])
model.add_grid(10, [1., 0., 0.5])
model.add_grid(11, [1., 1., 0.5])
model.add_grid(12, [0., 1., 0.5])
model.add_cquad4(3, pid, [9, 10, 11, 12])
# doesn't intersect
model.add_grid(13, [10., 0., 0.])
model.add_grid(14, [11., 0., 0.])
model.add_grid(15, [11., 1., 0.])
model.add_grid(16, [10., 1., 0.])
model.add_cquad4(4, pid, [13, 14, 15, 16])
model.add_pshell(pid, mid1=mid1, t=2.)
E = 1.0
G = None
nu = 0.3
model.add_mat1(mid1, E, G, nu, rho=1.0)
model.validate()
model.cross_reference()
xyz_points = [
[0.4, 0.6, 0.],
[-1., -1, 0.],
]
pierce_shell_model(model, xyz_points)
#def test_intersect(self):
#p0 = np.array([0,0,0], 'd')
#p1 = np.array([1,0,0], 'd')
#p2 = np.array([0,1,0], 'd')
#p3 = np.array([1,1,0], 'd')
#v = np.array([0,0,-1], 'd')
#for i in range(10):
#for j in range(10):
#p = np.array([i*.2-.5, j*.2-.5, 1.], 'd')
#print(i, j, p,
#triangle_intersection(p, v, p0, p1, p2),
#quad_intersection(p, v, p0, p1, p3, p2))
if __name__ == '__main__': # pragma: no cover
unittest.main()
```
#### File: converters/abaqus/abaqus_cards.py
```python
import numpy as np
allowed_element_types = [
'r2d2', 'conn2d2',
'cpe3', 'cpe4', 'cpe4r',
'cps3', 'cps4', 'cps4r',
'coh2d4', 'c3d10h', 'cohax4',
'cax3', 'cax4r', 'mass', 'rotaryi', 't2d2', 'c3d8r',
]
class SolidSection:
"""a SolidSection defines depth and a material"""
def __init__(self, param_map, data_lines, log):
self.param_map = param_map
self.data_lines = data_lines
self.material = param_map['material']
self.thickness = None  # stays None unless a single thickness data line is given
if len(data_lines) == 0:
pass
elif len(data_lines) == 1:
assert len(data_lines) == 1, data_lines
line0 = data_lines[0]
assert len(line0) == 1, data_lines
try:
self.thickness = float(line0[0])
except ValueError:
self.thickness = 0.
for line in data_lines:
log.info('solid - %r' % line)
def __repr__(self):
"""prints a summary for the solid section"""
msg = 'SolidSection(\n'
msg += ' param_map = %r,\n' % self.param_map
msg += ' thickness = %s,\n' % self.thickness
msg += ')\n'
return msg
class Material:
"""a Material object is a series of nodes & elements (of various types)"""
def __init__(self, name, sections, density=None, ndepvars=None, ndelete=None):
self.name = name
self.density = density
#self.depvar = None
self.ndelete = ndelete
self.ndepvars = ndepvars
self.user_material = None
#print(sections)
#if 'density' in sections:
#self.density = sections['density']
#if 'depvar' in sections:
#self.depvar = sections['depvar']
#if 'user_material' in sections:
#self.user_material = sections['user_material']
self.sections = sections
def __repr__(self):
"""prints a summary for the material"""
msg = 'Material(\n'
msg += ' name=%r,\n' % self.name
for key, value in self.sections.items():
msg += ' %r : %r,\n' % (key, value)
msg += ')\n'
return msg
def write(self, abq_file):
#*Material, name=Glassy828DEA
#*Density
#1180.,
#*Elastic
#2.14078e+09, 0.42718
#*Material, name=MAT1_828DEA_Dam
#*Density
#1180.,
#*Depvar, delete=4
#20,
#*User Material, constants=16
#** K CTELIN C10 C01 DAM_FORM FUNC_FORM EVOLF EVMF
#3.2e+09, 5.667e-05, 3.75e+08, 0., 2., 1., 50000., 0.05
#**EVM0ISO EVM0VOL EVM0VM DAM_METHOD ALPHA A B C
#0., 0.5, 0.5, 1., 0., 0., 0.5, 0.6
#*Material, name=Steel
#*Density
#7800.,
#*Elastic
#2e+11, 0.3
abq_file.write('*Material, name=%s\n' % write_name(self.name))
if self.density:
abq_file.write('*Density\n %s,\n' % self.density)
if self.ndepvars:
ndelete = '' if self.ndelete is None else ', delete=%s' % self.ndelete
abq_file.write('*Depvar%s\n %s,\n' % (ndelete, self.ndepvars))
if self.user_material:
nconstants = ''
abq_file.write('*User Material%s\n %s,\n' % (nconstants, self.user_material))
#abq_file.write('** skipping Material %s\n' % self.name)
class Assembly:
def __init__(self, element_types, node_sets, element_sets):
self.element_types = element_types
self.node_sets = node_sets
self.element_sets = element_sets
def write(self, abq_file):
abq_file.write('** skipping Assembly\n')
def __repr__(self):
"""summary for the Assembly"""
etypes = list(self.element_types.keys())
nsets = list(self.node_sets.keys())
esets = list(self.element_sets.keys())
msg = (
'Assembly:\n'
' element_types = %s\n'
' node_sets = %s\n'
' element_sets = %s\n' % (etypes, nsets, esets)
)
return msg
class Part:
"""a Part object is a series of nodes & elements (of various types)"""
def __init__(self, name, nids, nodes, element_types, node_sets, element_sets,
solid_sections, log):
"""
creates a Part object
Parameters
----------
name : str
the name
element_types : Dict[element_type] : node_ids
element_type : str
the element type
bars:
r2d2 : (nelements, 2) int ndarray
shells:
cpe3 : (nelements, 3) int ndarray
cpe4 : (nelements, 4) int ndarray
cpe4r : (nelements, 4) int ndarray
cps3 : (nelements, 3) int ndarray
cps4 : (nelements, 4) int ndarray
cps4r : (nelements, 4) int ndarray
coh2d4 : (nelements, 4) int ndarray
cohax4 : (nelements, 4) int ndarray
cax3 : (nelements, 3) int ndarray
cax4r : (nelements, 4) int ndarray
solids:
c3d10h : (nelements, 10) int ndarray
"""
self.name = name
self.log = log
self.node_sets = node_sets
self.element_sets = element_sets
self.solid_sections = solid_sections
try:
self.nids = np.array(nids, dtype='int32')
except ValueError:
msg = 'nids=%s is not integers' % nids
raise ValueError(msg)
nnodes = len(self.nids)
node0 = nodes[0]
node_shape = len(node0)
if node_shape == 3:
self.nodes = np.array(nodes, dtype='float32')
elif node_shape == 2:
# abaqus can have only x/y coordinates, so we fake the z coordinate
self.nodes = np.zeros((nnodes, 3), dtype='float32')
nodes2 = np.array(nodes, dtype='float32')
#print(nodes2.shape, self.nodes.shape)
self.nodes[:, :2] = nodes2
else:
raise NotImplementedError(node0)
# bars
self.r2d2 = None
# ---shells---
# plane strain
self.cpe3 = None
self.cpe4 = None
self.cpe4r = None
# plane stress
self.cps3 = None
self.cps4 = None
self.cps4r = None
# other
self.coh2d4 = None
self.cohax4 = None
self.cax3 = None
self.cax4r = None
# solids
self.c3d10h = None
self.c3d8r = None
#-----------------------------------
# eids
self.r2d2_eids = None
self.cpe3_eids = None
self.cpe4_eids = None
self.cpe4r_eids = None
self.cps3_eids = None
self.cps4_eids = None
self.cps4r_eids = None
self.coh2d4_eids = None
self.cohax4_eids = None
self.cax3_eids = None
self.cax4r_eids = None
# rigid elements
self.c3d10h_eids = None
self.c3d8r_eids = None
self._store_elements(element_types)
def _etypes_nnodes(self):
"""internal helper method"""
etypes_nnodes = [
('r2d2', 2), # similar to a CBAR
# shells
('cpe3', 3),
('cpe4', 4),
('cpe4r', 4),
('cps3', 3),
('cps4', 4),
('cps4r', 4), # quad, plane stress, reduced
('coh2d4', 4), # cohesive zone
('cohax4', 4), # cohesive zone
('cax3', 3),
('cax4r', 4),
# solids
('c3d10h', 10), # tet10
('c3d8r', 8), # hexa8
]
return etypes_nnodes
def _store_elements(self, element_types):
"""helper method for the init"""
etypes_nnodes = self._etypes_nnodes()
for etype, nnodes in etypes_nnodes:
if etype in element_types:
etype_eids = '%s_eids' % etype
elements = element_types[etype]
eids_elements = np.array(elements, dtype='int32')
setattr(self, etype, eids_elements) # r2d2
setattr(self, etype_eids, eids_elements[:, 0]) # r2d2_eids
assert eids_elements.shape[1] == nnodes + 1, eids_elements.shape
def element(self, eid):
"""gets a specific element of the part"""
elem = None
etypes_nnodes = self._etypes_nnodes()
for etype, nnodes in etypes_nnodes:
etype_eids = '%s_eids' % etype
eids = getattr(self, etype_eids) # r2d2_eids
if eids is not None:
ieid = np.where(eid == eids)[0]
if len(ieid):
ieidi = ieid[0]
elems = getattr(self, etype) # r2d2
elem = elems[ieid, :]
return etype, ieid, elem
return None, None, None
def check_materials(self, materials):
"""validates the materials"""
for section in self.solid_sections:
key = section.material
if key in materials:
self.log.debug('material=%r for part=%r exists' % (key, self.name))
else:
self.log.warning('key=%r is an invalid material' % key)
@property
def nelements(self):
"""Gets the total number of elements"""
n_r2d2 = self.r2d2.shape[0] if self.r2d2 is not None else 0
# plane strain
n_cpe3 = self.cpe3.shape[0] if self.cpe3 is not None else 0
n_cpe4 = self.cpe4.shape[0] if self.cpe4 is not None else 0
n_cpe4r = self.cpe4r.shape[0] if self.cpe4r is not None else 0
# plane stress
n_cps3 = self.cps3.shape[0] if self.cps3 is not None else 0
n_cps4 = self.cps4.shape[0] if self.cps4 is not None else 0
n_cps4r = self.cps4r.shape[0] if self.cps4r is not None else 0
n_coh2d4 = self.coh2d4.shape[0] if self.coh2d4 is not None else 0
n_c3d10h = self.c3d10h.shape[0] if self.c3d10h is not None else 0
n_cohax4 = self.cohax4.shape[0] if self.cohax4 is not None else 0
n_cax3 = self.cax3.shape[0] if self.cax3 is not None else 0
n_cax4r = self.cax4r.shape[0] if self.cax4r is not None else 0
n_c3d8r = self.c3d8r.shape[0] if self.c3d8r is not None else 0
neids = (n_r2d2 +
n_cpe3 + n_cpe4 + n_cpe4r + # plane strain
n_cps3 + n_cps4 + n_cps4r + # plane stress
n_coh2d4 +
n_c3d10h + n_cohax4 + n_cax3 + n_cax4r +
n_c3d8r)
assert neids > 0, neids
return neids
def __repr__(self):
"""prints a summary for the part"""
nnodes = self.nodes.shape[0]
n_r2d2 = self.r2d2.shape[0] if self.r2d2 is not None else 0
# plane strain
n_cpe3 = self.cpe3.shape[0] if self.cpe3 is not None else 0
n_cpe4 = self.cpe4.shape[0] if self.cpe4 is not None else 0
n_cpe4r = self.cpe4r.shape[0] if self.cpe4r is not None else 0
# plane stress
n_cps3 = self.cps3.shape[0] if self.cps3 is not None else 0
n_cps4 = self.cps4.shape[0] if self.cps4 is not None else 0
n_cps4r = self.cps4r.shape[0] if self.cps4r is not None else 0
n_coh2d4 = self.coh2d4.shape[0] if self.coh2d4 is not None else 0
n_c3d10h = self.c3d10h.shape[0] if self.c3d10h is not None else 0
n_cohax4 = self.cohax4.shape[0] if self.cohax4 is not None else 0
n_cax3 = self.cax3.shape[0] if self.cax3 is not None else 0
n_cax4r = self.cax4r.shape[0] if self.cax4r is not None else 0
n_c3d8r = self.c3d8r.shape[0] if self.c3d8r is not None else 0
neids = (n_r2d2 +
n_cpe3 + n_cpe4 + n_cpe4r + # plane strain
n_cps3 + n_cps4 + n_cps4r + # plane stress
n_coh2d4 +
n_c3d10h + n_cohax4 + n_cax3 + n_cax4r +
n_c3d8r)
assert neids == self.nelements, 'something is out of date...'
msg = (
f'Part(name={self.name}, nnodes={nnodes:d}, neids={neids:d},\n'
f' n_r2d2={n_r2d2}, n_cps3={n_cps3}, n_cpe3={n_cpe3}, '
f'n_cpe4={n_cpe4}, n_cpe4r={n_cpe4r}, n_coh2d4={n_coh2d4},\n'
f' n_cohax4={n_cohax4}, n_cax3={n_cax3}, n_cax4r={n_cax4r},'
f' n_cps4r={n_cps4r},\n'
f' n_c3d10h={n_c3d10h}, n_c3d8r={n_c3d8r})\n'
)
nsets = list(self.node_sets.keys())
esets = list(self.element_sets.keys())
msg += ' Node Sets: %s\n' % nsets
msg += ' Element Sets: %s\n' % esets
for section in self.solid_sections:
msg += str(section) + '\n'
return msg
@property
def element_types(self):
"""simplified way to access all the elements as a dictionary"""
element_types = {}
element_types['r2d2'] = (self.r2d2_eids, self.r2d2)
# plane strain
element_types['cpe3'] = (self.cpe3_eids, self.cpe3)
element_types['cpe4'] = (self.cpe4_eids, self.cpe4)
element_types['cpe4r'] = (self.cpe4r_eids, self.cpe4r)
# plane stress
element_types['cps3'] = (self.cps3_eids, self.cps3)
element_types['cps4'] = (self.cps4_eids, self.cps4)
element_types['cps4r'] = (self.cps4r_eids, self.cps4r)
element_types['cohax4'] = (self.cohax4_eids, self.cohax4)
element_types['coh2d4'] = (self.coh2d4_eids, self.coh2d4)
element_types['cax3'] = (self.cax3_eids, self.cax3)
element_types['cax4r'] = (self.cax4r_eids, self.cax4r)
#element_types['cps4r'] = (self.cps4r_eids, self.cps4r)
element_types['c3d10h'] = (self.c3d10h_eids, self.c3d10h)
return element_types
def write(self, abq_file, is_2d=False):
"""writes a Part"""
#name, nids, nodes, element_types, node_sets, element_sets,
# solid_sections, log
abq_file.write('*Part,name=%s\n' % write_name(self.name))
abq_file.write('*Node\n')
if is_2d:
# 2d models carry a faked z coordinate, so only x/y are written
for nid, node in zip(self.nids, self.nodes):
abq_file.write('%i,\t%s,\t%s\n' % (nid, node[0], node[1]))
else:
for nid, node in zip(self.nids, self.nodes):
abq_file.write('%i,\t%s,\t%s,\t%s\n' % (nid, node[0], node[1], node[2]))
for set_name, values in sorted(self.node_sets.items()):
write_node_set_to_file(abq_file, set_name, values)
for elem_type, (eids, elems) in self.element_types.items():
if eids is None:
continue
abq_file.write('*Element,type=%s\n' % elem_type)
nnodes = elems.shape[1]
fmt = '%s,\t' * (nnodes - 1) + '%s\n'
for eid, elem in zip(eids, elems):
abq_file.write(fmt % tuple(elem))
for set_name, values in sorted(self.element_sets.items()):
write_element_set_to_file(abq_file, set_name, values)
abq_file.write('*endpart\n')
def write_name(name):
"""Abaqus has odd rules for writing words without spaces vs. with spaces"""
return '%r' % name if ' ' in name else '%s' % name
def write_element_set_to_file(abq_file, set_name, values_array):
"""writes an element set"""
abq_file.write('*Elset, elset=%s\n' % write_name(set_name))
write_set_to_file(abq_file, values_array)
def write_node_set_to_file(abq_file, set_name, values_array):
"""writes a node set"""
abq_file.write('*Nset, nset=%s\n' % write_name(set_name))
write_set_to_file(abq_file, values_array)
def write_set_to_file(abq_file, values_array):
"""writes 16 integer values per line to a set card"""
assert isinstance(values_array, np.ndarray), type(values_array)
nvalues = len(values_array)
nrows = nvalues // 16
nleftover = nvalues % 16
if nrows:
values_array_square = values_array[:nrows*16].reshape(nrows, 16)
fmt = '%i,\t' * 16 + '\n'
fmt2 = '%i,\t' * 15 + '%i\n'
for row in values_array_square[:-1, :]:
abq_file.write(fmt % tuple(row))
abq_file.write(fmt2 % tuple(values_array_square[-1, :]))
if nleftover:
fmt = '%i,\t' * (nleftover - 1) + '%i\n'
leftover = values_array[nrows*16:]
abq_file.write(fmt % tuple(leftover))
```
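A short sketch of the 16-ids-per-line behaviour of `write_node_set_to_file`/`write_set_to_file` above, assuming the functions are run in this module's namespace (the set name and ids are arbitrary):
```python
import io
import numpy as np

abq_file = io.StringIO()
write_node_set_to_file(abq_file, 'all_nodes', np.arange(1, 21, dtype='int32'))
print(abq_file.getvalue())
# *Nset, nset=all_nodes
# 1, 2, 3, ..., 16          <- first full row of 16 ids (tab separated)
# 17, 18, 19, 20            <- leftover row
```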
#### File: cart3d/dev/intersect.py
```python
from numpy import zeros, cross, dot, allclose, sign
from numpy.linalg import norm
from pyNastran.converters.cart3d.cart3d_reader import Cart3D
from scipy.spatial import KDTree
class Intersect:
def __init__(self, nodes, elements, regions):
self.nodes = nodes
self.elements = elements
self.regions = regions
def intersect_tris(self):
# ==== Calculate Global Edge Length ====
elements = self.elements - 1
nodes = self.nodes
ne, three = elements.shape
p1 = nodes[elements[:, 0], :]
p2 = nodes[elements[:, 1], :]
p3 = nodes[elements[:, 2], :]
centroid = (p1 + p2 + p3) / 3.
a = p2 - p1
b = p3 - p1
n = cross(a, b)
assert len(n) == ne, 'len(n)=%s ne=%s' % (len(n), ne)
print(n)
ni = norm(n, axis=1)
print('n.shape=%s ni.shape=%s' % (n.shape, ni.shape))
assert len(ni) == ne, 'len(ni)=%s ne=%s' % (len(ni), ne)
A = 0.5 * ni # area
print(min(ni))
assert A.min() > 0, A
#sys.exit()
n /= ni[:, None] # normal vector
assert len(n) == ne, 'len(n)=%s ne=%s' % (len(n), ne)
# Global Edge Length
gel = zeros((ne, 2), dtype='float64')
gel[:, 0] = norm(a, axis=1)
gel[:, 1] = norm(b, axis=1)
gel2 = gel.max(axis=1)
assert len(gel2) == ne, 'len(gel2)=%s ne=%s' % (len(gel2), ne)
# single valued "Global Edge Length" (hence the i)
geli = max(gel2)
print('global_edge_length = %s' % geli)
# we increase the search size just cause...
# we're expecting nice isotropic triangles, but aren't totally
# relying on it
geli *= 1.05
print('global_edge_length_i = %s' % geli)
# ==== create node -> element map ====
# one independent list per node; ``[[]] * n`` would alias a single shared list
nid_to_eid_map = [[] for _ in range(nodes.shape[0])]
for eid, (n1, n2, n3) in enumerate(elements):
nid_to_eid_map[n1].append(eid)
nid_to_eid_map[n2].append(eid)
nid_to_eid_map[n3].append(eid)
# ==== Create KD Tree of centroids ====
centroid_tree = KDTree(centroid)
# ==== Intersect All Mesh Geoms ====
elements2 = []
for i, element in enumerate(elements):
c = centroid[i]
nodes1 = elements[i]
snodes = set(nodes1)
gel2i = gel2[i]
print('c[%i] = %s' % (i, c))
pts = centroid_tree.query_ball_point(c, gel2i)
#print(pts)
for pt in pts:
diff = norm(c - centroid[pt])
nodes2 = elements[pt]
common_set = snodes.intersection(nodes2)
if not common_set:
print(' c[%i]=%s alt[%i]=%s diff=%s gel2=%s valid=%s' % (i, list(nodes1),
pt, list(nodes2),
diff,
gel2[pt], diff < geli))
is_intersection = self.intersect(i, pt, nodes1, nodes2, nodes, n)
#print(centroid_tree.query(c, k=10))
#break
def intersect(self, e1, e2, element1, element2, nodes, n):
"""
http://fileadmin.cs.lth.se/cs/Personal/Tomas_Akenine-Moller/pubs/tritri.pdf
"""
n2 = n[e2]
#print("nodes.shape =", nodes.shape)
pt = nodes[element2[0], :]
d2 = -dot(n2, pt) # vo2 - node 0 on element 2
#dvi = []
#for i in range(3):
#ei = element1[i]
#dvii = dot(n2, nodes[ei, :]) + d2
#dvi.append(dvii)
#print(" dvi = %s" % dvi)
#e1 = elements1
dvi2 = dot(n2, nodes[element1, :].T) + d2
sdvi = sign(dvi2)
sign_range = sdvi.max() - sdvi.min()
if allclose(dvi2.min(), 0.) or sign_range == 2.:
print(" element2 = ", element2[0])
print(" ", pt)
print(" d2", d2)
print(" dvi = %s" % dvi2)
print(" sign_range = %s" % sign_range)
is_intersection = True
raise NotImplementedError()
else:
is_intersection = False
#print(" n2=%s" % (n2))
return is_intersection
def remove_inner_elements(self):
pass
def intersect_model(cart3d_filename):
cart3d = Cart3D()
cart3d.read_cart3d(cart3d_filename)
intersect = Intersect(cart3d.points, cart3d.elements, cart3d.regions)
intersect.intersect_tris()
def main():
cart3d_filename = 'threePlugs_bin.tri'
intersect_model(cart3d_filename)
if __name__ == '__main__': # pragma: no cover
main()
```
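The early-out in `Intersect.intersect` above is the signed plane-distance test from the Möller triangle-triangle paper: if every vertex of triangle 1 lies strictly on one side of triangle 2's plane, the pair cannot intersect. A standalone sketch of just that test (not the full intersection algorithm):
```python
import numpy as np

def same_side_of_plane(tri1, tri2):
    """True if all vertices of tri1 lie strictly on one side of tri2's plane."""
    p0, p1, p2 = tri2
    n2 = np.cross(p1 - p0, p2 - p0)   # plane normal of triangle 2
    d2 = -np.dot(n2, p0)              # plane offset (n2 . x + d2 = 0)
    dv = tri1 @ n2 + d2               # signed distances of tri1's vertices
    return bool(np.all(dv > 0.) or np.all(dv < 0.))

tri_a = np.array([[0., 0., 1.], [1., 0., 1.], [0., 1., 1.]])  # sits in z=1
tri_b = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])  # sits in z=0
print(same_side_of_plane(tri_a, tri_b))  # True -> no intersection possible
```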
#### File: nastran/menus/setup_model_sidebar.py
```python
import os
import sys
# kills the program when you hit Ctrl+C from the command line
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from qtpy.QtWidgets import QApplication, QPushButton
from pyNastran.gui.utils.qt.dialogs import save_file_dialog
from pyNastran.gui.utils.qt.results_window import ResultsWindow
from pyNastran.converters.nastran.wildcards import GEOM_BDF_SAVE
from pyNastran.converters.nastran.menus.modify_map import MODIFY_MAP, UPDATE_MAP
from pyNastran.converters.nastran.menus.modify_menu import ModifyMenu
from pyNastran.converters.nastran.menus.model_sidebar import Sidebar
def build_form_from_model(model):
form = []
objs = []
i = 0
nodes_form = []
properties_form = []
materials_form = []
caeros_form = []
#splines_form = []
import numpy as np
nids = list(model.nodes.keys())
spoints = list(model.spoints.keys())
ngrids = len(nids)
nspoints = len(spoints)
nnodes = ngrids + nspoints
nid_type = np.zeros((nnodes, 2), dtype='int32')
nid_type[:ngrids, 0] = nids
nid_type[:ngrids, 1] = 0 # grid
nid_type[ngrids:ngrids+nspoints, 0] = spoints
nid_type[ngrids:ngrids+nspoints, 1] = 1 # spoint
#for nid, nid_typei in nid_type:
#if nid_typei == 0:
#nodes_form.append(('GRID %i' % nid, i, []))
#elif nid_typei == 1:
#nodes_form.append(('SPOINT %i' % nid, i, []))
#else: # pragma: no cover
#raise RuntimeError(nid_type)
#i += 1
elements_form = []
mass_form = []
rigid_elements_form = []
for eid, elem in sorted(model.elements.items()):
elements_form.append(('%s %i' % (elem.type, eid), i, []))
objs.append(elem)
i += 1
for eid, elem in sorted(model.masses.items()):
mass_form.append(('%s %i' % (elem.type, eid), i, []))
objs.append(elem)
i += 1
for eid, elem in sorted(model.rigid_elements.items()):
rigid_elements_form.append(('%s %i' % (elem.type, eid), i, []))
objs.append(elem)
i += 1
elem_form = []
if elements_form and mass_form and rigid_elements_form:
elem_form = [
('Elastic', None, elements_form),
('Masses', None, mass_form),
('Rigid', None, rigid_elements_form),
]
elif elements_form and mass_form:
elem_form = [
('Elastic', None, elements_form),
('Masses', None, mass_form),
]
elif elements_form and rigid_elements_form:
elem_form = [
('Elastic', None, elements_form),
('Rigid', None, rigid_elements_form),
]
elif mass_form and rigid_elements_form:
elem_form = [
('Masses', None, mass_form),
('Rigid', None, rigid_elements_form),
]
elif elements_form:
elem_form = [
('Elastic', None, elements_form),
]
elif mass_form:
elem_form = [
('Masses', None, mass_form),
]
elif rigid_elements_form:
elem_form = [
('Rigid', None, rigid_elements_form),
]
for pid, prop in sorted(model.properties.items()):
properties_form.append(('%s %i' % (prop.type, pid), i, []))
objs.append(prop)
i += 1
for mid, mat in sorted(model.materials.items()):
materials_form.append(('%s %i' % (mat.type, mid), i, []))
objs.append(mat)
i += 1
for caeroid, caero in sorted(model.caeros.items()):
caeros_form.append(('%s %i' % (caero.type, caeroid), i, []))
objs.append(caero)
i += 1
#for splineid, spline in sorted(model.splines.items()):
#splines_form.append(('%s %i' % (spline.type, splineid), i, []))
#i += 1
#coords_form = []
#for cid, coord in sorted(model.coords.items()):
#coords_form.append(('%s %i' % (coord.type, cid), i, []))
#i += 1
#nodes = ('Nodes', None, nodes_form)
elements = ('Elements', None, elem_form)
properties = ('Properties', None, properties_form)
materials = ('Materials', None, materials_form)
caeros = ('CAEROs', None, caeros_form)
#splines = ('SPLINEs', None, splines_form)
#coords = ('Coords', None, coords_form)
#if nodes_form:
#form.append(nodes)
if elem_form:
form.append(elements)
if properties_form:
form.append(properties)
if materials_form:
form.append(materials)
if caeros_form:
form.append(caeros)
#if splines_form:
#form.append(splines)
#if coords_form:
#form.append(coords)
#print(form)
assert len(objs) > 0, objs
return form, objs
class ModelSidebar(Sidebar):
def __init__(self, parent, nastran_io=None):
self.bdf_filename = None
self.model = None
self.objs = None
self.parent2 = parent
self.nastran_io = nastran_io
from functools import partial
right_click_actions = [
('Print...', self.on_print, True),
('Stats...', self.on_stats, True),
('Modify...', self.on_modify, True),
]
export_button = QPushButton('Export')
export_button.clicked.connect(self.on_export)
setup_dict = {
4: export_button,
}
form = []
parent = self.parent2
super(ModelSidebar, self).__init__(parent, data=form, actions=right_click_actions,
results_window_title='Model',
clear_data=False, setup_dict=setup_dict, debug=True)
self.apply_button.setVisible(False)
self.show()
def on_print(self, icase):
"""prints the selected card"""
obj = self.objs[icase]
print(obj)
self.model.log.info('\n' + str(obj))
def on_stats(self, icase):
"""prints the stats for the selected card"""
stats = self.objs[icase].get_stats()
print(stats)
self.model.log.info(str(stats))
def on_modify(self, icase):
"""opens a menu to modify a card"""
obj = self.objs[icase]
update_function_name = None
if obj.type in UPDATE_MAP:
update_function_name = UPDATE_MAP[obj.type]
try:
variables = MODIFY_MAP[obj.type]
except KeyError:
self.model.log.warning(f'{obj.type} does not support Modify...')
keys = list(MODIFY_MAP.keys())
keys.sort()
print('keys=', keys)
print(obj)
return
self.load_menu(self.model, obj, variables, update_function_name, win_parent=None)
def load_menu(self, model, obj, variables, update_function_name, win_parent=None):
data = {
'font_size': 9,
'model' : model,
'obj' : obj,
'variables': variables,
'update_function_name': update_function_name,
}
parent = self.parent
self.menu = ModifyMenu(data, nastran_io=self.nastran_io, win_parent=None)
self.menu.show()
self.menu.exec_()
def set_model(self, model):
self.model = model
self.bdf_filename = model.bdf_filename
form, objs = build_form_from_model(self.model)
self.objs = objs
self.result_case_window.update_data(form)
def on_export(self):
"""exports a modified bdf model"""
bdf_filename_base, ext = os.path.splitext(self.bdf_filename)
default_name = bdf_filename_base + '.modified' + ext
default_dirname = os.path.dirname(default_name)
fname, unused_flt = save_file_dialog(self, 'Save As...', default_dirname, GEOM_BDF_SAVE)
if not fname:
return
self.model.write_bdf(fname)
def __repr__(self):
return '<nastran_menu.ModelSidebar(...)>'
def main(): # pragma: no cover
app = QApplication(sys.argv)
import pyNastran
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.join(PKG_PATH, '..', 'models')
bdf_filename = os.path.join(MODEL_PATH, 'bwb', 'bwb_saero.bdf')
#bdf_filename = os.path.join(MODEL_PATH, 'aero', 'bah_plane', 'bah_plane.bdf')
from pyNastran.bdf.bdf import read_bdf
model = read_bdf(bdf_filename)
print(model.get_bdf_stats())
unused_name = 'name'
#res_widget.update_results(form, name)
#--------------------------------------------
m = ModelSidebar(app)
m.set_model(model)
sys.exit(app.exec_())
if __name__ == "__main__": # pragma: no cover
main()
```
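`build_form_from_model` returns a nested `form` of `(label, icase, children)` entries plus a parallel `objs` list indexed by `icase`; the sidebar renders that tree. A standalone sketch of walking such a structure (the labels below are made up for illustration and do not come from a real model):

```python
def walk_form(form, depth=0):
    """recursively prints a (label, icase, children) tree"""
    for label, icase, children in form:
        print('%s%s (icase=%s)' % ('  ' * depth, label, icase))
        walk_form(children, depth + 1)

# hypothetical form mirroring the structure built above
form = [
    ('Elements', None, [
        ('Elastic', None, [('CQUAD4 1', 0, []), ('CTRIA3 2', 1, [])]),
        ('Masses', None, [('CONM2 3', 2, [])]),
    ]),
    ('Properties', None, [('PSHELL 100', 3, [])]),
]
walk_form(form)
```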
#### File: converters/panair/assign_type.py
```python
from typing import List, Union, Optional
def double(value, name):
# type: (str, str) -> float
"""casts to an float value"""
if isinstance(value, float):
return value
fvalue = float(value)
return fvalue
def integer(value, name):
# type: (str, str) -> int
"""casts to an integer value"""
if isinstance(value, int):
return value
fvalue = float(value)
if not fvalue.is_integer():
raise RuntimeError('%s=%r is not an integer' % (name, fvalue))
return int(fvalue)
def fortran_value(value):
# type: (float) -> str
return "%8.4E" % value
def integer_or_blank(value, name, default=None):
# type: (str, str, Optional[Union[float, int]]) -> Optional[Union[float, int]]
value = value.strip()
if not value:
return default
fvalue = float(value)
if not fvalue.is_integer():
raise RuntimeError('%s=%r is not an integer' % (name, fvalue))
return int(fvalue)
def double_or_blank(value, name, default=None):
# type: (str, str, Optional[float]) -> Optional[float]
value = value.strip()
if not value:
return default
try:
fvalue = float(value)
except ValueError:
raise SyntaxError('%s=%r is not a float' % (name, value))
return fvalue
```
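A quick sanity check of how these PanAir field casters behave. This is a minimal sketch that assumes the module above is importable at the path shown in the file header:

```python
from pyNastran.converters.panair.assign_type import (
    double, integer, integer_or_blank, double_or_blank)

assert double('3.5', 'xdiv') == 3.5
assert integer('4.0', 'nrows') == 4             # whole-number floats are accepted
assert integer_or_blank('  ', 'nrows', 3) == 3  # blank fields fall back to the default
assert double_or_blank('', 'xte', 1.0) == 1.0
try:
    integer('4.5', 'nrows')                     # non-integral values raise
except RuntimeError as error:
    print(error)
```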
#### File: converters/shabp/test_shabp.py
```python
import os
import unittest
import numpy as np
from cpylog import SimpleLogger
import pyNastran
from pyNastran.converters.shabp.shabp import read_shabp
from pyNastran.converters.shabp.shabp_results import ShabpOut
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.join(PKG_PATH, 'converters', 'shabp', 'models')
class TestShabp(unittest.TestCase):
"""tests S/HABP"""
def test_shabp_1(self):
"""tests nose.mk5"""
log = SimpleLogger(level='info', encoding='utf-8')
shabp_filename = os.path.join(MODEL_PATH, 'nose', 'noseX_working.mk5')
model = read_shabp(shabp_filename, log=log)
npatches_expected = 1
assert len(model.X) == npatches_expected, f'npatches_expected={npatches_expected} len(model.X)={len(model.X)}'
        assert len(model.Y) == npatches_expected, f'npatches_expected={npatches_expected} len(model.Y)={len(model.Y)}'
        assert len(model.Z) == npatches_expected, f'npatches_expected={npatches_expected} len(model.Z)={len(model.Z)}'
#print(f'component_name_to_patch = {model.component_name_to_patch}')
#print(f'patch_to_component_num = {model.patch_to_component_num}')
#print(f'component_to_params = {model.component_to_params}')
#print(f'component_num_to_name = {model.component_num_to_name}')
#print(f'component_name_to_num = {model.component_name_to_num}')
assert model.component_name_to_patch == {'ellipse': [1]}, model.component_name_to_patch
assert model.patch_to_component_num == {0: 1}, model.patch_to_component_num
assert model.component_to_params == {0: [5, 5, 1, 0, 0, 0.0, 1.0, 0.0, 1.0, 3.0, 3.0]}, model.component_to_params
assert model.component_num_to_name == {0: 'ellipse'}, model.component_num_to_name
assert model.component_name_to_num == {'ellipse': 0}, model.component_name_to_num
areas = model.get_area_by_patch()
assert np.allclose(areas, [266.47640991]), areas
areas = model.get_area_by_component()
assert np.allclose(areas['ellipse'], 266.47640991), areas
areas, lengths = model.get_area_xlength_by_component()
assert np.allclose(areas['ellipse'], 266.47640991), areas
assert np.allclose(lengths['ellipse'], 20.0), lengths
#self.title = ''
#self.header = ''
#self.shabp_cases = {}
def test_shabp_2(self):
"""tests the flap"""
log = SimpleLogger(level='info', encoding='utf-8')
shabp_infilename = os.path.join(MODEL_PATH, 'flap', 'flap_inviscid.mk5')
shabp_outfilename = os.path.join(MODEL_PATH, 'flap', 'SHABP.OUT')
#test.model.load_shabp_geometry(shabp_infilename)
#test.on_load_geometry(shabp_infilename, geometry_format='shabp', raise_error=True)
model = read_shabp(shabp_infilename, read_special_routines=True,
log=log, debug=None)
#print(f'component_name_to_patch = {model.component_name_to_patch}')
#print(f'patch_to_component_num = {model.patch_to_component_num}')
#print(f'component_to_params = {model.component_to_params}')
#print(f'component_num_to_name = {model.component_num_to_name}')
#print(f'component_name_to_num = {model.component_name_to_num}')
assert model.component_name_to_patch == {'COMP1': [1], 'FLAP': [2]}, model.component_name_to_patch
assert model.patch_to_component_num == {0: 1, 1: 2}, model.patch_to_component_num
assert model.component_to_params == {0: [3, 1, 1, 0, 0, 0.0, 1.0, 0.0, 1.0, 3.0, 3.0], 1: [3, 1, 1, 0, 0, 0.0, 1.0, 0.0, 1.0, 3.0, 3.0]}, model.component_to_params
assert model.component_num_to_name == {0: 'COMP1', 1: 'FLAP'}, model.component_num_to_name
assert model.component_name_to_num == {'COMP1': 0, 'FLAP': 1}, model.component_name_to_num
areas = model.get_area_by_patch()
assert np.allclose(areas, [50., 124.6875]), areas
areas = model.get_area_by_component()
assert np.allclose(areas['COMP1'], 124.6875), areas
assert np.allclose(areas['FLAP'], 50.0), areas
areas, lengths = model.get_area_xlength_by_component()
assert np.allclose(areas['COMP1'], 124.6875), areas
assert np.allclose(areas['FLAP'], 50.0), areas
assert np.allclose(lengths['COMP1'], 24.9375), lengths
assert np.allclose(lengths['FLAP'], 34.9375), lengths
out_model = ShabpOut(model, log=log)
Cpd, unused_deltad = out_model.read_shabp_out(shabp_outfilename)
#test.on_load_results(shabp_outfilename)
if __name__ == '__main__':
unittest.main()
```
#### File: stl/dev/stl_mesh.py
```python
import numpy as np
from pyNastran.converters.stl.stl import read_stl
from scipy.spatial import cKDTree
import scipy.interpolate
def projected_barycentric_coord(p, q, u, v):
r"""
points p, q
vector u, v
3
*v
/ \ <----p
q*----*u
1 2
u = p2 - p1
v = p3 - p1
"""
n = np.cross(u, v)
one_over_4_area_squared = 1.0 / (n @ n)
    w = p - q
    b = np.empty(3)  # barycentric coordinates; was never initialized
    b[2] = (np.cross(u, w) @ n) * one_over_4_area_squared
b[1] = (np.cross(w, v) @ n) * one_over_4_area_squared
b[0] = 1.0 - b[1] - b[2]
return b
def project_points_onto_stl(stl, points):
"""
Parameters
----------
nodes : (n, 3) ndarray floats
The nodes on the surface.
elements : (n, 3) ndarray ints
The elements on the surface.
"""
nodes = stl.nodes
elements = stl.elements
if not hasattr(stl, 'centroids'):
n1 = elements[:, 0]
n2 = elements[:, 1]
n3 = elements[:, 2]
p1 = nodes[n1, :]
p2 = nodes[n2, :]
p3 = nodes[n3, :]
centroids = (p1 + p2 + p3) / 3.
stl.centroids = centroids
tree = cKDTree(centroids, leafsize=16, compact_nodes=True,
copy_data=False, balanced_tree=True)
stl.tree = tree
#tree = scipy.spatial.KDTree(data, leafsize=10)
#tree.query_ball_point(x, r, p=2., eps=0)
#tree.query_ball_tree(other, r, p=2., eps=0)
#tree.query_pairs(r, p=2., eps=0)
#tree.sparse_distance_matrix(other, max_distance, p=2.)
tree = stl.tree
#d : array of floats
#The distances to the nearest neighbors.
#If x has shape tuple+(self.m,), then d has shape tuple+(k,).
#Missing neighbors are indicated with infinite distances.
#i : ndarray of ints
#The locations of the neighbors in self.data.
#If `x` has shape tuple+(self.m,), then `i` has shape tuple+(k,).
#Missing neighbors are indicated with self.n.
dist, i = tree.query(points, k=1, eps=0, p=2,
distance_upper_bound=np.inf, n_jobs=1)
# distance from centroid to point, such that we get the element id directly
print('dist =', dist)
print('i =', i)
n1 = elements[i, 0]
n2 = elements[i, 1]
n3 = elements[i, 2]
p1 = nodes[n1, :]
p2 = nodes[n2, :]
p3 = nodes[n3, :]
u = p2 - p1
v = p3 - p1
#w = points_rotated - p1
n = np.cross(u, v)
#n2 = 1 / n**2
#gamma_a = (np.cross(u, w) @ n) / n2
#gamma_b = (np.cross(u, w) @ n) / n2
try:
nmag = np.linalg.norm(n, axis=1)
except ValueError:
print('n.shape =', n.shape)
raise
#area = nmag / 2.
assert nmag.size == n1.size, 'nmag.size=%s n1.size=%s' % (nmag.size, n1.size)
print('n1 =', n1)
print('n2 =', n2)
print('n3 =', n3)
p = points
a = p1
b = p2
c = p3
# http://math.stackexchange.com/questions/544946/determine-if-projection-of-3d-point-onto-plane-is-within-a-triangle
pc = p - c
    alpha = np.linalg.norm(np.cross(p - b, pc), axis=1) / nmag
    beta = np.linalg.norm(np.cross(pc, p - a), axis=1) / nmag
gamma = 1 - alpha - beta
#print('alpha =', alpha)
#print('beta =', beta)
#print('gamma =', gamma)
#print('a*p =', alpha[:, np.newaxis] * p1)
p_prime = alpha[:, np.newaxis] * p1 + beta[:, np.newaxis] * p2 + gamma[:, np.newaxis] * p3
#print('p_prime =\n', p_prime)
#tree.query_ball_point(x, r, p=2., eps=0)
#tree.query_ball_tree(other, r, p=2., eps=0)
#tree.query_pairs(r, p=2., eps=0)
#tree.sparse_distance_matrix(other, max_distance, p=2.)
return p_prime
def project_line_onto_stl(stl, pa, pb, npoints=11):
"""top down projection"""
normal = np.array([0., 0., -1.], dtype='float32')
#max_z = nodes[:, 2].max()
#min_z = nodes[:, 2].min()
# TODO: rotate if want a new normal
#dz = max_z - min_z
#dzi = dz / 20.
#points_rotated = points
#out_points = project_points_onto_stl(stl, points)
# TODO: rotate if want a new normal
p = np.linspace(0., 1., num=npoints, endpoint=True)
p21 = pb - pa
ratio = p21 / np.linalg.norm(p21)
print('p =', p)
print('ratio =', ratio)
points = pa + p[:, np.newaxis] * ratio
print('points =', points)
out_points = project_points_onto_stl(stl, points)
return out_points
def project_curve_onto_stl(stl, points, npoints=11):
"""top down projection"""
normal = np.array([0., 0., -1.], dtype='float32')
#max_z = nodes[:, 2].max()
#min_z = nodes[:, 2].min()
# TODO: rotate if want a new normal
#dz = max_z - min_z
#dzi = dz / 20.
#points_rotated = points
#out_points = project_points_onto_stl(stl, points)
# TODO: rotate if want a new normal
# create interpolation curve from points
p2 = points[1:, :]
p1 = points[:-1, :]
dx = np.linalg.norm(p2 - p1, axis=1)
assert dx.size == p1.shape[0]
t = dx.sum()
pa = points[0, :]
dp = points - pa
dx2 = np.linalg.norm(dp, axis=1)
t = dx2.sum()
# http://docs.scipy.org/doc/scipy-0.17.0/reference/generated/scipy.interpolate.interp1d.html
func = scipy.interpolate.interp1d(t, dx2, kind='cubic', axis=-1,
copy=True,
bounds_error=None,
fill_value=np.nan,
assume_sorted=False) # cubic spline
p = np.linspace(0., t, num=npoints, endpoint=True)
t2 = func(p)
dx = func(t2) + pa
#p21 = pb - pa
#ratio = p21 / np.linalg.norm(p21)
#print('p =', p)
#print('ratio =', ratio)
points = pa + dx
print('points =', points)
out_points = project_points_onto_stl(stl, points)
return out_points
def main():
import os
import pyNastran
PKG_PATH = pyNastran.__path__[0]
stl_filename = os.path.join(PKG_PATH, 'converters', 'stl', 'sphere.stl')
stl = read_stl(stl_filename)
#XYZ Global = (2.0035907914418716, 1.3287668328026303, 2.873731014735773)
#NodeID = 142; xyz=(1.88823, 1.5, 2.94889)
#lineNo=2110 annotate_cell_picker()
#XYZ Global = (1.9419959964242275, 1.141259948469464, 2.869267723165781)
#NodeID = 141; xyz=(1.93018, 1.02165, 2.85504)
#lineNo=2110 annotate_cell_picker()
#XYZ Global = (2.1320656653448338, 1.4367816967143772, 2.83778333777658)
#NodeID = 137; xyz=(2.25, 1.5, 2.79904)
# nids = [142, 137, 141]
# 2.0035907914418716, 1.3287668328026303, 2.873731014735773
points = np.array([
[2.0035907914418716, 1.3287668328026303, 2.873731014735773],
[2.25, 1.5, 2.79904],
[2.25, 1.5, 2.79903],
], dtype='float32')
pa = points[0, :]
pb = points[1, :]
out_points = project_points_onto_stl(stl, points)
out_points2 = project_line_onto_stl(stl, pa, pb, npoints=11)
#out_points3 = project_curve_onto_stl(stl, points, npoints=11)
def build():
from pyNastran.bdf.bdf import BDF
model = BDF(debug=False)
xyz1 = [0., 0., 0.]
xyz2 = [0., 1., 0.]
xyz3 = [1., 1., 0.]
xyz4 = [1., 0., 0.]
model.add_grid(1, xyz=xyz1)
model.add_grid(2, xyz=xyz2)
model.add_grid(3, xyz=xyz3)
model.add_grid(4, xyz=xyz4)
    model.add_cquad4(eid=1, pid=100, nids=[1, 2, 3, 4])
    model.add_pshell(pid=100, mid1=1000, t=0.1)
model.add_mat1(mid=1000, E=1e7, G=None, nu=0.3)
if __name__ == '__main__':
build()
main()
```
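The `alpha`/`beta`/`gamma` block in `project_points_onto_stl` computes barycentric weights from triangle areas. A standalone numpy check on a single triangle (made-up coordinates) showing the weights reconstruct a point lying in the triangle's plane:

```python
import numpy as np

# triangle vertices and a point in the plane of the triangle
a = np.array([0., 0., 0.])
b = np.array([1., 0., 0.])
c = np.array([0., 1., 0.])
p = np.array([0.25, 0.25, 0.])

n = np.cross(b - a, c - a)
nmag = np.linalg.norm(n)  # 2x the triangle area
alpha = np.linalg.norm(np.cross(p - b, p - c)) / nmag  # weight of vertex a
beta = np.linalg.norm(np.cross(p - c, p - a)) / nmag   # weight of vertex b
gamma = 1. - alpha - beta                              # weight of vertex c
p_prime = alpha * a + beta * b + gamma * c
assert np.allclose(p_prime, p)
print(alpha, beta, gamma)  # 0.5 0.25 0.25
```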
#### File: cards/elements/beams.py
```python
from collections import defaultdict
import numpy as np
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double_or_blank, integer_double_string_or_blank)
from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default
from pyNastran.dev.bdf_vectorized2.cards.elements.bars import init_x_g0
from pyNastran.bdf.cards.base_card import _format_comment
class BeamElement:
"""base class for CBEAM"""
card_name = ''
def __init__(self, model):
"""intializes the BeamElement"""
self.model = model
self.is_current = True
self.eid = np.array([], dtype='int32')
self.pid = np.array([], dtype='int32')
        self.nids = np.array([], dtype='int32')
self.offt = np.array([], dtype='|U8')
self.x = np.array([], dtype='float64')
self.g0 = np.array([], dtype='int32')
self.pin_flags = np.array([], dtype='int32')
self.wa_offset = np.array([], dtype='float64')
self.wb_offset = np.array([], dtype='float64')
self.sab_warping = np.array([], dtype='int32')
self._eid = []
self._pid = []
self._nids = []
self._offt = []
self._x = []
self._g0 = []
self._pin_flags = []
self._wa_offset = []
self._wb_offset = []
self._sab_warping = []
self.comment = defaultdict(str)
def check_if_current(self, nid, nids):
"""we split this up to reason about it easier"""
if self.is_current:
if nid in nids:
# card exists, so we use that slot
add_card = False
else:
add_card = True
else:
add_card = True
return add_card
#def get_element_by_eid(self, eid):
#self.make_current()
#ieid = np.searchsorted(eid, self.eid)
#return self[ieid]
def make_current(self):
"""creates an array of the GRID points"""
if not self.is_current:
if len(self.eid) > 0: # there are already elements in self.eid
                self.eid = np.hstack([self.eid, self._eid])
                self.pid = np.hstack([self.pid, self._pid])
                self.nids = np.vstack([self.nids, self._nids])
                self.offt = np.hstack([self.offt, self._offt])
                self.x = np.vstack([self.x, self._x])
                self.g0 = np.hstack([self.g0, self._g0])
                self.pin_flags = np.vstack([self.pin_flags, self._pin_flags])
                self.wa_offset = np.vstack([self.wa_offset, self._wa_offset])
                self.wb_offset = np.vstack([self.wb_offset, self._wb_offset])
# don't need to handle comments
else:
self.eid = np.array(self._eid, dtype='int32')
self.pid = np.array(self._pid, dtype='int32')
self.nids = np.array(self._nids, dtype='int32')
self.offt = np.array(self._offt, dtype='|U8')
self.x = np.array(self._x, dtype='float64')
self.g0 = np.array(self._g0, dtype='int32')
self.pin_flags = np.array(self._pin_flags, dtype='int32')
self.wa_offset = np.array(self._wa_offset, dtype='float64')
self.wb_offset = np.array(self._wb_offset, dtype='float64')
assert len(self.eid) == len(np.unique(self.eid))
isort = np.argsort(self.eid)
self.eid = self.eid[isort]
self.pid = self.pid[isort]
self.nids = self.nids[isort, :]
self.offt = self.offt[isort]
self.x = self.x[isort, :]
self.g0 = self.g0[isort]
self.pin_flags = self.pin_flags[isort, :]
self.wa_offset = self.wa_offset[isort, :]
self.wb_offset = self.wb_offset[isort, :]
self._eid = []
self._pid = []
self._nids = []
self._offt = []
self._x = []
self._g0 = []
self._pin_flags = []
self._wa_offset = []
self._wb_offset = []
self._sab_warping = []
self.is_current = True
def cross_reference(self, model):
"""does this do anything?"""
self.make_current()
def __len__(self):
"""returns the number of elements"""
return len(self.eid) + len(self._eid)
def repr_indent(self, indent=''):
self.make_current()
neids = len(self.eid)
if neids == 0:
return '%s%sv; nelements=%s' % (indent, self.card_name, neids)
msg = '%s%sv; nelements=%s:\n' % (indent, self.card_name, neids)
msg += '%s eid = %s\n' % (indent, self.eid)
upid = np.unique(self.pid)
if len(upid) == 1 and upid[0] == 0:
msg += '%s upid = %s\n' % (indent, upid)
else:
msg += '%s pid = %s\n' % (indent, self.pid)
#msg += ' nid =\n%s' % self.nid
return msg
def __repr__(self):
return self.repr_indent('')
class CBEAMv(BeamElement):
"""
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+=====+=====+=====+=====+==========+
| CBEAM | EID | PID | GA | GB | X1 | X2 | X3 | OFFT/BIT |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| | SA | SB | | | | | | |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
or
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+=====+=====+=====+=====+=====+=====+==========+
| CBEAM | EID | PID | GA | GB | G0 | | | OFFT/BIT |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
| | SA | SB | | | | | | |
+-------+-----+-----+-----+-----+-----+-----+-----+----------+
offt/bit are MSC specific fields
"""
card_name = 'CBEAM'
def add(self, eid, pid, nids, x, g0, offt='GGG', bit=None,
pin_flags=None, wa=None, wb=None, sa=0, sb=0, comment=''):
"""
Adds a CBEAM card
Parameters
----------
        eid : int
            element id
        pid : int
            property id
nids : List[int, int]
node ids; connected grid points at ends A and B
x : List[float, float, float]
Components of orientation vector, from GA, in the displacement
coordinate system at GA (default), or in the basic coordinate system
        g0 : int
            Alternate method to supply the orientation vector using grid
            point G0. The direction of the vector is from GA to G0; it is
            then transferred to End A
offt : str; default='GGG'
Offset vector interpretation flag
None : bit is active
bit : float; default=None
Built-in twist of the cross-sectional axes about the beam axis
at end B relative to end A.
For beam p-elements ONLY!
None : offt is active
pin_flags : List[int, int]; default=None
None : [0, 0]; don't release the DOFs
Pin Flag at End A/B. Releases the specified DOFs
wa / wb : List[float, float, float]
Components of offset vectors from the grid points to the end
points of the axis of the shear center
sa / sb : int; default=0
Scalar or grid point identification numbers for the ends A and B,
respectively. The degrees-of-freedom at these points are the
            warping variables d(theta)/dx. SA and SB cannot be specified for
beam p-elements
comment : str; default=''
a comment for the card
offt/bit are MSC specific fields
"""
if g0 is None:
g0 = -1
else:
x = [np.nan, np.nan, np.nan]
if pin_flags is None:
pin_flags = [0, 0]
        self.model.beams.add(eid)
self.is_current = False
self._eid.append(eid)
self._pid.append(pid)
self._nids.append(nids)
self._x.append(x)
self._g0.append(g0)
self._offt.append(offt)
self._wa_offset.append(wa)
self._wb_offset.append(wb)
self._sab_warping.append([sa, sb])
self._pin_flags.append(pin_flags)
#self._offset.append(wa_offset)
if comment:
self.comment[eid] = _format_comment(comment)
def add_card(self, card, comment=''):
"""
Adds a CBEAM card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
eid = integer(card, 1, 'eid')
pid = integer_or_blank(card, 2, 'pid', eid)
ga = integer(card, 3, 'ga')
gb = integer(card, 4, 'gb')
x, g0 = init_x_g0(card, eid)
offt, bit = init_offt_bit(card, eid)# offt doesn't exist in NX nastran
pin_flag_a = integer_or_blank(card, 9, 'pa', 0)
pin_flag_b = integer_or_blank(card, 10, 'pb', 0)
wa = np.array([double_or_blank(card, 11, 'w1a', 0.0),
double_or_blank(card, 12, 'w2a', 0.0),
double_or_blank(card, 13, 'w3a', 0.0)], 'float64')
wb = np.array([double_or_blank(card, 14, 'w1b', 0.0),
double_or_blank(card, 15, 'w2b', 0.0),
double_or_blank(card, 16, 'w3b', 0.0)], 'float64')
sa = integer_or_blank(card, 17, 'sa', 0)
sb = integer_or_blank(card, 18, 'sb', 0)
assert len(card) <= 19, 'len(CBEAM card) = %i\ncard=%s' % (len(card), card)
return self.add(eid, pid, [ga, gb], x, g0, offt, bit,
[pin_flag_a, pin_flag_b], wa, wb, sa, sb, comment=comment)
#def update(self, grid):
#"""functions like a dictionary"""
#nid = grid.nid
#add_card = self.check_if_current(eid, self.eid)
#if add_card:
#self.add(nid, grid.xyz, cp=grid.cp, cd=grid.cd, # add_cquad4
#ps=grid.ps, seid=grid.seid, comment=grid.comment)
#self.is_current = False
#else:
#inid = np.where(nid == self.nid)[0]
#self.nid[inid] = grid.nid
#self.xyz[inid] = grid.xyz
#self.cp[inid] = grid.cp
#self.cd[inid] = grid.cd
#self.ps[inid] = grid.ps
#self.seid[inid] = grid.seid
#self.comment[nid] = comment
#self.is_current = True # implicit
#def __iter__(self):
#pass
#def __next__(self):
#pass
#def __items__(self):
#pass
#def __keys__(self):
#pass
#def __values__(self):
#pass
#def __getitem__(self, i):
#"""this works on index"""
#self.make_current()
#eid = self.eid[i]
#return GRID(nid, self.xyz[i], cp=self.cp[i], cd=self.cd[i],
#ps=self.ps[i], seid=self.seid[i], comment=self.comment[nid])
#def __setitem__(self, i, value):
#pass
#def __delitem__(self, i):
#pass
@classmethod
def get_x_g0_defaults(cls, x, g0):
"""
X and G0 compete for the same fields, so the method exists to
make it easier to write the card
Returns
-------
x_g0 : varies
g0 : List[int, None, None]
x : List[float, float, float]
"""
if g0 is not None:
return (g0, None, None)
else:
#print('x =', self.x)
#print('g0 =', self.g0)
#x1 = set_blank_if_default(self.x[0], 0.0)
#x2 = set_blank_if_default(self.x[1], 0.0)
#x3 = set_blank_if_default(self.x[2], 0.0)
return list(x)
def write_card(self, size=8, is_double=False, bdf_file=None):
assert bdf_file is not None
self.make_current()
msg = ''
for eid, pid, nodes, x, g0, offt, pin_flags, wa_offset, wb_offset in zip(
self.eid, self.pid, self.nids, self.x, self.g0, self.offt, self.pin_flags, self.wa_offset, self.wb_offset):
x1, x2, x3 = self.get_x_g0_defaults(x, g0)
#pa = set_blank_if_default(self.pa, 0)
#pb = set_blank_if_default(self.pb, 0)
#w1a = set_blank_if_default(self.wa[0], 0.0)
#w2a = set_blank_if_default(self.wa[1], 0.0)
#w3a = set_blank_if_default(self.wa[2], 0.0)
#w1b = set_blank_if_default(self.wb[0], 0.0)
#w2b = set_blank_if_default(self.wb[1], 0.0)
#w3b = set_blank_if_default(self.wb[2], 0.0)
ga, gb = nodes
pin_flag_a, pin_flag_b = pin_flags
w1a, w2a, w3a = wa_offset
w1b, w2b, w3b = wb_offset
#sa = set_blank_if_default(self.sa, 0)
#sb = set_blank_if_default(self.sb, 0)
sa = sb = 0
#(x1, x2, x3) = self.get_x_g0_defaults()
# offt doesn't exist in NX nastran
offt = set_blank_if_default(offt, 'GGG')
list_fields = ['CBEAM', eid, pid, ga, gb, x1, x2, x3, offt,
pin_flag_a, pin_flag_b, w1a, w2a, w3a, w1b, w2b, w3b, sa, sb]
msg += self.comment[eid] + print_card_8(list_fields)
bdf_file.write(msg)
return msg
class Beams:
"""Stores CBEAM elements that exist in 3D space"""
def __init__(self, model):
self.model = model
self.cbeam = model.cbeam
self._eids = set()
def add(self, eid):
if eid not in self._eids:
self._eids.add(eid)
else:
raise RuntimeError('eid=%s is duplicated' % eid)
def write_card(self, size=8, is_double=False, bdf_file=None):
assert bdf_file is not None
if len(self.cbeam):
self.cbeam.write_card(size, is_double, bdf_file)
def make_current(self):
self.cbeam.make_current()
def __len__(self):
return len(self.cbeam)
def repr_indent(self, indent=''):
msg = '%s<Beams> : nelements=%s\n' % (indent, len(self))
msg += '%s CBEAM: %s\n' % (indent, len(self.cbeam))
return msg
def __repr__(self):
return self.repr_indent(indent='')
def init_offt_bit(card, eid):
"""offt doesn't exist in NX nastran"""
field8 = integer_double_string_or_blank(card, 8, 'field8')
if isinstance(field8, float):
offt = None
bit = field8
elif field8 is None:
offt = 'GGG' # default
bit = None
elif isinstance(field8, str):
bit = None
offt = field8
msg = 'invalid offt parameter of CBEAM...offt=%s' % offt
assert offt[0] in ['G', 'B', 'O', 'E'], msg
assert offt[1] in ['G', 'B', 'O', 'E'], msg
assert offt[2] in ['G', 'B', 'O', 'E'], msg
else:
msg = ('field8 on %s card is not a string(offt) or bit '
'(float)...field8=%s\n' % (card.field(0), field8))
raise RuntimeError("Card Instantiation: %s" % msg)
return offt, bit
```
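`CBEAMv` above (and the rod classes in the next file) all follow the same pattern: `add()` appends to cheap Python lists and flips `is_current`, while `make_current()` folds the pending lists into the numpy arrays on demand. A stripped-down sketch of that pattern, independent of pyNastran:

```python
import numpy as np

class DeferredIds:
    """minimal sketch of the add()/make_current() pattern used above"""
    def __init__(self):
        self.eid = np.array([], dtype='int32')
        self._eid = []  # pending ids, not yet folded into the numpy array
        self.is_current = True

    def add(self, eid):
        self._eid.append(eid)
        self.is_current = False

    def make_current(self):
        if not self.is_current:
            if len(self.eid):
                self.eid = np.hstack([self.eid, self._eid])
            else:
                self.eid = np.array(self._eid, dtype='int32')
            self.eid.sort()
            self._eid = []
            self.is_current = True

ids = DeferredIds()
for eidi in (3, 1, 2):
    ids.add(eidi)
ids.make_current()
print(ids.eid)  # [1 2 3]
```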
#### File: cards/elements/rods.py
```python
from collections import defaultdict
import numpy as np
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double_or_blank)
from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default
from pyNastran.bdf.cards.base_card import _format_comment
class Rods:
"""intializes the Rods"""
def __init__(self, model):
self.model = model
self.conrod = model.conrod
self.crod = model.crod
self.ctube = model.ctube
self._eids = set()
def add(self, eid):
if eid not in self._eids:
self._eids.add(eid)
else:
raise RuntimeError('eid=%s is duplicated' % eid)
def write_card(self, size=8, is_double=False, bdf_file=None):
assert bdf_file is not None
if len(self.conrod):
self.conrod.write_card(size, is_double, bdf_file)
if len(self.crod):
self.crod.write_card(size, is_double, bdf_file)
if len(self.ctube):
self.ctube.write_card(size, is_double, bdf_file)
def make_current(self):
self.conrod.make_current()
self.crod.make_current()
self.ctube.make_current()
def __len__(self):
return len(self.conrod) + len(self.crod) + len(self.ctube)
def repr_indent(self, indent=' '):
msg = '%s<Rods> : nelements=%s\n' % (indent, len(self))
msg += '%s CONROD: %s\n' % (indent, len(self.conrod))
msg += '%s CROD : %s\n' % (indent, len(self.crod))
msg += '%s CTUBE : %s\n' % (indent, len(self.ctube))
return msg
def __repr__(self):
return self.repr_indent(indent='')
class RodElement:
"""base class for CONROD, CROD, and CTUBE"""
card_name = ''
def check_if_current(self, eid, eids):
"""we split this up to reason about it easier"""
if self.is_current:
if eid in eids:
# card exists, so we use that slot
add_card = False
else:
add_card = True
else:
add_card = True
return add_card
def cross_reference(self, model):
"""does this do anything?"""
self.make_current()
def __len__(self):
"""returns the number of elements"""
return len(self.eid) + len(self._eid)
def repr_indent(self, indent=''):
self.make_current()
neids = len(self.eid)
if neids == 0:
return '%s%sv; nelements=%s' % (indent, self.card_name, neids)
msg = '%s%sv; nelements=%s\n' % (indent, self.card_name, neids)
msg += '%s eid = %s\n' % (indent, self.eid)
if hasattr(self, 'pid'):
upid = np.unique(self.pid)
if len(upid) == 1:
msg += '%s upid = %s\n' % (indent, upid)
else:
msg += '%s pid = %s\n' % (indent, self.pid)
else:
msg += '%s A = %s\n' % (indent, self.A)
msg += '%s j = %s\n' % (indent, self.j)
msg += '%s c = %s\n' % (indent, self.c)
msg += '%s nsm = %s\n' % (indent, self.nsm)
return msg
#umcid = np.unique(self.mcid)
#if len(umcid) == 1 and umcid[0] == 0:
#msg += ' umcid = %s\n' % umcid
#else:
#msg += ' umcid = %s\n' % umcid
#msg += ' mcid = %s\n' % self.mcid
#utheta = np.unique(self.theta)
#if len(utheta) == 1 and umcid[0] == 0:
#msg += ' utheta = %s\n' % utheta
#else:
#msg += ' theta = %s\n' % self.theta
#msg += ' is_theta = %s\n' % self.is_theta
#msg += ' nid =\n%s' % self.nid
#return msg
def __repr__(self):
return self.repr_indent(indent='')
class CONRODv(RodElement):
"""
+--------+-----+-----+----+-----+---+---+---+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+=====+=====+====+=====+===+===+===+=====+
| CONROD | EID | N1 | N2 | MID | A | J | C | NSM |
+--------+-----+-----+----+-----+---+---+---+-----+
"""
card_name = 'CONROD'
def __init__(self, model):
self.model = model
self.is_current = True
self.eid = np.array([], dtype='int32')
self.nids = np.array([], dtype='int32')
self.mid = np.array([], dtype='int32')
self.A = np.array([], dtype='float64')
self.j = np.array([], dtype='float64')
self.c = np.array([], dtype='float64')
self.nsm = np.array([], dtype='float64')
self._eid = []
self._nids = []
self._mid = []
self._A = []
self._j = []
self._c = []
self._nsm = []
self.comment = defaultdict(str)
def add(self, eid, mid, nids, A=0., j=0., c=0., nsm=0., comment=''):
"""
Creates a CONROD card
Parameters
----------
eid : int
element id
mid : int
material id
nids : List[int, int]
node ids
A : float
area
j : float; default=0.
polar moment of inertia
c : float; default=0.
stress factor
nsm : float; default=0.
non-structural mass per unit length
comment : str; default=''
a comment for the card
"""
self.model.rods.add(eid)
self.is_current = False
self._eid.append(eid)
self._nids.append(nids)
self._mid.append(mid)
self._A.append(A)
self._j.append(j)
self._c.append(c)
self._nsm.append(nsm)
if comment:
self.comment[eid] = _format_comment(comment)
def add_card(self, card, comment=''):
"""
Adds a CONROD card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
eid = integer(card, 1, 'eid')
nids = [integer(card, 2, 'n1'),
integer(card, 3, 'n2')]
mid = integer(card, 4, 'mid')
A = double_or_blank(card, 5, 'A', 0.0)
j = double_or_blank(card, 6, 'j', 0.0)
c = double_or_blank(card, 7, 'c', 0.0)
nsm = double_or_blank(card, 8, 'nsm', 0.0)
self.add(eid, mid, nids, A, j, c, nsm, comment=comment)
def write_card(self, size=8, is_double=False, bdf_file=None):
assert bdf_file is not None
self.make_current()
msg = ''
for eid, mid, nodes, A, j, c, nsm in zip(self.eid, self.mid,
self.nids, self.A, self.j, self.c, self.nsm):
j = set_blank_if_default(j, 0.0)
c = set_blank_if_default(c, 0.0)
nsm = set_blank_if_default(nsm, 0.0)
list_fields = ['CONROD', eid] + nodes.tolist() + [mid, A, j, c, nsm]
msgi = print_card_8(list_fields)
msg += self.comment[eid] + msgi.rstrip() + '\n'
bdf_file.write(msg)
return msg
def make_current(self):
"""creates an array of the elements"""
if not self.is_current:
if len(self.eid) > 0: # there are already elements in self.eid
self.eid = np.hstack([self.eid, self._eid])
                self.mid = np.hstack([self.mid, self._mid])
                self.nids = np.vstack([self.nids, self._nids])
self.A = np.hstack([self.A, self._A])
self.j = np.hstack([self.j, self._j])
self.c = np.hstack([self.c, self._c])
self.nsm = np.hstack([self.nsm, self._nsm])
# don't need to handle comments
else:
self.eid = np.array(self._eid, dtype='int32')
self.mid = np.array(self._mid, dtype='int32')
self.nids = np.array(self._nids, dtype='int32')
self.A = np.array(self._A, dtype='float64')
self.j = np.array(self._j, dtype='float64')
self.c = np.array(self._c, dtype='float64')
self.nsm = np.array(self._nsm, dtype='float64')
assert len(self.eid) == len(np.unique(self.eid))
self._eid = []
self._mid = []
self._nids = []
self._A = []
self._j = []
self._c = []
self._nsm = []
self.is_current = True
class CRODv(RodElement):
"""
+------+-----+-----+----+----+
| 1 | 2 | 3 | 4 | 5 |
+======+=====+=====+====+====+
| CROD | EID | PID | N1 | N2 |
+------+-----+-----+----+----+
"""
card_name = 'CROD'
def __init__(self, model):
self.model = model
self.is_current = True
self.eid = np.array([], dtype='int32')
self.pid = np.array([], dtype='int32')
self.nids = np.array([], dtype='int32')
self._eid = []
self._pid = []
self._nids = []
self._dofs = []
self.comment = defaultdict(str)
def add(self, eid, pid, nids, comment=''):
"""
Creates a CROD card
Parameters
----------
eid : int
element id
pid : int
property id (PROD)
nids : List[int, int]
node ids
comment : str; default=''
a comment for the card
"""
        self.model.rods.add(eid)
self.is_current = False
self._eid.append(eid)
self._pid.append(pid)
self._nids.append(nids)
if comment:
self.comment[eid] = _format_comment(comment)
def add_card(self, card, comment=''):
"""
Adds a CROD card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
eid = integer(card, 1, 'eid')
pid = integer_or_blank(card, 2, 'pid', eid)
nids = [integer(card, 3, 'n1'),
integer(card, 4, 'n2')]
assert len(card) == 5, 'len(CROD card) = %i\ncard=%s' % (len(card), str(card))
self.add(eid, pid, nids, comment=comment)
def write_card(self, size=8, is_double=False, bdf_file=None):
assert bdf_file is not None
self.make_current()
msg = ''
for eid, pid, nodes in zip(self.eid, self.pid, self.nids):
list_fields = ['CROD', eid, pid] + nodes.tolist()
msgi = print_card_8(list_fields)
msg += self.comment[eid] + msgi.rstrip() + '\n'
bdf_file.write(msg)
return msg
def make_current(self):
"""creates an array of the elements"""
if not self.is_current:
if len(self.eid) > 0: # there are already elements in self.eid
self.eid = np.hstack([self.eid, self._eid])
                self.pid = np.hstack([self.pid, self._pid])
                self.nids = np.vstack([self.nids, self._nids])
# don't need to handle comments
else:
self.eid = np.array(self._eid, dtype='int32')
self.pid = np.array(self._pid, dtype='int32')
self.nids = np.array(self._nids, dtype='int32')
assert len(self.eid) == len(np.unique(self.eid))
self._eid = []
self._pid = []
self._nids = []
self.is_current = True
class CTUBEv(RodElement):
"""
+--------+-----+-----+----+----+
| 1 | 2 | 3 | 4 | 5 |
+========+=====+=====+====+====+
    | CTUBE  | EID | PID | N1 | N2 |
+--------+-----+-----+----+----+
"""
card_name = 'CTUBE'
def __init__(self, model):
self.model = model
self.is_current = True
self.eid = np.array([], dtype='int32')
self.pid = np.array([], dtype='int32')
self.nids = np.array([], dtype='int32')
self._eid = []
self._pid = []
self._nids = []
self.comment = defaultdict(str)
def add(self, eid, pid, nids, comment=''):
"""
Creates a CTUBE card
Parameters
----------
eid : int
element id
pid : int
property id
nids : List[int, int]
node ids
comment : str; default=''
a comment for the card
"""
self.model.rods.add(eid)
self.is_current = False
self._eid.append(eid)
self._pid.append(pid)
self._nids.append(nids)
if comment:
self.comment[eid] = _format_comment(comment)
def add_card(self, card, comment=''):
"""
Adds a CTUBE card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
eid = integer(card, 1, 'eid')
pid = integer_or_blank(card, 2, 'pid', eid)
nids = [integer(card, 3, 'n1'),
integer(card, 4, 'n2')]
assert len(card) == 5, 'len(CTUBE card) = %i\ncard=%s' % (len(card), card)
self.add(eid, pid, nids, comment=comment)
def write_card(self, size=8, is_double=False, bdf_file=None):
assert bdf_file is not None
self.make_current()
msg = ''
for eid, pid, nodes in zip(self.eid, self.pid, self.nids):
list_fields = ['CTUBE', eid, pid, nodes[0], nodes[1]]
msgi = print_card_8(list_fields)
msg += self.comment[eid] + msgi.rstrip() + '\n'
bdf_file.write(msg)
return msg
def make_current(self):
"""creates an array of the elements"""
if not self.is_current:
if len(self.eid) > 0: # there are already elements in self.eid
self.eid = np.hstack([self.eid, self._eid])
                self.pid = np.hstack([self.pid, self._pid])
                self.nids = np.vstack([self.nids, self._nids])
# don't need to handle comments
else:
self.eid = np.array(self._eid, dtype='int32')
self.pid = np.array(self._pid, dtype='int32')
self.nids = np.array(self._nids, dtype='int32')
assert len(self.eid) == len(np.unique(self.eid))
self._eid = []
self._pid = []
self._nids = []
self.is_current = True
```
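`write_card` for these rod elements leans on `print_card_8` to produce small-field (8-character) Nastran cards. A rough standalone sketch of the idea; pyNastran's real `print_card_8` also handles float compression, blank fields, and continuation lines:

```python
def print_card_8_sketch(fields):
    """naive 8-character fixed-field writer; illustration only"""
    out = ''
    for field in fields:
        out += '%-8s' % ('' if field is None else field)
    return out.rstrip() + '\n'

print(print_card_8_sketch(['CONROD', 10, 2, 3, 100, 1.0]), end='')
# CONROD  10      2       3       100     1.0
```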
#### File: dev/vtk_examples/vector_field2.py
```python
import vtk
import numpy as np
from pyNastran.gui.gui_utils.vtk_utils import numpy_to_vtk
def main():
grid = vtk.vtkUnstructuredGrid()
grid_mapper = vtk.vtkDataSetMapper()
grid_mapper.SetInputData(grid)
#grid_mapper.SetInputData(grid)
nodes = np.array([
[0., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 2., 1.],
], dtype='float32')
points = vtk.vtkPoints()
points.SetNumberOfPoints(4)
    points_array = numpy_to_vtk(
        num_array=nodes,
        deep=True,
        array_type=vtk.VTK_FLOAT,
    )
    points.SetData(points_array)  # without this the vtkPoints stay empty
nelements = 1
grid.Allocate(nelements, 1000)
grid.SetPoints(points)
elem = vtk.vtkQuad()
pts = elem.GetPointIds()
pts.SetId(0, 0)
pts.SetId(1, 1)
pts.SetId(2, 2)
pts.SetId(3, 3)
grid.InsertNextCell(elem.GetCellType(), pts)
grid.Modified()
forces = np.array([
[0., 0.1, 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., .3],
], dtype='float32')
rend = vtk.vtkRenderer()
if 1:
maskPts = vtk.vtkMaskPoints()
maskPts.SetInputData(grid)
arrow = vtk.vtkArrowSource()
arrow.SetTipResolution(16)
arrow.SetTipLength(0.3)
arrow.SetTipRadius(0.1)
glyph = vtk.vtkGlyph3D()
glyph.SetSourceConnection(arrow.GetOutputPort())
glyph.SetInputConnection(maskPts.GetOutputPort())
glyph.SetVectorModeToUseNormal()
glyph.SetScaleFactor(1)
glyph.SetColorModeToColorByVector()
glyph.SetScaleModeToScaleByVector()
glyph.OrientOn()
glyph.Update()
glyph_mapper = vtk.vtkPolyDataMapper()
glyph_mapper.SetInputConnection(glyph.GetOutputPort())
glyph_mapper.SetScalarModeToUsePointFieldData()
glyph_mapper.SetColorModeToMapScalars()
glyph_mapper.ScalarVisibilityOn()
glyph_mapper.SelectColorArray('Elevation')
# Colour by scalars.
#glyph_mapper.SetScalarRange(scalarRangeElevation)
glyph_actor = vtk.vtkActor()
glyph_actor.SetMapper(glyph_mapper)
glyph_actor.RotateX(-45)
glyph_actor.RotateZ(45)
rend.AddViewProp(glyph_actor)
#rend.AddActor(glyph_actor)
geom_actor = vtk.vtkActor()
geom_actor.SetMapper(grid_mapper)
# ------------------------------------------------------------
# Create the RenderWindow, Renderer and Interactor
# ------------------------------------------------------------
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
renWin.AddRenderer(rend)
iren.SetRenderWindow(renWin)
# add actors
#rend.AddViewProp(geom_actor)
#rend.AddViewProp(edgeActor)
rend.AddActor(geom_actor)
#rend.AddViewProp(glyph_actor)
#rend.AddActor2D(scalarBar)
rend.SetBackground(0.7, 0.8, 1.0)
renWin.SetSize(800, 800)
renWin.Render()
iren.Start()
main()
```
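The `forces` array above is built but never attached to the grid, so the glyph filter has no vectors to orient the arrows by. A hedged sketch of one way to wire it up using the stock VTK numpy helper; the array name `'forces'` and the helper function are illustrative, not part of the file above:

```python
import vtk
from vtk.util.numpy_support import numpy_to_vtk

def attach_point_vectors(grid, forces, name='forces'):
    """attach an (nnodes, 3) numpy array as the active point vectors"""
    vtk_forces = numpy_to_vtk(num_array=forces, deep=True, array_type=vtk.VTK_FLOAT)
    vtk_forces.SetName(name)
    grid.GetPointData().SetVectors(vtk_forces)

# then, instead of glyph.SetVectorModeToUseNormal(), call
# glyph.SetVectorModeToUseVector() so each arrow is oriented and
# scaled by the force vector at its point
```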
#### File: gui_objects/test/test_settings.py
```python
import unittest
from numpy import allclose
from pyNastran.gui.gui_objects.utils import autotype_value
class TestSettings(unittest.TestCase):
def test_settings_bool(self):
value = autotype_value('true', bool)
assert value is True
value = autotype_value('false', bool)
assert value is False
value = autotype_value(True, bool)
assert value is True
value = autotype_value(False, bool)
assert value is False
def test_settings_int(self):
value = autotype_value('1', int)
assert allclose(value, 1)
value = autotype_value(1, int)
assert allclose(value, 1)
with self.assertRaises(ValueError):
value = autotype_value('4.2', int)
value = autotype_value(['1', '2', '3'], int)
assert allclose(value, [1, 2, 3])
value = autotype_value([1, 2, 3], int)
assert allclose(value, [1, 2, 3])
value = autotype_value(('1', '2', '3'), int)
assert allclose(value, (1, 2, 3))
value = autotype_value((1, 2, 3), int)
assert allclose(value, (1, 2, 3))
def test_settings_float(self):
value = autotype_value('1.1', float)
assert allclose(value, 1.1)
value = autotype_value(1.1, float)
assert allclose(value, 1.1)
value = autotype_value(['1.1', '2.2', '3.3'], float)
assert allclose(value, [1.1, 2.2, 3.3])
value = autotype_value([1.1, 2.2, 3.3], float)
assert allclose(value, [1.1, 2.2, 3.3])
value = autotype_value(('1.1', '2.2', '3.3'), float)
assert allclose(value, (1.1, 2.2, 3.3))
value = autotype_value((1.1, 2.2, 3.3), float)
assert allclose(value, (1.1, 2.2, 3.3))
if __name__ == '__main__':
unittest.main()
```
#### File: gui/qt_files/scalar_bar.py
```python
import numpy as np
import vtk
from pyNastran.gui.utils.colormaps import colormap_dict, RGB_MAPS, HSV_MAPS
class ScalarBar:
"""defines the ScalarBar at the side of the vtk panel"""
def set_visibility(self, is_visible):
"""show/hide the scalar bar"""
#print('is_visible=%s; is_shown=%s' % (is_visible, self.is_shown))
if is_visible:
self.VisibilityOn()
else:
self.VisibilityOff()
def VisibilityOn(self):
"""shows the scalar bar"""
if not self.is_shown:
self.scalar_bar.VisibilityOn()
self.scalar_bar.Modified()
self.is_shown = True
def VisibilityOff(self):
"""hides the scalar bar"""
if self.is_shown:
self.scalar_bar.VisibilityOff()
self.scalar_bar.Modified()
self.is_shown = False
def __init__(self, is_horizontal=False):
"""creates the scalar bar"""
self.scalar_bar = vtk.vtkScalarBarActor()
self.color_function = vtk.vtkColorTransferFunction()
self.color_function.SetNanColor(1., 1., 1.)
self.is_shown = True
        self.is_horizontal = is_horizontal
self.colormap = 'jet'
self.colormap_order = None
self.is_low_to_high = True
self.min_value = 10.
self.max_value = 20.
self.nlabels = 11
self.ncolors = 11
self.data_format = '%i'
#self.color_function.SetNanColor(0., 0., 0.)
#self.color_function.SetColorSpaceToLab()
#self.color_function.SetColorSpaceToRGB()
#self.scalar_bar.SetDragable(True)
#self.scalar_bar.SetPickable(True)
# blue - low
# red - high
drange = [10., 20.]
self.color_function.SetColorSpaceToHSV()
self.color_function.HSVWrapOff()
self.color_function.SetRange(*drange)
self.color_function.AddRGBPoint(drange[0], 0.0, 0.0, 1.0)
self.color_function.AddRGBPoint(drange[1], 1.0, 0.0, 0.0)
self.scalar_bar.SetTitle("Title1")
self.scalar_bar.SetLookupTable(self.color_function)
#self.scalar_bar.SetNanColor(0., 0., 0.) # RGB color - black
#self.scalar_bar.SetNanColor(1., 1., 1., 0.) # RGBA color - white
# old
#self.scalar_bar.SetHeight(0.9)
#self.scalar_bar.SetWidth(0.20) # the width is set first
#self.scalar_bar.SetPosition(0.77, 0.1)
if is_horizontal:
# put the scalar bar at the top
self.scalar_bar.SetOrientationToHorizontal()
width = 0.95
height = 0.15
x = (1 - width) / 2.
y = 1 - 0.02 - height
else:
# put the scalar bar at the right side
self.scalar_bar.SetOrientationToVertical()
width = 0.2
height = 0.9
x = 1 - 0.01 - width
y = (1 - height) / 2.
self.scalar_bar.SetPosition(x, y)
# the width is set first
# after the width is set, this is adjusted
self.scalar_bar.SetHeight(height)
self.scalar_bar.SetWidth(width)
self.scalar_bar.SetPosition(x, y)
prop_title = vtk.vtkTextProperty()
prop_title.SetFontFamilyToArial()
#prop_title.ItalicOff()
prop_title.BoldOn()
prop_title.ShadowOn()
prop_label = vtk.vtkTextProperty()
prop_label.BoldOff()
prop_label.ShadowOn()
#self.scalar_bar.SetTitleTextProperty(prop_title)
#self.scalar_bar.SetLabelTextProperty(prop_label)
self.scalar_bar.SetLabelFormat("%i")
# allows 0-1 to be nice number when ranging values (gotta pick something)
self.scalar_bar.SetNumberOfLabels(11)
self.scalar_bar.SetMaximumNumberOfColors(11)
#self.scalar_bar.VisibilityOff() # first load -> scalar bar off
#self.scalar_bar.ShadowOn()
#self.scalar_bar.RepositionableOn()
        self.VisibilityOff()  # keeps is_shown in sync with the actor
def update_color_function(self, min_value, max_value,
colormap='jet', colormap_order=None, is_low_to_high=True):
"""updates the color_function"""
# jet - HSV :)
# jet with RGB is red to blue (not a bad colormap, but not jet)
# viridis and plasma look good as HSV
# (not sure on the exact difference, but it probably should be
# RGB based on the others)
#
# viridis - RGB
# plasma - RGB
# magma - not HSV, RGB
# inferno - not HSV, RGB
if colormap_order is None:
if colormap in ['jet', 'jet2', 'blend', None] or colormap in HSV_MAPS:
colormap_order = 'hsv'
colormap = 'jet'
elif colormap in RGB_MAPS:
colormap_order = 'rgb'
else:
raise NotImplementedError(colormap)
update_color_function = (
colormap_order != self.colormap_order or
is_low_to_high != self.is_low_to_high or
min_value != self.min_value or
max_value != self.max_value or
            colormap != self.colormap  # this is last, so it's faster
)
if not update_color_function:
return
self.colormap = colormap
self.colormap_order = colormap_order
self.is_low_to_high = is_low_to_high
self.color_function.RemoveAllPoints()
if colormap_order == 'rgb':
self.color_function.SetColorSpaceToRGB()
elif colormap_order == 'hsv':
self.color_function.SetColorSpaceToHSV()
else:
raise NotImplementedError(colormap_order)
if colormap == 'jet':
if is_low_to_high:
self.color_function.AddRGBPoint(min_value, 0.0, 0.0, 1.0) # blue
self.color_function.AddRGBPoint(max_value, 1.0, 0.0, 0.0) # red
else:
self.color_function.AddRGBPoint(min_value, 1.0, 0.0, 0.0) # red
self.color_function.AddRGBPoint(max_value, 0.0, 0.0, 1.0) # blue
else:
if isinstance(colormap, str):
colormap = colormap_dict[colormap]
else:
assert isinstance(colormap[0][0], float), colormap
vals = np.linspace(min_value, max_value, num=len(colormap))
if is_low_to_high:
vals = vals[::-1]
for val, (red, green, blue) in zip(vals, colormap):
self.color_function.AddRGBPoint(val, red, green, blue)
def update_position(self, is_horizontal=True):
"""updates if the scalar bar is horizontal/vertical"""
update_position = is_horizontal is not self.is_horizontal
if not update_position:
return
if is_horizontal:
# put the scalar bar at the top
self.is_horizontal = True
self.scalar_bar.SetOrientationToHorizontal()
width = 0.95
height = 0.15
x = (1 - width) / 2.
y = 1 - 0.02 - height
else:
# put the scalar bar at the right side
self.is_horizontal = False
self.scalar_bar.SetOrientationToVertical()
width = 0.2
height = 0.9
x = 1 - 0.01 - width
y = (1 - height) / 2.
self.scalar_bar.SetHeight(height)
self.scalar_bar.SetWidth(width)
self.scalar_bar.SetPosition(x, y)
def update_title(self, title):
"""Updates the title. Pads the text so text isn't huge."""
nchars = len(title)
if nchars > 10:
padding = ''
else:
nspaces = (10 - nchars) // 2 + nchars % 2
padding = nspaces * ' '
self.scalar_bar.SetTitle('%s%s%s' % (padding, title, padding))
def update_data_format(self, min_value, max_value, data_format, nlabels=None, ncolors=None):
"""updates the data format and number of values/colors"""
data_format_display = data_format
if nlabels is None: # and labelsize is None:
nvalues = 11
if data_format == '%i':
data_format_display = '%.0f'
nvalues = int(max_value - min_value) + 1
# old
if nvalues < 7:
nvalues = 7
elif nvalues > 30:
nvalues = 11
# new
#if 0:
#text_prop = self.scalar_bar.GetLabelTextProperty()
##font_size = text_prop.GetFontSize()
#nvalues_max = 11
#nvalues_min = 7
#if nvalues > nvalues_max:
#font_size = 4
#nvalues = nvalues_max
#font_size = text_prop.SetFontSize(font_size)
#text_prop.Modified()
#elif nvalues < nvalues_min:
#nvalues = nvalues_min
#font_size = 12
#font_size = text_prop.SetFontSize(font_size)
#text_prop.Modified()
else:
if data_format == '%i':
data_format_display = '%.0f'
nvalues = nlabels
if ncolors is None:
ncolors = nvalues
assert data_format_display is not None, 'data_format is invalid = %r' % data_format_display
# the code explodes if these are too big
if nvalues > 100:
nvalues = 100
if ncolors > 100:
ncolors = 100
if ncolors < 2 and (max_value - min_value) > 0: # data_format == '%i' and
ncolors = 2
update_data_format = (
min_value != self.min_value or
max_value != self.max_value or
data_format != self.data_format or
#nlabels != self.nlabels or
ncolors != self.ncolors
)
if not update_data_format:
return
self.scalar_bar.SetLabelFormat(data_format_display)
#print('ncolors=%s nvalues=%s' % (ncolors, nvalues))
self.scalar_bar.SetNumberOfLabels(nvalues)
self.scalar_bar.SetMaximumNumberOfColors(ncolors)
def update(self, title, min_value, max_value, norm_value,
data_format,
nlabels=None, labelsize=None, ncolors=None, colormap='jet', colormap_order=None,
is_low_to_high=True, is_horizontal=True,
is_shown=True):
"""updates the scalar bar"""
#self.set_visibility(is_shown)
self.update_color_function(min_value, max_value,
colormap=colormap, colormap_order=colormap_order,
is_low_to_high=is_low_to_high)
self.update_position(is_horizontal=is_horizontal)
#if 0:
#self.color_function.SetRange(min_value, max_value)
#self.color_function.Update()
#scalar_range = self.grid.GetScalarRange()
#print('scalar_range', scalar_range)
#self.grid_mapper.SetScalarRange(scalar_range)
#self.grid_mapper.SetScalarRange(min_value, max_value)
#self.grid_mapper.SetScalarRange(max_value, min_value)
#self.grid_mapper.Update()
#self.scalar_bar.SetLookupTable(self.color_function)
self.update_title(title)
self.update_data_format(min_value, max_value, data_format,
nlabels=nlabels, ncolors=ncolors)
self.min_value = min_value
self.max_value = max_value
self.set_visibility(is_shown)
self.scalar_bar.Modified()
#def _is_int_result(data_format):
#if 'i' in data_format:
#return True
#return False
```
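The padding arithmetic in `update_title` pads short titles out to roughly ten characters, presumably so VTK does not scale the title font up to fill the bar. A standalone illustration of what it produces:

```python
def pad_title(title):
    """mirrors the padding logic in ScalarBar.update_title"""
    nchars = len(title)
    if nchars > 10:
        padding = ''
    else:
        nspaces = (10 - nchars) // 2 + nchars % 2
        padding = nspaces * ' '
    return '%s%s%s' % (padding, title, padding)

for title in ('Cp', 'Stress', 'Displacement'):
    print(repr(pad_title(title)))
# '    Cp    '
# '  Stress  '
# 'Displacement'
```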
#### File: utils/vtk/gui_utils.py
```python
def add_actors_to_gui(gui, actors, render=True):
"""adds multiple vtk actors"""
if not len(actors):
return
renderer = gui.rend
for actor in actors:
renderer.AddActor(actor)
if render:
renderer.Render()
def remove_actors_from_gui(gui, actors, render=True):
"""removes multiple vtk actors"""
if not len(actors):
return
renderer = gui.rend
for actor in actors:
renderer.RemoveActor(actor)
if render:
renderer.Render()
```
#### File: dev/pyyeti/locate.py
```python
import numpy as np
def findvals(m, v):
"""
Get partition vector for all occurrences of all values in `v` in
`m`.
Parameters
----------
m : array
Array to be searched.
v : array
Array of values to find in m.
Returns
-------
pv : 1d ndarray
Values are indexes into `m` of any value in `v`. Will be
empty if `m` has none of the values in `v`.
`m` is flattened to 1d before searching (using column-major
ordering 'F'). The values in `pv` correspond to::
[ 0 r ...
1 r+1
... ...
r-1 2r-1 ... r*c-1 ] where m is r x c
Examples
--------
>>> import numpy as np
>>> import locate
>>> m = np.array([[10, 20], [30, 20]])
>>> locate.findvals(m, 20)
array([2, 3])
>>> locate.findvals(m, 30)
array([1])
>>> locate.findvals(m, 100)
array([], dtype=int64)
"""
m = np.atleast_1d(m)
v = np.atleast_1d(v)
m = m.flatten(order='F')
v = v.flatten()
pv = np.zeros(len(m), dtype=bool)
for i in range(len(v)):
pv |= m == v[i]
return pv.nonzero()[0]
def find_subsequence(seq, subseq):
"""
Returns indices of where subseq occurs in seq. Both are 1d numpy
arrays.
Parameters
----------
seq : array
1D array to search in.
subseq : array
1D array to search for.
Returns
-------
pv : array
1D numpy array of indices:
- length will be equal to the number of occurrences of subseq
- the indices are to the start of each subseq in seq
Will be empty if subseq is not found in seq.
Examples
--------
>>> import locate
>>> a = [1, 2, 3, 4, 5, 6, 2, 3]
>>> sub = [2, 3]
>>> locate.find_subsequence(a,sub)
array([1, 6])
>>> locate.find_subsequence(a,[6, 5])
array([], dtype=int64)
"""
seq = np.asarray(seq).reshape(-1)
subseq = np.asarray(subseq).reshape(-1)
target = subseq @ subseq
candidates = np.where(np.correlate(seq, subseq,
mode='valid') == target)[0]
# some of the candidates entries may be false positives; check:
check = candidates[:, np.newaxis] + np.arange(len(subseq))
mask = np.all((np.take(seq, check) == subseq), axis=-1)
return candidates[mask]
def find_rows(matrix, row):
"""
Returns indices of where row occurs in matrix.
Parameters
----------
matrix : array
2d numpy array.
row : array
1d numpy array.
Returns
-------
pv : array
A 1d numpy array of row indices. Will be empty if row is not
found or if length(row) != cols(matrix).
Examples
--------
>>> import numpy as np
>>> import locate
>>> mat = np.array([[7, 3], [6, 8], [4, 0],
... [9, 2], [1, 5], [6, 8]])
>>> locate.find_rows(mat,np.array([1, 2]))
array([], dtype=int64)
>>> pv = locate.find_rows(mat,np.array([6, 8]))
>>> pv
array([1, 5])
>>> mat[pv,:]
array([[6, 8],
[6, 8]])
"""
(r1, c1) = np.shape(matrix)
c2 = len(row)
if c1 != c2:
return np.array([], dtype=int)
i = find_subsequence(matrix.flatten('C'), row)
pv = np.mod(i, c1) == 0
return i[pv] // c1
def get_intersection(D1, D2, keep=0):
"""
Get row intersection partition vectors between two matrices or
vectors.
Parameters
----------
D1 : array
1d or 2d array.
D2 : array
1d or 2d array.
keep : integer
0, 1 or 2:
- if 0, rows are only swapped in the larger matrix
- if 1, rows are only swapped in D2
- if 2, rows are only swapped in D1
Returns
-------
tuple: (pv1, pv2)
pv1 : array
Row index vector into D1.
pv2 : array
Row index vector into D2.
`pv1` and `pv2` are found such that:
::
D1[pv1] == D2[pv2]
(Note for matrices: M[i] == M[i, :])
For matrices, the number of columns in D1 and D2 must be equal to
get non-empty results.
Examples
--------
>>> import numpy as np
>>> import locate
>>> mat1 = np.array([[7, 3], [6, 8], [4, 0], [9, 2], [1, 5]])
>>> mat2 = np.array([[9, 2], [1, 5], [7, 3]])
>>> pv1, pv2 = locate.get_intersection(mat1, mat2)
>>> pv1
array([3, 4, 0])
>>> pv2
array([0, 1, 2])
>>> np.all(mat1[pv1] == mat2[pv2])
True
>>> locate.get_intersection(mat1, mat2, 1)
(array([0, 3, 4]), array([2, 0, 1]))
>>> locate.get_intersection(mat2, mat1, 2)
(array([2, 0, 1]), array([0, 3, 4]))
>>> locate.get_intersection(mat2, mat1)
(array([0, 1, 2]), array([3, 4, 0]))
>>> mat3 = np.array([[1,2,3]])
>>> locate.get_intersection(mat1, mat3)
(array([], dtype=int64), array([], dtype=int64))
"""
D1 = np.array(D1)
D2 = np.array(D2)
if D1.ndim == D2.ndim == 1:
c1 = c2 = 1
r1 = len(D1)
r2 = len(D2)
D1 = np.atleast_2d(D1)
D2 = np.atleast_2d(D2)
D1 = D1.T
D2 = D2.T
else:
D1 = np.atleast_2d(D1)
D2 = np.atleast_2d(D2)
(r1, c1) = np.shape(D1)
(r2, c2) = np.shape(D2)
if c1 != c2:
return np.array([], dtype=int), np.array([], dtype=int)
# loop over the smaller one; index into 'd2' can be in any order
if r1 <= r2:
r = r1
d1 = D1
d2 = D2
switch = False
else:
r = r2
d1 = D2
d2 = D1
switch = True
pv1 = np.zeros(r, dtype=int)
pv2 = np.zeros(r, dtype=int)
j = 0
for i in range(r):
l = find_rows(d2, d1[i])
if l.size > 0:
pv1[j] = i
pv2[j] = l[0]
j += 1
if j == 0:
return np.array([], dtype=int), np.array([], dtype=int)
if switch:
t = pv1[:j]
pv1 = pv2[:j]
pv2 = t
else:
pv1 = pv1[:j]
pv2 = pv2[:j]
if switch and keep == 1:
si = pv1.argsort()
return pv1.take(si), pv2.take(si)
elif not switch and keep == 2:
si = pv2.argsort()
return pv1.take(si), pv2.take(si)
return pv1, pv2
def find2zo(pv, n):
"""
Return a True/False vector of length n where the True values are
located according to pv.
Example:
>>> import numpy as np
>>> import locate
>>> pv = np.array([0,3,5])
>>> locate.find2zo(pv,8)
array([ True, False, False, True, False, True, False, False], dtype=bool)
"""
tf = np.zeros(n, dtype=bool)
tf[pv] = True
return tf
def flippv(pv, n):
"""Flips the meaning of an index partition vector.
Parameters
----------
pv : ndarray
The index partition to flip.
n : integer
The length of the dimension to partition.
Returns
-------
notpv : ndarray
The complement of pv.
Example:
>>> import numpy as np
>>> import locate
>>> pv = np.array([0,3,5])
>>> locate.flippv(pv,8)
array([1, 2, 4, 6, 7])
"""
tf = np.ones(n, dtype=bool)
tf[pv] = False
return tf.nonzero()[0]
def list_intersection(L1, L2):
"""
Get list intersection partition vectors between two lists
Parameters
----------
L1 : list
List 1; the output vectors maintain the order of `L1`.
    L2 : list
        List 2.
Returns
-------
tuple: (pv1, pv2)
pv1 : ndarray. Index vector into L1.
pv2 : ndarray. Index vector into L2.
`pv1` and `pv2` are found such that:
[L1[i] for i in pv1] == [L2[i] for i in pv2]
Examples
--------
>>> import locate
>>> pv1, pv2 = locate.list_intersection(['a', 3, 'z', 0],
... [0, 'z', 1, 'a'])
>>> pv1
array([0, 2, 3])
>>> pv2
array([3, 1, 0])
>>> locate.list_intersection(['a', 'b'], [1, 2])
(array([], dtype=int64), array([], dtype=int64))
"""
inters = set(L1) & set(L2)
r = len(inters)
if r == 0:
return np.array([], dtype=int), np.array([], dtype=int)
pv1 = np.zeros(r, dtype=int)
pv2 = np.zeros(r, dtype=int)
for j, item in enumerate(inters):
pv1[j] = L1.index(item)
pv2[j] = L2.index(item)
si = pv1.argsort()
return pv1[si], pv2[si]
if __name__ == '__main__':
mat1 = np.array([[7, 3], [6, 8], [4, 0], [9, 2], [1, 5]])
mat2 = np.array([[9, 2], [1, 5], [7, 3]])
pv1, pv2 = get_intersection(mat1, mat2)
assert np.all(np.array([3, 4, 0]) == pv1)
assert np.all(np.array([0, 1, 2]) == pv2)
pv1, pv2 = get_intersection(mat1, mat2, 1)
assert np.all(np.array([0, 3, 4]) == pv1)
assert np.all(np.array([2, 0, 1]) == pv2)
pv1, pv2 = get_intersection(mat1, mat2, 2)
assert np.all(np.array([3, 4, 0]) == pv1)
assert np.all(np.array([0, 1, 2]) == pv2)
pv = np.array([0, 3, 5])
tf = find2zo(pv, 8)
assert np.all(np.array([True, False, False, True, False, True,
False, False]) == tf)
import doctest
doctest.testmod()
```
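A minimal sketch tying the helpers above together (it assumes the module is importable as `locate`, as in the doctests):
```python
import numpy as np
import locate

a = np.array([[7, 3], [6, 8], [4, 0], [9, 2], [1, 5]])
b = np.array([[9, 2], [1, 5], [7, 3]])

# rows of ``a`` that also appear in ``b`` ...
pv1, pv2 = locate.get_intersection(a, b)
assert np.all(a[pv1] == b[pv2])

# ... and the complementary rows of ``a``
missing = locate.flippv(pv1, a.shape[0])
print(a[missing])        # [[6 8], [4 0]]
```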
#### File: pyNastran/op2/export_to_vtk.py
```python
from pyNastran.bdf.bdf import BDF
from pyNastran.op2.op2 import OP2
from numpy import zeros, searchsorted, arange
#def pack_nodes(fmt, data):
#return ''
def pack_int_array(fmt, data):
return ' '.join([str(val) for val in data]) + '\n'
def pack_float_1d_array(fmt, data):
return ' '.join([str(val) for val in data.ravel()]) + '\n'
def pack_float_3d_array(fmt, data):
msg = ''
for datai in data[0, :, :]:
msgi = ''
for dataii in datai:
msgi += '%s ' % dataii
msg += msgi[:-1] + '\n'
return msg #+ '\n\n'
def pack_float_2d_array(fmt, data):
msg = ''
for datai in data:
msgi = ''
for dataii in datai:
msgi += '%s ' % dataii
msg += msgi[:-1] + '\n'
return msg #+ '\n'
#def pack(fmt, data):
# return ''
def export_to_vtk(model):
    """exports a BDF/OP2 pair sharing the base name ``model`` to VTK"""
    bdf_filename = model + '.bdf'
    op2_filename = model + '.op2'
    vtk_filename = model + '.vtk'
    export_to_vtk_filename(bdf_filename, op2_filename, vtk_filename)
    # ``model`` is a base filename string here, so it has no .log attribute
    print('finished exporting %s' % vtk_filename)
def export_to_vtk_filename(bdf_filename, op2_filename, vtk_filename, debug=False, log=None):
with open(vtk_filename, 'w') as vtk_file:
vtk_file.write('# vtk DataFile Version 3.1\n')
vtk_file.write('created by pyNastran\n')
#vtk_file.write('BINARY\n')
vtk_file.write('ASCII\n')
vtk_file.write('DATASET UNSTRUCTURED_GRID\n')
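        # map Nastran element types to VTK cell-type ids
        # (3=line, 5=triangle, 9=quad, 10=tetra, 12=hexahedron, 13=wedge,
        #  14=pyramid, 22=quadratic triangle)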
etype_map = {
# line
'CDAMP1' : 3,
'CDAMP2' : 3,
'CDAMP3' : 3,
'CDAMP4' : 3,
'CELAS1' : 3,
'CELAS2' : 3,
'CELAS3' : 3,
'CELAS4' : 3,
'CBAR' : 3,
'CBEAM' : 3,
'CROD' : 3,
'CONROD' : 3,
'CTUBE' : 3,
'CTRIA3' : 5, # triangle
'CQUAD4' : 9, # quad
'CSHEAR' : 9, # quad
# quadratic
'CTRIA6' : 22, # quadratic triangle
#'CQUAD8' : 23/28/30,
'CTETRA' : 10,
'CPENTA' : 13, # wedge
'CPYRAM' : 14,
'CHEXA' : 12, # hex
# quadratic solids
#'CTETRA' : 64,
#'CPENTA' : 65, # wedge
#'CPYRAM' : 66,
#'CHEXA' : 67, # hex
}
bdf = BDF(debug=debug, log=log)
bdf.read_bdf(bdf_filename)
op2 = OP2(debug=debug, log=log)
op2.read_op2(op2_filename)
out = bdf.get_card_ids_by_card_types()
#print('cards = [', ', '.join(sorted(out.keys())), ']')
grids = sorted(out['GRID'])
spoint = sorted(out['SPOINT'])
epoint = sorted(out['EPOINT'])
ngrid = len(grids)
nspoint = len(spoint)
nepoint = len(epoint)
nnodes = ngrid + nspoint + nepoint
ncrod = len(out['CROD'])
nconrod = len(out['CONROD'])
nctube = len(out['CTUBE'])
ncbeam = len(out['CBEAM'])
ncbar = len(out['CBAR'])
nline = ncrod + nconrod + nctube + ncbeam + ncbar
ncelas1 = len(out['CELAS1'])
ncelas2 = len(out['CELAS2'])
ncelas3 = len(out['CELAS3'])
ncelas4 = len(out['CELAS4'])
ncdamp1 = len(out['CDAMP1'])
ncdamp2 = len(out['CDAMP2'])
ncdamp3 = len(out['CDAMP3'])
ncdamp4 = len(out['CDAMP4'])
n0d = (ncelas1 + ncelas2 + ncelas3 + ncelas4 +
ncdamp1 + ncdamp2 + ncdamp3 + ncdamp4)
nctria3 = len(out['CTRIA3'])
ncquad4 = len(out['CQUAD4'])
nctria6 = len(out['CTRIA6'])
ncquad8 = len(out['CQUAD8'])
ncshear = len(out['CSHEAR'])
nshell = nctria3 + ncquad4 + nctria6 + ncquad8 + ncshear
nctetra4 = len(out['CTETRA'])
ncpyram5 = len(out['CPYRAM'])
ncpenta6 = len(out['CPENTA'])
nchexa8 = len(out['CHEXA'])
nctetra10 = 0
ncpyram8 = 0
ncpenta15 = 0
nchexa20 = 0
nsolid = (nctetra4 + ncpyram5 + ncpenta6 + nchexa8 +
nctetra10 + ncpyram8 + ncpenta15 + nchexa20)
#nelements = n0d + nline + nshell + nsolid
nelements = 0
etypes = [
'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4',
'CROD', 'CONROD', 'CTUBE',
'CBAR', 'CBEAM',
'CFAST', 'CBUSH', 'CBUSH1D', 'CBUSH2D',
'CTRIA3', 'CQUAD4', 'CTRIA6', 'CQUAD8', 'CSHEAR',
'CTETRA', 'CPENTA', 'CPYRAM', 'CHEXA',
]
assert len(etypes) == len(set(etypes)), 'there are duplicate etypes'
for etype in etypes:
if etype in out:
ne = len(out[etype])
nelements += ne
nproperties = nelements
bdf_nelements = bdf.nelements
# SPOINT & EPOINT are implicitly defined
xyz_cid0 = zeros((nnodes, 3), dtype='float32')
nids = zeros(nnodes, dtype='float32')
for i, nid in enumerate(grids):
xyz_cid0[i, :] = bdf.nodes[nid].get_position()
nids[:ngrid] = grids
        # the enumerate loop above ends with i == ngrid - 1, so slice from
        # ngrid (not i) to avoid overwriting the last GRID id
        if nspoint:
            nids[ngrid:ngrid + nspoint] = spoint
        if nepoint:
            nids[ngrid + nspoint:] = epoint
nid_fmt = '%ii' % nnodes
xyz_fmt = '%ii' % (nnodes * 3)
vtk_file.write('POINTS %i float\n' % nnodes)
vtk_file.write(pack_float_2d_array(xyz_fmt, xyz_cid0))
nelements = n0d + nline + nshell + nsolid
nmaterials = nelements
eid_fmt = '%ii' % nelements
eids = zeros(nelements, dtype='int32')
cell_types = zeros(nelements, dtype='int32')
pids = zeros(nelements, dtype='int32')
mids = zeros(nelements, dtype='int32')
# we'll add 1 to the slot count of each
# so for a single CROD, it has 2 nodes and 1 extra value (to indicate it's a line)
# for a total of 3
nline_slots = nline * 3
nshell_slots = 4 * nctria3 + 5 * (ncquad4 + ncshear) + 7 * nctria6 + 9 * ncquad8
nsolid_slots = 5 * nctetra4 + 6 * ncpyram5 + 7 * ncpenta6 + 9 * nchexa8
bdf.log.debug('nline=%s nshell=%s nsolid=%s' % (nline, nshell, nsolid))
assert nelements == bdf_nelements, 'nelements=%s bdf.nelements=%s card_count=\n%s' % (
nelements, bdf_nelements, bdf.card_count)
nelements_slots = nline_slots + nshell_slots + nsolid_slots
i = 0
vtk_file.write('CELLS %i %i\n' % (nelements, nelements_slots))
for eid, elem in sorted(bdf.elements.items()):
etype = etype_map[elem.type]
nids2 = searchsorted(nids, elem.node_ids)
nnodesi = len(nids2)
vtk_file.write('%i %s\n' % (nnodesi, str(nids2)[1:-1]))
if elem.type in ['CTETRA', 'CPENTA', 'CHEXA', 'CPYRAM', 'CBEAM', 'CROD', 'CBAR']:
pid = elem.Pid()
mid = elem.Mid()
elif elem.type in ['CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CBUSH', 'CFAST']:
pid = elem.Pid()
mid = 0
elif elem.type in ['CQUAD4', 'CQUAD8', 'CQUADX', 'CQUADX8', 'CQUAD',
'CTRIA3', 'CTRIA6', 'CTRIAX', 'CTRIAX6', 'CSHEAR']:
pid = elem.Pid()
prop = elem.pid_ref
if prop.type in ['PCOMP', 'PCOMPG']:
mid = prop.Mid(0)
elif prop.type in ['PSHELL']:
mid = prop.Mid1()
elif prop.type in ['PSHEAR']:
mid = prop.Mid()
else:
raise NotImplementedError(prop)
elif elem.type in ['CONROD']:
pid = 0
mid = elem.Mid()
else:
raise NotImplementedError(elem)
eids[i] = eid
pids[i] = pid
mids[i] = mid
cell_types[i] = etype
i += 1
assert nelements == bdf_nelements, 'i=%s nelements=%s bdf.nelements=%s' % (i, nelements, bdf_nelements)
#vtk_file.write('\n')
vtk_file.write('CELL_TYPES %i\n' % nelements)
vtk_file.write(pack_int_array(eid_fmt, cell_types))
vtk_file.write('\n')
vtk_file.write('POINT_DATA %i\n' % nnodes)
vtk_file.write('NodeID %i float\n' % nnodes)
vtk_file.write(pack_int_array(nid_fmt, nids))
fmt = '%si' % nelements
if nelements:
vtk_file.write('ElementID %i float\n' % nelements)
vtk_file.write(pack_int_array(eid_fmt, eids))
if nproperties:
vtk_file.write('PropertyID %i float\n' % nproperties)
vtk_file.write(pack_int_array(eid_fmt, pids))
if nmaterials:
vtk_file.write('MaterialID %i float\n' % nmaterials)
vtk_file.write(pack_int_array(eid_fmt, mids))
nodal_cases = [op2.eigenvectors, op2.displacements, op2.velocities, op2.accelerations]
fmt = '%sf' % (nnodes * 6)
for cases in nodal_cases:
keys = list(cases.keys())
if not keys:
continue
key0 = keys[0]
#print(key0)
node_ids = cases[key0].node_gridtype[:, 0]
if nnodes == len(node_ids):
# every node exists
i = arange(nnodes)
ni = nnodes
else:
# node_ids is a subset of nids
i = searchsorted(nids, node_ids)
ni = len(i)
names = ['T1', 'T2', 'T3', 'R1', 'R2', 'R3']
for isubcase, case in sorted(cases.items()):
if case.is_real:
#if i is None:
#data = case.data
#ni = nnodes
#else:
#data = zeros((nnodes, 6), dtype='float32')
#data[:, i, :] = case.data
data = case.data[:, i, :]
ntimes = case.data.shape[0]
case_type = case.__class__.__name__
for itime in range(ntimes):
if 0:
for icol, name in enumerate(names):
title = '%s_%s_isubcase=%s_itime=%s' % (case_type, name, isubcase, itime)
vtk_file.write('SCALARS %s float\n' % title)
vtk_file.write('LOOKUP_TABLE default\n')
#datai = data[itime, i, icol]
vtk_file.write(pack_float_1d_array(fmt, data[itime, i, icol]))
if 1:
title = '%s_isubcase=%s_itime=%s' % (case_type, isubcase, itime)
#FIELD RealDisplacementArray_FIELD_isubcase=1_itime=0 6
#t1 1 72 float
#0.00764469 0.00762899 ...
vtk_file.write('FIELD %s 6\n' % title)
for icol, name in enumerate(names):
vtk_file.write('%s 1 %s float\n' % (name, ni))
                            datai = case.data[itime, i, icol]
                            vtk_file.write(pack_float_1d_array(fmt, datai))
if 0:
title = '%s_FIELD_isubcase=%s_itime=%s' % (case_type, isubcase, itime)
vtk_file.write('FIELD %s 6 %i float\n' % (title, ni))
vtk_file.write('LOOKUP_TABLE default\n')
vtk_file.write(pack_float_2d_array(fmt, data[itime, i, :]))
#CELLS 217 1039
```
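A minimal call sketch (the file names are hypothetical; the module path follows the file header above):
```python
from pyNastran.op2.export_to_vtk import export_to_vtk_filename

# hypothetical input files; a matching .bdf/.op2 pair must exist on disk
export_to_vtk_filename('solid_bending.bdf', 'solid_bending.op2',
                       'solid_bending.vtk')
```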
#### File: op2/op2_interface/random_results.py
```python
class RandomObjects:
prefix = ''
postfix = ''
def __init__(self):
self.displacements = {}
self.velocities = {}
self.accelerations = {}
self.load_vectors = {}
self.spc_forces = {}
self.mpc_forces = {}
self.crod_force = {}
self.conrod_force = {}
self.ctube_force = {}
self.cbar_force = {}
self.cbeam_force = {}
self.cbush_stress = {}
self.cbush_strain = {}
self.crod_stress = {}
self.conrod_stress = {}
self.ctube_stress = {}
self.cbar_stress = {}
self.cbeam_stress = {}
self.crod_strain = {}
self.conrod_strain = {}
self.ctube_strain = {}
self.cbar_strain = {}
self.cbeam_strain = {}
self.ctetra_strain = {}
self.cpenta_strain = {}
self.chexa_strain = {}
self.ctetra_stress = {}
self.cpenta_stress = {}
self.chexa_stress = {}
self.celas1_stress = {}
self.celas2_stress = {}
self.celas3_stress = {}
self.celas4_stress = {}
self.celas1_strain = {}
self.celas2_strain = {}
self.celas3_strain = {}
self.celas4_strain = {}
self.celas1_force = {}
self.celas2_force = {}
self.celas3_force = {}
self.celas4_force = {}
self.ctria3_force = {}
self.ctria6_force = {}
self.ctriar_force = {}
self.cquad4_force = {}
self.cquad8_force = {}
self.cquadr_force = {}
self.ctria3_stress = {}
self.ctria6_stress = {}
self.cquad4_stress = {}
self.cquad8_stress = {}
self.cquadr_stress = {}
self.ctriar_stress = {}
self.ctria3_strain = {}
self.ctria6_strain = {}
self.cquad4_strain = {}
self.cquad8_strain = {}
self.cquadr_strain = {}
self.ctriar_strain = {}
self.cbend_stress = {}
self.cbend_strain = {}
self.cbend_force = {}
self.cshear_stress = {}
self.cshear_strain = {}
self.cshear_force = {}
self.cbush_force = {}
self.cdamp1_force = {}
self.cdamp2_force = {}
self.cdamp3_force = {}
self.cdamp4_force = {}
self.cvisc_force = {}
self.cquad4_composite_stress = {}
self.cquad8_composite_stress = {}
self.cquadr_composite_stress = {}
self.ctria3_composite_stress = {}
self.ctria6_composite_stress = {}
self.ctriar_composite_stress = {}
self.cquad4_composite_strain = {}
self.cquad8_composite_strain = {}
self.cquadr_composite_strain = {}
self.ctria3_composite_strain = {}
self.ctria6_composite_strain = {}
self.ctriar_composite_strain = {}
def get_table_types(self):
tables = [
'displacements', 'velocities', 'accelerations',
'load_vectors', 'spc_forces', 'mpc_forces',
'celas1_force', 'celas2_force', 'celas3_force', 'celas4_force',
'crod_force', 'conrod_force', 'ctube_force',
'cbar_force', 'cbeam_force',
'cquad4_force', 'cquad8_force', 'cquadr_force',
'ctria3_force', 'ctria6_force', 'ctriar_force',
'celas1_stress', 'celas2_stress', 'celas3_stress', 'celas4_stress',
'crod_stress', 'conrod_stress', 'ctube_stress',
'cbar_stress', 'cbeam_stress',
'ctria3_stress', 'ctriar_stress', 'ctria6_stress',
'cquadr_stress', 'cquad4_stress', 'cquad8_stress',
'ctetra_stress', 'cpenta_stress', 'chexa_stress',
'celas1_strain', 'celas2_strain', 'celas3_strain', 'celas4_strain',
'crod_strain', 'conrod_strain', 'ctube_strain',
'cbar_strain', 'cbeam_strain',
'ctria3_strain', 'ctriar_strain', 'ctria6_strain',
'cquadr_strain', 'cquad4_strain', 'cquad8_strain',
'ctetra_strain', 'cpenta_strain', 'chexa_strain',
'cquad4_composite_stress', 'cquad8_composite_stress', 'cquadr_composite_stress',
'ctria3_composite_stress', 'ctria6_composite_stress', 'ctriar_composite_stress',
'cquad4_composite_strain', 'cquad8_composite_strain', 'cquadr_composite_strain',
'ctria3_composite_strain', 'ctria6_composite_strain', 'ctriar_composite_strain',
'cbend_stress', 'cbend_strain', 'cbend_force',
'cbush_stress', 'cbush_strain',
'cshear_stress', 'cshear_strain', 'cshear_force',
'cbush_force',
'cdamp1_force', 'cdamp2_force', 'cdamp3_force', 'cdamp4_force',
'cvisc_force',
]
return [self.prefix + table + self.postfix for table in tables]
class AutoCorrelationObjects(RandomObjects):
"""storage class for the ATO objects"""
prefix = 'ato.'
#postfix = ''
class PowerSpectralDensityObjects(RandomObjects):
"""storage class for the PSD objects"""
prefix = 'psd.'
#postfix = ''
class RootMeansSquareObjects(RandomObjects):
"""storage class for the RMS objects"""
prefix = 'rms.'
#postfix = ''
class CumulativeRootMeansSquareObjects(RandomObjects):
"""storage class for the CRMS objects"""
prefix = 'crm.'
#postfix = ''
class NumberOfCrossingsObjects(RandomObjects):
"""storage class for the NO objects"""
prefix = 'no.'
#postfix = ''
class RAECONS:
"""storage class for the RAECONS objects"""
def __init__(self):
self.ctria3_strain = {}
self.cquad4_strain = {}
self.chexa_strain = {}
def get_table_types(self):
tables = [
'chexa_strain',
'ctria3_strain', 'cquad4_strain',
]
return ['RAECONS.' + table for table in tables]
class RASCONS:
"""storage class for the RASCONS objects"""
def __init__(self):
self.ctetra_stress = {}
self.cpenta_stress = {}
self.chexa_stress = {}
self.ctetra_strain = {}
self.cpenta_strain = {}
self.chexa_strain = {}
self.ctria3_stress = {}
self.ctria6_stress = {}
self.cquad4_stress = {}
self.cquad8_stress = {}
self.cquadr_stress = {}
self.ctriar_stress = {}
self.ctria3_strain = {}
self.ctria6_strain = {}
self.cquad4_strain = {}
self.cquad8_strain = {}
self.cquadr_strain = {}
self.ctriar_strain = {}
def get_table_types(self):
tables = [
# OES - isotropic CTRIA3/CQUAD4 stress
'ctria3_stress', 'ctriar_stress', 'ctria6_stress',
'cquadr_stress', 'cquad4_stress', 'cquad8_stress',
# OES - isotropic CTRIA3/CQUAD4 strain
'ctria3_strain', 'ctriar_strain', 'ctria6_strain',
'cquadr_strain', 'cquad4_strain', 'cquad8_strain',
'ctetra_stress', 'chexa_stress', 'cpenta_stress',
'ctetra_strain', 'chexa_strain', 'cpenta_strain',
]
return ['RASCONS.' + table for table in tables]
class RAPCONS:
"""storage class for the RAPCONS objects"""
def __init__(self):
self.cquad4_composite_stress = {}
self.cquad8_composite_stress = {}
self.cquadr_composite_stress = {}
self.ctria3_composite_stress = {}
self.ctria6_composite_stress = {}
self.ctriar_composite_stress = {}
def get_table_types(self):
tables = [
'cquad4_composite_stress',
'cquad8_composite_stress',
'cquadr_composite_stress',
'ctria3_composite_stress',
'ctria6_composite_stress',
'ctriar_composite_stress',
#'cquad4_composite_strain',
#'cquad8_composite_strain',
#'cquadr_composite_strain',
#'ctria3_composite_strain',
#'ctria6_composite_strain',
#'ctriar_composite_strain',
]
return ['RAPCONS.' + table for table in tables]
class RAPEATC:
"""storage class for the RAPEATC objects"""
def __init__(self):
self.cquad4_composite_stress = {}
self.cquad8_composite_stress = {}
self.cquadr_composite_stress = {}
self.ctria3_composite_stress = {}
self.ctria6_composite_stress = {}
self.ctriar_composite_stress = {}
def get_table_types(self):
tables = [
'cquad4_composite_stress',
'cquad8_composite_stress',
'cquadr_composite_stress',
'ctria3_composite_stress',
'ctria6_composite_stress',
'ctriar_composite_stress',
#'cquad4_composite_strain',
#'cquad8_composite_strain',
#'cquadr_composite_strain',
#'ctria3_composite_strain',
#'ctria6_composite_strain',
#'ctriar_composite_strain',
]
return ['RAPEATC.' + table for table in tables]
class RAFCONS:
"""storage class for the RAFCONS objects"""
def __init__(self):
self.cbar_force = {}
self.cquad4_force = {}
self.cbush_force = {}
def get_table_types(self):
tables = [
'cbar_force',
'cquad4_force',
'cbush_force',
]
return ['RAFCONS.' + table for table in tables]
class RAGCONS:
"""storage class for the RAGCONS objects"""
def __init__(self):
self.grid_point_forces = {}
def get_table_types(self):
tables = [
'grid_point_forces',
]
return ['RAGCONS.' + table for table in tables]
class RAGEATC:
"""storage class for the RAGEATC objects"""
def __init__(self):
self.grid_point_forces = {}
def get_table_types(self):
tables = [
'grid_point_forces',
]
return ['RAGEATC.' + table for table in tables]
class RANCONS:
"""storage class for the RANCONS objects"""
def __init__(self):
self.cbar_strain_energy = {}
self.cbush_strain_energy = {}
self.chexa_strain_energy = {}
self.ctria3_strain_energy = {}
self.cquad4_strain_energy = {}
def get_table_types(self):
tables = [
'cbar_strain_energy', 'cbush_strain_energy',
'chexa_strain_energy',
'ctria3_strain_energy', 'cquad4_strain_energy',
]
return ['RANCONS.' + table for table in tables]
class RADEFFM:
"""storage class for the RADEFFM objects"""
def __init__(self):
self.eigenvectors = {}
def get_table_types(self):
tables = [
'eigenvectors',
]
return ['RADEFFM.' + table for table in tables]
class RADCONS:
def __init__(self):
self.eigenvectors = {}
def get_table_types(self):
tables = [
'eigenvectors',
]
return ['RADCONS.' + table for table in tables]
class RADEATC:
"""storage class for the RADEATC objects"""
def __init__(self):
self.eigenvectors = {}
def get_table_types(self):
tables = [
'eigenvectors',
]
return ['RADEATC.' + table for table in tables]
class RANEATC:
"""storage class for the RANEATC objects"""
def __init__(self):
self.cbar_strain_energy = {}
self.cbush_strain_energy = {}
self.chexa_strain_energy = {}
self.ctria3_strain_energy = {}
self.cquad4_strain_energy = {}
def get_table_types(self):
tables = [
'cbar_strain_energy', 'cbush_strain_energy',
'chexa_strain_energy',
'ctria3_strain_energy', 'cquad4_strain_energy',
]
return ['RANEATC.' + table for table in tables]
class ROUGV1:
"""storage class for the ROUGV1 objects"""
def __init__(self):
self.displacements = {}
self.velocities = {}
self.accelerations = {}
self.eigenvectors = {}
def get_table_types(self):
tables = [
'displacements', 'velocities', 'accelerations', 'eigenvectors',
]
return ['ROUGV1.' + table for table in tables]
class RAFEATC:
"""storage class for the RAFEATC objects"""
def __init__(self):
self.cbar_force = {}
self.cquad4_force = {}
self.cbush_force = {}
def get_table_types(self):
tables = [
'cbar_force',
'cquad4_force',
'cbush_force',
]
return ['RAFEATC.' + table for table in tables]
class RASEATC:
"""storage class for the RASEATC objects"""
def __init__(self):
self.chexa_stress = {}
self.cquad4_stress = {}
def get_table_types(self):
tables = [
'chexa_stress',
'cquad4_stress',
]
return ['RASEATC.' + table for table in tables]
class RAEEATC:
"""storage class for the RAEEATC objects"""
def __init__(self):
self.chexa_strain = {}
self.ctria3_strain = {}
self.cquad4_strain = {}
def get_table_types(self):
tables = [
'chexa_strain',
'ctria3_strain', 'cquad4_strain',
]
return ['RAEEATC.' + table for table in tables]
```
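A short illustration of the prefix mechanism defined by `RandomObjects.get_table_types` above (the asserts simply restate what the code returns; the classes defined above are assumed to be in scope):
```python
psd = PowerSpectralDensityObjects()   # prefix = 'psd.', postfix = ''
tables = psd.get_table_types()
assert 'psd.displacements' in tables
assert 'psd.cbar_force' in tables
```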
#### File: op2/result_objects/grid_point_weight.py
```python
from io import StringIO
from struct import pack
import numpy as np
from pyNastran.utils import object_attributes, object_methods
from pyNastran.op2.result_objects.op2_objects import _write_table_header
from pyNastran.op2.op2_interface.write_utils import export_to_hdf5
float_types = (float, np.float32)
integer_types = (int, np.int32)
# ? ? ? ?
#good = (4, 2, 4, 8, 1464878927, 538976327, 8, 4, -1, 4, 4, 7, 4, 28, 101, 0, 0, 0, 0, 0, 1, 28, 4, -2, 4, 4, 1, 4, 4, 0, 4, 4, 2, 4, 8, 1464878927, 538976327, 8, 4, -3, 4, 4, 1, 4, 4, 0, 4, 4, 146, 4)
#bad = (4, 2, 4, 8, 1464878927, 538976327, 8, 4, -1, 4, 4, 7, 4, 28, 102, 0, 0, 0, 512, 0, 0, 28, 4, -2, 4, 4, 1, 4, 4, 0, 4, 4, 7, 4, 28, 1464878927, 538976327, 9, 27, 19, 0, 1, 28, 4, -3, 4, 4, 1, 4, 4)
class GridPointWeight:
def __init__(self):
"""
.. seealso:: http://www.6dof.com/index.php?option=com_content&view=article&id=298:output-from-the-grid-point-weight-generator&catid=178:courses-and-trainings&Itemid=61
"""
# The Grid Point Weight Generator (GPWG) module computes the rigid body
# mass properties of an entire structure with respect to a user specified point and with
# respect to the center of mass. Output from the module is requested by a PARAM
# GRDPNT card in the Bulk Data Deck which specifies from which grid point mass
# computations are to be referenced. Optionally, the absence of a specific grid point
# (i.e. PARAM, GRDPNT, 0) automatically causes the origin of the basic
# coordinate system to be utilized as a reference. The mass properties are initially
# defined in the basic coordinate system. Subsequently, the mass properties are
# transformed to principal mass axes and to principal inertia axes. The actual printout
# is composed of several elements. These are:
self.reference_point = None
# M0 RIGID BODY MASS MATRIX IN BASIC COORDINATE SYSTEM
# This is the rigid body mass matrix of the entire structure in the basic coordinate
# system with respect to a reference point chosen by the analyst.
self.MO = None
# S TRANSFORMATION MATRIX FOR SCALAR MASS PARTITION
# S is the transformation from the basic coordinate system to the set of principal axes
# for the 3 x 3 scalar mass partition of the 6 x 6 mass matrix. The principal axes for
# just the scalar partition are known as the principal mass axes.
self.S = None
self.mass = None
# XC.G. YC.G. ZC.G.
# It is possible in NASTRAN to assemble a structural model having different values of
# mass in each coordinate direction at a grid point. This can arise, for example, by
# assembling scalar mass components or from omitting some components by means of bar
# element pin flags. Consequently three distinct mass systems are assembled one in each of
# the three directions of the principal mass axes (the S system). This third tabulation
# has five columns. The first column lists the axis direction in the S coordinates. The
# second column lists the mass associated with the appropriate axis direction. The final
# three columns list the x, y, and z coordinate distances from the reference point to the
# center of mass for each of the three mass systems.
self.cg = None
# I(S) INERTIAS RELATIVE TO C.G.
# This is the 3 x 3 mass moment of inertia partition with respect to the center of
# gravity referred to the principal mass axes (the S system). This is not necessarily a
# diagonal matrix because the determination of the S system does not involve second
# moments. The values of inertias at the center of gravity are found from the values at
# the reference point employing the parallel axes rule.
self.IS = None
# I(Q) PRINCIPAL INERTIAS
# The principal moments of inertia at the center of gravity are displayed in matrix
# form with reference to the Q system of axes. The Q system is obtained from an eigenvalue
# analysis of the I(S) matrix.
self.IQ = None
# Q TRANSFORMATION MATRIX I(Q) = QT*IBAR(S)*Q
# Q is the coordinate transformation between the S axes and the Q axes. IBAR(S) is the
# same as I(s) except that the signs of the offdiagonal terms are reversed.
self.Q = None
self.title = ''
self.subtitle = ''
self.label = ''
self.superelement_adaptivity_index = ''
def export_to_hdf5(self, group, log) -> None:
"""exports the object to HDF5 format"""
export_to_hdf5(self, group, log)
def object_attributes(self, mode='public', keys_to_skip=None,
filter_properties=False):
if keys_to_skip is None:
keys_to_skip = []
my_keys_to_skip = [
'object_methods', 'object_attributes',
]
return object_attributes(self, mode=mode,
keys_to_skip=keys_to_skip+my_keys_to_skip,
filter_properties=filter_properties)
def object_methods(self, mode='public', keys_to_skip=None):
if keys_to_skip is None:
keys_to_skip = []
        my_keys_to_skip = [
            'object_methods', 'object_attributes',
        ]
return object_methods(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
def __eq__(self, weight):
msg = ''
if not self.reference_point == weight.reference_point:
msg += f'reference_point: {self.reference_point} -> {weight.reference_point}\n'
        if not np.array_equal(self.MO, weight.MO):
            msg += f'MO: {self.MO} -> {weight.MO}\n'
        if not np.array_equal(self.S, weight.S):
            msg += f'S: {self.S} -> {weight.S}\n'
        if not np.array_equal(self.mass, weight.mass):
            msg += f'mass: {self.mass} -> {weight.mass}\n'
        if not np.array_equal(self.cg, weight.cg):
            msg += f'cg: {self.cg} -> {weight.cg}\n'
        if not np.array_equal(self.IS, weight.IS):
            msg += f'IS: {self.IS} -> {weight.IS}\n'
        if not np.array_equal(self.IQ, weight.IQ):
            msg += f'IQ: {self.IQ} -> {weight.IQ}\n'
        if not np.array_equal(self.Q, weight.Q):
            msg += f'Q: {self.Q} -> {weight.Q}'
if msg:
raise ValueError('GridPointWeight:\n' + msg)
return True
def set_grid_point_weight(self, reference_point, MO, S, mass, cg, IS, IQ, Q,
approach_code=1, table_code=13,
title='', subtitle='', label='',
superelement_adaptivity_index=''):
"""used by the op2 reader to set the table parameters"""
self.reference_point = reference_point
self.approach_code = approach_code
self.table_code = table_code
self.MO = MO
self.S = S
self.mass = mass
self.cg = cg
self.IS = IS
self.IQ = IQ
self.Q = Q
self.title = title
self.subtitle = subtitle
self.label = label
self.superelement_adaptivity_index = superelement_adaptivity_index
def get_stats(self, key='', short=True):
key2 = f'[{key!r}]'
if short:
msg = (f'GridPointWeight{key2}: ref_point=%s mass=%g; '
'[reference_point, M0, S, mass, cg, IS, IQ, Q]\n' % (
self.reference_point, self.mass.max()))
else:
msg = (
f'GridPointWeight{key2}:'
' reference_point=%s\n'
' mass=[%10g %10g %10g]\n'
' cg =[%10g %10g %10g]\n'
' [%10g %10g %10g]\n'
' [%10g %10g %10g]\n\n'
' IS =[%10g %10g %10g]\n'
' [%10g %10g %10g]\n'
' [%10g %10g %10g]\n\n'
' IQ =[%10g %10s %10s]\n'
' [%10s %10g %10s]\n'
' [%10s %10s %10g]\n\n'
' Q = [%10g %10g %10g]\n'
' [%10g %10g %10g]\n'
' [%10g %10g %10g]\n' % (
self.reference_point, self.mass[0], self.mass[1], self.mass[2],
self.cg[0, 0], self.cg[0, 1], self.cg[0, 2],
self.cg[1, 0], self.cg[1, 1], self.cg[1, 2],
self.cg[2, 0], self.cg[2, 1], self.cg[2, 2],
self.IS[0, 0], self.IS[0, 1], self.IS[0, 2],
self.IS[1, 0], self.IS[1, 1], self.IS[1, 2],
self.IS[2, 0], self.IS[2, 1], self.IS[2, 2],
self.IQ[0], '', '',
'', self.IQ[1], '',
'', '', self.IQ[2],
self.Q[0, 0], self.Q[0, 1], self.Q[0, 2],
self.Q[1, 0], self.Q[1, 1], self.Q[1, 2],
self.Q[2, 0], self.Q[2, 1], self.Q[2, 2],
)
)
return msg
def __repr__(self):
f = StringIO()
page_stamp = 'PAGE %i'
page_num = 1
self.write_f06(f, page_stamp, page_num)
msg = f.getvalue()
return msg
def _write_table_3(self, op2_file, fascii, new_result, table_name, itable=-3):
import inspect
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
fascii.write('%s.write_table_3: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if new_result and itable != -3:
header = [
4, 146, 4,
]
else:
header = [
4, itable, 4,
4, 1, 4,
4, 0, 4,
4, 146, 4,
]
op2_file.write(pack(b'%ii' % len(header), *header))
fascii.write('table_3_header = %s\n' % header)
#op2_file.write(pack('12i', *[4, itable, 4,
#4, 1, 4,
#4, 0, 4,
#4, 146, 4,
#]))
approach_code = self.approach_code
table_code = self.table_code
#isubcase = self.isubcase
#random_code = self.random_code
#format_code = 1
isubcase = 0
num_wide = 79 # self.num_wide
#acoustic_flag = self.acoustic_flag if hasattr(self, 'acoustic_flag') else 0
reference_point = self.reference_point
reference_point = 22
#thermal = self.thermal
title = b'%-128s' % self.title.encode('ascii')
subtitle = b'%-128s' % self.subtitle.encode('ascii') # missing superelement_adaptivity_index
label = b'%-128s' % self.label.encode('ascii')
#1, 13, 0, 0, 0, 0, 0, 0, 0, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
ftable3 = b'i' * 50 + b'128s 128s 128s'
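        # 50 ints * 4 bytes + 3 * 128-byte title/subtitle/label strings
        # = 200 + 384 = 584 bytes, matching the assert on n below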
#print(self.get_stats())
table3 = [
approach_code, table_code, reference_point, isubcase, 0,
0, 0, 0, 0, num_wide,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
title, subtitle, label,
]
n = 0
from itertools import count
for i, val, ftable3i in zip(count(), table3, ftable3.decode('ascii')):
assert val is not None, 'i=%s val=%s ftable3i=%s\n%s' % (i, val, ftable3i, self.get_stats())
if isinstance(val, integer_types):
n += 4
assert ftable3i == 'i', 'i=%s val=%s type=%s' % (i, val, ftable3i)
elif isinstance(val, float_types):
n += 4
assert ftable3i == 'f', 'i=%s val=%s type=%s' % (i, val, ftable3i)
else:
n += len(val)
assert n == 584, n
data = [584] + table3 + [584]
fmt = b'i' + ftable3 + b'i'
#op2_file.write(pack(fascii, '%s header 3c' % table_name, fmt, data))
fascii.write('%s header 3c = %s\n' % (table_name, data))
#j = 7
#print(ftable3[:j])
#print(table3[:j])
#pack(ftable3[:j], *table3[:j])
op2_file.write(pack(fmt, *data))
def write_op2(self, op2_file, op2_ascii, date, endian=b'<'):
itable = -1
import inspect
#allowed_tables = [
#'OUGV1', 'BOUGV1', 'BOPHIG', 'BOPG1',
#'OUPV1',
#'OQP1', 'OQMG1', 'OQG1', 'OQGV1', 'OPNL1',
#'OPG1', 'OPGV1',
#'OAGATO1', 'OAGCRM1', 'OAGNO1', 'OAGPSD1', 'OAGRMS1',
#'OQGPSD1',
#'OCRPG', 'OCRUG', 'OUG1',
#'OUGV1PAT',
#]
#assert self.table_name in allowed_tables, self.table_name
table_name = 'OGPWG'
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
subtable_name = b'OGPWG'
if itable == -1:
_write_table_header(op2_file, op2_ascii, date, table_name, subtable_name)
itable = -3
#s = Struct(op2_format)
#unused_node = self.node_gridtype[:, 0]
#gridtype = self.node_gridtype[:, 1]
#format_table4_1 = Struct(self._endian + b'15i')
#format_table4_2 = Struct(self._endian + b'3i')
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
#nnodes_device = self.node_gridtype[:, 0] * 10 + self.device_code
#(2+6) => (node_id, gridtypei, t1i, t2i, t3i, r1i, r2i, r3i)
#ntotal = nnodes * (2 + 6)
#print('shape = %s' % str(self.data.shape))
#assert nnodes > 1, nnodes
#assert ntotal > 1, ntotal
#unused_device_code = self.device_code
#fascii.write(' ntimes = %s\n' % self.ntimes)
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#for itime in range(self.ntimes):
# good = (4, -4, 4, 4, 1, 4, 4, 0, 4, 4, 78, 4, 312, 1057795080, 0, 0, 0, 0, 0, 0, 1057795080, 0, 0, 0, 1111254630, 0, 0, 1057795080, 0, -1036229018, 0, 0, 0, 0, 1143715840, -1451229184, 0, 0, 0, -1036229018, -1451229184, 1169886464, 0, 0, 1111254630, 0, 0, 0, 1171293184, 1065353216)
#bad = (4, -4, 4, 4, 1, 4, 4, 0, 4, 4, 78, 4, 312, 1057795080, 0, 0, 0, 0, 0, 0, 1057795080, 0, 0, 0, 1111254630, 0, 0, 1057795080, 0, -1036229018, 0, 0, 0, 0, 1143715840, -1451229184, 0, 0, 0, -1036229018, -1451229184, 1169886464, 0, 0, 1111254630, 0, 0, 0, 1171293184, 1065353216, 0, 0, 0, 1065353216, 0, 0, 0, 1065353216, 1057795080, 0, -2147483648, 0, 1057795080, 1118530999, 0, -2147483648, 1057795080, 1118530999, 0, 0, 1143715840, 696254464, 0, 696254464, 1156812654, 0, 0, 0, 1160033719, 1143715840, 1156812654, 1160033719, 1065353216, 0, 0, 0, 1065353216, 0, 0, 0, 1065353216, 312, -5, 4, 4, 1, 4, 4, 0, 4, 4, 0, 4, 4, 4, 2, 4, 8, 1447515471, 538976305)
        ntotal = 78  # 78 floats * 4 bytes = 312
new_result = True
self._write_table_3(op2_file, op2_ascii, new_result, table_name, itable)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4*ntotal]
op2_file.write(pack(b'%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4*ntotal))
# -------------------------------------------------------
fmt = endian + b'78f'
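        # 78 floats = MO (6x6=36) + S (3x3=9) + [mass | cg] (3x4=12)
        #             + I(S) (3x3=9) + I(Q) (3) + Q (3x3=9)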
mcg = np.zeros((3, 4), dtype=self.cg.dtype)
mcg[:, 0] = self.mass
mcg[:, 1:] = self.cg
data = (self.MO.ravel().tolist() + self.S.ravel().tolist() +
mcg.ravel().tolist() + self.IS.ravel().tolist() + self.IQ.ravel().tolist() +
self.Q.ravel().tolist())
assert None not in data, data
msgi = pack(fmt, *data)
op2_file.write(msgi)
# -------------------------------------------------------
itable -= 1
header = [4 * ntotal,
4, itable, 4,
4, 1, 4,
4, 0, 4,
4, 0, 4, ]
op2_file.write(pack(endian + b'13i', *header))
op2_ascii.write('footer = %s\n' % header)
return itable
def write_f06(self, f06_file, page_stamp, page_num):
"""
writes the f06
Parameters
----------
f06_file : file / StringIO
a file-like object
page_stamp : str
the page formatter (e.g., 'PAGE %i')
page_num : int
the active page number
Returns
-------
page_num : int
the new page number
"""
if self.reference_point is None:
return page_num
msg = [' O U T P U T F R O M G R I D P O I N T W E I G H T G E N E R A T O R']
msg.append('0 REFERENCE POINT = %i' % self.reference_point)
# MO
msg.append(' M O')
for i in range(6):
msg.append(' * %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E *' % tuple(self.MO[i, :]))
msg.append(' S')
for i in range(3):
msg.append(' * %13.6E %13.6E %13.6E *' % tuple(self.S[i, :]))
msg.append(' DIRECTION')
msg.append(' MASS AXIS SYSTEM (S) MASS X-C.G. Y-C.G. Z-C.G.')
msg.append(' X %12.6E %13.6E %13.6E %13.6E' % (self.mass[0], self.cg[0, 0], self.cg[0, 1], self.cg[0, 2]))
msg.append(' Y %12.6E %13.6E %13.6E %13.6E' % (self.mass[1], self.cg[1, 0], self.cg[1, 1], self.cg[1, 2]))
msg.append(' Z %12.6E %13.6E %13.6E %13.6E' % (self.mass[2], self.cg[2, 0], self.cg[2, 1], self.cg[2, 2]))
msg.append(' I(S)')
for i in range(3):
msg.append(' * %13.6E %13.6E %13.6E *' % tuple(self.IS[i, :]))
msg.append(' I(Q)')
msg.append(' * %13.6E %13s %13s *' % (self.IQ[0], '', ''))
msg.append(' * %13s %13.6E %13s *' % ('', self.IQ[1], ''))
msg.append(' * %13s %13s %13.6E *' % ('', '', self.IQ[2]))
msg.append(' Q')
for i in range(3):
msg.append(' * %13.6E %13.6E %13.6E *' % tuple(self.Q[i, :]))
msg.append('\n' + page_stamp % page_num + '\n')
f06_file.write('\n'.join(msg))
return page_num + 1
```
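The comments in `GridPointWeight.__init__` above state that the principal inertias I(Q) and the transformation Q come from an eigenvalue analysis of I(S). A minimal sketch of that relationship (the I(S) values are hypothetical, this is not pyNastran API, and it ignores the off-diagonal sign convention mentioned above):
```python
import numpy as np

# hypothetical, symmetric I(S) matrix (inertias relative to the c.g.)
IS = np.array([[2.0, 0.1, 0.0],
               [0.1, 3.0, 0.0],
               [0.0, 0.0, 4.0]])

# eigen-analysis of I(S): the eigenvalues are the principal inertias I(Q),
# the eigenvectors form the S -> Q transformation
IQ, Q = np.linalg.eigh(IS)
```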
#### File: tables/lama_eigenvalues/lama.py
```python
from struct import Struct
from pyNastran.op2.op2_interface.op2_common import OP2Common
from pyNastran.op2.tables.lama_eigenvalues.lama_objects import (
RealEigenvalues, ComplexEigenvalues, BucklingEigenvalues)
class LAMA(OP2Common):
def __init__(self):
OP2Common.__init__(self)
def _read_complex_eigenvalue_3(self, data, ndata):
"""parses the Complex Eigenvalues Table 3 Data"""
#raise NotImplementedError(self.table_name)
self.words = [
'aCode', 'tCode', '???', 'isubcase',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???']
#self.show_data(data)
unused_three = self.parse_approach_code(data)
self.six = self.add_data_parameter(data, 'six', b'i', 10, False) # seven
self._read_title(data)
def _read_buckling_eigenvalue_3(self, data, ndata):
"""parses the Buckling Eigenvalues Table 3 Data"""
#print(self.show_data(data))
#self._read_title_helper(data)
self.words = [
'aCode', 'tCode', '???', 'isubcase',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???']
#self.show_data(data)
unused_three = self.parse_approach_code(data)
self.seven = self.add_data_parameter(data, 'seven', b'i', 10, False) # seven
#: residual vector augmentation flag
self.residual_flag = self.add_data_parameter(data, 'residual_flag', b'i', 11, False)
#: fluid modes flag
self.fluid_flag = self.add_data_parameter(data, 'fluid_flag', b'i', 12, False)
self._read_title(data)
def _read_complex_eigenvalue_4(self, data, ndata):
"""parses the Complex Eigenvalues Table 4 Data"""
if self.read_mode == 1:
return ndata
ntotal = 24 # 4 * 6
nmodes = ndata // ntotal
n = 0
#assert self.isubcase != 0, self.isubcase
clama = ComplexEigenvalues(self.title, self.table_name, nmodes)
#assert self.title not in self.eigenvalues, f'table={self.table_name_str} title={self.title} optimization_count={self._count}'
self.eigenvalues[self.title] = clama
#self.eigenvalues[self.isubcase] = clama
structi = Struct(self._endian + b'ii4f')
for i in range(nmodes):
edata = data[n:n+ntotal]
out = structi.unpack(edata)
if self.is_debug_file:
self.binary_debug.write(' eigenvalue%s - %s\n' % (i, str(out)))
#(imode, order, eigr, eigc, freq, damping) = out # CLAMA
#print('imode=%s order=%s eigr=%s eigc=%s freq=%s damping=%s' %
#(imode, order, eigr, eigc, freq, damping))
clama.add_op2_line(out, i)
n += ntotal
assert n == ndata, 'clama length error'
return n
def _read_buckling_eigenvalue_4(self, data, ndata):
"""parses the Buckling Eigenvalues Table 4 Data"""
# BLAMA - Buckling eigenvalue summary table
# CLAMA - Complex eigenvalue summary table
# LAMA - Normal modes eigenvalue summary table
if self.read_mode == 1:
return ndata
ntotal = 28 # 4 * 7
nmodes = ndata // ntotal
n = 0
#assert self.isubcase != 0, self.isubcase
blama = BucklingEigenvalues(self.title, self.table_name, nmodes)
#assert self.title not in self.eigenvalues, f'table={self.table_name_str} title={self.title} optimization_count={self._count}'
self.eigenvalues[self.title] = blama
#self.eigenvalues[self.isubcase] = lama
structi = Struct(self._endian + b'ii5f')
for i in range(nmodes):
edata = data[n:n+ntotal]
out = structi.unpack(edata)
if self.is_debug_file:
self.binary_debug.write(' eigenvalue%s - %s\n' % (i, str(out)))
#(imode, order, eigen, omega, freq, mass, stiff) = out # BLAMA??
#(mode_num, extract_order, eigenvalue, radian, cycle, genM, genK) = line # LAMA
#(root_num, extract_order, eigr, eigi, cycle, damping) = data # CLAMA
blama.add_op2_line(out, i)
n += ntotal
return n
def _read_real_eigenvalue_3(self, data, ndata):
"""parses the Real Eigenvalues Table 3 Data"""
self.words = [
'aCode', 'tCode', '???', 'isubcase',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???',
'???', '???', '???', '???']
#self.show_data(data)
unused_three = self.parse_approach_code(data)
self.seven = self.add_data_parameter(data, 'seven', b'i', 10, False) # seven
## residual vector augmentation flag
self.residual_flag = self.add_data_parameter(data, 'residual_flag', b'i', 11, False)
## fluid modes flag
self.fluid_flag = self.add_data_parameter(data, 'fluid_flag', b'i', 12, False)
self.title = None
#print(self.data_code)
#self.add_data_parameter(data,'format_code', 'i',9,False) ## format code
#: number of words per entry in record;
#.. todo:: is this needed for this table ???
#self.add_data_parameter(data,'num_wide', 'i',10,False)
#if self.analysis_code == 2: # sort2
#self.lsdvmn = self.get_values(data,'i',5)
#print("*isubcase=%s" % self.isubcase)
#print("analysis_code=%s table_code=%s thermal=%s" % (
#self.analysis_code, self.table_code, self.thermal))
#self.print_block(data)
self._read_title(data)
def _read_real_eigenvalue_4(self, data, ndata):
"""parses the Real Eigenvalues Table 4 Data"""
if self.read_mode == 1:
return ndata
nmodes = ndata // 28
n = 0
ntotal = 28
#assert self.isubcase != 0, self.isubcase
lama = RealEigenvalues(self.title, self.table_name, nmodes=nmodes)
if self.table_name in [b'LAMA', b'LAMAS']:
result_name = 'eigenvalues'
elif self.table_name == b'LAMAF':
result_name = 'eigenvalues_fluid'
else: # pragma: no cover
raise NotImplementedError(self.table_name)
slot = getattr(self, result_name)
#assert self.title not in slot, f'{result_name}: table={self.table_name_str} title={self.title!r} optimization_count={self._count}'
slot[self.title] = lama
structi = Struct(self._endian + b'ii5f')
for i in range(nmodes):
edata = data[n:n+28]
out = structi.unpack(edata)
if self.is_debug_file:
self.binary_debug.write(' eigenvalue%s - %s\n' % (i, str(out)))
#(imode, extract_order, eigenvalue, radian, cycle, gen_mass, gen_stiffness) = out
lama.add_f06_line(out, i)
n += ntotal
return n
```
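A minimal sketch (hypothetical values, little-endian assumed) of the 28-byte record layout that `_read_real_eigenvalue_4` above unpacks with `Struct(self._endian + b'ii5f')`:
```python
from struct import Struct

# (mode, extract_order, eigenvalue, radian, cycle, gen_mass, gen_stiffness)
record_struct = Struct('<ii5f')                    # 2*4 + 5*4 = 28 bytes
raw = record_struct.pack(1, 1, 39.478, 6.2832, 1.0, 1.0, 39.478)
mode, order, eigenvalue, radian, cycle, gen_mass, gen_stiff = record_struct.unpack(raw)
```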
#### File: oes_stressStrain/real/oes_bush.py
```python
import numpy as np
from numpy import zeros
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import StressObject, StrainObject, OES_Object
from pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header
class RealBushArray(OES_Object):
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=False)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.ielement = 0
self.nelements = 0 # result specific
#print('RealBushArray.nonlinear_factor =', self.nonlinear_factor)
@property
def is_real(self):
return True
@property
def is_complex(self):
return False
def _reset_indices(self):
self.itotal = 0
if self.table_name not in ['OESRMS2', 'OESNO2', 'OSTRRMS2', 'OSTRNO2']:
self.ielement = 0
def _get_msgs(self):
raise NotImplementedError('%s needs to implement _get_msgs' % self.__class__.__name__)
def get_headers(self):
raise NotImplementedError('%s needs to implement get_headers' % self.__class__.__name__)
#return headers
def build(self):
"""sizes the vectorized attributes of the RealBushArray"""
#print("self.ielement =", self.ielement)
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
if self.element_type == 102:
#nnodes_per_element = 1
pass
else:
raise NotImplementedError(self.element_type)
# buggy MSC 2005 (was this ever fixed?)
# NX doesn't have this bug
if self.table_name in ['OESRMS2', 'OESNO2', 'OSTRRMS2', 'OSTRNO2']:
self.ntotal = self.nelements
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s" % (
#self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
_times = zeros(self.ntimes, dtype=dtype)
element = zeros(self.ntotal, dtype='int32')
# [tx, ty, tz, rx, ry, rz]
data = zeros((self.ntimes, self.ntotal, 6), dtype='float32')
if self.load_as_h5:
#for key, value in sorted(self.data_code.items()):
#print(key, value)
group = self._get_result_group()
self._times = group.create_dataset('_times', data=_times)
self.element = group.create_dataset('element', data=element)
self.data = group.create_dataset('data', data=data)
else:
self._times = _times
self.element = element
self.data = data
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
#Time 0.00 0.10
#ElementID Item
#11 tx 0.0 0.0
# ty 0.0 0.0
# tz 0.0 0.0
# rx 0.0 0.0
# ry 0.0 0.0
# rz 0.0 0.0
#21 tx 0.0 0.0
column_names, column_values = self._build_dataframe_transient_header()
data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
# >25.0
#Static tx ty tz rx ry rz
#ElementID
#1 1000.0 0.0 0.0 0.0 0.0 0.0
#
# <=24.2
#Static 0
#ElementID Item
#1 tx 1000.0
# ty 0.0
# tz 0.0
# rx 0.0
# ry 0.0
# rz 0.0
data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)
data_frame.index.name = 'ElementID'
data_frame.columns.names = ['Static']
#data_frame = pd.Panel(self.data, major_axis=self.element, minor_axis=headers).to_frame()
#data_frame.columns.names = ['Static']
#data_frame.index.names = ['ElementID', 'Item']
self.data_frame = data_frame
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid, in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(fx1, fy1, fz1, unused_mx1, unused_my1, unused_mz1) = t1
(fx2, fy2, fz2, unused_mx2, unused_my2, unused_mz2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s\n (%s, %s, %s)\n (%s, %s, %s)\n' % (
eid,
fx1, fy1, fz1, #mx1, my1, mz1
fx2, fy2, fz2) #mx2, my2, mz2
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, tx, ty, tz, rx, ry, rz):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.itotal] = eid
self.data[self.itime, self.itotal, :] = [tx, ty, tz, rx, ry, rz]
self.itotal += 1
self.ielement += 1
def get_stats(self, short=False):
if not self.is_built:
return ['<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.ntotal
ntimes = self.ntimes
#ntotal = self.ntotal
nelements = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
headers = self.get_headers()
n = len(headers)
assert n == self.data.shape[2], 'nheaders=%s shape=%s' % (n, str(self.data.shape))
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' element.shape = %s\n' % str(self.element.shape).replace('L', ''))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_element_index(self, eids):
# elements are always sorted; nodes are not
itot = np.searchsorted(eids, self.element) #[0]
return itot
    def eid_to_element_node_index(self, eids):
        # np.searchsorted needs (sorted_array, values); the original call was
        # missing the second argument
        ind = np.ravel([np.searchsorted(self.element, eid) for eid in eids])
        return ind
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg = self._get_msgs()
(ntimes, unused_ntotal) = self.data.shape[:2]
eids = self.element
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg))
#[tx, ty, tz, rx, ry, rz]
tx = self.data[itime, :, 0]
ty = self.data[itime, :, 1]
tz = self.data[itime, :, 2]
rx = self.data[itime, :, 3]
ry = self.data[itime, :, 4]
rz = self.data[itime, :, 5]
for eid, txi, tyi, tzi, rxi, ryi, rzi in zip(
eids, tx, ty, tz, rx, ry, rz):
vals = [txi, tyi, tzi, rxi, ryi, rzi]
vals2 = write_floats_13e(vals)
[txi, tyi, tzi, rxi, ryi, rzi] = vals2
f06_file.write('0 %8i %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, txi, tyi, tzi, rxi, ryi, rzi))
f06_file.write(page_stamp % page_num)
page_num += 1
if self.nonlinear_factor in (None, np.nan):
page_num -= 1
return page_num
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i6f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
#print('3, %s' % itable)
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
#print('4, %s' % itable)
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
tx = self.data[itime, :, 0]
ty = self.data[itime, :, 1]
tz = self.data[itime, :, 2]
rx = self.data[itime, :, 3]
ry = self.data[itime, :, 4]
rz = self.data[itime, :, 5]
for eid, eid_device, txi, tyi, tzi, rxi, ryi, rzi in zip(
eids, eids_device, tx, ty, tz, rx, ry, rz):
data = [eid_device, txi, tyi, tzi, rxi, ryi, rzi]
vals = [txi, tyi, tzi, rxi, ryi, rzi]
vals2 = write_floats_13e(vals)
[txi, tyi, tzi, rxi, ryi, rzi] = vals2
op2_ascii.write('0 %8i %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, txi, tyi, tzi, rxi, ryi, rzi))
op2.write(struct1.pack(*data))
#for eid, axiali, SMai, torsioni, SMti in zip(eids_device, axial, SMa, torsion, SMt):
#data = [eid, axiali, SMai, torsioni, SMti]
#op2_ascii.write(' eid=%s axial=%s SMa=%s torsion=%s SMt=%s\n' % tuple(data))
#op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class RealBushStressArray(RealBushArray, StressObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealBushArray.__init__(self, data_code, is_sort1, isubcase, dt)
StressObject.__init__(self, data_code, isubcase)
def get_headers(self):
headers = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
return headers
def _get_msgs(self):
if self.element_type == 102:
pass
else:
raise NotImplementedError(self.element_type)
msg = [
' S T R E S S E S I N B U S H E L E M E N T S ( C B U S H )\n \n',
' ELEMENT-ID STRESS-TX STRESS-TY STRESS-TZ STRESS-RX STRESS-RY STRESS-RZ \n',
]
return msg
class RealBushStrainArray(RealBushArray, StrainObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
RealBushArray.__init__(self, data_code, is_sort1, isubcase, dt)
StrainObject.__init__(self, data_code, isubcase)
def get_headers(self):
headers = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
return headers
def _get_msgs(self):
if self.element_type == 102:
pass
else:
raise NotImplementedError(self.element_type)
msg = [
' S T R A I N S I N B U S H E L E M E N T S ( C B U S H )\n'
' \n'
' ELEMENT-ID STRAIN-TX STRAIN-TY STRAIN-TZ STRAIN-RX STRAIN-RY STRAIN-RZ \n'
]
return msg
```
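For reference, a tiny sketch of the array layout that `build()` above allocates (shapes only; the values are hypothetical and this is not pyNastran API):
```python
import numpy as np

ntimes, nelements = 2, 3
element = np.array([11, 21, 31], dtype='int32')
# columns are ['tx', 'ty', 'tz', 'rx', 'ry', 'rz'], matching get_headers()
data = np.zeros((ntimes, nelements, 6), dtype='float32')

tx_at_t0 = data[0, :, 0]   # tx for every element at the first time step
rz_eid_11 = data[0, 0, 5]  # rz for element[0] (eid 11) at the first time step
```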
#### File: op2/writer/test_op2_writer.py
```python
import unittest
import os
from cpylog import get_logger
import pyNastran
#from pyNastran.bdf.bdf import BDF
#from pyNastran.op2.op2 import OP2, FatalError, read_op2
#from pyNastran.op2.op2_interface.op2_common import get_scode_word
from pyNastran.op2.op2_geom import read_op2_geom#, OP2Geom,
from pyNastran.op2.op2 import read_op2
from pyNastran.op2.test.test_op2 import run_op2
#from pyNastran.op2.writer.op2_writer import OP2Writer
PKG_PATH = pyNastran.__path__[0]
MODEL_PATH = os.path.abspath(os.path.join(PKG_PATH, '..', 'models'))
class TestOP2Writer(unittest.TestCase):
def test_write_1(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='warning', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'solid_bending')
op2_filename = os.path.join(folder, 'solid_bending.op2')
op2_filename_debug = os.path.join(folder, 'solid_bending.debug.out')
op2_filename_out = os.path.join(folder, 'solid_bending_out.op2')
op2_filename_debug_out = os.path.join(folder, 'solid_bending_out.debug.out')
#debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
#debug_file = model + '.debug.out'
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug,
include_results='displacements', log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
assert op2 == op2b
def test_write_2(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='warning', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'solid_bending')
op2_filename = os.path.join(folder, 'solid_bending.op2')
op2_filename_debug = os.path.join(folder, 'solid_bending.debug.out')
op2_filename_out = os.path.join(folder, 'solid_bending_out.op2')
op2_filename_debug_out = os.path.join(folder, 'solid_bending_out.debug.out')
#debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
#debug_file = model + '.debug.out'
op2 = read_op2(op2_filename, debug_file=op2_filename_debug, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
assert op2 == op2b
def _test_write_3(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='warning', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'sol_101_elements')
op2_filename = os.path.join(folder, 'static_solid_shell_bar.op2')
op2_filename_debug = os.path.join(folder, 'static_solid_shell_bar.debug.out')
op2_filename_out = os.path.join(folder, 'static_solid_shell_bar_out.op2')
op2_filename_debug_out = os.path.join(folder, 'static_solid_shell_bar_out.debug.out')
#debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
#debug_file = model + '.debug.out'
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
unused_op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out)
def test_write_4(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='warning', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'sol_101_elements')
op2_filename = os.path.join(folder, 'static_solid_shell_bar.op2')
op2_filename_debug = os.path.join(folder, 'static_solid_shell_bar.debug.out')
op2_filename_out = os.path.join(folder, 'static_solid_shell_bar_out.op2')
op2_filename_debug_out = os.path.join(folder, 'static_solid_shell_bar_out.debug.out')
#debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
#debug_file = model + '.debug.out'
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
def test_write_5(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='warning', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'sol_101_elements')
op2_filename = os.path.join(folder, 'mode_solid_shell_bar.op2')
op2_filename_debug = os.path.join(folder, 'mode_solid_shell_bar.debug.out')
op2_filename_out = os.path.join(folder, 'mode_solid_shell_bar_out.op2')
op2_filename_debug_out = os.path.join(folder, 'mode_solid_shell_bar_out.debug.out')
#debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
#debug_file = model + '.debug.out'
exclude_results = [
#'*_strain_energy',
]
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug,
exclude_results=exclude_results,
log=log, )
op2.write_op2(op2_filename_out) #is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
def test_write_6(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='warning', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'sol_101_elements')
op2_filename = os.path.join(folder, 'transient_solid_shell_bar.op2')
op2_filename_debug = os.path.join(folder, 'transient_solid_shell_bar.debug.out')
op2_filename_out = os.path.join(folder, 'transient_solid_shell_bar_out.op2')
op2_filename_debug_out = os.path.join(folder, 'transient_solid_shell_bar_out.debug.out')
#debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
#debug_file = model + '.debug.out'
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
def test_write_7(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='warning', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'sol_101_elements')
op2_filename = os.path.join(folder, 'freq_solid_shell_bar.op2')
op2_filename_debug = os.path.join(folder, 'freq_solid_shell_bar.debug.out')
op2_filename_out = os.path.join(folder, 'freq_solid_shell_bar_out.op2')
op2_filename_debug_out = os.path.join(folder, 'freq_solid_shell_bar_out.debug.out')
#debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
#debug_file = model + '.debug.out'
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
def test_write_elements_1(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='warning', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'elements')
op2_filename = os.path.join(folder, 'freq_elements.op2')
op2_filename_debug = os.path.join(folder, 'freq_elements.debug.out')
op2_filename_out = os.path.join(folder, 'freq_elements_out.op2')
op2_filename_debug_out = os.path.join(folder, 'freq_elements_out.debug.out')
#model = os.path.splitext(op2_filename)[0]
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
def test_write_elements_2(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='info', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'elements')
op2_filename = os.path.join(folder, 'freq_elements2.op2')
op2_filename_debug = os.path.join(folder, 'freq_elements2.debug.out')
op2_filename_out = os.path.join(folder, 'freq_elements_out2.op2')
op2_filename_debug_out = os.path.join(folder, 'freq_elements_out2.debug.out')
#model = os.path.splitext(op2_filename)[0]
exclude_results = [
'ctria6_force', 'ctriar_force', 'cshear_force',
'cvisc_force', 'modal_contribution.cshear_stress',
]
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug,
exclude_results=exclude_results, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
unused_op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
#op2.assert_op2_equal(op2b,
#skip_results=['params', ],
#stop_on_failure=True, debug=False)
def test_write_elements_3(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='info', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'elements')
op2_filename = os.path.join(folder, 'freq_random_elements.op2')
op2_filename_debug = os.path.join(folder, 'freq_random_elements.debug.out')
op2_filename_out = os.path.join(folder, 'freq_random_elements_out.op2')
op2_filename_debug_out = os.path.join(folder, 'freq_random_elements_out.debug.out')
#model = os.path.splitext(op2_filename)[0]
exclude_results = [
'ctria6_force', 'ctriar_force', 'cshear_force',
'cvisc_force', 'cshear_stress', '*strain_energy',
]
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug,
exclude_results=exclude_results, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
def test_write_elements_4(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='info', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'elements')
op2_filename = os.path.join(folder, 'modes_complex_elements.op2')
op2_filename_debug = os.path.join(folder, 'modes_complex_elements.debug.out')
op2_filename_out = os.path.join(folder, 'modes_complex_elements_out.op2')
op2_filename_debug_out = os.path.join(folder, 'modes_complex_elements_out.debug.out')
#model = os.path.splitext(op2_filename)[0]
exclude_results = [
'ctria6_force', 'ctriar_force', 'cshear_force',
'cvisc_force',
'cshear_stress', #'*strain_energy',
]
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug,
exclude_results=exclude_results, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
def test_write_elements_5(self):
"""tests basic op2 writing"""
log = get_logger(log=None, level='info', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'elements')
op2_filename = os.path.join(folder, 'time_elements.op2')
op2_filename_debug = os.path.join(folder, 'time_elements.debug.out')
op2_filename_out = os.path.join(folder, 'time_elements_out.op2')
op2_filename_debug_out = os.path.join(folder, 'time_elements_out.debug.out')
#model = os.path.splitext(op2_filename)[0]
exclude_results = [
'cshear_force',
'cvisc_force', 'cshear_stress', 'grid_point_forces', '*strain_energy',
]
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug,
exclude_results=exclude_results, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
def test_thermal_1(self):
"""tests basic op2 thermal writing"""
log = get_logger(log=None, level='info', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'elements')
op2_filename = os.path.join(folder, 'time_thermal_elements.op2')
op2_filename_debug = os.path.join(folder, 'time_thermal_elements.debug.out')
op2_filename_out = os.path.join(folder, 'time_thermal_elements_out.op2')
op2_filename_debug_out = os.path.join(folder, 'time_thermal_elements_out.debug.out')
#debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
#debug_file = model + '.debug.out'
exclude_results = [
'chbdye_thermal_load',
'chexa_thermal_load',
]
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug,
exclude_results=exclude_results, log=log)
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
def test_thermal_2(self):
"""tests basic op2 thermal writing"""
log = get_logger(log=None, level='info', encoding='utf-8')
folder = os.path.join(MODEL_PATH, 'other')
op2_filename = os.path.join(folder, 'hd15306.op2')
op2_filename_debug = os.path.join(folder, 'hd15306.debug.out')
op2_filename_out = os.path.join(folder, 'hd15306_out.op2')
op2_filename_debug_out = os.path.join(folder, 'hd15306_out.debug.out')
#debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
#debug_file = model + '.debug.out'
exclude_results = [
'chbdyg_thermal_load',
'crod_thermal_load',
'cquad4_thermal_load',
#'chbdye_thermal_load',
#'chexa_thermal_load',
]
op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug,
exclude_results=exclude_results, debug=True, log=log)
str(op2.get_op2_stats(short=True))
op2.write_op2(op2_filename_out) #, is_mag_phase=False)
op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out, log=log)
op2.assert_op2_equal(op2b,
skip_results=['params', ],
stop_on_failure=True, debug=False)
#def test_thermal_3(self):
#"""tests basic op2 thermal writing"""
#folder = os.path.join(MODEL_PATH, 'other')
#op2_filename = os.path.join(folder, 'ofprand1.op2')
#op2_filename_debug = os.path.join(folder, 'ofprand1.debug.out')
#op2_filename_out = os.path.join(folder, 'ofprand1_out.op2')
#op2_filename_debug_out = os.path.join(folder, 'ofprand1.debug.out')
##debug_file = 'solid_bending.debug.out'
#model = os.path.splitext(op2_filename)[0]
##debug_file = model + '.debug.out'
#exclude_results = [
##'chbdyg_thermal_load',
##'crod_thermal_load',
##'cquad4_thermal_load',
##'chbdye_thermal_load',
##'chexa_thermal_load',
#]
#op2 = read_op2_geom(op2_filename, debug_file=op2_filename_debug,
#exclude_results=exclude_results,
##include_results='eigenvectors',
##include_results=['crod_stress', 'cbar_stress'],
##include_results=['crod_force', 'cbar_force'],
##include_results='element_forces',
##include_results='stress',
#)
#print(op2.get_op2_stats(short=True))
#op2.write_op2(op2_filename_out, is_mag_phase=False)
#op2b = read_op2_geom(op2_filename_out, debug_file=op2_filename_debug_out)
##op2b = read_op2(op2_filename_out, debug_file=op2_filename_debug_out)
#op2.assert_op2_equal(op2b,
#skip_results=['params', ],
#stop_on_failure=True, debug=False)
if __name__ == '__main__':
unittest.main()
```
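Each of the tests above exercises the same round trip: read an OP2, write it back out, re-read the written file, and compare the two models. A minimal standalone sketch of that pattern follows; the model path is hypothetical, and it assumes `read_op2_geom` is importable from `pyNastran.op2.op2_geom` as in the test module.
```python
import os
from pyNastran.op2.op2_geom import read_op2_geom

# Hypothetical model location; any OP2 file containing geometry tables will do.
op2_filename = os.path.join('models', 'solid_bending', 'solid_bending.op2')
op2_filename_out = os.path.join('models', 'solid_bending', 'solid_bending_out.op2')

op2 = read_op2_geom(op2_filename)          # read geometry + results
op2.write_op2(op2_filename_out)            # write a new OP2
op2b = read_op2_geom(op2_filename_out)     # re-read the written file
op2.assert_op2_equal(op2b, skip_results=['params'],
                     stop_on_failure=True, debug=False)
```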
#### File: utils/test/test_log.py
```python
import os
import unittest
from pyNastran.utils.log import make_log
class TestLog(unittest.TestCase):
def test_make_log(self):
"""tests make_log"""
make_log()
os.remove('pyNastran.log')
if __name__ == "__main__": # pragma: no cover
unittest.main()
``` |
{
"source": "JohannesSMHI/BAWS-vis",
"score": 2
} |
#### File: BAWS-vis/bawsvis/session.py
```python
import os
from bawsvis.config import Settings
from bawsvis.writers.raster import raster_writer
from bawsvis.writers.dictionary import json_writer
from bawsvis.writers.text import np_text_writer
class Session:
"""
"""
def __init__(self, data_path=None):
self.setting = Settings()
self.data_path = data_path
def export_data(self, data=None, file_name=None, writer='raster'):
"""
:param data:
:param file_name:
:param writer:
:return:
"""
file_name = ''.join((self.setting.export_directory, file_name))
if writer == 'raster':
raster_writer(file_name, data, self.setting.raster_template_meta)
elif writer == 'json':
json_writer(file_name, data)
elif writer == 'text':
np_text_writer(data, file_name)
```
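A hedged usage sketch of the `Session` class above; it assumes bawsvis is installed with a valid `Settings` configuration, and the array and file names are invented placeholders.
```python
import numpy as np
from bawsvis.session import Session

# Invented placeholder for an aggregated satellite product.
data = np.zeros((1400, 1400), dtype=np.uint8)

session = Session()
# Writes <export_directory>/algae_map.tiff using the raster template metadata.
session.export_data(data=data, file_name='algae_map.tiff', writer='raster')
# Writes a dictionary as pretty-printed JSON instead.
session.export_data(data={'n_pixels_bloom': 1234}, file_name='stats.json', writer='json')
```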
#### File: BAWS-vis/bawsvis/utils.py
```python
import os
import numpy as np
import rasterio as rio
from collections.abc import Mapping  # moved from 'collections' (removed there in Python 3.10)
from pyproj import Proj, CRS, transform
def transform_ref_system(lat=0.0, lon=0.0,
in_proj='EPSG:3006', # SWEREF 99TM 1200
out_proj='EPSG:4326'):
"""
Transform coordinates from one spatial reference system to another.
in_proj is your current reference system
out_proj is the reference system you want to transform to; default is EPSG:4326 = WGS84.
(Another good choice is EPSG:4258 = ETRS89 (Europe), which is nearly identical to WGS84
in Europe; it is not always clear whether coordinates are given in WGS84 or ETRS89,
but the two differ by less than 1 m.)
lat = latitude
lon = longitude
To find your EPSG check this website: http://spatialreference.org/ref/epsg/
"""
o_proj = CRS(out_proj)
i_proj = CRS(in_proj)
x, y = transform(i_proj, o_proj, float(lon), float(lat))
return y, x
class Grid:
"""
"""
def __init__(self, meta):
self.meta = meta
self.proj = Proj(**self.meta['crs'])
self.map_width = int(self.meta['width'] * self.meta['transform'][0])
self.map_height = int(self.meta['height'] * self.meta['transform'][0])
self.x_size = int(self.map_width / self.meta['width'])
self.y_size = int(self.map_height / self.meta['height'])
self.xmin_proj = self.meta['transform'][2]
self.xmax_proj = self.xmin_proj + self.map_width
self.ymax_proj = self.meta['transform'][5]
self.ymin_proj = self.ymax_proj - self.map_height
self.xmin_wgs84, self.ymin_wgs84 = self.llcord
self.xmax_wgs84, self.ymax_wgs84 = self.urcord
self.map_wgs84_width = self.xmax_wgs84 - self.xmin_wgs84
self.map_wgs84_height = self.ymax_wgs84 - self.ymin_wgs84
self.x_wgs84_size = self.map_wgs84_width / self.meta['width']
self.y_wgs84_size = self.map_wgs84_height / self.meta['height']
def get_longitude_grid(self):
"""
:return:
"""
array1d = np.arange(self.xmin_proj, self.xmax_proj, self.x_size)
return np.tile(array1d, (self.map_height, 1))
def get_latitude_grid(self):
"""
:return:
"""
array1d = np.arange(self.ymin_proj, self.ymax_proj, self.y_size)
return np.flip(np.tile(array1d, (self.map_width, 1))).T
def get_longitude_wgs84grid(self):
"""
:return:
"""
array1d = np.arange(self.xmin_wgs84, self.xmax_wgs84, self.x_wgs84_size)
return np.tile(array1d, (self.meta['height'], 1))
def get_latitude_wgs84grid(self):
"""
:return:
"""
array1d = np.arange(self.ymin_wgs84, self.ymax_wgs84, self.y_wgs84_size)
return np.flip(np.tile(array1d, (self.meta['width'], 1))).T
@property
def llcord(self):
return self.proj(*(self.xmin_proj, self.ymin_proj), inverse=True)
@property
def urcord(self):
return self.proj(*(self.xmax_proj, self.ymax_proj), inverse=True)
def generate_filepaths(directory, pattern='', not_pattern='DUMMY_PATTERN',
pattern_list=[], endswith='',
only_from_dir=True):
"""
:param directory:
:param pattern:
:param not_pattern:
:param pattern_list:
:param endswith:
:param only_from_dir:
:return:
"""
for path, subdir, fids in os.walk(directory):
if only_from_dir:
if path != directory:
continue
for f in fids:
if pattern in f and not_pattern not in f and f.endswith(endswith):
if any(pattern_list):
for pat in pattern_list:
if pat in f:
yield os.path.abspath(os.path.join(path, f))
else:
yield os.path.abspath(os.path.join(path, f))
def recursive_dict_update(d, u):
""" Recursive dictionary update using
Copied from:
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
via satpy
"""
for k, v in u.items():
if isinstance(v, Mapping):
r = recursive_dict_update(d.get(k, {}), v)
d.setdefault(k, r)
else:
d.setdefault(k, u[k])
return d
# if __name__ == "__main__":
#
# meta = get_raster_meta('C:/Utveckling/BAWS-vis/bawsvis/etc/raster_template.tiff')
# g = Grid(meta)
```
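A short, hedged usage sketch for the helpers above; the coordinates, paths, and dictionaries are made up, but the calls match the functions as defined in `bawsvis.utils`.
```python
from bawsvis.utils import (transform_ref_system, generate_filepaths,
                           recursive_dict_update)

# Made-up SWEREF 99 TM (EPSG:3006) coordinates converted to WGS84 latitude/longitude.
lat_wgs84, lon_wgs84 = transform_ref_system(lat=6583052.0, lon=674032.0)

# Hypothetical data directory: yield absolute paths to matching GeoTIFF files.
for path in generate_filepaths('/data/baws', pattern='cyano', endswith='.tiff'):
    print(path)

# Fill in missing keys from defaults without overwriting what the user already set.
user_settings = {'plot': {'dpi': 300}}
defaults = {'plot': {'dpi': 150, 'cmap': 'viridis'}}
merged = recursive_dict_update(user_settings, defaults)
# merged == {'plot': {'dpi': 300, 'cmap': 'viridis'}}
```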
#### File: bawsvis/writers/dictionary.py
```python
import json
def json_writer(file_path, data, indent=4):
with open(file_path, "w") as outfile:
json.dump(data, outfile, indent=indent)
``` |
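For completeness, a hedged example of the JSON writer above; the path and statistics are hypothetical.
```python
from bawsvis.writers.dictionary import json_writer

statistics = {'2020-07-01': {'bloom_area_km2': 1250, 'class': 3}}
json_writer('/tmp/bloom_statistics.json', statistics)  # pretty-printed with indent=4
```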
{
"source": "JohannesSMHI/ctdpy",
"score": 3
} |
#### File: ctdpy/core/mapping.py
```python
import pandas as pd
class AttributeDict(dict):
"""Base class for attribute dictionaries."""
def __init__(self):
"""Initialize."""
super().__init__()
def _add_arrays_to_entries(self, **entries):
"""Add arrays as attributes to self."""
for key, array in entries.items():
# TODO Check if array is needed if only one value
setattr(self, key, array)
def add_entries(self, **entries):
"""Turn elements in arrays into attributes with a corresponding official field name."""
for key, array in entries.items():
setattr(self, key, key)
setattr(self, key.lower(), key)
if isinstance(array, pd.core.series.Series):
array = array.values
for value in array:
if not pd.isnull(value):
setattr(self, value, key)
setattr(self, value.lower(), key)
def add_entries_from_keylist(self, data, from_combo_keys=None, from_synonyms=None, to_key=''):
"""Create mapping attributes for ShipMapping().
Args:
data (dict):
from_combo_keys (list): list of keys
from_synonyms (list): list of keys
to_key (str):
"""
from_combo_keys = from_combo_keys or []
from_synonyms = from_synonyms or []
for i, value in enumerate(data[to_key]):
setattr(self, value, value)
if any(from_combo_keys):
setattr(self, ''.join([data[key][i].zfill(2) for key in from_combo_keys]), value)
if any(from_synonyms):
for key in from_synonyms:
setattr(self, data[key][i], value)
setattr(self, data[key][i].upper(), value)
def keys(self):
"""Return list of keys from self attributes."""
return list(self.__dict__.keys())
def get(self, key):
"""Get attribute from self using key."""
try:
return getattr(self, key)
except AttributeError:
try:
return getattr(self, key.lower())
except Exception:
if '[' in key:
try:
key = key.split('[')[0].strip()
return getattr(self, key.lower())
except Exception:
# print('No mapping found for key: ' + key)
return key
else:
# print('No mapping found for key: ' + key)
return key
def get_list(self, key_list):
"""Get list of values from self attributes based on key_list."""
return [self.get(key) for key in key_list]
def get_mapping_dict(self, key_list):
"""Get dictionary from self attributes based on key_list."""
return dict([(key, self.get(key)) for key in key_list])
def __getitem__(self, key):
"""Get item from self. If not key exists return None"""
return getattr(self, key)
class ParameterMapping(AttributeDict):
"""Load file to map data fields and parameters to a standard setting format."""
def __init__(self):
"""Initialize."""
super().__init__()
def map_parameter_list(self, para_list, ext_list=False):
"""Return mapped parameter list.
Args:
para_list (list): list of parameters
ext_list (bool): False or True, NotImplemented
"""
return self.get_list(para_list)
def get_parameter_mapping(self, para_list, ext_list=False):
"""Get a dictionary with mapped parameters from the given para_list."""
return self.get_mapping_dict(para_list)
class ShipMapping(AttributeDict):
"""Load file to map 2sign-cntry and 2sign-shipc to 4sign-shipc (ICES / SMHI)."""
def __init__(self):
"""Initialize."""
super().__init__()
def map_cntry_and_shipc(self, cntry=None, shipc=None):
"""Get SHIP code (according to standard of ICES)."""
return self.get(cntry + shipc)
def map_shipc(self, cntry_shipc):
"""Map SHIP code (according to standard of ICES)."""
return self.get(cntry_shipc)
```
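A hedged sketch of how the mapping classes above are typically used; the synonym table is invented here, whereas ctdpy normally loads it from the package's settings files.
```python
import pandas as pd
from ctdpy.core.mapping import ParameterMapping

mapping = ParameterMapping()
# Each keyword is an official field name; the Series holds synonyms found in raw files.
mapping.add_entries(**{
    'TEMP_CTD': pd.Series(['t090C', 'TEMP']),
    'SALT_CTD': pd.Series(['sal00', 'PSAL']),
})

print(mapping.get('t090C'))                        # -> 'TEMP_CTD'
print(mapping.map_parameter_list(['sal00', 'x']))  # -> ['SALT_CTD', 'x'] (unknown keys pass through)
```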
#### File: core/readers/umsc.py
```python
import re
import numpy as np
from ctdpy.core import utils
from ctdpy.core.readers.seabird import SeaBird
from ctdpy.core.readers.metadata import XLSXmeta
from ctdpy.core.calculator import Calculator
class SeaBirdUMSC(SeaBird):
"""Reader for seabird data according to UMSC processing routines."""
def __init__(self, settings):
"""Initialize."""
super().__init__(settings)
@staticmethod
def add_calculated_parameters(df, latit):
"""Add caluculated parameters to dataframe."""
calc = Calculator()
df['DEPH'] = calc.get_true_depth(attribute_dictionary={'latitude': latit,
'pressure': df['PRES_CTD'].astype(float),  # builtin float: np.float was removed in NumPy 1.24+
'gravity': df['PRES_CTD'].astype(float),
'density': df['DENS_CTD'].astype(float)})
def _convert_formats(self, meta_dict, filename):
"""Set and/or convert formats of metadata."""
timestamp = self._get_datetime(meta_dict['SDATE'])
meta_dict['SDATE'] = utils.get_format_from_datetime_obj(timestamp, '%Y-%m-%d')
meta_dict['STIME'] = utils.get_format_from_datetime_obj(timestamp, '%H:%M')
# meta_dict['SERNO'] = self._get_serno(meta_dict['SERNO'])
# meta_dict.setdefault('PROJ', 'BAS')
# meta_dict.setdefault('ORDERER', 'HAV, SMHI')
meta_dict.setdefault('SLABO', 'UMSC')
meta_dict.setdefault('ALABO', 'UMSC')
meta_dict.setdefault('POSYS', 'GPS')
if filename:
meta_dict['FILE_NAME'] = filename
def get_metadata(self, serie, map_keys=True, filename=None):
"""Return dictionary with metadata."""
meta_dict = {}
for ident, sep in zip(['identifier_metadata', 'identifier_metadata_2'],
['separator_metadata', 'separator_metadata_2']):
data = self.get_meta_dict(serie,
identifier=self.settings.datasets['cnv'].get(ident),
separator=self.settings.datasets['cnv'].get(sep),
keys=self.settings.datasets['cnv'].get('keys_metadata'))
meta_dict = utils.recursive_dict_update(meta_dict, data)
if map_keys:
new_dict = {}
for key in meta_dict:
if meta_dict[key]:
new_dict.setdefault(self.settings.pmap.get(key), meta_dict[key])
meta_dict = new_dict
self._convert_formats(meta_dict, filename)
return meta_dict
def get_meta_dict(self, series, keys=None, identifier='', separator=''):
"""Get metadata information for a specific identifier and separator."""
keys = keys or []
meta_dict = {}
boolean_startswith = self.get_index(series, identifier, as_boolean=True)
if any(keys):
for key in keys:
boolean_contains = self.get_index(series, key, contains=True,
as_boolean=True)
boolean = boolean_startswith & boolean_contains
if any(boolean):
if key == 'SERIAL NO':
meta = re.search('SERIAL NO. (.+?) ',
series[boolean].iloc[0]).group(1)
else:
meta = series[boolean].tolist()[0].split(separator)[-1].strip()
meta_dict.setdefault(key, meta)
else:
return series.loc[boolean_startswith]
return meta_dict
def _setup_dataframe(self, serie, metadata=None):
"""Convert pandas Serie into pandas DataFrame."""
header = self.get_data_header(serie, dataset='cnv')
df = self.get_data_in_frame(serie, header, dataset='cnv')
df = self.df_handler.map_column_names_of_dataframe(df)
# TODO use metadata['LATIT'] instead of 62.
self.add_calculated_parameters(df, latit=62.) # metadata['LATIT'])
return df
class MetadataUMSC(XLSXmeta):
"""Metadata Class for UMSC reader."""
def __init__(self, settings):
"""Initialize."""
super().__init__(settings)
self.data = {}
self.file_specs = self.settings.readers['umsc']['datasets']['xlsx']
``` |
{
"source": "JohannesSMHI/ctdvis",
"score": 3
} |
#### File: ctdvis/tests/app_to_serve.py
```python
from bokeh.plotting import curdoc
from ctdvis.session import Session
"""
Ref: https://stackoverflow.com/questions/55049175/running-bokeh-server-on-local-network
In a conda-prompt run:
cd "PATH_TO_THIS_SCRIPT"
bokeh serve app_to_serve.py
Bokeh app running at: http://localhost:5006/app_to_serve
"""
def bokeh_qc_tool():
"""Return bokeh layout.
Path to CTD-standard-format (including auto-QC-fields).
"""
data_dir = r'C:\Temp\CTD_DV\test_flex_format'
filters = None
# filters = dict(
# # month_list=[1, 2, 3],
# month_list=[4],
# # month_list=[7, 8, 9],
# # month_list=[10, 11, 12],
# # ship_list=['77SE', '34AR']
# # serno_min=311,
# # serno_max=355,
# )
s = Session(
# visualize_setting='slua_vis',
visualize_setting='smhi_expedition_vis',
# visualize_setting='smhi_vis',
data_directory=data_dir,
filters=filters,
)
s.setup_datahandler()
layout = s.run_tool(return_layout=True)
return layout
bokeh_layout = bokeh_qc_tool()
doc = curdoc()
doc.add_root(bokeh_layout)
``` |
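As an alternative to the `bokeh serve` command shown in the docstring, the same layout can be served programmatically with Bokeh's embedded server. This is only a hedged sketch that reuses the `bokeh_qc_tool` function defined above and assumes ctdvis plus a valid data directory are available.
```python
from bokeh.server.server import Server

def make_document(doc):
    # Build a fresh QC layout for every new browser session.
    doc.add_root(bokeh_qc_tool())

server = Server({'/app_to_serve': make_document}, port=5006)
server.start()
server.io_loop.add_callback(server.show, '/app_to_serve')
server.io_loop.start()
```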