{
"source": "jeanfrancis/codeforcmr.cm",
"score": 3
}
#### File: jeanfrancis/codeforcmr.cm/update_avatars.py
```python
import glob
import os
import logging
import requests
import yaml
from email.utils import formatdate
logging.basicConfig(level=logging.INFO)
BLACKLIST = ('blog', '_site',)
AVATAR_PATH = 'img/avatars/'
def get_avatar(username, last_modified):
"""
Gets the users avatar from the github api whilst using the If-Modified-Since header to work around rate limits.
:param username: the github username
:param last_modified: timestamp formatted in the http stlye
:return: True if the profile didn't change didn't change, False if there was an error or the avatar
"""
headers = {'If-Modified-Since': last_modified}
github_auth_token = os.environ.get("GITHUB_AUTH_TOKEN", None)
if github_auth_token:
headers["Authorization"] = "token " + github_auth_token
response = requests.get('https://api.github.com/users/' + username, headers=headers)
if response.status_code == 304:
return False
if response.status_code != 200:
logging.error('Unexpected HTTP status code {} returned for {}'.format(response.status_code, username))
return False
url = response.json()['avatar_url']
    avatar = requests.get(url, headers=headers)
    if avatar.status_code == 304:
        return False
    if avatar.status_code != 200:
        logging.error('Unexpected HTTP status code {} returned for {}'.format(avatar.status_code, username))
        return False
return avatar.content
def main():
# Get date for the If-Last-Modified header
with open(os.path.join(AVATAR_PATH, 'last_modified.txt')) as fd:
last_modified = fd.readline().rstrip()
current_timestamp_formatted = formatdate(timeval=None, localtime=False, usegmt=True)
usernames = set()
labs = [x for x in glob.glob('_labs/*.yml') if not x.startswith(BLACKLIST)]
for lab in labs:
with open(lab) as f:
contents = f.read()
try:
_, frontmatter = contents.split('---\n', 2)
except ValueError:
_, frontmatter, _ = contents.split('---\n', 2)
        meta = yaml.safe_load(frontmatter)
if 'members' not in meta:
continue
for member in meta['members']:
if member['name'] is None:
continue
username = member.get('username-github')
            if username is None:
continue
usernames.add((username, member['name']))
usernames = sorted(list(usernames))
logging.info("Found %d users", len(usernames))
for username, member_name in usernames:
logging.info('Processing Lab Member %s (%s)', member_name, username)
avatar = get_avatar(username, last_modified)
if not avatar:
continue
avatar_path = os.path.join(AVATAR_PATH, username + '.jpg')
print(avatar_path, AVATAR_PATH, username)
logging.info('Saving image to %s', avatar_path)
with open(avatar_path, 'wb') as fd:
fd.write(avatar)
# Remember the last successful run for the If-Last-Modified header
with open(os.path.join(AVATAR_PATH, 'last_modified.txt'), 'w') as fd:
fd.write(current_timestamp_formatted)
if __name__ == '__main__':
main()
```
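A minimal standalone sketch of how `get_avatar` might be exercised; the username and output path are illustrative, and network access (plus, optionally, a `GITHUB_AUTH_TOKEN` environment variable) is assumed.

```python
# Illustrative only: the username and output path are placeholders.
from email.utils import formatdate

# An HTTP-date at the epoch forces a fresh download (no 304 short-circuit).
last_modified = formatdate(timeval=0, localtime=False, usegmt=True)
avatar_bytes = get_avatar("octocat", last_modified)
if avatar_bytes:
    with open("img/avatars/octocat.jpg", "wb") as fd:
        fd.write(avatar_bytes)
```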
{
"source": "JeanFred/inteGraality",
"score": 3
}
#### File: inteGraality/integraality/column_config.py
```python
from enum import Enum
from ww import f
class GroupingType(Enum):
YEAR = "year"
class ColumnSyntaxException(Exception):
pass
class ColumnConfigMaker:
@staticmethod
def make(key, title):
if key.startswith('P'):
splitted = key.split('/')
if len(splitted) == 3:
(property_name, value, qualifier) = splitted
elif len(splitted) == 2:
(property_name, value, qualifier) = (splitted[0], None, splitted[1])
else:
(property_name, value, qualifier) = (key, None, None)
return PropertyConfig(property=property_name, title=title, qualifier=qualifier, value=value)
        elif key.startswith('L'):
            return LabelConfig(language=key[1:], title=title)
        elif key.startswith('D'):
            return DescriptionConfig(language=key[1:], title=title)
else:
raise ColumnSyntaxException("Unknown column syntax %s" % key)
class ColumnConfig:
def get_info_query(self, property_statistics):
"""
Get the usage counts for a column for the groupings
:return: (str) SPARQL query
"""
query = f("""
SELECT ?grouping (COUNT(DISTINCT ?entity) as ?count) WHERE {{
?entity {property_statistics.selector_sparql} .""")
if property_statistics.grouping_type == GroupingType.YEAR:
query += f("""
?entity wdt:{property_statistics.grouping_property} ?date .
BIND(YEAR(?date) as ?grouping).""")
else:
query += f("""
?entity wdt:{property_statistics.grouping_property} ?grouping .""")
query += f("""
FILTER(EXISTS {{{self.get_filter_for_info()}
}})
}}
GROUP BY ?grouping
HAVING (?count >= {property_statistics.property_threshold})
ORDER BY DESC(?count)
LIMIT 1000
""")
return query
def get_totals_query(self, property_statistics):
"""
Get the totals of entities with the column set.
:return: (str) SPARQL query
"""
query = f("""
SELECT (COUNT(*) as ?count) WHERE {{
?entity {property_statistics.selector_sparql}
FILTER(EXISTS {{{self.get_filter_for_info()}
}})
}}
""")
return query
def get_info_no_grouping_query(self, property_statistics):
"""
Get the usage counts for a column without a grouping
:return: (str) SPARQL query
"""
query = f("""
SELECT (COUNT(*) AS ?count) WHERE {{
?entity {property_statistics.selector_sparql} .
MINUS {{ ?entity wdt:{property_statistics.grouping_property} _:b28. }}
FILTER(EXISTS {{{self.get_filter_for_info()}
}})
}}
""")
return query
class PropertyConfig(ColumnConfig):
def __init__(self, property, title=None, value=None, qualifier=None):
self.property = property
self.title = title
self.value = value
self.qualifier = qualifier
def __eq__(self, other):
return (
self.property == other.property
and self.title == other.title
and self.value == other.value
and self.qualifier == other.qualifier
)
def get_title(self):
return "/".join([x for x in [self.property, self.value, self.qualifier] if x])
def get_key(self):
return "".join([x for x in [self.property, self.value, self.qualifier] if x])
def make_column_header(self):
if self.qualifier:
property_link = self.qualifier
else:
property_link = self.property
if self.title:
label = f('[[Property:{property_link}|{self.title}]]')
else:
label = f('{{{{Property|{property_link}}}}}')
return f('! data-sort-type="number"|{label}\n')
def get_filter_for_info(self):
if self.qualifier:
return f("""
?entity p:{self.property} [ ps:{self.property} {self.value or '[]'} ; pq:{self.qualifier} [] ]""")
else:
return f("""
?entity p:{self.property}[]""")
class TextConfig(ColumnConfig):
def __init__(self, language, title=None):
self.language = language
self.title = title
def __eq__(self, other):
return (
self.language == other.language
and self.title == other.title
)
def get_title(self):
return self.get_key()
def make_column_header(self):
if self.title:
text = f('{self.title}')
else:
text = f('{{{{#language:{self.language}}}}}')
return f('! data-sort-type="number"|{text}\n')
def get_filter_for_info(self):
return f("""
?entity {self.get_selector()} ?lang_label.
FILTER((LANG(?lang_label)) = '{self.language}').""")
class LabelConfig(TextConfig):
def get_key(self):
return 'L%s' % self.language
def get_selector(self):
return 'rdfs:label'
class DescriptionConfig(TextConfig):
def get_key(self):
return 'D%s' % self.language
def get_selector(self):
return 'schema:description'
```
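As a quick illustration of the key grammar handled by `ColumnConfigMaker.make` (plain property, property with qualifier, label/description columns), here is a hedged sketch; the property IDs and language code are made up for the example.

```python
# The keys below are invented for illustration.
prop = ColumnConfigMaker.make("P18", "image")            # plain property column
qual = ColumnConfigMaker.make("P180/P518", "applies")    # property qualified by another property
label = ColumnConfigMaker.make("Lfr", None)              # label column for language "fr"
print(prop.get_key(), qual.get_title(), label.get_key())  # -> P18 P180/P518 Lfr
```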
{
"source": "jeanggi90/sensorCar",
"score": 4
}
#### File: jeanggi90/sensorCar/dataSet.py
```python
import numpy as np
# import linecache # Get a specific line of a file
import os.path # check if a file exists at a certain path
import random # Shuffle lines in dataset
class DataSet():
"""
DataSets is responsible for processing, normalising and providing
dataSets entires to other classes for training of the network.
"""
fullDataSetPath = None
trainingDataSetPath = None
testDataSetPath = None
def __init__(self, fullDataSetPath, inputLabelNumber, trainingTestRatio=[9, 1]):
"""
Initiate dataSet with a fullDataSetPath, inputLabelNumber an array
representing the ration between inputs and lables. Optionally a
trainingTestRatio array cen be given which determines the ration
between training and test data. Default is 9:1
"""
self.inputLabelNumber = inputLabelNumber
self.trainingTestRatio = trainingTestRatio
# Check if path is valid and file exists
if os.path.exists(fullDataSetPath):
self.fullDataSetPath = fullDataSetPath
# Check if the trainingDataSetPath and testDataSetPath file already exists
trainingDataSetPath = self.fullDataSetPath[:self.fullDataSetPath.rfind(".")] + "_training.txt"
testDataSetPath = self.fullDataSetPath[:self.fullDataSetPath.rfind(".")] + "_test.txt"
# Assign them to attribute if they exists
if os.path.exists(trainingDataSetPath) and os.path.exists(testDataSetPath):
print("trainingDataSetPath and testDataSetPath exist, assigning them to attributes")
self.trainingDataSetPath = trainingDataSetPath
self.testDataSetPath = testDataSetPath
# Generate them if they do not exists yet
else:
self.splitDataSet()
else:
print("Given path is invalid. Reasign right path to attribute")
def normalizeInput(self, vector):
"""
Normalizes the vector by return a vector with the reciprocal value
of each element in vector
"""
return np.divide(1, vector, out=np.zeros_like(vector), where=vector != 0)
def splitDataSet(self):
"""
Split the fullDataSetPath by the trainingTestRation into two files,
which are saved in the same path as the fullDataSetPath but with the
ending "_training.txt" resp. "_test.txt".
"""
print("Splitting fullDataSetPath into trainingDataSetPath and testDataSetPath")
# Get number of lines(=data) in the fullDataSetPath
numberOfLines = 0
with open(self.fullDataSetPath, "r") as ff:
for line in ff:
numberOfLines += 1
self.trainingDataSetPath = self.fullDataSetPath[:self.fullDataSetPath.rfind(".")] + "_training.txt"
self.testDataSetPath = self.fullDataSetPath[:self.fullDataSetPath.rfind(".")] + "_test.txt"
# Get the number of elements for the training set (testset equals the remainder)
splitRatioSum = float(self.trainingTestRatio[0] + self.trainingTestRatio[1])
numberTrainingEntities = int(round(float(self.trainingTestRatio[0]) * numberOfLines / splitRatioSum))
        # Split the entities of the fullDataSetPath into the two files
        with open(self.fullDataSetPath, "r") as ff, \
                open(self.trainingDataSetPath, "a") as trf, \
                open(self.testDataSetPath, "a") as tef:
            for (i, line) in enumerate(ff):
                if i < numberTrainingEntities:
                    trf.write(line)
                else:
                    tef.write(line)
print("Done creating training and test dataSet")
def shuffleDataSet(self, dataSetPath):
"""
dataSetPath is the path to the dataset which is then shuffled and
saved
"""
with open(dataSetPath, "r+") as f:
lines = f.readlines()
random.shuffle(lines)
f.seek(0)
f.writelines(lines)
def getStats(self):
"""
Analyses the dataset and gives the following statis about it:
Extrema of each collumn, mean of each collumn
"""
print("Analysing dataset")
with open(self.fullDataSetPath, "r") as ff:
# Get the first line in order to get the number of columns and set the extrema to the values of the first line
firstLine = ff.readline().strip()
firstLineEntities = np.array([float(i) for i in firstLine.split("\t")], dtype=np.float128)
numberOfColumns = firstLine.count("\t") + 1
            # Holds the max value of each column in the first matrix row and the min value in the second row
            # For initialisation, set the firstLine's entities as the extrema
extremaVector = np.array([firstLineEntities, firstLineEntities], dtype=np.float128)
# Holds the sum of each column
sumVector = np.zeros(numberOfColumns)
numberOfLines = 0
# Get one line after another
for line in ff:
lineEntities = np.array([float(i) for i in line.split("\t")])
sumVector = np.add(lineEntities, sumVector)
                # Check whether each entity is an extremum and assign it to the extremaVector if so
for (i, entity) in enumerate(lineEntities):
# If max
if entity > extremaVector[0][i]:
extremaVector[0][i] = entity
# If min
if entity < extremaVector[1][i]:
extremaVector[1][i] = entity
numberOfLines += 1
print("NumberOfColumns: {0},\nMaxValue: {1},\nMinValue: {2},\nNumberOfLines: {3},\nMeanValue: {4}".format(numberOfColumns, extremaVector[0], extremaVector[1], numberOfLines, np.divide(sumVector, numberOfLines)))
```
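A hedged usage sketch of `DataSet`; the dataset path is hypothetical and the file is assumed to contain tab-separated numeric rows, as `getStats` expects.

```python
# Hypothetical path; the constructor creates *_training.txt and *_test.txt next to it on first use.
dataset = DataSet("dataset/track.txt", inputLabelNumber=[3, 1])
dataset.shuffleDataSet(dataset.trainingDataSetPath)  # shuffle the training split in place
dataset.getStats()                                   # print per-column extrema, means and line count
```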
#### File: jeanggi90/sensorCar/experimentNetTF.py
```python
import tensorflow as tf
import numpy as np
import os
import shutil
from functools import wraps
def callOnce(inputFunc):
attribute = "_cache_" + inputFunc.__name__
@property
@wraps(inputFunc)
def checkAttribute(self):
if not hasattr(self, attribute):
setattr(self, attribute, inputFunc(self))
return getattr(self, attribute)
return checkAttribute
class ExperimentNetTF:
def __init__(self, shape, learningRate):
self.shape = shape
self.x = tf.placeholder(tf.float32, shape=[None, self.shape[0]], name="InputData")
self.y = tf.placeholder(tf.float32, shape=[None, self.shape[-1]], name="LabelData")
self.weights = self._getInitWeights()
self.logDir = "./log/experiment"
        shutil.rmtree(self.logDir, ignore_errors=True)
os.makedirs(self.logDir)
self.learningRate = learningRate
self.summaryWriter = tf.summary.FileWriter(self.logDir, graph=tf.get_default_graph())
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.output
self.optimizer
self.loss
tf.summary.scalar("loss", self.loss)
self.mergedSummary = tf.summary.merge_all()
def _getInitWeights(self):
return [tf.Variable(tf.truncated_normal([fromLayer, toLayer], stddev=0.1, name="Weight{}".format(i))) for i, (fromLayer, toLayer) in enumerate(zip(self.shape[:-1], self.shape[1:]))]
def train(self, datasetPath, epochs=1):
costMeanList = []
for epoch in range(epochs):
print(f"Epoch {epoch + 1}")
with open(datasetPath, "r") as ds:
costList = []
for i, line in enumerate(ds):
lineEntities = np.array([float(i) for i in line.split(",")], dtype=np.float128)
inputs = np.reshape(lineEntities[:3], (1, 3))
labels = np.reshape(np.divide(lineEntities[3:], 25), (1, 1))
# inputs = np.reshape(lineEntities[:2], (1, 2))
# labels = np.reshape(lineEntities[2:], (1, 1))
_, loss, summary = self.sess.run([self.optimizer, self.loss, self.mergedSummary], {self.x: inputs, self.y: labels})
costList.append(loss)
self.summaryWriter.add_summary(summary, epoch * 1000 + epoch + i)
tempList = np.array(costList)
costMeanList.append(np.mean(tempList))
addListSummary = tf.Summary()
addListSummary.value.add(tag="MeanLoss", simple_value=np.mean(tempList))
self.summaryWriter.add_summary(addListSummary, epoch)
self.summaryWriter.flush()
self.saveTrainingData("./experimentSave/test.txt", costMeanList)
def getPrediction(self, xData):
return self.sess.run(self.output, {self.x: xData})
@callOnce
def output(self):
layerInput = self.x
for weight in self.weights:
layerInput = tf.math.tanh(tf.matmul(layerInput, weight))
return layerInput
@callOnce
def loss(self):
return tf.reduce_mean(tf.square(self.y - self.output))
# return tf.square(self.y - self.output)
@callOnce
def optimizer(self):
return tf.train.GradientDescentOptimizer(self.learningRate).minimize(self.loss)
def saveTrainingData(self, filePath, lossList):
file = open(filePath, "a")
for loss in lossList:
file.write(str(loss) + "\n")
file.close()
    def doSave(self, step):
        # Assumes self.saver (a tf.train.Saver) and self.savePath have been set beforehand.
        savePath = self.saver.save(self.sess, os.path.join(self.savePath, "model"), global_step=step)
        print("Saved current model to {}".format(savePath))
if __name__ == '__main__':
net = ExperimentNetTF([3, 10, 1], learningRate=0.0005)
net.train("simulation/dataset/trackMaster1k.txt", epochs=10)
# net.train("simulation/dataset/testAnd.txt", epochs=100)
```
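The `__main__` block above shows training; as a complementary hedged sketch (a TensorFlow 1.x environment is assumed, since the code relies on placeholders and sessions), a prediction can be obtained directly.

```python
# Sketch only: requires TensorFlow 1.x; the input values are arbitrary.
import numpy as np
net = ExperimentNetTF([3, 10, 1], learningRate=0.0005)
sample = np.array([[0.2, 0.5, 0.9]], dtype=np.float32)  # one row of 3 sensor readings
print(net.getPrediction(sample))                        # -> array of shape (1, 1)
```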
{
"source": "jeanguanda/vmanage-dailybackup",
"score": 3
}
#### File: jeanguanda/vmanage-dailybackup/dailybackup.py
```python
import datetime as DT
import subprocess
import sys
from netmiko import ConnectHandler
keyfile = "vmanage"
logfile = "backupjob.log"
backup_path = "./backupdata"
login_info = {
"device_type": "linux",
"host": "10.75.58.50",
"username": "admin",
"use_keys": True,
"key_file": keyfile,
}
date = str(DT.date.today())
week_ago = DT.datetime.today() - DT.timedelta(days=7)
week_ago = str(week_ago.date())
zerofile = "/tmp/confdb_backup" + week_ago + ".tar.gz"
logtitle = "=" * 15 + "Day of " + date + "=" * 15 + "\n"
class SSHjob:
"""SSHjob defines a class for a job running through SSH by
calling the module netmiko.
...
Attributes
----------
net_connect : netmiko return object.
backup_ret : str
The return of running backup on vmanage.
ret1 : str
The first return, copy backup file.
ret2 : str
The second return, copy zero size file.
Methods
-------
connect():
Call the netmiko to connect.
run_backup():
Run backup request on vmanage.
copy_backup_file():
Copy backup file through scp.
copy_zero_file():
Copy zero size file to vmanage.
disconnect():
Disconnect vmanage
"""
def __init__(self):
self.net_connect = None
self.backup_ret = None
self.ret1 = None
self.ret2 = None
def connect(self):
self.net_connect = ConnectHandler(**login_info)
def run_backup(self):
        backup_cmd = (
            "request nms configuration-db backup path "
            "/home/admin/confdb_backup"
            + date
        )
self.backup_ret = self.net_connect.send_command(backup_cmd)
def copy_backup_file(self):
runcmd = (
"scp -i "
+ keyfile
+ " "
+ login_info["username"]
+ "@"
+ login_info["host"]
+ ":"
+ "/home/admin/confdb_backup"
+ date
+ ".tar.gz "
+ backup_path
)
self.ret1 = str(
subprocess.run(
runcmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
timeout=5,
)
)
def copy_zero_file(self):
runcmd = (
"touch "
+ zerofile
+ " && "
+ "scp -i vmanage "
+ zerofile
+ " admin@"
+ login_info["host"]
+ ":/home/admin/"
+ " && "
+ "rm "
+ zerofile
)
self.ret2 = str(
subprocess.run(
runcmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
timeout=5,
)
)
def disconnect(self):
self.net_connect.disconnect()
def main():
jobstart = str(DT.datetime.now())
backup_job = SSHjob()
backup_job.connect()
backup_job.run_backup()
backup_job.copy_backup_file()
backup_job.copy_zero_file()
backup_job.disconnect()
jobend = str(DT.datetime.now())
logdata = (
logtitle
+ jobstart
+ " Job started...\n"
+ backup_job.backup_ret
+ "\n"
+ backup_job.ret1
+ "\n"
+ backup_job.ret2
+ "\n"
+ jobend
+ " Job ended...\n"
)
with open(logfile, "a") as fobj:
fobj.write(logdata)
sys.exit(0)
if __name__ == "__main__":
main()
```
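For interactive testing, a hedged sketch of running just the backup step; it assumes the SSH key file `vmanage` exists and the vManage host in `login_info` is reachable.

```python
# Sketch only: runs the backup request without copying files.
job = SSHjob()
job.connect()
job.run_backup()
print(job.backup_ret)
job.disconnect()
```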
{
"source": "jean-heibig/PokerZ",
"score": 3
}
#### File: poker_ai/interface/text.py
```python
from time import time
from .translate import Translate
from ..blueprint.game.rules import Combo
# Unicode suit symbols are available, but the ASCII letters below are the ones actually used.
# color_chars = {2: '♠', 3: '♦', 5: '♥', 7: '♣'}
color_chars = {2: 's', 3: 'd', 5: 'h', 7: 'c'}
class Text:
"""Text for logs or graphic interface."""
def __init__(self, language):
self.log_name = "log/tree/{}.txt".format(str(time()).replace('.', '_'))
self.texts = Translate(language)
def get_player_showdown(self, player_nbr):
player = self.deal.players[player_nbr]
if player.alive:
hand_eval = self.hand_evals[player_nbr]
if self.best_eval <= hand_eval:
self.second_best_eval = self.best_eval
self.best_eval = hand_eval
self.showed.append(player_nbr)
def get_round_actions(self):
"""Return the bounds of each betting rounds."""
folds = 0
calls = 0
checks = 0
round_end = []
for action_nbr in range(len(self.deal.actions)):
move = self.deal.actions[action_nbr].move
if move == "bet":
calls = 0
checks = 0
elif move == "call":
calls += 1
elif move == "check":
checks += 1
elif move == "fold":
folds += 1
elif move == "raise":
calls = 0
checks = 0
if 4 < action_nbr:
if action_nbr == 5 and move == "check":
round_end.append(action_nbr + 1)
calls = 0
checks = 0
elif folds + calls == 5 or folds + checks == 6:
round_end.append(action_nbr + 1)
calls = 0
checks = 0
while len(round_end) < 3:
round_end.append(len(self.deal.actions))
return [self.deal.actions[:round_end[0]],
self.deal.actions[round_end[0]:round_end[1]],
self.deal.actions[round_end[1]:round_end[2]],
self.deal.actions[round_end[2]:]]
def primes_eval(self, hand_eval):
if hand_eval[0] in [2, 6]:
return hand_eval[1][:2]
else:
return [hand_eval[1][0]]
def primes_kickers(self, best_eval, hand_eval):
if hand_eval[0] in [2, 6]:
best_kickers = best_eval[1][2:]
hand_kickers = hand_eval[1][2:]
else:
best_kickers = best_eval[1][1:]
hand_kickers = hand_eval[1][1:]
kickers = []
for kicker_nbr in range(len(best_kickers)):
kickers.append(hand_kickers[kicker_nbr])
if best_kickers[kicker_nbr] == hand_kickers[kicker_nbr]:
pass
else:
return kickers
return kickers
def process_action(self, move):
"""Set hand states after each action."""
# Update bettors.
if move == "fold": # Fold.
self.alive_nbr -= 1
if move == "raise": # Raise.
self.bettors_nbr = self.alive_nbr
self.bettors_nbr -= 1
def reduce_evals(self):
"""Use kickers only for winners if any doubt.
Other hands are always in short form.
"""
win_player_nbr = min([player_nbr for player_nbr in self.showed
if self.hand_evals[player_nbr] == self.best_eval])
best_primes = self.primes_eval(self.best_eval)
for player_nbr in self.showed:
hand_eval = self.hand_evals[player_nbr]
hand_primes = self.primes_eval(hand_eval)
if player_nbr == win_player_nbr:
self.reduce_best_hand(hand_eval, hand_primes, player_nbr)
else:
if hand_eval[0] < self.best_eval[0]:
self.hand_evals[player_nbr] = ('short', hand_eval[0])
elif hand_primes == best_primes:
kickers = self.primes_kickers(self.best_eval, hand_eval)
if kickers == []:
self.hand_evals[player_nbr] = ('long', [hand_eval[0]] +
hand_primes)
else:
self.hand_evals[player_nbr] = ('kickers',
[hand_eval[0]] +
hand_primes, kickers)
if hand_eval[0] == 9:
self.hand_evals[player_nbr] = ('short', 9)
else:
self.hand_evals[player_nbr] = ('long', [hand_eval[0]] +
hand_primes)
def reduce_best_hand(self, hand_eval, hand_primes, player_nbr):
second_best_primes = self.primes_eval(self.second_best_eval)
if self.second_best_eval[0] < hand_eval[0]:
self.hand_evals[player_nbr] = ('short', hand_eval[0])
elif hand_primes == second_best_primes:
kickers = self.primes_kickers(self.second_best_eval, hand_eval)
if kickers == []:
self.hand_evals[player_nbr] = ('long', [hand_eval[0]] +
hand_primes)
else:
self.hand_evals[player_nbr] = ('kickers', [hand_eval[0]] +
hand_primes, kickers)
if hand_eval[0] == 9:
self.hand_evals[player_nbr] = ('short', 9)
else:
self.hand_evals[player_nbr] = ('long', [hand_eval[0]] +
hand_primes)
def write_action(self, action):
"""Represent one action."""
self.write_log(self.texts.action_text(action.move,
action.total_value))
def write_actions(self):
"""Print hand history."""
self.alive_nbr = 6
self.allin_nbr = 0
self.bettors_nbr = 6
self.stage = 2
round_actions = self.get_round_actions()
while 1 < self.alive_nbr and self.stage < 6:
if 0 < self.bettors_nbr and self.allin_nbr < self.alive_nbr:
self.write_board()
self.write_log(self.texts.text.start_round_text)
for action in round_actions[self.stage - 2]:
self.write_action(action)
self.process_action(action.move)
self.allin_nbr += action.allin
self.bettors_nbr = self.alive_nbr
self.write_log(self.texts.text.end_round_text)
self.stage += 1
else:
self.write_log('\n')
return
self.write_log('\n')
def write_board(self):
"""Show board through different stages."""
self.write_log('')
self.write_log(self.texts.stage_text(self.stage))
if 2 < self.stage:
self.write_log(self.texts.board_text(self.deal.board, self.stage))
self.write_log('')
def write_deal(self, deal, hand_nbr):
"""Print hand history."""
self.best_eval = 0, [0]
self.deal = deal
self.second_best_eval = 0, [0]
self.showed = []
self.write_start(hand_nbr)
self.write_actions()
self.write_showdown()
self.write_result()
def write_player_showdown(self, player_nbr):
player = self.deal.players[player_nbr]
if player.alive:
hand_eval = self.hand_evals[player_nbr]
if player_nbr in self.showed:
hole_cards = self.deal.hole_cards[player_nbr]
self.write_log(self.texts.reveal_text(hand_eval,
hole_cards,
player.player_name))
else:
self.write_log(self.texts.muck_text(player.player_name))
def write_start(self, hand_nbr):
"""Show deal state at beginning of play."""
self.write_log(self.texts.hand_nbr_text(hand_nbr))
self.write_log('')
[self.write_log(self.texts.info_text(10000 - (player_nbr < 3) * 50 *
player_nbr,
self.deal.hole_cards[player_nbr],
self.deal.players[player_nbr]
.player_name))
for player_nbr in range(6)]
self.write_log('')
def write_result(self):
"""Show result of play."""
self.write_log(self.texts.text.show_result_text)
for player in self.deal.players:
self.write_log(self.texts.result_text(player.player_name,
player.chips - 10000))
self.write_log('\n\n')
def write_showdown(self):
"""Show cards at showdown."""
if 1 < self.alive_nbr:
while self.stage < 6:
self.write_board()
self.stage += 1
self.write_log(self.texts.text.showdown_text)
self.hand_evals = Combo(self.deal.board,
self.deal.hole_cards).hand_evals
for hand_nbr in range(6):
hand_eval = self.hand_evals[hand_nbr]
if hand_eval[0] == 8 and hand_eval[1][0] == 41:
hand_eval = (9, [0])
for player_nbr in range(3, 9):
self.get_player_showdown(player_nbr % 6)
self.reduce_evals()
for player_nbr in range(3, 9):
self.write_player_showdown(player_nbr % 6)
self.write_log('\n')
def write_log(self, text):
self.log = open(self.log_name, "a")
self.log.write(text + '\n')
self.log.close()
```
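A small hedged sketch of driving `Text` directly; it assumes a `log/tree/` directory exists (since `write_log` appends to a file there) and that `"en"` is a language code known to `Translate`.

```python
# Sketch only: a deal object with players, actions, board and hole_cards is needed for full output.
text = Text("en")
text.write_log("Session started")    # appends one line to the timestamped log file
# text.write_deal(deal, hand_nbr=1)  # would render a full hand history for a deal object
```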
{
"source": "JeanHenri79/mocodo",
"score": 2
}
#### File: mocodo/mocodo/argument_parser.py
```python
from __future__ import division
import argparse
import random
import os
import json
from .file_helpers import read_contents
from .common import version
import sys
import re
import gettext
import locale
from time import time
from io import open
from .mocodo_error import MocodoError
DESCRIPTION = """
NAME:
Mocodo - An Entity-Relation Diagram Generator.
DESCRIPTION:
Mocodo is an open-source tool for designing and teaching relational databases.
It takes as an input a textual description of both entities and associations
of an entity-relationship diagram (ERD). It outputs a vectorial drawing in SVG
and a relational schema in various formats (SQL, LaTeX, Markdown, etc.).
NOTE:
  Each one of the following values is:
  - explicitly specified by the user as a command line option;
  - otherwise, retrieved from a file located at --params_path;
  - otherwise, retrieved from a file named 'params.json' in the input directory;
  - otherwise, calculated from a default value, possibly dependent on your system.
"""
EPILOG = u"""
SEE ALSO:
Online version http://mocodo.net
Source code https://github.com/laowantong/mocodo
Localization https://www.transifex.com/aristide/mocodo/
LICENSE:
GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007.
CONTACT:
Mail <<EMAIL>
Author <NAME>
Address Universite de Lorraine
Laboratoire LCOMS - UFR MIM
Ile du Saulcy
57000 Metz
France
""" # NB: accents raise an error in Jupyter Notebook
class ArgumentDefaultsRawDescriptionHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
pass
def init_localization(script_directory, language):
if not language:
if sys.platform.lower().startswith("darwin") and os.system("defaults read -g AppleLanguages > /tmp/languages.txt") == 0:
language = re.search("\W*(\w+)", read_contents("/tmp/languages.txt")).group(1)
else:
try:
language = locale.getdefaultlocale()[0][:2]
except:
language = "en"
try:
with open("%s/res/messages_%s.mo" % (script_directory, language), "rb") as mo_contents:
trans = gettext.GNUTranslations(mo_contents)
except IOError:
trans = gettext.NullTranslations()
if sys.version_info.major == 2:
trans.install(unicode=True)
else:
trans.install()
return language
def has_expired(timeout):
if timeout:
timeout += time()
def inner_function():
return time() > timeout
else:
def inner_function():
return False
return inner_function
def rate(string):
try:
value = float(string)
except ValueError:
msg = "The rate %r cannot be coerced to float" % string
raise argparse.ArgumentTypeError(msg)
if not (0 <= value <= 1):
msg = "The rate %r is not between 0 and 1" % string
raise argparse.ArgumentTypeError(msg)
return value
def scale(string):
try:
value = float(string)
except ValueError:
msg = "The scale %r cannot be coerced to float" % string
raise argparse.ArgumentTypeError(msg)
if value <= 0:
msg = "The scale %r is not strictly positive" % string
raise argparse.ArgumentTypeError(msg)
return value
def non_negative_integer(string):
try:
value = int(string)
except ValueError:
msg = "The value %r cannot be coerced to an integer" % string
raise argparse.ArgumentTypeError(msg)
if value < 0:
msg = "The integer %r is negative" % string
raise argparse.ArgumentTypeError(msg)
return value
def positive_integer(string):
try:
value = int(string)
except ValueError:
msg = "The value %r cannot be coerced to an integer" % string
raise argparse.ArgumentTypeError(msg)
if value <= 0:
msg = "The integer %r is negative or zero" % string
raise argparse.ArgumentTypeError(msg)
return value
def parsed_arguments():
def add_key(key, value):
params[key] = value
params["added_keys"].append(key)
script_directory = os.path.dirname(os.path.realpath(os.path.join(__file__)))
parser = argparse.ArgumentParser(
prog="mocodo",
add_help=False,
formatter_class=ArgumentDefaultsRawDescriptionHelpFormatter,
description=DESCRIPTION,
epilog=EPILOG
)
mocodo_group = parser.add_argument_group("OPTIONS ON MOCODO ITSELF")
io_group = parser.add_argument_group("INPUT/OUTPUT")
aspect_group = parser.add_argument_group("ASPECT OF THE GRAPHICAL OUTPUT")
relational_group = parser.add_argument_group("RELATIONAL OUTPUT")
source_group = parser.add_argument_group("MODIFICATIONS OF THE SOURCE TEXT")
bb_group = parser.add_argument_group("BRANCH & BOUND LAYOUT REARRANGEMENT", "sub-options triggered by the option --arrange=bb")
ga_group = parser.add_argument_group("GENETIC ALGORITHM LAYOUT REARRANGEMENT", "sub-options triggered by the option --arrange=ga")
lp_group = parser.add_argument_group("LINEAR PROGRAMMING LAYOUT REARRANGEMENT", "sub-options triggered by the option --arrange=lp")
nb_group = parser.add_argument_group("NOTEBOOK SPECIFIC OPTIONS", "ignored when called from the command line")
if sys.platform.lower().startswith("darwin"):
default_params = {
"encodings": ["utf8", "macroman"],
"image_format": "nodebox" if os.path.exists("/Applications/NodeBox/NodeBox.app") or os.path.exists("/Applications/NodeBox.app") else "svg",
"shapes": "copperplate",
}
elif sys.platform.lower().startswith("win"):
default_params = {
"encodings": ["utf8", "ISO_8859-15"],
"image_format": "svg",
"shapes": "trebuchet",
}
else: # linux
default_params = {
"encodings": ["utf8", "ISO_8859-15"],
"image_format": "svg",
"shapes": "serif",
}
mocodo_group.add_argument("--language", metavar="CODE", type=str, help="override the automatic localization of the messages with the given language code (e.g., 'fr', 'en', ...)")
io_group.add_argument("--params_path", metavar="PATH", default="params.json", help="the path of the parameter file. If omitted, use 'params.json' in the input directory. If non existent, use default parameters.")
io_group.add_argument("--input", metavar="PATH", help="the path of the input file. By default, the output files will be generated in the same directory")
(args, remaining_args) = parser.parse_known_args()
text_type = (unicode if sys.version_info.major == 2 else str)
if args.input and not os.path.exists(args.input):
if os.path.exists(args.input + ".mcd"):
args.input += ".mcd"
else: # the user has explicitely specified a non existent input file
init_localization(script_directory, default_params.get("language", args.language))
raise MocodoError(18, _('The file "{input}" doesn\'t exist.').format(input=args.input))
default_params["input"] = args.input
if os.path.exists(args.params_path):
default_params.update(json.loads(read_contents(args.params_path)))
if not default_params["input"]:
default_params["input"] = "sandbox.mcd"
default_params["language"] = init_localization(script_directory, default_params.get("language", args.language))
default_params.setdefault("output_dir", os.path.dirname(default_params["input"]))
mocodo_group.add_argument("--help", action="help", help="show this help message and exit")
mocodo_group.add_argument("--version", action="version", version="%(prog)s " + version, help="display the version number, then exit")
mocodo_group.add_argument("--restore", action="store_true", help="recreate a pristine version of the files 'sandbox.mcd' and 'params.json' in the input directory, then exit")
aspect_group.add_argument("--df", metavar="STR", type=text_type, default=u"DF", help="the acronym to be circled in a functional dependency")
aspect_group.add_argument("--card_format", metavar="STR", type=text_type, nargs="?", default=u"{min_card},{max_card}", help="format string for minimal and maximal cardinalities")
aspect_group.add_argument("--strengthen_card", metavar="STR", type=text_type, nargs="?", default=u"_1,1_", help="string for relative cardinalities")
source_group.add_argument("--flex", metavar="FLOAT", type=float, default=0.75, help="flex straight legs whose cardinalities may collide")
aspect_group.add_argument("--tkinter", action="store_true", help="use Tkinter to calculate the pixel-dimensions of the labels")
aspect_group.add_argument("--colors", metavar="PATH", default="bw", help="the color palette to use when generating the drawing. Name (without extension) of a file located in the directory 'colors', or path to a personal file")
aspect_group.add_argument("--shapes", metavar="PATH", help="specification of the fonts, dimensions, etc. Name (without extension) of a file located in the directory 'shapes', or path to a personal file")
aspect_group.add_argument("--scale", metavar="RATE", type=scale, default=1, help="scale the diagram by the given factor")
aspect_group.add_argument("--hide_annotations", action="store_true", help="ignore the hovering of annotated elements")
relational_group.add_argument("--relations", metavar="NAME", nargs="*", default=["html", "text"], help="one or several templates for the generated relational schemas. Cf. directory 'relation_templates'")
relational_group.add_argument("--disambiguation", choices=["numbers_only", "annotations"], default="annotations", help="specify the way to disambiguate foreign attributes")
relational_group.add_argument("--title", metavar="STR", default=_(u'Untitled').encode("utf8"), type=str, help="database name (used for SQL output)")
relational_group.add_argument("--guess_title", action="store_true", help="use the name of the most referred entity as title")
io_group.add_argument("--output_dir", metavar="PATH", help="the directory of the output files")
io_group.add_argument("--encodings", metavar="STR", nargs="*", help="one or several encodings to be tried successively when reading the input file")
io_group.add_argument("--extract", action="store_true", help="create a separated JSON file for the geometric parameters")
io_group.add_argument("--image_format", choices=["svg", "nodebox"], help="override the automatic selection (depending on your installation) of the image format produced by the generated script")
io_group.add_argument("--print_params", action="store_true", help="display the contents of the parameter file, then exit")
source_group.add_argument("--arrange", nargs="?", const="bb", choices=["bb", "ga", "lp"], help="rearrange the layout with either a Branch & Bound, a Genetic Algorithm, or a Linear Program solver, then exit")
source_group.add_argument("--timeout", metavar="SECONDS", type=int, help="limit the duration of the layout rearrangement")
source_group.add_argument("--verbose", action="store_true", help="display some gory details during the layout rearrangement")
source_group.add_argument("--fit", metavar="INT", type=int, const=0, nargs="?", help="fit the layout in the nth smallest grid")
source_group.add_argument("--flip", choices=["h", "v", "d"], help="display an horizontal / vertical / diagonal flip of the input file, then exit")
source_group.add_argument("--obfuscate", metavar="PATH", type=os.path.abspath, nargs="?", const="lorem_ipsum.txt", help="display an obfuscated version of the input file, then exit. Cf. directory 'lorem'")
source_group.add_argument("--obfuscation_max_length", metavar="NAT*", type=positive_integer, help="maximal length of obfuscated labels")
source_group.add_argument("--obfuscation_min_distance", metavar="NAT*", type=positive_integer, default=3, help="minimal Damerau-Levenshtein's distance between any two obfuscated labels")
source_group.add_argument("--seed", metavar="FLOAT", type=float, help="initial value for the random number generator")
bb_group.add_argument("--call_limit", metavar="NAT*", type=positive_integer, default=10000, help="maximal number of calls for a given starting box")
bb_group.add_argument("--min_objective", metavar="NAT*", type=positive_integer, default=0, help="best acceptable fitness for a layout")
bb_group.add_argument("--max_objective", metavar="NAT*", type=positive_integer, default=15, help="worst acceptable fitness for a layout")
bb_group.add_argument("--organic", action="store_true", help="unconstrained Branch & Bound")
ga_group.add_argument("--population_size", metavar="NAT*", type=positive_integer, default=1000, help="number of individuals to evolve")
ga_group.add_argument("--crossover_rate", metavar="RATE", type=rate, default=0.9, help="crossover rate, between 0 and 1")
ga_group.add_argument("--mutation_rate", metavar="RATE", type=rate, default=0.06, help="mutation rate, between 0 and 1")
ga_group.add_argument("--sample_size", metavar="NAT*", type=positive_integer, default=7, help="the sample size in tournaments")
ga_group.add_argument("--max_generations", metavar="NAT*", type=positive_integer, default=300, help="maximal number of generations")
ga_group.add_argument("--plateau", metavar="NAT*", type=positive_integer, default=30, help="maximal number of consecutive generations without improvement")
lp_group.add_argument("--engine", nargs="?", const="cplex", choices=["cplex", "gurobi"], help="solver for the linear program")
nb_group.add_argument("--mld", action="store_true", help="display the HTML relational model in the cell output")
nb_group.add_argument("--no_mcd", action="store_true", help="do not display the conceptual diagram in the cell output")
nb_group.add_argument("--replace", action="store_true", help="replaces the cell contents by its output")
parser.set_defaults(**default_params)
params = vars(parser.parse_args(remaining_args))
params["added_keys"] = ["added_keys", "params_path"]
add_key("script_directory", script_directory)
add_key("has_expired", has_expired(params["timeout"]))
add_key("output_name", os.path.join(params["output_dir"], os.path.splitext(os.path.basename(params["input"]))[0]))
# import pprint
# pprint.pprint(params)
if not os.path.exists(params["input"]):
import shutil
shutil.copyfile(os.path.join(params["script_directory"], "pristine_sandbox.mcd"), params["input"])
random.seed(params["seed"])
# params["title"] = params["title"].decode("utf8")
return params
```
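As a hedged sketch of the resolution order described in the NOTE above (command line option, then --params_path, then params.json, then defaults), `parsed_arguments` can be driven programmatically by setting `sys.argv`; the option values are illustrative and the installed mocodo package context (with its bundled `pristine_sandbox.mcd`) is assumed.

```python
# Illustrative invocation; without --input, the default 'sandbox.mcd' is created if missing.
import sys
sys.argv = ["mocodo", "--language", "en", "--relations", "html"]
params = parsed_arguments()
print(params["input"], params["output_dir"], params["language"])
```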
#### File: mocodo/mocodo/arrange_bb.py
```python
from __future__ import division
from itertools import product, count
from random import random, shuffle, randrange, choice
from .cross import cross, memoize
from math import hypot, sqrt
from .mocodo_error import MocodoError
def arrange(col_count, row_count, successors, multiplicity, organic, min_objective, max_objective, call_limit, verbose, has_expired, **kwargs):
@memoize
def bounded_neighborhood(x1, y1):
result = set()
for x2 in range(max(0, x1 - radius), min(col_count, x1 + radius + 1)):
for y2 in range(max(0, y1 - radius + abs(x1 - x2)), min(row_count, y1 + radius - abs(x1 - x2) + 1)):
if x1 != x2 or y1 != y2:
result.add((x2, y2))
return result
@memoize
def organic_neighborhood(x1, y1):
result = set()
for x2 in range(x1 - radius, x1 + radius + 1):
for y2 in range(y1 - radius + abs(x1 - x2), y1 + radius - abs(x1 - x2) + 1):
if x1 != x2 or y1 != y2:
result.add((x2, y2))
return result
@memoize
def bounded_hull(coords):
result = set()
for (x, y) in coords:
if x - 1 >= 0:
result.add((x - 1, y))
if x + 1 < col_count:
result.add((x + 1, y))
if y - 1 >= 0:
result.add((x, y - 1))
if y + 1 < row_count:
result.add((x, y + 1))
return result.difference(coords)
@memoize
def organic_hull(coords):
result = set()
for (x, y) in coords:
result.add((x - 1, y))
result.add((x + 1, y))
result.add((x, y - 1))
result.add((x, y + 1))
return result.difference(coords)
def recurs(box_coords, next_boxes, already_placed_segments, cumulated_distances):
if cumulated_distances > objective:
# print "cut"
return None
if len(next_boxes) == 0:
return {
"coords": box_coords,
"crossings": 0,
"distances": cumulated_distances,
}
outside_hull_count = len(next_boxes) - len(hull(frozenset(box_coords.values())))
if outside_hull_count * outside_hull_minimal_distance + cumulated_distances > objective:
# print "Lower bound cut"
return None
if has_expired():
raise MocodoError(10, _('Layout calculation time exceeded.'))
if next(iteration) > call_limit:
# print "call limit exceeded"
return None
box_to_place = next_boxes[0]
already_placed_successors = {box_coords[box]: box for box in successors[box_to_place] if box in box_coords}
if already_placed_successors:
already_placed_successor_coords = iter(already_placed_successors)
(x1, y1) = next(already_placed_successor_coords)
possible_coords = neighborhood(x1, y1).copy()
# print already_placed_successors[0], possible_coords
for (x1, y1) in already_placed_successor_coords:
possible_coords.intersection_update(neighborhood(x1, y1))
if not possible_coords:
# print "neighborhood intersection is empty"
return None
else:
# print "the box to place has no successors: all empty coords are possible"
possible_coords = set(product(range(col_count), range(row_count)))
possible_coords.difference_update(box_coords.values())
if not possible_coords:
# print "neighborhood intersection is not free"
return None
non_crossing_possible_coords = []
for (x1, y1) in possible_coords:
for ((x2, y2), (x3, y3, x4, y4)) in product(already_placed_successors, already_placed_segments):
if cross(x1, y1, x2, y2, x3, y3, x4, y4):
break
else:
non_crossing_possible_coords.append((x1, y1))
if not non_crossing_possible_coords:
# print "all possible coords result in a crossing with existing segment"
return None
weighted_possible_coords = []
for (x1, y1) in non_crossing_possible_coords:
cumulated_distance = 0
for ((x2, y2), placed_box) in already_placed_successors.items():
cumulated_distance += distances[abs(x1-x2)][abs(y1-y2)] * multiplicity[(box_to_place, placed_box)]
weighted_possible_coords.append((cumulated_distance, random(), x1, y1))
weighted_possible_coords.sort()
for (cumulated_distance, _, x1, y1) in weighted_possible_coords:
box_coords[box_to_place] = (x1, y1)
new_segments = [(x1, y1, x2, y2) for (x2, y2) in already_placed_successors]
new_next_boxes = list(successors[box_to_place].difference(box_coords).difference(next_boxes))
if len(next_boxes) == 1 and len(new_next_boxes) == 0 and len(box_coords) != box_count:
# print "the placed boxes have no more non placed successors"
new_next_boxes = list(set(range(box_count)).difference(box_coords))
if new_next_boxes:
new_next_boxes = [choice(new_next_boxes)]
shuffle(new_next_boxes)
result = recurs(
box_coords,
next_boxes[1:] + new_next_boxes,
already_placed_segments + new_segments,
cumulated_distances + cumulated_distance
)
if result:
return result
del box_coords[box_to_place]
box_count = col_count * row_count
neighborhood = organic_neighborhood if organic else bounded_neighborhood
hull = organic_hull if organic else bounded_hull
neighborhood_cache = {}
radius = 3
distances = [[hypot(i, j) - 1 for j in range(radius + 1)] for i in range(radius + 1)]
outside_hull_minimal_distance = distances[1][2]
if all(not successor for successor in successors):
# print "no link: return a random layout"
layout = list(range(box_count))
shuffle(layout)
return {
"layout": layout,
"crossings": 0,
"distances": 0,
}
for objective in range(min_objective, max_objective + 1):
if verbose:
print("Objective %s." % objective)
boxes = list(range(box_count))
shuffle(boxes)
for first_box in boxes:
iteration = count()
if successors[first_box]:
if verbose:
print(" Starting from box %s." % first_box)
result = recurs(
{first_box: (0, 0)},
list(successors[first_box]),
[],
0
)
if result:
coords = result["coords"]
if organic:
min_x = min(x for (x, y) in coords.values())
max_x = max(x for (x, y) in coords.values())
min_y = min(y for (x, y) in coords.values())
max_y = max(y for (x, y) in coords.values())
for (box_index, (x, y)) in coords.items():
coords[box_index] = (x - min_x, y - min_y)
result["row_count"] = row_count = max_y - min_y + 1
result["col_count"] = col_count = max_x - min_x + 1
result["layout"] = [None] * row_count * col_count
for (box_index, (x, y)) in coords.items():
result["layout"][x + y * col_count] = box_index
return result
if organic:
break
objective += 1
if __name__ == "__main__":
from .mcd import Mcd
from .argument_parser import parsed_arguments
from time import time
from random import seed
clauses = u"""
SUSPENDISSE: diam
SOLLICITUDIN, 0N SUSPENDISSE, 0N CONSECTETUER, 0N LOREM: lectus
CONSECTETUER: elit, sed
MAECENAS, 1N DIGNISSIM, 1N DIGNISSIM
DF1, 11 LOREM, 1N SUSPENDISSE
LOREM: ipsum, dolor, sit
TORTOR, 0N RISUS, 11 DIGNISSIM, 1N CONSECTETUER: nec
DIGNISSIM: ligula, massa, varius
DF, 11 RISUS, 0N RISUS
AMET, 11> LOREM, 01 CONSECTETUER: adipiscing
RISUS: ultricies, _cras, elementum
SEMPER, 0N RISUS, 1N DIGNISSIM
""".replace(" ", "").split("\n")
params = parsed_arguments()
mcd = Mcd(clauses, params)
params.update(mcd.get_layout_data())
starting_time = time()
seed(42)
result = arrange(**params)
if result:
print()
print(mcd.get_clauses_from_layout(**result))
print()
print("Cumulated distances:", result["distances"])
print("Duration:", time() - starting_time)
print()
```
#### File: mocodo/mocodo/damerau_levenshtein.py
```python
from __future__ import division
def damerau_levenshtein(seq1, seq2):
"""Calculate the Damerau-Levenshtein distance between sequences.
This distance is the number of additions, deletions, substitutions,
and transpositions needed to transform the first sequence into the
second. Although generally used with strings, any sequences of
comparable objects will work.
Transpositions are exchanges of *consecutive* characters; all other
operations are self-explanatory.
This implementation is O(N*M) time and O(M) space, for N and M the
lengths of the two sequences.
>>> damerau_levenshtein('ba', 'abc')
2
>>> damerau_levenshtein('fee', 'deed')
2
It works with arbitrary sequences too:
>>> damerau_levenshtein('abcd', ['b', 'a', 'c', 'd', 'e'])
2
"""
# codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F
# Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.
# However, only the current and two previous rows are needed at once,
# so we only store those.
one_ago = None
this_row = list(range(1, len(seq2) + 1)) + [0]
for x in range(len(seq1)):
# Python lists wrap around for negative indices, so put the
# leftmost column at the *end* of the list. This matches with
# the zero-indexed strings and saves extra calculation.
two_ago, one_ago, this_row = one_ago, this_row, [0] * len(seq2) + [x + 1]
for y in range(len(seq2)):
del_cost = one_ago[y] + 1
add_cost = this_row[y - 1] + 1
sub_cost = one_ago[y - 1] + (seq1[x] != seq2[y])
this_row[y] = min(del_cost, add_cost, sub_cost)
# This block deals with transpositions
if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]
and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):
this_row[y] = min(this_row[y], two_ago[y - 2] + 1)
return this_row[len(seq2) - 1]
```
#### File: mocodo/mocodo/entity.py
```python
from __future__ import division
from .attribute import *
from .dynamic import Dynamic
class Entity:
def __init__(self, clause):
def clean_up(name, attributes):
name = name.strip(" \n\t")
cartouche = (name[:-1] if name[-1].isdigit() else name) # get rid of digit suffix, if any
return (name, cartouche, outer_split(attributes))
(self.name, self.attribute_labels) = clause.split(":", 1)
(self.name, self.cartouche, self.attribute_labels) = clean_up(self.name, self.attribute_labels)
self.legs = [] # iterating over box's legs does nothing if it is not an association
self.kind = "entity"
self.clause = clause
def set_strengthen_legs(self, legs):
self.strengthen_legs = legs
IdentifierAttribute = WeakAttribute if legs else StrongAttribute
self.attributes = []
for (i, attribute_label) in enumerate(self.attribute_labels):
if attribute_label == "":
self.attributes.append(PhantomAttribute(i))
elif attribute_label.startswith("_"):
if i == 0:
self.attributes.append(SimpleEntityAttribute(attribute_label[1:], i))
else:
self.attributes.append(IdentifierAttribute(attribute_label[1:], i))
elif i == 0:
self.attributes.append(IdentifierAttribute(attribute_label, i))
else:
self.attributes.append(SimpleEntityAttribute(attribute_label, i))
def calculate_size(self, style, get_font_metrics):
cartouche_font = get_font_metrics(style["entity_cartouche_font"])
self.get_cartouche_string_width = cartouche_font.get_pixel_width
self.cartouche_height = cartouche_font.get_pixel_height()
attribute_font = get_font_metrics(style["entity_attribute_font"])
self.attribute_height = attribute_font.get_pixel_height()
for attribute in self.attributes:
attribute.calculate_size(style, get_font_metrics)
cartouche_and_attribute_widths = [self.get_cartouche_string_width(self.cartouche)] + [a.w for a in self.attributes]
self.w = 2 * style["rect_margin_width"] + max(cartouche_and_attribute_widths)
self.h = len(self.attributes) * (self.attribute_height + style["line_skip_height"]) \
- style["line_skip_height"] \
+ 4 * style["rect_margin_height"] \
+ self.cartouche_height
self.w += self.w % 2
self.h += self.h % 2
self.style = style
def description(self):
result = ["Entity %s" % self.name]
result.extend([
{
"key": "env",
"env": [("x", """cx[u"%s"]""" % self.name), ("y", """cy[u"%s"]""" % self.name)],
},
{
"key": "begin",
"id": u"entity-%s" % self.name,
},
{
"key": "begin",
"id": u"frame-%s" % self.name,
},
{
"key": "stroke_depth",
"stroke_depth": 0,
},
{
"key": "stroke_color",
"stroke_color": Dynamic("colors['entity_cartouche_color']"),
},
{
"key": "color",
"color": Dynamic("colors['entity_cartouche_color']"),
},
{
"key": "rect",
"x": Dynamic("%s+x" % (-self.w // 2)),
"y": Dynamic("%s+y" % (-self.h // 2)),
"w": self.w,
"h": self.cartouche_height + 2 * self.style["rect_margin_height"],
},
{
"key": "stroke_color",
"stroke_color": Dynamic("colors['entity_color']"),
},
{
"key": "color",
"color": Dynamic("colors['entity_color']"),
},
{
"key": "rect",
"x": Dynamic("%s+x" % (-self.w // 2)),
"y": Dynamic("%s+y" % round(-self.h / 2 + self.cartouche_height + 2 * self.style["rect_margin_height"], 1)),
"w": self.w,
"h": self.h - self.cartouche_height - 2 * self.style["rect_margin_height"],
},
{
"key": "stroke_color",
"stroke_color": Dynamic("colors['entity_stroke_color']"),
},
{
"key": "stroke_depth",
"stroke_depth": self.style["box_stroke_depth"],
},
{
"key": "color",
"color": Dynamic("colors['transparent_color']"),
},
{
"key": "rect",
"x": Dynamic("%s+x" % (-self.w // 2)),
"y": Dynamic("%s+y" % (-self.h // 2)),
"w": self.w,
"h": self.h,
},
{
"key": "stroke_depth",
"stroke_depth": self.style["inner_stroke_depth"],
},
{
"key": "line",
"x0": Dynamic("%s+x" % (-self.w // 2)),
"y0": Dynamic("%s+y" % (-self.h // 2 + self.cartouche_height + 2 * self.style["rect_margin_height"])),
"x1": Dynamic("%s+x" % (self.w // 2)),
"y1": Dynamic("%s+y" % (-self.h // 2 + self.cartouche_height + 2 * self.style["rect_margin_height"])),
},
{
"key": "end",
},
{
"key": "text",
"family": self.style["entity_cartouche_font"]["family"],
"size": self.style["entity_cartouche_font"]["size"],
"text": self.cartouche,
"text_color": Dynamic("colors['entity_cartouche_text_color']"),
"x": Dynamic("%s+x" % (-self.get_cartouche_string_width(self.cartouche) // 2)),
"y": Dynamic("%s+y" % round(-self.h / 2 + self.style["rect_margin_height"] + self.style["cartouche_text_height_ratio"] * self.cartouche_height, 1)),
},
])
dx = self.style["rect_margin_width"] - self.w // 2
dy = self.cartouche_height + 3 * self.style["rect_margin_height"] - self.h // 2
for attribute in self.attributes:
attribute.name = self.name
result.extend(attribute.description(dx, dy))
dy += self.attribute_height + self.style["line_skip_height"]
result.extend([
{
"key": "end",
},
])
return result
```
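For orientation, a hedged sketch of how `Entity` parses a clause in Mocodo's textual syntax; the entity and attribute names are placeholders, and calling `set_strengthen_legs([])` mimics what `Mcd` does for an entity with no identifying legs.

```python
# Placeholder clause; with no strengthening legs, the first attribute becomes the identifier.
entity = Entity("CLIENT: reference, name, address")
entity.set_strengthen_legs([])
print(entity.name, entity.cartouche)                  # -> CLIENT CLIENT
print([type(a).__name__ for a in entity.attributes])  # StrongAttribute, then SimpleEntityAttributes
```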
#### File: mocodo/mocodo/fitness.py
```python
from __future__ import division
from itertools import product
from math import hypot
from collections import Counter
from .cross import cross
def fitness(links, multiplicity, col_count, row_count, max_distance = 4):
""" Return (by closure) a function evaluating the aesthetic quality of a given layout. """
def evaluate(layout):
for (position, index) in enumerate(layout):
coordinates[index] = divmod(position, col_count)
segments = [(coordinates[p1], coordinates[p2], multiplicity[p1, p2]) for (p1, p2) in links]
total_distances = 0
short_segments = []
for ((y1, x1), (y2, x2), m) in segments:
distance = distances[abs(x1-x2)][abs(y1-y2)] * m
if distance <= max_distance:
short_segments.append((x1, y1, x2, y2))
total_distances += distance
crossing_count = (link_count - len(short_segments)) * link_count
for (i, (x1, y1, x2, y2)) in enumerate(short_segments):
for (x3, y3, x4, y4) in short_segments[i+1:]:
crossing_count += cross(x1, y1, x2, y2, x3, y3, x4, y4)
return (crossing_count, total_distances)
distances = [[hypot(i, j) - 1 for j in range(row_count)] for i in range(col_count)]
coordinates = [(0, 0)] * (row_count * col_count)
link_count = len(links)
return evaluate
```
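A tiny hedged sketch of the closure returned by `fitness`, on a toy 2x2 grid with a single link; the `links` and `multiplicity` structures follow the shape produced by `Mcd.get_layout_data`.

```python
# Toy example: boxes 0 and 1 are linked once; the layout places them side by side on the first row.
links = ((0, 1),)
multiplicity = {(0, 1): 1, (1, 0): 1}
evaluate = fitness(links, multiplicity, col_count=2, row_count=2)
crossings, total_distance = evaluate([0, 1, 2, 3])  # -> (0, 0.0): adjacent boxes, no crossings
```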
#### File: mocodo/mocodo/mcd.py
```python
from __future__ import division
import re
from .association import Association
from .entity import Entity
from .phantom import Phantom
from .diagram_link import DiagramLink
import itertools
from collections import defaultdict
from .grid import Grid
from .mocodo_error import MocodoError
compress_colons = re.compile(r"(?m)^:\n(?=:$)").sub
def cmp(x, y):
return (x > y) - (x < y)
SYS_MAXINT = 9223372036854775807 # an integer larger than any practical list or string index
class Mcd:
def __init__(self, clauses, params, get_font_metrics=None):
def parse_clauses():
self.entities = {}
self.associations = {}
seen = set()
self.rows = [[]]
self.header = ""
for clause in clauses:
clause = clause.strip(" \n\r\t")
if not clause:
self.rows.append([])
continue
if clause.startswith("%"):
if not self.rows[-1]:
self.header += "%s\n" % clause
continue
if clause == ":" * len(clause):
self.rows[-1].extend(Phantom(next(phantom_counter)) for colon in clause)
continue
if clause.startswith(":"):
raise MocodoError(19, _('The clause "{clause}" starts with a colon.').format(clause=clause))
clause = re.sub("\[.+?\]", substitute_forbidden_symbols_between_brackets, clause)
if "," in clause.split(":", 1)[0]:
element = Association(clause, params)
if element.name in self.associations:
raise MocodoError(7, _('Duplicate association "{association}". If you want to make two associations appear with the same name, you must suffix it with a number.').format(association=element.name))
self.associations[element.name] = element
elif ":" in clause:
element = Entity(clause)
if element.name in self.entities:
raise MocodoError(6, _('Duplicate entity "{entity}". If you want to make two entities appear with the same name, you must suffix it with a number.').format(entity=element.name))
self.entities[element.name] = element
else:
raise MocodoError(21, _('"{clause}" does not constitute a valid declaration of an entity or association.').format(clause=clause))
if element.name in seen:
raise MocodoError(8, _('One entity and one association share the same name "{name}".').format(name=element.name))
seen.add(element.name)
self.rows[-1].append(element)
if not seen:
raise MocodoError(4, _('The ERD is empty.'))
self.rows = [row for row in self.rows if row]
self.col_count = max(len(row) for row in self.rows)
self.row_count = len(self.rows)
def add_legs():
for association in self.associations.values():
for leg in association.legs:
try:
leg.entity = self.entities[leg.entity_name]
except KeyError:
if leg.entity_name in self.associations:
raise MocodoError(20, _(u'Association "{association_1}" linked to another association "{association_2}"!').format(association_1=association.name, association_2=leg.entity_name))
else:
raise MocodoError(1, _(u'Association "{association}" linked to an unknown entity "{entity}"!').format(association=association.name, entity=leg.entity_name))
def add_attributes_and_strength():
strengthen_legs = dict((entity_name, []) for entity_name in self.entities)
for association in self.associations.values():
for leg in association.legs:
if leg.strengthen:
strengthen_legs[leg.entity_name].append(leg)
for (entity_name, legs) in strengthen_legs.items():
self.entities[entity_name].set_strengthen_legs(legs)
def tweak_straight_cards():
coordinates = {}
for (j, row) in enumerate(self.rows):
for (i, box) in enumerate(row):
coordinates[box] = (i, j)
d = defaultdict(list)
tweakable_legs = {}
for association in self.associations.values():
for leg in association.legs:
(ei, ej) = coordinates[leg.entity]
(ai, aj) = coordinates[leg.association]
vector = (cmp(ai, ei), cmp(aj, ej))
vector = (" SN"[cmp(aj, ej)] + " EW"[cmp(ai, ei)]).strip()
d[leg.entity].append(vector)
tweakable_legs[(leg.entity, vector)] = leg
flex = params.get("flex", 0)
for (entity, vectors) in d.items():
for vector in vectors:
leg = tweakable_legs[(entity, vector)]
if not leg.cardinalities.strip():
continue
elif vector == "E":
if vectors.count("E") == 1 and "SE" in vectors and "NE" not in vectors:
leg.twist = True
elif vector == "S":
if vectors.count("S") == 1 and "SE" in vectors and "SW" not in vectors:
leg.twist = True
elif vector == "W":
if vectors.count("W") == 1 and "SW" in vectors and "NW" not in vectors:
leg.twist = True
elif vector == "N":
if vectors.count("N") == 1 and "NE" in vectors and "NW" not in vectors:
leg.twist = True
elif flex == 0:
continue
elif vector == "SE" and vectors.count("SE") == 1:
if vectors.count("E") > 1:
leg.set_spin_strategy(flex)
elif vectors.count("S") > 1:
leg.set_spin_strategy(-flex)
elif vector == "SW" and vectors.count("SW") == 1:
if vectors.count("S") > 1:
leg.set_spin_strategy(flex)
elif vectors.count("W") > 1:
leg.set_spin_strategy(-flex)
elif vector == "NW" and vectors.count("NW") == 1:
if vectors.count("W") > 1:
leg.set_spin_strategy(flex)
elif vectors.count("N") > 1:
leg.set_spin_strategy(-flex)
elif vector == "NE" and vectors.count("NE") == 1:
if vectors.count("N") > 1:
leg.set_spin_strategy(flex)
elif vectors.count("E") > 1:
leg.set_spin_strategy(-flex)
def add_diagram_links():
self.diagram_links = []
for (entity_name, entity) in self.entities.items():
for attribute in entity.attributes:
if attribute.primary_entity_name:
self.diagram_links.append(DiagramLink(self.entities, entity, attribute))
def may_center():
for row in self.rows:
n = self.col_count - len(row)
if n:
row[0:0] = [Phantom(next(phantom_counter)) for i in range(n // 2)]
row.extend(Phantom(next(phantom_counter)) for i in range(n // 2 + n % 2))
def make_boxes():
i = itertools.count()
self.boxes = []
for row in self.rows:
for box in row:
box.identifier = next(i)
self.boxes.append(box)
self.box_count = len(self.boxes)
def substitute_forbidden_symbols_between_brackets(text):
return text.group().replace(",", "<<<protected-comma>>>").replace(":", "<<<protected-colon>>>")
self.get_font_metrics = get_font_metrics
phantom_counter = itertools.count()
parse_clauses()
add_legs()
add_attributes_and_strength()
add_diagram_links()
may_center()
make_boxes()
tweak_straight_cards()
self.title = params["title"]
def get_layout_data(self):
successors = [set() for i in range(self.box_count)] # use `set` to deduplicate reflexive associations
multiplicity = defaultdict(int) # but count the multiplicity (1 or 2) of each link
if self.associations:
for association in self.associations.values():
for leg in association.legs:
successors[association.identifier].add(leg.entity.identifier)
successors[leg.entity.identifier].add(association.identifier)
multiplicity[(association.identifier, leg.entity.identifier)] += 1
multiplicity[(leg.entity.identifier, association.identifier)] += 1
else:
for diagram_link in self.diagram_links:
fei = diagram_link.foreign_entity.identifier
pei = diagram_link.primary_entity.identifier
if fei != pei:
successors[fei].add(pei)
successors[pei].add(fei)
multiplicity[(fei, pei)] += 1
multiplicity[(pei, fei)] += 1
return {
"links": tuple((node, child) for (node, children) in enumerate(successors) for child in children if node < child),
"successors": successors,
"col_count": self.col_count,
"row_count": self.row_count,
"multiplicity": dict(multiplicity)
}
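    # Hedged illustration of the returned structure: if box 0 (an entity) is linked twice to box 1
    # (a reflexive association), successors[0] == {1} and successors[1] == {0} because sets deduplicate,
    # while multiplicity[(0, 1)] == multiplicity[(1, 0)] == 2 keeps the leg count;
    # "links" lists each undirected pair once, e.g. ((0, 1),).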
def get_layout(self):
return [box.identifier for row in self.rows for box in row]
def get_row_text(self, row):
return "\n".join(box.clause.replace("<<<protected-comma>>>", ",").replace("<<<protected-colon>>>", ":") for box in row)
def set_layout(self, layout, col_count=None, row_count=None, **kwargs):
if col_count and row_count:
(self.col_count, self.row_count) = (col_count, row_count)
def get_or_create_box(index):
return Phantom() if layout[index] is None else self.boxes[layout[index]]
i = itertools.count()
self.rows = [[get_or_create_box(next(i)) for x in range(self.col_count)] for y in range(self.row_count)]
def suppress_empty_rows(y):
while self.rows: # there's at least one row
for box in self.rows[y]:
if box.kind != "phantom":
return
del self.rows[y]
self.row_count -= 1
suppress_empty_rows(0)
suppress_empty_rows(-1)
def suppress_empty_cols(x):
while self.rows[0]: # there's at least one column
for row in self.rows:
if row[x].kind != "phantom":
return
for row in self.rows:
del row[x]
self.col_count -= 1
suppress_empty_cols(0)
suppress_empty_cols(-1)
def get_clauses(self):
result = self.header
if self.associations:
result += "\n\n".join(self.get_row_text(row) for row in self.rows)
else:
result += "\n\n".join(":\n" + "\n:\n".join(self.get_row_text(row).split("\n")) + "\n:" for row in self.rows)
return compress_colons(":", result)
def get_clauses_horizontal_mirror(self):
result = self.header + "\n\n".join(self.get_row_text(row) for row in self.rows[::-1])
return compress_colons(":", result)
def get_clauses_vertical_mirror(self):
result = self.header + "\n\n".join(self.get_row_text(row[::-1]) for row in self.rows)
return compress_colons(":", result)
def get_clauses_diagonal_mirror(self):
result = self.header + "\n\n".join(self.get_row_text(row) for row in zip(*self.rows))
return compress_colons(":", result)
def get_reformatted_clauses(self, nth_fit):
grid = Grid(len(self.boxes) + 100) # make sure there are enough precalculated grids
start = len(self.entities) + len(self.associations) # number of nonempty boxes
if nth_fit < 0:
if (self.col_count, self.row_count) in grid: # the current grid is among precalculated ones
start = self.col_count * self.row_count # start from the completed grid
nth_fit = 1 # and calculate the next one
(col_count, row_count) = grid.get_nth_next(start, nth_fit)
result = []
i = 0
for box in self.boxes:
if box.kind != "phantom":
if i % col_count == 0 and i:
result.append("")
result.append(box.clause.replace("<<<protected-comma>>>", ",").replace("<<<protected-colon>>>", ":"))
i += 1
for i in range(i, col_count * row_count):
if i % col_count == 0 and i:
result.append("")
result.append(":")
return self.header + compress_colons(":", "\n".join(result))
def calculate_size(self, style):
def card_max_width():
get_pixel_width = self.get_font_metrics(style["card_font"]).get_pixel_width
cardinalities = {"0,N"} # default value, in case there is no cardinalities at all
for association in self.associations.values():
for leg in association.legs:
cardinalities.add(leg.cardinalities)
return max(map(get_pixel_width, cardinalities))
#
def calculate_sizes():
for row in self.rows:
for (i, box) in enumerate(row):
box.calculate_size(style, self.get_font_metrics)
max_box_width_per_column[i] = max(box.w, max_box_width_per_column[i])
for diagram_link in self.diagram_links:
diagram_link.calculate_size(style, self.get_font_metrics)
#
def make_horizontal_layout():
self.w = style["margin_size"]
for row in self.rows:
horizontal_offset = style["margin_size"]
for (i, box) in enumerate(row):
box.x = horizontal_offset + (max_box_width_per_column[i] - box.w) // 2
horizontal_offset += max_box_width_per_column[i] + join_width
self.w = max(self.w, horizontal_offset)
self.w += style["margin_size"] - join_width
#
def compress_horizontally():
dx = 0
for i in range(1, self.col_count):
dx = SYS_MAXINT
for row in self.rows:
b1 = row[i-1]
b2 = row[i]
space = b2.x - b1.x - b1.w - join_width
dx = min(dx, space)
for row in self.rows:
row[i].x -= dx
self.w -= dx
#
def make_vertical_layout():
vertical_offset = style["margin_size"]
for row in self.rows:
max_box_height = max(box.h for box in row)
for box in row:
box.y = vertical_offset + (max_box_height - box.h) // 2
vertical_offset += max_box_height + join_height
self.h = vertical_offset + style["margin_size"] - join_height
#
def compress_vertically():
dy = 0
for j in range(1, self.row_count):
dy = SYS_MAXINT
for (i2, b2) in enumerate(self.rows[j]):
y1_max = 0
for (i1, b1) in enumerate(self.rows[j-1]):
if (i1 == i2) or (b1.x < b2.x < b1.x + b1.w + join_width) or (b1.x - join_width < b2.x + b2.w < b1.x + b1.w):
y1_max = max(y1_max, b1.y + b1.h)
space = b2.y - y1_max - join_height
dy = min(dy, space)
for box in self.rows[j]:
box.y -= dy
self.h -= dy
#
style["card_max_width"] = card_max_width()
style["card_max_height"] = self.get_font_metrics(style["card_font"]).get_pixel_height()
join_width = 2 * style["card_margin"] + style["card_max_width"]
join_height = 2 * style["card_margin"] + style["card_max_height"]
max_box_width_per_column = [0] * self.col_count
calculate_sizes()
make_horizontal_layout()
compress_horizontally()
make_vertical_layout()
compress_vertically()
def description(self):
result = []
for element in self.associations.values():
result.extend(element.description())
for element in self.entities.values():
result.extend(element.description())
for element in self.diagram_links:
result.extend(element.description())
return result
if __name__ == "__main__":
from .argument_parser import parsed_arguments
clauses = u"""
CLIENT: Réf. client, Nom, Prénom, Adresse
PASSER, 0N CLIENT, 11 COMMANDE
COMMANDE: Num commande, Date, Montant
INCLURE, 1N COMMANDE, 0N PRODUIT: Quantité
PRODUIT: Réf. produit, Libellé, Prix unitaire
""".replace(" ", "").split("\n")
params = parsed_arguments()
mcd = Mcd(clauses, params)
print(mcd.get_clauses_vertical_mirror())
```
#### File: mocodo/tests/arrange_lp_tests.py
```python
from __future__ import division
import sys
sys.path[0:0] = ["."]
from arrange_lp import *
import unittest
from mocodo.mcd import Mcd
from mocodo.argument_parser import parsed_arguments
from time import time
from random import seed
clauses = u"""
SUSPENDISSE: diam
SOLLICITUDIN, 0N SUSPENDISSE, 0N CONSECTETUER, 0N LOREM: lectus
CONSECTETUER: elit, sed
MAECENAS, 1N DIGNISSIM, 1N DIGNISSIM
DF1, 11 LOREM, 1N SUSPENDISSE
LOREM: ipsum, dolor, sit
TORTOR, 0N RISUS, 11 DIGNISSIM, 1N CONSECTETUER: nec
DIGNISSIM: ligula, massa, varius
DF, 11 RISUS, 0N RISUS
AMET, 11> LOREM, 01 CONSECTETUER: adipiscing
RISUS: ultricies, _cras, elementum
SEMPER, 0N RISUS, 1N DIGNISSIM
""".replace(" ", "")
params = parsed_arguments()
mcd = Mcd(clauses.split("\n"), params)
params.update(mcd.get_layout_data())
class ArrangeLP(unittest.TestCase):
def test_with_cplex(self):
params["engine"] = "cplex"
rearrangement = arrange(**params)
mcd.set_layout(**rearrangement)
result = mcd.get_clauses()
self.assertEqual(rearrangement["crossings"], 0)
self.assertEqual(round(rearrangement["distances"], 4), 0.8284)
self.assertEqual(rearrangement["layout"], [11, 3, 0, 4, 10, 7, 1, 5, 8, 6, 2, 9])
self.assertEqual(result, u"""
SEMPER, 0N RISUS, 1N DIGNISSIM
MAECENAS, 1N DIGNISSIM, 1N DIGNISSIM
SUSPENDISSE: diam
DF1, 11 LOREM, 1N SUSPENDISSE
RISUS: ultricies, _cras, elementum
DIGNISSIM: ligula, massa, varius
SOLLICITUDIN, 0N SUSPENDISSE, 0N CONSECTETUER, 0N LOREM: lectus
LOREM: ipsum, dolor, sit
DF, 11 RISUS, 0N RISUS
TORTOR, 0N RISUS, 11 DIGNISSIM, 1N CONSECTETUER: nec
CONSECTETUER: elit, sed
AMET, 11> LOREM, 01 CONSECTETUER: adipiscing
""".strip().replace(" ", ""))
def test_with_gurobi(self):
params["engine"] = "gurobi"
rearrangement = arrange(**params)
mcd.set_layout(**rearrangement)
result = mcd.get_clauses()
self.assertEqual(rearrangement["crossings"], 0)
self.assertEqual(round(rearrangement["distances"], 4), 0.8284)
self.assertEqual(rearrangement["layout"], [4, 0, 3, 11, 5, 1, 7, 10, 9, 2, 6, 8])
self.assertEqual(result, u"""
DF1, 11 LOREM, 1N SUSPENDISSE
SUSPENDISSE: diam
MAECENAS, 1N DIGNISSIM, 1N DIGNISSIM
SEMPER, 0N RISUS, 1N DIGNISSIM
LOREM: ipsum, dolor, sit
SOLLICITUDIN, 0N SUSPENDISSE, 0N CONSECTETUER, 0N LOREM: lectus
DIGNISSIM: ligula, massa, varius
RISUS: ultricies, _cras, elementum
AMET, 11> LOREM, 01 CONSECTETUER: adipiscing
CONSECTETUER: elit, sed
TORTOR, 0N RISUS, 11 DIGNISSIM, 1N CONSECTETUER: nec
DF, 11 RISUS, 0N RISUS
""".strip().replace(" ", ""))
if __name__ == '__main__':
unittest.main()
```
#### File: mocodo/tests/fitness_tests.py
```python
from __future__ import division
import sys
sys.path[0:0] = ["."]
from mocodo.fitness import *
import unittest
from mocodo.mcd import Mcd
from mocodo.argument_parser import parsed_arguments
from math import hypot
class ArrangeBB(unittest.TestCase):
def test_optimal_layout(self):
clauses = u"""
SCELERISQUE LOREM: blandit, elit, ligula
EROS, 11 SCELERISQUE LOREM, 1N PELLENTESQUE IPSUM: metus, congue
NIBH, 1N SCELERISQUE LOREM, 11 PELLENTESQUE IPSUM
PELLENTESQUE IPSUM: tincidunt, bibendum, consequat, integer
""".replace(" ", "")
params = parsed_arguments()
mcd = Mcd(clauses.split("\n"), params)
params.update(mcd.get_layout_data())
d = mcd.get_layout_data()
evaluate = fitness(d["links"], d["multiplicity"], d["col_count"], d["row_count"])
size = d["col_count"] * d["row_count"]
(crossing_count, total_distances) = evaluate(list(range(size)))
self.assertEqual(crossing_count, 0)
self.assertEqual(total_distances, 0.0)
def test_optimal_layout_with_reflexive_association(self):
clauses = u"""
Assistas, 01 Hci poilu, 0N Hci poilu
Hci poilu: graffiti, champignon, troussa, graffiti
Rayonnait, 0N Hci poilu, 0N Lappa: monobloc
Brisa: souffrait
Pillards, 0N Brisa, 0N Lappa, 0N Hci poilu: disions, lascar
Lappa: graffiti, champignon
Puni, 11 Lappa, 0N Lappa
""".replace(" ", "")
params = parsed_arguments()
mcd = Mcd(clauses.split("\n"), params)
params.update(mcd.get_layout_data())
d = mcd.get_layout_data()
evaluate = fitness(d["links"], d["multiplicity"], d["col_count"], d["row_count"])
size = d["col_count"] * d["row_count"]
(crossing_count, total_distances) = evaluate(list(range(size)))
self.assertEqual(crossing_count, 0)
self.assertEqual(total_distances, 0.0)
def test_diagonal_reflexive_association(self):
clauses = u"""
Norm : Draw, Unit, Folk, Peer, Tour, Hall
:
:
Baby, 1N Norm, 0N> Norm
""".replace(" ", "")
params = parsed_arguments()
mcd = Mcd(clauses.split("\n"), params)
params.update(mcd.get_layout_data())
d = mcd.get_layout_data()
evaluate = fitness(d["links"], d["multiplicity"], d["col_count"], d["row_count"])
size = d["col_count"] * d["row_count"]
(crossing_count, total_distances) = evaluate(list(range(size)))
self.assertEqual(crossing_count, 0)
self.assertEqual(round(total_distances, 4), 0.8284)
def test_2_0_link(self):
clauses = u"""
CLIENT: Réf. client, Nom, Prénom, Adresse
PASSER, 0N CLIENT, 11 COMMANDE
:
COMMANDE: Num commande, Date, Montant
""".replace(" ", "")
params = parsed_arguments()
mcd = Mcd(clauses.split("\n"), params)
params.update(mcd.get_layout_data())
d = mcd.get_layout_data()
evaluate = fitness(d["links"], d["multiplicity"], d["col_count"], d["row_count"])
size = d["col_count"] * d["row_count"]
(crossing_count, total_distances) = evaluate(list(range(size)))
self.assertEqual(crossing_count, 0)
self.assertEqual(total_distances, 1.0)
def test_1_1_link(self):
clauses = u"""
CLIENT: Réf. client, Nom, Prénom, Adresse
PASSER, 0N CLIENT, 11 COMMANDE
COMMANDE: Num commande, Date, Montant
:
""".replace(" ", "")
params = parsed_arguments()
mcd = Mcd(clauses.split("\n"), params)
params.update(mcd.get_layout_data())
d = mcd.get_layout_data()
evaluate = fitness(d["links"], d["multiplicity"], d["col_count"], d["row_count"])
size = d["col_count"] * d["row_count"]
(crossing_count, total_distances) = evaluate(list(range(size)))
self.assertEqual(crossing_count, 0)
self.assertEqual(total_distances, hypot(1, 1) - 1)
def test_2_1_link(self):
clauses = u"""
:
CLIENT: Réf. client, Nom, Prénom, Adresse
PASSER, 0N CLIENT, 11 COMMANDE
COMMANDE: Num commande, Date, Montant
:
:
""".replace(" ", "")
params = parsed_arguments()
mcd = Mcd(clauses.split("\n"), params)
params.update(mcd.get_layout_data())
d = mcd.get_layout_data()
evaluate = fitness(d["links"], d["multiplicity"], d["col_count"], d["row_count"])
size = d["col_count"] * d["row_count"]
(crossing_count, total_distances) = evaluate(list(range(size)))
self.assertEqual(crossing_count, 0)
self.assertEqual(total_distances, hypot(2, 1) - 1)
def test_k33(self):
clauses = u"""
DIGNISSIM: nec sem, nunc, vulputate
IMPERDIET: a praesent, nibh, semper
TINCIDUNT: faucibus, orci, cursus
RHONCUS, 1N DIGNISSIM, 1N IMPERDIET, 1N TINCIDUNT
SODALES, 1N DIGNISSIM, 1N IMPERDIET, 1N TINCIDUNT
QUIS ENIM, 1N DIGNISSIM, 1N IMPERDIET, 1N TINCIDUNT
""".replace(" ", "")
params = parsed_arguments()
mcd = Mcd(clauses.split("\n"), params)
params.update(mcd.get_layout_data())
d = mcd.get_layout_data()
evaluate = fitness(d["links"], d["multiplicity"], d["col_count"], d["row_count"])
size = d["col_count"] * d["row_count"]
(crossing_count, total_distances) = evaluate(list(range(size)))
self.assertEqual(crossing_count, 9)
def test_k33_better(self):
clauses = u"""
DIGNISSIM: nec sem, nunc, vulputate
RHONCUS, 1N DIGNISSIM, 1N IMPERDIET, 1N TINCIDUNT
IMPERDIET: a praesent, nibh, semper
SODALES, 1N DIGNISSIM, 1N IMPERDIET, 1N TINCIDUNT
TINCIDUNT: faucibus, orci, cursus
QUIS ENIM, 1N DIGNISSIM, 1N IMPERDIET, 1N TINCIDUNT
""".replace(" ", "")
params = parsed_arguments()
mcd = Mcd(clauses.split("\n"), params)
params.update(mcd.get_layout_data())
d = mcd.get_layout_data()
evaluate = fitness(d["links"], d["multiplicity"], d["col_count"], d["row_count"])
size = d["col_count"] * d["row_count"]
(crossing_count, total_distances) = evaluate(list(range(size)))
self.assertEqual(crossing_count, 3)
if __name__ == '__main__':
unittest.main()
```
#### File: mocodo/tests/font_metrics_tests.py
```python
from __future__ import division
import sys
sys.path[0:0] = ["."]
import unittest
from mocodo.font_metrics import *
import tkFont
params = {}
params["tkinter"] = True
FontMetrics = font_metrics_factory(params)
helv36 = FontMetrics({"family": "Helvetica", "size": 36})
helv36b = FontMetrics({"family": "Helvetica", "size": 36, "weight": "bold"})
helv36b2 = FontMetrics({"family": "Helvetica-Bold", "size": 36})
helv36b3 = FontMetrics({"family": "Helvetica-Bold", "size": 36, "weight": "bold"})
helv36b4 = FontMetrics({"family": "Helvetica-Bold", "size": 36, "weight": tkFont.BOLD})
times12 = FontMetrics({"family": "Times", "size": 12})
class FontMetricsWithTkTest(unittest.TestCase):
def test_helv36_get_pixel_height(self):
self.assertEqual(helv36.get_pixel_height(), 36)
def test_helv36_get_pixel_width(self):
self.assertEqual(helv36.get_pixel_width("My string"), 146)
def test_helv36b_get_pixel_height(self):
self.assertEqual(helv36b.get_pixel_height(), 36)
def test_helv36b_get_pixel_width(self):
self.assertEqual(helv36b.get_pixel_width("My string"), 160)
def test_helv36b2_get_pixel_width(self):
self.assertEqual(helv36b2.get_pixel_width("My string"), 161)
def test_helv36b3_get_pixel_width(self):
self.assertEqual(helv36b3.get_pixel_width("My string"), 177)
def test_helv36b4_get_pixel_width(self):
self.assertEqual(helv36b4.get_pixel_width("My string"), 177)
def test_times12_get_pixel_height(self):
self.assertEqual(times12.get_pixel_height(), 12)
def test_times12_get_pixel_width(self):
self.assertEqual(times12.get_pixel_width("My string"), 47)
def test_empty_string_get_pixel_width(self):
self.assertEqual(times12.get_pixel_width(""), 0)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeanie76/Datacrat-Agg",
"score": 3
}
|
#### File: code/sheet_cleaner/functions.py
```python
from constants import *
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import pickle
import os
import pandas as pd
import re
def read_values(sheetid, range_, config):
# returns values read from a google sheet, as is.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
TOKEN = config['token']
CREDENTIALS = config['credentials']
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(TOKEN):
with open(TOKEN, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
CREDENTIALS, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(TOKEN, 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
values = sheet.values().get(spreadsheetId=sheetid, range=range_).execute().get('values', [])
if not values:
raise ValueError('Sheet data not found')
else:
return values
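# Hedged usage sketch (the spreadsheet id, range and file names below are illustrative, not from this repo):
#   config = {'token': 'token.pickle', 'credentials': 'credentials.json'}
#   values = read_values('1AbC_example_sheet_id', 'Sheet1!A1:D100', config)
#   df = values2dataframe(values)  # values2dataframe is defined further down in this module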
def insert_values(sheetid, body, config, **kwargs):
'''
Insert values into spreadsheet.
range should be included in body.
example body:
body = {
'range': 'SheetName!A1:A3',
'majorDimension': 'ROWS',
'values': [[1], [2], [3]]
}
'''
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
TOKEN = config['token']
CREDENTIALS = config['credentials']
    INPUTOPTION = kwargs.get('inputoption', 'USER_ENTERED')
# values = list
# placement = A1 notation range.
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(TOKEN):
with open(TOKEN, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
CREDENTIALS, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(TOKEN, 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
request = sheet.values().update(spreadsheetId=sheetid,
range=body['range'],
body=body,
valueInputOption=INPUTOPTION)
response = request.execute()
return response
def values2dataframe(values):
'''
Convert raw values as retrieved from read_values to a pandas dataframe
Adds a "row" number going off the assumption that we are reading from the top.
'''
columns = values[0]
ncols = len(columns)
data = values[1:]
for d in data:
if len(d) < ncols:
extension = ['']*(ncols-len(d))
d.extend(extension)
data = pd.DataFrame(data=data, columns=columns)
data['row'] = list(range(2, len(data)+2)) # keeping row number (+1 for 1 indexing +1 for column headers in sheet)
data['row'] = data['row'].astype(str)
return data
def index2A1(num):
if 0 <= num <= 25:
return alpha[num]
elif 26 <= num <= 51:
return 'A{}'.format(alpha[num%26])
elif 52 <= num <= 77:
return 'B{}'.format(alpha[num%26])
else:
raise ValueError('Could not convert index "{}" to A1 notation'.format(num))
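# Illustrative examples, assuming `alpha` (imported from constants) is the uppercase A-Z string:
#   index2A1(0) -> 'A', index2A1(25) -> 'Z', index2A1(27) -> 'AB', index2A1(52) -> 'BA'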
def get_trailing_spaces(data):
'''
Generate error table for trailing whitespaces (front and back).
return: error_table[row, ID, column_name, value].
'''
# fix trailing spaces. This applies to all columns except "row"
df = data.copy()
error_table = pd.DataFrame(columns=['row', 'ID', 'column', 'value', 'fix'])
for c in df.columns:
if c == 'row':
continue
else:
stripped = df[c].str.strip()
invalid_bool = stripped != df[c]
invalid = df[invalid_bool][['row', 'ID']].copy()
invalid['column'] = c
invalid['value'] = df[c][invalid_bool].copy()
invalid['fix'] = stripped[invalid_bool]
error_table = error_table.append(invalid, ignore_index=True, sort=True)
return error_table
def get_NA_errors(data):
'''
    Generate error table for misspelled NA values.
    We chose to write them as "NA", and so far we only
    replace "N/A" with "NA".
    return: error_table[row, ID, column, value, fix]
'''
df = data.copy()
table = pd.DataFrame(columns=['row', 'ID', 'column', 'value', 'fix'])
for c in df.columns:
if c == 'row':
continue
else:
test = df[c].str.match('N/A')
errors = df[test][['row', 'ID']]
errors['column'] = c
errors['value'] = df[test][c]
errors['fix'] = df[test][c].replace('N/A', 'NA')
table = table.append(errors, ignore_index=True, sort=True)
return table
def ErrorTest(data, columns, rgx, table):
'''
Test a regex pattern on passed columns, generate error table
for things that did not pass the test.
Note this does not generate the fix. We do this after.
'''
df = data.copy()
for c in columns:
test = df[c].str.match(rgx)
invalid = df[~test][['row', "ID"]].copy()
invalid['column'] = c
invalid['value'] = df[~test][c]
table = table.append(invalid, ignore_index=True, sort=False)
return table
def fix_cells(sheetid, sheetname, error_table, column_dict, config):
'''
Fix specific cells on the private sheet, based on error table.
Error table also needs to provide the "fix" column which is what
we are replacing the current value with.
:column_dict: map from 'column_name' to A1 notation.
'''
assert 'fix' in error_table.columns
assert 'value' in error_table.columns
fixed = 0
for i,error in error_table.iterrows():
row = error['row']
a1 = column_dict[error['column']] + row
range_ = '{}!{}'.format(sheetname, a1)
try:
fetch = read_values(sheetid, f'{sheetname}!A{row}', config) # fetch ID to ensure that it is the same.
assert error['ID'] == fetch[0][0]
body = {
'range': range_,
'majorDimension': 'ROWS',
'values': [[error['fix']]]
}
insert_values(sheetid, body, config)
fixed += 1
except Exception as E:
print(error)
print(fetch)
raise E
return fixed
def generate_error_tables(data):
'''
    Generate table for fields that don't pass the regex tests.
    For easy fixes (e.g. spacing) we can do it automatically; for trickier ones we save the table (fixed ones are omitted in error_report).
'''
table = pd.DataFrame(columns=['row', 'ID', 'value'])
table = ErrorTest(data, ['age'], rgx_age, table)
table = ErrorTest(data, ['sex'], rgx_sex, table)
table = ErrorTest(data, ['city', 'province', 'country'], rgx_country, table)
table = ErrorTest(data, ['latitude', 'longitude'], rgx_latlong, table)
table = ErrorTest(data, ['geo_resolution'], rgx_geo_res, table)
table = ErrorTest(data, date_columns, rgx_date, table)
table = ErrorTest(data, ['lives_in_Wuhan'], rgx_lives_in_wuhan, table)
fixable_errors = pd.DataFrame(columns=['row', 'ID', 'column', 'value', 'fix'])
not_fixable = []
for i, r in table.iterrows():
row = r.copy()
fix = False
col = row['column']
if col == 'sex':
test = row['value'].lower().strip() in ['male', 'female', '']
if test:
fix = row['value'].lower().strip()
elif col == 'age':
test = bool(re.match(rgx_age, row['value'].replace(' ', '')))
if test:
fix = row['value'].replace(' ', '')
elif col == 'country':
pass
elif col in ['latitude', 'longitude']:
pass
elif col == 'geo_resolution':
s = row['value']
test = bool(re.match(rgx_geo_res, s.replace(' ', '')))
if test:
fix = s.replace(' ', '')
elif col in date_columns:
pass
elif col == 'lives_in_Wuhan':
s = row['value']
test1 = bool(re.match(rgx_lives_in_wuhan, s.lower().strip()))
            test2 = s in ['1', '0']
if test1:
fix = s.lower().strip()
elif test2:
fix = 'yes' if s == '1' else 'no'
if fix:
row['fix'] = fix
fixable_errors = fixable_errors.append(row, ignore_index=True, sort=False)
else:
not_fixable.append(r.name)
fixable = fixable_errors.reset_index()
unfixable = table.loc[not_fixable].copy().reset_index()
return [fixable, unfixable]
def update_id_column(sheetid, sheetname, config, new=5000):
'''
Updates the ID column in private sheet.
    --> inserts the ID formula into the next `new` (5000 by default) blank cells.
'''
range_ = f'{sheetname}!A:A' # ID column
ids = read_values(sheetid, range_, config)
    nrows = len(ids)  # number of rows currently filled in column A (including the header)
start = nrows + 1
end = start + new
entries = []
template = "=IF(NOT(ISBLANK({}!F{})), A{}+1, )"
for i in range(start, end+1):
string = template.format(sheetname, i, i-1)
entries.append(string)
body = {
'range': f'{sheetname}!A{start}:A{end}',
'majorDimension': 'ROWS',
'values': [[x] for x in entries]
}
response = insert_values(sheetid, body, config)
return response
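# Hedged illustration of the generated formula: with sheetname='private' and row 10, the template
# expands to =IF(NOT(ISBLANK(private!F10)), A9+1, ) so the ID only increments once column F of
# that row has a value.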
def update_lat_long_columns(sheetid, sheetname, config):
'''
Input Lookup function into lat/long columns that have associated IDs
'''
# figure out range, based on ids I guess.
id_range_ = f'{sheetname}!A:A'
lat_range_ = f'{sheetname}!H:H'
lon_range_ = f'{sheetname}!I:I'
geo_range_ = f'{sheetname}!J:J'
ids = read_values(sheetid, id_range_, config)
lats = read_values(sheetid, lat_range_, config)
lons = read_values(sheetid, lon_range_, config)
geos = read_values(sheetid, geo_range_, config)
assert len(geos) == len(lats) == len(lons), 'columns H-J have different lengths'
assert len(ids) >= len(geos), 'ID column has less values than coordinate columns'
# figure out how many new entries we need.
N_new = len(ids) - len(geos)
start = len(geos) + 1 # number for first row to insert in.
end = start + N_new # last row.
# make entries
htemplate = '=IFNA(VLOOKUP(D{}&";"&E{}&";"&F{},geo_admin!I:S,3, false),)'
itemplate = '=IFNA(VLOOKUP(D{}&";"&E{}&";"&F{},geo_admin!I:S,4, false),)'
jtemplate = '=IFNA(VLOOKUP(D{}&";"&E{}&";"&F{},geo_admin!I:S,5, false),)'
entries = []
for row in range(start, end):
h = htemplate.format(row, row, row)
i = itemplate.format(row, row, row)
j = jtemplate.format(row, row, row)
entries.append([h, i, j])
body = {
'range': f'{sheetname}!H{start}:J{end}',
'majorDimension': 'ROWS',
'values': entries
}
response = insert_values(sheetid, body, config)
return response
def update_admin_columns(sheetid, sheetname, config):
'''
Input function into columns AA-AF.
'''
range0 = f'{sheetname}!A:A' # ids
ranges = ['{}!A{}:A{}'.format(sheetname, x, x) for x in ['A', 'B', 'C', 'D', 'E', 'F']]
ids = read_values(sheetid, range0, config)
values = [read_values(sheetid, r, config) for r in ranges]
max_ = max([len(x) for x in values])
N_new = len(ids) - max_
start = max_ + 1
end = start + N_new
template = '=IFNA(VLOOKUP(D{}&";"&E{}&";"&F{},geo_admin!I:S,REPLACEME, false), )'
templates = [template.replace('REPLACEME', str(i)) for i in range(6,12)]
entries = []
for row in range(start, end):
entry = [t.format(row, row, row) for t in templates]
entries.append(entry)
body = {
'range': f'{sheetname}!AA{start}:AF{end}',
'majorDimension': 'ROWS',
'values': entries
}
response = insert_values(sheetid, body, config)
return response
```
|
{
"source": "jeanielim/cookiecutter-data-science",
"score": 3
}
|
#### File: {{ cookiecutter.repo_name }}/tests/test_train_data.py
```python
import pandas as pd
from unittest.mock import patch, Mock
from src.models.train_model import fetch_processed, fit_model
mock_data = {
'label': [1, 0, 0, 1],
'fizz': ['John', 'Bob', 'Sam', 'Kevin'],
'buzz': ['foo', 'bar', 'buzz', 'fizz'],
'foo': ['y', 'n', 'm', 'y'],
'bar': ['a', 'b', 'c', 'd'],
'fish': ['nyc', 'la', 'boston', 'amherst']
}
def test_fetch_processed(monkeypatch):
def mock_read_csv(fin):
return pd.DataFrame(mock_data)
monkeypatch.setattr(pd, 'read_csv', mock_read_csv)
x_train, x_test, y_train, y_test = fetch_processed('foo')
assert all(y_train >= 0)
assert all(y_test >= 0)
assert x_train.shape[0] > 0
assert x_test.shape[0] > 0
@patch('src.models.train_model.RandomForestClassifier')
def test_fit_model(mock_forest, monkeypatch):
mock_model = Mock()
attrs = {'fit.return_value': 'foo'}
mock_model.configure_mock(**attrs)
mock_forest.return_value = mock_model
model = fit_model('foo', 'bar')
assert model.fit() == 'foo'
```
|
{
"source": "jeanineharb/butterfree",
"score": 3
}
|
#### File: butterfree/clients/spark_client.py
```python
from typing import Any, Dict, List, Optional, Union
from pyspark.sql import DataFrame, DataFrameReader, SparkSession
from pyspark.sql.streaming import DataStreamReader, StreamingQuery
from pyspark.sql.types import StructType
from butterfree.clients import AbstractClient
class SparkClient(AbstractClient):
"""Handle Spark session connection.
Get query results with SQL, reads and writes data on external systems.
"""
def __init__(self) -> None:
self._session: Optional[SparkSession] = None
@property
    def conn(self) -> SparkSession:
        """Gets or creates a SparkSession.
Returns:
Spark session
"""
if not self._session:
self._session = SparkSession.builder.getOrCreate()
return self._session
def read(
self,
format: str,
options: Dict[str, Any],
schema: Optional[StructType] = None,
stream: bool = False,
) -> DataFrame:
"""Use the SparkSession.read interface to load data into a dataframe.
Check docs for more information:
https://spark.apache.org/docs/latest/sql-data-sources-load-save-functions.html#generic-loadsave-functions
Args:
format: string with the format to be used by the DataframeReader.
options: options to setup the DataframeReader.
stream: flag to indicate if data must be read in stream mode.
schema: an optional pyspark.sql.types.StructType for the input schema.
Returns:
Dataframe
"""
if not isinstance(format, str):
raise ValueError("format needs to be a string with the desired read format")
if not isinstance(options, dict):
raise ValueError("options needs to be a dict with the setup configurations")
df_reader: Union[
DataStreamReader, DataFrameReader
] = self.conn.readStream if stream else self.conn.read
df_reader = df_reader.schema(schema) if schema else df_reader
return df_reader.format(format).options(**options).load()
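    # Hedged usage sketch (paths and Kafka options below are illustrative, not part of this class):
    #   client = SparkClient()
    #   batch_df = client.read(format="parquet", options={"path": "s3a://bucket/events/"})
    #   stream_df = client.read(format="kafka", stream=True,
    #                           options={"kafka.bootstrap.servers": "host:9092", "subscribe": "events"})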
def read_table(self, table: str, database: str = None) -> DataFrame:
"""Use the SparkSession.read interface to read a metastore table.
Args:
database: name of the metastore database/schema
table: name of the table in metastore
Returns:
Dataframe
"""
if not isinstance(table, str):
raise ValueError(
"table needs to be a string with the name of the registered table"
)
return self.conn.read.table(f"{database}.{table}" if database else table)
def sql(self, query: str) -> DataFrame:
"""Run a query using Spark SQL.
Args:
query: Spark SQL query.
Returns:
Dataframe
"""
return self.conn.sql(query)
@staticmethod
def write_dataframe(
dataframe: DataFrame, format_: str, mode: str, **options: Any
) -> None:
"""Receive a spark DataFrame and write it.
Args:
dataframe: dataframe containing data from a feature set.
format_: format used to save the dataframe.
            mode: writing mode; can be "error", "append", "overwrite" or
"ignore". For more information:
[here](https://spark.apache.org/docs/2.3.0/sql-programming-guide.html#save-modes).
**options: all other options that can be used in a DataFrameWriter.
"""
if not isinstance(format_, str):
raise ValueError("format needs to be a string")
if not isinstance(mode, str):
raise ValueError("mode needs to be a string")
dataframe.write.save(format=format_, mode=mode, **options)
def write_stream(
self,
dataframe: DataFrame,
processing_time: str,
output_mode: str,
checkpoint_path: str,
format_: str,
mode: str,
**options: Any,
) -> StreamingQuery:
"""Starts streaming data writing job.
Args:
dataframe: Spark dataframe containing data from a feature set.
processing_time: a processing time interval as a string.
E.g. '5 seconds', '1 minute'. Set a trigger that runs the
                mini-batch periodically based on the processing time. To
                process data as soon as it arrives, without waiting for the
                time frame, set the value to '0 seconds'.
output_mode: specifies how data of a streaming DataFrame/Dataset is
written to a streaming sink destination.
checkpoint_path: path on S3 to save checkpoints for the stream job.
                These checkpoints can be used when the job restarts, to resume
                from where it stopped.
format_: format used to save the dataframe.
            mode: writing mode; can be "error", "append", "overwrite" or
"ignore". For more information:
[here](https://spark.apache.org/docs/2.3.0/sql-programming-guide.html#save-modes).
**options: all other options that can be used in a DataFrameWriter.
More information about processing_time, output_mode and checkpoint_path
can be found in Spark documentation:
[here](https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html)
Returns:
Streaming handler.
"""
if not dataframe.isStreaming:
raise ValueError("A stream df is needed to start a streaming job.")
return (
dataframe.writeStream.trigger(processingTime=processing_time)
.outputMode(output_mode)
.option("checkpointLocation", checkpoint_path)
.foreachBatch(
lambda batch_df, _: self.write_dataframe(
batch_df, format_, mode, **options
)
)
.start()
)
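    # Hedged usage sketch (paths and interval are illustrative; `client` is assumed to be a SparkClient instance):
    #   handler = client.write_stream(stream_df, processing_time="1 minute", output_mode="update",
    #                                 checkpoint_path="s3a://bucket/checkpoints/job",
    #                                 format_="parquet", mode="append", path="s3a://bucket/output/")
    #   handler.awaitTermination()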
@staticmethod
def write_table(
dataframe: DataFrame,
database: str,
table_name: str,
path: str,
format_: str = None,
mode: str = None,
partition_by: List[str] = None,
**options: Any,
) -> None:
"""Receive a spark DataFrame and write it as a table in metastore.
Args:
dataframe: spark dataframe containing data from a feature set.
database: specified database name.
table_name: specified table name.
path: string with the local to save the table.
format_: string with the format used to save.
mode: writing mode, it can be: "error", "append", "overwrite" or
"ignore". More information:
[here](https://spark.apache.org/docs/2.3.0/sql-programming-guide.html#save-modes).
partition_by: names of partitioning columns.
options: all other options that can be used in a DataFrameWriter.
"""
if not isinstance(database, str):
raise ValueError("database needs to be a string")
if not isinstance(table_name, str):
raise ValueError("table_name needs to be a string")
if not isinstance(path, str):
raise ValueError("path needs to be a string of the local to save")
name = "{}.{}".format(database, table_name)
dataframe.write.saveAsTable(
mode=mode,
format=format_,
partitionBy=partition_by,
name=name,
path=path,
**options,
)
def create_temporary_view(
self, dataframe: DataFrame, name: str
) -> Optional[StreamingQuery]:
"""Create a temporary view from a given dataframe.
Args:
dataframe: dataframe to be be queried by the view.
name: name of the temporary view.
"""
if not dataframe.isStreaming:
return dataframe.createOrReplaceTempView(name)
return dataframe.writeStream.format("memory").queryName(name).start()
```
|
{
"source": "Jeaninezpp/Simplified_DMC",
"score": 2
}
|
#### File: Jeaninezpp/Simplified_DMC/location_dmc.py
```python
import argparse
import os
import torch
from torch.utils.data import DataLoader
from torch import optim
import numpy as np
from data.MUSIC_dataset import MUSIC_Dataset, MUSIC_AV_Classify
from model.base_model import resnet18
from model.dmc_model import DMC_NET
from sklearn import cluster, metrics
import numpy as np
from sklearn.preprocessing import normalize
from torch import nn
import torch.nn.functional as F
import pickle
def batch_organize(audio_data, posi_img_data, nega_img_data, posi_label, nega_label):
batch_audio_data = torch.zeros(audio_data.shape[0] * 2, audio_data.shape[1], audio_data.shape[2],
audio_data.shape[3])
batch_image_data = torch.zeros(posi_img_data.shape[0] * 2, posi_img_data.shape[1], posi_img_data.shape[2],
posi_img_data.shape[3])
batch_labels = torch.zeros(audio_data.shape[0] * 2)
class_labels = torch.zeros(audio_data.shape[0] * 2)
for i in range(audio_data.shape[0]):
batch_audio_data[i * 2, :] = audio_data[i, :]
batch_audio_data[i * 2 + 1, :] = audio_data[i, :]
batch_image_data[i * 2, :] = posi_img_data[i, :]
batch_image_data[i * 2 + 1, :] = nega_img_data[i, :]
batch_labels[i * 2] = 1
batch_labels[i * 2 + 1] = 0
class_labels[i * 2] = posi_label[i]
class_labels[i * 2 + 1] = nega_label[i]
return batch_audio_data, batch_image_data, batch_labels, class_labels
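# Layout produced by batch_organize (hedged summary): for a batch of size B the returned tensors
# have 2*B rows, where even indices hold (audio_i, positive_image_i, av_label=1) and odd indices
# hold (audio_i, negative_image_i, av_label=0), so each audio clip appears twice.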
def eva_metric2(predict, gt, pair_num=2):
num = int(predict.shape[0]/pair_num)
correct = 0
for i in range(num):
pos = predict[pair_num*i]
flag = True
for j in range(pair_num-1):
neg = predict[pair_num*i+j+1]
if pos >= neg:
flag = False
if flag == True:
correct += 1
return correct / num
class ContrastiveLoss(nn.Module):
"""
Contrastive loss
Takes embeddings of two samples and a target label == 1 if samples are from the same class and label == 0 otherwise
"""
def __init__(self, margin=5.):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-9
def forward(self, output, target, size_average=True):
distances = output.pow(2).sum(1) # squared distances
losses = 0.5 * (target.float() * distances +
(1 + -1 * target).float() * F.relu(self.margin - (distances + self.eps).sqrt()).pow(2))
return losses.mean() if size_average else losses.sum()
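# Behaviour sketch (hedged): for a positive pair (target == 1) the loss is 0.5 * d^2, pulling the
# pair together; for a negative pair (target == 0) it is 0.5 * max(0, margin - d)^2, which vanishes
# once the distance d exceeds the margin (5.0 by default), with d the Euclidean norm of `output`.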
def location_model_train(model, data_loader, optimizer, criterion):
model.train()
accs = 0
count = 0
losses = 0
for i, data in enumerate(data_loader, 0):
if i % 200 == 0:
print('location batch:%d' % i)
audio_data, posi_img_data, nega_img_data, posi_label, nega_label, _, _ = data
audio_data, image_data, av_labels, class_labels = batch_organize(audio_data, posi_img_data, nega_img_data, posi_label, nega_label)
audio_data, image_data, av_labels = audio_data.type(torch.FloatTensor).cuda(), \
image_data.type(torch.FloatTensor).cuda(), \
av_labels.type(torch.FloatTensor).cuda()
optimizer.zero_grad()
av_outputs, _, _ = model(image_data, audio_data)
loss = criterion(av_outputs, av_labels)
loss.backward()
optimizer.step()
losses += loss.detach().cpu().numpy()
# acc = eva_metric2(av_outputs.detach().cpu().numpy(), av_labels.cpu().numpy())
# accs += acc
count += 1
print('location loss is %.3f ' % (losses / count))
return accs / count
def location_model_eva(model, data_loader):
model.eval()
accs = 0
num = len(data_loader.dataset)
count = 0
results = {}
with torch.no_grad():
for i, data in enumerate(data_loader, 0):
audio_data, posi_img_data, nega_img_data, posi_label, nega_label, img_path, _ = data
audio_data, image_data, av_labels, class_labels = batch_organize(audio_data, posi_img_data, nega_img_data,
posi_label, nega_label)
audio_data, image_data = audio_data.type(torch.FloatTensor).cuda(), image_data.type(torch.FloatTensor).cuda()
av_outputs, av_maps, av_dists = model(image_data, audio_data)
obj_localization = av_maps.detach().cpu().numpy()
obj_localization = obj_localization[::2]
av_dists = av_dists[::2]
# accs += eva_metric2(av_outputs.detach().cpu().numpy(), av_labels.numpy())
count += 1
_, idx = torch.sort(av_dists, dim=1)
idx = idx[:, 1].detach().cpu().numpy()
for k in range(len(img_path)):
results[img_path[k][:-4]] = obj_localization[k]
pickle.dump(results, open('dmc.pkl', 'wb'))
return accs / count
def main():
parser = argparse.ArgumentParser(description='AID_PRETRAIN')
parser.add_argument('--data_list_dir', type=str,
default='./data/data_indicator/music/solo')
parser.add_argument('--data_dir', type=str, default='/home/ruiq/Music/solo')
parser.add_argument('--mode', type=str, default='train', help='train/val/test')
parser.add_argument('--json_file', type=str,default='./data/MUSIC_label/MUSIC_solo_videos.json')
parser.add_argument('--use_pretrain', type=int, default=0, help='whether to init from ckpt')
parser.add_argument('--ckpt_file', type=str, default='location_net_009_0.665.pth', help='pretrained model name')
parser.add_argument('--enable_img_augmentation', type=int, default=1, help='whether to augment input image')
parser.add_argument('--enable_audio_augmentation', type=int, default=1, help='whether to augment input audio')
parser.add_argument('--batch_size', type=int, default=32, help='training batch size')
    parser.add_argument('--learning_rate', type=float, default=1e-4, help='learning rate')
parser.add_argument('--epoch', type=int, default=100, help='training epoch')
parser.add_argument('--gpu_ids', type=str, default='[0,1,2,3]', help='USING GPU IDS e.g.\'[0,4]\'')
parser.add_argument('--num_threads', type=int, default=4, help='number of threads')
parser.add_argument('--seed', type=int, default=10)
parser.add_argument('--evaluate', type=int, default=0, help='only evaluate or not')
parser.add_argument('--v_cluster', type=int, default=2, help='number of visual cluster')
parser.add_argument('--a_cluster', type=int, default=1, help='number of audio cluster')
args = parser.parse_args()
train_list_file = os.path.join(args.data_list_dir, 'solo_training_1.txt')
val_list_file = os.path.join(args.data_list_dir, 'solo_validation.txt')
test_list_file = os.path.join(args.data_list_dir, 'solo_testing.txt')
train_dataset = MUSIC_Dataset(args.data_dir, train_list_file, args)
val_dataset = MUSIC_Dataset(args.data_dir, val_list_file, args)
test_dataset = MUSIC_Dataset(args.data_dir, test_list_file, args)
train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_threads)
val_dataloader = DataLoader(dataset=val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_threads)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_threads)
# net setup
visual_backbone = resnet18(modal='vision',pretrained=False)
audio_backbone = resnet18(modal='audio')
av_model = DMC_NET(visual_net=visual_backbone, audio_net=audio_backbone, v_cluster_num=args.v_cluster, a_cluster_num=args.a_cluster)
if args.use_pretrain:
PATH = args.ckpt_file
state = torch.load(PATH)
av_model.load_state_dict(state, strict=False)
av_model_cuda = av_model.cuda()
loss_func = ContrastiveLoss()
optimizer = optim.Adam(params=av_model_cuda.parameters(), lr=args.learning_rate, betas=(0.9, 0.999),
weight_decay=0.0001)
if args.evaluate:
eva_location_acc = location_model_eva(av_model_cuda, test_dataloader)
return
for e in range(0, args.epoch):
print('Epoch is %03d' % e)
train_location_acc = location_model_train(av_model_cuda, train_dataloader, optimizer, loss_func)
eva_location_acc = location_model_eva(av_model_cuda, test_dataloader)
print('train acc is %.3f, val acc is %.3f' % (train_location_acc, eva_location_acc))
if e % 3 == 0:
PATH = 'ckpt/dmc/dmc_stage_one_%03d_%.3f.pth' % (e, eva_location_acc)
torch.save(av_model_cuda.state_dict(), PATH)
if __name__ == '__main__':
main()
```
|
{
"source": "jeanings/jng-portfolio",
"score": 3
}
|
#### File: application/index/routes.py
```python
from flask import Blueprint, render_template, url_for
from flask import current_app as app
from .assets import build_assets
DEBUG_MODE = app.config['FLASK_DEBUG']
# Blueprint config.
index_bp = Blueprint('index_bp', __name__,
static_folder='static',
template_folder='templates'
)
if DEBUG_MODE == 'True':
build_assets(app)
# Index route.
@index_bp.route('/', methods=['GET'])
def index():
return render_template('index.html',
title="Some personal site on the web —— jeanings.space"
)
```
#### File: projects/japan_real_estate_choropleth/routes.py
```python
from flask import Blueprint, render_template
from flask import current_app as app
from .assets import build_assets
DEBUG_MODE = app.config['FLASK_DEBUG']
# Blueprint config.
japan_real_estate_choropleth_bp = Blueprint('japan_real_estate_choropleth_bp', __name__,
static_folder='static',
template_folder='templates'
)
if DEBUG_MODE == 'True':
build_assets(app)
# Japan real estate choropleth route.
@japan_real_estate_choropleth_bp.route('/fudousan-kakaku-nuriwake-chizu-2010-2020', methods=['GET'])
@japan_real_estate_choropleth_bp.route('/fudousan-kakaku-nuriwake-chizu-2010-2020/', methods=['GET'])
def japan_real_estate_choropleth():
return render_template('japan_real_estate_choropleth.html',
title="不動産取引価格塗り分け地図 2010~2020 —— jeanings.space",
MAPBOX_ACCESS_KEY=app.config["MAPBOX_ACCESS_KEY"]
)
```
#### File: tokaido/tools/create_db.py
```python
from pathlib import Path
from flask import current_app as app
from os import environ
from dotenv import load_dotenv
from sqlalchemy import create_engine, Table, Column, DECIMAL, Integer, String, Text, VARCHAR, MetaData
import json
load_dotenv(Path.cwd() / '.env')
DATABASE_URL = environ.get('DATABASE_URL')
DATAFILE = Path.cwd() / "application" / "projects" / "tokaido" / "tools" / "journal" / "journal.json"
engine = create_engine(DATABASE_URL)
meta = MetaData()
def table_exists():
""" Helper: Checks for table. """
    print("Checking for table 'journal' in database...")
if engine.dialect.has_table(engine, "journal"):
return True
else:
return False
def create_table():
""" Helper: create SQL model. """
print("Table doesn't exist, creating now.")
journal_entries = Table("journal", meta,
Column("id", Integer, primary_key=True),
Column("day", Integer, index=True, unique=True, nullable=True),
Column("date", String(50), index=False, unique=True, nullable=True),
Column("route", VARCHAR(75), index=False, unique=False, nullable=True),
Column("weather", VARCHAR(50), index=False, unique=False, nullable=True),
Column("weather_data", VARCHAR(100), index=False, unique=False, nullable=True),
Column("temp", Integer, index=False, unique=False, nullable=True),
Column("distance", DECIMAL(4,2), index=False, unique=False, nullable=True),
Column("distance_percent", DECIMAL(4,2), index=False, unique=False, nullable=True),
Column("distance_percent_cum", DECIMAL(5,2), index=False, unique=False, nullable=True),
Column("moving", String(20), index=False, unique=False, nullable=True),
Column("lodging", DECIMAL(6,2), index=False, unique=False, nullable=True),
Column("food", DECIMAL(6,2), index=False, unique=False, nullable=True),
Column("strava", VARCHAR(85), index=False, unique=False, nullable=True),
Column("entry", Text, index=False, unique=False, nullable=True)
)
journal_entries.create(engine)
def populate_table(data):
""" Helper: Fill in table. """
print("Adding data to table...")
for index, entry in enumerate(data['journalEntries']):
id_num = data['journalEntries'][index]['id']
day = data['journalEntries'][index]['day']
date = data['journalEntries'][index]['date']
route = data['journalEntries'][index]['route']
weather = data['journalEntries'][index]['weather']
weather_data = data['journalEntries'][index]['weatherData']
temp = data['journalEntries'][index]['temp']
distance = data['journalEntries'][index]['distance']
distancePercent = data['journalEntries'][index]['distancePercent']
distancePercentCum = data['journalEntries'][index]['distancePercentCum']
moving = data['journalEntries'][index]['moving']
lodging = data['journalEntries'][index]['lodging']
food = data['journalEntries'][index]['food']
strava = data['journalEntries'][index]['strava']
text_entry = data['journalEntries'][index]['entry']
with engine.connect() as connection:
connection.execute("""
INSERT INTO journal (id, day, date, route, weather, weather_data, temp, distance, distance_percent, distance_percent_cum, moving, lodging, food, strava, entry)
VALUES (%(id)s, %(day)s, %(date)s, %(route)s, %(weather)s, %(weather_data)s, %(temp)s, %(distance)s, %(distance_percent)s, %(distance_percent_cum)s, \
%(moving)s, %(lodging)s, %(food)s, %(strava)s, %(entry)s);""", {
"id": id_num,
"day": day,
"date": date,
"route": route,
"weather": weather,
"weather_data": weather_data,
"temp": temp,
"distance": distance,
"distance_percent": distancePercent,
"distance_percent_cum": distancePercentCum,
"moving": moving,
"lodging": lodging,
"food": food,
"strava": strava,
"entry": text_entry
}
)
def update_table(data):
""" Helper: Update table. """
print("Table exists - updating table instead.")
for index, entry in enumerate(data['journalEntries']):
id_num = data['journalEntries'][index]['id']
day = data['journalEntries'][index]['day']
date = data['journalEntries'][index]['date']
route = data['journalEntries'][index]['route']
weather = data['journalEntries'][index]['weather']
weather_data = data['journalEntries'][index]['weatherData']
temp = data['journalEntries'][index]['temp']
distance = data['journalEntries'][index]['distance']
distancePercent = data['journalEntries'][index]['distancePercent']
distancePercentCum = data['journalEntries'][index]['distancePercentCum']
moving = data['journalEntries'][index]['moving']
lodging = data['journalEntries'][index]['lodging']
food = data['journalEntries'][index]['food']
strava = data['journalEntries'][index]['strava']
text_entry = data['journalEntries'][index]['entry']
with engine.connect() as connection:
connection.execute("""
UPDATE journal SET entry = %(entry)s
WHERE id = %(id)s;""", {
"id": id_num,
"entry": text_entry
}
)
def create_db():
""" Reads in json and builds db with model from models.py """
with open(DATAFILE) as file:
print(">>> Opening {0}...".format(DATAFILE))
data = json.load(file)
if table_exists() is False:
create_table()
populate_table(data)
print("Table created and populated, closing.")
else:
update_table(data)
print("Table updated, closing.")
if __name__ == '__main__':
create_db()
```
#### File: tokaido/tools/photo_json.py
```python
import json, piexif, pytz, jsons
from copy import deepcopy
from datetime import datetime
from pathlib import Path
PROJ_FOLDER = Path.cwd() / "application" / "projects" / "tokaido"
IMG_FOLDER = PROJ_FOLDER / "tools" / "journal" / "images"
PHOTO_JSON = IMG_FOLDER / "tools" / "journal" / "photo.json"
JSON_FILE = PROJ_FOLDER / "tools" / "data" / "photo_data.json"
class Photo(object):
class Geometry(object):
def __init__(self):
self.type = "Point"
self.coordinates = None
class Properties(object):
def __init__(self):
self.filename = None
self.url = None
self.thumbnail = None
self.icon = None
self.date = None
self.day = None
def __init__(self):
self.type = "Feature"
self.geometry = self.Geometry()
self.properties = self.Properties()
def dms_to_deci_deg(dms_coord):
"""
Helper function to convert degrees/minutes/seconds to decimal degree coordinates.
https://docs.microsoft.com/en-us/office/troubleshoot/excel/convert-degrees-minutes-seconds-angles
"""
degrees = dms_coord[0][0]
minutes = dms_coord[1][0]
seconds = dms_coord[2][0]
deci_coord = degrees + (minutes / 60) + (seconds / 100 / 3600)
return deci_coord
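# Worked example (hedged): for a rational-encoded DMS triple like ((43, 1), (3, 1), (2744, 100)),
# i.e. 43 deg 3 min 27.44 sec, this returns 43 + 3/60 + 2744/100/3600 ~= 43.0576. The extra /100
# assumes the EXIF seconds rational is stored with a denominator of 100, as this script expects.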
def build_photo_json():
""" Crawls through images to extract data and generate a json from it. """
allowed_extensions = ['.jpg', '.JPG']
image_list = [item for item in IMG_FOLDER.rglob('**/*') if item.suffix in allowed_extensions]
collection = {"type": "FeatureCollection", "features": []}
data_dict = []
with open(JSON_FILE, 'w') as file:
for image in image_list:
# Load existing EXIF metadata.
exif_data = piexif.load(str(image))
print(">>> Extracting data from {0}...".format(image.name))
# Clean up date format.
tz_JPT = pytz.timezone('Japan')
raw_exif_time = exif_data['Exif'][piexif.ExifIFD.DateTimeOriginal]
exif_time = raw_exif_time.decode('ASCII')
exif_time = datetime.strptime(exif_time, "%Y:%m:%d %H:%M:%S")
exif_time = tz_JPT.localize(exif_time, is_dst=False)
latitude = dms_to_deci_deg(exif_data['GPS'][2])
longitude = dms_to_deci_deg(exif_data['GPS'][4])
# Add wanted data.
photo_data = Photo()
photo_data.geometry.coordinates = [longitude, latitude]
            photo_data.properties.filename = image.stem  # str.strip(".jpg") strips characters, not the extension
photo_data.properties.url = "https://storage.googleapis.com/jn-portfolio/projects/tokaido/images/" + image.name
photo_data.properties.thumbnail = "https://storage.googleapis.com/jn-portfolio/projects/tokaido/images/thumbs/" + image.name
photo_data.properties.icon = "https://storage.googleapis.com/jn-portfolio/projects/tokaido/images/icons/" + image.name
photo_data.properties.date = exif_time.strftime("%Y/%m/%d, %H:%M:%S")
photo_data.properties.day = exif_time.strftime("%Y/%m/%d")
temp_dict = {image.stem: photo_data}
# dump = json.dumps(temp_dict[image.stem].__dict__)
dump = temp_dict[image.stem].__dict__
# data_dict.append(deepcopy(temp_dict))
data_dict.append(deepcopy(dump))
# file.write(dump + "\n")
collection["features"] = data_dict
collectionString = jsons.dumps(collection)
# json.dump(data_dict, file, indent=2)
file.write(collectionString)
file.close()
print(">>> JSON generated, closing program.")
if __name__ == '__main__':
build_photo_json()
```
|
{
"source": "jeanjacquesp/via-cms",
"score": 2
}
|
#### File: via-cms/via_cms/cli.py
```python
import os
import click
from via_cms.model import *
# from via_cms.model._relationship import GeolocPost
# from via_cms.model.feed.basket_dao import Basket
# from via_cms.model.feed.feed_document_dao import FeedDocument
# from via_cms.model.feed.feed_finance_dao import FeedFinance
# from via_cms.model.feed.feed_news_dao import FeedNews
# from via_cms.model.feed.feed_post_dao import FeedPost
# from via_cms.model.feed.price_dao import Price
# from via_cms.model.internal.role_dao import Role
# from via_cms.model.internal.workflow_dao import Workflow
# from via_cms.model.internal.user_dao import User
# from via_cms.model.monitor.client_dao import Client
# from via_cms.model.monitor.feedback_dao import Feedback
# from via_cms.model.static.command_dao import Command
# from via_cms.model.feed.feed_dao import Feed
# from via_cms.model.static.geoloc_dao import Geoloc
# from via_cms.model.static.profile_dao import Profile
# from via_cms.model.static.status_dao import Status
# from via_cms.model.static.subject_dao import Subject
# from via_cms.model.static.widget_dao import Widget
def command_translate(app):
@app.cli.group()
def translate():
"""Translation and localization commands."""
pass
@translate.command()
@click.argument('lang')
def init(lang):
"""Initialize a new language."""
if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
raise RuntimeError('extract command failed')
# end if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .')
        if os.system('pybabel init -i messages.pot -d via_cms/translations -l ' + lang):
raise RuntimeError('init command failed')
# end if os.system('pybabel init -i messages.pot -d translations -l ' + lang)
os.remove('messages.pot')
@translate.command()
def update():
"""Update all languages."""
if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
raise RuntimeError('extract command failed')
# end if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .')
if os.system('pybabel update -i messages.pot -d translations'):
raise RuntimeError('update command failed')
# end if os.system('pybabel update -i messages.pot -d translations')
os.remove('messages.pot')
@translate.command()
def compile():
"""Compile all languages."""
if os.system('pybabel compile -d translations'):
raise RuntimeError('compile command failed')
def command_importer(app):
@app.cli.group()
def importer():
pass
@importer.command(help='import from csv (UTF-8)')
@click.argument('file_path')
@click.argument('table_name')
def from_csv(file_path, table_name):
"""Import a csv file to the database."""
if not file_path:
raise FileExistsError('{} does not exist'.format(file_path))
# end if not file_path
if not table_name:
raise ValueError('Table name should not be empty')
elif table_name.lower() == 'basket_tbl':
Basket.import_from_csv(file_path)
elif table_name.lower() == 'command_tbl':
Command.import_from_csv(file_path)
elif table_name.lower() == 'feed_tbl':
Feed.import_from_csv(file_path)
elif table_name.lower() == 'widget_tbl':
Widget.import_from_csv(file_path)
elif table_name.lower() == 'geoloc_tbl':
Geoloc.import_from_csv(file_path)
elif table_name.lower() == 'profile_tbl':
Profile.import_from_csv(file_path)
elif table_name.lower() == 'status_tbl':
Status.import_from_csv(file_path)
elif table_name.lower() == 'subject_tbl':
Subject.import_from_csv(file_path)
elif table_name.lower() == 'role_tbl':
Role.import_from_csv(file_path)
elif table_name.lower() == 'user_tbl':
User.import_from_csv(file_path)
elif table_name.lower() == 'workflow_tbl':
Workflow.import_from_csv(file_path)
else:
raise ValueError('Table name not part of the CLI.')
@importer.command(help='export a table to csv (UTF-8)')
@click.argument('file_path')
@click.argument('table_name')
def to_csv(file_path, table_name):
if not file_path:
raise FileExistsError('{} does not exist'.format(file_path))
# end if not file_path
if not table_name:
raise ValueError('Table name should not be empty')
# end if not table_name
if table_name.lower() == 'geoloc':
Geoloc.export_to_csv(file_path)
# end if table_name.lower() == 'geoloc'
```
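These command groups only exist once they are attached to a Flask application. A hedged sketch of the wiring; how the real project builds its app is not shown in this excerpt, so the `Flask()` construction below is illustrative:

```python
# Illustrative wiring only: command_translate/command_importer come from the
# module above; the application construction is an assumption.
from flask import Flask

from via_cms.cli import command_importer, command_translate

app = Flask(__name__)
command_translate(app)  # adds the `flask translate init|update|compile` group
command_importer(app)   # adds the `flask importer ...` group

# On the command line this would then allow, for example:
#   flask importer from-csv data/role.csv role_tbl
# (the exact command spelling depends on how the installed Click version maps
#  underscores in function names to command names)
```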
#### File: model/internal/role_dao.py
```python
from sqlalchemy.orm import validates
from via_cms.extension import db
from via_cms.model._database import Model
from via_cms.model._database import ValidateName
from via_cms.model.static.geoloc_dao import Geoloc
class Role(Model, ValidateName):
__tablename__ = 'role_tbl'
id = db.Column(db.Integer, primary_key=True, autoincrement=False)
principal = db.Column(db.Unicode(31), nullable=False) # admin editor supervisor
constraint = db.Column(db.Unicode(31)) # null, region
name = db.Column(db.Unicode(64), nullable=False, unique=True)
label_ar = db.Column(db.Unicode(128), nullable=False, unique=True)
label_en = db.Column(db.Unicode(128), nullable=False, unique=True)
description_ar = db.Column(db.Unicode(512))
description_en = db.Column(db.Unicode(512))
geoloc_list = db.relationship('Geoloc', secondary='role_geoloc_tbl', backref=db.backref('role_list_br', lazy='dynamic'))
user_list = db.relationship('User', secondary='user_role_tbl', backref=db.backref('role_list_br', lazy='dynamic'))
@validates('constraint')
def validate_constraint(self, _, data):
if data:
assert data in ['region'] # the list of constraints
return data
@validates('principal')
def validate_principal(self, _, data):
if data:
assert data in ['admin', 'supervisor', 'editor', 'third_party'] # the list of constraints
return data
@staticmethod
def import_from_csv(file_path):
import csv
with open(file_path, encoding='utf-8') as csvfile:
reader = csv.reader(csvfile, quoting=csv.QUOTE_NONE)
first = True
for row in reader:
if first:
first = False
else:
id = row[0]
name = row[1]
principal = row[2]
constraint = row[3]
label_ar = row[4]
label_en = row[5]
description_ar = row[6]
description_en = row[7]
geoloc_list_column_separated = row[8]
if geoloc_list_column_separated:
geoloc_list_column_separated = geoloc_list_column_separated.split(':')
role = Role(id=id, name=name, principal=principal, constraint=constraint, label_en=label_en,
label_ar=label_ar, description_en=description_en, description_ar=description_ar)
for geoloc_id in geoloc_list_column_separated:
geoloc = Geoloc.query.get(int(geoloc_id))
if geoloc:
role.geoloc_list.append(geoloc)
role.save()
```
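The importer skips one header row and reads columns positionally; the last column carries colon-separated Geoloc ids. An illustrative layout (values are made up, only the column order comes from the parsing code above, and `csv.QUOTE_NONE` means fields must not contain commas):

```python
# Column order expected by Role.import_from_csv (header row is skipped):
# id, name, principal, constraint, label_ar, label_en,
# description_ar, description_en, geoloc ids joined by ':'
illustrative_rows = [
    "id,name,principal,constraint,label_ar,label_en,description_ar,description_en,geoloc_list",
    "1,regional_editor,editor,region,label-ar-1,Regional editor,desc-ar,desc-en,1001:1002",
]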
#### File: model/internal/user_dao.py
```python
import datetime as dt
from flask_login import UserMixin
from sqlalchemy.orm import validates
from via_cms.extension import bcrypt
from via_cms.extension import db
from via_cms.model._database import Model
from via_cms.model.internal.role_dao import Role
class User(UserMixin, Model):
__tablename__ = 'user_tbl'
# flask packages required to name the identifier column: id, no other choice.
id = db.Column(db.Integer, primary_key=True, autoincrement=False)
username = db.Column(db.Unicode(64), unique=True, nullable=False)
email = db.Column(db.Unicode(128), unique=True, nullable=False)
password = db.Column(db.String(128), nullable=True)
created = db.Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
updated = db.Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
last_seen = db.Column(db.DateTime, default=dt.datetime.utcnow)
alias_ar = db.Column(db.Unicode(64), nullable=True)
alias_en = db.Column(db.Unicode(64), nullable=True)
active = db.Column(db.Boolean, default=False)
#
post_list = db.relationship('FeedPost', backref=db.backref('editor'))
basket_list = db.relationship('Basket', backref=db.backref('editor'))
#
role_list = db.relationship('Role', secondary='user_role_tbl', backref=db.backref('user_list_br', lazy='dynamic'))
# TODO rights for accesses.
def __init__(self, username, email, password=None, **kwargs):
db.Model.__init__(self, username=username, email=email, **kwargs)
if password:
self.set_password(password)
else:
self.password = None
def set_password(self, password):
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
return bcrypt.check_password_hash(self.password, value)
def __repr__(self):
return '<User({username!r})>'.format(username=self.username)
@classmethod
def get_by_id(cls, user_id):
if any((isinstance(user_id, (str, bytes)) and user_id.isdigit(), isinstance(user_id, (int, float))), ):
return cls.query.get(int(user_id))
return None
def get_geoloc_rights(self):
result = []
role_list = self.role_list
for role in role_list:
geoloc_list = role.geoloc_list
for geoloc in geoloc_list:
result.append(geoloc.id)
return result
@validates('email')
def validate_email(self, key, address):
assert '@' in address
return address
def is_admin(self):
return 'admin' in (x.principal for x in self.role_list)
def is_supervisor(self):
return 'supervisor' in (x.principal for x in self.role_list) or self.is_admin()
def is_supervisor_news(self):
return 'supervisor_news' in (x.principal for x in self.role_list) or self.is_supervisor() or self.is_admin()
def is_supervisor_price(self):
return 'supervisor_price' in (x.principal for x in self.role_list) or self.is_supervisor() or self.is_admin()
def is_editor(self):
return 'editor' in (x.principal for x in self.role_list) or self.is_supervisor() or self.is_admin()
def is_editor_price(self):
# A price editor is an editor limited to editing prices
return 'editor/price' in (x.principal for x in
self.role_list) or self.is_editor() or self.is_supervisor() or self.is_admin()
def is_editor_news(self):
# A news editor is an editor limited to editing news
return 'editor/news' in (x.principal for x in
self.role_list) or self.is_editor() or self.is_supervisor() or self.is_admin()
def is_allowed(self, requirement_list):
for role in self.role_list:
if role.principal in requirement_list:
return True
return False
@staticmethod
def import_from_csv(file_path):
import csv
with open(file_path, encoding='utf-8') as csvfile:
reader = csv.reader(csvfile, quoting=csv.QUOTE_NONE)
first = True
for row in reader:
if first:
first = False
else:
id = row[0]
username = row[1]
email = row[2]
password = row[3]
alias_ar = row[4]
alias_en = row[5]
role_id_columnseperated_list = row[6]
if role_id_columnseperated_list:
role_id_columnseperated_list = role_id_columnseperated_list.split(':')
user = User(id=int(id), username=username, email=email, password=password, alias_ar=alias_ar,
alias_en=alias_en, active=True)
for role_id in role_id_columnseperated_list:
role = Role.query.get(int(role_id))
user.role_list.append(role)
user.save()
```
#### File: remote/subscriber/message_handler.py
```python
import datetime as dt
#
#
import json
import traceback
from via_common.multiprocess.logger_manager import LoggerManager
from via_common.network.source_type import SourceType
from via_common.util.error import Error
from via_common.util.helper import deserialize_device_post
from via_cms.model import FeedPost
from via_cms.model.monitor.client_dao import Client
from via_cms.model.monitor.feedback_dao import Feedback
from via_cms.model.static.geoloc_dao import Geoloc
from via_cms.model.static.profile_dao import Profile
from via_cms.model.static.subject_dao import Subject
class MessageHandler:
"""
A callable data handler that transforms a compressed, in-house formatted payload into the JSON
format (similar to IPTC ninjs) readable by the external receiver (e.g. CMS).
"""
def __init__(self, app, sync_manager):
self.app = app
self.sync_manager = sync_manager
self.logger = LoggerManager.get_logger(__class__.__name__)
def __call__(self, mqtt_message):
return self.handle_payload(mqtt_message)
def handle_message(self, source_type, payload):
self.logger.debug('handle_message')
error = Error()
try:
if source_type == SourceType.DEVICE:
message = deserialize_device_post(payload)
error += self._handle_device_post(message)
else:
return 'Unknown source'
except Exception as e:
# TODO Exception
import traceback
traceback.print_tb(e.__traceback__) # TODO
err_msg = 'handle_message - raise EXCEPTION for message. Source_type: {}, error: {}'.format(source_type, str(e))
error.add(err_msg)
# TODO log
if error:
self.logger.warning(error.msg())
return error
def _handle_device_post(self, device_post):
error = Error()
with self.app.app_context():
if device_post.profile_id == Profile.query.filter_by(name='device').one().id:
if device_post.subject_id == Subject.query.filter_by(name='new_client').one().id: # 'new_client': TODO magic
error += self._handle_new_client(device_post)
elif device_post.subject_id == Subject.query.filter_by(name='feedback').one().id: # 'feedback': TODO magic
error += self._handle_feedback(device_post)
elif device_post.subject_id == Subject.query.filter_by(name='location_change').one().id: # 'location_change': TODO magic
error += self._handle_location_change(device_post)
else:  # unknown subject
err_msg = '_handle_device_post - UNKNOWN subject {} for device {}'.format(device_post.subject_id, device_post.device_id)
error.add(err_msg)
else: # todo unknown profile
err_msg = '_handle_device_post - UNKNOWN profile {} for device {}'.format(device_post.profile_id, device_post.device_id)
error.add(err_msg)
return error
def _handle_feedback(self, device_post):
self.logger.debug('_handle_feedback')
error = Error()
with self.app.app_context():
device_id = device_post.device_id
item_id = device_post.feedback.item_id
item_version = device_post.feedback.item_version
try:
feedback_json = json.dumps(device_post.feedback.feedback_json)
except Exception as e:
traceback.print_tb(e.__traceback__) # TODO
error.add('json error: {}'.format(str(e)))
post = FeedPost.query.get((item_id, item_version))
if not post:
error.add('Post unknown for id: {}, version: {}'.format(item_id, item_version))
else:
client = Client.query.filter_by(device_id=device_id).first() # TODO error prone
if client:
feedback = Feedback(client_id=client.id, client=client, post=post, feedback_json=feedback_json)
feedback.save()
if post:
post.feedback_list.append(feedback)
post.save()
client.feedback_list.append(feedback)
client.save()
client_fdb_json = ""
try:
client_fdb_json = str(device_post.feedback.feedback_json)
except: # TODO properly
error.add('Error while handling feedback for client id, item_id, feedback: {}, {}, {}. No feedback found'
.format(client.id, item_id, client_fdb_json))
else:
self.logger.info('Handle feedback for client id, item_id, feedback: {}, {}, {}'.format(client.id, item_id, client_fdb_json))
else:
error.add('client unknown')
# End if client
# end if not post
# End with self.app.app_context()
return error
def _handle_new_client(self, device_post):
self.logger.debug('_handle_new_client')
error = Error()
with self.app.app_context():
device_id = device_post.device_id
geoloc_id = device_post.geoloc_id
try:
geoloc_id = int(geoloc_id)
except ValueError as e:
error.add('Invalid geoloc_id string {} for device id: {}'.format(geoloc_id, device_id))
self.logger.warning(error.msg())
return error
geoloc = Geoloc.query.get(geoloc_id)
if not geoloc:
# It is ok not to have geoloc as the device gps might not have got the geoloc yet.
error.add('Unknown geoloc {} for device id: {}'.format(geoloc_id, device_id))
self.logger.warning(error.msg())
# TODO this is temporary. the issue is that mysql does not accept a geoloc that does not exist, obviously
geoloc_id = 1000
# end if not geoloc
client = Client.query.filter_by(device_id=device_id).first()
# The client should be new
if not client:
client = Client(device_id=device_id, geoloc_id=geoloc_id)
client.save()
else:
error.add('client already registered, device_id: {}, geoloc_id: {}'.format(device_id, geoloc_id))
if not error:
self.logger.info('Handle new client id: {}, device_id: {}'.format(client.id, device_id))
error += self.sync_manager.sync_client_content(client)
# End with self.app.app_context()
return error
def _handle_location_change(self, device_post):
self.logger.debug('_handle_location_change')
error = Error()
with self.app.app_context():
device_id = device_post.device_id
geoloc_id = device_post.geoloc_id
try:
geoloc_id = int(geoloc_id)
except ValueError as e:
error.add('Invalid geoloc_id string {} for device id: {}'.format(geoloc_id, device_id))
self.logger.warning(error.msg())
return error
geoloc = Geoloc.query.get(geoloc_id)
if not geoloc:
error.add('Unknown geoloc {} for device id: {}'.format(geoloc_id, device_id))
self.logger.warning(error.msg())
return error
client = Client.query.filter_by(device_id=device_id).first()
# The client should exist already
if client:
client.geoloc_id = geoloc_id
client.updated = dt.datetime.utcnow()
client.save()
else:
# it is an error but we should then create it anyway
error += self._handle_new_client(device_post)
if error:
return error
# end if error
client = Client.query.filter_by(device_id=device_id).first()
# end if client
self.logger.info('Handle location change client id, geoloc_id: {}, {}'.format(client.id if client else 'Unknown', geoloc_id))
error += self.sync_manager.sync_client_content(client)
# End with self.app.app_context()
return error
```
#### File: via_cms/util/config_logger.py
```python
import os
#
#
#
from via_common.util.config_mixin_logger import ConfigMixInLogger
class ConfigLogger(ConfigMixInLogger):
def __init__(self, logger_queue=None):
super().__init__(logger_queue)
@classmethod
def get_config_path(cls):
return os.getenv('VIA_CMS_CONFIG_PATH')
@classmethod
def get_config_filename(cls):
return os.getenv('VIA_CMS_CONFIG_LOGGER_FILENAME') or 'logger.json'
@classmethod
def _init_config_logger(cls, logger_queue):
pass
```
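ConfigLogger locates its JSON file purely through environment variables. A minimal sketch of pointing it at a local file before the app starts (the path value is illustrative):

```python
import os

# Illustrative values only; deployments normally set these in the environment.
os.environ["VIA_CMS_CONFIG_PATH"] = "/etc/via_cms"
os.environ["VIA_CMS_CONFIG_LOGGER_FILENAME"] = "logger.json"  # 'logger.json' is also the fallback when unset
```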
#### File: via_cms/viewmodel/vm_basket.py
```python
from via_common.multiprocess.logger_manager import LoggerManager
from via_cms.model.feed.basket_dao import Basket
from via_cms.util.helper import get_locale
logger = LoggerManager.get_logger('vm_basket')
def get_basket_list(subject_list):
"""
Returns the list of baskets for the given subjects
"""
lang = get_locale()
basket_list_blob = []
for subject in subject_list:
basket_list = Basket.query.filter_by(subject_id=subject.id)
for basket in basket_list:
subject = basket.subject
status = basket.status
status_label = ''
subject_label = ''
label = ''
editor = ''
if lang == 'ar':
status_label = status.label_ar
subject_label = subject.label_ar
label = basket.label_ar
editor = basket.editor.alias_ar
elif lang == 'en':
status_label = status.label_en
subject_label = subject.label_en
label = basket.label_en
editor = basket.editor.alias_en
basket_list_blob.append({
'id': basket.id,
'status_label': status_label,
'label': label,
'unit': basket.unit,
'created_date': basket.created.strftime('%y/%m/%d'),
'subject_id': basket.subject_id,
'code': basket.code,
'subject_name': basket.subject.name,
'editor': editor, })
return basket_list_blob
def get_top_tag_list(n):
"""
Returns N top tag_list
"""
raise NotImplementedError
def get_tagged_post_list(tag, limit):
"""
Gets the most recent limit post_list with a certain tag
"""
raise NotImplementedError
```
#### File: via_cms/viewmodel/vm_document.py
```python
import json
import traceback
from flask import current_app
from via_common.multiprocess.logger_manager import LoggerManager
from via_cms.model.feed.feed_document_dao import FeedDocument
from via_cms.model.feed.feed_post_dao import FeedPost
from via_cms.model.internal.user_dao import User
from via_cms.model.static.profile_dao import Profile
from via_cms.model.static.subject_dao import Subject
from via_cms.util.helper import get_locale
logger = LoggerManager.get_logger('vm_document')
def get_page_document(request_size, page_num):
"""
Returns a page of slugs
"""
lang = get_locale()
doc_list = FeedDocument.query.order_by(FeedDocument.created.asc()).offset(page_num * request_size).limit(request_size)
doc_list_blob = []
for doc in doc_list:
editor = ''
subject_label = ''
feedback_nb = len(doc.feedback_list)
feedback_list = ', '.join(("{}".format(x.id) for x in doc.feedback_list))
status_label = ''
workflow_label = ''
profile_label = ''
geotag = ''
if lang == 'ar':
status_label = doc.status.label_ar
workflow_label = doc.workflow.label_ar
subject_label = doc.subject.label_ar
profile_label = doc.profile.label_ar
geotag = '; '.join((x.label_ar for x in doc.geoloc_list))
editor = doc.editor.alias_ar
elif lang == 'en':
status_label = doc.status.label_en
workflow_label = doc.workflow.label_en
subject_label = doc.subject.label_en
profile_label = doc.profile.label_en
geotag = '; '.join((x.label_en for x in doc.geoloc_list))
editor = doc.editor.alias_en
geoloc_id_list = '; '.join((str(x.id) for x in doc.geoloc_list))
doc_list_blob.append({
'id': doc.id,
'version': doc.version,
'language': doc.language,
'title': doc.title[:30] + " ..." if doc.title and len(doc.title) > 34 else doc.title,
'created_date': doc.created.strftime('%y/%m/%d'),
'updated_date': doc.updated.strftime('%y/%m/%d'),
'updated_time': doc.updated.strftime('%H:%M:%S'),
'editor': editor,
'status_label': status_label,
'workflow_label': workflow_label,
'subject_label': subject_label,
'profile_label': profile_label,
'geotag': geotag,
'geoloc_id_list': geoloc_id_list,
'caption': doc.caption[:18] + " ..." if doc.caption
and len(doc.caption) > 22 else doc.caption,
'headline': doc.headline[:30] + " ..." if doc.headline
and len(doc.headline) > 34 else doc.headline,
'feedback_nb': feedback_nb,
'feedback_list': feedback_list
})
return doc_list_blob
def itemize_document(post_id):
lang = get_locale()
item = {}
post = FeedPost.query.filter_by(id=post_id).one()
doc = FeedDocument.query.filter_by(id=post_id).one()
from via_cms.main import get_config_flask
config_flask = get_config_flask()
if post and doc:
item['post_id'] = str(post.id)
item['created'] = post.created
item['updated'] = post.updated
item['posted'] = post.created.strftime("%a %d %B %Y - %H:%M")
item['lang'] = doc.language
item['title'] = doc.title
item['headline'] = doc.headline
item['caption'] = doc.caption
item['more_info'] = doc.more_info
item['feedback_definition'] = doc.feedback_definition
item['rendition_thumbnail_filename'] = doc.rendition_thumbnail_filename if doc.rendition_thumbnail_filename else ''
item['rendition_main_filename'] = doc.rendition_main_filename
profile = Profile.query.get(post.profile_id)
item['profile_name'] = profile.name
if lang == 'ar':
item['profile'] = profile.label_ar
elif lang == 'en':
item['profile'] = profile.label_en
subject = Subject.query.get(post.subject_id)
item['subject_name'] = subject.name
if lang == 'ar':
item['subject'] = subject.label_ar
elif lang == 'en':
item['subject'] = subject.label_en
item['status_name'] = doc.status.name
if lang == 'ar':
item['status'] = doc.status.label_ar
elif lang == 'en':
item['status'] = doc.status.label_en
user = User.query.get(post.user_id)
item['user_id'] = user.id
if lang == 'ar':
item['user'] = user.alias_ar + " - " + user.alias_en
elif lang == 'en':
item['user'] = user.alias_en + " - " + user.alias_ar
geoloc_list = {}
for geoloc in doc.geoloc_list:
label = ''
if lang == 'ar':
label = geoloc.label_ar
elif lang == 'en':
label = geoloc.label_en
geoloc_list.update({geoloc.id: label})
item['geoloc_list'] = geoloc_list
else:
item['error'] = "Document " + post_id + " Not found" # TODO manage error properly
return item
# TODO splitting here between geoloc is not optimal :(
def publish_document(doc, topic_prefix=''):
error = ''
if doc:
try:
result = doc.to_dict_of_dict_by_geoloc()
# content_profile = int(doc.profile.id).to_bytes(4, byteorder='little', signed=True)
# item_id = int(doc.id).to_bytes(8, byteorder='little', signed=True)
logger.debug('Publishing {}: post_id: {}'.format(doc.profile.name, doc.id))
for geoloc_id, content in result.items():
# TODO should be a future...
_, error = current_app.forwarder.send_data_sync(json.dumps(content).encode())
except ValueError as e:
error += '{}\n'.format(str(e)) # TODO exception
traceback.print_tb(e.__traceback__)
except Exception as e:
error += '{}\n'.format(str(e)) # TODO exception
traceback.print_tb(e.__traceback__)
return error
```
#### File: private/form/create_form_notice.py
```python
from flask_babel import lazy_gettext as _l
from flask_wtf import FlaskForm
from flask_wtf.file import FileAllowed
from wtforms import FileField
from wtforms import IntegerField
from wtforms import Label
from wtforms import RadioField
from wtforms import SelectField
from wtforms import SubmitField
from wtforms import TextAreaField
from wtforms.validators import DataRequired
from wtforms.validators import Length
from wtforms.validators import NumberRange
from wtforms.validators import Optional
from via_cms.config_flask import ConfigFlask
from via_cms.model.static.profile_dao import Profile
from via_cms.model.static.subject_dao import Subject
from via_cms.util.helper import get_locale
class CreateFormNotice(FlaskForm):
language = SelectField(_l('Language of the post'), choices=[(k, v) for k, v in ConfigFlask.LANGUAGE_DICT.items()])
subject = Label(_l('Subject'), "")
# For future use. Not displayed for now. That is why it is optional.
# subtitle1 = SelectField(_l('Subtitle - top'), choices=[('place', '$location'), ], validators=[Optional()])
# For future use. Not displayed for now. That is why it is optional.
# subtitle2 = SelectField(_l('Subtitle - bottom'), choices=[('event', '$event_date'), ], validators=[Optional()])
headline = TextAreaField(_l('Headline'),
render_kw={'rows': '1', 'data-label': _l('Headline'), 'placeholder': _l('Headline')},
validators=[DataRequired(), Length(max=128)])
place = TextAreaField(_l('Place'),
render_kw={'rows': '1', 'data-label': _l('Place'), 'placeholder': _l('Place')},
validators=[Length(max=70)])
#
# additional_Info = TextAreaField(_l('Additional Information'),
# render_kw={'rows': '3', 'data-label': _l('Additional Information'),
# 'placeholder': _l('Additional Information')},
# validators=[Length(max=255)])
summary = TextAreaField(_l('Summary'), render_kw={'rows': '2', 'data-label': _l('Summary'), 'placeholder': _l('Summary')},
validators=[Length(max=255)])
date = TextAreaField(_l('Date'), render_kw={'rows': '1', 'data-label': _l('Date'), 'placeholder': _l('23.09.2019')},
validators=[Length(max=10)])
end_date = TextAreaField(_l('End Date (Optional)'), render_kw={'rows': '1', 'placeholder': _l('22.01.2020')},
validators=[Length(max=10)])
# ----------------------------------------------------------------------------- #
body_json = TextAreaField(_l('Body in json format <small>(required)</small>'), render_kw={'rows': '8'},
validators=[DataRequired(), Length(max=2000)])
feedback_definition = TextAreaField(_l('Feedback as a series of json string'), render_kw={
'rows': '10'}, validators=[Optional(), Length(max=2000)])
more_info = TextAreaField(_l('URL for More Info <small>(must start with http:// or https://)</small>'), render_kw={'rows': '1'},
validators=[Optional(), Length(max=127)])
contact_json = TextAreaField(_l('Contact information as a json string'), render_kw={
'rows': '4'}, validators=[Optional(), Length(max=1000)])
geotag_list = TextAreaField(_l('List of Locations <small>(required)</small>'), render_kw={'rows': '1'}, validators=[DataRequired()])
rendition_thumbnail = FileField(_l('Icon file<p><small>The icon should be square, ideally 40x40 or 80x80 pixels. '
'The image should be optimised for the web (minimal size). Its resolution should be 72 ppi '
'(96 ppi if needed). Max size is 2 kb. Formats supported are jpg and png</small></p>'), default='',
validators=[Optional(), FileAllowed(['jpg', 'png', 'jpeg'],
_l('Only images of type jpeg or png can be used'))])
submit = SubmitField(_l('Submit'))
def __init__(self, subject_name):
super().__init__()
profile_news_id = Profile.query.filter_by(name='notice').first().id
lang = get_locale()
if lang == 'ar': # TODO add safeguard
self.subject.text = Subject.query.filter_by(profile_id=profile_news_id, name=subject_name).one().label_ar
elif lang == 'en':
self.subject.text = Subject.query.filter_by(profile_id=profile_news_id, name=subject_name).one().label_en
def validate(self):
#: additional validation
initial_validation = super().validate()
if not initial_validation:
return False
# TODO
return True
```
#### File: private/form/create_form_registration.py
```python
from flask_babel import lazy_gettext as _l
from flask_wtf import FlaskForm
from wtforms import PasswordField
from wtforms import SelectField
from wtforms import StringField
from wtforms.validators import DataRequired
from wtforms.validators import Email
from wtforms.validators import EqualTo
from wtforms.validators import Length
from wtforms.validators import Optional
from via_cms.model.internal.role_dao import Role
from via_cms.model.internal.user_dao import User
from via_cms.util.helper import get_locale
class CreateFormRegistration(FlaskForm):
username = StringField(_l('User name <small>(required)</small>'), validators=[DataRequired(), Length(min=3, max=55)])
alias_ar = StringField(_l('Alias in arabic <small>(required)</small>'), validators=[DataRequired(), Length(min=1, max=55)])
alias_en = StringField(_l('Alias in english <small>(required)</small>'), validators=[DataRequired(), Length(min=1, max=100)])
email = StringField(_l('Email <small>(required)</small>'), validators=[DataRequired(), Email(), Length(min=6, max=40)])
password = PasswordField(_l('Password <small>(required)</small>'), validators=[DataRequired(), Length(min=6, max=40)])
confirm = PasswordField(_l('Verify password <small>(required)</small>'),
[DataRequired(), EqualTo('password', message=_l('Passwords must match'))])
role_list = SelectField(_l('Role list (except admin)'), validators=[Optional()])
def __init__(self, *args, **kwargs):
super(CreateFormRegistration, self).__init__(*args, **kwargs)
self.user = None
lang = get_locale()
if lang == 'ar':
self.role_list.choices = [(role.name, role.label_ar) for role in Role.query if role.name != 'admin']
elif lang == 'en':
self.role_list.choices = [(role.name, role.label_en) for role in Role.query if role.name != 'admin']
def validate(self):
initial_validation = super(CreateFormRegistration, self).validate()
if not initial_validation:
return False
user = User.query.filter_by(username=self.username.data).first()
if user:
self.username.errors.append(_l("Username already registered"))
return False
user = User.query.filter_by(email=self.email.data).first()
if user:
self.email.errors.append(_l("Email already registered"))
return False
return True
class EmailForm(FlaskForm):
email = StringField(_l('Email <small>(required)</small>'), validators=[DataRequired(), Email()])
class PasswordForm(FlaskForm):
password = PasswordField('Password <small>(required)</small>', validators=[DataRequired()])
password2 = PasswordField(_l('Confirm Password <small>(required)</small>'),
validators=[DataRequired(), EqualTo('password', message=_l('Passwords must match'))])
class UsernameForm(FlaskForm):
username = StringField('Username <small>(required)</small>', validators=[DataRequired()])
username2 = StringField('Confirm Username <small>(required)</small>',
validators=[DataRequired(), EqualTo('username', message=_l('Usernames must match'))])
```
#### File: private/visualization/dashboard_basket.py
```python
from collections import namedtuple
from flask import Blueprint
from flask_login import login_required
from via_common.multiprocess.logger_manager import LoggerManager
from via_cms.model.static.profile_dao import Profile
from via_cms.model.static.subject_dao import Subject
from via_cms.util.helper import get_locale
from via_cms.util.helper import render_extensions
from via_cms.util.helper import role_required
from via_cms.viewmodel.vm_basket import get_basket_list
logger = LoggerManager.get_logger('dashboard_basket')
bp = Blueprint('private.visualization.dashboard_basket', __name__, url_prefix='/private/dashboard/', static_folder="../static")
@bp.route("/basket", methods=["GET", "POST"])
@login_required
@role_required(['editor', 'supervisor', 'admin'])
def dashboard_basket(page=None):
"""
"""
page = int(page) if page else 0 # TODO page + ValueError
_page_size = 100 # TODO: selectable on html
if not page or page <= 0:
next_page = 0
prev_page = 1
current = True
else:
next_page = page - 1
prev_page = page + 1
current = False
profile_id_price = Profile.query.filter_by(name='price').one().id
subject_list = Subject.query.filter_by(profile_id=profile_id_price)
basket_list = get_basket_list(subject_list)
lang = get_locale()
if lang == 'ar':
subject_list = [namedtuple('X', ('name', 'label'))(x.name, x.label_ar) for x in subject_list]
elif lang == 'en':
subject_list = [namedtuple('X', ('name', 'label'))(x.name, x.label_en) for x in subject_list]
return render_extensions("private/basket/dashboard_basket.html", subject_list=subject_list,
basket_list=basket_list, next_page=next_page, prev_page=prev_page,
current=current)
```
|
{
"source": "jeanjacquesp/via-common",
"score": 2
}
|
#### File: test/multiprocess/test_background_thread.py
```python
import multiprocessing
import multiprocessing.queues
from unittest.mock import patch, mock_open
import pytest
from test.tool import config_server_obj, middleware_obj, backgroundthread_obj, config_logger_obj, ConfigServerPure, ConnectRaised
from via_common.multiprocess.logger_manager import LoggerManager
from via_common.multiprocess.pipe_adapter import SIGNAL_SHUTDOWN_START
from via_common.multiprocess.queue_manager import QueueManager
class SomeException(Exception):
pass
class TestBackgroundThread:
config_internal_conn = None
middleware = None
backgroundthread = None
queue_manager = None
@pytest.fixture(scope="class")
def setup(self):
with patch("via_common.multiprocess.logger_manager.LoggerManager._check_or_create_directory"):
LoggerManager().__del__()
config_logger = config_logger_obj(config_logger='{"root":{"version":1}, "subprocess":{}}')
LoggerManager.init_root_logger(config_logger)
config_conn = {"host": "127.0.0.1", "port": 41567, "authkey": "abc"}
# We need a proper config otherwise cannot be pickled
self.__class__.config_internal_conn = ConfigServerPure(config_conn, 'test')
config_server = '{"host":"127.0.0.1", "port":12346, "authkey":"xyz", "user_id": "pqr", "login": "uvw", "timeout":123, "keepalive":456, "retry":789}'
with patch("builtins.open", new_callable=mock_open, read_data=config_server):
self.__class__.config_server = config_server_obj()
self.__class__.middleware = middleware_obj(self.config_server)
self.__class__.queue_manager = QueueManager(self.config_internal_conn)
self.__class__.backgroundthread = backgroundthread_obj(self.config_internal_conn, self.middleware)
yield "teardown"
LoggerManager().__del__()
def test_shutdown(self, setup):
self.backgroundthread.shutdown()
assert self.backgroundthread.called_shutdown
def test__initialise_thread_logger(self, setup):
self.backgroundthread._initialise_thread_logger()
assert self.backgroundthread.logger is not None
@patch("via_common.network.middleware.Middleware.connect")
def test__start_background_async(self, setup2):
self.backgroundthread._start_background_async(lambda x: x, 'X')
self.backgroundthread.system_queue.put(SIGNAL_SHUTDOWN_START)
assert self.backgroundthread.called_shutdown
@patch("via_common.network.middleware.Middleware.connect", side_effect=ConnectRaised)
def test__run_thread_forever(self, setup):
with pytest.raises(ConnectRaised) as ctx:
self.backgroundthread._run_thread_forever(lambda x: x, 'X')
assert isinstance(ctx.value, ConnectRaised)
def test__setup_system_queue(self, setup):
self.backgroundthread._setup_system_queue()
assert isinstance(self.backgroundthread.system_queue, multiprocessing.managers.BaseProxy)
@patch("multiprocessing.managers.BaseManager.connect", side_effect=ConnectRaised)
def test__setup_system_queue_error(self, setup):
with pytest.raises(ConnectionError) as ctx:
self.backgroundthread._setup_system_queue()
assert isinstance(ctx.value, ConnectionError)
```
#### File: test/network/test_middleware.py
```python
from unittest.mock import patch, mock_open
import pytest
from test.tool import config_server_obj, middleware_obj
class TestMiddleware:
@pytest.fixture(scope="class")
def setup(self):
config_server = '{"host":"abc", "port":1234, "authkey":"xyz", "user_id": "pqr", "login": "uvw", "timeout":123, "keepalive":456, "retry":789}'
with patch("builtins.open", new_callable=mock_open, read_data=config_server):
self.__class__.config_server = config_server_obj()
self.__class__.middleware = middleware_obj(self.config_server)
def test__init__(self, setup):
assert self.middleware.config == self.config_server
assert self.middleware.host == self.config_server.host()
assert self.middleware.port == self.config_server.port()
assert self.middleware.password == self.config_server.authkey()
def test_connect(self, setup):
with pytest.raises(NotImplementedError) as ctx:
self.middleware.connect()
assert isinstance(ctx.value, NotImplementedError)
def test_shutdown(self, setup):
with pytest.raises(NotImplementedError) as ctx:
self.middleware.shutdown()
assert isinstance(ctx.value, NotImplementedError)
def test_publish(self, setup):
with pytest.raises(NotImplementedError) as ctx:
self.middleware.publish('channel', 'message')
assert isinstance(ctx.value, NotImplementedError)
def test_subscribe_one_forever(self, setup):
with pytest.raises(NotImplementedError) as ctx:
self.middleware.subscribe_one_forever('channel', None)
assert isinstance(ctx.value, NotImplementedError)
def test__check_conn(self, setup):
with pytest.raises(NotImplementedError) as ctx:
self.middleware._check_conn()
assert isinstance(ctx.value, NotImplementedError)
# @classmethod
# def _middleware_obj(cls, config):
# class MiddlewareTest(Middleware):
#
# def __init__(self, config_server: ConfigMixInServer):
# super().__init__(config_server)
#
#
# MiddlewareTest.__abstractmethods__ = frozenset()
# return MiddlewareTest(config)
# @classmethod
# @patch("os.path.isfile", lambda x: True if x == 'path/filename.json' else False)
# @patch("os.path.join", lambda x, y: 'path/filename.json')
# @patch('via_common.util.config_mixin.ConfigMixIn.get_config_path', lambda: 'path')
# @patch('via_common.util.config_mixin.ConfigMixIn.get_config_filename', lambda: 'filename.json')
# @patch('via_common.util.config_mixin.ConfigMixIn._init_config', lambda x: None)
# def _config_server_obj(cls):
# class ConfigServerTest(ConfigMixInServer):
#
# def __init__(self, server_name):
# super().__init__(server_name)
#
#
# config_server = '{"host":"abc", "port":1234, "authkey":"xyz", "user_id": "pqr", "login": "uvw", "timeout":123, "keepalive":456, "retry":789}'
# ConfigServerTest.__abstractmethods__ = frozenset()
# with patch("builtins.open", new_callable=mock_open, read_data=config_server):
# config_server = ConfigServerTest('queue')
# return config_server
```
#### File: test/util/test_config_mixin_server.py
```python
from unittest.mock import patch, mock_open
import pytest
from test.tool import config_server_obj
@patch("os.path.isfile", lambda x: True if x == 'path/filename.json' else False)
@patch("os.path.join", lambda x, y: 'path/filename.json')
@patch('via_common.util.config_mixin.ConfigMixIn.get_config_path', lambda: 'path')
@patch('via_common.util.config_mixin.ConfigMixIn.get_config_filename', lambda: 'filename.json')
@patch('via_common.util.config_mixin.ConfigMixIn._init_config', lambda x: None)
class TestConfigMixinServer:
test_json = '{"host":"abc", "port":1234, "authkey":"xyz", "user_id": "pqr", "login": "uvw", "timeout":123, "keepalive":456, "retry":789}'
EXPECTED = {"host": "abc", "port": 1234, "authkey": "xyz", "user_id": "pqr", "login": "uvw", "timeout": 123, "keepalive": 456, "retry": 789}
def test_base(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj("tmp_server_name_base")
assert some_config.config == self.EXPECTED
assert some_config.get_config_dict() == self.EXPECTED
assert some_config.host() == 'abc'
assert some_config.port() == 1234
assert some_config.authkey() == 'xyz'
assert some_config.timeout() == 123
assert some_config.keepalive() == 456
assert some_config.retry() == 789
assert some_config.server_name == 'tmp_server_name_base'
def test__check_config(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj("tmp_server_name_cfg")
assert some_config._check_config() == None
# Port is int
test_json_error_port = '{"host":"abc", "port":"1234", "authkey":"xyz", "timeout":"123", "keepalive":"456", "retry":"789"}'
with patch("builtins.open", new_callable=mock_open, read_data=test_json_error_port):
with pytest.raises(AttributeError) as ctx:
config_server_obj('tmp_server_name_cfg2')
assert ctx.errisinstance(AttributeError) and 'port' in ctx.exconly().lower()
# Timeout is int
test_json_error_timeout = '{"host":"abc", "port":1234, "authkey":"xyz", "timeout":"123", "keepalive":456, "retry":789}'
with patch("builtins.open", new_callable=mock_open, read_data=test_json_error_timeout):
with pytest.raises(AttributeError) as ctx:
config_server_obj('tmp_server_name_cfg3')
assert ctx.errisinstance(AttributeError) and 'timeout' in ctx.exconly().lower()
# Keepalive is int
test_json_error_keepalive = '{"host":"abc", "port":1234, "authkey":"xyz", "timeout":123, "keepalive":"456", "retry":789}'
with patch("builtins.open", new_callable=mock_open, read_data=test_json_error_keepalive):
with pytest.raises(AttributeError) as ctx:
config_server_obj('tmp_server_name_cfg4')
assert ctx.errisinstance(AttributeError) and 'keepalive' in ctx.exconly().lower()
# retry is int
test_json_error_retry = '{"host":"abc", "port":1234, "authkey":"xyz", "timeout":123, "keepalive":456, "retry":"789"}'
with patch("builtins.open", new_callable=mock_open, read_data=test_json_error_retry):
with pytest.raises(AttributeError) as ctx:
config_server_obj('tmp_server_name_cfg5')
assert ctx.errisinstance(AttributeError) and 'retry' in ctx.exconly().lower()
def test_host(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj('tmp_server_name_cfg6')
assert some_config.host() == 'abc'
def test_port(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj('tmp_server_name_cfg7')
assert some_config.port() == 1234
def test_login(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj('tmp_server_name_cfg8')
assert some_config.login() == 'uvw'
def test_authkey(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj("tmp_server_name_cfg9")
assert some_config.authkey() == 'xyz'
def test_user_id(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj("tmp_server_name_cfg10")
assert some_config.user_id() == 'pqr'
def test_timeout(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj('tmp_server_name_cfg11')
assert some_config.timeout() == 123
def test_keepalive(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj('tmp_server_name_cfg12')
assert some_config.keepalive() == 456
def test_retry(self):
with patch("builtins.open", new_callable=mock_open, read_data=self.test_json):
some_config = config_server_obj('tmp_server_name_cfg13')
assert some_config.retry() == 789
# @classmethod
# def _config_obj(cls, queue):
# class ConfigTest(ConfigMixInServer):
#
# def __init__(self, logger_queue):
# super().__init__(logger_queue)
#
#
# ConfigTest.__abstractmethods__ = frozenset()
# return ConfigTest(queue)
```
#### File: via_common/multiprocess/pipe_adapter.py
```python
import threading
from multiprocessing import Pipe
SIGNAL_SHUTDOWN_START = '#shutdown'
SIGNAL_SHUTDOWN_DONE = '#shutdown_done'
class PipeAdapter:
"""
A simple helper class used for managing shutdown through Pipe
"""
def __init__(self, pipe_receiver: Pipe, callback):
self.pipe_receiver = pipe_receiver
self.callback = callback
self.worker = None
@classmethod
def listen(cls, pipe_receiver, callback):
try:
pipe_receiver.recv()
callback()
except EOFError:
pass
def start(self):
self.worker = threading.Thread(target=self.listen,
args=(self.pipe_receiver,
self.callback,))
self.worker.start()
```
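A minimal usage sketch, assuming the receiving end of a `multiprocessing.Pipe` is handed to `PipeAdapter` and another thread or process sends the shutdown marker:

```python
from multiprocessing import Pipe

def on_shutdown():
    print("shutdown requested")

receiver, sender = Pipe(duplex=False)   # receiver can only recv(), sender can only send()
adapter = PipeAdapter(receiver, on_shutdown)
adapter.start()                          # spawns the listener thread
sender.send(SIGNAL_SHUTDOWN_START)       # any message unblocks recv() and fires the callback
adapter.worker.join()
```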
#### File: via_common/util/config_mixin.py
```python
import json
import os
#
# Global config as a data class
#
from abc import ABCMeta
from abc import abstractmethod
class ConfigMixIn(metaclass=ABCMeta):
"""
A helper MixIn for loading a json config.
"""
config = None
def __init__(self):
if self.__class__.config is None:
self.__class__.config = self._read_config()
self._init_config()
@classmethod
def get_config_dict(cls):
return cls.config
@classmethod
@abstractmethod
def get_config_path(cls):
# Something like return os.getenv('CONFIG_PATH')
raise NotImplementedError
@classmethod
@abstractmethod
def get_config_filename(cls):
# Something like return os.getenv('CONFIG_FILENAME')
raise NotImplementedError
@classmethod
@abstractmethod
def _init_config(cls):
raise NotImplementedError
@classmethod
def _read_config(cls):
"""
Read the global config. get_config_path() and get_config_filename() must both
resolve (typically from environment variables); an AttributeError is raised otherwise.
"""
config_path = cls.get_config_path()
if config_path is None:
raise AttributeError('Config PATH missing')
# end if config_path is None
config_filename = cls.get_config_filename()
if config_filename is None:
raise AttributeError('Config FILENAME missing')
# end if config_filename is None
config_fullpath = os.path.join(config_path, config_filename)
if os.path.isfile(config_fullpath):
f = open(config_fullpath)
try:
config = json.load(f)
except json.JSONDecodeError:
raise
except TypeError:
raise
else:
raise FileNotFoundError('Config file not found at {}/{}'.format(config_path, config_filename))
# end if os.path.isfile(config_path)
return config
```
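A hedged sketch of a concrete subclass: the three abstract hooks only need to say where the JSON file lives and what, if anything, to derive once it is loaded. The environment variable and file name below are illustrative, not part of the library:

```python
import os

class AppConfig(ConfigMixIn):
    @classmethod
    def get_config_path(cls):
        # Illustrative variable name; each service defines its own.
        return os.getenv("APP_CONFIG_PATH", ".")

    @classmethod
    def get_config_filename(cls):
        return "config.json"

    @classmethod
    def _init_config(cls):
        # Nothing extra to derive from the loaded dict in this sketch.
        pass
```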
#### File: via_common/util/helper.py
```python
import json
import random
import uuid
import zlib
from collections import namedtuple
from via_common.generated.proto.feed_pb2 import DevicePost
from via_common.generated.proto.internal_pb2 import FromBroker, FromCMS
"""
Helper classes and functions
"""
#
# json helpers
#
def dict2obj(data):
"""
Helper function for creating the tuple subclasses with well-formed named fields
"""
return namedtuple('X', (''.join(c if c.isalnum() else '_' for c in x) if not x[0].isnumeric() else 'O' + ''.join(c if c.isalnum() else '_' for c in x)
for x in data.keys()))(*data.values())
def json2obj(data):
"""
Deserialize a str or bytes to a Python object using a helper functions to deduce the object
attributes
"""
return json.loads(data, object_hook=dict2obj)
def json2dict(obj):
"""
Translates a deserialized json object to a dictionary.
The assumption is that any contained object type has the same class name 'X'.
"""
res = {}
if not hasattr(obj, '_fields'):
return {} # TODO manage errors properly
for k in obj._fields:
v = getattr(obj, k)
if isinstance(v, str):
res.update({k: v})
elif isinstance(v, list):
res2 = []
for i in v:
res2.append(json2dict(i))
res.update({k: res2})
elif v.__class__.__name__ == obj.__class__.__name__:
res.update({k: json2dict(v)})
# if isinstance(v, str)
# end for k in obj._fields
return res
#
# IDs
#
def generate_unique_id():
"""
Generates a random UUID
"""
return str(uuid.uuid4())
def get_next_cms_id():
# TODO a proper way to manage message IDs
return random.getrandbits(32)
def get_next_broker_id():
# TODO a proper way to manage message IDs
return random.getrandbits(32)
def get_next_internal_id(): # TODO
# TODO a proper way to manage message IDs
return random.getrandbits(32)
#
# Internal message wrappers
#
def wrap_message(message_id, source_type, message: bytes):
message_id_b = int(message_id).to_bytes(8, byteorder='little', signed=True)
source_type_b = int(source_type).to_bytes(4, byteorder='little', signed=True)
payload = message_id_b + source_type_b
if message:
payload += zlib.compress(message)
# end if message
return payload
def unwrap_payload(payload: bytes):
message_id = int.from_bytes(payload[:8], 'little', signed=True)
source_type = int.from_bytes(payload[8:12], 'little', signed=True)
try:
message = zlib.decompress(payload[12:])
except zlib.error: # payload was not compressed; fall back to the raw bytes
message = payload[12:]
return message_id, source_type, message
#
# PROTO Internal
#
def serialize_from_broker(message_id, item_id, item_version, topic, payload: bytes):
serialized = FromBroker()
serialized.message_id = message_id
serialized.item_id = item_id
serialized.item_version = item_version
serialized.topic = topic
serialized.payload = payload
serialized = serialized.SerializeToString()
return serialized
def deserialize_from_broker(payload: bytes):
deserialized = FromBroker()
deserialized.ParseFromString(payload)
return deserialized
def serialize_from_cms(message_id, profile_id, subject_id, item_id, item_version, message: bytes):
serialized = FromCMS()
serialized.message_id = message_id
serialized.profile_id = profile_id
serialized.subject_id = subject_id
serialized.item_id = item_id
serialized.item_version = item_version
serialized.payload = message
serialized = serialized.SerializeToString()
return serialized
def deserialize_from_cms(payload: bytes):
deserialized = FromCMS()
deserialized.ParseFromString(payload)
return deserialized
#
# PROTO Gateway
#
def serialize_broker_post(broker_post):
return broker_post.SerializeToString()
def deserialize_device_post(payload):
deserialized = DevicePost()
deserialized.ParseFromString(payload)
return deserialized
```
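The wire format used by `wrap_message`/`unwrap_payload` above is an 8-byte little-endian message id, a 4-byte source type, then a zlib-compressed body. A round-trip sketch:

```python
payload = wrap_message(7, 1, b'{"hello": "world"}')
message_id, source_type, body = unwrap_payload(payload)
assert (message_id, source_type, body) == (7, 1, b'{"hello": "world"}')
```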
|
{
"source": "JeanJdkJebuf/RAD_tk_builder",
"score": 3
}
|
#### File: RAD_tk_builder/builder/file_constructor.py
```python
import json
class FileConstructor:
"""This class is meant to be called by ParseIntoCreate class."""
def __init__(self):
with open("builder/dao.json") as json_file:
data = json.load(json_file)
self.data = data
def create_stock_class(self, arg1):
"""This function adds tkinter module into the document It also adds
variables with an import.
arg1 = file of variables for new document
n
"""
return self.data["introduction"].format(arg1)
def create_class_and_init(self, arg1):
"""This function creates the name of the new class It also adds
__init__() to the document.
arg1 is the TITLE name
"""
if arg1:
#if title exists
return self.data["classcreation"].format(arg1)
return self.data["classcreation"].format("")
def add_main_widget_function_to_init(self, arg1):
"""This function adds function in init so it launches itself at start.
arg1 is the name of the function.
arg1 should look like : self.add_"function_name"
"""
return self.data["addfunction"].format(arg1, arg1)
def add_widgets_to_master_widgets_func(self, arg1):
"""This function adds function in init so it launches itself at start
This function adds widgets to arg1.
arg1 is the name of the function.
arg1 should look like : self.add_"function_name"
"""
return self.data["addfuncmaster"].format(arg1, arg1)
def add_master_function(self, arg1):
"""This function adds master function arg1.
Takes only one arg
"""
return self.data["masterfunctionname"].format(arg1, arg1)
def add_widgets_function(self, arg1):
"""This function adds widgets to arg1.
Takes only one arg as master widget of slave widgets
"""
return self.data["widgetfunctionname"].format(arg1, arg1)
def add_identify_id_class_master(self, arg1, arg2, arg3=""):
"""This function creates widget's name and instanciates it.
It gives his name as arg1 arg2 is his class If it is not a
master widget (that doesn't instanciates from Tk()), you can
give arg3 as his master widget
"""
return self.data["functionidandclass"].format(arg1, arg2, arg3)
def add_widget_conf(self, arg1, arg2):
"""This function adds config to the current widget.
arg1 is the name of widget
arg2 is the configuration.
args should match this :
self.{arg1}.config({arg2})
"""
return self.data["widgetconfig"].format(arg1, arg2)
def add_widget_loc(self, arg1, arg2):
"""This function adds placement to the current widget.
arg1 is the name of widget
arg2 is its placement
args should match this:
self.{arg1}.{arg2}
"""
return self.data["widgetplace"].format(arg1, arg2)
def add_launch_function(self):
"""This function adds the "launch" function in the new document.
No args needed.
"""
return self.data["launchfunction"]
def add_name_eq_main(self):
"""This function adds if__name__ == '__main__' This allow the tkinter
window to launch automatically if called from the name of document.
No argument needed.
"""
return self.data["ifnameeqmain"]
def add_intro_conf(self, arg1):
"""This function adds intro to conf file.
Takes name of tk file as arg1
"""
return self.data["introconf"].format(arg1)
def add_text(self, arg1, arg2):
"""This function adds text in conf file for new_file configuration.
Takes arg1 as name of variable arg2 is the text
"""
return self.data["addtext"].format(arg1, arg2)
if __name__ == '__main__':
print("This is a constructor class, made for ParseIntoCreate class. \
For more informations on how to use this program, please consult README.md \
file")
```
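FileConstructor pulls every template from builder/dao.json, which is not reproduced in this excerpt. The keys used above imply roughly the following shape; the values here are placeholder illustrations, not the real templates:

```python
# Illustrative shape only; the real templates ship with the project in builder/dao.json.
dao_shape = {
    "introduction": "...{0}...",        # used by create_stock_class (conf module name)
    "classcreation": "...{0}...",       # used by create_class_and_init (window title variable)
    "addfunction": "...{0}...{1}...",   # used by add_main_widget_function_to_init
    # ... one str.format template per method of FileConstructor above
}
```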
#### File: RAD_tk_builder/builder/python_builder.py
```python
import xml.etree.ElementTree as ET
import errno
import os.path
try:
from builder.xml_converter import XmlDictConfig
from builder.conf import FILEERROR, ATTRIBERROR, DEFAULTTITLE, PYERROR,\
PYCONFERROR, PYERROREXISTS, PYCONFERROREXISTS
from builder.file_constructor import FileConstructor
from builder.recursive_packager import RecursivePackager
except:
from xml_converter import XmlDictConfig
from conf import FILEERROR, ATTRIBERROR, DEFAULTTITLE, PYERROR,\
PYCONFERROR, PYERROREXISTS, PYCONFERROREXISTS
from file_constructor import FileConstructor
from recursive_packager import RecursivePackager
class ParseIntoCreate:
"""This class is meant to create a tkinter code. It takes as argument
uifile a .ui file, created on pygubu-designer It will convert and create a
new document coded in python in newfile.
if you don't give uifile any argument, it will load a default template
you can consult in your target path.
newfile is the file that's going to be created.
defaultconf is the file that will include all variables for newfile.
For more informations, please consult the README.md file.
Have fun !
"""
def __init__(self, newfile, uifile="tests/template_ui_file.ui", defaultconf="conf.py"):
# newfile is the file that this class will create
self.newfile = newfile
# ui file is the file that's going to be converted
self.uifile = uifile
# defaultconf is the file that will be created and will include all
# variables for newfile
self.defaultconf = defaultconf
# getting all informations from ui file
try:
tree = ET.parse(self.uifile)
root = tree.getroot()
except OSError as er:
#if file isn't an xml file
if er.errno == errno.ENOENT:
print(FILEERROR)
return
try:
# Converting xml data into a dictionary
self.xmldict = XmlDictConfig(root)
except UnboundLocalError:
# if file can't be read
print(ATTRIBERROR)
return
# Loading constructor class
self.constructor = FileConstructor()
# Converting object into a dictionary
self.creating_new_dicts()
# self.realdict is now a packaged list
self.real_list = RecursivePackager(self.realdict)
self.real_list = self.real_list.return_converted_list()
# List of text entries for the conf.py file
# List values go like this : [["LABEL_FRAME_TEXT", "some text"],
# ...
# ]
self.conf_text = []
# Adding erros if self.newfile or self.default_conf isn't .py
if self.newfile[-3:] != ".py":
print(PYERROR)
return
if self.defaultconf[-3:] != ".py":
print(PYCONFERROR)
return
# Adding erros if self.newfile or self.default_conf already exists
if os.path.isfile(self.newfile):
print(PYERROREXISTS)
return
if os.path.isfile(self.defaultconf):
print(PYCONFERROREXISTS)
return
# Running creating_new_file()
self.creating_new_file()
def creating_new_dicts(self):
"""This function is taking data inside xmldict and converts them into a
new dictionnary.
XmlDictConfig looks like a dictionnary, but it renders an
object. This class also prevents the code from being spread out
in the new file.
"""
# removing useless data
self.xmldict = self.xmldict["object"]
# Creating a new dictionary from self.xmldict
# xmldict is actually an instance of the XmlDictConfig
# class, and by no means a dictionary
self.realdict = {}
# Adding xmldict values to realdict
# (can't unpack with `for x, y in ...` here)
for keys in self.xmldict:
self.realdict[keys] = self.xmldict[keys]
def creating_new_file(self):
"""This function takes self.realdict datas and converts them into code,
using conf.py file as database."""
widget_list = self.getting_master_widgets()
# Filling self.newfile with data
with open(self.newfile, "w") as newdoc:
#Documentation
# Removing .py in self.defaultconf using [:-3]
newdoc.write(self.constructor.create_stock_class(self.defaultconf[:-3]))
#Creating class and init
self.conf_text.append(["TITLE", DEFAULTTITLE])
newdoc.write(self.constructor.create_class_and_init("text."+"TITLE"))
# Adding functions in init
for widgets in widget_list:
# If widget is master widget
# and instantiates from tk()
if widgets[1]:
newdoc.write(self.constructor.add_main_widget_function_to_init(widgets[0]))
newdoc.write(self.constructor.add_widgets_to_master_widgets_func(widgets[0]))
else:
newdoc.write(self.constructor.add_widgets_to_master_widgets_func(widgets[0]))
# Creating functions, fulfilling them
# Know which widgets gets two functions passes
for widgets in widget_list:
# If widgets[0] is an instance of Tk()
if widgets[1]:
# Create master widget in its own function
self.creating_function(self.who_s_your_master(widgets[0], True),
newdoc,
True)
# Add slave widgets
self.creating_function(self.who_s_your_master(widgets[0]),
newdoc)
# Add launch function
newdoc.write(self.constructor.add_launch_function())
# Add if name == main function
newdoc.write(self.constructor.add_name_eq_main())
# Finally
newdoc.close()
###########################
# Now we can finally
# create document for conf
###########################
self.creating_conf_file()
def who_s_your_master(self, arg1, master=False):
"""This function takes arg1, parses self.real_list and returns a list
only containing widgets that have arg1 as master.
Optionnal argument as "master" is given if we're looking for all
informations of arg1 only.
"""
new_list = []
# If arg1 is instance of Tk()
if master:
for widgets in self.real_list:
if arg1 == widgets[1]:
new_list.append(widgets)
# If we're looking for all widgets that arg1 has
elif not master:
for widgets in self.real_list:
if arg1 == widgets[0]:
new_list.append(widgets)
# Return new_list once completed
return new_list
def creating_function(self, list_widgets, document, master=False):
"""This function helps creating_new_file function. It parses
RecursivePackager result to create a function for the new file.
Change master to True ONLY if you need to create a master
function.
"""
# If master = True
# Unique case
if master:
document.write(self.constructor.add_master_function(list_widgets[0][1]))
elif not master:
document.write(self.constructor.add_widgets_function(list_widgets[0][0]))
# Create loop, adding all widgets in list_widgets inside the function
for widgets in list_widgets:
# Add id and class for current widget
# if master = True, no arg3
if master:
document.write(self.constructor.add_identify_id_class_master(widgets[1],
widgets[2]))
# Add arg3 if master = False and widgets[0] is not null
elif not master and widgets[0]:
document.write(self.constructor.add_identify_id_class_master(widgets[1],
widgets[2],
"self." + widgets[0]))
elif not master and not widgets[0]:
document.write(self.constructor.add_identify_id_class_master(widgets[1],
widgets[2]))
if widgets[3]:
# if there is text in properties
if len(widgets[3]) > 1:
# Add text in conf_text list
self.conf_text.append([self.cap_text(widgets[1]), widgets[3][1]])
document.write(self.constructor.add_widget_conf(widgets[1],
widgets[3][0].format("text." + self.cap_text(widgets[1]))))
elif len(widgets[3]) == 1:
document.write(self.constructor.add_widget_conf(widgets[1],
widgets[3][0]))
if widgets[4]:
document.write(self.constructor.add_widget_loc(widgets[1],
widgets[4][0]))
# If _propagate == False
# Add place_propagate(0)
if len(widgets[4]) > 1:
document.write(self.constructor.add_widget_loc(widgets[1],
widgets[4][1]))
# Add spaces between widgets / functions
# for each iteration
document.write("\n")
def cap_text(self, arg):
"""This function takes arg and converts it to ARG_TEXT.
This function is usefull for the conf.py text.
"""
return arg.upper() + "_TEXT"
def getting_master_widgets(self):
"""This function works with creating_functions_for_new_file It returns
a list with all master widgets. Initial list is self.real_list.
Returns valors like this : [[example_widget, True]...]
True means example_widget is a master widget that instanciates
directly from tk()
False means example_widget is an instance of another widget.
"""
return_list = []
# Loop that gets all master widgets
for valors in self.real_list:
if valors[0] not in return_list:
return_list.append(valors[0])
list_valors = []
# Checking which widget is main widget.
for masters in return_list:
for valors in self.real_list:
# Do not count [] empty list
if isinstance(masters, str):
if masters == valors[1] and not valors[0]:
list_valors.append([masters, True])
if masters == valors[1] and valors[0]:
list_valors.append([masters, False])
return list_valors
def creating_conf_file(self):
"""This function is going to create a conf file. Data are stocked in
the self.conf_text list They are gathered during the writing of newfile
process, in the creating_function function.
conf file name is by default conf.py Can be changed during class
creation, by changing defaultconf arg
"""
# Fullfilling self.defaultconf with data
with open(self.defaultconf, "w") as newconf:
# Documentation
newconf.write(self.constructor.add_intro_conf(self.newfile))
# Adding all variables and text for self.newfile file
for text in self.conf_text:
newconf.write(self.constructor.add_text(text[0], text[1]))
newconf.close()
if __name__ == '__main__':
# test to make sure everything is working properly
parser = ParseIntoCreate("newdocument.py", "tests/template_ui_file.ui")
```
|
{
"source": "Jean-KOUAGOU/Cat-vs-Dog-Classifier",
"score": 3
}
|
#### File: Cat-vs-Dog-Classifier/utilities/custom_loss_print.py
```python
class LossPrettifier(object):
STYLE = {
'green' : '\033[32m',
'red' : '\033[91m',
'bold' : '\033[1m',
}
STYLE_END = '\033[0m'
def __init__(self, show_percentage=False):
self.show_percentage = show_percentage
self.color_up = 'green'
self.color_down = 'red'
self.loss_terms = {}
def __call__(self, epoch=None, **kwargs):
if epoch is not None:
print_string = f'Epoch {epoch: 5d} '
else:
print_string = ''
for key, value in kwargs.items():
pre_value = self.loss_terms.get(key, value)
if value > pre_value:
indicator = '▲'
show_color = self.STYLE[self.color_up]
elif value == pre_value:
indicator = ''
show_color = ''
else:
indicator = '▼'
show_color = self.STYLE[self.color_down]
if self.show_percentage:
show_value = 0 if pre_value == 0 \
else (value - pre_value) / float(pre_value)
key_string = f'| {key}: {show_color}{value:3.4f}({show_value:+3.4%}) {indicator}'
else:
key_string = f'| {key}: {show_color}{value:.4f} {indicator}'
# Trim some long outputs
key_string_part = key_string[:32]
print_string += key_string_part+f'{self.STYLE_END}\t'
self.loss_terms[key] = value
print(print_string)
# reporter = LossPrettifier(show_percentage=True)
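# Minimal usage sketch (hypothetical loss values, reusing the reporter above):
# reporter(epoch=1, train_loss=0.52, val_loss=0.61)
# reporter(epoch=2, train_loss=0.47, val_loss=0.58)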
```
#### File: Cat-vs-Dog-Classifier/utilities/test_class.py
```python
import numpy as np
def test(loaders, model, criterion, use_cuda):
# monitor test loss and accuracy
test_loss = 0.
correct = 0.
total = 0.
model.eval()
for batch_idx, (data, target) in enumerate(loaders['test']):  # evaluate on the test loader, not the train loader
# move to GPU
if use_cuda:
data, target = data.cuda(), target.cuda()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update average test loss
test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
# convert output probabilities to predicted class
pred = output.data.max(1, keepdim=True)[1]
# compare predictions to true label
correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
total += data.size(0)
print('Test Loss: {:.6f}\n'.format(test_loss))
print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
100. * correct / total, correct, total))
```
|
{
"source": "Jean-KOUAGOU/CLLearner",
"score": 3
}
|
#### File: CLLearner/Modified_ConEx/helper_funcs.py
```python
import datetime
import logging
import os
def create_experiment_folder(folder_name='Experiments'):
directory = os.getcwd() + '/' + folder_name + '/'
folder_name = str(datetime.datetime.now())
path_of_folder = directory + folder_name
os.makedirs(path_of_folder)
return path_of_folder, path_of_folder[:path_of_folder.rfind('/')]
def create_logger(*, name, p):
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
# create file handler which logs even debug messages
fh = logging.FileHandler(p + '/info.log')
fh.setLevel(logging.INFO)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
return logger
```
|
{
"source": "jeanku/pyutil",
"score": 2
}
|
#### File: pyutil/simutil/ConfigParser.py
```python
import importlib
class ConfigParser(object):
__loaded__ = dict()
def __call__(self, module, default=None):
module = module.split('.')
key = '{}'.format('.'.join(module[:-1]))
if self.__loaded__.get(key) is None:
self.__loaded__[key] = importlib.import_module(key)
return getattr(self.__loaded__[key], module[-1], default)
def get(self, module, default=None):
return self.__call__(module, default)
if __name__ == '__main__':
data = ConfigParser()('Config.Filepath.RESOURCE_BASE_PATH')
data = ConfigParser()('Config.Filepath.RESOURCE_BASE_PATH1', 'default')
data = ConfigParser().get('Config.Filepath.RESOURCE_BASE_PATH')
data = ConfigParser().get('Config.Settings.BLUR_MASK_MODIFIER', 'default')
print(data)
```
#### File: pyutil/simutil/Log.py
```python
__author__ = ''
import logging
import datetime
from .Env import Env
from pathlib import Path
class Log(object):
_instance = {}  # stores one logging instance per day
def debug(self, msg):
self.logger().debug(self._format_msg(msg))
def info(self, msg):
self.logger().info(self._format_msg(msg))
def warning(self, msg):
self.logger().warning(self._format_msg(msg))
def error(self, msg):
self.logger().error(self._format_msg(msg))
def critical(self, msg):
self.logger().critical(self._format_msg(msg))
def logger(self):
date = datetime.datetime.now().strftime("%Y%m%d")
if self._instance.get(date) is None:
logger = logging.getLogger("logging1")
formatter = logging.Formatter('%(asctime)s 【%(levelname)s】%(message)s')
logger.setLevel(Env()('LOG_LEVEL', 'DEBUG')) # log level
fh = logging.FileHandler(self._logfile())
fh.setFormatter(formatter)
if Env()('LOG_DEBUG', 'false').lower() == 'true': # debug
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.addHandler(fh)
self._instance[date] = logger
return self._instance.get(date)
def _logfile(self):
'''
Resolve the log file path
:return:
'''
date = datetime.datetime.now().strftime("%Y%m%d")
path = Env()('LOG_PATH', None)
if path is not None and path:
path = Path(path)
if path.is_dir():
return path.joinpath('{}.log'.format(date))
return path
else:
raise Exception('LOG_PATH not set in .env file')
# return Path(Env.basedir).joinpath('Storage/Logs/{}.log'.format(date))  # default: ./Storage/Logs/**.log
def _format_msg(self, msg):
if isinstance(msg, BaseException):
import traceback
return traceback.format_exc()
return msg.__str__()
```
#### File: pyutil/simutil/Oss.py
```python
from .Env import Env
import oss2
from oss2.models import BucketCors, CorsRule, BucketReferer
class Oss():
'''
Aliyun OSS service class
'''
_oss_sinstance = None
def __init__(self, access_key=None, access_secret=None, end_point=None, bucket_name=None):
if access_key is not None:
self._oss_sinstance = oss2.Bucket(
oss2.Auth(access_key, access_secret), end_point, bucket_name
)
else:
env = Env()
self._oss_sinstance = oss2.Bucket(
oss2.Auth(env('OSS_ACCESS_KEY'), env('OSS_ACCESS_SECRET')),
env('OSS_END_POINT'), env('OSS_BUCKET_NAME'),
)
def push(self, filename, content, header=None):
'''
Upload a file
:param filename: name of the uploaded file, e.g. 'data/test.json'
:param content: content of the file to upload
:param header: header
:return:
'''
return self._oss_sinstance.put_object(filename, content, headers=header)
def push_file(self, filename, local_filename, header=None):
'''
Upload a local file
:param filename: name of the uploaded file, e.g. 'data/test.json'
:param local_filename: path of the local file
:param header: header
:return:
'''
return self._oss_sinstance.put_object_from_file(filename, local_filename, headers=header)
def rule(self, allowed_origins=['*'], allowed_methods=['GET', 'POST', 'HEAD'], allowed_headers=['*'],
max_age_seconds=600):
'''
Configure CORS rules
:param allowed_origins: allowed origins
:param allowed_methods: accepted request methods
:param allowed_headers: accepted request headers
:param max_age_seconds: cache duration (seconds)
:return:
'''
rule = CorsRule(allowed_origins=allowed_origins, allowed_methods=allowed_methods,
allowed_headers=allowed_headers, max_age_seconds=max_age_seconds)
self._oss_sinstance.put_bucket_cors(BucketCors([rule]))
return self
def referer(self, referers=None):
'''
Hotlink protection (referer whitelist)
:param referers: ['http://www.longhash.com', 'http://api.longhash.com']
:return: self
'''
if referers is not None:
self._oss_sinstance.put_bucket_referer(BucketReferer(False, referers))
else:
self._oss_sinstance.put_bucket_referer(BucketReferer(True, []))
return self
```
#### File: pyutil/simutil/Path.py
```python
__author__ = ''
from pathlib import Path as BasePath
from simutil.App import app
class Path():
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = BasePath(app('BASE_PATH'))
return cls._instance
```
#### File: pyutil/simutil/Redis.py
```python
import redis
from .Env import Env
class Redis():
_instance = dict()
def __init__(self, host=None, port=6379, password=<PASSWORD>, db=0):
'''
Constructor
:param host: host
:param port: port
:param password: password
:param db: default db
'''
env = Env()
if host is not None:
self.host = host
self.port = port
self.password = password
self.db = db
else:
self.host = env("REDIS_HOST", 'localhost')
self.port = int(env("REDIS_PORT", 6379))
self.password = env('REDIS_PASSWORD', None)
self.db = int(env("REDIS_DB", 0))
if self._instance.get(self.db) is None:
pool = redis.ConnectionPool(host=self.host, port=self.port, db=self.db, password=self.password, decode_responses=True)
self._instance[self.db] = redis.Redis(connection_pool=pool)
def select(self, db):
'''
Switch to another redis database
:param db: database index
:return: redis.Redis
'''
if type(db) != int:
raise Exception('select db must be the type of int')
if self._instance.get(db) is None:
pool = redis.ConnectionPool(host=self.host, port=self.port, db=db, password=self.password, decode_responses=True)
self._instance[db] = redis.Redis(connection_pool=pool)
return self._instance[db]
def __getattr__(self, key):
'''
Called when using the default database
:param key: redis method to execute
:return: result
'''
return self._instance.get(self.db).__getattribute__(key)
```
#### File: simutil/Scaffold/Artisan.py
```python
import sys
import pymysql
import codecs, os
class Artisan():
author = 'jemes' # author
profix = 'lh_' # table prefix
profix_filter = True # filter by table prefix; only generate models for matching tables
namespace = 'Models.Longhash' # namespace
database = 'database' # database name
path = os.path.abspath(os.path.dirname(__file__)) # current path
output = None # output path
config = { # database configuration
'host': '127.0.0.1',
'port': 3306,
'user': 'dev',
'password': '<PASSWORD>',
'db': 'database',
'charset': 'utf8mb4',
}
def handle(self, args=sys.argv):
'''
Entry point for generating a model
:param args: command-line arguments
:return:
'''
self.check(args)
if args.__len__() == 2: # only a table name was given
modelname = self.auth_model_name(args[1])
else:
modelname = self.define_model_name(args[2])
tablename = args[1]
if tablename[:self.profix.__len__()] != self.profix and self.profix_filter: # skip if the table prefix does not match
return
sql = self.table_sql(tablename)
self.model(modelname, tablename, sql)
def all(self):
'''
Entry point for generating models for every table
:param args: command-line arguments
:return:
'''
tables = [index[0] for index in self.tables()]
for index in tables:
self.handle([None, index])
self.basemodel()
def check(self, args):
'''
Check that the configuration is valid
:param args:
:return:
'''
if args.__len__() == 1:
raise Exception('invalid params')
if self.config is None:
raise Exception('databse not config')
if self.output is None:
raise Exception('output path invalid')
def auth_model_name(self, tablename):
'''
Automatically generate the model name from the table name
:param tablename: table name
:return:
'''
if tablename[:self.profix.__len__()] != self.profix: # prefix does not match, do not strip it
tablename = tablename.replace('-', '_').split('_')
else:
tablename = tablename[self.profix.__len__():].replace('-', '_').split('_')
return ''.join([index.capitalize() for index in tablename]) + 'Model'
def define_model_name(self, name):
'''
Generate a user-defined model name
:param name: model name
:return:
'''
return name.capitalize() if name[-5:] == 'Model' else name.capitalize() + 'Model'
def tables(self):
'''
List the tables in the database
:return:
'''
db = pymysql.connect(**self.config)
cursor = db.cursor()
cursor.execute("show tables")
result = cursor.fetchall()
db.close()
return result
def table_sql(self, tablesname):
'''
Show the full columns of a table
:param tablesname: table name
:return:
'''
db = pymysql.connect(**self.config)
cursor = db.cursor()
cursor.execute("show full columns from `%s`" % tablesname)
result = cursor.fetchall()
db.close()
return result
def model(self, modelname, tablename, sql):
'''
Generate a model file
:param modelname: name of the generated model
:param tablename: table name
:param sql: structure of the table
:return:
'''
columns = [index[0] for index in sql]
with codecs.open(self.path + '/model.txt', "rb", "UTF-8") as f:
s = f.read()
_template_parameter = {
'author': self.author,
'namespace': self.namespace,
'classname': modelname,
'database': self.database,
'tablename': tablename,
'columns': self.format_columns(sql),
'create_time': '\'create_time\'' if 'create_time' in columns else None,
'update_time': '\'update_time\'' if 'update_time' in columns else None
}
s = s % _template_parameter
with codecs.open(self.output + '/' + modelname + '.py', "wb", "UTF-8") as f:
f.write(s)
f.flush()
def format_columns(self, sql):
'''
Generate the columns and their comments
:param sql:
:return:
'''
columns = [index[0] for index in sql]
max_value = max([index.__len__() for index in columns])
comments = [index[-1] for index in sql]
data = ['\'{}\', {} # {}'.format(columns[i], ' ' * (50 + max_value - columns[i].__len__()), comments[i]) for i in range(columns.__len__())]
return '\n '.join(data)
def basemodel(self):
'''
Generate the base model from basemodel.txt
:return:
'''
with codecs.open(self.path + '/basemodel.txt', "rb", "UTF-8") as f:
s = f.read()
with codecs.open(self.output + '/BaseModel.py', "wb", "UTF-8") as f:
f.write(s)
f.flush()
if __name__ == '__main__':
sql = Artisan().handle([None, 'lh_test'])
# sql = Artisan().all()
# sql = Artisan().basemodel()
pass
```
#### File: pyutil/test/sklearnDemo.py
```python
'''
Created on April 24, 2016
@author: <NAME>
'''
# Simple Regression
import numpy as np
# weekly number of ad broadcasts
x = [6, 8, 10, 14, 18]
# weekly car sales data
y = [7, 9, 13, 17.5, 18]
# using the least squares method
def fitSLR(x, y):
n = len(x)
denominator = 0
numerator = 0
for i in range(0, n):
numerator += ((x[i] - np.mean(x)) * (y[i] - np.mean(y)))
denominator += (x[i] - np.mean(x)) ** 2
print("denominator:", denominator / (n - 1))
print("numerator:", numerator / (n - 1))
b1 = numerator / float(denominator)
# b0 = np.mean(y)/float(np.mean(x))
b0 = np.mean(y) - b1 * np.mean(x)
return b0, b1
def predict(b0, b1, x):
return b0 + b1 * x
b0, b1 = fitSLR(x, y)
print(b0, b1)
x_test = 16 # [17.5862069]
print("y_test:", predict(b0, b1, x_test))
exit(0)
```
|
{
"source": "jean/labels",
"score": 2
}
|
#### File: jean/labels/setup.py
```python
import pathlib
import setuptools
def read(*args: str) -> str:
file_path = pathlib.Path(__file__).parent.joinpath(*args)
return file_path.read_text("utf-8")
setuptools.setup(
name="labels",
version="0.3.0.dev0",
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
license="MIT",
url="https://github.com/hackebrot/labels",
project_urls={
"Repository": "https://github.com/hackebrot/labels",
"Issues": "https://github.com/hackebrot/labels/issues",
},
description="CLI app for managing GitHub labels for Python 3.6 and newer. 📝",
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=setuptools.find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
zip_safe=False,
python_requires=">=3.6",
install_requires=["click", "requests", "pytoml", "attrs"],
entry_points={"console_scripts": ["labels = labels.cli:labels"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Utilities",
],
keywords=["github", "command-line"],
)
```
#### File: labels/tests/test_cli.py
```python
import typing
import shlex
import pytest
from click.testing import CliRunner
from labels import __version__
from labels.cli import labels
@pytest.fixture(name="set_username", autouse=True)
def fixture_set_username(monkeypatch: typing.Any, username: str) -> None:
"""Set the username environment variable."""
monkeypatch.setenv("LABELS_USERNAME", username)
@pytest.fixture(name="set_token", autouse=True)
def fixture_set_token(monkeypatch: typing.Any, token: str) -> None:
"""Set the token environment variable."""
monkeypatch.setenv("LABELS_TOKEN", token)
@pytest.fixture(name="run_cli")
def fixture_run_cli() -> typing.Callable:
"""Return a function that invokes a click CLI runner."""
runner = CliRunner()
def run(cli_options: str) -> typing.Any:
"""Run the CLI with the given options and return the result."""
return runner.invoke(labels, shlex.split(cli_options))
return run
@pytest.mark.parametrize("version_option", ["-V", "--version"])
def test_version_option(run_cli: typing.Callable, version_option: str) -> None:
"""Test for the CLI version option."""
result = run_cli(version_option)
assert result.exit_code == 0
assert result.output == f"labels, version {__version__}\n"
@pytest.mark.usefixtures("mock_list_labels", "mock_repo_info")
@pytest.mark.parametrize(
"repo_owner, repo_name, remote_url",
[("hackebrot", "pytest-emoji", "<EMAIL>:hackebrot/pytest-emoji.git")],
ids=["no_override"],
)
def test_fetch_default_owner_and_repo(
run_cli: typing.Callable, repo_owner: str, repo_name: str, labels_file_write: str
) -> None:
"""Test that fetch loads repo_owner and repo info from the Git repository."""
result = run_cli(f"-v fetch -f {labels_file_write}")
assert result.exit_code == 0
assert f"Requesting labels for {repo_owner}/{repo_name}" in result.output
@pytest.mark.usefixtures("mock_list_labels", "mock_repo_info")
@pytest.mark.parametrize(
"repo_owner, repo_name, remote_url",
[("hackebrot", "labels", "<EMAIL>:hackebrot/pytest-emoji.git")],
ids=["override_repo"],
)
def test_fetch_default_owner(
run_cli: typing.Callable, repo_owner: str, repo_name: str, labels_file_write: str
) -> None:
"""Test that fetch overrides the repo from the Git repository."""
result = run_cli(f"-v fetch -r {repo_name} -f {labels_file_write}")
assert result.exit_code == 0
assert f"Requesting labels for {repo_owner}/{repo_name}" in result.output
@pytest.mark.usefixtures("mock_list_labels", "mock_repo_info")
@pytest.mark.parametrize(
"repo_owner, repo_name, remote_url",
[("pytest-dev", "pytest-emoji", "<EMAIL>:hackebrot/pytest-emoji.git")],
ids=["override_owner"],
)
def test_fetch_default_repo(
run_cli: typing.Callable, repo_owner: str, repo_name: str, labels_file_write: str
) -> None:
"""Test that fetch overrides the owner from the Git repository."""
result = run_cli(f"-v fetch -o {repo_owner} -f {labels_file_write}")
assert result.exit_code == 0, result.exc_info
assert f"Requesting labels for {repo_owner}/{repo_name}" in result.output
@pytest.mark.usefixtures("mock_list_labels", "mock_repo_info")
@pytest.mark.parametrize(
"repo_owner, repo_name, remote_url",
[("pytest-dev", "pytest", "<EMAIL>:hackebrot/pytest-emoji.git")],
ids=["override_owner_and_repo"],
)
def test_fetch(
run_cli: typing.Callable, repo_owner: str, repo_name: str, labels_file_write: str
) -> None:
"""Test that fetch overrides the owner and repo from the Git repository."""
result = run_cli(f"-v fetch -o {repo_owner} -r {repo_name} -f {labels_file_write}")
assert result.exit_code == 0
assert f"Requesting labels for {repo_owner}/{repo_name}" in result.output
@pytest.mark.usefixtures("mock_sync", "mock_repo_info")
@pytest.mark.parametrize(
"repo_owner, repo_name, remote_url",
[("hackebrot", "pytest-emoji", "<EMAIL>:hackebrot/pytest-emoji.git")],
ids=["no_override"],
)
def test_sync_default_owner_and_repo(
run_cli: typing.Callable, repo_owner: str, repo_name: str, labels_file_sync: str
) -> None:
"""Test that sync loads owner and repo info from the Git repository."""
result = run_cli(f"-v sync -f {labels_file_sync}")
assert result.exit_code == 0
assert f"Requesting labels for {repo_owner}/{repo_name}" in result.output
assert f"Deleting label 'infra' for {repo_owner}/{repo_name}" in result.output
assert f"Editing label 'bug' for {repo_owner}/{repo_name}" in result.output
assert (
f"Creating label 'dependencies' for {repo_owner}/{repo_name}" in result.output
)
@pytest.mark.usefixtures("mock_sync", "mock_repo_info")
@pytest.mark.parametrize(
"repo_owner, repo_name, remote_url",
[("hackebrot", "labels", "<EMAIL>:hackebrot/pytest-emoji.git")],
ids=["override_repo"],
)
def test_sync_default_owner(
run_cli: typing.Callable, repo_owner: str, repo_name: str, labels_file_sync: str
) -> None:
"""Test that sync overrides the repo from the Git repository."""
result = run_cli(f"-v sync -r {repo_name} -f {labels_file_sync}")
assert result.exit_code == 0
assert f"Requesting labels for {repo_owner}/{repo_name}" in result.output
assert f"Deleting label 'infra' for {repo_owner}/{repo_name}" in result.output
assert f"Editing label 'bug' for {repo_owner}/{repo_name}" in result.output
assert (
f"Creating label 'dependencies' for {repo_owner}/{repo_name}" in result.output
)
@pytest.mark.usefixtures("mock_sync", "mock_repo_info")
@pytest.mark.parametrize(
"repo_owner, repo_name, remote_url",
[("pytest-dev", "pytest-emoji", "<EMAIL>:hackebrot/pytest-emoji.git")],
ids=["override_owner"],
)
def test_sync_default_repo(
run_cli: typing.Callable, repo_owner: str, repo_name: str, labels_file_sync: str
) -> None:
"""Test that sync overrides the owner from the Git repository."""
result = run_cli(f"-v sync -o {repo_owner} -f {labels_file_sync}")
assert result.exit_code == 0
assert f"Requesting labels for {repo_owner}/{repo_name}" in result.output
assert f"Deleting label 'infra' for {repo_owner}/{repo_name}" in result.output
assert f"Editing label 'bug' for {repo_owner}/{repo_name}" in result.output
assert (
f"Creating label 'dependencies' for {repo_owner}/{repo_name}" in result.output
)
@pytest.mark.usefixtures("mock_sync", "mock_repo_info")
@pytest.mark.parametrize(
"repo_owner, repo_name, remote_url",
[("pytest-dev", "pytest", "<EMAIL>:hackebrot/pytest-emoji.git")],
ids=["override_owner_and_repo"],
)
def test_sync(
run_cli: typing.Callable, repo_owner: str, repo_name: str, labels_file_sync: str
) -> None:
"""Test that sync overrides the owner and repo from the Git repository."""
result = run_cli(f"-v sync -o {repo_owner} -r {repo_name} -f {labels_file_sync}")
assert result.exit_code == 0
assert f"Requesting labels for {repo_owner}/{repo_name}" in result.output
assert f"Deleting label 'infra' for {repo_owner}/{repo_name}" in result.output
assert f"Editing label 'bug' for {repo_owner}/{repo_name}" in result.output
assert (
f"Creating label 'dependencies' for {repo_owner}/{repo_name}" in result.output
)
@pytest.mark.usefixtures("mock_list_labels", "mock_repo_info")
@pytest.mark.parametrize(
"repo_owner, repo_name, remote_url",
[("pytest-dev", "pytest", "<EMAIL>:hackebrot/pytest-emoji.git")],
ids=["override_owner_and_repo"],
)
def test_sync_dryrun(
run_cli: typing.Callable, repo_owner: str, repo_name: str, labels_file_sync: str
) -> None:
"""Test that sync with the dryrun option works as designed."""
result = run_cli(f"-v sync -n -o {repo_owner} -r {repo_name} -f {labels_file_sync}")
assert result.exit_code == 0
output = (
"This would update the following labels:\n"
" - bug\n"
"This would delete the following labels:\n"
" - infra\n"
"This would create the following labels:\n"
" - dependencies\n"
"This would NOT modify the following labels:\n"
" - docs\n"
)
assert output in result.output
```
|
{
"source": "jeanlaroche/pythran",
"score": 2
}
|
#### File: pythran/omp/__init__.py
```python
from ctypes.util import find_library
from subprocess import check_output, CalledProcessError
from numpy.distutils.misc_util import (
msvc_runtime_major, get_shared_lib_extension
)
import ctypes
import os
import sys
try:
# there may be an environ modification when loading config
from pythran.config import compiler
except ImportError:
def compiler():
return os.environ.get('CXX', 'c++')
cxx = compiler()
class OpenMP(object):
"""
Internal representation of the OpenMP module.
Custom class is used to dynamically add omp runtime function
to this library when function is called.
"""
def __init__(self):
ver = msvc_runtime_major()
if ver is None:
self.init_not_msvc()
else:
self.init_msvc(ver)
def init_msvc(self, ver):
vcomp_path = find_library('vcomp%d.dll' % ver)
if not vcomp_path:
raise ImportError("I can't find a shared library for vcomp.")
else:
# Load the library (shouldn't fail with an absolute path right?)
self.libomp = ctypes.CDLL(vcomp_path)
self.version = 20
def get_libomp_names(self):
"""Return list of OpenMP libraries to try, based on platform and
compiler detected."""
if cxx is None:
# Can't tell what compiler we're using, guessing we need libgomp
names = ['libgomp']
else:
cmd = [cxx, '--version']
try:
version_str = check_output(cmd).decode().strip()  # keep the version text itself; dirname() would mangle it
except (OSError, CalledProcessError):
version_str = ''
if 'clang' in version_str:
names = ['libomp', 'libiomp5', 'libgomp']
elif version_str.startswith('Intel'):
names = ['libiomp5']
else:
# Too many GCC flavors and version strings, make this the default
# rather than try to detect if it's GCC
names = ['libgomp']
return [name + get_shared_lib_extension() for name in names]
def init_not_msvc(self):
""" Find OpenMP library and try to load if using ctype interface. """
# find_library() does not automatically search LD_LIBRARY_PATH
# until Python 3.6+, so we explicitly add it.
# LD_LIBRARY_PATH is used on Linux, while macOS uses DYLD_LIBRARY_PATH
# and DYLD_FALLBACK_LIBRARY_PATH.
env_vars = []
if sys.platform == 'darwin':
env_vars = ['DYLD_LIBRARY_PATH', 'DYLD_FALLBACK_LIBRARY_PATH']
else:
env_vars = ['LD_LIBRARY_PATH']
paths = []
for env_var in env_vars:
paths += os.environ.get(env_var, '').split(os.pathsep)
libomp_names = self.get_libomp_names()
for libomp_name in libomp_names:
if cxx is None or sys.platform == 'win32':
# Note: Clang supports -print-file-name, but not yet for
# clang-cl as of v12.0.0 (April '21)
continue
cmd = [cxx, '-print-file-name=' + libomp_name]
# the subprocess can fail in various ways in that case just give up
try:
path = os.path.dirname(check_output(cmd).decode().strip())
if path:
paths.append(path)
except (OSError, CalledProcessError):
pass
for libomp_name in libomp_names:
# Try to load find libomp shared library using loader search dirs
libomp_path = find_library(libomp_name)
# Try to use custom paths if lookup failed
for path in paths:
if libomp_path:
break
path = path.strip()
if os.path.isfile(os.path.join(path, libomp_name)):
libomp_path = os.path.join(path, libomp_name)
if libomp_path:
# Load the library
try:
self.libomp = ctypes.CDLL(libomp_path)
except OSError:
raise ImportError("found openMP library '{}' but couldn't load it. "
"This may happen if you are cross-compiling.".format(libomp_path))
self.version = 45
return
raise ImportError("I can't find a shared library for libomp, you may need to install it "
"or adjust the {} environment variable.".format(env_vars[0]))
def __getattr__(self, name):
"""
Get correct function name from libgomp ready to be use.
__getattr__ is call only `name != libomp` as libomp is a real
attribute.
"""
if name == 'VERSION':
return self.version
return getattr(self.libomp, 'omp_' + name)
# see http://mail.python.org/pipermail/python-ideas/2012-May/014969.html
sys.modules[__name__] = OpenMP()
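# Usage sketch: attribute access on this module is forwarded to the matching
# `omp_*` symbol of the loaded runtime, e.g. (assuming the runtime loaded fine):
# import omp
# num_threads = omp.get_max_threads()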
```
#### File: pythran/optimizations/loop_full_unrolling.py
```python
from pythran import metadata
from pythran.analyses import HasBreak, HasContinue, NodeCount
from pythran.openmp import OMPDirective
from pythran.conversion import to_ast
from pythran.passmanager import Transformation
from copy import deepcopy
import gast as ast
class LoopFullUnrolling(Transformation):
'''
Fully unroll loops with static bounds
>>> import gast as ast
>>> from pythran import passmanager, backend
>>> node = ast.parse('for j in [1,2,3]: i += j')
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(LoopFullUnrolling, node)
>>> print(pm.dump(backend.Python, node))
j = 1
i += j
j = 2
i += j
j = 3
i += j
>>> node = ast.parse('for j in (a,b): i += j')
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(LoopFullUnrolling, node)
>>> print(pm.dump(backend.Python, node))
j = a
i += j
j = b
i += j
>>> node = ast.parse('for j in {1}: i += j')
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(LoopFullUnrolling, node)
>>> print(pm.dump(backend.Python, node))
j = 1
i += j
>>> node = ast.parse('for j in builtins.enumerate("1"): j')
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(LoopFullUnrolling, node)
>>> print(pm.dump(backend.Python, node))
j = (0, '1')
j
'''
MAX_NODE_COUNT = 4096
def visit_For(self, node):
# if the user added some OpenMP directive, trust him and no unroll
if metadata.get(node, OMPDirective):
return node # don't visit children because of collapse
# first unroll children if needed or possible
self.generic_visit(node)
# a break or continue in the loop prevents unrolling too
has_break = any(self.gather(HasBreak, n)
for n in node.body)
has_cont = any(self.gather(HasContinue, n)
for n in node.body)
if has_break or has_cont:
return node
# do not unroll too much to prevent code growth
node_count = self.gather(NodeCount, node)
def unroll(elt, body):
return [ast.Assign([deepcopy(node.target)], elt, None)] + body
def dc(body, i, n):
if i == n - 1:
return body
else:
return deepcopy(body)
def getrange(n):
return getattr(getattr(n, 'func', None), 'attr', None)
if isinstance(node.iter, (ast.Tuple, ast.List)):
elts_count = len(node.iter.elts)
total_count = node_count * elts_count
issmall = total_count < LoopFullUnrolling.MAX_NODE_COUNT
if issmall:
self.update = True
return sum([unroll(elt, dc(node.body, i, elts_count))
for i, elt in enumerate(node.iter.elts)], [])
code = compile(ast.gast_to_ast(ast.Expression(node.iter)),
'<loop unrolling>', 'eval')
try:
values = list(eval(code, {'builtins': __import__('builtins')}))
except Exception:
return node
values_count = len(values)
total_count = node_count * values_count
issmall = total_count < LoopFullUnrolling.MAX_NODE_COUNT
if issmall:
try:
new_node = sum([unroll(to_ast(elt),
dc(node.body, i, values_count))
for i, elt in enumerate(values)], [])
self.update = True
return new_node
except Exception:
return node
return node
```
#### File: pythran/optimizations/simplify_except.py
```python
from pythran.passmanager import Transformation
import gast as ast
def getid(node):
if isinstance(node, ast.Attribute):
return getid(node.value), node.attr
if isinstance(node, ast.Name):
return node.id
return node
class SimplifyExcept(Transformation):
"""
Remove redundant except clauses
>>> import gast as ast
>>> from pythran import passmanager, backend
>>> node = ast.parse('try: pass\\nexcept (OSError, OSError): pass')
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(SimplifyExcept, node)
>>> print(pm.dump(backend.Python, node))
try:
pass
except OSError:
pass
"""
def visit_ExceptHandler(self, node):
if isinstance(node.type, ast.Tuple):
all_ids = {getid(elt) for elt in node.type.elts}
to_remove = []
for i, elt in enumerate(node.type.elts):
eltid = getid(elt)
if eltid in all_ids:
all_ids.remove(eltid)
else:
to_remove.append(i)
for i in reversed(to_remove):
node.type.elts.pop(i)
if len(node.type.elts) == 1:
node.type = node.type.elts[0]
self.update = True
self.update |= bool(to_remove)
return node
```
#### File: pythran/tests/test_named_parameters.py
```python
from pythran.tests import TestEnv
from pythran.syntax import PythranSyntaxError
class TestNamedParameters(TestEnv):
def test_call_with_named_argument(self):
self.run_test("""
def foo(a):
return a
def call_with_named_argument(n):
return foo(a=n)""", 1, call_with_named_argument=[int])
def test_call_with_named_arguments(self):
self.run_test("""
def foo(a,b):
return a / b
def call_with_named_arguments(n):
return foo(b=n, a=2*n)""", 1, call_with_named_arguments=[int])
def test_call_with_args_and_named_argument(self):
self.run_test("""
def foo(a, b):
return a - b
def call_with_args_and_named_argument(m,n):
return foo(m, b=n)""", 1, 2, call_with_args_and_named_argument=[int, int])
def test_call_with_args_and_named_arguments(self):
self.run_test("""
def foo(a,b,c):
return c + a / b
def call_with_args_and_named_arguments(n, m):
return foo(m, c=2*n, b=n)""", 1, 2, call_with_args_and_named_arguments=[int, int])
def test_call_with_default_and_named_argument(self):
self.run_test("""
def foo(a, b=1):
return a - b
def call_with_default_and_named_argument(m,n):
return foo(a=m)""", 1, 2, call_with_default_and_named_argument=[int, int])
def test_call_with_default_and_named_arguments(self):
self.run_test("""
def foo(a,b,c=1):
return c + a / b
def call_with_default_and_named_arguments(n, m):
return foo(m, b=n)""", 1, 2, call_with_default_and_named_arguments=[int, int])
def test_intrinsic_named_argument(self):
""" Check named arguments with attributes as value. """
self.run_test("""
def intrinsic_named_argument(n):
import numpy
return numpy.ones(n, dtype=numpy.uint8).nbytes""",
4, intrinsic_named_argument=[int])
def test_intrinsic_named_argument_without_default(self):
self.run_test("""
def intrinsic_named_argument_without_default(n):
import numpy as np
return np.expand_dims(np.ones(n), axis=0)""",
4, intrinsic_named_argument_without_default=[int])
def test_nested_function_with_named_arguments(self):
self.run_test('''
def nested_function_with_named_arguments(a):
b = a * 2
def foo(c):
return b + c
return foo(c=a)''',
4, nested_function_with_named_arguments=[int])
def test_nested_function_with_several_named_arguments(self):
self.run_test('''
def nested_function_with_several_named_arguments(a):
b = a * 2
def foo(c, e):
return b + c + e
return foo(e = 4, c=a)''',
4, nested_function_with_several_named_arguments=[int])
def test_aliasing_functions_with_named_arguments(self):
self.run_test('''
def aliasing_functions_with_named_arguments(n):
import numpy
if n > 10:
my = numpy.ones
else:
my = numpy.zeros
return my(n, dtype=numpy.uint8).nbytes''',
4, aliasing_functions_with_named_arguments=[int])
def test_aliasing_functions_with_different_structural_types(self):
with self.assertRaises(PythranSyntaxError):
self.run_test('''
def aliasing_functions_with_different_structural_types(n):
import numpy
if n > 10:
my = sum
else:
my = numpy.zeros
return my(n, dtype=numpy.uint8).nbytes''',
4, aliasing_functions_with_different_structural_types=[int])
def test_default_argument_all_filled(self):
code = '''
def default_argument_all_filled(x):
return test2(x,2)
def test2(a, b=3):
return a, b'''
self.run_test(code, 10, default_argument_all_filled=[int])
```
#### File: pythran/tests/test_version.py
```python
import re
import pythran
from pythran.tests import TestEnv
class TestVersion(TestEnv):
def test_version_check_cython(self):
# Cython actually does this check (variable is named
# `pythran_is_pre_0_9_6`). Make sure it doesn't break.
v = pythran.__version__
pre_096 = tuple(map(int, v.split('.')[0:3])) < (0, 9, 6)
self.assertFalse(pre_096)
def test_valid_version_string(self):
# Verify that the pythran version is a valid one (note: excludes
# .post suffix, and complies to PEP 440. Test taken from NumPy
version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])"
dev_suffix = r"\.dev0\+[0-9]*\.g[0-9a-f]+"
# For released versions:
res1 = re.match(version_pattern, pythran.__version__)
# For dev versions:
res2 = re.match(version_pattern + dev_suffix, pythran.__version__)
self.assertTrue(res1 is not None or res2 is not None,
pythran.__version__)
```
|
{
"source": "jeanlego/nxBender",
"score": 2
}
|
#### File: nxBender/nxbender/ppp.py
```python
import subprocess
import threading
import pty
import os
import logging
import sys
from . import sslconn
import ssl
import signal
import select
import socket
class PPPSession(object):
def __init__(self, options, session_id, routecallback=None):
self.options = options
self.session_id = session_id
self.routecallback = routecallback
self.pppargs = [
'debug', 'debug',
'dump',
'logfd', '2', # we extract the remote IP thru this
'lcp-echo-interval', '10',
'lcp-echo-failure', '2',
'ktune',
'local',
'noipdefault',
'noccp', # server is buggy
'noauth',
'nomp',
'usepeerdns',
]
def run(self):
master, slave = pty.openpty()
self.pty = master
try:
self.pppd = subprocess.Popen(['pppd'] + self.pppargs,
stdin = slave,
stdout = slave,
stderr = subprocess.PIPE)
except OSError as e:
logging.error("Unable to start pppd: %s" % e.strerror)
sys.exit(1)
os.close(slave)
self.tunsock = sslconn.SSLTunnel(self.session_id, self.options, self.options.server, self.options.port)
self.pty = master
def sigint_twice(*args):
logging.info('caught SIGINT again, killing pppd')
self.pppd.send_signal(signal.SIGKILL)
def sigint(*args):
logging.info('caught SIGINT, signalling pppd')
self.killing_pppd = True
self.pppd.send_signal(signal.SIGTERM)
signal.signal(signal.SIGINT, sigint_twice)
os.kill(os.getpid(), signal.SIGHUP) # break out of select()
old_sigint = signal.signal(signal.SIGINT, sigint)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
signal.signal(signal.SIGWINCH, signal.SIG_IGN)
try:
while self.pppd.poll() is None:
stop = self._pump()
if stop:
break
except ssl.SSLError as e: # unexpected
logging.exception(e)
except socket.error as e: # expected (peer disconnect)
logging.error(e.strerror)
finally:
code = self.pppd.poll()
if code is not None: # pppd caused termination
logger = logging.error
if getattr(self, 'killing_pppd', False) and code == 5:
logger = logging.info
logger("pppd exited with code %d" % code)
if code in [2, 3]:
logging.warn("Are you root? You almost certainly need to be root")
else:
self.pppd.send_signal(signal.SIGHUP)
logging.info("Shutting down...")
os.close(self.pty)
self.pppd.wait()
signal.signal(signal.SIGINT, old_sigint)
self.tunsock.close()
def _pump(self):
r_set = [self.tunsock, self.pppd.stderr]
w_set = []
# If the SSL tunnel is blocked on writes, apply backpressure (stop reading from pppd)
if self.tunsock.writes_pending:
w_set.append(self.tunsock)
else:
r_set.append(self.pty)
try:
r, w, x = select.select(r_set, w_set, [])
except select.error:
return True # interrupted
if self.tunsock in r:
self.tunsock.read_to(self.pty)
if self.pty in r:
stop = self.tunsock.write_from(self.pty)
if stop:
return stop
if self.tunsock in w:
self.tunsock.write_pump()
if self.pppd.stderr in r:
line = self.pppd.stderr.readline().strip().decode('utf-8', errors='replace')
if self.options.show_ppp_log:
print("pppd: %s" % line)
if line.startswith("remote IP address"):
remote_ip = line.split(' ')[-1]
self.routecallback(remote_ip)
```
|
{
"source": "jeanlescure/sphinxcontrib-apa",
"score": 3
}
|
#### File: sphinxcontrib-apa/sphinxcontrib/apa.py
```python
from __future__ import unicode_literals
from collections import Counter
import re
import unicodedata
from pybtex.style.labels import BaseLabelStyle
from pybtex.plugin import register_plugin
from dataclasses import dataclass, field
import sphinxcontrib.bibtex.plugin
from sphinxcontrib.bibtex.style.referencing import BracketStyle, PersonStyle
from sphinxcontrib.bibtex.style.referencing.author_year \
import AuthorYearReferenceStyle
from sphinxcontrib.bibtex.style.referencing.label \
import LabelReferenceStyle
from typing import Union
_nonalnum_pattern = re.compile('[^A-Za-z0-9 \-]+', re.UNICODE)
def bracket_style() -> BracketStyle:
return BracketStyle(
left='(',
right=')',
)
def person_style() -> PersonStyle:
return PersonStyle(
style='last',
abbreviate=False,
sep=' & ',
sep2=None,
last_sep=None,
other=' et al',
)
def _strip_accents(s):
return "".join(
(c for c in unicodedata.normalize('NFD', s)
if not unicodedata.combining(c)))
def _strip_nonalnum(parts):
"""Strip all non-alphanumerical characters from a list of strings.
>>> print(_strip_nonalnum([u"ÅA. B. Testing 12+}[.@~_", u" 3%"]))
AABTesting123
"""
s = "".join(parts)
return _nonalnum_pattern.sub("", _strip_accents(s))
class ApaLabelStyle(BaseLabelStyle):
def format_labels(self, sorted_entries):
labels = [self.format_label(entry) for entry in sorted_entries]
count = Counter(labels)
counted = Counter()
for label in labels:
if count[label] == 1:
yield label
else:
yield label + chr(ord('a') + counted[label])
counted.update([label])
def format_label(self, entry):
label = "Anonymous"
if 'author' in entry.persons:
label = self.format_author_or_editor_names(entry.persons['author'])
elif 'editor' in entry.persons:
label = self.format_author_or_editor_names(entry.persons['editor'])
elif 'organization' in entry.fields:
label = entry.fields['organization']
if label.startswith("The "):
label = label[4:]
if 'year' in entry.fields:
return "{}, {}".format(label, entry.fields['year'])
else:
return "{}, n.d.".format(label)
def format_author_or_editor_names(self, persons):
if len(persons) == 1:
return _strip_nonalnum(persons[0].last_names)
elif len(persons) == 2:
return "{} & {}".format(
_strip_nonalnum(persons[0].last_names),
_strip_nonalnum(persons[1].last_names))
else:
return "{} et al.".format(
_strip_nonalnum(persons[0].last_names))
register_plugin('pybtex.style.labels', 'apa', ApaLabelStyle)
@dataclass
class ApaReferenceStyle(AuthorYearReferenceStyle):
bracket_parenthetical: BracketStyle = field(default_factory=bracket_style)
bracket_textual: BracketStyle = field(default_factory=bracket_style)
bracket_author: BracketStyle = field(default_factory=bracket_style)
bracket_label: BracketStyle = field(default_factory=bracket_style)
bracket_year: BracketStyle = field(default_factory=bracket_style)
person: PersonStyle = field(default_factory=person_style)
sphinxcontrib.bibtex.plugin.register_plugin(
'sphinxcontrib.bibtex.style.referencing',
'apastyle', ApaReferenceStyle)
def setup(app):
return {
'version': '1.0.0',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
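# Sketch of enabling this style from a Sphinx conf.py (assuming the
# sphinxcontrib-bibtex 2.x options named below):
# extensions = ['sphinxcontrib.bibtex', 'sphinxcontrib.apa']
# bibtex_default_style = 'apa'          # label style registered above
# bibtex_reference_style = 'apastyle'   # referencing style registered above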
```
|
{
"source": "jeanlilly/awsgi",
"score": 3
}
|
#### File: awsgi/awsgi/__init__.py
```python
from base64 import b64encode, b64decode
from io import BytesIO
import itertools
import collections
import sys
import gzip
ONE_MTU_SIZE = 1400
try:
# Python 3
from urllib.parse import urlencode
# Convert bytes to str, if required
def convert_str(s):
return s.decode('utf-8') if isinstance(s, bytes) else s
# Convert str to bytes, if required
def convert_byte(b):
return b.encode('utf-8', errors='strict') if (
isinstance(b, str)) else b
except ImportError:
# Python 2
from urllib import urlencode
# No conversion required
def convert_str(s):
return s
# Convert str to bytes, if required
def convert_byte(b):
return b.encode('utf-8', errors='strict') if (
isinstance(b, (str, unicode))) else b
try:
service_version = open("./VERSION").read().strip()
except Exception:
service_version = "undefined"
__all__ = 'response',
def convert_b46(s):
return b64encode(s).decode('ascii')
class StartResponse(object):
def __init__(self, base64_content_types=None, use_gzip=False):
'''
Args:
base64_content_types (set): Set of HTTP Content-Types which should
return a base64 encoded body. Enables returning binary content from
API Gateway.
'''
self.status = 500
self.status_line = '500 Internal Server Error'
self.headers = [
("version", service_version)
]
self.use_gzip = use_gzip
self.chunks = collections.deque()
self.base64_content_types = set(base64_content_types or []) or set()
def __call__(self, status, headers, exc_info=None):
self.status_line = status
self.status = int(status.split()[0])
self.headers[:] = headers
return self.chunks.append
def use_binary_response(self, headers, body):
content_type = headers.get('Content-Type')
if content_type and ';' in content_type:
content_type = content_type.split(';')[0]
return content_type in self.base64_content_types
def use_gzip_response(self, headers, body):
content_type = headers.get('Content-Type')
return self.use_gzip and content_type in {
"application/javascript",
"application/json",
"text/css",
"text/html",
"text/plain",
"text/html",
"image/svg+xml",
"font/otf",
"font/ttf"
} and len(body) > ONE_MTU_SIZE
def build_body(self, headers, output):
totalbody = b''.join(itertools.chain(
self.chunks, output,
))
is_gzip = self.use_gzip_response(headers, totalbody)
is_b64 = self.use_binary_response(headers, totalbody)
print(f"IS_GZIP = {is_gzip}")
print(f"is_b64 = {is_b64}")
if is_gzip:
totalbody = gzip.compress(totalbody)
headers["Content-Encoding"] = "gzip"
is_b64 = True
if is_b64:
converted_output = convert_b46(totalbody)
else:
converted_output = convert_str(totalbody)
return {
'isBase64Encoded': is_b64,
'body': converted_output,
}
def response(self, output):
headers = dict(self.headers)
rv = {
'statusCode': self.status,
'headers': headers,
}
rv.update(self.build_body(headers, output))
return rv
class StartResponse_GW(StartResponse):
def response(self, output):
rv = super(StartResponse_GW, self).response(output)
rv['statusCode'] = str(rv['statusCode'])
return rv
class StartResponse_ELB(StartResponse):
def response(self, output):
rv = super(StartResponse_ELB, self).response(output)
rv['statusCode'] = int(rv['statusCode'])
rv['statusDescription'] = self.status_line
return rv
def environ(event, context):
body = event.get('body', '') or ''
if event.get('isBase64Encoded', False):
body = b64decode(body)
# FIXME: Flag the encoding in the headers
body = convert_byte(body)
environ = {
'REQUEST_METHOD': event['httpMethod'],
'SCRIPT_NAME': '',
'SERVER_NAME': '',
'SERVER_PORT': '',
'PATH_INFO': event['path'],
'QUERY_STRING': urlencode(event['queryStringParameters'] or {}),
'REMOTE_ADDR': '127.0.0.1',
'CONTENT_LENGTH': str(len(body)),
'HTTP': 'on',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.input': BytesIO(body),
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'wsgi.url_scheme': '',
'awsgi.event': event,
'awsgi.context': context,
}
headers = event.get('headers', {}) or {}
for k, v in headers.items():
k = k.upper().replace('-', '_')
if k == 'CONTENT_TYPE':
environ['CONTENT_TYPE'] = v
elif k == 'ACCEPT_ENCODING':
environ['ACCEPT_ENCODING'] = v
elif k == 'HOST':
environ['SERVER_NAME'] = v
elif k == 'X_FORWARDED_FOR':
environ['REMOTE_ADDR'] = v.split(', ')[0]
elif k == 'X_FORWARDED_PROTO':
environ['wsgi.url_scheme'] = v
elif k == 'X_FORWARDED_PORT':
environ['SERVER_PORT'] = v
environ['HTTP_' + k] = v
return environ
def select_impl(event, context):
if 'elb' in event.get('requestContext', {}):
return environ, StartResponse_ELB
else:
return environ, StartResponse_GW
def response(app, event, context, base64_content_types=None):
environ, StartResponse = select_impl(event, context)
use_gzip = bool("gzip" in event.get("headers", {}).get('accept-encoding', ""))
sr = StartResponse(base64_content_types=base64_content_types, use_gzip=use_gzip)
output = app(environ(event, context), sr)
response = sr.response(output)
return response
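# Handler sketch (assuming a WSGI `app` such as a Flask application; names are
# illustrative, not part of this module):
# import awsgi
# def lambda_handler(event, context):
#     return awsgi.response(app, event, context, base64_content_types={'image/png'})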
```
|
{
"source": "jeanlks/ARP",
"score": 3
}
|
#### File: ARP/QDA/QDA.py
```python
import math
import numpy as np
import pandas as pd
def normalizationBySd(matrix):
result = []
for vector in matrix:
line = []
mean = np.mean(vector)
std = np.std(vector)
for item in vector:
# z-score standardization, as the function name implies; the original
# expression mixed mean and median and had a precedence bug
val = (item - mean) / std
line.append(val)
result.append(line)
return result
def normalizationByMaxMin(matrix, max, min):
result = []
for column in matrix:
line = []
for item in column:
val = (item - min) / (max - min)
line.append(val)
result.append(line)
return result
def predict(testVector, meansVector, covarianceMatrix):
inverseMatrix = np.linalg.inv(covarianceMatrix)
features_sub = np.subtract(testVector, meansVector)
partial_result = np.dot(np.dot(features_sub, inverseMatrix), np.transpose(features_sub))
# QDA discriminant (up to constants): use the log determinant of the covariance,
# not the elementwise log, so that a scalar score is returned
return 1/2 * np.log(np.linalg.det(covarianceMatrix)) + 1/2 * partial_result
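# Usage sketch (hypothetical per-class statistics): compute the score of a test
# point against each class and pick the class with the smallest score
# scores = [predict(x_test, mean_k, cov_k) for mean_k, cov_k in class_params]
# predicted_class = int(np.argmin(scores))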
```
|
{
"source": "Jean-LouisH/Lilliputian",
"score": 2
}
|
#### File: assets/scripts/debug_escape_to_splash_screen.py
```python
import lilliputian as lp
def on_input():
if lp.get_input_api().is_on_release("escape"):
lp.get_scene_api().load_scene("assets/scenes/splash_screen.yml")
```
#### File: assets/scripts/debug_time_display.py
```python
import lilliputian as lp
import time
def on_frame():
scene_api = lp.get_scene_api()
if scene_api.this_has_component(lp.CV.UI_TEXT_LABEL):
current_time_string = time.strftime("%H:%M:%S", time.localtime())
ui_text_label = scene_api.get_this_component_variant(lp.CV.UI_TEXT_LABEL).get_ui_text_label()
ui_text_label.set_text("Time: " + current_time_string)
```
|
{
"source": "Jean-LouisH/Omnia",
"score": 2
}
|
#### File: scripts/python/constantly_rotate.py
```python
import omnia
def on_logic_frame():
omnia.get_component("Transform").rotate_y(0.325)
```
#### File: scripts/python/debug_time_display.py
```python
import omnia
import time
def on_logic_frame():
scene_api = omnia.get_scene_api()
if scene_api.has_component("UITextLabel"):
current_time_string = time.strftime("%H:%M:%S", time.localtime())
ui_text_label = scene_api.get_component("UITextLabel")
ui_text_label.set_text("Time: " + current_time_string)
```
|
{
"source": "jeanlst/soen691-clustering-project",
"score": 3
}
|
#### File: soen691-clustering-project/soen691_clustering_project/bfr.py
```python
import glob
import os
from pathlib import Path
import numpy as np
import math
from cluster import Cluster
from bin_heap import BinHeap
#the BFR class proper
class BFR:
def __init__(self, data=None, k=None, alpha=1, beta=1):
"""
DATA: numpy matrix
K: Number of Clusters
ALPHA: for Primary Compression. Number of standard deviations from the centroid under
which points are grouped together and summarized
BETA: for Secondary Compression. Merge groups of points where their "tightness" is under
this value
"""
self.__data = np.array(data) if isinstance(data, list) else data
self.__k = k
self.__alpha = alpha
self.__beta = beta
#Data Dimensionality
self.__dims = None
#list of Clusters
self.__clusters = []
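# Construction sketch (illustrative values; only the constructor shown above is
# assumed here, the remaining methods of the class are private helpers):
# bfr = BFR(data=np.random.rand(100, 2), k=3, alpha=2, beta=1.5)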
def __convertStoG(self, mtx):
'''
Convert a matrix of singleton points into a matrix group of (SUM, SUMSQ, N)
'''
#get number of rows and columns
rows = mtx.shape[0]
cols = mtx.shape[1]
#set number of cols and rows for new mtx
nrows = rows
ncols = cols * 2 + 1
#new mtx
nmtx = np.zeros((nrows, ncols))
#set values
nmtx[:,:cols] = mtx * 1.0
nmtx[:,cols:-1] = mtx ** 2.0
nmtx[:,-1] = 1 * 1.0
return nmtx
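# Worked example (a sketch): for mtx = np.array([[1, 2], [3, 4]]) this returns
# rows of the form (SUM..., SUMSQ..., N):
# [[1., 2., 1.,  4., 1.],
#  [3., 4., 9., 16., 1.]]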
def __dist_mean(self, row1, row2, dims):
'''
distance calculation for grouped values
'''
#number of elements
n1 = row1[dims * 2]
n2 = row2[dims * 2]
#means
mean1 = (row1 * 1.0) / n1
mean2 = (row2 * 1.0) / n2
#distance calculation
#squared euclidean
total_dist = 0
for i in list(range(dims)):
total_dist += (mean1[i] - mean2[i]) ** 2.0
return total_dist
def __closest(self, row, centroids, dims):
'''
Given a row and a matrix of centroids, return index number
for closest centroid
dims: number of dimensions
'''
#assign distance to row
#leave out ID list for centroids
distances = np.apply_along_axis(lambda x: self.__dist_mean(row, x, dims), 1, centroids[:,:-1])
#get index of closest
min_index = np.argmin(distances)
#return 'ID no.' of closest centroid
return centroids[min_index,-1]
def __cmean(self, row, dims):
"""
given a row, return the mean
"""
nrow = (row[:dims] * 1.0) / row[dims * 2]
return nrow
def __centroid_selection(self, mtx, k, dims):
'''
Select centroids at random, ensuring that the selected centroids are all
different values
'''
#attempt to find centroids that are unique (no duplicate rows)
unique_cts = False
itr = 0
while unique_cts == False:
#select k points
indices = np.random.choice(mtx.shape[0], k, replace=False)
#get the centroids
selected = mtx[indices,:]
#get means
sel_means = np.apply_along_axis(lambda x: self.__cmean(x, dims), 1, selected)
#should be more robust way to check uniqueness
#filter for uniqueness
unique_cents = np.unique(sel_means, axis=0)
#check if unique and orig are same size
if selected.shape[0] == unique_cents.shape[0]:
unique_cts = True
itr += 1
if itr > 100:
print("Unable to find unique centroids!")
break
return selected
def __kmeans_groups_init(self, mtx, k, dims):
'''
perform weighted kmeans with groups of values
choose initial centroids
'''
centroids = self.__centroid_selection(mtx, k, dims)
#index numbers for the newly selected centroids
c_inds = np.arange(k).reshape((k,1))
#append indices to end of centroid list
centroids = np.hstack((centroids, c_inds))
#assign to each point closest centroid
#column vector for assignments
assignments = np.apply_along_axis(lambda x: self.__closest(x, centroids, dims), 1, mtx)
#matrix plus assignments
assigned = np.hstack((mtx, assignments.reshape(assignments.shape[0], 1)))
return assigned, centroids
def __recenter(self, mtx, dims):
'''
given a matrix of assigned points, average points and return new centroids
'''
#get all centroid IDs
cent_IDs = np.unique(mtx[:,-1])
#calculate averages
current = np.zeros((cent_IDs.shape[0], mtx.shape[1]))
#creating a dictionary to associate IDs with indices
cent_dict = {k: cent_IDs[k] for k in list(range(len(cent_IDs)))}
#for each unique value / centroid
#dont see how to do this without a loop
for i in list(range(len(cent_IDs))):
#slicing for current value
cind = np.where(mtx[:,-1] == cent_dict[i])[0]
#selecting
c_slice = mtx[cind,:]
#sum
c_sum = np.sum(c_slice, 0)
#set to index
current[int(i),:-1] = c_sum[:-1]
#set last slot (index)
current[int(i),-1] = cent_dict[i]
return current
def __centroid_comp(self, cts1, cts2, dims):
'''
compare 2 lists of centroids,
check if identical, regardless of number of
members in the subgroup
'''
#check if centroids are the same
same_cents = True
for i in range(cts1.shape[0]):
#get averages for cluster center
cls_c1 = cts1[i,:dims] / cts1[i,dims*2]
cls_c2 = cts2[i,:dims] / cts2[i,dims*2]
#equality check
if np.array_equal(cls_c1, cls_c2) == False:
same_cents = False
return same_cents
def __matrix_insert(self, old_c, new_c):
"""
Attempt to fix the "dissapearing centroid" issue.
reinsert discarded centroid into new matrix
"""
#get IDs for centroids
old_ids = np.unique(old_c[:,-1])
new_ids = np.unique(new_c[:,-1])
#find missing indices
missed = np.setdiff1d(old_ids, new_ids)
#create empty matrix, same size as old
reconst = np.zeros((old_c.shape[0], old_c.shape[1]))
#fill it
#new values
for i in new_ids:
#get relevant indexes
rind = np.where(new_c[:,-1] == i)[0][0]
r2ind = np.where(old_c[:,-1] == i)[0][0]
reconst[r2ind,:] = new_c[rind,:]
for i in missed:
#get relevant index
rind = np.where(old_c[:,-1] == i)[0][0]
reconst[rind,:] = old_c[rind,:]
#return reconstructed mtx
return reconst
def __kmeans_converge(self, mtx, cents, k, dims):
'''
given a set of assigned points, average and reassign
until convergence
'''
converge = False
while converge == False:
#get new centroids from average of
new_cents = self.__recenter(mtx, dims)
if cents.shape[0] != new_cents.shape[0]:
#print("disappearing centroid")
#attempting to fix the "Disappearing Centroid" problem
new_cents = self.__matrix_insert(cents, new_cents)
if self.__centroid_comp(cents, new_cents, dims) == True:
#centroids are equivalent, convergence reached
converge = True
else:
#reassign
reassign = np.apply_along_axis(lambda x: self.__closest(x, new_cents, dims), 1, mtx[:,:-1])
#orig matrix plus assignments
mtx = np.hstack((mtx[:,:-1], reassign.reshape(reassign.shape[0], 1)))
#assign new centroids as old
cents = new_cents
#return matrix with new centroids
return mtx, new_cents
def __kmeans_assign(self, mtx, centroids, k, dims):
'''
Given an unassigned matrix and some centroids, assign centroids to
rows then perform kmeans
'''
#take matrix and centroids and assign to points
assigned = self.__assign_pts(mtx, centroids, dims)
#perform k-means until convergence
final_asg, final_cents = self.__kmeans_converge(assigned, centroids, k, dims)
#return new assignments and centroids
return final_asg, final_cents
def __kmeans_group(self, data, k, convert=True, dm=1):
'''
perform kmeans. Convert Data into summary groups prior.
data = matrix of points, either converted or not
k = number of centroids
convert = indicates whether the data still needs to be converted to group format
dm = dimensionality of the data; only used when convert is False (data already in group format)
'''
#dimensionality set if working with groups already
dims = dm
mtx = data
#conversion to "Triplice" Format if not already done
#ignore if working with an already converted set
if convert == True:
#number of dimensions
dims = data.shape[1]
#data conversion
mtx = self.__convertStoG(data)
#initial assignment
init_mtx, init_cents = self.__kmeans_groups_init(mtx, k, dims)
#loop until convergence
final_asg, final_cents = self.__kmeans_converge(init_mtx, init_cents, k, dims)
#return matrix with assignments as well as centroids
return final_asg, final_cents
def __nearest_cls(self, mtx, dims):
'''
given a matrix of assigned points/clusters, return the indices
of the two nearest clusters
'''
nb_rows = mtx.shape[0]
#minimum dist set to infinity
min_distance = math.inf
#placeholder indices
min_inds = [-1, -1]
#for each row
for i in range(nb_rows):
#for each pair of rows
for j in range(i + 1, nb_rows):
#get distance of these two
current_dist = self.__dist_mean(mtx[i,:], mtx[j,:], dims)
#current distance less than minimum
if current_dist < min_distance:
#reset minimum
min_distance = current_dist
#set new minimum indices
min_inds = [i, j]
return min_inds
def __merge_clusters(self, mtx, merge_inds, group_nb):
'''
Given an assigned matrix and a list of indices to merge together,
return a matrix with the specified rows merged together.
group_nb = integer identifying the cluster used for the newly merged cluster
assigned matrix: (SUM, SUMSQ, N, CLUSTER Nb.)
'''
#indices for rows to keep as is
keep_inds = [x for x in list(range(mtx.shape[0])) if x not in merge_inds]
#retrieve merging rows
merging_rows = mtx[merge_inds,:]
#retrieve rows to keep
keep_rows = mtx[keep_inds,:]
#sum rows
merged = np.sum(merging_rows, 0)
#replace last in index with group number
merged[-1] = group_nb
#re-add merged row to rest of dataset
new_mtx = np.vstack((keep_rows, merged))
return new_mtx
def __assign_pts(self, mtx, cents, dims):
'''
given matrices for both points and centroids,
assign to the points the nearest centroid.
Return list of points with assigned centroids.
'''
#assign to each point closest centroid
#column vector for assignments
assignments = np.apply_along_axis(lambda x: self.__closest(x, cents, dims), 1, mtx)
#matrix plus assignments
assigned = np.hstack((mtx, assignments.reshape(assignments.shape[0], 1)))
return assigned
def __hierar_cls_agg(self, data, k):
'''
Perform Agglomerative Hierarchical Clustering on the given Dataset, assigning to each point
an individual cluster and progressively merging until k clusters is reached
Return both assignments and the list of clusters
'''
#get number of dimensions
dims = data.shape[1]
#convert data to required format
mtx = self.__convertStoG(data)
#keep original matrix for later transformation
mtx_init = self.__convertStoG(data)
#initial assignment
#list of clusters
cluster_lst = np.arange(mtx.shape[0])
#add to matrix
mtx = np.hstack((mtx,cluster_lst.reshape(cluster_lst.shape[0], 1)))
#while correct number of clusters has not been found
while mtx.shape[0] != k:
#get the two nearest rows
near_ind = self.__nearest_cls(mtx[:,:-1], dims)
#get cluster number of first row to merge
cls_nb = mtx[near_ind[0],-1]
#merge them together
mtx = self.__merge_clusters(mtx, near_ind, cls_nb)
#change matrix 'id's to just '0,1,2'
mtx[:,-1] = np.arange(k)
#assign points in original matrix to clusters
assign_mtx = self.__assign_pts(mtx_init, mtx[:,], dims)
return assign_mtx, mtx
def __get_variances(self, row, dims):
'''
given a row, and number of dimensions, return the variance for
each element.
Return an array where elements are the variance for each element
of the row.
'''
#sum
row_sum = row[:dims]
#sum of squares
row_ssq = row[dims:dims * 2]
#number of elements
row_n = row[dims * 2]
#variance
variances = (row_ssq / row_n) - (((row_sum) ** 2) / (row_n ** 2))
return variances
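#Worked check (added illustration, not part of the original code): with dims = 1
#and a group summarising the points {1, 2, 3}, the row is [SUM, SUMSQ, N] = [6, 14, 3]
#and the population variance comes out as 14/3 - (6**2)/(3**2) = 2/3,
#matching np.var([1, 2, 3]).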
def __mahalanobis_dist(self, row, centroids, dims):
'''
return a given element's (singleton or otherwise) Mahalanobis distance from
its assigned distribution, using the centroid of the group when the row in
question is a summary of a collection of points
'''
#get point
point = row[:dims] / row[dims*2]
#select from the list of centroids the distribution to use
#row's assignment is currently closest centroid
dist = centroids[int(row[-1]),:]
#get dist avg
dist_avg = dist[:dims] / dist[dims*2]
#interval
interval = point - dist_avg
#get variances for distribution
varis = self.__get_variances(dist, dims)
#square interval and divide by vars
int2 = (interval ** 2) / varis
#sum and return distance
return np.sum(int2)
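#Worked check (added illustration, not part of the original code): for a singleton
#point x = 3 assigned to the {1, 2, 3} group above (mean 2, variance 2/3), the value
#returned here is (3 - 2)**2 / (2/3) = 1.5. With the diagonal covariance used here,
#this is simply the sum of the per-dimension squared z-scores.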
def __pc_merge(self, mtx):
'''
Merge operation used in primary compression.
Given a submatrix, merge together all rows that have the same assignment
'''
#for each centroid ID
c_ids = np.unique(mtx[:,-1])
#print("Centroid IDs to merge")
#print(c_ids)
#print("Merge Matrix:")
#print(mtx)
for i in c_ids:
#indices for the merge
merge_inds = np.where(mtx[:,-1] == i)[0]
#check if there are actually more than 1 row to merge
#skip if not
if len(merge_inds) > 1:
mtx = self.__merge_clusters(mtx, merge_inds, i)
#return newly formed matrix at the end
return mtx
def __primary_compression(self, mtx, centroids, dims, radius):
'''
Primary Compression step for the BFR clustering algorithm.
mtx : matrix of assigned points
centroids: current centroids
dims: dimensionality
radius: minimum mahalanobis distance under which points are compressed
under the centroid
'''
#calculate mahalanobis distances for each point to the nearest centroid
mh_dists = np.apply_along_axis(lambda x: self.__mahalanobis_dist(x, centroids, dims), 1, mtx)
#convert NaN values to 0
mh_dists = np.nan_to_num(mh_dists)
#print("Mahalanobis Distances")
#print(mh_dists)
#check if distance is less than threshold
#compress points to centroid if distance is less
threshold = mh_dists < radius
#select rows to be compressed, this includes the centroids,
#as their dist is 0
compr_inds = np.where(threshold == True)[0]
#separate matrix into 2: indices to be merged and
#those to be left alone
#print("Full mtx: ", mtx.shape)
#rows to merge
to_merge = mtx[compr_inds,:]
#rows to keep
noCompr_inds = [x for x in list(range(mtx.shape[0])) if x not in compr_inds]
to_leave = mtx[noCompr_inds,:]
#print("To merge: ", to_merge.shape)
#print("To keep: ", to_leave.shape)
#merge selected indices, then append to kept
merged = self.__pc_merge(to_merge)
new_mtx = np.vstack((merged, to_leave))
#print("Remade Matrix:")
#print(new_mtx)
return new_mtx
def __get_tightness(self, row, dims):
'''
get tightness for given distribution, which is essentially
the max of the standard deviations
'''
#get array of variances
variances = self.__get_variances(row, dims)
#square root is standard deviation
st_div = variances ** (1 / 2)
#get maximum value
std_max = np.max(st_div)
return std_max
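#Worked check (added illustration, not part of the original code): for the same
#{1, 2, 3} group, the only variance is 2/3, so the tightness is sqrt(2/3) ~= 0.816,
#i.e. the largest per-dimension standard deviation of the cluster.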
def __merged_tightness(self, row1, row2, dims):
'''
merge two rows together and get check their
tightness
'''
row_c = row1 + row2
tgt = self.__get_tightness(row_c, dims)
return tgt
def __valid_merge(self, row1, row2, dims, orig_cts):
"""
Verify whether a proposed merge is valid: at least one of the two rows
must be a subcluster, i.e. not one of the original centroids
"""
#get IDs
id_1 = row1[(dims * 2) + 1]
id_2 = row2[(dims * 2) + 1]
#check if they are in the centroid list
check1 = id_1 in orig_cts
check2 = id_2 in orig_cts
#if at least one is not
if (check1 and check2) == False:
return True
#if both original centroids, invalid
return False
def __get_tightest_pair(self, mtx, dims, orig_cts):
'''
given a matrix of rows of clusters, return the indices of the rows
that are considered the "tightest", as well as the value
ORIG_CTS: index values of original centroids, make sure not to merge
groups that are both "REAL" clusters
'''
nb_rows = mtx.shape[0]
#minimum dist set to infinity
min_tightness = math.inf
#placeholder indices
min_inds = [-1, -1]
#for each row
for i in range(nb_rows):
#for each pair of rows
for j in range(i + 1, nb_rows):
#get tightness from merged row of these two
proj_tightness = self.__merged_tightness(mtx[i,:], mtx[j,:], dims)
#current distance less than minimum
if proj_tightness < min_tightness:
#check assigned IDs are not BOTH part of original matrices
i1_check = mtx[i,(dims * 2) + 1] in orig_cts
i2_check = mtx[j,(dims * 2) + 1] in orig_cts
#only merge if at least one index is from a subgroup
if (i1_check and i2_check) == False:
#reset minimum
min_tightness = proj_tightness
#set new minimum indices
min_inds = [i, j]
return min_inds, min_tightness
def __hierar_cls_agg_tight(self, mtx, k, beta, dims, orig_cts):
'''
Perform Agglomerative Hierarchical Clustering on the given matrix, assigning to each point
an individual cluster and progressively merging the groups whose projected "tightness" is less
than or equal to beta. Stop when no more merge options are available
mtx: matrix of assigned points
k: number of clusters
beta: rows who are tighter than this value are merged
dims: dimensions
orig_cts: IDs of the centroids from the original K-cluster run. We want to make sure we only
merge if one of the subclusters available is not already assigned to a "Main" cluster.
'''
#stopping condition
stop_merge = False
#while correct number of clusters has not been found
while stop_merge == False:
#get the two tightest row indices, and the value
tight_ind, t_val = self.__get_tightest_pair(mtx, dims, orig_cts)
#if the value is greater than beta, stop
if t_val > beta:
stop_merge = True
else:
#if value equal or less, merge and iterate again
#get cluster number of first row to merge
cls_nb1 = mtx[tight_ind[0],-1]
#get ID for second row to merge
cls_nb2 = mtx[tight_ind[1],-1]
#take the minimum of the two
cls_nb = min(cls_nb1, cls_nb2)
#merge them together
mtx = self.__merge_clusters(mtx, tight_ind, cls_nb)
#return new matrix
return mtx
def __heap_clust(self, mtx, k, beta, dims, orig_cts):
"""
Hierarchical Agglomerative Clustering using a priority queue implemented with a min_heap
Otherwise similar to above
mtx: matrix of assigned points (SUMS, SUMSQ, N, ID)
k: nb. of clusters
beta: merge rows that are "tighter" than this value
dims: dimensionality
orig_cts: original centroids
"""
#Create Heap
m_heap = BinHeap()
#generate list of indices
row_inds = list(np.arange(mtx.shape[0]))
#counter for future indices
future_ind = mtx.shape[0]
#populate the heap with valid merges
nb_rows = mtx.shape[0]
#for each pair of rows
for i in range(nb_rows):
for j in range(i + 1, nb_rows):
#get rows
r1 = mtx[i,:]
r2 = mtx[j,:]
#check for valid merge
if self.__valid_merge(r1, r2, dims, orig_cts):
#get tightness
tightn = self.__merged_tightness(r1, r2, dims)
#get indices
index1 = row_inds[i]
index2 = row_inds[j]
ilist = [index1, index2]
#create heap object
addh = [tightn,ilist]
#add to heap
m_heap.insert(addh)
#heap populated, now check for merges
looping = True
while looping == True:
#if heap is empty, break
if m_heap.heapSize() == 0:
break
#check top element of heap
top = m_heap.top()
#if value exceeds beta, break
if top[0] > beta:
break
#assume value is still valid
#check if indices still exist, not merged before
m_inds = top[1]
inds_exist = all(e in row_inds for e in m_inds)
if inds_exist == True:
#proceed to merge and update heap
#get, from inds, matching rows in the matrix
ir1 = row_inds.index(m_inds[0])
ir2 = row_inds.index(m_inds[1])
tight_ri = [ir1, ir2]
#get ID to use for merged
#get cluster number of first row to merge
cls_nb1 = mtx[ir1,dims * 2 + 1]
#get ID for second row to merge
cls_nb2 = mtx[ir2,dims * 2 + 1]
#take the minimum of the two
cls_nb = min(cls_nb1, cls_nb2)
#merge rows
mtx = self.__merge_clusters(mtx, tight_ri, cls_nb)
#remove indices from index list
row_inds.remove(m_inds[0])
row_inds.remove(m_inds[1])
#add new index to index list
row_inds.append(future_ind)
future_ind +=1
#get the newly merged row
nrow = mtx[-1,:]
#populate heap with tightness metrics for new row
for i in range(mtx.shape[0] - 1):
#for every row except last (recently merged)
crow = mtx[i,:]
#if the merge is valid
if self.__valid_merge(crow, nrow, dims, orig_cts):
#same process as above during initialization
#get tightness
p_tight = self.__merged_tightness(nrow, crow, dims)
#get associated row indices
#latest for new row
nr_ind = row_inds[-1]
#get other row index
cr_ind = row_inds[i]
inlst = [nr_ind, cr_ind]
#create heap object
nheap_o = [p_tight, inlst]
#add to heap
m_heap.insert(nheap_o)
#regardless of whether we have merged or not, pop top of heap
m_heap.remove()
#restart loop until end conditions fulfilled
#return reduced matrix
return mtx
def __secondary_compression(self, mtx, centroids, dims, beta, k2):
'''
Secondary Compression Step. Take remaining singleton points and attempt
to cluster them through k-means. Find subclusters that are "tight", then
merge them together through agglomerative hierarchical clustering while
the tightness bound still holds.
beta: tightness bound, standard deviation. Merge subclusters while they
are still considered "tight".
k2: number of clusters for subclustering, assumed k2 > K
'''
#separate singleton points from clusters
#indices for singletons
single_inds = np.where(mtx[:,dims*2] == 1)[0]
#indices for clusters
clust_inds = [x for x in list(range(mtx.shape[0])) if x not in single_inds]
#separate
#singleton elements
singletons = mtx[single_inds,:]
#clustered elements
clustered_pts = mtx[clust_inds,:]
#If the value of k2 is greater than the number of singletons,
#skip secondary compression
if k2 > singletons.shape[0]:
return mtx
#run kmeans on the singleton points with k2 > K
#only if the number of singleton points exceeds k2
subclusters, subcls_cts = self.__kmeans_group(singletons[:,:-1], k2, convert=False, dm=dims)
#adjust IDs of subclusters so that they are not confounded with
#"main" centroids
#get number of centroids
octs_nb = centroids.shape[0]
#get IDs for the K centroids
k1_ids = np.unique(centroids[:,-1])
#adjust IDs
subclusters[:,-1] += octs_nb
subcls_cts[:,-1] += octs_nb
#Identify "Tight" Subclusters
#get tightness for subclusters
sub_tight = np.apply_along_axis(lambda x: self.__get_tightness(x, dims), 1, subcls_cts)
#identify "tight" subclusters
#discard any over the threshold
tight_thresh = sub_tight > beta
#get indices
tight_inds = np.where(tight_thresh == False)[0]
#get the others
loose_inds = np.where(tight_thresh == True)[0]
#proceed if there are any tight subclusters
if len(tight_inds) > 0:
#slice
tight_cls = subcls_cts[tight_inds,:]
#add to list of clusters from earlier
cls_plus_t = np.vstack((clustered_pts, tight_cls))
#perform agglomerative hierarchical clustering on cls_plus_t
#NAIVE IMPLEMENTATION: Commented out and use alternate version when ready
#cls_merged = self.__hierar_cls_agg_tight(cls_plus_t, k2, beta, dims, k1_ids)
#PRIORITY LIST IMPLEMENTATION:
cls_merged = self.__heap_clust(cls_plus_t, k2, beta, dims, k1_ids)
#slice loose centroids
loose_cls = subcls_cts[loose_inds,:]
#get remaining singletons that were not merged
subc_nm = np.apply_along_axis(lambda x: x[-1] in loose_cls[:,-1], 1, subclusters)
unmerged_inds = np.where(subc_nm == True)[0]
#print('Unmerged inds:', unmerged)
#print(unmerged_inds)
#slice singleton list
loose_singles = subclusters[unmerged_inds,:]
#stack with centroids/tight clusters
final_mtx = np.vstack((cls_merged, loose_singles))
else:
#no tight subclusters, just return original matrix
final_mtx = mtx
return final_mtx
def __bfr_loop(self, data, centroids, k, dims):
"""
The standard loop for the BFR algorithm:
K-means, then Primary Compression, then Secondary Compression.
K-means is not started from scratch: points are assigned to the centroids carried over from the previous pass.
data: matrix of unassigned points, in cluster format
centroids: matrix of points chosen as centroids
"""
#assign data to centroids and perform k-means
mtx_assign, new_cents = self.__kmeans_assign(data, centroids, self.__k, self.__dims)
#primary compression
compressed_asg = self.__primary_compression(mtx_assign, new_cents, self.__dims, self.__alpha)
#secondary compression
#k2 > K. Set to k2 = K * 2
compressed2_asg = self.__secondary_compression(compressed_asg, new_cents, self.__dims, self.__beta, self.__k * 2)
#return compressed matrix and new centroids
return compressed2_asg, new_cents
def __create_clusters(self, centroids, mtx):
"""
Given a list of centroids and a matrix of assigned points, create cluster objects
and store them
"""
#for each centroid:
for i in range(centroids.shape[0]):
#create base cluster
cluster_W = Cluster(None, None)
#set center
#take sum of points and divide by size of group
cluster_W.center = list(centroids[i,:self.__dims] / centroids[i,-2])
#add to list of clusters
self.__clusters.append(cluster_W)
#iterating through matrix to store values
for i in range(mtx.shape[0]):
#identify assigned centroid
cent = mtx[i,-1]
#get points
point = list(mtx[i,:self.__dims])
#add to the right cluster
self.__clusters[int(cent)].points.append(point)
self.__clusters[int(cent)].indexes.append(i)
def get_clusters(self):
"""
Return list of clusters
"""
return self.__clusters
def get_indexes(self):
"""
Return list of Indexes
"""
return [cluster.indexes for cluster in self.__clusters]
def get_centres(self):
"""
Return Cluster Centroids
"""
return [cluster.center for cluster in self.__clusters]
def cluster_noPart(self):
"""
Cluster the given data without partitioning it. Essentially going through
one cycle of KMEANS, PRIMARY COMPRESSION and SECONDARY COMPRESSION, then returning a result.
"""
#begin by performing K-Means on the data
data = self.__data
self.__dims = data.shape[1]
assignments_k, centroids_k = self.__kmeans_group(data, self.__k)
#next, do primary compression
compressed_asg = self.__primary_compression(assignments_k, centroids_k, self.__dims, self.__alpha)
#next, secondary compression
#using k2 = 2 * K for now.
compressed2_asg = self.__secondary_compression(compressed_asg, centroids_k, self.__dims, self.__beta, self.__k * 2)
#reassign centroids to points from secondary compr.
reassigned = self.__assign_pts(compressed2_asg[:,:-1], centroids_k, self.__dims)
#reassign until convergence
#get final centroids
assignments_k, centroids_k = self.__kmeans_converge(reassigned, centroids_k, self.__k, self.__dims)
#convert data to group format
data_g = self.__convertStoG(self.__data)
#assign to original points
final_assign = self.__assign_pts(data_g, centroids_k, self.__dims)
#create cluster objects
self.__create_clusters(centroids_k, final_assign)
#return final_assign, centroids_k
def cluster_partition(self, filename, chunk_size, separator, k, alpha, beta, truths=True):
"""
Read a dataset from file in chunks of the specified size.
filename: name of file to read
chunk_size: size of the chunks to load into memory, in bytes
separator: separator for the file to read
Then BFR arguments...
truths : if True, ignore the last column during clustering, assuming it holds the ground-truth labels
"""
#set params
self.__data = None
self.__k = k
self.__alpha = alpha
self.__beta = beta
#list of Clusters
self.__clusters = []
#open the file
f = open(filename, "r")
#read the first line to determine the number of columns / item size
line1 = f.readline()
#turn into numpy array
#take 1 less column if truth values are read
l1 = np.fromstring(line1, sep=separator) if truths == False else np.fromstring(line1, sep=separator)[:-1]
#get memory size per line, size is size of item by number of columns
chunk_line = l1.itemsize * l1.size
#get number of columns
nb_cols = l1.size
#
#Data Dimensionality
self.__dims = nb_cols
#get number of lines to load per partition
lines_per_chunk = int(chunk_size / chunk_line)
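#Illustrative sizing check (added comment; concrete numbers are assumptions):
#with float64 data and 2 feature columns (e.g. flame.dat read with truths=True),
#chunk_line = 8 * 2 = 16 bytes, so chunk_size = 4096 gives 256 lines per chunk.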
#check for end of file
end_file = False
#checks for first iteration exception
first_iter = True
f_iter2 = True
#sum total of data to return
data_total = np.zeros(nb_cols).reshape(1,nb_cols)
#holding centroids
cents = None
while end_file == False:
#until end of file is reached
#array with dummy line, to be removed
data_m = np.zeros(nb_cols).reshape(1,nb_cols)
#First, read the next chunk of data into memory
for i in range(lines_per_chunk):
#check if this is the first iteration
if first_iter == True:
#special case for first iteration
#add first line to matrix
data_m = np.vstack((data_m, l1))
#remove first iteration
first_iter = False
else:
#normal execution
#read a line from the file
nline = f.readline()
#check line size
l_size = len(nline)
#if the string read is of length 0, end of file reached
if l_size == 0:
#mark end of file, break loop
end_file = True
break
#otherwise, continue
#convert to numpy array
line_a = np.fromstring(nline, sep=separator) if truths == False else np.fromstring(nline, sep=separator)[:-1]
#add to matrix
data_m = np.vstack((data_m, line_a))
#loop complete
#if resulting matrix had no rows added, stop loop
if data_m.shape[0] == 1:
break
#remove dummy line
data_m = data_m[1:,:]
#otherwise, continue
#convert data to summary format
data_m = self.__convertStoG(data_m)
#do operations here
#add to totals
if f_iter2 == True: #check for first iteration
#keep the uncompressed data in memory for final assignment
self.__data = data_m
#set as total
data_total = data_m
#run K-means on the data, get assigned points and centroids
#remove the assigned column from data_total
data_total, cents = self.__kmeans_group(data_total, self.__k, convert=False, dm=self.__dims)
#pass a loop of BFR
data_total, cents = self.__bfr_loop(data_total[:,:-1], cents, self.__k, self.__dims)
#drop point assignments
data_total = data_total[:,:-1]
#first iter done
f_iter2 = False
else:
#add to uncompressed data
self.__data = np.vstack((self.__data, data_m))
#add to working data
data_total = np.vstack((data_total, data_m))
#perform BFR loop
data_total, cents = self.__bfr_loop(data_total, cents, self.__k, self.__dims)
#drop point assignments
data_total = data_total[:,:-1]
#close file
f.close()
#final assignment
final_assign = self.__assign_pts(self.__data, cents, self.__dims)
#convert to cluster object
self.__create_clusters(cents, final_assign)
#done
if __name__ == "__main__":
#testing
data_path = Path('./data/2d/shaped/flame.dat')
#load data
flame = np.loadtxt(data_path, delimiter="\t")
b = BFR(flame[:,:-1], 2)
print("Clustering...")
b.cluster_noPart()
print("Results:")
print(b.get_clusters())
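#Illustrative sketch (added, not part of the original script): the same dataset
#could also be clustered in chunks with cluster_partition(); the chunk_size,
#alpha and beta values below are arbitrary example assumptions.
#b2 = BFR(flame[:,:-1], 2)
#b2.cluster_partition(str(data_path), chunk_size=4096, separator="\t",
#                     k=2, alpha=2.0, beta=1.0, truths=True)
#print(b2.get_centres())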
```
#### File: soen691-clustering-project/soen691_clustering_project/clustering.py
```python
class Clustering:
def get_clusters(self):
raise NotImplementedError("The method not implemented")
def get_indexes(self):
raise NotImplementedError("The method not implemented")
```
#### File: soen691-clustering-project/soen691_clustering_project/hierarchical_agglomerative.py
```python
from cluster import Cluster
from clustering import Clustering
import heapq
class HierarchicalAgglomerative(Clustering):
def __init__(self, data, number_of_clusters):
self.__data = data
self.__number_of_clusters = number_of_clusters
self.__clusters = {str([index]): Cluster(point, index) for index, point in enumerate(self.__data)}
self.__dimension = len(data[0]) if len(data) > 0 else 0
self.__validate_arguments()
def clustering(self):
self.__build_priority_queue(self.__compute_distances())
old_clusters = []
while len(self.__clusters) > self.__number_of_clusters:
min_distance, min_heap_node = heapq.heappop(self.__heap)
closest_clusters = min_heap_node[1]
if not self.__valid_heap_node(min_heap_node, old_clusters):
continue
str_closest_clusters = list(map(str, closest_clusters))
closest_clusters_objs = [self.__clusters[str_closest_clusters[0]], self.__clusters[str_closest_clusters[1]]]
merged_cluster = Cluster(None, None)
merged_cluster.points = closest_clusters_objs[0].points + closest_clusters_objs[1].points
merged_cluster.indexes = closest_clusters_objs[0].indexes + closest_clusters_objs[1].indexes
merged_cluster.center = self.__compute_centroid(merged_cluster)
del self.__clusters[str_closest_clusters[0]]
del self.__clusters[str_closest_clusters[1]]
old_clusters.extend(closest_clusters)
self.__update_heap(merged_cluster)
self.__clusters[str(merged_cluster.indexes)] = merged_cluster
def __compute_centroid(self, cluster):
center = [0] * self.__dimension
for point in cluster.points:
for idx_coord, coord in enumerate(point):
center[idx_coord] += coord
return [coord / len(cluster) for coord in center]
def __compute_distances(self):
distances = []
for idx_n, cluster_n in self.__clusters.items():
for idx_i, cluster_i in self.__clusters.items():
if idx_n != idx_i:
dist = cluster_n.distance(cluster_i)
distances.append((dist, [dist, [cluster_n.indexes, cluster_i.indexes]]))
return distances
def __build_priority_queue(self, distances):
heapq.heapify(distances)
self.__heap = distances
return self.__heap
def __update_heap(self, new_cluster):
for idx, cluster in self.__clusters.items():
dist = new_cluster.distance(cluster)
heapq.heappush(self.__heap, (dist, [dist, [new_cluster.indexes, cluster.indexes]]))
def __valid_heap_node(self, heap_node, old_clusters):
for old_cluster in old_clusters:
if old_cluster in heap_node[1]:
return False
return True
def __merge_clusters(self):
""" Naive approach"""
min_distance = float('inf')
closest_clusters = None
for idx_a, cluster_a in enumerate(self.__clusters):
for cluster_b in self.__clusters[idx_a + 1:]:
distance = cluster_a.distance(cluster_b)
if distance < min_distance:
min_distance = distance
closest_clusters = [cluster_a, cluster_b]
merged_cluster = Cluster(None, None)
merged_cluster.points = closest_clusters[0].points + closest_clusters[1].points
merged_cluster.indexes = closest_clusters[0].indexes + closest_clusters[1].indexes
merged_cluster.center = [0] * self.__dimension
for point in merged_cluster.points:
for idx_coord, coord in enumerate(point):
merged_cluster.center[idx_coord] += coord
merged_cluster.center = [coord / len(merged_cluster) for coord in merged_cluster.center]
self.__clusters.remove(closest_clusters[0])
self.__clusters.remove(closest_clusters[1])
self.__clusters.append(merged_cluster)
def get_clusters(self):
return list(self.__clusters.values())
def get_indexes(self):
return [cluster.indexes for cluster in self.__clusters.values()]
def __validate_arguments(self):
if len(self.__data) == 0:
raise ValueError("Empty input data. Data should contain at least one point.")
if self.__number_of_clusters <= 0:
raise ValueError(
"Incorrect amount of clusters '{:d}'. Amount of cluster should be greater than 0.".format(
self.__number_of_clusters))
elif not type(self.__number_of_clusters) == int:
raise ValueError(
"Incorret type for amount of clusters '{:d}'. Amount of cluster should be an integer.".format(
self.__number_of_clusters))
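# Illustrative usage sketch (added, not part of the original module); assumes four
# 2-D points that split cleanly into two groups:
# hac = HierarchicalAgglomerative([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]], 2)
# hac.clustering()
# hac.get_indexes()  # e.g. [[0, 1], [2, 3]] (cluster order is not guaranteed)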
```
#### File: soen691-clustering-project/soen691_clustering_project/soen691_clustering_project.py
```python
import glob
import os
from collections import defaultdict
from cure import Cure
from kmeans import KMeans
from bfr import BFR
from hierarchical_agglomerative import HierarchicalAgglomerative
from visualizer import ClusteringVisualizer
from sklearn.datasets.samples_generator import make_circles
import numpy as np
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Helper Functions """
""" """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def read_all_data_labeled(directory, extension, pattern='*', dimension='2d', all_data_dict=None):
data_dict = defaultdict(dict) if all_data_dict is None else all_data_dict
for path in glob.glob(directory + pattern + extension):
data_dict[dimension][os.path.basename(path).replace(extension, '')] = read_data_labeled(path)
return data_dict
def read_data_labeled(filename):
with open(filename) as f:
data_dict = defaultdict(list)
for line in f.readlines():
split = line.split('\t')
size = len(split)
data_dict[split[size - 1].strip()].append([float(coord) for coord in split[:size - 1]])
return data_dict
def get_data(data_name, data_dict):
data = []
for cluster in data_dict[data_name].values():
data.extend(cluster)
return data
def compare_clustering(data, clusters):
data_to_analyze = {}
clustering_to_analyze = {}
for label, points_list in enumerate(sorted([sorted(cluster) for cluster in data.values()])):
for point in sorted(points_list):
data_to_analyze[tuple(point)] = int(label)
sorted_clusters = sorted([sorted(cluster.points) for cluster in clusters]) if not type(
clusters[0]) == list else sorted([sorted(cluster) for cluster in clusters])
for label, cluster in enumerate(sorted_clusters):
for point in cluster:
clustering_to_analyze[tuple(point)] = int(label)
total = len(data_to_analyze)
miss = 0.0
for point, label in clustering_to_analyze.items():
if data_to_analyze[point] != label:
miss += 1
return (1 - miss / total) * 100
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Main Method """
""" """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if __name__ == '__main__':
all_data = read_all_data_labeled('./data/3d/shaped/', extension='.dat', dimension='3d',
all_data_dict=read_all_data_labeled('./data/2d/shaped/', '.dat'))
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Non-Spherical 2D """
""" """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
results_nonspherical_2d = []
dataset_name = 'spiral'
dataset = get_data(dataset_name, all_data['2d'])
real_clusters = all_data['2d'][dataset_name]
len_data = sorted([len(cluster) for cluster in all_data['2d'][dataset_name].values()])
n_clusters = len(all_data['2d'][dataset_name].keys())
print('---------{} DATASET---------'.format(dataset_name.upper()), len_data, n_clusters)
df_list = dataset
kmeans = KMeans(df_list, n_clusters)
kmeans.clustering()
kmeans_clusters = []
for indexes in kmeans.get_indexes():
cluster = []
for index in indexes:
cluster.append(df_list[index])
kmeans_clusters.append(cluster)
hac = HierarchicalAgglomerative(df_list, n_clusters)
hac.clustering()
cure = Cure(df_list, n_clusters, 0.15, 10)
cure.clustering()
bfr = BFR(data=df_list, k=n_clusters)
bfr.cluster_noPart()
results_nonspherical_2d.append([compare_clustering(real_clusters, kmeans_clusters),
compare_clustering(real_clusters, hac.get_clusters()),
compare_clustering(real_clusters, cure.get_clusters()),
compare_clustering(real_clusters, bfr.get_clusters())])
visualizer = ClusteringVisualizer(number_canvas=4, number_columns=2, number_clusters=n_clusters,
titles=['KMEANS', 'HAC', 'CURE', 'BFR'], fig_title='Path-based2: SPIRAL')
visualizer.add_clustering(kmeans.get_indexes(), df_list, canvas=0)
visualizer.add_clustering(hac.get_indexes(), df_list, canvas=1)
visualizer.add_clustering(cure.get_indexes(), df_list, canvas=2)
visualizer.add_clustering(bfr.get_indexes(), df_list, canvas=3)
visualizer.plot(invisible_axis=True)
dataset_name = 'jain'
dataset = get_data(dataset_name, all_data['2d'])
real_clusters = all_data['2d'][dataset_name]
len_data = sorted([len(cluster) for cluster in all_data['2d'][dataset_name].values()])
n_clusters = len(all_data['2d'][dataset_name].keys())
print('---------{} DATASET---------'.format(dataset_name.upper()), len_data, n_clusters)
df_list = dataset
kmeans = KMeans(df_list, n_clusters)
kmeans.clustering()
kmeans_clusters = []
for indexes in kmeans.get_indexes():
cluster = []
for index in indexes:
cluster.append(df_list[index])
kmeans_clusters.append(cluster)
hac = HierarchicalAgglomerative(df_list, n_clusters)
hac.clustering()
cure = Cure(df_list, n_clusters, 0.3, 5)
cure.clustering()
bfr = BFR(data=df_list, k=n_clusters)
bfr.cluster_noPart()
results_nonspherical_2d.append([compare_clustering(real_clusters, kmeans_clusters),
compare_clustering(real_clusters, hac.get_clusters()),
compare_clustering(real_clusters, cure.get_clusters()),
compare_clustering(real_clusters, bfr.get_clusters())])
visualizer = ClusteringVisualizer(number_canvas=4, number_columns=2, number_clusters=n_clusters,
titles=['KMEANS', 'HAC', 'CURE', 'BFR'], fig_title='Jain')
visualizer.add_clustering(kmeans.get_indexes(), df_list, canvas=0)
visualizer.add_clustering(hac.get_indexes(), df_list, canvas=1)
visualizer.add_clustering(cure.get_indexes(), df_list, canvas=2)
visualizer.add_clustering(bfr.get_indexes(), df_list, canvas=3)
visualizer.plot(invisible_axis=True)
circles = make_circles(factor=0.5, noise=0.05, n_samples=700)
circles_clusters = defaultdict(list)
real_clusters = defaultdict(list)
for k, v in list(zip(circles[0], circles[1])):
circles_clusters[v].append(k)
real_clusters[v].append(list(k))
circles_clusters = list(circles_clusters.values())
df = np.concatenate(circles_clusters)
df_list = df.tolist()
n_clusters = 2
# dataset_name = 'r15'
# dataset = get_data(dataset_name, all_data['2d'])
# real_clusters = all_data['2d'][dataset_name]
# len_data = sorted([len(cluster) for cluster in all_data['2d'][dataset_name].values()])
# n_clusters = len(all_data['2d'][dataset_name].keys())
# print('---------{} DATASET---------'.format(dataset_name.upper()), len_data, n_clusters)
# df_list = dataset
kmeans = KMeans(df_list, n_clusters)
kmeans.clustering()
kmeans_clusters = []
for indexes in kmeans.get_indexes():
cluster = []
for index in indexes:
cluster.append(df_list[index])
kmeans_clusters.append(cluster)
hac = HierarchicalAgglomerative(df_list, n_clusters)
hac.clustering()
cure = Cure(df_list, n_clusters, 0.1, 10)
cure.clustering()
bfr = BFR(data=df_list, k=n_clusters)
bfr.cluster_noPart()
results_nonspherical_2d.append([compare_clustering(real_clusters, kmeans_clusters),
compare_clustering(real_clusters, hac.get_clusters()),
compare_clustering(real_clusters, cure.get_clusters()),
compare_clustering(real_clusters, bfr.get_clusters())])
visualizer = ClusteringVisualizer(number_canvas=4, number_columns=2, number_clusters=n_clusters,
titles=['KMEANS', 'HAC', 'CURE', 'BFR'], fig_title='Circles')
visualizer.add_clustering(kmeans.get_indexes(), df_list, canvas=0)
visualizer.add_clustering(hac.get_indexes(), df_list, canvas=1)
visualizer.add_clustering(cure.get_indexes(), df_list, canvas=2)
visualizer.add_clustering(bfr.get_indexes(), df_list, canvas=3)
visualizer.plot(invisible_axis=True)
dataset_name = 'pathbased'
dataset = get_data(dataset_name, all_data['2d'])
real_clusters = all_data['2d'][dataset_name]
len_data = sorted([len(cluster) for cluster in all_data['2d'][dataset_name].values()])
n_clusters = len(all_data['2d'][dataset_name].keys())
print('---------{} DATASET---------'.format(dataset_name.upper()), len_data, n_clusters)
df_list = dataset
kmeans = KMeans(df_list, n_clusters)
kmeans.clustering()
kmeans_clusters = []
for indexes in kmeans.get_indexes():
cluster = []
for index in indexes:
cluster.append(df_list[index])
kmeans_clusters.append(cluster)
hac = HierarchicalAgglomerative(df_list, n_clusters)
hac.clustering()
cure = Cure(df_list, n_clusters, 0.1, 10)
cure.clustering()
bfr = BFR(data=df_list, k=n_clusters)
bfr.cluster_noPart()
results_nonspherical_2d.append([compare_clustering(real_clusters, kmeans_clusters),
compare_clustering(real_clusters, hac.get_clusters()),
compare_clustering(real_clusters, cure.get_clusters()),
compare_clustering(real_clusters, bfr.get_clusters())])
visualizer = ClusteringVisualizer(number_canvas=4, number_columns=2, number_clusters=n_clusters,
titles=['KMEANS', 'HAC', 'CURE', 'BFR'], fig_title='Path-based1')
visualizer.add_clustering(kmeans.get_indexes(), df_list, canvas=0)
visualizer.add_clustering(hac.get_indexes(), df_list, canvas=1)
visualizer.add_clustering(cure.get_indexes(), df_list, canvas=2)
visualizer.add_clustering(bfr.get_indexes(), df_list, canvas=3)
visualizer.plot(invisible_axis=True)
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" Non-Spherical 3D """
""" """
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset_name = 'fcps_atom'
dataset = get_data(dataset_name, all_data['3d'])
real_clusters = all_data['3d'][dataset_name]
len_data = sorted([len(cluster) for cluster in all_data['3d'][dataset_name].values()])
n_clusters = len(all_data['3d'][dataset_name].keys())
print('---------{} DATASET---------'.format(dataset_name.upper()), len_data, n_clusters)
df_list = dataset
kmeans = KMeans(df_list, n_clusters)
kmeans.clustering()
kmeans_clusters = []
for indexes in kmeans.get_indexes():
cluster = []
for index in indexes:
cluster.append(df_list[index])
kmeans_clusters.append(cluster)
hac = HierarchicalAgglomerative(df_list, n_clusters)
hac.clustering()
cure = Cure(df_list, n_clusters, 0.1, 10)
cure.clustering()
bfr = BFR(data=df_list, k=n_clusters)
bfr.cluster_noPart()
print([compare_clustering(real_clusters, kmeans_clusters),
compare_clustering(real_clusters, hac.get_clusters()),
compare_clustering(real_clusters, cure.get_clusters()),
compare_clustering(real_clusters, bfr.get_clusters())])
visualizer = ClusteringVisualizer(number_canvas=4, number_columns=2, number_clusters=n_clusters,
titles=['KMEANS', 'HAC', 'CURE', 'BFR'], fig_title='FCPS Atom')
visualizer.add_clustering(kmeans.get_indexes(), df_list, canvas=0)
visualizer.add_clustering(hac.get_indexes(), df_list, canvas=1)
visualizer.add_clustering(cure.get_indexes(), df_list, canvas=2)
visualizer.add_clustering(bfr.get_indexes(), df_list, canvas=3)
visualizer.plot(invisible_axis=True)
dataset_name = 'fcps_chainlink'
dataset = get_data(dataset_name, all_data['3d'])
real_clusters = all_data['3d'][dataset_name]
len_data = sorted([len(cluster) for cluster in all_data['3d'][dataset_name].values()])
n_clusters = len(all_data['3d'][dataset_name].keys())
print('---------{} DATASET---------'.format(dataset_name.upper()), len_data, n_clusters)
df_list = dataset
kmeans = KMeans(df_list, n_clusters)
kmeans.clustering()
kmeans_clusters = []
for indexes in kmeans.get_indexes():
cluster = []
for index in indexes:
cluster.append(df_list[index])
kmeans_clusters.append(cluster)
hac = HierarchicalAgglomerative(df_list, n_clusters)
hac.clustering()
cure = Cure(df_list, n_clusters, 0.1, 10)
cure.clustering()
bfr = BFR(data=df_list, k=n_clusters)
bfr.cluster_noPart()
print([compare_clustering(real_clusters, kmeans_clusters),
compare_clustering(real_clusters, hac.get_clusters()),
compare_clustering(real_clusters, cure.get_clusters()),
compare_clustering(real_clusters, bfr.get_clusters())])
visualizer = ClusteringVisualizer(number_canvas=4, number_columns=2, number_clusters=n_clusters,
titles=['KMEANS', 'HAC', 'CURE', 'BFR'], fig_title='FCPS Chainlink')
visualizer.add_clustering(kmeans.get_indexes(), df_list, canvas=0)
visualizer.add_clustering(hac.get_indexes(), df_list, canvas=1)
visualizer.add_clustering(cure.get_indexes(), df_list, canvas=2)
visualizer.add_clustering(bfr.get_indexes(), df_list, canvas=3)
visualizer.plot(invisible_axis=True)
```
|
{
"source": "jeanluc243/Apprends-Moi-Python",
"score": 4
}
|
#### File: POO/exercices3/CompteBancaire.py
```python
class CompteBancaire():
"""
Bank account management
"""
def __init__(self, nom = 'Dupont', solde = 1000 ):
"""
constructor
"""
self.nom = nom
self.solde = solde
def depot(self, somme):
"""
add somme to the balance
"""
self.solde = self.solde + somme
def retrait(self, somme):
"""
withdraw somme from the balance
"""
self.solde = self.solde - somme
def affiche(self):
print("Le compte bancaire de {0} est de {1} euros".format(
self.nom , self.solde))
if __name__ == '__main__':
compte1 = CompteBancaire('Duchmo1', 800)
compte1.depot(350)
compte1.retrait(200)
compte1.affiche()
```
|
{
"source": "jeanlucancey/pronunciamento",
"score": 2
}
|
#### File: pronunciamento/blog/views.py
```python
from django.shortcuts import (
get_object_or_404, redirect, render)
from django.views.decorators.http import \
require_http_methods
from django.views.generic import View
from .forms import PostForm
from .models import Post
class PostCreate(View):
form_class = PostForm
template_name = 'blog/post_form.html'
def get(self, request):
return render(
request,
self.template_name,
{'form': self.form_class()})
def post(self, request):
bound_form = self.form_class(request.POST)
if bound_form.is_valid():
new_post = bound_form.save()
return redirect(new_post)
else:
return render(
request,
self.template_name,
{'form': bound_form})
class PostDelete(View):
def get(self, request, year, month, slug):
post = get_object_or_404(
Post,
pub_date__year=year,
pub_date__month=month,
slug__iexact=slug)
return render(
request,
'blog/post_confirm_delete.html',
{'post': post})
def post(self, request, year, month, slug):
post = get_object_or_404(
Post,
pub_date__year=year,
pub_date__month=month,
slug__iexact=slug)
post.delete()
return redirect('blog_post_list')
@require_http_methods(['HEAD', 'GET'])
def post_detail(request, year, month, slug):
post = get_object_or_404(
Post,
pub_date__year=year,
pub_date__month=month,
slug=slug)
return render(
request,
'blog/post_detail.html',
{'post': post})
class PostList(View):
def get(self, request):
return render(
request,
'blog/post_list.html',
{'post_list': Post.objects.all()})
class PostUpdate(View):
form_class = PostForm
model = Post
template_name = 'blog/post_form_update.html'
def get_object(self, year, month, slug):
return get_object_or_404(
self.model,
pub_date__year=year,
pub_date__month=month,
slug=slug)
def get(self, request, year, month, slug):
post = self.get_object(year, month, slug)
context = {
'form': self.form_class(
instance=post),
'post': post,
}
return render(
request, self.template_name, context)
def post(self, request, year, month, slug):
post = self.get_object(year, month, slug)
bound_form = self.form_class(
request.POST, instance=post)
if bound_form.is_valid():
new_post = bound_form.save()
return redirect(new_post)
else:
context = {
'form': bound_form,
'post': post,
}
return render(
request,
self.template_name,
context)
```
#### File: pronunciamento/cestmoilechef/views.py
```python
import django
from os import system
from django.http import (Http404, HttpResponse)
from django.template import Context, loader
from django.shortcuts import (get_object_or_404, \
redirect, \
# render_to_response, \
render)
from django.views.generic import View # Pour faire des class-based views, voir p. 255
from blog.models import Post
from jla_utils.utils import Fichier
from .models import Categorie, Photo
from .forms import CategorieForm, PhotoForm
# * A - Divers à mettre en tête
# ** A1 - Principal
def pronunciamento(request):
template = loader.get_template('cestmoilechef/pronunciamento.html')
message = "J'ai quétchoze à dire, et ce que j'ai à dire, " + \
"c'est que c'est moi le chef, pas ce connard de Django!"
context = Context({'message': message})
output = template.render(context)
return HttpResponse(output)
# ** A2 - Routines
def vireGuill (mention):
if mention[0] == '"' and mention[len(mention) - 1] == '"':
mention = mention[1:len(mention) - 1]
return mention
# * B - Categorie, dans l'ordre L-CRUD-PIE
# ** B1 - Categorie - L comme List
def listeCategories(request):
pageEntiere = ""
pageEntiere += "<html>\n"
pageEntiere += "<body>\n"
pageEntiere += "<p>Voici la liste des catégories incluses dans la base "
pageEntiere += "(nom complet, puis slug).</p>\n"
mesCategories = Categorie.objects.all()
nbCategories = Categorie.objects.count()
for numCategorie in range(nbCategories):
maCategorie = mesCategories[numCategorie]
monNom = maCategorie.nom
monSlug = maCategorie.slug
ligneAEcrire = "<p>[%s] - [%s]</p>\n" % (monNom, monSlug)
pageEntiere += ligneAEcrire
pageEntiere += "</body>\n"
pageEntiere += "</html>\n"
return HttpResponse(pageEntiere)
def listeCategories2(request):
# Function written without shortcuts, which I find much clearer and more flexible,
# but which Pinkham recommends replacing with listeCategories3 or rather
# listeCategories4
categorie_list = Categorie.objects.all()
template = loader.get_template('cestmoilechef/categorie_list.html')
context = Context({'categorie_list': categorie_list})
output = template.render(context)
return HttpResponse(output)
# def listeCategories3(request):
# Variant of the previous function, with shortcuts, and nonetheless
# already obsolete because render tends to replace render_to_response (it does
# more things, at the cost of slower response times).
# Personally, I find it much less clear anyway,
# and not at all flexible on top of that. See Pinkham 5.6.3 p. 139
# return render_to_response('cestmoilechef/categorie_list.html',
# {'categorie_list': Categorie.objects.all()})
# What follows was disabled in favour of a class-based view, see p. 255
# def listeCategories4(request):
# Variant of the previous variant (the one with #), using the render shortcut
# instead of the render_to_response shortcut (it does more supposedly
# useful things, though at the cost of slower response times).
# It is therefore not really equivalent to listeCategories3, nor above all to listeCategories2,
# to my great despair, because listeCategories2 seems much clearer
# and more flexible to me. See Pinkham 5.6.3 p. 139
# return render(request, \
# 'cestmoilechef/categorie_list.html', \
# {'categorie_list': Categorie.objects.all()})
# Replacement of the previous function with a class-based view
class CategorieList(View):
def get(self, request):
return render(request, \
'cestmoilechef/categorie_list.html', \
{'categorie_list': Categorie.objects.all()})
# ** B2 - Categorie - C comme Create
# I am fairly sure the following method is obsolete, since Pinkham replaces it
# with the class-based view CategorieCreate in 9.2.2.3 p. 246,
# but that is his own way of doing things, and he points out that writing something
# like categorieCreate is what most tutorials recommend,
# so it is better to keep this code as a reference.
def categorieCreate(request):
# Lifted from Pinkham, p. 244. The style is not mine, and Pinkham does not
# really like it either; he just mentions it as standard practice.
# In any case, note that there are two returns inside if branches, and
# not a single one at the end of the method, as I usually do.
if request.method == 'POST':
# bind data to form
form = CategorieForm(request.POST)
# if the data is valid:
if form.is_valid(): # Calling this method creates errors and cleaned_data
# create new object from data
new_categorie = form.save()
return redirect(new_categorie)
# show webpage for new objects
# implicit else: form contains invalid data
else: # request.method != 'POST'
# show unbound HTML form
form = CategorieForm()
return render(
request,
'cestmoilechef/categorie_form.html',
{'form': form}
)
class CategorieCreate(View):
form_class = CategorieForm
template_name = 'cestmoilechef/categorie_form.html'
def get(self, request):
return render(
request,
self.template_name,
{'form': self.form_class()}
)
def post(self, request):
# Note: Pinkham-style code, with two returns inside an if branch
bound_form = self.form_class(request.POST)
if bound_form.is_valid():
new_categorie = bound_form.save()
return redirect(new_categorie)
else:
return render(
request,
self.template_name,
{'form': bound_form}
)
# ** B3 - Categorie - R comme Read
def categorieDetailPabon(request):
# As Pinkham explains at the bottom of page 129, you can do
# what I do here, but it is ugly and not clean. Never mind,
# it seems pedagogical to me.
monFullPath = request.path_info # And this variable can then be fiddled with however we want
pageEntiere = ""
pageEntiere += "<html>\n"
pageEntiere += "<body>\n"
pageEntiere += "<p>Cette page cherche juste à montrer que l'URL peut être bidouillée à la mimine.</p>\n"
pageEntiere += "<p>monFullPath est une variable et vaut : [%s]</p>\n" % (monFullPath)
pageEntiere += "</body>\n"
pageEntiere += "</html>\n"
return HttpResponse(pageEntiere)
def categorieDetailPabon2(request, slugUrl):
# To make things clearer and avoid writing horrors like
# slug=slug, I distinguish slug (attribute of a category) from slugUrl (the
# value written in the URL)
try:
categorie = Categorie.objects.get(slug__iexact = slugUrl)
except Categorie.DoesNotExist:
raise Http404
pageEntiere = ""
pageEntiere += "<html>\n"
pageEntiere += "<body>\n"
pageEntiere += "<p>Cette page cherche juste à vérifier que le slug est bien compris.</p>\n"
pageEntiere += "<p>slug est une variable et vaut : [%s]</p>\n" % (slugUrl)
pageEntiere += "<p>categorie.nom vaut : [%s]</p>\n" % (categorie.nom)
pageEntiere += "<p>categorie.slug vaut : [%s]</p>\n" % (categorie.slug)
pageEntiere += "</body>\n"
pageEntiere += "</html>\n"
return HttpResponse(pageEntiere)
def categorieDetail(request, slugUrl):
# See categorieDetailShortcut below, a shortened version that does
# the same thing with a shortcut, and also categorieDetailShortcut2, which
# does a little more with two shortcuts, which is surely more
# orthodox and recommended by best practices, but unreadable and
# un-modifiable, to my great despair.
try:
categorie = Categorie.objects.get(slug__iexact = slugUrl)
except Categorie.DoesNotExist:
raise Http404
template = loader.get_template('cestmoilechef/categorie_detail.html')
context = Context({'categorie': categorie})
output = template.render(context)
return HttpResponse(output)
# def categorieDetailShortcut(request, slugUrl):
# Exactly the same thing as the previous function, but with a shortcut.
# Even though I disabled it, I recommend keeping it in case
# lines need to be inserted
# categorie = get_object_or_404(Categorie, slug__iexact = slugUrl)
# template = loader.get_template('cestmoilechef/categorie_detail.html')
# context = Context({'categorie': categorie})
# output = template.render(context)
# return HttpResponse(output)
def categorieDetailShortcut2(request, slugUrl):
# A little more than the previous version, and therefore than categorie_detail,
# which I nevertheless find much clearer and more flexible.
# See Pinkham 5.6.3 p. 139
categorie = get_object_or_404(Categorie, slug__iexact = slugUrl)
return render(request, \
'cestmoilechef/categorie_detail.html', \
{'categorie': categorie})
# ** B4 - Categorie - U comme Update
class CategorieUpdate(View):
form_class = CategorieForm
model = Categorie
template_name = 'cestmoilechef/categorie_form_update.html'
def get_object(self, slugArg):
return get_object_or_404(
self.model,
slug=slugArg
)
def get(self, request, slugUrl):
maCategorie = self.get_object(slugUrl)
context = {
'form': self.form_class(instance=maCategorie),
'categorie': maCategorie,
}
return render(request, self.template_name, context)
def post(self, request, slugUrl):
maCategorie = self.get_object(slugUrl)
bound_form = self.form_class(
request.POST,
instance=maCategorie
)
if bound_form.is_valid():
new_categorie = bound_form.save()
return redirect(new_categorie)
else:
context = {
'form': bound_form,
'categorie': maCategorie,
}
return render(
request,
self.template_name,
context
)
# ** B5 - Categorie - D comme Delete
class CategorieDelete(View):
def get(self, request, slugUrl):
maCategorie = get_object_or_404(
Categorie,
slug__iexact = slugUrl
# slug instead of slug__iexact would work
)
return render(request,
'cestmoilechef/categorie_confirm_delete.html',
{'categorie': maCategorie}
)
def post(self, request, slugUrl):
maCategorie = get_object_or_404(
Categorie,
slug__iexact = slugUrl
# slug instead of slug__iexact would work
)
maCategorie.delete()
return redirect('liste_categories')
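# Illustrative URLconf sketch (assumption, not taken from the original project):
# the class-based views above expect a keyword argument named slugUrl, and the
# delete view redirects to a URL named 'liste_categories'. With the old-style
# django.conf.urls.url() of this Django generation, the wiring could look like:
# from django.conf.urls import url
# from .views import CategorieList, CategorieCreate, CategorieUpdate, CategorieDelete
# urlpatterns = [
#     url(r'^categories/$', CategorieList.as_view(), name='liste_categories'),
#     url(r'^categories/creer/$', CategorieCreate.as_view(), name='categorie_create'),
#     url(r'^categories/(?P<slugUrl>[\w-]+)/update/$', CategorieUpdate.as_view(), name='categorie_update'),
#     url(r'^categories/(?P<slugUrl>[\w-]+)/delete/$', CategorieDelete.as_view(), name='categorie_delete'),
# ]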
# ** B6 - Categorie - P comme Purge
def purgeCategories(request):
tableauDeLignes = []
tableauDeLignes.append("Cette page radioactive est vouée à détruire les catégories de la base.")
mesCategories = Categorie.objects.all()
nbCategories = Categorie.objects.count()
for numCategorie in range(nbCategories - 1, -1, -1):
maCategorie = mesCategories[numCategorie]
monNom = maCategorie.nom
monSlug = maCategorie.slug
ligneAEcrire = "%d - [%s] - [%s]\n" % (numCategorie, monNom, monSlug)
tableauDeLignes.append(ligneAEcrire)
# I comment out the following line, to be safe
# maCategorie.delete()
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
# ** B7 - Categorie - I comme Import
def importeCategories(request):
# This function deliberately does not use a template, so as
# not to completely forget how things can be done that way.
pageEntiere = ""
pageEntiere += "<html>\n"
pageEntiere += "<body>\n"
pageEntiere += "<p>Ceci est voué à remplir la table des categories à partir d'un fichier CSV.</p>\n"
monFichier = Fichier("categories.csv", False)
while monFichier.index < monFichier.longueur:
ligneLue = monFichier.litUneLigne()
ligneAEcrire = "<p>%s</p>" % (ligneLue)
pageEntiere += ligneAEcrire
mesBazars = ligneLue.split(',')
monNom = vireGuill(mesBazars[0])
monSlug = vireGuill(mesBazars[1])
ligneAEcrire = "<p>[%s] - [%s]</p>" % (monNom, monSlug)
pageEntiere += ligneAEcrire
# I comment out what follows because it worked and is not
# meant to be used twice
# Categorie.objects.create(nom=monNom, slug=monSlug)
monFichier.close()
pageEntiere += "</body>\n"
pageEntiere += "</html>\n"
return HttpResponse(pageEntiere)
# ** B8 - Categorie - E comme Export
def exporteCategories(request):
tableauDeLignes = []
tableauDeLignes.append("Cette page est vouée à permettre l'export des catégories.")
monFichier = Fichier("categories_export.csv", True)
mesCategories = Categorie.objects.all()
nbCategories = Categorie.objects.count()
for numCategorie in range(nbCategories):
maCategorie = mesCategories[numCategorie]
monNom = maCategorie.nom
monSlug = maCategorie.slug
ligneAEcrire = '"%s","%s"' % (monNom, monSlug)
monFichier.ecritUneLigne(ligneAEcrire)
tableauDeLignes.append(ligneAEcrire)
monFichier.close()
tableauDeLignes.append("En principe, si vous lisez ça, c'est que l'export a eu lieu.")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
# * C - Photo, dans l'ordre L-CRUD-PIE
# ** C1 - Photo - L comme List
def listePhotos(request):
pageEntiere = ""
pageEntiere += "<html>\n"
pageEntiere += "<body>\n"
pageEntiere += "<p>Voici la liste des photos incluses dans la base "
pageEntiere += "(nom abrégé, catégorie, puis nomEntier).</p>\n"
mesPhotos = Photo.objects.all()
nbPhotos = Photo.objects.count()
for numPhoto in range(nbPhotos):
maPhoto = mesPhotos[numPhoto]
monNomAbrege = maPhoto.nomAbrege
monNomComplet = maPhoto.nomComplet
maCateg = maPhoto.categorie.slug
ligneAEcrire = "<p>[%s] - [%s] - [%s]</p>\n" % (monNomAbrege, maCateg, monNomComplet)
pageEntiere += ligneAEcrire
pageEntiere += "</body>\n"
pageEntiere += "</html>\n"
return HttpResponse(pageEntiere)
def listePhotos2(request):
# Function written without shortcuts, which I find much clearer and more flexible,
# but which Pinkham recommends replacing with listePhotos3 or rather listePhotos4,
# and finally even with a class-based view, PhotoList.
# Even though I do not use this listing, I recommend keeping it because it
# allows a certain flexibility.
photo_list = Photo.objects.all()
template = loader.get_template('cestmoilechef/photo_list.html')
context = Context({'photo_list': photo_list})
output = template.render(context)
return HttpResponse(output)
# def listePhotos3(request):
# Variant of the previous function, with shortcuts, and nonetheless
# already obsolete because render tends to replace render_to_response (it does
# more things, at the cost of slower response times).
# Personally, I find it much less clear anyway,
# and not at all flexible on top of that. See Pinkham 5.6.3 p. 139
# return render_to_response('cestmoilechef/photo_list.html',
# {'photo_list': Photo.objects.all()})
# def listePhotos4(request):
# Variant of the previous variant (the one with #), using the render shortcut
# instead of the render_to_response shortcut (it does more supposedly
# useful things, though at the cost of slower response times).
# It is therefore not really equivalent to listePhotos3, nor above all to listePhotos2,
# to my great despair, because listePhotos2 seems much clearer
# and more flexible to me. See Pinkham 5.6.3 p. 139
# In the end, I replaced this listePhotos4 with a class-based view,
# see just below
# return render(request, \
# 'cestmoilechef/photo_list.html', \
# {'photo_list': Photo.objects.all()})
# Remplacement de la fonction précédente par une class-based view
class PhotoList(View):
def get(self, request):
return render(request, \
'cestmoilechef/photo_list.html', \
{'photo_list': Photo.objects.all()})
# ** C2 - Photo - C comme Create
class PhotoCreate(View):
form_class = PhotoForm
template_name = 'cestmoilechef/photo_form.html'
def get(self, request):
return render(
request,
self.template_name,
{'form': self.form_class()}
)
def post(self, request):
# Attention, code façon Pinkham, avec deux return dans une boucle if
bound_form = self.form_class(request.POST)
if bound_form.is_valid():
new_photo = bound_form.save()
return redirect(new_photo)
else:
return render(
request,
self.template_name,
{'form': bound_form}
)
# ** C3 - Photo - R comme Read
def montrePhotoPrecise(request, nomPhotoUrl):
maPhoto = get_object_or_404(Photo, nomAbrege__iexact = nomPhotoUrl)
template = loader.get_template('cestmoilechef/photo_precise.html')
context = Context({'photo': maPhoto})
output = template.render(context)
return HttpResponse(output)
# ** C4 - Photo - U comme Update
class PhotoUpdate(View): # Inspiré de la p. 259
form_class = PhotoForm
model = Photo
template_name = 'cestmoilechef/photo_form_update.html'
def get_object(self, nomPhotoArg):
return get_object_or_404(
self.model,
nomAbrege=nomPhotoArg
)
def get(self, request, nomPhotoUrl):
maPhoto = self.get_object(nomPhotoUrl)
context = {
'form': self.form_class(instance=maPhoto),
'photo': maPhoto,
}
return render(request, self.template_name, context)
def post(self, request, nomPhotoUrl):
maPhoto = self.get_object(nomPhotoUrl)
bound_form = self.form_class(
request.POST,
instance=maPhoto
)
if bound_form.is_valid():
new_photo = bound_form.save()
return redirect(new_photo)
else:
context = {
'form': bound_form,
'photo': maPhoto,
}
return render(
request,
self.template_name,
context
)
# ** C5 - Photo - D comme Delete
class PhotoDelete(View): # Inspiré de la p. 270
def get(self, request, nomPhotoUrl):
maPhoto = get_object_or_404(
Photo,
nomAbrege = nomPhotoUrl
)
return render(request,
'cestmoilechef/photo_confirm_delete.html',
{'photo': maPhoto}
)
def post(self, request, nomPhotoUrl):
maPhoto = get_object_or_404(
Photo,
nomAbrege = nomPhotoUrl
)
maPhoto.delete()
return redirect('liste_photos')
# ** C6 - Photo - P comme Purge
def purgePhotos(request):
tableauDeLignes = []
tableauDeLignes.append("Cette page radioactive est vouée à détruire les photos de la base.")
mesPhotos = Photo.objects.all()
nbPhotos = Photo.objects.count()
for numPhoto in range(nbPhotos - 1, -1, -1):
maPhoto = mesPhotos[numPhoto]
monNomAbrege = maPhoto.nomAbrege
monNomComplet = maPhoto.nomComplet
maCateg = maPhoto.categorie.slug
ligneAEcrire = "%d - [%s] - [%s] - [%s]" % (numPhoto, monNomAbrege, maCateg, monNomComplet)
tableauDeLignes.append(ligneAEcrire)
# Je neutralise la ligne qui suit, par prudence
# maPhoto.delete()
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
# ** C7 - Photo - I comme Import
def importePhotos(request):
# C'est volontairement que cette fonction n'utilise pas de template, pour
# ne pas oublier totalement comment on peut s'y prendre.
pageEntiere = ""
pageEntiere += "<html>\n"
pageEntiere += "<body>\n"
pageEntiere += "<p>Ceci est voué à remplir la table des photos à partir d'un fichier CSV.</p>\n"
monFichier = Fichier("portes_classees.csv", False)
while monFichier.index < monFichier.longueur:
ligneLue = monFichier.litUneLigne()
ligneAEcrire = "<p>%s</p>" % (ligneLue)
pageEntiere += ligneAEcrire
mesBazars = ligneLue.split(',')
monNomAbrege = vireGuill(mesBazars[0])
maCategEnClair = vireGuill(mesBazars[1])
maCategEnVrai = Categorie.objects.get(slug=maCategEnClair)
monPathEtNom = vireGuill(mesBazars[2])
ligneAEcrire = "<p>[%s]</p>" % (maCategEnClair)
pageEntiere += ligneAEcrire
# Je neutralise ce qui suit parce que ca a marche et que ce n'est
# pas voue a etre utilise deux fois. A noter que certes ca a
# marche, mais que ca a aussi considerablement ramé.
# Photo.objects.create(nomComplet=monPathEtNom, nomAbrege=monNomAbrege, categorie=maCategEnVrai)
monFichier.close()
pageEntiere += "</body>\n"
pageEntiere += "</html>\n"
return HttpResponse(pageEntiere)
# ** C8 - Photo - E comme Export
def exportePhotos(request):
tableauDeLignes = []
tableauDeLignes.append("Cette page est vouée à permettre l'export des photos.")
monFichier = Fichier("portes_classees_export.csv", True)
mesPhotos = Photo.objects.all()
nbPhotos = Photo.objects.count()
for numPhoto in range(nbPhotos):
maPhoto = mesPhotos[numPhoto]
monNomAbrege = maPhoto.nomAbrege
monNomComplet = maPhoto.nomComplet
maCateg = maPhoto.categorie.slug
ligneAEcrire = '"%s","%s","%s"' % (monNomAbrege, maCateg, monNomComplet)
monFichier.ecritUneLigne(ligneAEcrire)
tableauDeLignes.append(ligneAEcrire)
monFichier.close()
tableauDeLignes.append("En principe, si vous lisez ça, c'est que l'export a eu lieu.")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
# * D - Divers à mettre à part
# ** D1 - Petites merdasses diverses
def echoPath(request):
blabla = ""
system("echo $PATH > deleatur.txt")
monFichier = Fichier("deleatur.txt", False)
while monFichier.index < monFichier.longueur:
ligneLue = monFichier.litUneLigne()
ligneAEcrire = "<p>%s</p>\n" % (ligneLue)
blabla += ligneAEcrire
monFichier.close()
system("rm -f deleatur.txt") # Je purge, j'aime pas laisser des saletés
return HttpResponse(blabla)
def lsLong(request):
blabla = ""
system("ls -l > deleatur.txt")
monFichier = Fichier("deleatur.txt", False)
ligneAEcrire = django.get_version()
ligneAEcrire = "Ce site tourne avec Django " + ligneAEcrire
blabla += ligneAEcrire
while monFichier.index < monFichier.longueur:
ligneLue = monFichier.litUneLigne()
ligneAEcrire = "<p>%s</p>\n" % (ligneLue)
blabla += ligneAEcrire
monFichier.close()
system("rm -f deleatur.txt") # Je purge, j'aime pas laisser des saletés
return HttpResponse(blabla)
def multiplication(request):
tableauDeLignes = []
maxMultiplicande = 3
maxMultiplicateur = 5
for multiplicande2 in range(maxMultiplicande):
multiplicande = multiplicande2 + 1
for multiplicateur2 in range(maxMultiplicateur):
multiplicateur = multiplicateur2 + 1
blabla = "%d * %d = %d" % (multiplicande, multiplicateur, \
multiplicande * multiplicateur)
tableauDeLignes.append(blabla)
if multiplicateur2 == maxMultiplicateur - 1 and multiplicande2 < maxMultiplicande - 1:
tableauDeLignes.append("")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
# ** D2 - Usage d'images stockées ailleurs
def imagePorte(request):
pageEntiere = ""
pageEntiere += "<html>\n"
pageEntiere += "<body>\n"
pageEntiere += "<p>Merci bien.</p>\n"
pageEntiere += "<center><img src=\"http://courteline.org/hotes/portes_todito/porte230.jpg\" width=480 height=640></center>\n"
pageEntiere += "</body>\n"
pageEntiere += "</html>\n"
return HttpResponse(pageEntiere)
def vignettes(request):
pageEntiere = ""
pageEntiere += "<html>\n"
pageEntiere += "<body>\n"
pageEntiere += "<p><a href=\"http://courteline.org/hotes/vignettes/\">Accès à la page des vignettes</a></p>\n"
pageEntiere += "</body>\n"
pageEntiere += "</html>\n"
return HttpResponse(pageEntiere)
# ** D3 - Ajout au site de Pinkham (exportation des posts)
def exportePosts(request):
tableauDeLignes = []
tableauDeLignes.append("Cette page est vouée à permettre l'export des posts.")
monFichier = Fichier("posts_exportes.txt", True)
mesPosts = Post.objects.all()
nbPosts = Post.objects.count()
for numPost in range(nbPosts):
monPost = mesPosts[numPost]
monTitre = monPost.title
monFichier.ecritUneLigne(monTitre)
tableauDeLignes.append(monTitre)
monTexte = monPost.text
monFichier.ecritUneLigne(monTexte)
tableauDeLignes.append(monTexte)
if numPost < nbPosts - 1:
monFichier.ecritUneLigne("")
tableauDeLignes.append("")
monFichier.close()
tableauDeLignes.append("En principe, si vous lisez ça, c'est que l'export a eu lieu.")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
```
#### File: pronunciamento/dialogue/sinodoju.py
```python
import time
from os import system
from django.http import HttpResponse
from django.template import Context, loader
from django.views.decorators.csrf import csrf_exempt # Pour des formulaires POST libres
from jla_utils.utils import Fichier
from .models import ElementDialogue
class Tunnel:
def __init__(self, longueurArg, generationArg):
self.longueur = longueurArg
self.generation = generationArg
def alimenteBaseDeDonnees (nomEntree, identifiantSerpicon, descriptifTunnel, serveur):
ElementDialogue.objects.create(
nom = nomEntree,
param1 = identifiantSerpicon,
param2 = descriptifTunnel,
param3 = serveur
)
def analyseGraine (ligneLue):
graine = ligneLue[10:len(ligneLue) - 1]
return graine
def analyseNbCell (ligneLue):
nbCellString = ligneLue[9:len(ligneLue)]
nbCell = int(nbCellString)
return nbCell
def analyseTunnel (request):
nomFichTunnel = "resultat_longtun2.txt"
numLigneLue = 0
fichALire = Fichier(nomFichTunnel, 0)
chouBlanc = True # Par defaut
nbCell = 0
graine = ""
mesTunnels = []
while fichALire.index < fichALire.longueur:
ligneLue = fichALire.litUneLigne()
numLigneLue += 1
if numLigneLue == 1:
nbCell = analyseNbCell(ligneLue)
elif numLigneLue == 2:
graine = analyseGraine(ligneLue)
else:
if (len(ligneLue) > 10) and (ligneLue[0:6] == "Tunnel"):
chouBlanc = False
monTunnelNormalise = analyseTunnelMoteur(ligneLue)
mesTunnels.append(monTunnelNormalise)
fichALire.close()
print("Le nombre de cellules est de %d." % (nbCell))
print("La graine est [%s]." % (graine))
nomEntreeDeBase = fabriqueTempsSyntaxeUrl()
identifiantSerpicon = "%d %s" % (nbCell, graine)
nomServeur = "alwaysdata"
if chouBlanc:
alimenteBaseDeDonnees(nomEntreeDeBase, identifiantSerpicon, "Chou blanc !", nomServeur)
else:
for numTunnel in range(len(mesTunnels)):
monTunnel = mesTunnels[numTunnel]
maLongueur = monTunnel.longueur
maGeneration = monTunnel.generation
print("Tunnel de %s a la generation %s" % \
(separateurMille(maLongueur, ' '),
separateurMille(maGeneration, ' ')))
nomEntreeDeBase = fabriqueTempsSyntaxeUrl()
nomEntree = nomEntreeDeBase + "__" + separateurMille(maLongueur, '_')
descriptifTunnel = separateurMille(maLongueur, ' ') + " en " \
+ separateurMille(maGeneration, ' ')
alimenteBaseDeDonnees(nomEntree, identifiantSerpicon, descriptifTunnel, nomServeur)
if numTunnel < len(mesTunnels) - 1:
attend(5.0)
# time.sleep(2.0) # A tout hasard, pour ne pas venir trop vite apres les requetes
# d'analyse_tunnel.py
# lanceSinodoju () # On va laisser courteline s'occuper de relancer amarelia
tableauDeLignes = []
tableauDeLignes.append("Cette page est la page de l'analyse des tunnels.")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
def attend (dureeEnSecondes):
time.sleep(dureeEnSecondes)
def analyseTunnelMoteur (ligneLue):
chaineLongueur = ""
chaineGeneration = ""
caracLu = ""
numSigne = 10
eTrouve = False
while (not eTrouve) and (numSigne < len(ligneLue)):
signeLu = ligneLue[numSigne]
if signeLu == "e":
eTrouve = True
else:
chaineLongueur += signeLu
numSigne += 1
chaineLongueur = chaineLongueur[0:len(chaineLongueur) - 1] # pour virer l'espace finale
maLongueur = int(vireSigne(chaineLongueur, ' '))
numSigne += 2
chaineGeneration = ligneLue[numSigne:len(ligneLue)]
maGene = int(vireSigne(chaineGeneration, ' '))
monTunnel = Tunnel(maLongueur, maGene)
return monTunnel
def fabriqueTempsSyntaxeGraine ():
graine = time.strftime("jlancey%Y%m%da%Hh%Mm%S", time.localtime())
return graine
def fabriqueTempsSyntaxeUrl ():
# tempsSyntaxeUrl = time.strftime("%Y-%m-%d_%H-%M-%S", time.gmtime())
tempsSyntaxeUrl = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
return tempsSyntaxeUrl
def lanceSinodoju ():
conn = http.client.HTTPConnection("www.amarelia.ch")
conn.request("GET", "/sinodoju/sinodoju.php")
r1 = conn.getresponse()
print(r1.status, r1.reason)
data1 = r1.read()
# print(data1)
conn.close()
def separateurMille (monEntier, monSeparateur):
maChaine0 = "%d" % (monEntier)
maChaine1 = ""
for numSigne in range(len(maChaine0)):
numSigne2 = len(maChaine0) -1 - numSigne
monSigne = maChaine0[numSigne2]
if (numSigne % 3 == 0) and numSigne > 0:
maChaine1 = monSeparateur + maChaine1
maChaine1 = monSigne + maChaine1
return maChaine1
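# Illustrative usage of separateurMille (example values, not from the source):
#   separateurMille(1234567, ' ') -> '1 234 567'
#   separateurMille(1234567, '_') -> '1_234_567'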
@csrf_exempt # En théorie, c'est une brèche de sécurité; en pratique... ca depend
def viewSinodoju (request):
tableauDeLignes = []
tableauDeLignes.append("Cette page est la page de Sinodoju.")
graine = fabriqueTempsSyntaxeGraine()
nbBitsFournis = len(graine) * 6
tableauDeLignes.append("La graine est [%s], soit assez pour %d bits." % (graine, nbBitsFournis))
nbCellules = 145
system("./sinodoju.pl %d %s > cr_perl.txt 2> cr2_perl.txt &" % (nbCellules, graine))
tableauDeLignes.append("En principe, si vous lisez ça, c'est qu'un daemon Sinodoju a été lancé.")
tableauDeLignes.append("Donc ça aura un effet... quand le daemon aura fini de travailler.")
tableauDeLignes.append("Ce template a été écrit pour vous rendre la main tout de suite...")
tableauDeLignes.append("... mais des limitations d'AlwaysData, compréhensibles d'ailleurs,")
tableauDeLignes.append("imposent d'attendre quand même la fin du processus. Cette page ne")
tableauDeLignes.append("sert donc qu'à titre de test.")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
def vireSigne (maChaine, monSigneAVirer):
maChainePurgee = ""
for numSigne in range(len(maChaine)):
monSigne = maChaine[numSigne]
if monSigne != monSigneAVirer:
maChainePurgee += monSigne
return maChainePurgee
```
#### File: pronunciamento/dvd/models.py
```python
from django.db import models
from django.core.urlresolvers import reverse
class Dvd(models.Model):
titre = models.CharField(max_length=80)
slug = models.SlugField(
max_length=31,
unique=True,
help_text='Un slug pour les DVD.')
actClair = models.CharField(max_length=120)
reaClair = models.CharField(max_length=80)
genre = models.CharField(max_length=40)
place = models.CharField(max_length=40)
obs = models.CharField(max_length=160)
class Meta:
ordering = ['titre']
def get_absolute_url(self):
return reverse('dvd_detail', kwargs={'slugUrl': self.slug})
def get_update_url(self):
return reverse('dvd_update', kwargs={'slugUrl': self.slug})
def get_delete_url(self):
return reverse('dvd_delete', kwargs={'slugUrl': self.slug})
def __str__(self):
return self.titre
```
#### File: pronunciamento/dvd/views.py
```python
from django.http import (Http404, HttpResponse)
from django.template import Context, loader
from django.shortcuts import (get_object_or_404, \
redirect, \
# render_to_response, \
render)
from django.views.generic import View # Pour faire des class-based views, voir p. 255
from jla_utils.utils import Fichier, BidouillesTexte
from .models import Dvd
from .forms import DvdForm
def accueilDvd (request):
template = loader.get_template('dvd/accueil.html')
message = "Je veux pouvoir faire un peu de saisie pour mes DVD."
context = Context({'message': message})
output = template.render(context)
return HttpResponse(output)
def nettoieExport (maChaine):
bid = BidouillesTexte()
maChaine = bid.enleveCaracSpec(maChaine)
maChaine = bid.virePointSeul(maChaine)
return maChaine
def nettoieImport (maChaine):
bid = BidouillesTexte()
maChaine = bid.vireGuill(maChaine)
maChaine = bid.remetCaracSpec(maChaine)
return maChaine
class DvdList(View):
def get(self, request):
return render(request, \
'dvd/liste.html', \
{'dvd_list': Dvd.objects.all()})
class DvdCreate(View):
form_class = DvdForm
template_name = 'dvd/dvd_form.html'
def get(self, request):
return render(
request,
self.template_name,
{'form': self.form_class()}
)
def post(self, request):
# Attention, code façon Pinkham, avec deux return dans une boucle if
bound_form = self.form_class(request.POST)
if bound_form.is_valid():
new_element = bound_form.save()
return redirect(new_element)
else:
return render(
request,
self.template_name,
{'form': bound_form}
)
def dvdDetail(request, slugUrl):
try:
monDvd = Dvd.objects.get(slug__iexact = slugUrl)
except Dvd.DoesNotExist:
raise Http404
template = loader.get_template('dvd/dvd_detail.html')
context = Context({'dvd': monDvd})
output = template.render(context)
return HttpResponse(output)
class DvdUpdate(View):
form_class = DvdForm
model = Dvd
template_name = 'dvd/dvd_form_update.html'
def get_object(self, slugArg):
return get_object_or_404(
self.model,
slug=slugArg
)
def get(self, request, slugUrl):
monElement = self.get_object(slugUrl)
context = {
'form': self.form_class(instance=monElement),
'element': monElement,
}
return render(request, self.template_name, context)
def post(self, request, slugUrl):
monDvd = self.get_object(slugUrl)
bound_form = self.form_class(
request.POST,
instance=monDvd
)
if bound_form.is_valid():
new_element = bound_form.save()
return redirect(new_element)
else:
context = {
'form': bound_form,
'dvd': monDvd,
}
return render(
request,
self.template_name,
context
)
class DvdDelete(View):
def get(self, request, slugUrl):
monElement = get_object_or_404(
Dvd,
slug = slugUrl
)
return render(request,
'dvd/dvd_confirm_delete.html',
{'element': monElement}
)
def post(self, request, slugUrl):
monElement = get_object_or_404(
Dvd,
slug = slugUrl
)
monElement.delete()
return redirect('dvd_liste')
def purgeDvd(request):
tableauDeLignes = []
tableauDeLignes.append("Cette page radioactive est vouée à détruire les DVD.")
mesDvd = Dvd.objects.all()
nbDvd = Dvd.objects.count()
tableauDeLignes.append("J'ai compté %d DVD." % (nbDvd))
for numDvd in range(nbDvd - 1, -1, -1):
monDvd = mesDvd[numDvd]
monTitre = monDvd.titre
ligneAEcrire = "%d - [%s]\n" % \
(numDvd, monTitre)
tableauDeLignes.append(ligneAEcrire)
# La ligne qui suit est vouée à être neutralisée par prudence
# monDvd.delete()
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
def importeDvd(request):
tableauDeLignes = []
tableauDeLignes.append("Cette page est vouée à permettre l'importation des DVD.")
monFichier = Fichier("dvd.csv", False)
ligneLue = monFichier.litUneLigne() # On passe la ligne d'en-tête
while monFichier.index < monFichier.longueur:
ligneLue = monFichier.litUneLigne()
tableauDeLignes.append(ligneLue)
mesBazars = ligneLue.split(',')
monTitre = nettoieImport(mesBazars[0])
monSlug = nettoieImport(mesBazars[1])
monActClair = nettoieImport(mesBazars[2])
monReaClair = nettoieImport(mesBazars[3])
monGenre = nettoieImport(mesBazars[4])
maPlace = nettoieImport(mesBazars[5])
mesObs = nettoieImport(mesBazars[6])
tableauDeLignes.append("[%s], [%s], [%s], [%s], [%s], [%s], [%s]" % \
(monTitre, monSlug, monActClair, monReaClair, monGenre, maPlace, mesObs))
# La ligne qui suit est vouée à être neutralisée si on ne veut pas qu'elle
# soit employée par erreur
# Dvd.objects.create(titre=monTitre, slug=monSlug, actClair=monActClair, \
# reaClair=monReaClair, genre=monGenre, place=maPlace, \
# obs=mesObs)
monFichier.close()
tableauDeLignes.append("En principe, si vous lisez ça, c'est que l'importation a eu lieu.")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
def exporteDvd(request):
tableauDeLignes = []
tableauDeLignes.append("Cette page est vouée à permettre l'export des DVD.")
monFichier = Fichier("dvd_export.csv", True)
ligneAEcrire = '"titre","slug","act_clair","rea_clair","genre","place","obs"'
tableauDeLignes.append(ligneAEcrire)
monFichier.ecritUneLigne(ligneAEcrire)
mesDvd = Dvd.objects.all()
nbDvd = Dvd.objects.count()
for numDvd in range(nbDvd):
monDvd = mesDvd[numDvd]
monTitre = nettoieExport(monDvd.titre)
monSlug = nettoieExport(monDvd.slug)
monActClair = nettoieExport(monDvd.actClair)
monReaClair = nettoieExport(monDvd.reaClair)
monGenre = nettoieExport(monDvd.genre)
maPlace = nettoieExport(monDvd.place)
mesObs = nettoieExport(monDvd.obs)
ligneAEcrire = '"%s","%s","%s","%s","%s","%s","%s"' % \
(monTitre, monSlug, monActClair, monReaClair, monGenre, maPlace, mesObs)
monFichier.ecritUneLigne(ligneAEcrire)
tableauDeLignes.append(ligneAEcrire)
monFichier.close()
tableauDeLignes.append("En principe, si vous lisez ça, c'est que l'export a eu lieu.")
template = loader.get_template('cestmoilechef/petite_merdasse.html')
context = Context({ 'tabDeLignes': tableauDeLignes })
output = template.render(context)
return HttpResponse(output)
```
#### File: pronunciamento/jla_utils/utils.py
```python
class BidouillesTexte:
def chomp (self, maChaine):
# remplace la fonction du meme nom en Perl, qui vire le ou les
# caracteres de fin de ligne
index = 0
maChaine2 = ''
while index < len(maChaine):
signe = maChaine[index]
if signe != '\n' and signe != '\r':
maChaine2 += signe
index += 1
return maChaine2
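    # Illustrative usage of chomp (example values, not from the source),
    # assuming an instance b = BidouillesTexte():
    #   b.chomp('ligne\r\n') -> 'ligne'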
def enleveCaracSpec (self, maChaine):
maChaine = self.rechercheEtRemplace(maChaine, ',', '<vg>')
maChaine = self.rechercheEtRemplace(maChaine, ':', '<2p>')
maChaine = self.rechercheEtRemplace(maChaine, '"', '<gl>')
maChaine = self.rechercheEtRemplace(maChaine, "'", '<ap>')
return maChaine
def rechercheEtRemplace (self, chaine, aChercher, aMettre):
# Attention, cette routine est faite pour traiter des chaines
# banales, constituees d'octets, avec les caracteres UTF-8 codes sur
# DEUX signes et non sur un seul comme dans les chaines Unicode
fini = 0 # false
numSigneDep = 0
while not fini:
if numSigneDep >= len(chaine):
fini = 1 # true
elif len(chaine) - len(aChercher) < numSigneDep:
fini = 1 # true
else:
intro = chaine[0:numSigneDep]
extrait = chaine[numSigneDep:numSigneDep + len(aChercher)]
concl = chaine[numSigneDep + len(aChercher):len(chaine)]
if aChercher == extrait:
chaine = intro + aMettre + concl
numSigneDep += len(aMettre)
else:
numSigneDep += 1
return chaine
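    # Illustrative usage of rechercheEtRemplace (example values, not from the
    # source), assuming an instance b = BidouillesTexte():
    #   b.rechercheEtRemplace('a,b:c', ',', '<vg>') -> 'a<vg>b:c'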
def remetCaracSpec (self, maChaine):
maChaine = self.rechercheEtRemplace(maChaine, '<vg>', ',')
maChaine = self.rechercheEtRemplace(maChaine, '<2p>', ':')
maChaine = self.rechercheEtRemplace(maChaine, '<gl>', '"')
maChaine = self.rechercheEtRemplace(maChaine, '<ap>', "'")
return maChaine
def vireGuill (self, mention):
if mention[0] == '"' and mention[len(mention) - 1] == '"':
mention = mention[1:len(mention) - 1]
return mention
def virePointSeul(self, mentionAExporter):
# Ce cretin de Django ne permettant pas de laisser des cases vides
# dans ses formulaires de saisie, je saisis un point dans chaque
# case de saisie que je préférerais laisser vide. Cette routine
# permet de virer ce point (utile au moment de l'exportation en CSV).
if mentionAExporter == '.':
mentionAExporter = ""
return mentionAExporter
class Fichier:
def __init__(self, nomFichierArg, boolEcriture):
self.nomFichier = nomFichierArg
if boolEcriture:
self.fichier = open(self.nomFichier, 'wb') # 'b' pour des octets
self.fichier.seek(0, 0) # Se place au debut du fichier
self.longueur = 0
else:
self.fichier = open(self.nomFichier, 'rb') # 'b' pour des octets
self.fichier.seek(0, 2) # Se place a la fin du fichier
self.longueur = self.fichier.tell()
self.index = 0
def close (self):
self.fichier.close()
def deByteStringAChaineUnicode (self, monByteString):
chaineUnicode = monByteString.decode(encoding='UTF-8')
return chaineUnicode
def deChaineUnicodeAByteString (self, chaineUnicode):
monByteString = chaineUnicode.encode(encoding='UTF-8')
return monByteString
def ecritUneLigne (self, ligneAEcrire):
ligneAEcrire2 = self.deChaineUnicodeAByteString(ligneAEcrire)
for numOctet in range(len(ligneAEcrire2)):
octet = ligneAEcrire2[numOctet:numOctet + 1]
# La ligne precedente a l'air amphigourique, mais
# ligneAEcrire2[numOctet:numOctet + 1] est de type "bytes",
# alors que ligneAEcrire2[numOctet] serait de type "int"
self.ecritUnOctet(octet)
self.ecritUnOctet(b'\n')
def ecritUnOctet (self, signe):
self.fichier.seek(self.index, 0)
self.fichier.write(signe)
self.index += 1
self.longueur += 1
def interromptLecture (self):
self.close()
def litUneLigne (self):
octetLu = ''
ligneLue = b"" # Soit un bytestring vide
finDeLigne = 0 # false
while self.index < self.longueur and not finDeLigne:
octetLu = self.litUnOctet(self.index)
if octetLu == b'\n':
finDeLigne = 1 # true
else:
ligneLue += octetLu
ligneLue2 = self.deByteStringAChaineUnicode(ligneLue)
return ligneLue2
def litUnOctet (self, numOctet):
self.fichier.seek(numOctet, 0)
octet = self.fichier.read(1)
self.index += 1
return octet
    def reprendLecture (self):
        # reopen in binary mode, consistent with the rest of the class
        self.fichier = open(self.nomFichier, 'rb')
        self.fichier.seek(self.index, 0) # resume where reading had stopped
def seek (self, numOctet):
self.fichier.seek(numOctet, 0)
self.index = numOctet
```
|
{
"source": "jeanlucchamaa/LED-SuperController",
"score": 3
}
|
#### File: Downsize_Update/ledapp/ledapp.py
```python
import serial, time, datetime
from datetime import timedelta
from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
@app.route("/")
def hello():
global hardware
hardware=1
global on
on=False
    # Open the serial port only once; 'ser' lives at module scope,
    # so check globals() rather than locals().
    if 'ser' not in globals():
        global ser
        ser = serial.Serial('/dev/ttyUSB0', 38400)
return render_template('ui.html')
@app.route("/apply")
def application():
hardware=0
red=int(request.args.get('r'))
green=int(request.args.get('g'))
blue=int(request.args.get('b'))
sendbit=int(request.args.get('s'))
ba=bytearray()
ba[0:3]=[red,green,blue,sendbit]
for index,value in enumerate(ba):
ba[index]=min(255,value+1)
ser.write(ba)
ser.write('\0')
return('potato')
@app.route("/supply")
def supplication():
new=0
r=0
g=0
b=0
if(ser.in_waiting >= 4):
ba=bytearray()
ba[0:4]=[90,90,90,90,90]
i=0
x='w'
while(x != '\0'):
x=ser.read()
ba[i]=x
i=i+1
r=ba[0]-1
g=ba[1]-1
b=ba[2]-1
new=1
return jsonify(red=r,green=g,blue=b,info=new)
@app.route("/nappy")
def nappytime():
on=True
#start=int(request.args.get('start'))
#end=int(request.args.get('end'))
alarm=datetime.datetime(2016,10,19,22,39)
print "initialize"
while(on):
now=datetime.datetime.now()
        # Subtracting two datetimes already yields a timedelta; timedelta.seconds
        # is never negative, so compare total_seconds() instead.
        delta = alarm - now
        if delta.total_seconds() < 0:
print "caramel"
break
if __name__ == "__main__":
app.run(processes=2)
```
|
{
"source": "jeanlucf22/mgmol",
"score": 2
}
|
#### File: mgmol/util/averageDistance.py
```python
import sys, string, os
from math import sqrt, acos
from numpy import *
import matplotlib.pyplot as plt
kb_au=3.16678939e-06 # [Ha/K]
au2ps = 2.418885e-05
distances=[]
times=[]
time=0.
def getMassAtom(name):
    # Note: relies on a module-level 'spmass' dict (species name -> atomic mass)
    # that is not defined in this script; the function is unused here.
    one=name[0]
mass=0.
if one[0:2] in spmass.keys():
mass=spmass[one[0:0+2]]
else:
mass=spmass[one[0]]
return mass
def analyzeDistance(filename,name0,name1,dt):
global distances
global times
global time
found0=0
found1=0
file=open(filename,'r')
L1=file.readlines()
for line in L1: ## loop over lines of file
words=string.split(line)
if len(words)>1:
if words[0][0:2]=='##' and words[0][0:3]!='###':
name=words[1]
if name[0:1]=='*':
name=name[1:]
if( name0==name ):
x0=eval(words[2])
y0=eval(words[3])
z0=eval(words[4])
found0=1
if( name1==name ):
x1=eval(words[2])
y1=eval(words[3])
z1=eval(words[4])
found1=1
if found0==1 & found1==1 :
d=sqrt((x1-x0)**2+(y1-y0)**2+(z1-z0)**2)
print "#d [Bohr]=",d, ", d [Ang]=",d*0.529177
times.append(time)
distances.append(d)
time=time+dt*au2ps
found0=0
found1=0
file.close()
#main
filesdir=sys.argv[1]
filenames=os.listdir(filesdir)
name0 =sys.argv[2]
name1 =sys.argv[3]
inputs=[]
for filename in filenames:
if 'md_run' in filename:
inputs.append(filename)
inputs.sort()
inputs[0]
file=open(inputs[0],'r')
L1=file.readlines()
for line in L1: ## loop over lines of file
word=string.split(line)
if len(word)>1:
if word[0]=='Timestep':
dt=eval(word[5])
for filename in inputs:
analyzeDistance(filename,name0,name1,dt)
nf=int(ceil(1./(dt*au2ps)))
skip=5
distancep = [ distances[i] for i in range(0, len(distances), skip)]
timesp = [ times[i] for i in range(0, len(distances), skip)]
skip=25
aves1 = [sum(distances[i-nf:i])/nf for i in range(nf, len(distances), skip)]
aves2 = [sum(distances[i-2*nf:i])/(2*nf) for i in range(2*nf, len(distances), skip)]
aves3 = [sum(distances[i-3*nf:i])/(3*nf) for i in range(3*nf, len(distances), skip)]
times1 = [ times[i] for i in range(nf, len(distances), skip)]
times2 = [ times[i] for i in range(2*nf, len(distances), skip)]
times3 = [ times[i] for i in range(3*nf, len(distances), skip)]
print '#time distance'
for i in range(len(distances)):
print times[i],distances[i]
if len(aves1)>1:
print '#Running average over 1 ps, last value: ',aves1[-1]
if len(aves2)>1:
print '#Running average over 2 ps, last value: ',aves2[-1]
if len(aves3)>1:
print '#Running average over 3 ps, last value: ',aves3[-1]
xmax=len(distances)*dt*au2ps
ymin=min(distances)
ymax=max(distances)
plt.figure(1)
plt.subplot(211)
plt.axis([0.,xmax,ymin,ymax])
plt.plot(timesp, distancep, 'go')
plt.ylabel('distance (Bohr)')
#plt.show()
#plt.subplot(212)
#ymin=min(aves2)
#ymax=max(aves2)
#plt.axis([0.,xmax,ymin,ymax])
#plt.plot(times1, aves1, 'ro')
plt.plot(times2, aves2, 'ro')
plt.plot(times3, aves3, 'bo')
#plt.show()
plt.savefig('aves.png')
```
#### File: mgmol/util/averageMDpositions.py
```python
import sys, string, os
from numpy import *
from pdb_tools import printPDBfile
from collections import defaultdict
au2ps = 2.418885e-05
times=[]
#coords={}
coords=defaultdict(list)
nsteps=0
time=0.
dt=0.
def appendPositions(filename):
global coords
global nsteps
global time
global dt
found=0
found_force=0
#print '#',filename
file=open(filename,'r')
L1=file.readlines()
for line in L1: ## loop over lines of file
words=string.split(line)
if len(words)>1:
#find positions
if 'Stepper' in words and 'Forces:' in words:
found_force=1
found=0
nsteps=nsteps+1
time=time+dt*au2ps
times.append(time)
if words[0][0:2]=='##' and found_force:
name=words[1]
found=found+1
valx=eval(words[2])
valy=eval(words[3])
valz=eval(words[4])
if name not in coords:
coords[name]=list()
coords[name].append([valx,valy,valz])
else:
if found>0:
found_force=0
file.close()
#main
filesdir=sys.argv[1]
filenames=os.listdir(filesdir)
inputs=[]
for filename in filenames:
if 'md_run' in filename:
inputs.append(filename)
inputs.sort()
#read 'dt'
inputs[0]
file=open(inputs[0],'r')
L1=file.readlines()
for line in L1: ## loop over lines of file
word=string.split(line)
if len(word)>1:
if word[0]=='Timestep':
dt=eval(word[5])
for filename in inputs:
appendPositions(filename)
#number of steps/ps
nf=int(ceil(1./(dt*au2ps)))
#sampling length in ps
number_ps=3
xyz=[]
names=[]
movables=[]
#loop over atoms
for c in coords.keys():
#print coords[c]
#print len(coords[c])
npts=min(number_ps*nf,len(coords[c]))
names.append(c)
#build lists with latest npts values
x=[]
y=[]
z=[]
    # range end is exclusive: include the last point so npts values are averaged
    for i in range(len(coords[c])-npts, len(coords[c])):
x.append(coords[c][i][0])
y.append(coords[c][i][1])
z.append(coords[c][i][2])
#calculate averages with latest npts values
avex = sum(x[-npts:])/npts
avey = sum(y[-npts:])/npts
avez = sum(z[-npts:])/npts
xyz.append(str(avex)+' '+str(avey)+' '+str(avez))
movables.append('1')
na=len(names)
printPDBfile(na,names,xyz,movables,'')
```
#### File: mgmol/util/generate_LinXYZPot.py
```python
import sys, string, struct, getopt
from array import array
opts, args = getopt.getopt(sys.argv[1:], "hf:e:d:bo:")
def usage():
usage = """
-h --help Prints this
-f --file (argument) input file
-e --efield (argument) field
-d --direction (argument) Direction
-o --output (argument) Output
-b --binary binary output
"""
print usage
bin_flag=0
ifilename='0'
ofilename='0'
for o,a in opts:
#print o,a
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-f", "--file"):
ifilename = a
print "Input file is", ifilename
elif o in ("-e", "--efield"):
efield = eval(a)
print "Field is" , efield
elif o in ("-d", "--direction"):
direction = eval(a)
print "Direction is" , direction
elif o in ("-o", "--output"):
print "Output file is " , a
ofilename=a
elif o in ("-b", "--binary"):
bin_flag=1
else:
print 'unknown option'
print opts
usage()
sys.exit()
ifile =open(ifilename,'r')
if bin_flag>0:
print 'Binary output...'
output=open(ofilename,'wb')
else:
output=open(ofilename,'w')
flag=0
origin=[0.,0.,0.]
end=[0.,0.,0.]
while flag<1:
line = ifile.readline()
words=string.split(line)
if words[0][0]!='#':
origin[0]=eval(words[0])
origin[1]=eval(words[1])
origin[2]=eval(words[2])
end[0]=eval(words[3])
end[1]=eval(words[4])
end[2]=eval(words[5])
print 'origin=',origin
print 'end=',end
flag=1
if bin_flag>0:
packed_string = struct.pack('fff',*origin)
output.write(packed_string)
packed_string = struct.pack('fff',*end)
output.write(packed_string)
#float_array = array('f', [0.,-4.69,-14.07])
#float_array = array('f', origin)
#float_array.tofile(output)
else:
output.write('#Cell\n')
output.write(str(origin[0]))
output.write('\t')
output.write(str(origin[1]))
output.write('\t')
output.write(str(origin[2]))
output.write('\t')
output.write(str(end[0]))
output.write('\t')
output.write(str(end[1]))
output.write('\t')
output.write(str(end[2]))
output.write('\n')
flag=0
n=[0,0,0]
while flag<1:
line = ifile.readline()
words=string.split(line)
if words[0][0]!='#':
n[0]=eval(words[0])
n[1]=eval(words[1])
n[2]=eval(words[2])
print 'mesh=',n
flag=1
if bin_flag>0:
packed_string = struct.pack('iii',*n)
output.write(packed_string)
else:
output.write('#Mesh:\n')
output.write(str(n[0]))
output.write('\t')
output.write(str(n[1]))
output.write('\t')
output.write(str(n[2]))
output.write('\n')
output.write('#E-field=')
output.write(str(efield))
output.write(', direction=')
output.write(str(direction))
output.write('\n')
h=((end[0]-origin[0])/n[0],(end[1]-origin[1])/n[1],(end[2]-origin[2])/n[2])
count=0
for i in range(n[0]):
for j in range(n[1]):
for k in range(n[2]):
val=0.
if direction==0:
val=(origin[direction]+i*h[direction])*efield
if direction==1:
val=(origin[direction]+j*h[direction])*efield
if direction==2:
val=(origin[direction]+k*h[direction])*efield
if bin_flag>0:
packed_string = struct.pack('f',val)
output.write(packed_string)
else:
output.write(str(val))
output.write('\n')
count=count+1
if count!=(n[0]*n[1]*n[2]):
print 'ERROR: count!=n[0]*n[1]*n[2]'
```
#### File: mgmol/util/generateLocalGTHpseudo.py
```python
from math import exp, erf, sqrt, pi
#coefficients for H
rloc=0.2
c1=-4.0663326
c2=0.6778322
c3=0.
c4=0.
zion=1.
anumber=1
name="HydrogenGTH_LDA"
mass=1.
def radialfunction(r):
alpha = (r/rloc)**2
val = exp(-0.5*alpha)*(c1+c2*alpha+c3*alpha*alpha+c4*alpha*alpha*alpha)
if r>1.e-8:
val = val - zion*erf(r/(sqrt(2.)*rloc))/r
else:
#print("special case for r = {}".format(r))
val = val -zion*sqrt(2.)/(sqrt(pi)*rloc)
return val
npts = 301
#header
print("# Short description of the species type. One line only!")
print(name)
print("#")
print("White")
print("#radii of balls and covalent bonds")
print("0.4 1.0")
print("# Nlcc flag")
print("0")
print("# Atomic number")
print(anumber)
print("# Atomic mass")
print(mass)
print("# Number of valence electrons")
print(zion)
print("# Gaussian core charge parameter rc")
print("1.")
print("# Number of potentials")
print("1")
print("# l-value for state which is local")
print("0 0")
print("# Local potential radius")
print("3.")
print("# Non-local potential radius")
print("3.")
print("# number of points in radial grid")
print(npts)
print("# log mesh parameter")
print("0.")
print("# radial grid, reference state, and potential for l=0")
#potential
for i in range(npts):
r = round(0.01*i,2)
f = radialfunction(r)
print("{} {}".format(r,f))
```
|
{
"source": "jeanlucmargot/pyvax",
"score": 2
}
|
#### File: pyvax/tests/tests.py
```python
import pyvax as pv
import numpy as np
from pytest import approx
def func_i2(x):
fstr = pv.from_vax_i2(x)
print(np.frombuffer(fstr, dtype=np.int16, count=1))
return np.frombuffer(fstr, dtype=np.int16, count=1)
def func_i4(x):
fstr = pv.from_vax_i4(x)
print(np.frombuffer(fstr, dtype=np.int32, count=1))
return np.frombuffer(fstr, dtype=np.int32, count=1)
def func_f4(x):
fstr = pv.from_vax_r4(x)
print(np.frombuffer(fstr, dtype=np.float32, count=1))
return np.frombuffer(fstr, dtype=np.float32, count=1)
def func_d8(x):
fstr = pv.from_vax_d8(x)
print(np.frombuffer(fstr, dtype=np.float64, count=1))
return np.frombuffer(fstr, dtype=np.float64, count=1)
def func_g8(x):
fstr = pv.from_vax_g8(x)
print(np.frombuffer(fstr, dtype=np.float64, count=1))
return np.frombuffer(fstr, dtype=np.float64, count=1)
def test_i2():
assert func_i2(b'\x01\x00') == 1
assert func_i2(b'\xFF\xFF') == -1
assert func_i2(b'\x00\x01') == 256
assert func_i2(b'\x00\xFF') == -256
assert func_i2(b'\x39\x30') == 12345
assert func_i2(b'\xC7\xCF') == -12345
def test_i4():
assert func_i4(b'\x01\x00\x00\x00') == 1
assert func_i4(b'\xFF\xFF\xFF\xFF') == -1
assert func_i4(b'\x00\x01\x00\x00') == 256
assert func_i4(b'\x00\xFF\xFF\xFF') == -256
assert func_i4(b'\x00\x00\x01\x00') == 65536
assert func_i4(b'\x00\x00\xFF\xFF') == -65536
assert func_i4(b'\x00\x00\x00\x01') == 16777216
assert func_i4(b'\x00\x00\x00\xFF') == -16777216
assert func_i4(b'\x15\xCD\x5B\x07') == 123456789
assert func_i4(b'\xEB\x32\xA4\xF8') == -123456789
def test_f4():
assert func_f4(b'\x80\x40\x00\x00') == approx(1.000000, rel=1e-7)
assert func_f4(b'\x80\xC0\x00\x00') == approx(-1.000000, rel=1e-7)
assert func_f4(b'\x60\x41\x00\x00') == approx(3.500000, rel=1e-7)
assert func_f4(b'\x60\xC1\x00\x00') == approx(-3.500000, rel=1e-7)
assert func_f4(b'\x49\x41\xD0\x0F') == approx(3.141590, rel=1e-7)
assert func_f4(b'\x49\xC1\xD0\x0F') == approx(-3.141590, rel=1e-7)
assert func_f4(b'\xF0\x7D\xC2\xBD') == approx(9.9999999E+36, rel=1e-7)
assert func_f4(b'\xF0\xFD\xC2\xBD') == approx(-9.9999999E+36, rel=1e-7)
assert func_f4(b'\x08\x03\xEA\x1C') == approx(9.9999999E-38, rel=1e-7)
assert func_f4(b'\x08\x83\xEA\x1C') == approx(-9.9999999E-38, rel=1e-7)
assert func_f4(b'\x9E\x40\x52\x06') == approx(1.234568, rel=1e-7)
assert func_f4(b'\x9E\xC0\x52\x06') == approx(-1.234568, rel=1e-7)
def test_d8():
assert func_d8(b'\x80\x40\x00\x00\x00\x00\x00\x00') == approx(1.000000000000000, rel=1e-14)
assert func_d8(b'\x80\xC0\x00\x00\x00\x00\x00\x00') == approx(-1.000000000000000, rel=1e-14)
assert func_d8(b'\x60\x41\x00\x00\x00\x00\x00\x00') == approx(3.500000000000000, rel=1e-14)
assert func_d8(b'\x60\xC1\x00\x00\x00\x00\x00\x00') == approx(-3.500000000000000, rel=1e-14)
assert func_d8(b'\x49\x41\xDA\x0F\x21\xA2\xBE\x68') == approx(3.141592653589793, rel=1e-14)
assert func_d8(b'\x49\xC1\xDA\x0F\x21\xA2\xBE\x68') == approx(-3.141592653589793, rel=1e-14)
assert func_d8(b'\xF0\x7D\xC2\xBD\xBB\x1A\xDB\x48') == approx(1.0000000000000000E+37, rel=1e-14)
assert func_d8(b'\xF0\xFD\xC2\xBD\xBB\x1A\xDB\x48') == approx(-1.0000000000000000E+37, rel=1e-14)
assert func_d8(b'\x08\x03\xEA\x1C\x54\x14\x75\x5C') == approx(9.9999999999999999E-38, rel=1e-14)
assert func_d8(b'\x08\x83\xEA\x1C\x54\x14\x75\x5C') == approx(-9.9999999999999999E-38, rel=1e-14)
assert func_d8(b'\x9E\x40\x52\x06\x62\x14\xE7\xCE') == approx(1.234567890123450, rel=1e-14)
assert func_d8(b'\x9E\xC0\x52\x06\x62\x14\xE7\xCE') == approx(-1.234567890123450, rel=1e-14)
def test_g8():
assert func_g8(b'\x10\x40\x00\x00\x00\x00\x00\x00') == approx(1.000000000000000, rel=1e-14)
assert func_g8(b'\x10\xC0\x00\x00\x00\x00\x00\x00') == approx(-1.000000000000000, rel=1e-14)
assert func_g8(b'\x2C\x40\x00\x00\x00\x00\x00\x00') == approx(3.500000000000000, rel=1e-14)
assert func_g8(b'\x2C\xC0\x00\x00\x00\x00\x00\x00') == approx(-3.500000000000000, rel=1e-14)
assert func_g8(b'\x29\x40\xFB\x21\x44\x54\x18\x2D') == approx(3.141592653589793, rel=1e-14)
assert func_g8(b'\x29\xC0\xFB\x21\x44\x54\x18\x2D') == approx(-3.141592653589793, rel=1e-14)
assert func_g8(b'\xBE\x47\xB8\x17\x57\x43\x1B\x69') == approx(1.0000000000000000E+37, rel=1e-14)
assert func_g8(b'\xBE\xC7\xB8\x17\x57\x43\x1B\x69') == approx(-1.0000000000000000E+37, rel=1e-14)
assert func_g8(b'\x61\x38\x9D\x03\x8A\x42\x8F\x8B') == approx(9.9999999999999999E-38, rel=1e-14)
assert func_g8(b'\x61\xB8\x9D\x03\x8A\x42\x8F\x8B') == approx(-9.9999999999999999E-38, rel=1e-14)
assert func_g8(b'\x13\x40\xCA\xC0\x8C\x42\xDD\x59') == approx(1.234567890123450, rel=1e-14)
assert func_g8(b'\x13\xC0\xCA\xC0\x8C\x42\xDD\x59') == approx(-1.234567890123450, rel=1e-14)
```
|
{
"source": "Jeanluis019/rss-reader",
"score": 3
}
|
#### File: rss_reader/feeds/tasks.py
```python
from logging import getLogger
from django.conf import settings # noqa
from background_task import background
from .models import Feed
logger = getLogger(__name__)
# Execute this task 20 seconds after it is called
@background(schedule=20)
def update_feeds_posts():
"""
Iterate over all Feeds in order to
update their posts and make sure
users have the latest news from
their Feeds
"""
logger.debug("Background Task 'update_feeds_posts' started")
for feed in Feed.objects.all():
try:
feed.fetch_latest_posts()
except Exception as error:
            logger.debug(
                'Failed to update posts. '
                f'Feed ID: {feed.id}, '
                f'Error: {error}')
logger.debug("Background Task 'update_feeds_posts' finished")
```
#### File: feeds/tests/test_models.py
```python
from rss_reader.feeds.models import Feed
def test_is_url_valid_method():
"""
Make sure the 'is_url_valid' method
returns False when the passed url is
invalid
"""
is_valid = Feed.is_url_valid('https://a_bad_url.com')
assert is_valid == False
```
#### File: rss_reader/feeds/views.py
```python
from django.shortcuts import render # noqa
from django.views import View # noqa
from django.contrib.auth.mixins import LoginRequiredMixin # noqa
class IndexView(LoginRequiredMixin, View):
template_name = 'feeds/index.html'
def get(self, request):
return render(request, self.template_name)
```
|
{
"source": "Jean-Lytehouse/Lytehouse-Autocam",
"score": 3
}
|
#### File: app/users/models.py
```python
from datetime import datetime, timedelta
from app import db, bcrypt, LOGGER
from app.utils.misc import make_code
from flask_login import UserMixin
def expiration_date():
return datetime.now() + timedelta(days=1)
class AppUser(db.Model, UserMixin):
id = db.Column(db.Integer(), primary_key=True)
email = db.Column(db.String(255), unique=True, nullable=False)
firstname = db.Column(db.String(100), nullable=False)
lastname = db.Column(db.String(100), nullable=False)
camera1Ip = db.Column(db.String(100), nullable=False)
camera1Name = db.Column(db.String(100), nullable=False)
camera2Ip = db.Column(db.String(100), nullable=False)
camera2Name = db.Column(db.String(100), nullable=False)
camera3Ip = db.Column(db.String(100), nullable=False)
camera3Name = db.Column(db.String(100), nullable=False)
password = db.Column(db.String(255), nullable=False)
verified_email = db.Column(db.Boolean(), nullable=True)
verify_token = db.Column(db.String(255), nullable=True, unique=True, default=make_code)
def __init__(self,
email,
firstname,
lastname,
camera1Ip,
camera1Name,
camera2Ip,
camera2Name,
camera3Ip,
camera3Name,
password):
self.email = email
self.firstname = firstname
self.lastname = lastname
self.camera1Ip = camera1Ip
self.camera1Name = camera1Name
self.camera2Ip = camera2Ip
self.camera2Name = camera2Name
self.camera3Ip = camera3Ip
self.camera3Name = camera3Name
self.set_password(password)
self.verified_email = True
def set_password(self, password):
        self.password = bcrypt.generate_password_hash(password)
def deactivate(self):
self.active = False
def verify(self):
self.verified_email = True
def update_email(self, new_email):
self.verified_email = False
self.verify_token = make_code()
self.email = new_email
def delete(self):
self.is_deleted = True
self.deleted_datetime_utc = datetime.now()
class PasswordReset(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('app_user.id'))
code = db.Column(db.String(255), unique=True, default=make_code)
date = db.Column(db.DateTime(), default=expiration_date)
user = db.relationship(AppUser)
db.UniqueConstraint('user_id', 'code', name='uni_user_code')
def __init__(self, user):
self.user = user
```
|
{
"source": "JeanMainguy/MeTAfisher",
"score": 2
}
|
#### File: MeTAfisher/metafisher/compute_tadb_stat.py
```python
import Object_MetaF as obj
import Function_MetaF as fct
import Orf_MetaF as orf
import OutputFct_MetaF as out
import Score_MetaF as score
import csv
import argparse
import os
import logging
import sys
import gzip
import re
import json
def encoder(filename, dict):
logging.info(f'writing json file {filename}')
with open(filename, 'w') as file:
json.dump(dict, file, indent=4)
def write_table(filename, domain_domain_dict):
logging.info(f'writing tsv file {filename}')
domain_list = list(domain_domain_dict)
with open(filename, 'w') as fl:
fl.write('domains\t'+'\t'.join(domain_list)+'\n')
for d in domain_list:
line = [d]
for d_n in domain_list:
nb_pairs_in_common = '0' if d_n not in domain_domain_dict[d] else str(
domain_domain_dict[d][d_n])
line.append(nb_pairs_in_common)
fl.write('\t'.join(line)+'\n')
def init_logging(verbose_flag):
"""Initialise logging."""
if verbose_flag:
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)
logging.info('Mode verbose ON')
logging.info('command line: %s', ' '.join(sys.argv))
else:
logging.basicConfig(format="%(levelname)s: %(message)s")
def parse_arguments():
"""Parse command line arguments.
Returns Options object with command line argument values as attributes.
Will exit the program on a command line error.
"""
project_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_tadb_stat_dir = os.path.join(project_dir, "TADB_stat")
default_hmm_db = os.path.join(default_tadb_stat_dir, 'TA_domains.hmm')
parser = argparse.ArgumentParser(
prog='MeTAfisher',
description='Identification of Toxin Antitoxin Systems',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--toxin_faa',
help="Path to the reference toxin protein sequence.", required=True)
parser.add_argument('--antitoxin_faa',
help="Path to the reference antitoxin protein sequence.", required=True)
parser.add_argument("-o", '--outdir',
help="Path to the directory where files will be written",
default=default_tadb_stat_dir)
parser.add_argument("--hmm_db", default=default_hmm_db,
help="path of the HMM database.")
parser.add_argument("-v", "--verbose", help="increase output verbosity",
action="store_true")
args = parser.parse_args()
return args
def domain_domain_pair_association(domain_type_dict, opposite_type_dict={'T': 'AT', 'AT': 'T'}):
"""
Compute domain domain association.
domain_type_dict is a {domain_name:{T:[gene_ids], AT:[gene_ids]} ... }
"""
domain_domain_dict = {}
for domain, type2genes in domain_type_dict.items():
domain_dict = domain_domain_dict.setdefault(domain, {})
for domain_next, type2genes_next in domain_type_dict.items():
if domain_next in domain_dict:
continue
domain_dict_next = domain_domain_dict.setdefault(domain_next, {})
pairs = []
for type, opposite_type in opposite_type_dict.items():
genes = type2genes.setdefault(type, [])
genes_next = type2genes_next.setdefault(opposite_type, [])
pairs += list(set(genes_next) & set(genes))
if len(pairs) > 0:
domain_dict[domain_next] = pairs
domain_dict_next[domain] = pairs
return domain_domain_dict
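# Illustrative example of the shapes handled by domain_domain_pair_association
# (hypothetical domain names and gene numbers, not taken from the source):
#   domain_type_dict = {'RelE': {'T': ['1', '2'], 'AT': []},
#                       'Phd': {'T': [], 'AT': ['1']}}
#   domain_domain_pair_association(domain_type_dict)
#   -> {'RelE': {'Phd': ['1']}, 'Phd': {'RelE': ['1']}}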
def extract_db_and_gene_number(gene_id):
gene_number_pattern = re.compile(r'[^\d]+(\d+)$')
try:
gene_number = gene_number_pattern.match(gene_id).group(1)
except AttributeError:
        raise AttributeError(f'regex pattern failed to extract gene number in id {gene_id}.')
db_name = gene_id.split('|')[0]
return db_name, gene_number
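# Illustrative usage of extract_db_and_gene_number, assuming ids of the form
# '<db>|T<number>' or '<db>|AT<number>' as parsed elsewhere in this script:
#   extract_db_and_gene_number('TADB|T123') -> ('TADB', '123')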
def parse_tadb_ids(seq_file):
gene_number_to_id = {}
with open(seq_file) as fl:
gene_ids = (l.split()[0][1:] for l in fl if l.startswith('>'))
for i, id in enumerate(gene_ids):
db_and_nb = extract_db_and_gene_number(id)
if db_and_nb in gene_number_to_id:
#raise ValueError(f'db and gene number {db_and_nb} are used twice to identify a sequence in {seq_file}')
logging.critical(f'Gene id {id} is used more than once in {seq_file}')
gene_number_to_id[db_and_nb] = id
# assert i == len(gene_number_to_id), "Same gene number is used at least twice. Check consistency of sequence id of {seq_file}"
return gene_number_to_id
def get_genes_association(toxin_file, antitoxin_file):
toxin_id_to_number = parse_tadb_ids(toxin_file)
antitoxin_id_to_number = parse_tadb_ids(antitoxin_file)
if len(toxin_id_to_number) != len(antitoxin_id_to_number):
logging.critical(
f"Not the same number of toxin genes ({len(toxin_id_to_number)}) and antitoxin genes ({len(antitoxin_id_to_number)}). check files: {toxin_file} and {antitoxin_file}")
gene_numbers = set(toxin_id_to_number) | set(antitoxin_id_to_number)
genes_association = {}
genes_type = {}
    for db_name, gene_number in gene_numbers:
        # Reset both ids so a missing partner in a previous iteration
        # cannot leak into the current pair.
        toxin_id = antitoxin_id = None
        try:
antitoxin_id = antitoxin_id_to_number[(db_name, gene_number)]
genes_type[antitoxin_id] = {'AT': 1, 'T': 0}
except KeyError:
logging.critical(f'No antitoxin gene with id {db_name}|AT{gene_number}')
try:
toxin_id = toxin_id_to_number[(db_name, gene_number)]
genes_type[toxin_id] = {'AT': 0, 'T': 1}
except KeyError:
logging.critical(f'No toxin with id {db_name}|T{gene_number}')
if toxin_id and antitoxin_id:
genes_association[antitoxin_id] = {toxin_id: 1}
genes_association[toxin_id] = {antitoxin_id: 1}
return genes_association, genes_type
def domains_genes_association(hmm_result, domain_type_dict):
gene_id_parser = re.compile(r"(?P<db_name>[^|]+)\|(?P<type>[A,T]{1,2})(?P<gene_number>\d+)")
type_name = set()
for hmmhit in fct.hmm_result_parser(hmm_result):
domain = hmmhit.query_name
re_result = gene_id_parser.match(hmmhit.target_name)
type = re_result.group("type") # T or AT
gene_number = re_result.group("gene_number")
domain_type_dict.setdefault(domain, {}).setdefault(type, []).append(gene_number)
# domain_gene_dict.setdefault(domain, []).append(gene_number)
type_name.add(type)
assert len(type_name) == 1
return type_name.pop()
def run_hmmsearch(faa_file, hmm_db, outdir):
element_to_rm = -2 if faa_file.endswith('.gz') else -1
simple_name = '.'.join(os.path.basename(faa_file).split('.')[:-element_to_rm])
hmm_result = os.path.join(outdir, f"{simple_name}.hmmsearch")
fct.hmmsearch(faa_file, hmm_db, hmm_result)
return hmm_result
def main():
"""Orchestrate the execution of the program"""
args = parse_arguments()
init_logging(args.verbose)
toxin_seq_file = args.toxin_faa
antitoxin_seq_file = args.antitoxin_faa
outdir = args.outdir
hmm_db = args.hmm_db
domain_type_dict = {}
# get_genes_association(toxin_seq_file, 'T', domain_type_dict)
genes_association, genes_type = get_genes_association(toxin_seq_file, antitoxin_seq_file)
hmm_result_T = run_hmmsearch(toxin_seq_file, hmm_db, outdir)
type_T = domains_genes_association(hmm_result_T, domain_type_dict)
hmm_result_AT = run_hmmsearch(antitoxin_seq_file, hmm_db, outdir)
type_AT = domains_genes_association(hmm_result_AT, domain_type_dict)
types = {type_T: type_AT, type_AT: type_T}
domain_domain_dict = domain_domain_pair_association(domain_type_dict, types)
# count
domain_domain_count_asso = {}
for d, d_n in domain_domain_dict.items():
domain_domain_count_asso[d] = {k: len(v) for k, v in d_n.items()}
table_file = os.path.join(outdir, 'domain_domain_association.tsv')
write_table(table_file, domain_domain_count_asso)
domain_domain_count_asso.update(genes_association)
json_d_d_file = os.path.join(outdir, 'domain_domain_association.json')
encoder(json_d_d_file, domain_domain_count_asso)
for domain, type_dict in domain_type_dict.items():
domain_type_dict[domain] = {type_gene: len(set(gene_ids))
for type_gene, gene_ids in type_dict.items()}
domain_type_dict.update(genes_type)
json_d_t_file = os.path.join(outdir, 'domain_gene_type.json')
encoder(json_d_t_file, domain_type_dict)
if __name__ == '__main__':
main()
```
|
{
"source": "jean-maradiaga/python-data-mining",
"score": 3
}
|
#### File: python-data-mining/Chapter12/extract_posts.py
```python
import os
import re
from mrjob.job import MRJob
from mrjob.step import MRStep
word_search_re = re.compile(r"[\w']+")
class ExtractPosts(MRJob):
post_start = False
post = []
def mapper(self, key, line):
filename = os.environ["map_input_file"]
gender = filename.split(".")[1]
try:
docnum = int(filename[0])
except:
docnum = 8
# remove leading and trailing whitespace
line = line.strip()
if line == "<post>":
self.post_start = True
elif line == "</post>":
self.post_start = False
yield gender, repr("\n".join(self.post))
self.post = []
elif self.post_start:
self.post.append(line)
if __name__ == '__main__':
ExtractPosts.run()
```
|
{
"source": "jeanmarc2019/PTHacks2019-Planning",
"score": 2
}
|
#### File: app/flaskApp/config.py
```python
import configparser
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path += '/cfg.ini'
class Configuration(object):
def __init__(self,debug=False):
section = "Flask-debug" if debug else "Flask"
cfg = configparser.ConfigParser()
cfg.read(dir_path if debug else "/var/www/html/flaskApp/cfg.ini")
self.debug = cfg.getboolean(section, "DEBUG")
self.csrf_enabled = cfg.getboolean(section,"CSRF_ENABLED")
self.threads_per_page = cfg.getint(section,"THREADS_PER_PAGE")
self.port = cfg.getint(section,"PORT")
self.host = cfg.get(section,"HOST")
```
|
{
"source": "JeanMarc-Moly/mugimugi_client_api",
"score": 3
}
|
#### File: mugimugi_client_api/mugimugi_client_api/abstract_paginated.py
```python
from abc import ABC
from dataclasses import dataclass
from enum import Enum
from typing import AsyncGenerator, ClassVar
from mugimugi_client_api_entity.common.element import Element
from mugimugi_client_api_entity.root import ValidRoot
from ._constants import PARALLEL_PAGES_COUNT, RESPONSE_MAX_COUNT
from .abstract import Params
from .abstract_xml import AbstractXMLAction, AsyncClient
@dataclass
class _AbstractPaginatedAction(ABC):
page: int = 0
class AbstractPaginatedAction(AbstractXMLAction, _AbstractPaginatedAction, ABC):
class Parameter(Enum):
PAGE = "page" # int > 0
PAGES: ClassVar[int] = PARALLEL_PAGES_COUNT
PAGE_SIZE: ClassVar[int] = RESPONSE_MAX_COUNT
def params(self) -> Params:
yield from super().params()
yield AbstractPaginatedAction.Parameter.PAGE.value, self.page
async def query_bulks_fast(
self, client: AsyncClient
) -> AsyncGenerator[ValidRoot, None]:
current, swap = 0, self.page
while True:
self.page = current = current + 1
query = self.get_query(client)
self.page = swap
yield self.send_and_parse(client, query)
async def query_elements(
self, client: AsyncClient
) -> AsyncGenerator[Element, None]:
current, swap = 0, self.page
count = max_ = self.PAGE_SIZE
while count == max_:
self.page = current = current + 1
result = await self.send_and_parse(client, self.get_query(client))
self.page = swap
elements = result.elements
for e in elements:
yield e
count = len(elements)
```
#### File: mugimugi_client_api/mugimugi_client_api/abstract_user_list.py
```python
from __future__ import annotations
from abc import ABC
from dataclasses import dataclass
from enum import Enum
from typing import ClassVar, Iterable
from mugimugi_client_api_entity.enum import ElementNode
from ._constants import REQUEST_EDIT_LIST_MAX_COUNT
from .abstract import Params
from .abstract_xml import AbstractXMLAction
@dataclass
class _AbstractUserListAction(ABC):
books: set[int]
def __init__(self, books: Iterable[int]):
if not books:
raise Exception("Requires at least one book")
self.books = set(books)
class AbstractUserListAction(AbstractXMLAction, _AbstractUserListAction, ABC):
class Parameter(Enum):
ID = "ID"
CONTENT_SEPARATOR: ClassVar[str] = ","
BOOK_ID_PREFIX: ClassVar[str] = ElementNode.BOOK.value
# Beyond this count, books are ignored.
MAX_COUNT_OF_BOOK = REQUEST_EDIT_LIST_MAX_COUNT
def params(self) -> Params:
yield from super().params()
p = self.BOOK_ID_PREFIX
yield self.Parameter.ID.value, self.CONTENT_SEPARATOR.join(
p + str(b) for b in self.books
)
```
#### File: mugimugi_client_api/mugimugi_client_api/search_object.py
```python
from dataclasses import dataclass
from datetime import date
from enum import Enum
from typing import ClassVar, Iterable, Optional
from mugimugi_client_api_entity.enum import ElementPrefix
from mugimugi_client_api_entity.root import BookRoot
from .abstract_paginated import AbstractPaginatedAction, Params
from .enum import Action, ObjectType, SortOrder, YesNo
@dataclass
class SearchObject(AbstractPaginatedAction):
class SortCriterion(Enum):
TITLE = "title"
JAPANESE_TITLE = "jtitle"
PUBLISHED_DATE = "date"
PAGES_COUNT = "pages"
PAGE_VIEWS_COUNT = "page_views"
SCORE = "score"
SUBMITTED_DATE = "added"
LAST_MODIFICATION_DATE = "changed"
KATAKANA_TITLE = "kana"
class Parameter(Enum):
# Q=s&
TITLE = "sn" # str
# MATCH_TYPE = "match" # match # Seems to not work on API
TYPE = "flist" # object
RELEASE_DATE_FROM = "date" # str YYYY-MM-DD
RELEASE_DATE_TO = "date2" # str YYYY-MM-DD
CONTRIBUTOR = "cont"
SUBMITTER = "sub"
SORT_CRITERION = "order" # SortCriterion
SORT_ORDER = "flow" # SortOrder
IS_ADULT_ONLY = "age" # yes_no
IS_ANTHOLOGY = "anth" # yes_no
IS_COPY_BOOK = "bcopy" # yes_no
IS_FREE = "FREE" # yes_no
IS_CENSORED = "scen" # yes_no
# Regroup several parameters of the HTML API:
# - SLIST_CIRCLE,
# - SLIST_AUTHOR,
# - SLIST_PARODY,
# - SLIST_CHAR,
# - SLIST_CONTENT,
# - SLIST_GENRE,
# - SLIST_CONVE,
# - SLIST_COLL
# - SLIST_PUBL
# - SLIST_IMPRINT
# Will have to include ElementPrefix as prefix of each element
# All elements are pipe separated.
# ex: &slist=C:Electro|K:Swimsuit|P:Moetan
CONTENT = "slist"
ACTION: ClassVar[Action] = Action.SEARCH_OBJECT
ROOT: ClassVar[type] = BookRoot
CONTENT_SEPARATOR: ClassVar[str] = "|"
CONTENT_ASSOCIATION: ClassVar[str] = ":"
title: Optional[str] = None
is_adult_only: Optional[YesNo] = None
is_anthology: Optional[YesNo] = None
is_copy_book: Optional[YesNo] = None
is_free: Optional[YesNo] = None
is_censored: Optional[YesNo] = None
object_type: Optional[ObjectType] = None
date_from: Optional[date] = None
date_to: Optional[date] = None
circles: Optional[Iterable[str]] = None
authors: Optional[Iterable[str]] = None
parodies: Optional[Iterable[str]] = None
characters: Optional[Iterable[str]] = None
contents: Optional[Iterable[str]] = None
genres: Optional[Iterable[str]] = None
convention: Optional[str] = None
collection: Optional[str] = None
publisher: Optional[str] = None
imprint: Optional[str] = None
contributor: Optional[str] = None
submitter: Optional[str] = None
sort_criterion: Optional[SortCriterion] = None
sort_order: Optional[SortOrder] = None
def params(self) -> Params:
yield from super().params()
p = self.Parameter
if title := self.title:
yield p.TITLE.value, title
if contributor := self.contributor:
yield p.CONTRIBUTOR.value, contributor
if submitter := self.submitter:
yield p.SUBMITTER.value, submitter
if is_adult_only := self.is_adult_only:
yield p.IS_ADULT_ONLY.value, is_adult_only.value
if is_anthology := self.is_anthology:
yield p.IS_ANTHOLOGY.value, is_anthology.value
if is_copy_book := self.is_copy_book:
yield p.IS_COPY_BOOK.value, is_copy_book.value
if is_free := self.is_free:
yield p.IS_FREE.value, is_free.value
if is_censored := self.is_censored:
yield p.IS_CENSORED.value, is_censored.value
if object_type := self.object_type:
yield p.TYPE.value, object_type.value
if date_from := self.date_from:
            yield p.RELEASE_DATE_FROM.value, f"{date_from:%Y-%m-%d}"
        if date_to := self.date_to:
            yield p.RELEASE_DATE_TO.value, f"{date_to:%Y-%m-%d}"
if sort_criterion := self.sort_criterion:
yield p.SORT_CRITERION.value, sort_criterion.value
if sort_order := self.sort_order:
yield p.SORT_ORDER.value, sort_order.value
a = self.CONTENT_ASSOCIATION
i = ElementPrefix
query: list[str] = []
if circles := self.circles:
t = i.CIRCLE.value
query.extend(f"{t}{a}{c}" for c in set(circles))
if authors := self.authors:
t = i.AUTHOR.value
query.extend(f"{t}{a}{c}" for c in set(authors))
if parodies := self.parodies:
t = i.PARODY.value
query.extend(f"{t}{a}{c}" for c in set(parodies))
if characters := self.characters:
t = i.CHARACTER.value
query.extend(f"{t}{a}{c}" for c in set(characters))
if contents := self.contents:
t = i.CONTENT.value
query.extend(f"{t}{a}{c}" for c in set(contents))
if genres := self.genres:
t = i.GENRE.value
query.extend(f"{t}{a}{c}" for c in set(genres))
if convention := self.convention:
query.append(f"{i.CONVENTION.value}{a}{convention}")
if collection := self.collection:
query.append(f"{i.COLLECTION.value}{a}{collection}")
if publisher := self.publisher:
query.append(f"{i.PUBLISHER.value}{a}{publisher}")
if imprint := self.imprint:
query.append(f"{i.IMPRINT.value}{a}{imprint}")
if query:
yield p.CONTENT.value, self.CONTENT_SEPARATOR.join(query)
```
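For illustration, the `slist` value assembled at the end of `params()` is a pipe-separated list of prefix:term pairs, matching the example quoted in the comment above; a standalone sketch of that formatting (the prefixes and terms are just examples):

```python
CONTENT_SEPARATOR = "|"
CONTENT_ASSOCIATION = ":"
# (prefix, term) pairs as they would come from circles/characters/parodies above.
terms = [("C", "Electro"), ("K", "Swimsuit"), ("P", "Moetan")]
slist = CONTENT_SEPARATOR.join(f"{p}{CONTENT_ASSOCIATION}{t}" for p, t in terms)
print(slist)  # -> C:Electro|K:Swimsuit|P:Moetan
```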
|
{
"source": "JeanmarieAlder/bollards-api",
"score": 3
}
|
#### File: bollards_api/api/routes.py
```python
import json
from os import name
from flask_cors import CORS
from bollards_api.models import Bollard
from flask import Blueprint, jsonify
from bollards_api.api.utils import get_neighbours_by_number
api = Blueprint('api', __name__, url_prefix='/api/v1')
CORS(api)
@api.route('/bollards/list')
def bollards_list():
bollards = Bollard.query.all()
resp = []
for bollard in bollards:
resp.append({
"id": bollard.id,
"b_number": bollard.b_number,
"b_letter": bollard.b_letter,
"b_name": bollard.b_name,
"image_icon": bollard.image_icon
})
return jsonify(resp)
@api.route('/bollards/markers')
def bollards_markers():
bollards = Bollard.query.all()
resp = []
for bollard in bollards:
resp.append({
"id": bollard.id,
"b_number": bollard.b_number,
"b_letter": bollard.b_letter,
"b_name": bollard.b_name,
"b_type": bollard.b_type,
"image_icon": bollard.image_icon,
"b_lat": str(bollard.b_lat),
"b_lng": str(bollard.b_lng)
})
return jsonify(resp)
@api.route('/bollards/details/<int:bollard_id>')
def bollards_details(bollard_id):
bollard = Bollard.query.filter_by(id=bollard_id).first_or_404()
images = []
for img in bollard.images:
images.append(img.uri)
neighbours = get_neighbours_by_number(bollard)
return jsonify({
'id': bollard.id,
'b_number': bollard.b_number,
'b_letter': bollard.b_letter,
'b_type': bollard.b_type,
'b_name': bollard.b_name,
'comment': bollard.comment,
'b_lat': str(bollard.b_lat),
'b_lng': str(bollard.b_lng),
'image_icon': bollard.image_icon,
'images': images,
'neighbours': neighbours
})
```
#### File: bollards-api/bollards_api/__init__.py
```python
from flask import Flask
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from bollards_api.config import Config
db = SQLAlchemy()
migrate = Migrate()
bcrypt = Bcrypt()
login_manager = LoginManager()
login_manager.login_view = 'users.login'
login_manager.login_message_category = 'info'
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
db.init_app(app)
migrate.init_app(app, db)
bcrypt.init_app(app)
login_manager.init_app(app)
from bollards_api.api.routes import api
from bollards_api.bollards.routes import bollards
from bollards_api.errors.handlers import errors
from bollards_api.main.routes import main
from bollards_api.users.routes import users
app.register_blueprint(main)
app.register_blueprint(users)
app.register_blueprint(bollards)
app.register_blueprint(api)
app.register_blueprint(errors)
return app
```
#### File: bollards_api/users/utils.py
```python
import os
import secrets
from flask import current_app
from PIL import Image
def crop_center(pil_img, crop_width, crop_height):
img_width, img_height = pil_img.size
return pil_img.crop(((img_width - crop_width) // 2,
(img_height - crop_height) // 2,
(img_width + crop_width) // 2,
(img_height + crop_height) // 2))
def crop_max_square(pil_img):
return crop_center(pil_img, min(pil_img.size), min(pil_img.size))
def crop_save_picture(new_picture, folder_path, fixed_square_size):
random_hex = secrets.token_hex(8)
_, file_ext = os.path.splitext(new_picture.filename)
picture_filename = random_hex + file_ext
picture_path = os.path.join(current_app.root_path, 'static', 'img',
folder_path, picture_filename)
output_size = (fixed_square_size, fixed_square_size)
i = Image.open(new_picture)
i_width = i.width
i_height = i.height
if i_width > fixed_square_size and i_height > fixed_square_size:
i = crop_max_square(i)
# Reduce the size of picture
i.thumbnail(output_size, Image.ANTIALIAS)
i.save(picture_path)
return picture_filename
def save_picture_profile(new_picture):
fixed_square_size = 150
folder_path = 'profile_pics'
return crop_save_picture(new_picture, folder_path, fixed_square_size)
```
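A quick sketch of the center-crop helpers above, assuming Pillow is installed (the image is synthetic):

```python
from PIL import Image

img = Image.new("RGB", (400, 300))
square = crop_max_square(img)
print(square.size)  # -> (300, 300): the largest centered square of the original
```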
#### File: bollards-api/tests/test_main.py
```python
from bollards_api.main.forms import ContactForm
def test_home_page(client):
"""Test that home page displays correctly"""
rv = client.get('/')
assert b'<h1 class="text-center">Welcome to Bollards API</h1>' in rv.data
assert b'<p class="card-text">Discover all bollards between Vaud, Switzerland and France.</p>' in rv.data
assert b'Welcome to the bollards.ch API.' in rv.data
# /home should be equal to /
rv_home = client.get('/home')
assert rv_home.data == rv.data
def test_about_page(client):
rv = client.get('/about')
assert b'42' in rv.data
def test_about_page2(client):
rv = client.get('/about')
assert b'42' in rv.data
def test_contact_form_works(app):
"""Currently not in use"""
with app.app_context():
contactForm = ContactForm()
assert True
def test_404_on_bad_request(client):
rv = client.get('/randomlink')
assert b'<h1>Looks like you ran into 404.</h1>' in rv.data
```
|
{
"source": "JeanmarieAlder/python-swissalti3d-fetcher",
"score": 3
}
|
#### File: JeanmarieAlder/python-swissalti3d-fetcher/util.py
```python
import pandas as pd
df_urls_done = pd.read_csv("input/done.csv", header=None)
def file_allready_dl(url):
for url_done in df_urls_done[0]:
if url_done == url:
print(f"File {url} already downloaded, skipping.")
return True
return False
```
|
{
"source": "jeanmarie-dormoy/GDEB-archi-PS5",
"score": 3
}
|
#### File: GDEB-archi-PS5/assembly/assembleur.py
```python
import sys
def getInstructions(file) :
f = open(file, "r")
lines = f.readlines()
for i in range(0,len(lines)) :
lines[i] = lines[i].strip()
return lines
def translate(lines) :
f = open("out", "w")
res = "v2.0 raw\n"
for line in lines :
spacesplit = line.split(" ")
if len(spacesplit) > 2 :
for i in range(2,len(spacesplit)) :
spacesplit[1]+=spacesplit[i].strip()
spacesplit[0] = spacesplit[0].upper()
if spacesplit[0]=='LDR' :
res += str(getLDR(spacesplit[1])) +"\n"
if spacesplit[0]=='CMP' :
res += str(getCMP(spacesplit[1])) +"\n"
if spacesplit[0]=='STR' :
res += str(getSTR(spacesplit[1])) +"\n"
        if spacesplit[0][0] == 'B' and spacesplit[0] != 'BICS' :
res += conditional(spacesplit,searchStr(lines,spacesplit[1]+':')) + "\n"
if spacesplit[0]=='MOVS' or spacesplit[0]=='MOV':
res += str(getMOVS(spacesplit[1])) +"\n"
if spacesplit[0]=='MUL' or spacesplit[0]=='MULS' :
res += str(getMUL(spacesplit[1])) +"\n"
if spacesplit[0]=='ADDS' :
res += str(getADDS(spacesplit[1])) +"\n"
if spacesplit[0]=='ADD' :
res += str(getADD(spacesplit[1])) +"\n"
if spacesplit[0]=='LSLS' or spacesplit[0]=='LSL' :
res += str(getLSLS(spacesplit[1])) +"\n"
        if spacesplit[0]=='LSRS' :
            res += str(getLSRS(spacesplit[1])) +"\n"
if spacesplit[0]=='ASRS' :
res += str(getASRS(spacesplit[1])) +"\n"
if spacesplit[0]=='SUBS' or spacesplit[0]=='SUB' :
res += str(getSUBS(spacesplit[1])) +"\n"
if spacesplit[0]=='ANDS' :
res += str(getANDS(spacesplit[1])) +"\n"
if spacesplit[0]=='EORS' :
res += str(getEORS(spacesplit[1])) +"\n"
if spacesplit[0]=='ADCS' :
res += str(getADCS(spacesplit[1])) +"\n"
if spacesplit[0]=='SBCS' :
res += str(getSBCS(spacesplit[1])) +"\n"
if spacesplit[0]=='RORS' :
res += str(getRORS(spacesplit[1])) +"\n"
if spacesplit[0]=='RSBS' :
res += str(getRSBS(spacesplit[1])) +"\n"
if spacesplit[0]=='CMN' :
res += str(getCMN(spacesplit[1])) +"\n"
if spacesplit[0]=='ORRS' :
res += str(getORRS(spacesplit[1])) +"\n"
if spacesplit[0]=='BICS' :
res += str(getBICS(spacesplit[1])) +"\n"
if spacesplit[0]=='MVNS' :
res += str(getMVNS(spacesplit[1])) +"\n"
f.write(res)
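# reformat(val, length) zero-extends non-negative values to `length` bits and
# encodes negative values in two's complement over `length` bits,
# e.g. reformat(5, 8) -> '00000101' and reformat(-3, 8) -> '11111101'.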
def reformat(val,length):
if (int(val)<0) :
temp = -int(val)
temp = bin(temp)[2:]
new=''
for i in range(0,len(temp)) :
if temp[i] =='0':
new+='1'
if temp[i] =='1':
new+='0'
leng = len(new)
new = int(new,2)+1
new = bin(new)[2:]
for i in range (0,leng-len(new)) :
new = '0'+new
nbzero = length - len(new)
for i in range(0,nbzero) :
new = '1'+new
return new
else :
val = bin(int(val))[2:]
nbzero = length - len(val)
for i in range(0,nbzero) :
val = "0"+val
return val
def getANDS(values) :
values= values.split(',')
rdn = reformat(values[0][1],3)
rm = reformat(values[1][1],3)
res = "0100000000" + rm + rdn
return hex(int(res, 2))[2:]
def getADCS(values) :
values= values.split(',')
rdn = reformat(values[0][1],3)
rm = reformat(values[1][1],3)
res = "0100000101" + rm + rdn
return hex(int(res, 2))[2:]
def getSBCS(values) :
values= values.split(',')
rdn = reformat(values[0][1],3)
rm = reformat(values[1][1],3)
res = "0100000110" + rm + rdn
return hex(int(res, 2))[2:]
def getTST(values) :
values= values.split(',')
rn = reformat(values[0][1],3)
rm = reformat(values[1][1],3)
res = "0100001000" + rm + rn
return hex(int(res, 2))[2:]
def getRSBS(values) :
values= values.split(',')
rd = reformat(values[0][1],3)
rn = reformat(values[1][1],3)
res = "0100001001" + rn + rd
return hex(int(res, 2))[2:]
def getRORS(values) :
values= values.split(',')
rdn = reformat(values[0][1],3)
rm = reformat(values[1][1],3)
res = "0100000111" + rm + rdn
return hex(int(res, 2))[2:]
def getEORS(values) :
values= values.split(',')
rdn = reformat(values[0][1],3)
rm = reformat(values[1][1],3)
res = "0100000001" + rm + rdn
return hex(int(res, 2))[2:]
def getLSLS(values) :
if (values.find('#')!=-1) :
        imm = values.split('#')[1]
        imm = reformat(imm,5)
        # split the operands before indexing the registers (same pattern as getMOVS/getLDR)
        values = values.split(',')
        rd = reformat(values[0][len(values[0])-1],3)
        rm = reformat(values[1][len(values[1])-1],3)
res = '00000'+imm+rm+rd
return hex(int(res, 2))[2:]
else :
values= values.split(',')
rd = reformat(values[0][1],3)
rm = reformat(values[1][1],3)
res = '0100000010'+rm+rd
return hex(int(res, 2))[2:]
def getASRS(values) :
if (values.find('#')!=-1) :
        imm = values.split('#')[1]
        imm = reformat(imm,5)
        values = values.split(',')
        rd = reformat(values[0][len(values[0])-1],3)
        rm = reformat(values[1][len(values[1])-1],3)
res = '00010'+imm+rm+rd
return hex(int(res, 2))[2:]
else :
values= values.split(',')
rd = reformat(values[0][1],3)
rm = reformat(values[1][1],3)
res = '0100000100'+rm+rd
return hex(int(res, 2))[2:]
def getLSRS(values) :
if (values.find('#')!=-1):
        imm = values.split('#')[1]
        imm = reformat(imm,5)
        values = values.split(',')
        rd = reformat(values[0][len(values[0])-1],3)
        rm = reformat(values[1][len(values[1])-1],3)
res = '00001'+imm+rm+rd
return hex(int(res, 2))[2:]
else :
values= values.split(',')
rd = reformat(values[0][1],3)
rm = reformat(values[1][1],3)
res = '0100000011'+rm+rd
return hex(int(res, 2))[2:]
def getLDR(values) :
values = values.replace('[','')
values = values.replace(']','')
imm = reformat(values.split('#')[1],8)
values = values.split(',')
rt = reformat(values[0][len(values[0])-1],3)
res = "10011" + rt + imm
return hex(int(res, 2))[2:]
def getADDS(values) :
values = values.replace('[','')
values = values.replace(']','')
if (values.find('#') != -1):
imm = values.split('#')[1]
imm = reformat(imm,3)
values = values.split(',')
rd = reformat(values[0][1],3)
rn = reformat(values[1][1],3)
res = "0001110" + imm + rn + rd
return hex(int(res, 2))[2:]
else :
values = values.split(',')
rd = reformat(values[0][1],3)
rn = reformat(values[1][1],3)
rm = reformat(values[2][1],3)
res = "0001100" + rm + rn + rd
return hex(int(res, 2))[2:]
def getSUBS(values) :
values = values.replace('[','')
values = values.replace(']','')
if((values.find("SP") !=-1 or values.find("sp")!=-1) and values.find('#') != -1) :
values = values.split('#')
a = reformat(values[1],7)
a = '101100001' + a
return hex(int(a, 2))[2:]
if (values.find('#') != -1):
imm = reformat(values.split('#')[1],3)
values = values.split(',')
rd = reformat(values[0][1],3)
rn = reformat(values[1][1],3)
res = "0001111" + imm + rn + rd
return hex(int(res, 2))[2:]
else :
values = values.split(',')
print(values[0][1])
rd = reformat(values[0][1],3)
rn = reformat(values[1][1],3)
rm = reformat(values[2][1],3)
res = "0001101" + rm + rn + rd
return hex(int(res, 2))[2:]
def getADD(values) :
imm = reformat(values.split('#')[1],7)
res = "101100000"+imm
return hex(int(res, 2))[2:]
def getMUL(values) :
values = values.split(',')
rdm = reformat(values[0][len(values[0])-1],3)
    rn = reformat(values[1][len(values[1])-1],3)
res = '0100001101'+rn+rdm
return hex(int(res, 2))[2:]
def getMOVS(values) :
if values.find('#')!=-1 :
imm = values.split('#')[1]
values = values.split(',')
imm = reformat(imm,8)
part2 = reformat(values[0][len(values[0])-1],3)
res = "00100" +part2+ imm
return hex(int(res, 2))[2:]
return '0000'
def getSTR(values) :
values = values.replace('[','')
values = values.replace(']','')
imm = values.split('#')[1]
imm = reformat(imm,8)
values = values.split(',')
part2 = reformat(values[0][len(values[0])-1],3)
res = "10010" + part2 + imm
return hex(int(res, 2))[2:]
def getCMN(values) :
values = values.split(",")
rm = reformat(values[1][1],3)
rn = reformat(values[0][1],3)
res = "0100001011"+rm+rn
return hex(int(res, 2))[2:]
def getORRS(values) :
values = values.split(",")
rm = reformat(values[1][1],3)
    rdn = reformat(values[0][1],3)
res = "0100001100"+rm+rdn
return hex(int(res, 2))[2:]
def getBICS(values) :
values = values.split(",")
rm = reformat(values[1][1],3)
rdn = reformat(values[0][1],3)
res = "0100001110"+rm+rdn
return hex(int(res, 2))[2:]
def getMVNS(values) :
values = values.split(",")
rm = reformat(values[1][1],3)
rd = reformat(values[0][1],3)
res = "0100001111"+rm+rd
return hex(int(res, 2))[2:]
def getCMP(values) :
values = values.split(",")
rm = reformat(values[1][1],3)
rn = reformat(values[0][1],3)
res = "0100001010"+rm+rn
return hex(int(res, 2))[2:]
def searchStr(lines,strn) :
i=0
d = 0
while i<len(lines) :
if (lines[i][0]=='@') :
d+= 1
if lines[i][len(lines[i])-1] == ':' :
d += 1
if lines[i] == strn:
return i - d + 1
i+=1
return 0;
def getSUB(line):
line = line.split('#')
a = reformat(line[1],7)
a = '101100001' + a
return hex(int(a, 2))[2:]
def conditional(lineArray,position) :
res =""
r = ""
cond = lineArray[0][1:]
imm = bin(position)
imm = imm[2:]
nbzero = 8 - len(imm)
for i in range(0,nbzero) :
imm = "0"+imm
conds = {'EQ' : '0000','NE' : '0001','CS' : '0010','CC' : '0011','MI' : '0100','PL' : '0101','VS' : '0110','VC' : '0111','HI' : '1000','LS' : '1001','GE' : '1010','LT' : '1011','GT' : '1100','LE' : '1101','AL' : '1110','' : '1110'}
r=conds[cond]
res = "1101"+r+imm
return hex(int(res,2))[2:]
if __name__ == '__main__':
if (len(sys.argv) != 2) :
print("N'oubliez pas d'ajouter en argument le fichier d'entree")
else :
translate(getInstructions(sys.argv[1]))
print("Ecriture reussie dans le fichier \'out\'")
```
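A small usage sketch, assuming the functions above are importable; the file name `demo.s` is illustrative, and `MOVS R0,#5` assembles to the Thumb halfword 0x2005:

```python
with open("demo.s", "w") as f:
    f.write("MOVS R0,#5\n")
translate(getInstructions("demo.s"))  # writes a Logisim-style 'v2.0 raw' image to the file 'out'
with open("out") as f:
    print(f.read())  # -> "v2.0 raw" followed by "2005"
```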
|
{
"source": "jeanmariepm/vipapi",
"score": 2
}
|
#### File: games/tests/test_player.py
```python
from django.test import client
from rest_framework.test import APIClient
from rest_framework import status
import pytest
@pytest.mark.django_db(transaction=True)
class TestUserCredentials:
def test_browse_players(self):
client = APIClient()
response = client.get('/games/players/')
assert response.status_code == status.HTTP_200_OK
assert len(response.data) == 0
```
#### File: vipapi/home/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
return HttpResponse('Use vipveed.herokuapp.com to access')
```
|
{
"source": "JeanMaritain/spvsdkpy",
"score": 3
}
|
#### File: spvsdkpy/test/tool.py
```python
import os
def backup_file(src_dir, file_name, tgt_dir):
if not os.path.isdir(tgt_dir):
os.makedirs(tgt_dir)
def shasum(path):
shasum_res = os.popen(f"shasum -a 256 {path}").readlines()
sha256, file = shasum_res[0].split()[0], shasum_res[0].split()[1]
print(f"shasum 256 value: {sha256}, file: {file}")
return sha256
def copy(source, target):
os.system(f"cp {source} {target}")
print(f"{source} \ncopied to \n{target}")
_src_path = os.path.expanduser(os.path.join(src_dir, file_name))
_tgt_path = os.path.join(os.getcwd(), tgt_dir, file_name)
if os.path.exists(_tgt_path):
if not shasum(_src_path) == shasum(_tgt_path):
copy(_src_path, _tgt_path)
print("target file updated.")
else:
print("target file stills same.\n")
else:
print("target file not found.")
copy(_src_path, _tgt_path)
def clear_spv_local_data_and_logfile(root_path, wid, clear_all=False):
from os import remove, path, listdir
from shutil import rmtree
keep_log = False
log = path.join(root_path, "spvsdk.log")
data = path.join(root_path, wid)
for _path in listdir(root_path):
full_path = path.join(root_path, _path)
if path.isdir(full_path) and _path != wid:
keep_log = True
if path.isdir(full_path) and _path == wid:
rmtree(data)
print(f"{data} removed")
if path.isdir(full_path) and clear_all:
rmtree(full_path)
print(f"{data} removed")
if path.exists(log):
if not keep_log:
remove(log)
print(f"{log} removed")
class AnsiChr(object):
class T(object):
Default, Bold, Light, Italic, Underline, Blink, Inverse, Invisible = 0, 1, 2, 3, 4, 5, 7, 8
# Non_Bold, Non_Underline, Non_Blink = 22, 24, 25
class C(object):
Black, Red, Green, Yellow, Blue, Magenta, Cyan, White, Clear_ = 0, 1, 2, 3, 4, 5, 6, 7, 8
def __init__(self, style=T.Default, front=C.Black, back=C.White):
self.style = style
self.front = front
self.back = back
@classmethod
def rc(cls, x):
return cls.formatted_str(x, cls.T.Default, cls.C.Red, cls.C.Clear_)
@classmethod
def yb(cls, x):
return cls.formatted_str(x, cls.T.Default, cls.C.Yellow, cls.C.Blue)
@classmethod
def bc(cls, x):
return cls.formatted_str(x, cls.T.Default, cls.C.Blue, cls.C.Clear_)
@classmethod
def formatted_str(cls, content, style=T.Default, front=C.Red, back=C.Cyan):
return f'\033[{str(style)};3{str(front)};4{str(back)}m{str(content)}\033[0m'
def stain(self, content):
return self.formatted_str(content, self.style, self.front, self.back)
@staticmethod
def test():
i = 0
for cfk, cfv in AnsiChr.C.__dict__.items():
if cfk[0:1] != "_":
for tk, tv in AnsiChr.T.__dict__.items():
if tk[0:1] != "_":
for cbk, cbv in AnsiChr.C.__dict__.items():
if cbk[0:1] != "_":
i += 1
s = "{0:3}l {2:2}{1:9} {4}{3:7} {6}{5:7}"\
.format(i, tk, tv, cfk, cfv, cbk, cbv)
a = AnsiChr(tv, cfv, cbv).stain(s)
print(a, end='')
print(" ") if i%9 == 0 else print('', end='')
def ctprint(x, c=AnsiChr.rc, end="\n", t=True):
colored_type = AnsiChr.bc(type(x))
colored_value = c(x) if type(x) == str else AnsiChr.yb(x)
s = f"{colored_type}{colored_value}" if t else f"{colored_value}"
print(s, end=end)
```
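A short usage sketch of the helpers above (the colours depend on the terminal):

```python
print(AnsiChr.rc("error"))    # red foreground shortcut
print(AnsiChr.yb("warning"))  # yellow-on-blue shortcut
print(AnsiChr(AnsiChr.T.Bold, AnsiChr.C.Green, AnsiChr.C.Clear_).stain("ok"))
ctprint({"height": 42})       # prints the value's type in blue, then the value highlighted
AnsiChr.test()                # dumps the whole style/colour matrix
```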
|
{
"source": "jeanmask/opps-feedcrawler",
"score": 2
}
|
#### File: opps/feedcrawler/models.py
```python
import json
import urllib
import urlparse
from random import getrandbits
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from django.utils import timezone
from opps.core.models import Publishable, Slugged
from opps.containers.models import Container
from opps.channels.models import Channel
from opps.images.models import Image
from opps.utils.text import unescape
RSS_PROCESSOR = 'opps.feedcrawler.processors.rss.RSSProcessor'
RSS_ACTIONS = 'opps.feedcrawler.actions.rss.RSSActions'
def _url_fix(s, charset='utf-8'):
"""Sometimes you get an URL by a user that just isn't a real
URL because it contains unsafe characters like ' ' and so on. This
function can fix some of the problems in a similar way browsers
handle data entered by the user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
:param charset: The target charset for the URL if the url was
given as unicode string.
"""
if isinstance(s, unicode):
s = s.encode(charset, 'ignore')
scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
path = urllib.quote(path, '/%')
qs = urllib.quote_plus(qs, ':&=')
return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
class FeedType(models.Model):
name = models.CharField(max_length=255, unique=True)
processor = models.CharField(max_length=255, default=RSS_PROCESSOR)
actions = models.CharField(max_length=255, default=RSS_ACTIONS)
def __unicode__(self):
return self.name
class Meta:
verbose_name = _(u'Feed Type')
verbose_name_plural = _(u'Feed Types')
class Group(models.Model):
name = models.CharField(max_length=250, unique=True)
class Meta:
ordering = ['name']
verbose_name = _(u'Group')
verbose_name_plural = _(u'Groups')
def __unicode__(self):
return self.name
def num_unread(self):
return len(Entry.objects.filter(feed__group=self, read=False))
class Feed(Publishable, Slugged):
title = models.CharField(max_length=255)
description = models.TextField(blank=True, null=True)
link = models.CharField(max_length=2000, blank=True, null=True)
source_url = models.CharField(max_length=255)
source_username = models.CharField(max_length=255, blank=True, null=True)
source_password = models.CharField(max_length=255, blank=True, null=True)
source_port = models.PositiveIntegerField(blank=True, null=True)
source_root_folder = models.CharField(max_length=255, default="/")
source_json_params = models.TextField(blank=True, null=True)
published_time = models.DateTimeField(blank=True, null=True)
last_polled_time = models.DateTimeField(blank=True, null=True)
group = models.ForeignKey(Group, blank=True, null=True,
verbose_name=_(u"Group or Source"))
feed_type = models.ForeignKey(FeedType)
max_entries = models.PositiveIntegerField(blank=True, null=True)
publish_entries = models.BooleanField(default=True)
channel = models.ForeignKey(
'channels.Channel',
null=True,
blank=True,
on_delete=models.SET_NULL
)
main_image = models.ForeignKey(
'images.Image',
verbose_name=_(u'Feed Image'),
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name='feed_image'
)
interval = models.PositiveIntegerField(
blank=True,
null=True,
default=20,
help_text=_(u'interval in minutes (celery only)')
)
class Meta:
ordering = ['title']
verbose_name = _(u'Feed')
verbose_name_plural = _(u'Feeds')
def __unicode__(self):
return self.title
def load_json_params(self):
if self.source_json_params:
try:
return json.loads(self.source_json_params or "{}")
except:
raise ValidationError(_(u'Invalid JSON'))
def clean(self):
self.load_json_params()
@property
def entries(self):
return self.entry_set.all()
# def get_absolute_url(self):
# return "/feed/{0}/{1}".format(self.channel.long_slug, self.slug)
def get_http_absolute_url(self):
protocol, path = "http://{0}/{1}".format(
self.channel, self.slug).split(self.site.domain)
return "{0}{1}/feed{2}".format(protocol, self.site, path)
def get_processor(self, verbose=False):
try:
processor = self.feed_type.processor
_module = '.'.join(processor.split('.')[:-1])
_processor = processor.split('.')[-1]
_temp = __import__(_module, globals(), locals(), [_processor], -1)
Processor = getattr(_temp, _processor)
return Processor(self, verbose=verbose)
except Exception as e:
print str(e)
return
def create_channel(self):
try:
channel = Channel.objects.get(slug=self.slug)
except:
channel = Channel.objects.create(
name=self.title,
slug=self.slug,
published=True,
site=self.site,
user=self.user
)
self.channel = channel
self.save()
return channel
def get_channel(self):
return (self.channel or
Channel.objects.get_homepage(site=self.site) or
self.create_channel())
def save(self, *args, **kwargs):
exclude = {}
self.title = unescape(self.title)
filters = dict(slug=self.slug)
if self.pk is not None:
exclude = dict(pk=self.pk)
if Feed.objects.filter(**filters).exclude(**exclude).exists():
# print("exists creating a new slug")
self.slug = u'{random}-{o.slug}'.format(
o=self, random=getrandbits(16)
)
super(Feed, self).save(*args, **kwargs)
class Entry(Container):
entry_feed = models.ForeignKey(Feed)
entry_title = models.CharField(
max_length=255,
blank=True,
null=True
)
entry_link = models.CharField(max_length=2000, blank=True, null=True)
entry_description = models.TextField(blank=True, null=True)
entry_content = models.TextField(blank=True, null=True)
entry_published_time = models.DateTimeField(auto_now_add=True)
entry_pulled_time = models.DateTimeField(auto_now_add=True)
entry_json = models.TextField(blank=True, null=True)
entry_category = models.CharField(max_length=255, blank=True, null=True)
entry_category_code = models.CharField(max_length=255, blank=True,
null=True)
entry_original_id = models.PositiveIntegerField(blank=True, null=True)
post_created = models.BooleanField(_(u"Post created"), default=False)
def save(self, *args, **kwargs):
self.title = unescape(self.title)
self.hat = unescape(self.hat)
super(Entry, self).save(*args, **kwargs)
def define_main_image(self, archive_link, save=False, *args, **kwargs):
image = Image(
title=u"{0}-{1}".format(self.title, self.id),
slug=u"{0}-{1}".format(self.slug, self.id),
user=self.user,
site=self.site,
archive_link=_url_fix(archive_link),
**kwargs
)
image.save()
self.main_image = image
if save:
self.save()
return image
class Meta:
ordering = ['-entry_published_time']
verbose_name = _(u'Entry')
verbose_name_plural = _(u'Entries')
def __unicode__(self):
return self.title
def get(self, key):
data = self.load_json()
return data.get(key)
def load_json(self):
try:
return json.loads(self.entry_json or "{}")
except:
raise ValidationError(u"Invalid Json")
class ProcessLog(models.Model):
feed = models.ForeignKey(Feed)
type = models.CharField(max_length=255, blank=True, null=True)
text = models.CharField(max_length=255, blank=True, null=True)
log_time = models.DateTimeField(auto_now_add=True, default=timezone.now)
def __unicode__(self):
return self.text
class Meta:
verbose_name = _(u'Process Log')
verbose_name_plural = _(u'Process Logs')
```
#### File: feedcrawler/processors/rss_news.py
```python
import feedparser
import logging
import json
from datetime import datetime, timedelta
from time import mktime
from django.utils import html
from django.template.defaultfilters import striptags
from django.utils.text import slugify
from .base import BaseProcessor
from .category_brasil import CATEGORY_BRASIL
from opps.articles.models import Post
from opps.channels.models import Channel
logger = logging.getLogger()
# Agencia Brasil has a different rule for timezones
TZ_DELTA = timedelta(hours=3)
class RSSProcessor(BaseProcessor):
def fetch(self):
self.parsed = feedparser.parse(self.feed.source_url)
if hasattr(self.parsed.feed, 'bozo_exception'):
msg = "Malformed feed %s" % self.feed.source_url
logger.warning(msg)
self.verbose_print(msg)
return
self.verbose_print("Feed succesfully parsed")
return self.parsed
def update_feed(self):
self.verbose_print("updating feed")
        if hasattr(self.parsed.feed, 'published_parsed'):
published_time = datetime.fromtimestamp(
mktime(self.parsed.feed.published_parsed)
)
published_time = published_time - TZ_DELTA
if (self.feed.published_time and
self.feed.published_time >= published_time):
return
self.feed.published_time = published_time
for attr in ['title', 'title_detail', 'link']:
if not hasattr(self.parsed.feed, attr):
msg = 'refresh_feeds. Feed "%s" has no %s'
logger.error(msg % (self.feed.source_url, attr))
self.verbose_print(msg % (self.feed.source_url, attr))
return
if self.parsed.feed.title_detail.type == 'text/plain':
self.feed.title = striptags(self.parsed.feed.title)[:150]
else:
self.feed.title = self.parsed.feed.title[:150]
self.feed.link = self.feed.link or self.parsed.feed.link
try:
if self.parsed.feed.description_detail.type == 'text/plain':
self.feed.description = \
html.escape(self.parsed.feed.description)
else:
self.feed.description = self.parsed.feed.description
except:
pass
self.feed.last_polled_time = datetime.now()
self.feed.save()
self.verbose_print("Feed obj updated %s" % self.feed.last_polled_time)
return len(self.parsed.entries)
def create_entries(self):
self.verbose_print("creating entry")
count = 0
for i, entry in enumerate(self.parsed.entries):
e_id = getattr(entry, 'id', getattr(entry, 'guid', None))
if self.log_created(e_id):
self.verbose_print("Already processed")
continue
if i > self.max_entries_saved:
break
missing_attr = False
for attr in ['title', 'title_detail', 'link', 'description']:
if not hasattr(entry, attr):
msg = 'Feedcrawler refresh_feeds. Entry "%s" has no %s'
logger.error(msg % (entry.link, attr))
missing_attr = True
if missing_attr:
continue
if entry.title == "":
msg = 'Feedcrawler refresh_feeds. Entry "%s" has a blank title'
logger.warning(msg % (entry.link))
continue
if entry.title_detail.type == 'text/plain':
entry_title = html.escape(entry.title)
else:
entry_title = entry.title
if not entry_title:
entry_title = ""
self.verbose_print("will create entry")
if hasattr(entry, 'published_parsed'):
published_time = datetime.fromtimestamp(
mktime(entry.published_parsed)
)
# published_time = pytz.timezone(
# settings.TIME_ZONE
# ).localize(
# published_time,
# is_dst=None
# )
published_time = published_time - TZ_DELTA
now = datetime.now()
if published_time.date() > now.date():
self.verbose_print("Entry date is > now")
self.verbose_print(published_time)
self.verbose_print(now)
published_time = now
elif published_time.date() < now.date():
self.verbose_print(
"Entry time is in the past, skipping: %s - %s"
% (published_time.date(), now.date())
)
continue
if not published_time:
continue
pub_time_str = published_time.strftime("%Y-%m-%d")
slug = slugify(
self.feed.slug + "-" + entry_title[:100] + pub_time_str)
exists = self.entry_model.objects.filter(slug=slug).exists()
if exists:
self.verbose_print("Entry slug exists, skipping")
continue
try:
tags = ",".join([tag.term for tag in entry.tags])
except:
tags = None
db_entry, created = self.entry_model.objects.get_or_create(
entry_feed=self.feed,
entry_link=entry.link,
channel=self.feed.get_channel(),
title=entry_title[:150].replace('"', '"'),
slug=slug[:150],
entry_title=entry_title[:150],
site=self.feed.site,
user=self.feed.user,
published=self.feed.publish_entries,
show_on_root_channel=False,
tags=tags
)
self.verbose_print("Entry found or created!!!")
if created:
if hasattr(entry, 'published_parsed'):
db_entry.entry_published_time = published_time
# Lots of entries are missing description_detail attributes.
# Escape their content by default
if hasattr(entry, 'description_detail') and \
entry.description_detail.type != 'text/plain':
db_entry.entry_description = entry.description
else:
db_entry.entry_description = html.escape(entry.description)
try:
content = None
if hasattr(entry, 'content'):
content = entry.content
if isinstance(content, list) and content:
content = entry.content[0]
if content and content.type != 'text/plain':
db_entry.entry_content = content.value
elif hasattr(entry, 'content'):
db_entry.entry_content = html.escape(content.value)
except Exception, e:
self.verbose_print(str(e))
msg = 'Feedcrawler refresh_feeds. Entry "%s" content error'
logger.warning(msg % (entry.link))
try:
allowed = (str, unicode, dict, list, int, float, long)
entry_source = json.dumps(
{k: v for k, v in entry.iteritems()
if isinstance(v, allowed)}
)
db_entry.entry_json = entry_source
except Exception, e:
self.verbose_print(str(e))
msg = 'Feedcrawler refresh_feeds. Entry "%s" json error'
logger.warning(msg % (entry.link))
# fill Article properties
db_entry.headline = db_entry.entry_description
db_entry.save()
count += 1
if self.verbose:
self.verbose_print(
"Entry fully created %s" % db_entry.title)
self.record_log(e_id)
try:
self.verbose_print('creating post')
db_entry.pub_time_str = pub_time_str
self.create_post(db_entry)
except Exception as e:
self.verbose_print(str(e))
self.verbose_print("%d entries created" % count)
return count
def delete_old_entries(self):
entries = self.entry_model.objects.filter(entry_feed=self.feed)
entries = entries[self.max_entries_saved:]
# Cannot use 'limit' or 'offset' with delete.
for entry in entries:
entry.delete()
if self.verbose:
self.verbose_print("%d entries deleted" % len(entries))
def process(self):
# fetch and parse the feed
self.max_entries_saved = self.feed.max_entries or 1000
self.verbose_print("fetching")
if not self.fetch():
logger.warning("Feed cannot be parsed")
return 0
self.verbose_print("updating")
if not self.update_feed():
logger.info("No entry returned")
return 0
self.verbose_print("creating entries")
created_count = self.create_entries()
self.delete_old_entries()
return created_count
def get_channel_by_slug(self, slug):
if not slug:
return
try:
return Channel.objects.filter(long_slug=slug)[0]
except:
return
def create_post(self, entry):
# match category X channel
channel_slug = CATEGORY_BRASIL.get(self.feed.source_url)
channel = self.get_channel_by_slug(channel_slug) or entry.channel
self.verbose_print(channel_slug)
slug = slugify(entry.entry_title + "-" + entry.pub_time_str)[:150]
if Post.objects.filter(channel=channel,
slug=slug,
site=entry.site).exists():
# slug = str(random.getrandbits(8)) + "-" + slug
self.verbose_print("Post slug exists")
# do not create duplicates
return
post = Post(
title=entry.entry_title[:150],
slug=slug,
content=entry.entry_content or entry.entry_description,
channel=channel,
site=entry.site,
user=entry.user,
show_on_root_channel=True,
published=self.feed.publish_entries,
# hat=entry.hat,
tags=entry.tags,
date_insert=entry.entry_published_time,
date_available=entry.entry_published_time
)
if self.feed.group:
post.source = self.feed.group.name
post.save()
entry.post_created = True
entry.save()
self.verbose_print(
"Post {p.id}- {p.title} - {p.slug} created".format(p=post))
return post
```
#### File: feedcrawler/sample/ftpserver.py
```python
import os,socket,threading,time
#import traceback
allow_delete = False
local_ip = socket.gethostbyname(socket.gethostname())
local_port = 8888
currdir=os.path.abspath('.')
class FTPserverThread(threading.Thread):
def __init__(self,(conn,addr)):
self.conn=conn
self.addr=addr
self.basewd=currdir
self.cwd=self.basewd
self.rest=False
self.pasv_mode=False
threading.Thread.__init__(self)
def run(self):
self.conn.send('220 Welcome!\r\n')
while True:
cmd=self.conn.recv(256)
if not cmd: break
else:
                print 'Received:',cmd
try:
func=getattr(self,cmd[:4].strip().upper())
func(cmd)
except Exception,e:
print 'ERROR:',e
#traceback.print_exc()
self.conn.send('500 Sorry.\r\n')
def SYST(self,cmd):
self.conn.send('215 UNIX Type: L8\r\n')
def OPTS(self,cmd):
if cmd[5:-2].upper()=='UTF8 ON':
self.conn.send('200 OK.\r\n')
else:
self.conn.send('451 Sorry.\r\n')
def USER(self,cmd):
self.conn.send('331 OK.\r\n')
def PASS(self,cmd):
self.conn.send('230 OK.\r\n')
#self.conn.send('530 Incorrect.\r\n')
def QUIT(self,cmd):
self.conn.send('221 Goodbye.\r\n')
def NOOP(self,cmd):
self.conn.send('200 OK.\r\n')
def TYPE(self,cmd):
self.mode=cmd[5]
self.conn.send('200 Binary mode.\r\n')
def CDUP(self,cmd):
if not os.path.samefile(self.cwd,self.basewd):
#learn from stackoverflow
self.cwd=os.path.abspath(os.path.join(self.cwd,'..'))
self.conn.send('200 OK.\r\n')
def PWD(self,cmd):
cwd=os.path.relpath(self.cwd,self.basewd)
if cwd=='.':
cwd='/'
else:
cwd='/'+cwd
self.conn.send('257 \"%s\"\r\n' % cwd)
def CWD(self,cmd):
chwd=cmd[4:-2]
if chwd=='/':
self.cwd=self.basewd
elif chwd[0]=='/':
self.cwd=os.path.join(self.basewd,chwd[1:])
else:
self.cwd=os.path.join(self.cwd,chwd)
self.conn.send('250 OK.\r\n')
def PORT(self,cmd):
if self.pasv_mode:
self.servsock.close()
self.pasv_mode = False
l=cmd[5:].split(',')
self.dataAddr='.'.join(l[:4])
self.dataPort=(int(l[4])<<8)+int(l[5])
self.conn.send('200 Get port.\r\n')
def PASV(self,cmd): # from http://goo.gl/3if2U
self.pasv_mode = True
self.servsock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.servsock.bind((local_ip,0))
self.servsock.listen(1)
ip, port = self.servsock.getsockname()
print 'open', ip, port
self.conn.send('227 Entering Passive Mode (%s,%u,%u).\r\n' %
(','.join(ip.split('.')), port>>8&0xFF, port&0xFF))
def start_datasock(self):
if self.pasv_mode:
self.datasock, addr = self.servsock.accept()
print 'connect:', addr
else:
self.datasock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.datasock.connect((self.dataAddr,self.dataPort))
def stop_datasock(self):
self.datasock.close()
if self.pasv_mode:
self.servsock.close()
def LIST(self,cmd):
self.conn.send('150 Here comes the directory listing.\r\n')
print 'list:', self.cwd
self.start_datasock()
for t in os.listdir(self.cwd):
k=self.toListItem(os.path.join(self.cwd,t))
self.datasock.send(k+'\r\n')
self.stop_datasock()
self.conn.send('226 Directory send OK.\r\n')
def NLST(self,cmd):
self.conn.send('150 Here comes the directory listing.\r\n')
print 'list:', self.cwd
self.start_datasock()
for t in os.listdir(self.cwd):
self.datasock.send(t+'\r\n')
self.stop_datasock()
self.conn.send('226 Directory send OK.\r\n')
def toListItem(self,fn):
st=os.stat(fn)
fullmode='rwxrwxrwx'
mode=''
for i in range(9):
mode+=((st.st_mode>>(8-i))&1) and fullmode[i] or '-'
d=(os.path.isdir(fn)) and 'd' or '-'
ftime=time.strftime(' %b %d %H:%M ', time.gmtime(st.st_mtime))
return d+mode+' 1 user group '+str(st.st_size)+ftime+os.path.basename(fn)
def MKD(self,cmd):
dn=os.path.join(self.cwd,cmd[4:-2])
os.mkdir(dn)
self.conn.send('257 Directory created.\r\n')
def RMD(self,cmd):
dn=os.path.join(self.cwd,cmd[4:-2])
if allow_delete:
os.rmdir(dn)
self.conn.send('250 Directory deleted.\r\n')
else:
self.conn.send('450 Not allowed.\r\n')
def DELE(self,cmd):
fn=os.path.join(self.cwd,cmd[5:-2])
if allow_delete:
os.remove(fn)
self.conn.send('250 File deleted.\r\n')
else:
self.conn.send('450 Not allowed.\r\n')
def RNFR(self,cmd):
self.rnfn=os.path.join(self.cwd,cmd[5:-2])
self.conn.send('350 Ready.\r\n')
def RNTO(self,cmd):
fn=os.path.join(self.cwd,cmd[5:-2])
os.rename(self.rnfn,fn)
self.conn.send('250 File renamed.\r\n')
def REST(self,cmd):
self.pos=int(cmd[5:-2])
self.rest=True
        self.conn.send('250 File position reset.\r\n')
def RETR(self,cmd):
fn=os.path.join(self.cwd,cmd[5:-2])
#fn=os.path.join(self.cwd,cmd[5:-2]).lstrip('/')
        print 'Downloading:',fn
if self.mode=='I':
fi=open(fn,'rb')
else:
fi=open(fn,'r')
self.conn.send('150 Opening data connection.\r\n')
if self.rest:
fi.seek(self.pos)
self.rest=False
data= fi.read(1024)
self.start_datasock()
while data:
self.datasock.send(data)
data=fi.read(1024)
fi.close()
self.stop_datasock()
self.conn.send('226 Transfer complete.\r\n')
def STOR(self,cmd):
fn=os.path.join(self.cwd,cmd[5:-2])
        print 'Uploading:',fn
if self.mode=='I':
fo=open(fn,'wb')
else:
fo=open(fn,'w')
self.conn.send('150 Opening data connection.\r\n')
self.start_datasock()
while True:
data=self.datasock.recv(1024)
if not data: break
fo.write(data)
fo.close()
self.stop_datasock()
self.conn.send('226 Transfer complete.\r\n')
class FTPserver(threading.Thread):
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((local_ip,local_port))
threading.Thread.__init__(self)
def run(self):
self.sock.listen(5)
while True:
th=FTPserverThread(self.sock.accept())
th.daemon=True
th.start()
def stop(self):
self.sock.close()
if __name__=='__main__':
ftp=FTPserver()
ftp.daemon=True
ftp.start()
print 'On', local_ip, ':', local_port
raw_input('Enter to end...\n')
ftp.stop()
```
|
{
"source": "jeanmask/opps",
"score": 2
}
|
#### File: opps/articles/search_indexes.py
```python
from datetime import datetime
from django.conf import settings
from haystack.indexes import Indexable
from opps.containers.search_indexes import ContainerIndex
from .models import Post, Album, Link
migration_date = getattr(settings, 'MIGRATION_DATE', None)
if migration_date:
m_date = datetime.strptime(migration_date, "%Y-%m-%d").date()
Post.is_legacy = lambda self: m_date >= self.date_insert.date()
else:
Post.is_legacy = lambda self: False
class PostIndex(ContainerIndex, Indexable):
def get_model(self):
return Post
class AlbumIndex(ContainerIndex, Indexable):
def get_model(self):
return Album
class LinkIndex(ContainerIndex, Indexable):
def get_model(self):
return Link
```
#### File: opps/articles/signals.py
```python
from django.contrib.redirects.models import Redirect
def redirect_generate(sender, instance, created, **kwargs):
obj, create = Redirect.objects.get_or_create(
old_path=instance.get_absolute_url(),
site=instance.site)
obj.new_path = instance.url
obj.save()
```
#### File: opps/articles/views.py
```python
from django.contrib.sites.models import get_current_site
from django.utils import timezone
from django.conf import settings
from opps.views.generic.list import ListView
from opps.containers.views import ContainerList
from opps.containers.models import Container, ContainerBox
from opps.articles.models import Album
class AlbumList(ContainerList):
model = Album
type = 'articles'
def get_template_names(self):
templates = []
domain_folder = self.get_template_folder()
list_name = 'list'
templates.append('{0}/{1}/{2}.html'.format(
self.model._meta.app_label,
self.model._meta.module_name, list_name))
if self.request.GET.get('page') and\
self.__class__.__name__ not in\
settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}/{2}/{3}_paginated.html'.format(
domain_folder, self.model._meta.app_label,
self.model._meta.module_name, list_name))
return templates
def get_queryset(self):
# TODO: refatoring, used super()
self.site = get_current_site(self.request)
self.long_slug = self.get_long_slug()
if not self.long_slug:
return None
self.set_channel_rules()
self.articleboxes = ContainerBox.objects.filter(
channel__long_slug=self.long_slug)
is_paginated = self.page_kwarg in self.request.GET
if not is_paginated:
for box in self.articleboxes:
self.excluded_ids.update(
[a.pk for a in box.ordered_containers()])
filters = {}
filters['site_domain'] = self.site.domain
filters['date_available__lte'] = timezone.now()
filters['published'] = True
filters['child_class'] = 'Album'
if self.channel and self.channel.is_root_node() and not is_paginated:
filters['show_on_root_channel'] = True
queryset = Container.objects.filter(
**filters).exclude(pk__in=self.excluded_ids)
return queryset._clone()
class AlbumChannelList(ListView):
model = Album
type = 'articles'
template_name_suffix = 'album'
def get_template_list(self, domain_folder="containers"):
templates = []
list_name = 'list'
if self.template_name_suffix:
list_fullname = "{0}_{1}".format(self.template_name_suffix,
list_name)
if self.channel:
if self.channel.group and self.channel.parent:
templates.append('{0}/{1}/{2}.html'.format(
domain_folder,
self.channel.parent.long_slug,
list_fullname))
if self.request.GET.get('page') and\
self.__class__.__name__ not in\
settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}/{2}_paginated.html'.format(
domain_folder, self.channel.parent.long_slug,
list_fullname))
if self.request.GET.get('page') and\
self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}/{2}_paginated.html'.format(
domain_folder, self.channel.long_slug, list_fullname))
templates.append('{0}/{1}/{2}.html'.format(
domain_folder, self.channel.long_slug, list_fullname))
for t in self.channel.get_ancestors()[::-1]:
templates.append('{0}/{1}/{2}.html'.format(
domain_folder, t.long_slug, list_fullname))
if self.request.GET.get('page') and\
self.__class__.__name__ not in\
settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}/{2}_paginated.html'.format(
domain_folder, t.long_slug, list_fullname))
if self.request.GET.get('page') and\
self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}_paginated.html'.format(domain_folder,
list_fullname))
templates.append('{0}/{1}/{2}.html'.format(
self.model._meta.app_label,
self.model._meta.module_name,
list_name))
return templates
def get_template_names(self):
domain_folder = self.get_template_folder()
template_list = self.get_template_list(domain_folder)
return template_list
def get_queryset(self):
self.site = get_current_site(self.request)
queryset = super(AlbumChannelList, self).get_queryset()
filters = {}
filters['site_domain'] = self.site.domain
filters['date_available__lte'] = timezone.now()
filters['published'] = True
filters['show_on_root_channel'] = True
queryset = queryset.filter(**filters)
return queryset._clone()
```
#### File: opps/boxes/models.py
```python
import json
from datetime import datetime
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from opps.core.models import Publishable, Channeling
class QuerySet(Publishable):
name = models.CharField(_(u"Dynamic queryset name"), max_length=140)
slug = models.SlugField(
_(u"Slug"),
db_index=True,
max_length=150,
unique=True,
)
model = models.CharField(_(u'Model'), max_length=150)
limit = models.PositiveIntegerField(_(u'Limit'), default=7)
offset = models.PositiveIntegerField(_(u'Offset'), default=0)
order_field = models.CharField(
_(u"Order Field"),
max_length=100,
default='id',
help_text=_(u"Take care, should be an existing field or lookup")
)
order = models.CharField(_('Order'), max_length=1, choices=(
('-', 'DESC'), ('+', 'ASC')))
channel = models.ManyToManyField(
'channels.Channel',
verbose_name=_(u"Channel"),
blank=True,
null=True,
default=None
)
recursive = models.BooleanField(
_("Recursive"),
help_text=_("Bring the content channels and subchannels (tree)"),
default=False
)
filters = models.TextField(
_(u'Filters'),
help_text=_(u'Json format extra filters for queryset'),
blank=True,
null=True
)
excludes = models.TextField(
_(u'Excludes'),
help_text=_(u'Json format for queryset excludes'),
blank=True,
null=True
)
def __init__(self, *args, **kwargs):
"""
        To avoid re-executing methods, their results are cached
        in a local, per-instance storage.
"""
super(QuerySet, self).__init__(*args, **kwargs)
if not hasattr(self, 'local_cache'):
self.local_cache = {}
def __unicode__(self):
return u"{0} {1} {2}".format(self.name, self.slug, self.model)
def clean(self):
if self.filters:
try:
json.loads(self.filters)
except:
raise ValidationError(_(u'Invalid JSON for filters'))
if self.excludes:
try:
json.loads(self.excludes)
except:
raise ValidationError(_(u'Invalid JSON for excludes'))
try:
# TODO: See how to test queryset before channel exist
# self.get_queryset().all()
pass
except Exception as e:
raise ValidationError(
u'Invalid Queryset: {0}'.format(str(e))
)
if self.offset >= self.limit:
            raise ValidationError(_(u'Offset can\'t be equal to or higher '
                                    u'than limit'))
if self.recursive:
if not self.channel:
raise ValidationError(_(u"To use recursion (channel) is "
u"necessary to select a channel"))
def get_queryset(self, content_group='default',
exclude_ids=None, use_local_cache=True):
cached = self.local_cache.get('get_queryset')
if use_local_cache and cached:
return cached
exclude_ids = exclude_ids or []
_app, _model = self.model.split('.')
model = models.get_model(_app, _model)
queryset = model.objects.filter(
published=True,
date_available__lte=timezone.now(),
site=self.site
).exclude(child_class='Mirror')
try:
if model._meta.get_field_by_name('show_on_root_channel'):
queryset = queryset.filter(show_on_root_channel=True)
except:
# silently pass when FieldDoesNotExists
pass
if self.channel.exists():
ch_long_slug_in = [
ch.long_slug for ch in self.channel.all()
if ch.published and not ch.homepage]
if self.recursive:
channel_descendants = [
ch.get_descendants(include_self=False)
for ch in self.channel.all()
if ch.published and not ch.homepage]
for children in channel_descendants:
[ch_long_slug_in.append(chi.long_slug)
for chi in children if chi.published]
queryset = queryset.filter(
channel_long_slug__in=ch_long_slug_in)
else:
queryset = queryset.filter(
channel_long_slug__in=ch_long_slug_in)
if self.filters:
filters = json.loads(self.filters)
for key, value in filters.iteritems():
if value == "datetime.now()":
filters[key] = datetime.now()
queryset = queryset.filter(**filters)
if self.excludes:
excludes = json.loads(self.excludes)
for key, value in excludes.iteritems():
if value == "datetime.now()":
excludes[key] = datetime.now()
queryset = queryset.exclude(**excludes)
# importing here to avoid circular imports
from opps.containers.models import Container
if issubclass(model, Container):
queryset = queryset.exclude(
id__in=exclude_ids
)
order_term = self.order_field or 'id'
if self.order == '-':
order_term = "-{0}".format(self.order_field or 'id')
queryset = queryset.order_by(order_term)
result = queryset[self.offset:self.limit]
if use_local_cache:
self.local_cache['get_queryset'] = result
return result
class BaseBox(Publishable, Channeling):
name = models.CharField(_(u"Box name"), max_length=140)
slug = models.SlugField(
_(u"Slug"),
db_index=True,
max_length=150,
)
class Meta:
abstract = True
unique_together = ['site', 'channel_long_slug', 'slug']
def __unicode__(self):
return u"{0}-{1}".format(self.slug, self.site.name)
```
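For illustration, a dynamic queryset row as it might be filled in through the admin; `get_queryset()` above resolves the model path, applies the JSON filters/excludes, and swaps the literal string "datetime.now()" for the current time (the field values below are assumptions, not project fixtures):

```python
qs = QuerySet(
    name="Recent posts",
    slug="recent-posts",
    model="articles.Post",  # "<app>.<Model>", resolved via models.get_model
    limit=10,
    offset=0,
    order_field="date_available",
    order="-",
    filters='{"date_available__lte": "datetime.now()"}',
    excludes='{"tags__icontains": "draft"}',
)
```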
#### File: opps/containers/api.py
```python
from django.utils import timezone
from opps.api import BaseHandler
from .models import Container, ContainerBox
class Handler(BaseHandler):
allowed_methods = ('GET',)
def read(self, request):
filters = request.GET.dict()
filters['date_available__lte'] = timezone.now()
filters['published'] = True
# Removed field from automatic filtering
[filters.pop(b, None) for b in self.blackfield]
return self.model.objects.filter(
**filters)[self._page(request):self._limit(request)]
class ContainerHandler(Handler):
model = Container
class ContainerBoxHandler(Handler):
model = ContainerBox
fields = (
'name',
'slug',
'title',
'title_url',
'channel',
('containers', ())
)
exclude = ['queryset']
```
#### File: contrib/logging/api.py
```python
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from opps.api import BaseHandler
from .models import Logging
class LoggingHandler(BaseHandler):
allowed_methods = ('POST', 'GET')
model = Logging
def create(self, request):
method = getattr(request, request.method)
User = get_user_model()
username = method.get('api_username')
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return {}
try:
site = Site.objects.get(domain=method.get('site'))
except Site.DoesNotExist:
site = Site.objects.order_by('id')[0]
log = Logging.objects.create(
user=user,
site=site,
application=method.get('application'),
action=method.get('action'),
text=method.get('text'),
)
return log
```
#### File: contrib/multisite/middleware.py
```python
from django.contrib.sites.models import Site
from django.conf import settings
class DynamicSiteMiddleware(object):
def hosting_parse(self, hosting):
"""
Returns ``(host, port)`` for ``hosting`` of the form ``'host:port'``.
If hosting does not have a port number, ``port`` will be None.
"""
if ':' in hosting:
return hosting.rsplit(':', 1)
return hosting, None
def get_hosting(self, hosting):
domain, port = self.hosting_parse(hosting)
if domain in settings.OPPS_DEFAULT_URLS:
domain = 'example.com'
try:
return Site.objects.get(domain=domain)
except Site.DoesNotExist:
return Site.objects.order_by('id')[0]
def process_request(self, request):
hosting = request.get_host().lower()
site = self.get_hosting(hosting)
request.site = site
settings.SITE_ID = site.id
settings.CACHE_MIDDLEWARE_KEY_PREFIX = "opps_site-{0}".format(site.id)
```
#### File: contrib/notifications/models.py
```python
import json
from django.db import models
from django.utils.translation import ugettext_lazy as _
from opps.core.models import Publishable
from opps.db import Db
NOTIFICATION_TYPE = (
(u'json', _(u'JSON')),
(u'text', _(u'Text')),
(u'html', _(u'HTML')),
)
class Notification(Publishable):
container = models.ForeignKey('containers.Container')
channel_long_slug = models.CharField(
_(u"Channel long slug"),
max_length=250,
null=True, blank=True,
db_index=True,
)
slug = models.SlugField(
_(u"Slug"),
max_length=150,
null=True, blank=True,
db_index=True,
)
action = models.CharField(_('Action'), max_length=75,
default="message")
type = models.CharField(_('Type'), max_length=10,
choices=NOTIFICATION_TYPE,
default='json')
message = models.TextField(_('Message'))
def save(self, *args, **kwargs):
self.channel_long_slug = self.container.channel_long_slug
self.slug = self.container.slug
super(Notification, self).save(*args, **kwargs)
_db = Db(self.container.get_absolute_url(),
self.container.id)
message = self.message
if self.type == "json":
message = json.dumps(self.message)
_db.publish(json.dumps({
"action": self.action,
"id": self.id,
"published": self.published,
"date": self.date_available.strftime("%D %T"),
"message": message}))
_db.close()
```
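For reference, the payload pushed to the pub/sub key by `save()` has this shape (the values are illustrative):

```python
payload = {
    "action": "message",
    "id": 42,
    "published": True,
    "date": "06/21/13 14:05:00",  # date_available formatted with "%D %T"
    "message": "\"Goal!\"",       # JSON-encoded when type == "json", raw text otherwise
}
```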
#### File: contrib/notifications/views.py
```python
import json
import time
from django.conf import settings
from django.http import StreamingHttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from opps.views.generic.list import ListView
from opps.views.generic.json_views import JSONPResponse
from opps.db import Db
from .models import Notification
class AsyncServer(ListView):
model = Notification
def _db(self, obj):
_db = Db(
obj.container.get_absolute_url(),
obj.container.id)
pubsub = _db.object().pubsub()
pubsub.subscribe(_db.key)
return pubsub
def _queue(self):
try:
obj = self.get_queryset()[0]
except:
obj = False
if not obj:
while True:
msg = u"compatibility: true\n"
msg += u"retry: 10000\n"
msg += u"data: {0}\n\n".format(
json.dumps({"action": "error"}))
yield msg
time.sleep(10)
else:
while True:
pubsub = self._db(obj)
for m in pubsub.listen():
if m['type'] == 'message':
msg = u"compatibility: true\n"
msg += u"retry: 10000\n"
msg += u"data: {0}\n\n".format(m['data'])
yield msg
ping = u"compatibility: true\n"
ping += u"retry: 10000\n"
ping += u"data: {0}\n\n".format(
json.dumps({"action": "ping"}))
yield ping
time.sleep(0.5)
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
response = StreamingHttpResponse(self._queue(),
mimetype='text/event-stream')
response['Cache-Control'] = 'no-cache'
response['Software'] = 'opps-liveblogging'
response['Access-Control-Allow-Origin'] = '*'
response.flush()
return response
class LongPullingServer(ListView, JSONPResponse):
model = Notification
def get_template_names(self):
templates = []
domain_folder = self.get_template_folder()
if not self.long_slug:
templates.append('{0}/none.json'.format(domain_folder))
return templates
list_name = 'list'
if self.template_name_suffix:
list_name = "{0}{1}".format(list_name, self.template_name_suffix)
if self.channel:
# Check layout, change via admin
if self.channel.layout != u'default':
list_name = self.channel.layout
if self.channel.group and self.channel.parent:
templates.append('{0}/{1}/{2}.json'.format(
domain_folder, self.channel.parent.long_slug, list_name))
if self.request.GET.get('page') and\
self.__class__.__name__ not in\
settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}/{2}_paginated.json'.format(
domain_folder, self.channel.parent.long_slug,
list_name))
if self.request.GET.get('page') and\
self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}/{2}_paginated.json'.format(
domain_folder, self.channel.long_slug, list_name))
templates.append('{0}/{1}/{2}.json'.format(
domain_folder, self.channel.long_slug, list_name))
for t in self.channel.get_ancestors()[::-1]:
templates.append('{0}/{1}/{2}.json'.format(
domain_folder, t.long_slug, list_name))
if self.request.GET.get('page') and\
self.__class__.__name__ not in\
settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}/{2}_paginated.json'.format(
domain_folder, t.long_slug, list_name))
if self.request.GET.get('page') and\
self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}_paginated.json'.format(domain_folder,
list_name))
templates.append('{0}/{1}.json'.format(domain_folder, list_name))
return templates
def get_queryset(self):
query = super(LongPullingServer, self).get_queryset()
old_id = self.request.GET.get('old_id', 0)
return query.filter(id__gte=old_id)._clone()
```
#### File: management/commands/update_channel_denormalization.py
```python
from django.core.management.base import BaseCommand, CommandError
from opps.channels.models import Channel
from opps.containers.models import ContainerBox
from opps.articles.models import Post
class Command(BaseCommand):
def handle(self, *args, **options):
models = [Channel, Post, ContainerBox]
for m in models:
[p.save() for p in m.objects.all()]
```
#### File: core/tags/views.py
```python
from django.utils import timezone
from django.contrib.sites.models import get_current_site
from django.conf import settings
from haystack.query import SearchQuerySet
from opps.views.generic.list import ListView
from opps.containers.models import Container
from opps.channels.models import Channel
from .models import Tag
USE_HAYSTACK = getattr(settings, 'OPPS_TAGS_USE_HAYSTACK', False)
class TagList(ListView):
model = Container
def get_template_list(self, domain_folder="containers"):
templates = []
list_name = 'list_tags'
if self.request.GET.get('page') and\
self.__class__.__name__ not in settings.OPPS_PAGINATE_NOT_APP:
templates.append('{0}/{1}_paginated.html'.format(domain_folder,
list_name))
templates.append('{0}/{1}.html'.format(domain_folder, list_name))
return templates
def get_context_data(self, **kwargs):
context = super(TagList, self).get_context_data(**kwargs)
context['tag'] = self.kwargs['tag']
site = get_current_site(self.request)
context['channel'] = Channel.objects.get_homepage(site)
return context
def get_queryset(self):
self.site = get_current_site(self.request)
# without the long_slug, the queryset will cause an error
self.long_slug = 'tags'
self.tag = self.kwargs['tag']
if USE_HAYSTACK:
return self.get_queryset_from_haystack()
return self.get_queryset_from_db()
def get_queryset_from_haystack(self):
models = Container.get_children_models()
sqs = SearchQuerySet().models(*models).filter(
tags=self.tag).order_by('-date_available')
sqs.model = Container
return sqs
def get_queryset_from_db(self):
tags = Tag.objects.filter(slug=self.tag).values_list('name') or []
tags_names = []
if tags:
tags_names = [i[0] for i in tags]
ids = []
for tag in tags_names:
result = self.containers = self.model.objects.filter(
site_domain=self.site,
tags__contains=tag,
date_available__lte=timezone.now(),
published=True
)
if result.exists():
ids.extend([i.id for i in result])
# remove the repeated
ids = list(set(ids))
# grab the containers
self.containers = self.model.objects.filter(id__in=ids)
return self.containers
```
#### File: opps/fields/utils.py
```python
def field_template_read(obj):
"""Use replace because the django template can't read variable with "-"
"""
fields = {}
for o in obj:
fields[o.replace("-", "_")] = obj[o]
return fields
```
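A one-liner sketch of the helper above:
```python
# illustrative only
field_template_read({"main-color": "red", "font-size": 12})
# -> {'main_color': 'red', 'font_size': 12}
```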
#### File: tests/contrib/test_mobile.py
```python
from django.test import TestCase
from django.test.client import RequestFactory
from opps.contrib.mobile import template
from opps.contrib.mobile.middleware import (
MobileDetectionMiddleware, MobileRedirectMiddleware
)
class TestMobileTemplatesDir(TestCase):
def setUp(self):
self.detection_middleware = MobileDetectionMiddleware()
self.factory = RequestFactory()
self.template_loader = template.Loader()
def test_useragent_based_templatedirs(self):
# Override the TEMPLATE_LOADERS and MIDDLEWARE_CLASSES settings
# to use the middlewares in ``opps.contrib.mobile.middleware``
# and the ``opps.contrib.mobile.template.Loader``
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'opps.contrib.mobile.middleware.MobileDetectionMiddleware',
'opps.contrib.mobile.middleware.MobileRedirectMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
)
TEMPLATE_LOADERS = (
'opps.contrib.mobile.template.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS_MOBILE = ('mobile-templates',)
TEMPLATE_DIRS_WEB = ('web-templates',)
custom_settings = self.settings(
MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES,
TEMPLATE_LOADERS=TEMPLATE_LOADERS,
TEMPLATE_DIRS_MOBILE=TEMPLATE_DIRS_MOBILE,
TEMPLATE_DIRS_WEB=TEMPLATE_DIRS_WEB,
OPPS_CHECK_MOBILE=True,
OPPS_DOMAIN_MOBILE='m.testserver'
)
with custom_settings:
mobile_request = self.factory.get('/', HTTP_USER_AGENT='mobi')
desktop_request = self.factory.get('/',
HTTP_USER_AGENT='Mozilla/5.0')
get_template_sources = self.template_loader.get_template_sources
self.detection_middleware.process_request(desktop_request)
self.assertEqual(
get_template_sources('index.html').next(),
get_template_sources('index.html', TEMPLATE_DIRS_WEB).next()
)
self.detection_middleware.process_request(mobile_request)
self.assertEqual(
get_template_sources('index.html').next(),
get_template_sources('index.html', TEMPLATE_DIRS_MOBILE).next()
)
```
#### File: tests/images/generate.py
```python
from unittest import TestCase
from django.template import Template, Context
from django.conf import settings
class TestImagesTagsImageUrl(TestCase):
url = 'oppsproject.org/path/image.jpg'
generate_url_path = 'opps.images.templatetags.images_tags.image_url'
def render(self, arguments):
source = u'{% load images_tags %}{% image_url ' + arguments + ' %}'
template = Template(source)
rendered = template.render(Context({'url': self.url}))
return rendered.strip()
def test_templatetag_return(self):
self.assertTrue(self.render(u'url unsafe=True'))
def test_should_pass_the_image_url_arg_to_the_helper(self):
if settings.THUMBOR_ENABLED:
image_url = (u'http://localhost:8888/unsafe/localhost:8000/media/'
u'oppsproject.org/path/image.jpg')
else:
image_url = self.url
self.assertEqual(self.render(u'url unsafe=True'), image_url)
def test_should_pass_kwargs_to_the_helper(self):
if settings.THUMBOR_ENABLED:
image_url = (u'http://localhost:8888/unsafe/300x200/'
u'localhost:8000/media/oppsproject.org/path/'
u'image.jpg')
else:
image_url = self.url
self.assertEqual(self.render(u'url width=300 height=200 unsafe=True'),
image_url)
```
#### File: tests/_site/runner.py
```python
from django_coverage.coverage_runner import CoverageRunner
class Runner(CoverageRunner):
def build_suite(self, *args, **kwargs):
suite = super(Runner, self).build_suite(*args, **kwargs)
tests = []
for case in suite:
pkg = case.__class__.__module__.split('.')[0]
if pkg in ['opps']:
tests.append(case)
suite._tests = tests
return suite
```
#### File: tests/views/test_generic_detail.py
```python
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.utils import timezone
from opps.articles.models import Post, Link
from opps.channels.models import Channel
class TemplateNameTest(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create(
username='<EMAIL>',
email='<EMAIL>',
password=User.objects.make_random_password(),
)
self.channel = Channel.objects.create(
name='test channel',
slug='test-channel',
user=self.user,
published=True,
date_available=timezone.now(),
)
self.post = Post.objects.create(
headline=u'a simple headline',
short_title=u'a simple short title',
title=u'a simple title',
hat=u'a simple hat',
channel=self.channel,
user=self.user,
published=True,
date_available=timezone.now(),
)
def test_get_template_name_basic(self):
response = self.client.get(self.post.get_absolute_url())
self.assertTrue(response)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.template_name,
['containers/test-channel/a-simple-title/detail.html',
'containers/test-channel/post_detail.html',
'containers/post_detail.html',
'containers/test-channel/detail.html',
'containers/detail.html'])
def test_get_template_name_channel_with_father(self):
channel = Channel.objects.create(
name='child',
slug='child',
parent=self.channel,
user=self.user,
published=True,
date_available=timezone.now(),
)
self.post.channel = channel
self.post.save()
response = self.client.get(self.post.get_absolute_url())
self.assertTrue(response)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.template_name,
['containers/test-channel/child/a-simple-title/detail.html',
'containers/test-channel/child/post_detail.html',
'containers/test-channel/post_detail.html',
'containers/post_detail.html',
'containers/test-channel/child/detail.html',
'containers/test-channel/detail.html',
'containers/detail.html'])
class LinkResponseToRedirecTest(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create(
username='<EMAIL>',
email='<EMAIL>',
password=User.objects.make_random_password(),
)
self.channel = Channel.objects.create(
name='test channel',
slug='test-channel',
user=self.user,
published=True,
date_available=timezone.now(),
)
self.link = Link.objects.create(
title=u'a simple title',
url=u'http://www.oppsproject.org/',
channel=self.channel,
user=self.user,
published=True,
date_available=timezone.now(),
)
def test_redirect(self):
response = self.client.get(self.link.get_absolute_url())
self.assertTrue(response)
self.assertEqual(response.status_code, 302)
try:
self.assertEqual(response.items()[2][1],
u'http://www.oppsproject.org/')
except:
self.assertEqual(response.items()[3][1],
u'http://www.oppsproject.org/')
class TestAjaxRequests(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create(
username='<EMAIL>',
email='<EMAIL>',
password=User.objects.make_random_password(),
)
self.channel = Channel.objects.create(
name='test channel 2',
slug='test-channel-2',
user=self.user,
published=True,
date_available=timezone.now(),
)
self.post = Post.objects.create(
headline=u'a simple headline 2',
short_title=u'a simple short title 2',
title=u'a simple title 2',
hat=u'a simple hat 2',
channel=self.channel,
user=self.user,
published=True,
date_available=timezone.now(),
)
def test_if_ajax_extends_variable_in_context_is_empty_without_ajax(self):
response = self.client.get(self.post.get_absolute_url())
self.assertTrue(response)
self.assertEqual(response.status_code, 200)
def test_get_ajax_extends_variable_in_context(self):
response = self.client.get(self.post.get_absolute_url(),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertTrue(response)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['extends_parent'], 'base_ajax.html')
```
|
{
"source": "JeanMax/babao",
"score": 2
}
|
#### File: src/babao/commands.py
```python
import time
import babao.config as conf
import babao.inputs.ledger.ledgerManager as lm
import babao.inputs.inputManager as im
import babao.inputs.inputBase as ib
import babao.models.modelManager as mm
import babao.utils.date as du
import babao.utils.file as fu
import babao.utils.log as log
import babao.utils.signal as sig
def wetRun(unused_args):
"""Dummy"""
print("Sorry, this is not implemented yet :/")
def dryRun(unused_args):
"""Real-time bot simulation"""
while not sig.EXIT:
if im.fetchInputs():
mm.predictModelsMaybeTrade(
since=du.TIME_TRAVELER.nowMinus(days=ib.REAL_TIME_LOOKBACK_DAYS)
)
def fetch(unused_args):
"""Fetch raw trade data since the beginning of times"""
while not sig.EXIT and not im.fetchInputs():
last_fetch = min(
(i.current_row.name for i in ib.INPUTS if i.current_row is not None)
)
log.info("Fetched data till", du.toStr(last_fetch))
if not sig.EXIT:
log.debug("Fetching done, optimizing database...")
fu.maintenance()
log.info("Database up to date!")
def backtest(args):
"""
Just a naive backtester
It will call the trained strategies on each test data point
"""
now = du.TIME_TRAVELER.getTime(force=True)
t = du.TIME_TRAVELER.getTime()
log.info(
"Test data: from", du.toStr(t),
"to", du.toStr(now)
)
while t < now and not lm.gameOver() and not sig.EXIT:
t += du.secToNano(conf.TIME_INTERVAL * 60) # TODO
im.timeTravel(t)
mm.predictModelsMaybeTrade(
since=du.TIME_TRAVELER.nowMinus(days=ib.REAL_TIME_LOOKBACK_DAYS)
)
score = lm.getGlobalBalanceInQuote()
# hodl = price / big_fat_data_prices[0] * 100
log.info(
"Backtesting done! Score: " + str(round(float(score)))
# + "% vs HODL: " + str(round(hodl)) + "%"
)
if args.graph:
# TODO: exit if graph is closed
while not sig.EXIT:
time.sleep(0.1)
def train(args):
"""Train the various (awesome) algorithms"""
im.timeTravel(ib.SPLIT_DATE)
log.debug(
"Train data: from", du.toStr(du.EPOCH),
"to", du.toStr(ib.SPLIT_DATE)
)
mm.trainModels(since=du.EPOCH)
if args.graph:
log.debug("Plot models on train data")
mm.plotModels(since=du.EPOCH)
log.debug("Plot models on test data")
im.timeTravel(
du.TIME_TRAVELER.getTime(force=True)
) # back to the future
log.debug(
"Test data: from", du.toStr(du.toStr(ib.SPLIT_DATE)),
"to", du.toStr(du.TIME_TRAVELER.getTime())
)
mm.plotModels(since=ib.SPLIT_DATE)
log.debug("Job done!")
if args.graph:
import matplotlib.pyplot as plt
plt.show()
```
#### File: src/babao/graph.py
```python
import os
import sys
import re
from functools import partial
# import traceback
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from matplotlib.widgets import MultiCursor, Button
import babao.config as conf
import babao.inputs.inputBase as ib
import babao.inputs.inputManager as im
import babao.inputs.ledger.ledgerManager as lm
import babao.utils.date as du
import babao.utils.file as fu
import babao.utils.indicators as indic
import babao.utils.log as log
import babao.utils.signal as sig
INDEX = None
DATA = None
INDICATORS_COLUMNS = [
"sma_vwap_9",
"sma_vwap_26",
"sma_vwap_77",
"sma_volume_26",
"sma_volume_77",
]
MAX_LOOK_BACK = 77
class Index():
"""Class keeping track of the current crypto to display"""
def __init__(self, axes, lines):
self.axes = axes
self.lines = lines
self.ind = 0
self._update()
def next(self, unused_event):
"""Show the next crypto"""
self.ind = (self.ind + 1) % len(conf.CRYPTOS)
self._update()
def prev(self, unused_event):
"""Show the previous crypto"""
self.ind = (self.ind - 1) % len(conf.CRYPTOS)
self._update()
def _update(self):
"""Update graph data and zoom after a next/prev click"""
_updateGraph(42, self.lines)
self.axes["vwap"].set_title(conf.CRYPTOS[self.ind].name)
y_min = DATA["vwap"].min()
y_max = DATA["vwap"].max()
space = (y_max - y_min) * 0.05 # 5% space up and down
self.axes["vwap"].set_ylim(
bottom=y_min - space, top=y_max + space
)
y_max = DATA["volume"].max()
space = y_max * 0.05
self.axes["volume"].set_ylim(bottom=0, top=y_max - space)
y_max = max(DATA["signal_line"].max(), DATA["signal_line"].min() * -1)
self.axes["macd"].set_ylim(bottom=-y_max, top=y_max)
self.axes["total-balance"].set_ylim(bottom=0, top=200)
plt.draw()
def _getData():
"""
Initialize ´DATA´ global
Basically read ´conf.MAX_GRAPH_POINTS´ from data files
"""
global DATA
if not os.path.isfile(conf.DB_FILE):
log.warning("Data files not found... Is it your first time around?")
return False
if INDEX is None:
crypto = conf.CRYPTOS[0]
else:
crypto = conf.CRYPTOS[INDEX.ind]
inputs = [
lm.TRADES[crypto],
lm.LEDGERS[conf.QUOTE],
lm.LEDGERS[crypto]
]
if conf.CURRENT_COMMAND == "backtest":
for i in inputs:
i._cache_data = None # pylint: disable=W0212
since = ib.SPLIT_DATE
else:
im.refreshInputs(inputs)
since = lm.TRADES[crypto].current_row.name - du.secToNano(
(MAX_LOOK_BACK + conf.MAX_GRAPH_POINTS) * conf.TIME_INTERVAL * 60
)
DATA = im.readInputs(inputs, since)
DATA.rename(
partial(re.sub, r'.*Trades.*-', ""),
axis="columns", inplace=True
)
DATA.rename(
partial(re.sub, r'.*Ledger' + conf.QUOTE.name + '.*-', "quote-"),
axis="columns", inplace=True
)
DATA.rename(
partial(re.sub, r'.*Ledger.*-', "crypto-"),
axis="columns", inplace=True
)
DATA = DATA.loc[
:,
['close', 'vwap', 'volume', 'quote-balance', 'crypto-balance']
]
DATA = indic.get(DATA, INDICATORS_COLUMNS)
DATA["macd_line"], DATA["signal_line"], DATA["macd"] = indic.macd(
DATA["vwap"],
46, 75, 22,
True
)
if conf.CURRENT_COMMAND == "backtest":
DATA = DATA.fillna(0)
else:
DATA = DATA.dropna()
DATA["total-balance"] = DATA["quote-balance"] \
+ DATA["crypto-balance"] * DATA["close"]
du.toDatetime(DATA)
return True
def _updateGraph(unused_counter, lines):
"""Function called (back) by FuncAnimation, will update graph"""
if sig.EXIT:
sys.exit(0)
if not _getData():
return lines.values()
for key in lines:
lines[key].set_data(DATA.index, DATA[key])
return lines.values()
def _createAxes():
"""Create the different axes we'll need to draw in"""
axes = {}
axes["vwap"] = plt.subplot2grid(
(8, 1), (0, 0), rowspan=5
)
axes["volume"] = plt.subplot2grid(
(8, 1), (5, 0), sharex=axes["vwap"]
)
axes["macd"] = plt.subplot2grid(
(8, 1), (6, 0), sharex=axes["vwap"]
)
axes["total-balance"] = plt.subplot2grid(
(8, 1), (7, 0), sharex=axes["vwap"]
)
return axes
def _createLines(axes):
"""Draw desired lines with matplotlib"""
lines = {}
for key in axes: # TODO: this is *really* ugly
lines[key], = axes[key].plot(
DATA.index,
DATA[key],
# "-+",
label=key,
color="b",
alpha=0.5
)
plt.setp(axes[key].get_xticklabels(), visible=False)
if key == "total-balance":
col = "quote-balance"
lines[col], = axes[key].plot(
DATA.index,
DATA[col],
label=col.replace("_", " "),
color="r",
alpha=0.5
)
elif key == "macd":
for i, col in enumerate(["macd_line", "signal_line"]):
lines[col], = axes[key].plot(
DATA.index,
DATA[col],
label=col.replace("_", " "),
color="r",
alpha=0.7 - 0.2 * (i % 3)
)
else: # add indicators to vol/vwap
for i, col in enumerate(INDICATORS_COLUMNS):
if key in col:
lines[col], = axes[key].plot(
DATA.index,
DATA[col],
label=col.replace("_" + key, "").replace("_", " "),
color="r",
alpha=0.7 - 0.2 * (i % 3)
)
if key == "vwap":
col = "close"
lines[col], = axes[key].plot(
DATA.index,
DATA[col],
label=col,
color="g",
alpha=0.5
)
return lines
def _initGraph():
"""Wrapped to display errors (this is running in a separate process)"""
fig = plt.figure()
axes = _createAxes()
lines = _createLines(axes)
# the assignation is needed to avoid garbage collection...
unused_cursor = MultiCursor( # NOQA: F841
fig.canvas,
list(axes.values()),
useblit=True,
color="black",
lw=0.5,
horizOn=True
) # TODO: redraw me!
plt.setp(axes["total-balance"].get_xticklabels(), visible=True)
for label in axes["total-balance"].xaxis.get_ticklabels():
label.set_rotation(45)
adf = axes["total-balance"].xaxis.get_major_formatter()
adf.scaled[1. / 86400] = "%d/%m/%y %H:%M"
adf.scaled[1. / 1440] = "%d/%m/%y %H:%M"
adf.scaled[1. / 24] = "%d/%m/%y %H:%M"
adf.scaled[1.] = "%d/%m/%y"
adf.scaled[30.] = "%d/%m/%y"
adf.scaled[365.] = "%d/%m/%y"
axes["vwap"].set_ylabel("PRICE")
axes["volume"].set_ylabel("VOL")
axes["macd"].set_ylabel("MACD")
axes["total-balance"].set_ylabel("BAL")
for key in axes:
axes[key].grid(True)
axes[key].legend(
loc="upper left",
prop={'size': 8},
fancybox=True,
framealpha=0.3
)
axes[key].yaxis.set_label_position("right")
axes[key].yaxis.tick_right()
# # not so good when zooming in :/
# last_x = DATA.index[-1]
# last_y = DATA[key].iloc[-1]
# axes[key].annotate(
# str(int(last_y)),
# (last_x, last_y),
# xytext=(
# last_x + (last_x - DATA.index[-21]),
# last_y
# ),
# bbox={"boxstyle": "larrow"}
# ) # TODO: save this, then give it to the update fun
plt.subplots_adjust(top=0.97, left=0.03, right=0.92, hspace=0.05)
global INDEX
INDEX = Index(axes, lines)
axprev = plt.axes([0.13, 0.974, 0.1, 0.025])
axnext = plt.axes([0.75, 0.974, 0.1, 0.025])
bnext = Button(axnext, 'next', color='0.5', hovercolor='0.9')
bprev = Button(axprev, 'prev', color='0.5', hovercolor='0.9')
bnext.on_clicked(INDEX.next)
bprev.on_clicked(INDEX.prev)
# the assignations are needed to avoid garbage collection...
unused_animation = animation.FuncAnimation( # NOQA: F841
fig,
_updateGraph,
fargs=(lines,),
# blit=True, # bug?
interval=5000
)
plt.show() # this is blocking!
def initGraph(log_lock, file_lock):
"""Launch an awesome matplotlib graph!"""
log.setLock(log_lock)
fu.setLock(file_lock)
du.TIME_TRAVELER.setTime(None)
sig.catchSignal()
try:
_getData()
_initGraph()
except Exception as e:
log.warning("Something's bjorked in your graph :/")
log.info("Try to run babao again with the env variable DEBUG_GRAPH set")
# traceback.print_exc()
raise e
sys.exit(0) # we exit explicitly in the subprocess, to avoid double clean
```
#### File: inputs/trades/tradesInputBase.py
```python
from abc import abstractmethod
import numpy as np
import babao.inputs.inputBase as ib
class ABCTradesInput(ib.ABCInput):
"""Base class for any trades input"""
raw_columns = [
"price", "volume"
]
resampled_columns = [
"open", "high", "low", "close", "vwap", "volume", "count"
]
@property
@abstractmethod
def quote(self):
"""
Override this method with the desired QuoteEnum
ex: self.quote = QuoteEnum.EUR
"""
pass
@property
@abstractmethod
def crypto(self):
"""
Override this method with the desired CryptoEnum
ex: self.crypto = CryptoEnum.XBT
"""
pass
def _resample(self, raw_data):
p = ib.resampleSerie(raw_data["price"])
resampled_data = p.ohlc()
# tmp var for ordering
v = ib.resampleSerie(raw_data["volume"]).sum()
resampled_data["vwap"] = ib.resampleSerie(
raw_data["price"] * raw_data["volume"]
).sum() / v
resampled_data["volume"] = v
resampled_data["count"] = p.count()
return resampled_data
def fillMissing(self, resampled_data):
resampled_data["count"].fillna(0, inplace=True)
resampled_data["volume"].fillna(0, inplace=True)
resampled_data["vwap"].replace(np.inf, np.nan, inplace=True)
i = resampled_data.index[0]
for col in ["vwap", "close"]:
if np.isnan(resampled_data.loc[i, col]):
if self.current_row is not None:
resampled_data.loc[i, col] = self.current_row.price
else:
resampled_data.loc[i, col] = 0
resampled_data[col].ffill(inplace=True)
c = resampled_data["close"]
resampled_data["open"].fillna(c, inplace=True)
resampled_data["high"].fillna(c, inplace=True)
resampled_data["low"].fillna(c, inplace=True)
return resampled_data
@abstractmethod
def fetch(self):
pass
```
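A hypothetical concrete subclass, to show how the abstract hooks above are meant to be filled in; the enum values, the DataFrame layout, and the assumption that the parent ABCInput requires nothing further are all illustrative, not taken from the repository:
```python
import pandas as pd

class DummyTradesInput(ABCTradesInput):
    """Illustrative only: a trades input backed by a canned DataFrame."""
    quote = "EUR"   # in babao this would be a QuoteEnum member
    crypto = "XBT"  # ...and a CryptoEnum member

    def fetch(self):
        # raw data must expose the 'price' and 'volume' columns
        # declared in raw_columns above
        return pd.DataFrame(
            {"price": [100.0, 101.5], "volume": [0.5, 1.2]},
            index=pd.to_datetime(["2018-01-01 00:00", "2018-01-01 00:01"]),
        )
```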
#### File: babao/utils/scale.py
```python
class Scaler:
"""Basic min/max scaler"""
def __init__(self):
self.scale_min = 0
self.scale_max = 100000
def fit(self, arr):
"""Init scaler"""
self.scale_min = arr.min()
self.scale_max = arr.max()
if len(arr.shape) > 1:
self.scale_min = min(self.scale_min)
self.scale_max = max(self.scale_max)
def scale(self, arr):
"""Scale features before train/predict"""
return (
(arr - self.scale_min)
/ (self.scale_max - self.scale_min)
)
def unscale(self, arr):
"""Unscale features after train/predict"""
return (
arr * (self.scale_max - self.scale_min)
+ self.scale_min
)
def scaleFit(self, arr):
"""Scale n Fit"""
self.fit(arr)
return self.scale(arr)
```
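A minimal round-trip sketch of the Scaler above, assuming a plain NumPy input:
```python
import numpy as np

# illustrative only: scale a feature column to [0, 1] and back
scaler = Scaler()
features = np.array([10.0, 20.0, 30.0, 40.0])
scaled = scaler.scaleFit(features)   # fit min/max, then scale
restored = scaler.unscale(scaled)    # recover the original range
assert np.allclose(restored, features)
```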
#### File: tests/api/test_kraken.py
```python
# import babao.api.kraken as kraken
# import babao.babao as babao
# # TODO: how should we test private api requests?
# def test_getRawTrades():
# babao._init("d") # TODO: hardcode api config?
# raw_data = kraken.getRawTrades("")[0]
# assert raw_data.index[0] > 1500000000
# assert len(raw_data.index) == 1000
# assert not raw_data["price"].empty
# assert not raw_data["volume"].empty
# babao._kthxbye()
```
|
{
"source": "JeanMaximilienCadic/deepspeech2paddle-docker",
"score": 2
}
|
#### File: JeanMaximilienCadic/deepspeech2paddle-docker/infer.py
```python
import sys
import argparse
import functools
import paddle.fluid as fluid
from data_utils.data import DataGenerator
from model_utils.model import DeepSpeech2Model
from model_utils.model_check import check_cuda, check_version
from utils.error_rate import wer, cer
from utils.utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('num_samples', int, 10, "# of samples to infer.")
add_arg('beam_size', int, 500, "Beam search width.")
add_arg('num_proc_bsearch', int, 8, "# of CPUs for beam search.")
add_arg('num_conv_layers', int, 2, "# of convolution layers.")
add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
add_arg('alpha', float, 2.5, "Coef of LM for beam search.")
add_arg('beta', float, 0.3, "Coef of WC for beam search.")
add_arg('cutoff_prob', float, 1.0, "Cutoff probability for pruning.")
add_arg('cutoff_top_n', int, 40, "Cutoff number for pruning.")
add_arg('use_gru', bool, False, "Use GRUs instead of simple RNNs.")
add_arg('use_gpu', bool, True, "Use GPU or not.")
add_arg('share_rnn_weights',bool, True, "Share input-hidden weights across "
"bi-directional RNNs. Not for GRU.")
add_arg('infer_manifest', str,
'data/librispeech/manifest.dev-clean',
"Filepath of manifest to infer.")
add_arg('mean_std_path', str,
'data/librispeech/mean_std.npz',
"Filepath of normalizer's mean & std.")
add_arg('vocab_path', str,
'data/librispeech/vocab.txt',
"Filepath of vocabulary.")
add_arg('lang_model_path', str,
'models/lm/common_crawl_00.prune01111.trie.klm',
"Filepath for language model.")
add_arg('model_path', str,
'./checkpoints/libri/step_final',
"If None, the training starts from scratch, "
"otherwise, it resumes from the pre-trained model.")
add_arg('decoding_method', str,
'ctc_beam_search',
"Decoding method. Options: ctc_beam_search, ctc_greedy",
choices = ['ctc_beam_search', 'ctc_greedy'])
add_arg('error_rate_type', str,
'wer',
"Error rate type for evaluation.",
choices=['wer', 'cer'])
add_arg('specgram_type', str,
'linear',
"Audio feature type. Options: linear, mfcc.",
choices=['linear', 'mfcc'])
# yapf: enable
args = parser.parse_args()
def infer():
"""Inference for DeepSpeech2."""
# check if set use_gpu=True in paddlepaddle cpu version
check_cuda(args.use_gpu)
# check if paddlepaddle version is satisfied
check_version()
if args.use_gpu:
place = fluid.CUDAPlace(0)
else:
place = fluid.CPUPlace()
data_generator = DataGenerator(
vocab_filepath=args.vocab_path,
mean_std_filepath=args.mean_std_path,
augmentation_config='{}',
specgram_type=args.specgram_type,
keep_transcription_text=True,
place = place,
is_training = False)
batch_reader = data_generator.batch_reader_creator(
manifest_path=args.infer_manifest,
batch_size=args.num_samples,
sortagrad=False,
shuffle_method=None)
infer_data = next(batch_reader())
ds2_model = DeepSpeech2Model(
vocab_size=data_generator.vocab_size,
num_conv_layers=args.num_conv_layers,
num_rnn_layers=args.num_rnn_layers,
rnn_layer_size=args.rnn_layer_size,
use_gru=args.use_gru,
share_rnn_weights=args.share_rnn_weights,
place=place,
init_from_pretrained_model=args.model_path)
# decoders only accept string encoded in utf-8
vocab_list = [chars for chars in data_generator.vocab_list]
if args.decoding_method == "ctc_greedy":
ds2_model.logger.info("start inference ...")
probs_split = ds2_model.infer_batch_probs(
infer_data=infer_data,
feeding_dict=data_generator.feeding)
result_transcripts = ds2_model.decode_batch_greedy(
probs_split=probs_split,
vocab_list=vocab_list)
else:
ds2_model.init_ext_scorer(args.alpha, args.beta, args.lang_model_path,
vocab_list)
ds2_model.logger.info("start inference ...")
probs_split= ds2_model.infer_batch_probs(
infer_data=infer_data,
feeding_dict=data_generator.feeding)
result_transcripts= ds2_model.decode_batch_beam_search(
probs_split=probs_split,
beam_alpha=args.alpha,
beam_beta=args.beta,
beam_size=args.beam_size,
cutoff_prob=args.cutoff_prob,
cutoff_top_n=args.cutoff_top_n,
vocab_list=vocab_list,
num_processes=args.num_proc_bsearch)
error_rate_func = cer if args.error_rate_type == 'cer' else wer
target_transcripts = infer_data[1]
for target, result in zip(target_transcripts, result_transcripts):
print("\nTarget Transcription: %s\nOutput Transcription: %s" %
(target, result))
print("Current error rate [%s] = %f" %
(args.error_rate_type, error_rate_func(target, result)))
ds2_model.logger.info("finish inference")
def main():
print_arguments(args)
infer()
if __name__ == '__main__':
main()
```
|
{
"source": "JeanMaximilienCadic/FlowerApp-PytorchMobile",
"score": 3
}
|
#### File: JeanMaximilienCadic/FlowerApp-PytorchMobile/train.py
```python
import torch
import numpy as np
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, models, transforms
from torch.utils.data import DataLoader
from collections import OrderedDict
from PIL import Image
from torch import Tensor
import torchvision
import shutil
import argparse
import os
from torchsummary import summary
def validation(model, testloader, criterion, device):
test_loss = 0
accuracy = 0
model.to(device)
for images, labels in testloader:
images, labels = images.to(device), labels.to(device)
# images.resize_(images.shape[0], 3, 224, 224)
output = model.forward(images)
test_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
return test_loss, accuracy
def evaluate_model(model, validloader,e ,epochs, running_loss, print_every):
# Make sure network is in eval mode for inference
model.eval()
# Turn off gradients for validation, saves memory and computations
with torch.no_grad():
validation_loss, accuracy = validation(model, validloader, criterion, device)
print("Epoch: {}/{}.. ".format(e + 1, epochs),
"Training Loss: {:.3f}.. ".format(running_loss / print_every),
"Validation Loss: {:.3f}.. ".format(validation_loss / len(validloader)),
"Validation Accuracy: {:.3f}".format((accuracy / len(validloader)) * 100))
model.train()
return 0, accuracy
def train(model, trainloader, validloader, epochs, print_every, criterion, optimizer, arch, model_dir="models"):
epochs = epochs
print_every = print_every
steps = 0
# Change to train mode if not already
model.train()
best_accuracy = 0
for e in range(epochs):
running_loss = 0
accuracy = None
for (images, labels) in trainloader:
steps += 1
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
running_loss, accuracy = evaluate_model(model, validloader, e, epochs, running_loss, print_every)
if accuracy is None:
running_loss, accuracy = evaluate_model(model, validloader, e, epochs, running_loss, print_every)
is_best = accuracy > best_accuracy
best_accuracy = max(accuracy, best_accuracy)
save_checkpoint({
'epoch': epochs,
'classifier': model.classifier,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'class_idx_mapping': model.class_idx_mapping,
'best_accuracy': (best_accuracy/len(validloader))*100
}, arch=arch, is_best=is_best, model_dir=model_dir, filename=f'{arch}.ckpt.pth')
def save_checkpoint(state, arch, is_best=False, model_dir="models", filename='checkpoint.pth'):
torch.save(state, os.path.join(model_dir, filename))
if is_best:
shutil.copyfile(os.path.join(model_dir, filename), os.path.join(model_dir,f'{arch}.pth'))
def check_accuracy_on_test(testloader, model):
correct = 0
total = 0
model.to('cuda')
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to('cuda'), labels.to('cuda')
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
return 100 * correct / total
def load_data_folder(data_folder="data", batch_size=64):
"""
Loads the dataset into a dataloader.
Arguments:
data_folder: Path to the folder where data resides. Should have two sub folders named "train" and "valid".
Returns:
train_dataloader: Train dataloader iterator.
valid_dataloader: Validation dataloader iterator.
"""
train_dir = os.path.join(data_folder, "train")
valid_dir = os.path.join(data_folder, "valid")
# Define transforms for the training, validation, and testing sets
train_transforms = transforms.Compose([
transforms.RandomRotation(30),
transforms.RandomResizedCrop(size=224),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
validation_transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Load the datasets with ImageFolder
train_dataset = datasets.ImageFolder(train_dir, transform=train_transforms)
validation_dataset = datasets.ImageFolder(valid_dir, transform=validation_transforms)
# Using the image datasets and the transforms, define the dataloaders
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
valid_dataloader = DataLoader(validation_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
return train_dataloader, valid_dataloader, train_dataset.class_to_idx
def replace_head(model):
for param in model.parameters():
param.requires_grad = False
last_child = list(model.children())[-1]
if type(last_child) == torch.nn.modules.linear.Linear:
input_features = last_child.in_features
head = torch.nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_features, 4096)),
('relu', nn.ReLU()),
('dropout', nn.Dropout(p=0.5)),
('fc2', nn.Linear(4096, len(class_idx_mapping))),
('output', nn.LogSoftmax(dim=1))]))
model.fc = head
model.classifier = model.fc
elif type(last_child) == torch.nn.Sequential:
input_features = list(last_child.children())[0].in_features
head = torch.nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_features, 4096)),
('relu', nn.ReLU()),
('dropout', nn.Dropout(p=0.5)),
('fc2', nn.Linear(4096, len(class_idx_mapping))),
('output', nn.LogSoftmax(dim=1))]))
model.classifier = head
return model
def build_model(arch="vgg16", class_idx_mapping=None):
my_local = dict()
exec("model = models.{}(pretrained=True)".format(arch), globals(), my_local)
model = my_local['model']
model = replace_head(model)
model.class_idx_mapping = class_idx_mapping
return model
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("data_dir", help="Directory containing the dataset.",
default="data", nargs="?")
ap.add_argument("--learning_rate", help="Learning rate for Adam optimizer. (default: 0.001)",
default=0.001, type=float)
ap.add_argument("--epochs", help="Number of iterations over the whole dataset. (default: 3)",
default=100, type=int)
ap.add_argument("--model_dir", help="Directory which will contain the model checkpoints.",
default="models")
ap.add_argument("--arch", help="Directory which will contain the model checkpoints.",
default="densenet161")
ap.add_argument("--batch_size",
default=1, type=int)
args = vars(ap.parse_args())
os.system("mkdir -p " + args["model_dir"])
(train_dataloader, valid_dataloader, class_idx_mapping) = load_data_folder(data_folder=args["data_dir"],
batch_size=args["batch_size"])
device = torch.device("cuda:0")
model = build_model(arch=args["arch"], class_idx_mapping=class_idx_mapping)
model.to(device)
summary(model, input_size=(3, 224, 224))
criterion = nn.NLLLoss()
# optimizer = optim.Adam(list(model.children())[-1].parameters(), lr=args["learning_rate"])
optimizer = optim.Adam(model.classifier.parameters(), lr=args["learning_rate"])
train(model=model,
trainloader=train_dataloader,
validloader=valid_dataloader,
epochs=args["epochs"],
print_every=20,
arch=args["arch"],
criterion=criterion,
optimizer=optimizer,
model_dir=args["model_dir"])
# model = torchvision.models.resnet18(pretrained=True)
# model.eval()
# example = torch.rand(1, 3, 224, 224)
# traced_script_module = torch.jit.trace(model, example)
# traced_script_module.save("android/app/src/main/assets/model.pt")
```
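A hedged sketch of restoring one of the checkpoints written by save_checkpoint above; the file name follows the `{arch}.pth` convention used when `is_best` is true, and the module-level `class_idx_mapping` assignment is needed because replace_head reads that global:
```python
import torch

# illustrative only: rebuild the network and load the best densenet161 weights
checkpoint = torch.load("models/densenet161.pth", map_location="cpu")
class_idx_mapping = checkpoint["class_idx_mapping"]   # replace_head reads this global
model = build_model(arch="densenet161", class_idx_mapping=class_idx_mapping)
model.load_state_dict(checkpoint["state_dict"])
model.eval()
```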
#### File: JeanMaximilienCadic/FlowerApp-PytorchMobile/utils.py
```python
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
def imshow_original(image, ax=None, title=None, normalize=True):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
image = image.numpy().transpose((1, 2, 0))
if normalize:
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
image = np.clip(image, 0, 1)
ax.imshow(image)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.tick_params(axis='both', length=0)
ax.set_xticklabels('')
ax.set_yticklabels('')
return ax
def process_image(img_path):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns a Numpy array
'''
img = Image.open(img_path)
w, h = img.size
if w<h:
size = 256, 999999999
else:
size = 999999999, 256
img.thumbnail(size=size)
w, h = img.size
left = (w - 224) / 2
right = (w + 224) / 2
top = (h - 224) / 2
bottom = (h + 224) / 2
img = img.crop((left, top, right, bottom))
# Convert to numpy array
np_img = np.array(img)/255
# Normalize
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_img = (np_img - mean) / std
np_img = np_img.transpose(2, 0, 1)
return np_img
def imshow(image, ax=None, title=None):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
```
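A short sketch of chaining the two helpers above; the image path is a placeholder:
```python
import matplotlib.pyplot as plt

# illustrative only: preprocess an image and display the de-normalized result
np_img = process_image("flower.jpg")   # hypothetical path, returns (3, 224, 224)
imshow(np_img)
plt.show()
```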
|
{
"source": "JeanMaximilienCadic/redison",
"score": 3
}
|
#### File: redison/redison/redis_object.py
```python
import redis
import pickle
import time
from gnutools.utils import id_generator
class RedisObject:
def __init__(self, data=None, id=None, host="127.0.0.1", port="6379", db=0):
self._host = host
self._port = port
self._db = db
self._id = id if id is not None else "x" + id_generator(16)
self._redis = redis.Redis(host=host, port=port, db=db)
self.set(data) if data is not None else None
def load_binary(self, binary):
"""
Nested loading of the pickled binary values
:param binary:
:return:
"""
if type(binary)==dict:
return dict([(k, self.load_binary(v)) for k, v in binary.items()])
elif type(binary)==bytes:
return pickle.loads(binary)
def get(self, blocking=False):
"""
Pull the redis object from the database
:return:
"""
redis_object = self._redis.get(self._id)
while blocking:
try:
assert redis_object is not None
blocking=False
except AssertionError:
time.sleep(0.001)
redis_object = self._redis.get(self._id)
return self.load_binary(redis_object)
def set(self, data):
"""
Push the redis object to the database
:param data:
:return:
"""
return self._redis.set(self._id, pickle.dumps(data))
def delete(self):
"""
Delete the redis object from the database
:return:
"""
return self._redis.delete(self._id)
```
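A short usage sketch, assuming a Redis server is reachable on the default host/port used above:
```python
# illustrative only: push an object, read it back from another handle, clean up
obj = RedisObject(data={"status": "ok", "values": [1, 2, 3]})
same = RedisObject(id=obj._id)       # attach to the same key elsewhere
print(same.get(blocking=True))       # -> {'status': 'ok', 'values': [1, 2, 3]}
obj.delete()
```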
|
{
"source": "JeanMaximilienCadic/YoutubeJADataset",
"score": 2
}
|
#### File: JeanMaximilienCadic/YoutubeJADataset/youtube_master.py
```python
from gnutools.www import Youtube
from gnutools.utils import read_csv
from concurrent.futures import ProcessPoolExecutor, as_completed
from tqdm import tqdm
import os
import time
import argparse
import numpy as np
import random
class YoutubeMaster:
def __init__(self, root_convert, world_size=1, rank=0, csv_file=None, id_list=None):
self.root_convert = root_convert
self.ids_completed = self.get_completed(self.root_convert)
if id_list is None:
lines = read_csv(csv_file)
annotations = set([line[0] for line in lines])
N = len(list(annotations))
step = N // world_size
start = np.arange(0, N, step + 1)[rank]
id_list = [annotation for annotation in list(annotations)[start:start + step + 1]]
random.shuffle(id_list)
self.id_list = self.filter_completed(set(id_list))
self.fs=[]
def launch(self):
with ProcessPoolExecutor() as e:
self.fs = [e.submit(self.prepare, id=id, root_convert=self.root_convert) for id in tqdm(self.id_list)]
for f in tqdm(as_completed(self.fs)):
pass
@staticmethod
def prepare(id, root_convert):
yt = Youtube(id=id)
yt.download(root=root_convert)
yt.post_process(fq=16000,
split_audio=True,
split_captions=True)
def get_completed(self, root_convert):
def is_completed(dir):
try:
dirs = os.listdir(dir)
assert dirs.__contains__("audio")
assert dirs.__contains__("text")
files_audio = os.listdir("{dir}/audio".format(dir=dir))
files_text = os.listdir("{dir}/text".format(dir=dir))
assert len(files_audio) == len(files_text)
except:
return False
return True
ids=set()
for id in os.listdir("{root}".format(root=root_convert)):
if is_completed("{root}/{id}".format(root=root_convert, id=id)):
ids.add(id)
return ids
def filter_completed(self, ids):
return ids.difference(self.ids_completed)
if __name__=="__main__":
yt = Youtube(id="e3Odkr4yhD0")
yt.download(root="/mnt/IYONas3/ASR/ja/audio_text/YOUTUBE/all")
yt.post_process(fq=16000,
split_audio=True,
split_captions=True)
```
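For completeness, a hedged sketch of driving the YoutubeMaster class defined above; the paths are placeholders, and the CSV is assumed to carry the video id in its first column, as read by read_csv above:
```python
# illustrative only: shard the id list across 4 workers and run rank 0
master = YoutubeMaster(
    root_convert="/data/youtube_ja",   # hypothetical output root
    world_size=4,
    rank=0,
    csv_file="annotations.csv",        # hypothetical file, first column = video id
)
master.launch()
```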
|
{
"source": "JeanMax/SuperDatabase3000",
"score": 3
}
|
#### File: src/superdatabase3000/packet.py
```python
import collections
import struct
import hashlib
CANARI = 0xdeadbeef
CANARI_SIZE = 4 # unsigned int
CHECKSUM_SIZE = 20 # sha1
INT_SIZE = 8 # unsigned long long
PAYLOAD_MIN_SIZE = 32 # TODO: tweak me based on DbClient requests size: 256-32
PACKET_MIN_SIZE = (
CANARI_SIZE + CHECKSUM_SIZE + INT_SIZE
+ PAYLOAD_MIN_SIZE
) # 64
CHECKSUM_OFFSET = CANARI_SIZE + CHECKSUM_SIZE # we'll start hasing from there
STRUCT_FORMAT = (
"!"
"I" # canari
f"{CHECKSUM_SIZE}s" # checksum
"Q" # payload_size
"{payload_size}s" # payload: complete its size using format
)
Packet = collections.namedtuple(
"Packet",
["canari", "checksum", "payload_size", "payload"]
)
def _checksum(bytes_buf):
"""Return the sha1 digest of the given 'bytes_buf'."""
return hashlib.sha1(bytes_buf[CHECKSUM_OFFSET:]).digest()
def _verify_checksum(ctrl_checksum, bytes_buf):
"""
Return True if the given 'ctrl_checksum' matches the checksum
of 'bytes_buf', otherwise throw a ValueError.
"""
if ctrl_checksum != _checksum(bytes_buf):
raise ValueError("packet: invalid checksum")
return True
def pack(payload, with_checksum=True):
"""
Create a packet from the given 'payload' byte object that you want to send.
If the 'with_checksum' argument is True, the checksum of the payload will
be calculated and inserted in the packet, otherwise the checksum will be
set to zeros.
Returns a bytes object of the created packet (ready to send).
"""
packet = Packet(
canari=CANARI,
checksum=b"\x00" * CHECKSUM_SIZE,
payload_size=len(payload),
payload=payload.ljust(PAYLOAD_MIN_SIZE, b"\x00")
)
payload_size = max(packet.payload_size, PAYLOAD_MIN_SIZE)
try:
bytes_buf = struct.pack(
STRUCT_FORMAT.format(payload_size=payload_size),
*packet
)
except struct.error as e:
raise ValueError(f"packet: {e}")
if with_checksum:
packet = packet._replace(checksum=_checksum(bytes_buf))
bytes_buf = struct.pack(
STRUCT_FORMAT.format(payload_size=payload_size),
*packet
)
return bytes_buf
def unpack(bytes_buf, with_checksum=True):
"""
Extract the payload (as a bytes object) from the given 'bytes_buf' packet.
If the 'with_checksum' argument is True, the checksum in the packet will be
checked against a calculated checksum of the packet payload. Otherwise it
will just be ignored.
Returns a bytes object of the extracted payload.
A ValueError will be thrown if an invalid packet is given as 'bytes_buf'
(invalid canari, checksum, payload length)
"""
# first, we try to unpack as if it was a 64 bytes packet
try:
packet = struct.unpack(
STRUCT_FORMAT.format(payload_size=PAYLOAD_MIN_SIZE),
bytes_buf[:PACKET_MIN_SIZE]
)
except struct.error as e:
raise ValueError(f"packet: {e}")
packet = Packet(*packet)
if packet.canari != CANARI:
raise ValueError("packet: the canari is dead")
# payload can fit in a 64 bytes packet: just verify checksum, then job done
if packet.payload_size <= PAYLOAD_MIN_SIZE:
if with_checksum:
_verify_checksum(packet.checksum, bytes_buf)
packet = packet._replace(
payload=packet.payload[:packet.payload_size]
)
return packet
# packet is actually bigger than 64 bytes (extra_payload)
if len(bytes_buf) <= PACKET_MIN_SIZE:
return packet # the payload is incomplete, and checksum not verified
try:
packet = struct.unpack(
STRUCT_FORMAT.format(payload_size=packet.payload_size),
bytes_buf
)
except struct.error as e:
raise ValueError(f"packet: {e}")
packet = Packet(*packet)
if with_checksum:
_verify_checksum(packet.checksum, bytes_buf)
return packet # complete packet with extra payload
```
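A quick round-trip sketch of the pack/unpack API above:
```python
# illustrative only: a payload larger than PAYLOAD_MIN_SIZE still round-trips
payload = b"hello superdatabase3000 " * 4            # 96 bytes > PAYLOAD_MIN_SIZE
buf = pack(payload)
packet = unpack(buf)                                  # checksum verified by default
assert packet.payload == payload
assert len(buf) == CHECKSUM_OFFSET + INT_SIZE + packet.payload_size
```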
#### File: src/superdatabase3000/signal.py
```python
import signal
class ExitSignalWatcher():
"""This class allows to store any caught SIGINT/SIGTERM"""
EXIT = 0
def __init__(self):
self.sig_handler = None
@staticmethod
def _signal_handler(signal_code, unused_frame):
"""Just store the exit status, for later safe exiting"""
ExitSignalWatcher.EXIT = 128 + signal_code
def catch(self):
"""Catch signal INT/TERM, so we won't exit while playing with files"""
if self.sig_handler is None:
self.sig_handler = {}
self.sig_handler.setdefault(
signal.SIGINT,
signal.signal(signal.SIGINT, ExitSignalWatcher._signal_handler)
)
self.sig_handler.setdefault(
signal.SIGTERM,
signal.signal(signal.SIGTERM, ExitSignalWatcher._signal_handler)
)
ExitSignalWatcher.EXIT = 0
def restore(self):
"""Restore the previous signal handler"""
signal.signal(signal.SIGINT, self.sig_handler[signal.SIGINT])
signal.signal(signal.SIGTERM, self.sig_handler[signal.SIGTERM])
```
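A minimal usage sketch; the critical-section function is hypothetical:
```python
# illustrative only: defer Ctrl-C handling around a critical section
watcher = ExitSignalWatcher()
watcher.catch()                      # SIGINT/SIGTERM are now only recorded
do_critical_file_work()              # hypothetical function, not interrupted mid-write
watcher.restore()                    # previous handlers are back in place
if ExitSignalWatcher.EXIT:
    raise SystemExit(ExitSignalWatcher.EXIT)
```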
#### File: src/superdatabase3000/socket.py
```python
import os
import socket
import select
import pickle
import collections
import superdatabase3000.packet as pckt
DEFAULT_SOCK_FILENAME = "/tmp/superdatabase3000.sock"
BUF_SIZE = 0x1000 # TODO: tweak me
def _send_to(sock, msg):
"""
Send 'msg' to given 'sock'.
'msg' can be any python object, it well be pickled first, then sent
as a packet to the socket.
"""
payload = pickle.dumps(msg)
bytes_buf = pckt.pack(payload)
del payload
sock.send(bytes_buf[:pckt.PACKET_MIN_SIZE])
bytes_buf = bytes_buf[pckt.PACKET_MIN_SIZE:]
while bytes_buf:
buf_size = min(len(bytes_buf), BUF_SIZE)
sock.send(bytes_buf[:buf_size])
bytes_buf = bytes_buf[buf_size:]
def _recv_from(sock):
"""
Receive a message from the given 'sock'.
The returned message can be any valid python object.
This function will block till a packet is fully received.
"""
bytes_buf = sock.recv(pckt.PACKET_MIN_SIZE)
packet = pckt.unpack(bytes_buf)
if packet.payload_size != len(packet.payload):
bytes_to_read = packet.payload_size - len(packet.payload)
while bytes_to_read:
buf_size = min(bytes_to_read, BUF_SIZE)
bytes_buf += sock.recv(buf_size)
bytes_to_read -= buf_size
packet = pckt.unpack(bytes_buf)
msg = pickle.loads(packet.payload)
return msg
def _msg_head_str(msg):
"""A utility to print the head of the str representation of an object."""
msg_limit = 32
msg_str = f"'{msg}'"
if len(msg_str) > msg_limit:
msg_str = msg_str[:msg_limit] + "'..."
return msg_str
class _SocketBase():
"""Base socket class for client/server socket."""
def __init__(self, sock_filename):
"""Open a unix socket on the given 'sock_filename' path."""
if sock_filename is None:
sock_filename = DEFAULT_SOCK_FILENAME
self.sock_filename = sock_filename
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def __del__(self):
"""Close the previously opened socket."""
self.sock.close()
class SocketClient(_SocketBase):
"""
This class can exchange messages with a SocketServer over a unix socket.
"""
def __init__(self, sock_filename=None):
"""
Initialize the connection to the SocketServer on the given
'sock_filename' unix socket path.
"""
super().__init__(sock_filename)
self._connect()
def _connect(self):
"""
Connect to the SocketServer on 'self.sock_filename'.
This function might raise a socket.error if it fails to connect.
"""
if not os.path.exists(self.sock_filename):
raise socket.error(f"Can't find socket at '{self.sock_filename}'")
self.sock.connect(self.sock_filename)
def send(self, msg):
"""Send a 'msg' (any python object) to the SocketServer."""
_send_to(self.sock, msg)
def recv(self):
"""Receive a message (any python object) from the SocketServer."""
return _recv_from(self.sock)
# This named-tuple is used to represent a client from the server perspective
_Client = collections.namedtuple(
"_Client",
["sock", "to_send_msg_queue"]
)
class SocketServer(_SocketBase):
"""
This class can exchange messages with multiples clients over a unix socket.
"""
def __init__(self, sock_filename=None):
"""
Start listening for SocketClient on the given
'sock_filename' unix socket path.
"""
super().__init__(sock_filename)
self._listen()
self.clients = {} # dict(client_sock_fd: _Client)
def __del__(self):
"""Ensure each connection is closed."""
for client in self.clients.values():
client.sock.close()
super().__del__()
def _listen(self):
"""Listen for SocketClient on 'self.sock_filename'."""
if os.path.exists(self.sock_filename):
os.unlink(self.sock_filename)
# self.sock.setblocking(0)
self.sock.bind(self.sock_filename)
self.sock.listen()
def _accept(self):
"""Accept a new SocketClient."""
client_sock, _ = self.sock.accept()
print("socket: accept:", client_sock)
# client_sock.setblocking(0)
self.clients[client_sock.fileno()] = _Client(
sock=client_sock,
to_send_msg_queue=[]
)
def _remove_client(self, client_sock):
"""Remove a SocketClient."""
print("socket: remove:", client_sock)
fd = client_sock.fileno()
client_sock.close()
del self.clients[fd]
def _handle_error_stream(self, sock):
"""Handle socket 'sock' with an error status returned by select."""
if sock is self.sock:
print("socket: PANIC! error on server sock:", sock)
# it's the server socket itself, so we're screwed anyway :/
else:
print("socket: error on sock:", sock)
self._remove_client(sock)
def _handle_write_stream(self, sock):
"""Handle socket 'sock' which can be written to."""
msg_queue = self.clients[sock.fileno()].to_send_msg_queue
if not msg_queue:
return
msg = msg_queue.pop(0)
print(f"socket: sending message {_msg_head_str(msg)} to sock: {sock}")
_send_to(sock, msg)
def _handle_read_stream(self, sock, on_msg):
"""
Handle socket 'sock' which can be read from.
"""
if sock is self.sock:
self._accept()
return
try:
msg = _recv_from(sock)
except ValueError:
print("socket: received an empty/invalid packet, removing client")
self._remove_client(sock)
return
print(f"socket: received message: {_msg_head_str(msg)}")
on_msg(sock.fileno(), msg)
def poll_events(self, on_msg, timeout=0.5):
"""
Check for events on the server.
This will handle:
- accepting/removing clients
- sending messages (the ones added to the queue with send_to)
- reading messages
If a message is successfully read from the socket, the 'on_msg' callback
will be called with 2 parameters: on_msg(socket_fd, msg)
- socket_fd (int): the socket fd of the client
- msg (object): the message received from the client
(You can use 'socket_fd' to answer to the client with send_to)
"""
inputs = [c.sock for c in self.clients.values()] + [self.sock]
rlist, wlist, xlist = select.select(
inputs,
[c.sock for c in self.clients.values() if c.to_send_msg_queue],
inputs,
timeout
)
for sock in rlist:
self._handle_read_stream(sock, on_msg)
for sock in wlist:
self._handle_write_stream(sock)
for sock in xlist:
self._handle_error_stream(sock)
def send_to(self, client_sock_fd, msg):
"""
Send a message 'msg' (any python object)
to the socket fd 'client_sock_fd'.
Actually it will just put the message in a queue, eh. The message will
be sent on a later call to poll_events if the socket destination can
be written to.
"""
self.clients[client_sock_fd].to_send_msg_queue.append(msg)
```
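A minimal echo sketch of how the two classes above interact, run in a single process for illustration; in the real daemon the server would poll in its own loop:
```python
# illustrative only: the server echoes every message back to its sender
server = SocketServer("/tmp/demo.sock")
client = SocketClient("/tmp/demo.sock")
client.send("ping")

def on_msg(client_fd, msg):
    server.send_to(client_fd, {"echo": msg})

for _ in range(3):                   # accept, read, then flush the reply queue
    server.poll_events(on_msg, timeout=0.1)

print(client.recv())                 # -> {'echo': 'ping'}
```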
|
{
"source": "jean/measure-areas",
"score": 3
}
|
#### File: measure-areas/measure_areas/cli.py
```python
import click
from measure_areas.measure_areas import measure_areas
@click.command()
@click.argument('filenames', type=click.Path(exists=True), nargs=-1)
def main(filenames):
"""Console script for measure_areas."""
if not filenames:
print("No files specified")
return
for fn in filenames:
measure_areas(fn)
if __name__ == "__main__":
main()
```
|
{
"source": "jeanmichelscherer/gradam",
"score": 2
}
|
#### File: src/gradam/material_models.py
```python
_author__ = "<NAME>"
__license__ = "CC BY-SA 4.0"
__email__ = "<EMAIL>"
import meshio
from dolfin import *
from .material_properties import *
from .rotation_matrix import *
from .tensors import *
from .mfront_behaviour import *
import numpy as np
from ufl import sign
import ufl
import mgis.fenics as mf
parameters["form_compiler"]["representation"] = 'uflacs'
ufl.algorithms.apply_derivatives.CONDITIONAL_WORKAROUND = True # allows to use a TrialFunction in conditional( ) for spectral_decomposition
# residual stiffness
kres = Constant(1e-6)
def eps(v,dim):
e = sym(grad(v))
if (dim == 2):
return as_matrix([[e[0,0],e[0,1],0.],[e[1,0],e[1,1],0.],[0.,0.,0.]])
else:
return e
def strain2voigt(e):
#return as_vector([e[0,0],e[1,1],e[2,2],2*e[0,1],2*e[1,2],2*e[2,0]])
return as_vector([e[0,0],e[1,1],e[2,2],2*e[1,2],2*e[0,2],2*e[0,1]])
def voigt2stress(s):
return as_tensor([[s[0], s[5], s[4]],[s[5], s[1], s[3]],[s[4], s[3], s[2]]])
def voigt2strain(e):
return as_tensor([[e[0], e[5]/2., e[4]/2.],[e[5]/2., e[1], e[3]/2.],[e[4]/2., e[3]/2., e[2]]])
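# Voigt ordering used throughout: (11, 22, 33, 23, 13, 12), with engineering
# shear strains (factor 2) in strain2voigt; voigt2stress/voigt2strain place the
# shear components symmetrically in the same order. The commented-out
# strain2voigt line above corresponds to the alternative
# (11, 22, 33, 12, 23, 31) ordering, which is not used.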
ppos = lambda x: (abs(x)+x)/2.
pneg = lambda x: x-ppos(x)
Heav = lambda x: (sign(x)+1.)/2.
#Heav = lambda x: ((x/sqrt(x**2+1.e-30))+1.)/2.
class EXD:
""" Elastic Crystal Damage (EXD) model with X variables and associated fracture energies """
def __init__(self,dim,damage_dim,material_parameters,mesh,mf,geometry,behaviour="linear_elasticity",\
mfront_behaviour=None,damage_model="AT1",anisotropic_elasticity="cubic",damage_tensor=[False,0,0]):
self.mp = material_parameters
self.dim = dim
self.damage_dim = damage_dim
self.damage_model = damage_model
self.anisotropic_elasticity = anisotropic_elasticity
self.mesh = mesh
self.mf = mf
self.geometry = geometry
if (self.anisotropic_elasticity=="cubic"):
E=self.mp["E"]
nu=self.mp["nu"]
G=self.mp["G"]
self.moduli=list(zip(E,nu,G))
elif (self.anisotropic_elasticity=="orthotropic"):
E1,E2,E3=self.mp["E1"],self.mp["E2"],self.mp["E3"]
nu12,nu21,nu13,nu31,nu23,nu32=self.mp["nu12"],\
self.mp["nu21"],self.mp["nu13"],self.mp["nu31"],self.mp["nu23"],self.mp["nu32"]
G12,G13,G23=self.mp["G12"],self.mp["G13"],self.mp["G23"]
self.moduli=list(zip(E1,E2,E3,nu12,nu21,nu13,nu31,nu23,nu32,G12,G13,G23))
Gc_ = self.mp["Gc"]
l0_ = self.mp["l0"]
dub_= self.mp["dub"]
self.Gc,self.l0,self.dub = make_fracture_properties_per_domain(self.dim,self.mesh,self.mf,self.damage_dim,Gc_,l0_,dub_)
#if ("B_crystal" in self.mp):
# self.B_crystal = self.mp["B_crystal"]
#el
if ("alpha" in self.mp and "M" in self.mp):
self.alpha = self.mp["alpha"]
self.M = self.mp["M"]
self.damage_induced_anisotropy = self.mp["damage_induced_anisotropy"]
self.damage_tensor = damage_tensor # only implemented in 2D for orthogonal cleavage planes
self.anisotropic_degradation = False
if self.damage_tensor[0]:
self.anisotropic_degradation = True
if ("D_crystal" in self.mp):
self.D_crystal = self.mp["D_crystal"]
self.anisotropic_degradation = True
self.tension_compression_asymmetry = False
self.spheric_deviatoric_decomposition = [False]
self.cleavage_planes_decomposition = False
self.spectral_decomposition = False
self.C, self.R, self.phi1, self.Phi, self.phi2 = self.setup_behaviour()
self.behaviour = behaviour
self.mfront_behaviour = mfront_behaviour
if (not self.behaviour=='linear_elasticity'):
mat_prop = {}
#if (self.anisotropic_elasticity=="cubic"):
#mat_prop = {"YoungModulus": self.E, "PoissonRatio": self.nu} #,\
# #, "ShearModulus": self.G,\
## "YieldStrength": 1000., "HardeningSlope": 0.}
## elif (self.anisotropic_elasticity=="orthotropic"):
## mat_prop = {#"YoungModulus1": self.E1, "YoungModulus2": self.E2, "YoungModulus3": self.E3,\
## #"PoissonRatio12": self.nu12, "PoissonRatio13": self.nu13, "PoissonRatio23": self.nu23,\
## #"ShearModulus12": self.G12, "ShearModulus13": self.G13, "ShearModulus23": self.G23,\
## "YieldStrength": 1000., "HardeningSlope": 0.}
self.mfront_behaviour.set_material_properties(self.dim,self.mesh,self.mf,mat_prop)
self.mfront_behaviour.set_rotation_matrix(self.R.T)
def setup_behaviour(self):
if (self.anisotropic_elasticity=="cubic"):
C, self.y1111, self.y1122, self.y1212, self.E, self.nu, self.G = \
make_cubic_elasticity_stiffness_tensor(self.dim,self.moduli,self.mesh,self.mf)
elif (self.anisotropic_elasticity=="orthotropic"):
C, self.E1, self.E2, self.E3, self.nu12, self.nu21, self.nu13, self.nu31, self.nu23, self.nu32,\
self.G12, self.G13, self.G23 = make_orthotropic_elasticity_stiffness_tensor(self.dim,self.moduli,self.mesh,self.mf)
if (self.anisotropic_degradation and ("D_crystal" in self.mp)):
self.Cdam = []
for n in range(self.damage_dim):
D_array = np.array( self.D_crystal[n] )
D = full_3x3x3x3_to_Voigt_6x6( D_array )
self.Cdam.append( dot(dot(as_matrix(D),C),as_matrix(D)) )
# compute rotation matrix for the set of euler angles associated to the mesh
R, phi1, Phi, phi2 = make_rotation_matrix_from_euler(self.dim, self.geometry, self.mesh, self.mf)
if self.geometry.endswith("single_crystal"):
R, v1, v2, v3 = make_rotation_matrix_from_V1V2V3(self.dim, self.geometry, self.mesh, self.mf)
return C, R, phi1, Phi, phi2
def sigma0(self,e):
return dot(self.R.T,dot(voigt2stress(dot(self.C, e)),self.R))
def sigma(self,v,d,P1,P2,P3):
if (self.tension_compression_asymmetry):
self.strain_decomposition(v,P1,P2,P3)
else:
self.eps_crystal_pos = strain2voigt(dot(self.R,dot(eps(v,self.dim),self.R.T)))
#self.eps_crystal_neg = strain2voigt(dot(self.R,dot(as_tensor(np.zeros((3,3))),self.R.T)))
self.eps_crystal_neg = 0.*self.eps_crystal_pos
if (self.dim==2 and self.damage_dim==2 and self.anisotropic_degradation and self.damage_tensor[0]):
q = self.damage_tensor[1]
p = self.damage_tensor[2]
degrad_type = self.damage_tensor[3]
if (degrad_type == 'Lorentz'):
gamma = self.damage_tensor[4]
r = self.damage_tensor[5]
#d1 = ((1-d[0])/(1+gamma*d[0]))**q*((1-d[1])/(1+gamma*d[1]))**r + kres
#d2 = ((1-d[1])/(1+gamma*d[1]))**q*((1-d[0])/(1+gamma*d[0]))**r + kres
#dd = ((1-d[0])/(1+gamma*d[0]))**p*((1-d[1])/(1+gamma*d[1]))**p + kres
#d1 = ((1-d[0])/(1+gamma*d[0]))**q + kres
#d2 = ((1-d[1])/(1+gamma*d[1]))**q + kres
#dd = ((1-d[0])/(1+gamma*d[0]))**p*((1-d[1])/(1+gamma*d[1]))**p + kres
d1 = ((1-d[0])/(1+gamma*d[0])) + kres
d2 = ((1-d[1])/(1+gamma*d[1])) + kres
dd = ((1-d[0])/(1+gamma*d[0]))*((1-d[1])/(1+gamma*d[1])) + kres
#d1 = ((1-d[0])) + kres
#d2 = ((1-d[1])) + kres
#dd = ((1-d[0]))*((1-d[1])) + kres
elif (degrad_type=='tan'):
gamma = self.damage_tensor[4]
d1 = ((0.5/tan(gamma/2.))*tan(-gamma*(d[0]-0.5)) + 0.5)**q
d2 = ((0.5/tan(gamma/2.))*tan(-gamma*(d[1]-0.5)) + 0.5)**q
dd = ((0.5/tan(gamma/2.))*tan(-gamma*(d[0]-0.5)) + 0.5)**p*((0.5/tan(gamma/2.))*tan(-gamma*(d[1]-0.5)) + 0.5)**p
else:
d1 = (1-d[0])**q + kres
d2 = (1-d[1])**q + kres
dd = (1-d[0])**p*(1-d[1])**p + kres
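            # iD0 applies the directional degradation in the crystal frame: d1 and d2
            # scale the two in-plane normal components, dd the shear components, and the
            # out-of-plane normal row is zeroed (2D case only, see the constructor note);
            # the degraded stiffness is then iD0 * C * iD0.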
iD0 = as_tensor([[d1,0,0,0,0,0],[0,d2,0,0,0,0],[0,0,0,0,0,0],\
[0,0,0,dd,0,0],[0,0,0,0,dd,0],[0,0,0,0,0,dd]])
degraded_stiffness = dot(iD0,dot(self.C,iD0))
return dot(self.R.T,dot(voigt2stress(dot(degraded_stiffness,self.eps_crystal_pos)),self.R)) +\
self.sigma0(self.eps_crystal_neg)
if (self.damage_dim==1):
return ((1.-d)**2 + kres)*self.sigma0(self.eps_crystal_pos) + self.sigma0(self.eps_crystal_neg)
#return self.sigma0(self.eps_crystal_neg)
else:
if (self.anisotropic_degradation):
g = []
for n in range(self.damage_dim):
g.append( (1.-d[n])**2 )
q = []
for (i, gi) in enumerate(g):
#q.append( (1-gi) )
q.append( np.prod(g[:i]+g[i+1:])*(1-gi) )
degraded_stiffness = (np.prod(g) + kres)*self.C
for n in range(self.damage_dim):
degraded_stiffness += q[n]*self.Cdam[n]
return dot(self.R.T,dot(voigt2stress(dot(degraded_stiffness,\
self.eps_crystal_pos)),self.R)) +\
self.sigma0(self.eps_crystal_neg)
else:
g = 1.
for n in range(self.damage_dim):
g*=(1.-d[n])**2
return (g+kres)*self.sigma0(self.eps_crystal_pos) + self.sigma0(self.eps_crystal_neg)
#return self.sigma0(self.eps_crystal_neg)
def strain_decomposition(self,v,P1,P2,P3):
e = eps(v,self.dim)
if (self.spheric_deviatoric_decomposition[0]==True):
trace = tr(e)
if (self.spheric_deviatoric_decomposition[1]=='Amor'):
self.eps_crystal_pos = strain2voigt(e - (1./3.)*Heav(-trace)*trace*Identity(3))
self.eps_crystal_neg = strain2voigt((1./3.)*Heav(-trace)*trace*Identity(3))
else:
self.eps_crystal_pos = strain2voigt(e - (1./3.)*trace*Identity(3))
self.eps_crystal_neg = strain2voigt((1./3.)*trace*Identity(3))
elif (self.spectral_decomposition and self.dim==2):
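            # Closed-form 2D spectral split: eigenvalues and eigenvectors of the strain
            # tensor are written with UFL conditionals so the expression stays
            # differentiable, then the strain is decomposed into its positive and
            # negative eigen-projections (a Miehe-type tension/compression split).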
def eig_plus(A):
return (tr(A) + sqrt(tr(A)**2-4*det(A)))/2
def eig_minus(A):
return (tr(A) - sqrt(tr(A)**2-4*det(A)))/2
# Diagonal matrix with positive and negative eigenvalues as the diagonal elements
def diag_eig(A):
lambdap1 = 0.5*(eig_plus(A)+ abs(eig_plus(A)))
lambdap2 = 0.5*(eig_minus(A) + abs(eig_minus(A)))
lambdan1 = 0.5*(eig_plus(A) - abs(eig_plus(A)))
lambdan2 = 0.5*(eig_minus(A) - abs(eig_minus(A)))
matp = as_matrix(((lambdap1,0),(0,lambdap2)))
matn = as_matrix(((lambdan1,0),(0,lambdan2)))
return (matp,matn)
# Eigenvectors of the matrix arranged in columns of matrix
def eig_vecmat(A):
lambdap = eig_plus(A)
lambdan = eig_minus(A)
a = A[0,0]
b = A[0,1]
c = A[1,0]
d = A[1,1]
v11 = lambdap - d
v12 = lambdan - d
nv11 = sqrt(v11**2 + c**2)
nv12 = sqrt(v12**2 + c**2)
a1 = v11/nv11
b1 = v12/nv12
c1 = c/nv11
d1 = c/nv12
v21 = lambdap - a
v22 = lambdan - a
nv21 = sqrt(v21**2 + b**2)
nv22 = sqrt(v22**2 + b**2)
A1 = b/nv21
B1 = b/nv22
C1 = v21/nv21
D1 = v22/nv22
tol = 1.e-10
#if (gt(abs(c),tol)):
# Eigvecmat = as_matrix(((a1,b1),(c1,d1)))
#else:
# if (gt(abs(b),tol)):
# Eigvecmat = as_matrix(((A1,B1),(C1,D1)))
# else:
# Eigvecmat = Identity(2)
Eigvecmat = conditional(gt(abs(c), tol) ,as_matrix(((a1,b1),(c1,d1))), conditional(gt(abs(b), tol),as_matrix(((A1,B1),(C1,D1))),Identity(2)))
return Eigvecmat
def eps_split(A):
P = eig_vecmat(A)
(diag_eigp,diag_eign) = diag_eig(A)
epsp = dot(P,dot(diag_eigp,P.T))
epsn = dot(P,dot(diag_eign,P.T))
epsp = strain2voigt(as_matrix([[epsp[0,0],epsp[0,1],0.],[epsp[1,0],epsp[1,1],0.],[0.,0.,0.]]))
epsn = strain2voigt(as_matrix([[epsn[0,0],epsn[0,1],0.],[epsn[1,0],epsn[1,1],0.],[0.,0.,0.]]))
return (epsp,epsn)
self.eps_crystal_pos, self.eps_crystal_neg = eps_split( as_matrix([[e[0,0],e[0,1]],[e[1,0],e[1,1]]]) )
elif (self.cleavage_planes_decomposition):
self.eps_crystal = dot(self.R,dot(e,self.R.T)) #eps(v,self.dim)
self.eps_crystal_pos = as_tensor(np.zeros((3,3)))
self.eps_crystal_neg = as_tensor(np.zeros((3,3)))
self.n = [as_tensor([1.,0.,0.]),as_tensor([0.,1.,0.]),as_tensor([0.,0.,1.])]
self.epsilon = []
for i in range(3):
#self.n.append(self.R[:,i])
self.epsilon.append(dot(dot(self.eps_crystal,self.n[i]),self.n[i]))
#print(self.n[i],self.epsilon[i])
self.eps_crystal_pos += ppos(self.epsilon[i])*outer(self.n[i],self.n[i])
self.eps_crystal_neg += pneg(self.epsilon[i])*outer(self.n[i],self.n[i])
self.eps_crystal_pos = strain2voigt(self.eps_crystal_pos)
self.eps_crystal_neg = strain2voigt(self.eps_crystal_neg)
else:
self.eps_crystal_pos = dot(as_tensor([[P1,0,0,0,0,0],[0,P2,0,0,0,0],[0,0,P3,0,0,0],\
[0,0,0,1.,0,0],[0,0,0,0,1.,0],[0,0,0,0,0,1.]]),strain2voigt(dot(self.R,dot(e,self.R.T))))
self.eps_crystal_neg = dot(as_tensor([[1.-P1,0,0,0,0,0],[0,1.-P2,0,0,0,0],[0,0,1.-P3,0,0,0],\
[0,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0]]),strain2voigt(dot(self.R,dot(e,self.R.T))))
# self.eps_crystal = dot(self.R,dot(e,self.R.T)) #eps(v,self.dim)
# self.eps_crystal_pos = as_tensor(np.zeros((3,3)))
# self.eps_crystal_neg = as_tensor(np.zeros((3,3)))
# self.n = []
# self.epsilon = []
# for i in range(3):
# self.n.append(self.R[:,i])
# self.epsilon.append(dot(dot(self.eps_crystal,self.n[i]),self.n[i]))
# print(self.n[i],self.epsilon[i])
# self.eps_crystal_pos += ppos(self.epsilon[i])*outer(self.n[i],self.n[i])
# self.eps_crystal_neg += pneg(self.epsilon[i])*outer(self.n[i],self.n[i])
def make_B_sample(self,d,d_prev_iter):
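        # B_crystal = I + k_dam*alpha*(I - outer(M,M)) makes damage-gradient components
        # orthogonal to M more expensive, so localization bands prefer a normal along M
        # (M playing the role of a cleavage-plane normal); with damage_induced_anisotropy
        # the penalty grows with the current damage through k_dam.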
if ("alpha" in self.mp and "M" in self.mp):
I = np.eye(3)
if (self.damage_dim==1):
if (self.damage_induced_anisotropy==True):
k_dam = d_prev_iter
else:
k_dam = 1.0
self.B_crystal = as_tensor(I) + k_dam*self.alpha*(as_tensor(I) - outer(self.M,self.M))
else:
self.B_crystal = []
for n in range(self.damage_dim):
if (self.damage_induced_anisotropy==True):
k_dam = d_prev_iter.sub(n)**2
else:
k_dam = 1.0
self.B_crystal.append( as_tensor(I) + k_dam*self.alpha*(as_tensor(I) - outer(self.M[n],self.M[n])) )
else:
self.B_crystal = as_tensor( np.eye(3) )
if (self.damage_dim==1):
self.B_sample = dot(self.R.T,dot(self.B_crystal,self.R))
if (self.dim==2):
self.B_sample = as_tensor([[self.B_sample[0,0], self.B_sample[0,1]],
[self.B_sample[1,0], self.B_sample[1,1]]])
else:
self.B_sample = []
for n in range(self.damage_dim):
self.B_sample.append(dot(self.R.T,dot(self.B_crystal[n],self.R)))
if (self.dim==2):
self.B_sample[n] = as_tensor([[self.B_sample[n][0,0], self.B_sample[n][0,1]],
[self.B_sample[n][1,0], self.B_sample[n][1,1]]])
def fracture_energy_density(self,d,d_prev_iter):
if self.damage_model == "AT1":
cw = Constant(8/3.)
w = lambda d: d
elif self.damage_model == "AT2":
cw = Constant(2.)
w = lambda d: d**2
self.make_B_sample(d,d_prev_iter)
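        # Regularized fracture energy per damage variable:
        #   Gc/cw * ( w(d)/l0 + l0 * dot(B*grad(d), grad(d)) ),
        # i.e. the AT1/AT2 surface-energy density with an anisotropic gradient
        # tensor B (B reduces to the identity in the isotropic case).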
if (self.damage_dim==1):
# self.B_sample = dot(self.R.T,dot(self.B_crystal,self.R))
# if (self.dim==2):
# self.B_sample = as_tensor([[self.B_sample[0,0], self.B_sample[0,1]],
# [self.B_sample[1,0], self.B_sample[1,1]]])
return [self.Gc/cw*(w(d)/self.l0+self.l0*dot(dot(self.B_sample, grad(d)),grad(d)))]
else:
Efrac = []
# self.B_sample = []
for n in range(self.damage_dim):
# self.B_sample.append(dot(self.R.T,dot(self.B_crystal[n],self.R)))
# if (self.B_crystal == as_tensor(np.eye(3))):
# self.B_sample = as_tensor(np.eye(3))
# if (self.dim==2):
# self.B_sample[n] = as_tensor([[self.B_sample[n][0,0], self.B_sample[n][0,1]],
# [self.B_sample[n][1,0], self.B_sample[n][1,1]]])
Efrac.append( self.Gc[n]/cw*(w(d[n])/self.l0[n]+self.l0[n]*dot(dot(self.B_sample[n], grad(d[n])),grad(d[n]))) )
return Efrac
```
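For orientation, a minimal construction sketch for the `EXD` material defined above (hedged: the mesh, domain tags and parameter values are placeholders, and the `geometry` string must match what the rotation-matrix helpers from the companion modules expect):

```python
from dolfin import UnitSquareMesh, MeshFunction
from gradam.material_models import EXD

mesh = UnitSquareMesh(64, 64)
mf = MeshFunction("size_t", mesh, mesh.topology().dim(), 1)  # single domain, tag 1

mp = {
    "E": [210e3], "nu": [0.3], "G": [80e3],         # cubic elasticity, one entry per domain
    "Gc": [[1.0]], "l0": [[0.02]], "dub": [[1.0]],  # one damage variable per domain
}
mat = EXD(dim=2, damage_dim=1, material_parameters=mp, mesh=mesh, mf=mf,
          geometry="polycrystal")  # defaults: linear elasticity, AT1, cubic symmetry
```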
#### File: src/gradam/material_properties.py
```python
__author__ = "<NAME>"
__license__ = "CC BY-SA 4.0"
__email__ = "<EMAIL>"
from dolfin import *
import numpy as np
#from .ELtensor import *
#import matscipy
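# Pattern shared by every helper below: a UserExpression reads the cell's domain tag
# from the MeshFunction `mf` (tags are 1-based, hence the -1) and returns the matching
# per-domain value, which is then interpolated onto a piecewise-constant (DG-0) field.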
def make_property_per_domain(dim,
mesh,
mf,
mat_prop_key,
mat_prop):
# interpolate material property
class MP(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = mat_prop[k]
def value_shape(self):
return ()
V0 = FunctionSpace(mesh, "DG", 0)
MP_ = MP(mf)
mp = Function(V0, name=mat_prop_key)
mp.interpolate(MP_)
return mp
def make_cubic_elasticity_stiffness_tensor(dim,
moduli, #=[[E1,nu1,G1], [E2,nu2,G2], ..., [En,nun,Gn]]
mesh,
mf):
# interpolate elasticity moduli
class YOUNG(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][0]
def value_shape(self):
return ()
class POISSON(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][1]
def value_shape(self):
return ()
class SHEAR(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][2]
def value_shape(self):
return ()
V0 = FunctionSpace(mesh, "DG", 0)
E_, nu_, G_ = YOUNG(mf), POISSON(mf), SHEAR(mf)
E, nu, G = Function(V0, name='E'), Function(V0, name='nu'), Function(V0, name='G')
E.interpolate(E_)
nu.interpolate(nu_)
G.interpolate(G_)
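    # Cubic stiffness in Voigt notation: y1111 = C11 and y1122 = C12 follow from E and nu
    # through the same inversion as in the isotropic case, while y1212 = C44 = G is the
    # independent third constant of cubic symmetry.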
y1111 = E*(1.-nu**2)/(1.-3.*nu**2-2.*nu**3)
y1122 = E*nu*(1.+nu)/(1.-3.*nu**2-2.*nu**3)
y1212 = G
C = as_matrix( [[y1111, y1122, y1122, 0., 0., 0. ],
[y1122, y1111, y1122, 0., 0., 0. ],
[y1122, y1122, y1111, 0., 0., 0. ],
[0., 0., 0., y1212, 0., 0., ],
[0., 0., 0., 0., y1212, 0., ],
[0., 0., 0., 0., 0., y1212]])
return C, y1111, y1122, y1212, E, nu, G
def make_orthotropic_elasticity_stiffness_tensor(dim,
moduli, #=[[E1,E2,E3,nu12,nu21,nu13,nu31,nu23,nu32,G12,G13,G23], ...]
mesh,
mf):
# interpolate elasticity moduli
class YOUNG1(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][0]
def value_shape(self):
return ()
class YOUNG2(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][1]
def value_shape(self):
return ()
class YOUNG3(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][2]
def value_shape(self):
return ()
class POISSON12(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][3]
def value_shape(self):
return ()
class POISSON21(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][4]
def value_shape(self):
return ()
class POISSON13(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][5]
def value_shape(self):
return ()
class POISSON31(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][6]
def value_shape(self):
return ()
class POISSON23(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][7]
def value_shape(self):
return ()
class POISSON32(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][8]
def value_shape(self):
return ()
class SHEAR12(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][9]
def value_shape(self):
return ()
class SHEAR13(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][10]
def value_shape(self):
return ()
class SHEAR23(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
values[0] = moduli[k][11]
def value_shape(self):
return ()
V0 = FunctionSpace(mesh, "DG", 0)
E1_,E2_,E3_ = YOUNG1(mf), YOUNG2(mf), YOUNG3(mf)
nu12_,nu21_,nu13_,nu31_,nu23_,nu32_ = POISSON12(mf), POISSON21(mf),\
POISSON13(mf), POISSON31(mf), POISSON23(mf), POISSON32(mf)
G12_,G13_,G23_ = SHEAR12(mf), SHEAR13(mf), SHEAR23(mf)
E1, E2, E3 = Function(V0, name='E1'), Function(V0, name='E2'), Function(V0, name='E3')
nu12, nu21, nu13, nu31, nu23, nu32 = Function(V0, name='nu12'), Function(V0, name='nu21'),\
Function(V0, name='nu13'), Function(V0, name='nu31'), Function(V0, name='nu23'), Function(V0, name='nu32')
G12, G13, G23 = Function(V0, name='G12'), Function(V0, name='G13'), Function(V0, name='G23')
E1.interpolate(E1_)
E2.interpolate(E2_)
E3.interpolate(E3_)
nu12.interpolate(nu12_)
nu21.interpolate(nu21_)
nu13.interpolate(nu13_)
nu31.interpolate(nu31_)
nu23.interpolate(nu23_)
nu32.interpolate(nu32_)
G12.interpolate(G12_)
G13.interpolate(G13_)
G23.interpolate(G23_)
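    # Standard inversion of the orthotropic compliance matrix: delta is the common
    # determinant factor and the y#### terms below are the nonzero stiffness components
    # of the Voigt matrix (3x3 normal block plus the three shear moduli).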
delta = (1.-nu23*nu32-nu31*nu13-nu12*nu21-2.*nu23*nu31*nu12)/(E1*E2*E3)
y1111 = (1.-nu23*nu32)/(delta*E2*E3)
y1122 = (nu21+nu31*nu23)/(delta*E2*E3)
y1133 = (nu31+nu21*nu32)/(delta*E2*E3)
y2211 = (nu12+nu13*nu32)/(delta*E1*E3)
y2222 = (1.-nu31*nu13)/(delta*E1*E3)
y2233 = (nu32+nu31*nu12)/(delta*E1*E3)
y3311 = (nu13+nu12*nu23)/(delta*E1*E2)
y3322 = (nu23+nu21*nu13)/(delta*E1*E2)
y3333 = (1.-nu12*nu21)/(delta*E1*E2)
y2323 = G23
y1313 = G13
y1212 = G12
C = as_matrix( [[y1111, y1122, y1133, 0., 0., 0. ],
[y2211, y2222, y2233, 0., 0., 0. ],
[y3311, y3322, y3333, 0., 0., 0. ],
[0., 0., 0., y2323, 0., 0., ],
[0., 0., 0., 0., y1313, 0., ],
[0., 0., 0., 0., 0., y1212]])
return C, E1, E2, E3, nu12, nu21, nu13, nu31, nu23, nu32, G12, G13, G23
def make_fracture_properties_per_domain(dim,
mesh,
mf,
damage_dim,
Gc_,
l0_,
dub_):
# interpolate fracture properties
class GC(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
for i in range(damage_dim):
values[i] = Gc_[k][i]
def value_shape(self):
if (damage_dim==1):
return ()
else:
return (damage_dim,)
class L0(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
for i in range(damage_dim):
values[i] = l0_[k][i]
def value_shape(self):
if (damage_dim==1):
return ()
else:
return (damage_dim,)
# damage upper bound (=1 if damageable else =0)
class DUB(UserExpression):
def __init__(self, mf, **kwargs):
super().__init__(**kwargs)
self.mf = mf
def eval_cell(self, values, x, cell):
k = self.mf[cell.index]-1
for i in range(damage_dim):
values[i] = dub_[k][i]
def value_shape(self):
if (damage_dim==1):
return ()
else:
return (damage_dim,)
if (damage_dim == 1):
V0 = FunctionSpace(mesh, "DG", 1)
Vd = FunctionSpace(mesh, "CG", 1)
else:
#V0 = VectorFunctionSpace(mesh, "DG", damage_dim)
V0 = VectorFunctionSpace(mesh, "DG", 1, dim=damage_dim)
#Vd = VectorFunctionSpace(mesh, "CG", damage_dim)
Vd = VectorFunctionSpace(mesh, "CG", 1, dim=damage_dim)
GC_, L0_, DUB_ = GC(mf), L0(mf), DUB(mf)
Gc, l0, dub = Function(V0, name='Gc'), Function(V0, name='l0'), Function(Vd, name='Damage upper bound')
Gc.interpolate(GC_)
l0.interpolate(L0_)
dub.interpolate(DUB_)
return Gc, l0, dub
def transfer_function(function, function_space):
temp = Function(function_space, name=function.name())
# function.set_allow_extrapolation(True)
A = PETScDMCollection.create_transfer_matrix(function.ufl_function_space(),
function_space)
temp.vector()[:] = A*function.vector()
return temp
```
#### File: src/gradam/problem.py
```python
__author__ = "<NAME>"
__license__ = "CC BY-SA 4.0"
__email__ = "<EMAIL>"
from dolfin import *
import matplotlib.pyplot as plt
import numpy as np
from ufl import replace
from .material_models import *
from .hybrid_linear_solver import *
from time import time
from time import sleep
import os
from .remesher import *
from .mesh_converter import *
import types
import scipy as sp
import scipy.ndimage
import sys
import subprocess
#import h5py
from mpi4py import MPI as pyMPI
from .j_integral import *
from .version_date import *
## Setting up damage-part optimization solver
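# DamageProblem exposes the total energy, its gradient and its Hessian as the f/F/J
# callbacks expected by PETScTAOSolver, which minimizes over the damage field under the
# box constraints d_lb <= d <= d_ub (irreversibility and per-domain upper bound).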
class DamageProblem(OptimisationProblem):
def __init__(self, Wtot,DW_d,D2W_d,d):
OptimisationProblem.__init__(self)
self.obj_func = Wtot
self.residual = DW_d
self.Jac = D2W_d
self.var = d
self.bcs = []
def f(self, x):
self.var.vector()[:] = x
return assemble(self.obj_func)
def F(self, b, x):
self.var.vector()[:] = x
assemble(self.residual, b, self.bcs)
def J(self, A, x):
self.var.vector()[:] = x
assemble(self.Jac, A, self.bcs)
class FractureProblem:
def __init__(self,mesh,facets,mat,prefix,loads=[[0,Constant(0.)]],Jcontours=[],mf=None,mvc=None):
self.staggered_solver = dict({"iter_max":500,"tol":1e-4,"accelerated":False,"history_function":False})
self.comm = MPI.comm_world
self.rank = MPI.rank(self.comm)
self.mesh = mesh
self.mf = mf
self.mvc = mvc
self.u_degree = 1
self.d_degree = 1
self.num_vertices = self.mesh.num_vertices() # NOK in parallel, use metric size instead
self.facets = facets
self.dim = mesh.geometric_dimension()
self.mat = mat
self.prefix = prefix
if (not os.path.isdir(self.prefix)):
os.system("mkdir %s" % self.prefix)
self.bcs = []
self.bc_d =[]
self.Uimp = [Expression("t", t=0, degree=0)]
self.incr = 0
self.incr_save = 1
self.t = 0.
self.dtime = 1.e-4
self.desired_dincr = 1.e-2
self.max_dincr = 1.e-2
self.min_dtime = 1.e-5
self.max_dtime = 1.e-3
self.final_time = 1.e-2
self.niter_tot = 0
self.niter_TAO = 0
self.niter_iterative = 0
self.niter_direct = 0
self.set_functions()
self.dx = Measure("dx")
self.ds = Measure("ds")
self.loads = loads
self.load_boundaries = [self.ds]
self.Wext = self.define_external_work()
self.resultant = self.sig[1,1]*self.ds
self.results = XDMFFile(MPI.comm_world,self.prefix+"output.xdmf")
self.results.parameters["flush_output"] = True
self.results.parameters["functions_share_mesh"] = True
self.J_results = XDMFFile(MPI.comm_world,self.prefix+"J_integral.xdmf")
self.J_results.parameters["flush_output"] = True
self.J_results.parameters["functions_share_mesh"] = True
self.checkpoints = XDMFFile(MPI.comm_world,self.prefix+"checkpoint.xdmf")
self.checkpoints.parameters["flush_output"] = True
self.checkpoints.parameters["functions_share_mesh"] = True
self.save_all = True
self.save_intermediate = False
self.save_checkpoints = False
self.write_checkpoint_count = 0
self.use_hybrid_solver = False
self.normal = FacetNormal(mesh)
self.dsJ = Jcontours
self.J = []
self.remesh = False
self.use_remeshing = False
self.remeshing_criterion = 1.e-2
self.remeshing_index = 1
self.nbgrains = -1
self.remesher = None
self.boundaries = []
self.markers = []
self.domains = []
self.domains_markers = []
self.myBCS = self.BCS(self.Vu,self.Vd,self.facets)
self.myResultant = self.Resultant(self.mat,self.u,self.d,self.P1pos,self.P2pos,self.P3pos,self.ds)
self.gaussian_filter_sigma = 1.
self.no_residual_stiffness=[False,0.99]
self.JIntegral = None
self.timings = True
self.null_space_basis = None
self.rigid_body_motion=[]
class BCS():
def __init__(self, Vu, Vd, facets):
self.Vu = Vu
self.Vd = Vd
self.facets = facets
class Resultant():
def __init__(self, mat, u, d, P1pos, P2pos, P3pos, ds):
self.mat = mat
self.u = u
self.d = d
self.P1pos = P1pos
self.P2pos = P2pos
self.P3pos = P3pos
self.ds = ds
def set_functions(self):
# Definition of functions spaces and test/trial functions
self.Vu = VectorFunctionSpace(self.mesh, "CG", self.u_degree, dim=self.dim)
if self.mat.damage_dim == 1:
self.Vd = FunctionSpace(self.mesh, "CG", self.d_degree)
else:
self.Vd = VectorFunctionSpace(self.mesh, "CG", self.d_degree, dim=self.mat.damage_dim)
self.V0 = FunctionSpace(self.mesh, "DG", 0)
self.Vsig = TensorFunctionSpace(self.mesh, "CG", self.u_degree, shape=(3,3))
self.VV = VectorFunctionSpace(self.mesh, "DG", 0, dim=3)
self.Vr = TensorFunctionSpace(self.mesh, "DG", 0, shape=(3,3))
self.Vmetric = FunctionSpace(self.mesh, "CG", self.d_degree)
self.u_ = TestFunction(self.Vu)
self.du = TrialFunction(self.Vu)
self.d_ = TestFunction(self.Vd)
self.dd = TrialFunction(self.Vd)
# Definition of functions
self.u = Function(self.Vu,name="Displacement")
#self.u_prev = Function(self.Vu,name="Previous displacement")
self.d = Function(self.Vd,name="Damage")
self.d_prev = Function(self.Vd,name="Previous damage")
self.d_prev_iter = Function(self.Vd,name="Previous damage in staggered minimization")
#self.dold = Function(self.Vd,name="Old damage")
self.d_lb = Function(self.Vd,name="Lower bound d_n")
self.d_ub = Function(self.Vd,name="Damage upper bound") #"Upper bound 1")
self.d_ar= Function(self.Vd,name="Damage field after remeshing")
self.d_ub = self.mat.dub
self.sig = Function(self.Vsig,name="Stress")
self.eel = Function(self.Vsig,name="ElasticStrain")
self.epspos = Function(self.Vsig,name="Strain (+)")
self.epsneg = Function(self.Vsig,name="Strain (-)")
#self.V1 = Function(self.VV,name="V1")
#self.V2 = Function(self.VV,name="V2")
#self.V3 = Function(self.VV,name="V3")
# if self.staggered_solver["accelerated"]:
# self.tmp_u = Function(self.Vu)
# self.tmp_d = Function(self.Vd)
self.R = Function(self.Vr,name="Rotation matrix")
self.dissipated = Function(self.V0,name="Plastic dissipation")
self.stored = Function(self.V0,name="Stored energy")
self.P1pos = Function(self.V0,name="P1pos")
self.P2pos = Function(self.V0,name="P2pos")
self.P3pos = Function(self.V0,name="P3pos")
self.P1pos.interpolate(Constant(1.))
self.P2pos.interpolate(Constant(1.))
self.P3pos.interpolate(Constant(1.))
self.Efrac_field = Function(self.V0,name="Efrac")
self.d_eq_fiss = Function(self.Vd,name="deq")
if self.mat.damage_dim==1:
self.d_eq_fiss.interpolate(Constant((1.)))
self.d_prev_iter.interpolate(Constant(0.))
else:
self.d_eq_fiss.interpolate(Constant((1.,)*self.mat.damage_dim))
self.d_prev_iter.interpolate(Constant((0.,)*self.mat.damage_dim))
#self.Vstiffness = TensorFunctionSpace(self.mesh, "CG", 1, shape=(6,6))
#self.stiffness = Function(self.Vstiffness,name="Stiffness")
self.metric = Function(self.Vmetric,name="Remeshing metric")
self.metric.interpolate(Constant(0.))
def set_load(self,u):
L = self.loads[0][1]*u[self.loads[0][0]]*self.load_boundaries[0]
for (load,load_boundary) in list(zip(self.loads[1:],self.load_boundaries[1:])):
L += load[1]*u[load[0]]*load_boundary
return L
def define_external_work(self):
return self.set_load(self.u)
#return dot(self.load,self.u)*self.ds
def set_energies(self):
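        # Builds the total energy Wtot = Wel + Wfrac - Wext (plus the plastic dissipation
        # term Wdis for MFront behaviours), with the elastic/stored part degraded through
        # the damage, and defines its first and second derivatives with respect to u and d
        # that the two sub-solvers consume.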
if (not self.mat.behaviour=="linear_elasticity"):
self.mb = self.mat.mfront_behaviour.create_material()
self.solver_u = mf.MFrontNonlinearProblem(self.u, self.mb, quadrature_degree=1, bcs=self.bcs)
self.solver_u.register_external_state_variable("Damage", self.d)
'''
prm = self.solver_u.solver.parameters
#prm['nonlinear_solver'] = 'newton'
prm['linear_solver'] = 'gmres' #'mumps' #'minres' #'cg' #'cg' #'mumps' #'gmres' #'petsc' #'umfpack' #'tfqmr'
#prm['preconditioner'] = 'petsc_amg' #'ilu' # 'sor' # 'icc' # 'petsc_amg'
#prm['krylov_solver']['error_on_nonconvergence'] = True
#prm['krylov_solver']['monitor_convergence'] = True
#prm['krylov_solver']['absolute_tolerance'] = 1E-14
#prm['krylov_solver']['relative_tolerance'] = 1E-14
#prm['krylov_solver']['maximum_iterations'] = 10000
prm['krylov_solver']['nonzero_initial_guess'] = True
prm['preconditioner'] = 'hypre_amg'
prm['absolute_tolerance'] = 1E-6 #-9
prm['relative_tolerance'] = 1E-8 #-8
#prm['maximum_iterations'] = 1000 #25
#prm['relaxation_parameter'] = 1.
##prm['krylov_solver']['gmres']['restart'] = 40
##prm['krylov_solver']['preconditioner']['ilu']['fill_level'] = 0
prm["report"] = True
#prm['lu_solver']['symmetric'] = True #False
'''
self.solver_u.solver = PETScSNESSolver('newtonls') #'newtontr') #'newtonls')
prm = self.solver_u.solver.parameters
#prm['nonlinear_solver'] = 'snes'
#prm['line_search'] = 'bt' #'cp' #'cp' #'nleqerr' # 'bt' # 'basic' # 'l2'
prm['line_search'] = 'nleqerr'
#prm['linear_solver'] = 'mumps'
prm['linear_solver'] = 'cg' #'gmres' #'cg' #'gmres'
prm['preconditioner'] = 'amg' #'hypre_amg'
prm['krylov_solver']['nonzero_initial_guess'] = False # True
#prm['maximum_iterations'] = 50
prm['absolute_tolerance'] = 1E-5
#prm['relative_tolerance'] = 1E-8
#prm['report'] = False #True
self.load = self.set_load(self.u)
self.solver_u.set_loading(self.load)
self.dissipated.vector().set_local(self.mb.data_manager.s1.dissipated_energies)
self.dissipated.vector().apply("insert")
self.stored.vector().set_local(self.mb.data_manager.s1.stored_energies)
self.stored.vector().apply("insert")
#print(max(_dummy_function.vector()[:]))
self.sigma()
#self.eps_elas()
## self.Wel = 0.5*inner(self.sig,self.eel)*self.dx
## self.Wel = 0.5*(1.-self.d)**2*inner(self.sig,self.eel)*self.dx
self.Wel = (1.-self.d)**2*self.stored*self.dx
#self.Wel = 0.5*self.stored*self.dx
self.Wdis = (1.-self.d)**2*self.dissipated*self.dx
else:
# Definition of energy densities
# self.Wel = 0.5*inner(self.mat.sigma(self.u,self.d,self.P1pos,self.P2pos),eps(self.u))*self.dx
# self.Wel = 0.5*inner(self.mat.sigma(self.u,self.d,self.P1pos,self.P2pos,self.P3pos),eps(self.u,self.dim))*self.dx
self.sigma()
self.Wel = 0.5*inner(self.sig,eps(self.u,self.dim))*self.dx
self.Efrac = self.mat.fracture_energy_density(self.d,self.d_prev_iter)
self.Wfrac = sum(self.Efrac)*self.dx
self.Wtot = self.Wel + self.Wfrac - self.Wext
if (not self.mat.behaviour=="linear_elasticity"):
self.Wtot += self.Wdis
# Definition of J integral
if (self.dim == 2):
normal3 = as_vector([self.normal[0],self.normal[1],0.])
sigma_n3 = dot(normal3, self.sig)
sigma_n = as_vector([sigma_n3[0],sigma_n3[1]])
elif (self.dim == 3):
normal3 = self.normal
sigma_n = dot(normal3, self.sig)
if (not self.dsJ==[]):
self.J=[]
for c in self.dsJ:
#self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[1] \
# - inner(sigma_n, grad(self.u)[:,0]) ) * c ) # for outer boundaries
self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[1]) * c )
self.J.append( (- inner(sigma_n, grad(self.u)[:,0]) ) * c ) # for outer boundaries
for c in self.dsJ:
#self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[0] \
# - inner(sigma_n, grad(self.u)[:,1]) ) * c ) # for outer boundaries
self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[0] ) * c)
self.J.append( (- inner(sigma_n, grad(self.u)[:,1]) ) * c ) # for outer boundaries
# self.J.append( (0.5*inner(self.sig,eps(self.u,self.dim))*self.normal[1] \
# - inner(sigma_n, grad(self.u)[:,0]) )('-') * c ) # for inner boundaries
# Definition of energy derivatives
self.DW_u = derivative(self.Wtot,self.u,self.u_)
self.D2W_u = derivative(self.DW_u,self.u,self.du)
self.DW_d = derivative(self.Wtot,self.d,self.d_)
self.D2W_d = derivative(self.DW_d,self.d,self.dd)
def set_problems(self):
if not self.rigid_body_motion==[]:
null_space = [interpolate(n, self.Vu).vector() for n in self.rigid_body_motion]
# Make into unit vectors
[normalize(n, 'l2') for n in null_space]
# Create null space basis object
self.null_space_basis = VectorSpaceBasis(null_space)
# Setting up displacement-part linear solver
# LinearVariationalProblem(lhs(self.D2W_u),replace(self.Wext,{self.u:self.u_}), self.u, self.bcs)
if (self.mat.behaviour=='linear_elasticity'):
self.load = self.set_load(self.u_)
if (self.use_hybrid_solver):
#self.solver_u = HybridLinearSolver(lhs(self.D2W_u),dot(self.load,self.u_)*self.ds,\
self.solver_u = HybridLinearSolver(lhs(self.D2W_u),self.load,\
self.u,bcs=self.bcs,parameters={"iteration_switch": 5,\
"user_switch": True},null_space_basis=self.null_space_basis) #not self.remesh or (self.niter>0)})
else:
if (not self.mat.tension_compression_asymmetry):
#self.problem_u = LinearVariationalProblem(lhs(self.D2W_u),dot(self.load,self.u_)*self.ds,self.u,self.bcs)
self.problem_u = LinearVariationalProblem(lhs(self.D2W_u),self.load,self.u,self.bcs)
self.solver_u = LinearVariationalSolver(self.problem_u)
self.solver_u.parameters["linear_solver"] = "mumps"
else:
self.problem_u = NonlinearVariationalProblem(self.DW_u,self.u,self.bcs,J=self.D2W_u)
self.solver_u = NonlinearVariationalSolver(self.problem_u)
prm = self.solver_u.parameters
prm['nonlinear_solver'] = 'newton'
prm['newton_solver']['linear_solver'] = 'mumps' #'gmres' #'mumps' #'petsc'
prm['newton_solver']['error_on_nonconvergence'] = False #True
prm['newton_solver']['absolute_tolerance'] = 1E-9
prm['newton_solver']['relative_tolerance'] = 1E-8
prm['newton_solver']['maximum_iterations'] = 25 #10000 #25
prm['newton_solver']['relaxation_parameter'] = 1.0
prm['newton_solver']['lu_solver']['report'] = True
#prm['newton_solver']['lu_solver']['reuse_factorization'] = False
#prm['newton_solver']['lu_solver']['same_nonzero_pattern'] = False
prm['newton_solver']['lu_solver']['symmetric'] = False
prm['newton_solver']['krylov_solver']['error_on_nonconvergence'] = True
prm['newton_solver']['krylov_solver']['absolute_tolerance'] = 1E-7
prm['newton_solver']['krylov_solver']['relative_tolerance'] = 1E-5
prm['newton_solver']['krylov_solver']['maximum_iterations'] = 1000
prm['newton_solver']['krylov_solver']['nonzero_initial_guess'] = True
if prm['newton_solver']['linear_solver'] == 'gmres':
prm['newton_solver']['preconditioner'] = 'ilu'
#self.solver_u.parameters["newton_solver"]["linear_solver"] = "mumps"
self.solver_d = PETScTAOSolver()
self.solver_d.parameters["method"] = "tron"
self.solver_d.parameters["line_search"] = "gpcg"
self.solver_d.parameters["linear_solver"] = "cg" #"mumps" #"mumps" #'cg", "gltr", "gmres", "nash"
#self.solver_d.parameters["preconditioner"] = "hypre_amg"
self.solver_d.parameters["maximum_iterations"] = 5000
# self.solver_d.parameters["gradient_absolute_tol"] = self.staggered_solver["tol"]
# self.solver_d.parameters["gradient_relative_tol"] = self.staggered_solver["tol"]
# self.solver_d.parameters["gradient_t_tol"] = self.staggered_solver["tol"]
self.solver_d.parameters["gradient_absolute_tol"] = 1.e-4
self.solver_d.parameters["gradient_relative_tol"] = 1.e-4
self.solver_d.parameters["gradient_t_tol"] = 1.e-4
#@profile
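    # Alternate minimization: solve the displacement problem at fixed damage, then the
    # bound-constrained damage problem at fixed displacement, and iterate until the
    # relative change of the total energy falls below staggered_solver["tol"].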
def staggered_solve(self):
DeltaE = 1.
self.niter = 0
if(self.remesh == False):
self.d_prev.assign(self.d)
# boundary conditions for damage problem
for bc in self.bc_d:
bc.apply(self.d_lb.vector())
while (DeltaE>self.staggered_solver["tol"]) and (self.niter<self.staggered_solver["iter_max"]):
if self.rank == 0:
print(" Iteration %i "%(self.niter))
# u-solve : gives u_{n+1}^{pred} from d_{n}
tic_u = time()
if self.mat.behaviour=='linear_elasticity':
count = self.solver_u.solve()
self.sigma()
else:
self.solver_u.dt = self.dtime
count = self.solver_u.solve(self.u.vector())
self.dissipated.vector().set_local(self.mb.data_manager.s1.dissipated_energies)
self.dissipated.vector().apply("insert")
self.stored.vector().set_local(self.mb.data_manager.s1.stored_energies)
self.stored.vector().apply("insert")
self.sigma()
self.runtime_u += time() - tic_u
if self.use_hybrid_solver:
self.niter_iterative += count[0]
self.niter_direct += count[1]
else:
self.niter_direct += 1
'''
# Update pos/neg indicators
Eloc = strain2voigt(dot(self.mat.R,dot(eps(self.u,self.dim),self.mat.R.T)))
#self.P1pos = Heav(Eloc[0])
self.P1pos.assign(local_project(Heav(Eloc[0]),self.V0))
#self.P2pos = Heav(Eloc[1])
self.P2pos.assign(local_project(Heav(Eloc[1]),self.V0))
#self.P3pos = Heav(Eloc[2])
self.P3pos.assign(local_project(Heav(Eloc[2]),self.V0))
# print("\nP1pos = ", np.sum(self.P1pos.vector()[:]))
'''
# u-update
# self.u_prev.assign(self.u)
tic_assemble = time()
Etot_old = assemble(self.Wtot)
self.runtime_assemble += time() - tic_assemble
# d-solve : gives d_{n+1}^{pred} from u^{n+1}
#self.dold.assign(self.d)
dam_prob = DamageProblem(self.Wtot,self.DW_d,self.D2W_d,self.d)
tic_d = time()
self.niter_TAO += self.solver_d.solve(dam_prob, self.d.vector(), self.d_lb.vector(), self.d_ub.vector())[0]
self.runtime_d += time() - tic_d
self.d_prev_iter.assign(self.d)
# Energy computation
tic_assemble = time()
Etot = assemble(self.Wtot)
self.runtime_assemble += time() - tic_assemble
if (not Etot==0.):
DeltaE = abs(Etot_old/Etot-1)
else:
DeltaE = abs(Etot_old - Etot)
self.niter += 1
self.niter_tot += 1
if self.rank == 0:
print(" Energy variation : %.5e"%(DeltaE))
# if self.save_intermediate == True:
# self.user_postprocess()
if self.mat.damage_dim==2 and self.no_residual_stiffness==True:
d1,d2 = self.equalize_damage(self.d,self.d_ub,0.99)
assign(self.d.sub(0),d1)
assign(self.d.sub(1),d2)
'''
# Update lower bound to account for irreversibility
self.d_lb.assign(self.d)
self.max_dincr = norm(self.d.vector()-self.d_prev.vector(),'linf')
if (self.max_dincr==0.):
self.max_dincr = self.desired_dincr/2.
'''
# Update pos/neg indicators
Eloc = strain2voigt(dot(self.mat.R,dot(eps(self.u,self.dim),self.mat.R.T)))
#self.P1pos = Heav(Eloc[0])
self.P1pos.assign(local_project(Heav(Eloc[0]),self.V0))
#self.P2pos = Heav(Eloc[1])
self.P2pos.assign(local_project(Heav(Eloc[1]),self.V0))
#self.P3pos = Heav(Eloc[2])
self.P3pos.assign(local_project(Heav(Eloc[2]),self.V0))
# print("\nP1pos = ", np.sum(self.P1pos.vector()[:]))
# print("\nP2pos = ", np.sum(self.P2pos.vector()[:]))
def equalize_damage(self, d, d_ub, threshold):
d1,d2 = self.d.split(deepcopy = True)
d1_ub,d2_ub = d_ub.split(deepcopy = True)
d1_ = d1.vector()[:]
d2_ = d2.vector()[:]
d1_ub_ = d1_ub.vector()[:]
d2_ub_ = d2_ub.vector()[:]
np.place(d1_, d2_>threshold, d1_ub_)
np.place(d2_, d1_>threshold, d2_ub_)
d1.vector()[:] = d1_
d2.vector()[:] = d2_
return d1, d2
def export_damage(self,t):
tic_export = time()
self.results.write(self.d,t)
#self.results.write(self.d_eq_fiss,t)
if ((not self.write_checkpoint_count % 10) and self.save_checkpoints):
self.results.write_checkpoint(self.d,"Damage",t,append=True)
self.write_checkpoint_count = 1
self.write_checkpoint_count += 1
self.runtime_export += time() - tic_export
def export_J(self,t):
tic_export = time()
self.sigma()
#os.system('rm %s' % self.prefix+"J_integral.xdmf")
if (self.mat.behaviour=="linear_elasticity"):
sigma = Function(self.Vsig,name="Stress")
sigma.assign(local_project(self.sig,self.Vsig)) #, solver_type='cg', preconditioner_type='hypre_amg'))
self.results.write(sigma,t)
else:
#sigma = Function(self.Vsig,name="Stress")
#sigma.assign(local_project((1.-self.d)**2*self.sig,self.Vsig))
#self.results.write(sigma,t)
self.results.write(self.solver_u.get_flux("Stress", project_on=("DG", 0)),t)
#self.results.write((1.-self.d)**2*self.sig,t)
self.J_results.write(self.u,t)
self.J_results.write(self.sig,t)
self.runtime_export += time() - tic_export
def export_all(self,t):
tic_export = time()
self.sigma()
if (self.mat.behaviour=="linear_elasticity"):
sigma = Function(self.Vsig,name="Stress")
sigma.assign(local_project(self.sig,self.Vsig))
self.results.write(sigma,t)
else:
#sigma = Function(self.Vsig,name="Stress")
#sigma.assign(local_project((1.-self.d)**2*self.sig,self.Vsig)) #, solver_type='cg', preconditioner_type='hypre_amg'))
#Vsig = TensorFunctionSpace(self.mesh, "DG", 0, shape=(3,3))
#sigma.assign(local_project(self.sig,Vsig))
#self.results.write(sigma,t)
self.results.write(self.solver_u.get_flux("Stress", project_on=("DG", 0)),t)
#self.results.write((1.-self.d)**2*self.sig,t)
self.results.write(self.u,t)
##self.epspos.assign(local_project(dot(self.mat.R.T,dot(voigt2strain(self.mat.eps_crystal_pos),self.mat.R)),self.Vsig)) #, solver_type='cg', preconditioner_type='hypre_amg'))
##self.epsneg.assign(local_project(dot(self.mat.R.T,dot(voigt2strain(self.mat.eps_crystal_neg),self.mat.R)),self.Vsig)) #, solver_type='cg', preconditioner_type='hypre_amg'))
##self.results.write(self.epspos,t)
##self.results.write(self.epsneg,t)
# write rotation matrix
#self.R.assign(project(self.mat.R,self.Vr)) #, solver_type='cg', preconditioner_type='hypre_amg'))
#self.results.write(self.R,t)
self.results.write(self.mat.phi1,t)
self.results.write(self.mat.Phi,t)
self.results.write(self.mat.phi2,t)
if (not self.mat.behaviour=='linear_elasticity'):
for var in self.mb.get_internal_state_variable_names():
self.results.write(self.solver_u.get_state_variable(var,project_on=('DG',0)),t)
#self.V1.assign(local_project(self.mat.R[0,:],self.VV))
#self.V2.assign(local_project(self.mat.R[1,:],self.VV))
#self.V3.assign(local_project(self.mat.R[2,:],self.VV))
#self.results.write(self.V1,t)
#self.results.write(self.V2,t)
#self.results.write(self.V3,t)
#self.results.write(self.P1pos,t)
#self.results.write(self.P2pos,t)
#self.results.write(self.P3pos,t)
#self.Efrac_field.assign(local_project(sum(self.Efrac),self.V0))
#self.results.write(self.Efrac_field,t)
#self.stiffness.assign(local_project(self.mat.C,self.Vstiffness))
#self.results.write(self.stiffness,t)
if ((not self.write_checkpoint_count % 10) and self.save_checkpoints):
self.checkpoints.write_checkpoint(self.u,self.u.name(),t,append=True)
self.runtime_export += time() - tic_export
def solve(self):
self.startup_message()
#log = [[0]*13]
if (self.rank==0):
f = open(self.prefix+"results.txt","a")
Jint_cols = ''
if (not self.dsJ==[]):
Jint_cols = "[15-%s]_J_integrals " % (16+len(self.dsJ)-1)
f.write("#1_incr 2_time 3_F 4_Eel 5_Ed 6_Ep 7_Etot 8_niter 9_niter_tot 10_niter_TAO 11_niter_iterative 12_niter_direct 13_runtime 14_runtime_u 15_runtime_d "\
+ Jint_cols + "[%s-%s]_Efrac_i " % (16+len(self.dsJ),16+len(self.dsJ)+self.mat.damage_dim-1) + "\n")
f.close()
if (not self.JIntegral==None):
for contour in self.JIntegral.keys():
J_file = open(self.prefix+'J_integral_%s.txt' % contour,'a')
J_file.write("#1_incr 2_time J_left J_bot J_right J_top J_tot\n")
J_file.close()
if (self.timings==True):
f = open(self.prefix+"timings.txt","a")
f.write("#1_incr 2_time 3_runtime_u 4_runtime_d 5_runtime_assemble "+\
"6_runtime_export 7_runtime_JIntegral 8_runtime_remeshing_mmg 9_runtime_remeshing_interpolation 10_runtime_tot "+\
"11_number_of_vertices\n")
f.close()
self.runtime = 0. #time()
self.runtime_u = 0.
self.runtime_d = 0.
self.runtime_assemble = 0.
self.runtime_export = 0.
self.runtime_remeshing_mmg = 0.
self.runtime_remeshing_interpolation = 0.
self.runtime_JIntegral = 0.
self.set_energies()
self.set_problems()
while (self.t < self.final_time):
tic = time()
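            # Adaptive load stepping: dtime is rescaled so that the largest nodal damage
            # increment of the previous step stays close to desired_dincr, clamped to
            # [min_dtime, max_dtime].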
if (self.remesh == False):
self.dtime = max(self.dtime*self.desired_dincr/self.max_dincr,self.min_dtime)
self.dtime = min(self.dtime,self.max_dtime)
if ((self.t + self.dtime) > self.final_time):
self.dtime = self.final_time - self.t
if (self.incr==0):
self.dtime = 0
for load in self.loads:
load[1].t = self.t + self.dtime
for uimp in self.Uimp:
uimp.t = self.t + self.dtime
if self.rank == 0:
print( "Increment %i | Loading : %.5e"%(self.incr,self.t+self.dtime))
self.staggered_solve()
if (self.use_remeshing):
self.remeshing()
if self.remesh == False:
# Update lower bound to account for irreversibility
self.d_lb.assign(self.d)
self.max_dincr = norm(self.d.vector()-self.d_prev.vector(),'linf')
if (self.max_dincr==0.):
self.max_dincr = self.desired_dincr/2.
self.t += self.dtime
if ((not self.incr % self.incr_save) or (self.incr < 1)):
if self.save_all:
self.export_all(self.t/self.final_time)
self.export_damage(self.t/self.final_time)
if(not self.JIntegral==None):
self.export_J(self.t/self.final_time)
tic_assemble = time()
F = assemble(self.resultant)
Eel = assemble(self.Wel)
Ed = assemble(self.Wfrac)
Ep = 0.
if (not self.mat.behaviour=="linear_elasticity"):
Ep = assemble(self.Wdis)
Etot = assemble(self.Wtot)
Efrac = [assemble(Efrac_i*self.dx) for Efrac_i in self.Efrac]
Jint=[]
if (not self.J==[]):
for j in self.J:
Jint.append( assemble(j) )
self.runtime_assemble += time() - tic_assemble
if ((not self.JIntegral==None) and self.rank==0):
tic_JIntegral = time()
for contour in self.JIntegral.keys():
if ((not self.incr % self.JIntegral[contour].incr_save) or (self.incr < 1)):
self.JIntegral[contour].compute_J_integral(contour,self.incr, self.t)
self.runtime_JIntegral += time() - tic_JIntegral
self.runtime += time() - tic
if (self.rank==0):
log = ([self.incr,self.t,F,Eel,Ed,Ep,Etot,self.niter,self.niter_tot, \
self.niter_TAO,self.niter_iterative,self.niter_direct,self.runtime,\
self.runtime_u,self.runtime_d] + Jint + Efrac)
f = open(self.prefix+"results.txt","a")
f.write(' '.join(map(str, log))+"\n")
f.close()
if (self.timings==True):
timings = ([self.incr,self.t,self.runtime_u,self.runtime_d,self.runtime_assemble,\
self.runtime_export,self.runtime_JIntegral,self.runtime_remeshing_mmg,self.runtime_remeshing_interpolation,self.runtime,\
self.num_vertices])
f = open(self.prefix+"timings.txt","a")
f.write(' '.join(map(str, timings))+"\n")
f.close()
self.incr += 1
if (self.rank==0):
f = open(self.prefix+"results.txt","a")
f.write("# elapsed time in solve() = %.3f seconds\n" % self.runtime)
f.write("# elapsed time in solve_u() = %.3f seconds\n" % self.runtime_u)
f.write("# elapsed time in solve_d() = %.3f seconds\n" % self.runtime_d)
f.write("# elapsed time in assemble() = %.3f seconds\n" % self.runtime_assemble)
f.write("# elapsed time in export() = %.3f seconds\n" % self.runtime_export)
f.write("# elapsed time in JIntegral() = %.3f seconds\n" % self.runtime_JIntegral)
f.write("# elapsed time in remeshing_mmg() = %.3f seconds\n" % self.runtime_remeshing_mmg)
f.write("# elapsed time in remeshing_interpolation() = %.3f seconds\n" % self.runtime_remeshing_interpolation)
f.close()
def remeshing(self):
tic_remeshing_mmg = time()
self.d_var = Function(self.Vd,name="Damage variation")
self.d_var.vector()[:] = self.d.vector()[:] - self.d_ar.vector()[:]
max_d_var = max(self.d_var.vector()) #norm(self.d_var.vector(),'linf') #max(self.d_var.vector())
self.comm.barrier()
max_d_var = self.comm.allreduce(max_d_var, op=pyMPI.MAX)
if (max_d_var > self.remeshing_criterion):
#if (np.hstack(self.comm.allgather(self.d_var.compute_vertex_values())).max() > self.remeshing_criterion):
self.remesh = True
#self.d_var_smooth = filter_function(self.d,self.d_var,self.Vd,self.gaussian_filter_sigma,\
# self.mat.damage_dim,self.dim)
#self.metric_field = np.array(self.d_var_smooth.compute_vertex_values())
self.metric = self.remesher.metric(self.metric,self.mat.damage_dim,self.d,self.Vd,self.remeshing_index)
if (self.rank == 0):
metric = meshio.xdmf.read(self.remesher.mesh_path+"metric_%s.xdmf" % self.remeshing_index).point_data["v:metric"][:,0]
self.num_vertices = metric.size
self.remesher.write_sol(metric,self.num_vertices,self.remeshing_index)
geo_tmpl = 'mmg_tmp'
self.remesher.remesh(self.dim,geo_tmpl,self.nbgrains,self.remeshing_index)
xdmf = MeshioMsh2Xdmf(self.dim,self.remesher.mesh_path+self.remesher.mesh_file+'_remeshed_%s'\
% self.remeshing_index,extras='')
xdmf.write_xdmf_mesh()
xdmf.read_xdmf_mesh()
self.mesh = xdmf.mesh
#self.num_vertices = self.mesh.num_vertices() # NOK in parallel, use metric size insteadn.num
self.mf = xdmf.mf
self.facets = xdmf.facets
self.mvc = xdmf.mvc
self.dx = xdmf.dx
self.normal = FacetNormal(self.mesh)
hmin, hmax = self.mesh.hmin(), self.mesh.hmax()
self.comm.barrier()
hmin, hmax = self.comm.allreduce(hmin, op=pyMPI.MIN), self.comm.allreduce(hmax, op=pyMPI.MAX)
if (self.rank == 0):
print("max cell size =", hmax)
print("min cell size =", hmin)
self.runtime_remeshing_mmg += time() - tic_remeshing_mmg
tic_remeshing_interpolation = time()
for (boundary,marker) in list(zip(self.boundaries,self.markers)):
boundary().mark(self.facets, marker)
for (domain,domain_marker) in list(zip(self.domains,self.domains_markers)):
domain().mark(self.mf, domain_marker)
if (not self.dsJ==[]):
mfJ = MeshFunction("size_t", self.mesh, 1)
dsj = Measure("ds", subdomain_data=mfJ)
#dSJ = Measure("dS", subdomain_data=mfJ)
for (i,(Jcontour,Jmarker)) in enumerate(list(zip(self.jcontours,self.jmarkers))):
Jcontour().mark(mfJ, Jmarker)
self.dsJ[i] = dsj(Jmarker)
self.ds = Measure("ds", subdomain_data=self.facets)
self.mat = EXD(self.mat.dim,self.mat.damage_dim,self.mat.mp,\
self.mesh,self.mf,self.mat.geometry,behaviour=self.mat.behaviour,mfront_behaviour=self.mat.mfront_behaviour,\
damage_model=self.mat.damage_model,anisotropic_elasticity=self.mat.anisotropic_elasticity,\
damage_tensor=self.mat.damage_tensor)
# Re-Definition of functions spaces
self.Vu = VectorFunctionSpace(self.mesh, "CG", self.u_degree, dim=self.dim)
if self.mat.damage_dim == 1:
self.Vd = FunctionSpace(self.mesh, "CG", self.d_degree)
else:
self.Vd = VectorFunctionSpace(self.mesh, "CG", self.d_degree, dim=self.mat.damage_dim)
self.V0 = FunctionSpace(self.mesh, "DG", 0)
self.Vsig = TensorFunctionSpace(self.mesh, "CG", 1, shape=(3,3))
self.VV = VectorFunctionSpace(self.mesh, "DG", 0, dim=3)
self.Vr = TensorFunctionSpace(self.mesh, "DG", 0, shape=(3,3))
#self.Vstiffness = TensorFunctionSpace(self.mesh, "CG", 1, shape=(6,6))
self.Vmetric = FunctionSpace(self.mesh, "CG", self.d_degree)
self.u_ = TestFunction(self.Vu)
self.du = TrialFunction(self.Vu)
self.d_ = TestFunction(self.Vd)
self.dd = TrialFunction(self.Vd)
# Interpolation of functions onto the new function spaces
tmp = self.u
self.u = Function(self.Vu,name="Displacement")
LagrangeInterpolator.interpolate(self.u,tmp)
#tmp = self.u_prev
#self.u_prev = Function(self.Vu,name="Previous displacement")
#LagrangeInterpolator.interpolate(self.u_prev,tmp)
tmp = self.d
self.d = Function(self.Vd,name="Damage")
LagrangeInterpolator.interpolate(self.d,tmp)
tmp = self.d_prev
self.d_prev = Function(self.Vd,name="Previous Damage")
LagrangeInterpolator.interpolate(self.d_prev,tmp)
tmp = self.d_prev_iter
self.d_prev_iter = Function(self.Vd,name="Previous damage in staggered minimization")
LagrangeInterpolator.interpolate(self.d_prev_iter,tmp)
#tmp = self.dold
#self.dold = Function(self.Vd,name="Old Damage")
#LagrangeInterpolator.interpolate(self.dold,tmp)
tmp = self.d_ub
self.d_ub = Function(self.Vd,name="Damage upper bound")
#LagrangeInterpolator.interpolate(self.d_ub,tmp)
self.d_ub = self.mat.dub
tmp = self.d_lb
self.d_lb = Function(self.Vd,name="Lower bound d_n")
LagrangeInterpolator.interpolate(self.d_lb,tmp)
self.d_ar= Function(self.Vd,name="Damage field after remeshing")
self.d_ar.vector()[:] = self.d.vector()[:]
#LagrangeInterpolator.interpolate(self.d_ar,self.d)
tmp = self.metric
self.metric = Function(self.Vmetric,name="Remeshing metric")
LagrangeInterpolator.interpolate(self.metric,tmp)
#tmp = self.sig
self.sig = Function(self.Vsig,name="Stress")
self.eel = Function(self.Vsig,name="ElasticStrain")
#LagrangeInterpolator.interpolate(self.sig,tmp)
#tmp = self.V1
#self.V1 = Function(self.VV,name="V1")
#LagrangeInterpolator.interpolate(self.V1,tmp)
#tmp = self.V2
#self.V2 = Function(self.VV,name="V2")
#LagrangeInterpolator.interpolate(self.V2,tmp)
#tmp = self.V3
#self.V3 = Function(self.VV,name="V3")
#LagrangeInterpolator.interpolate(self.V3,tmp)
#tmp = self.R
self.R = Function(self.Vr,name="Rotation matrix")
#LagrangeInterpolator.interpolate(self.R,tmp)
self.myBCS.Vu = self.Vu
self.myBCS.Vd = self.Vd
self.myBCS.facets = self.facets
self.bcs, self.bc_d = self.myBCS.make_bcs(self.dim,self.mat.damage_dim)
self.myResultant.u = self.u
self.myResultant.d = self.d
self.myResultant.P1pos = self.P1pos
self.myResultant.P2pos = self.P2pos
self.myResultant.P3pos = self.P3pos
self.myResultant.ds = self.ds
self.resultant = self.sig[1,1]*self.ds(5)
#self.resultant = self.mat.sigma(self.u,self.d,self.P1pos,self.P2pos,self.P3pos)[1,1]*self.dsJ[3]
#self.resultant = self.myResultant.make_resultant() #*Measure("ds", subdomain_data=self.facets)
self.Wext = self.define_external_work()
self.set_energies()
self.set_problems()
self.remeshing_index += 1
self.runtime_remeshing_interpolation += time() - tic_remeshing_interpolation
else:
self.remesh = False
self.set_problems() #self.solver_u.params["user_switch"] = (not self.remesh) #
def sigma(self):
if (self.mat.behaviour=="linear_elasticity"):
self.sig = self.mat.sigma(self.u,self.d,self.P1pos,self.P2pos,self.P3pos)
else:
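            # The MFront/MGIS flux is a packed symmetric tensor with sqrt(2) factors on
            # the shear components (TFEL convention); unpack it into a full 3x3 matrix.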
s = self.solver_u.get_flux("Stress", project_on=("DG", 0))
if self.dim==2:
sig = as_matrix([[s[0], s[3]/sqrt(2.), 0.],\
[s[3]/sqrt(2.), s[1], 0.],\
[0., 0., s[2]]])
else:
sig = as_matrix([[s[0], s[3]/sqrt(2.), s[4]/sqrt(2.)],\
[s[3]/sqrt(2.), s[1], s[5]/sqrt(2.)],\
[s[4]/sqrt(2.), s[5]/sqrt(2.), s[2]]])
#self.sig.vector()[:] = sig.vector()
#self.sig = Function(self.Vsig,name="Stress")
self.sig.assign(local_project(sig, self.Vsig))
#self.sig *= (1.-self.d)**2
def eps_elas(self):
if (not self.mat.behaviour=="linear_elasticity"):
e = self.solver_u.get_state_variable("ElasticStrain", project_on=("DG", 0))
if self.dim==2:
e = as_matrix([[e[0], e[3]/sqrt(2.), 0.],\
[e[3]/sqrt(2.), e[1], 0.],\
[0., 0., e[2]]])
else:
e = as_matrix([[e[0], e[3]/sqrt(2.), e[4]/sqrt(2.)],\
[e[3]/sqrt(2.), e[1], e[5]/sqrt(2.)],\
[e[4]/sqrt(2.), e[5]/sqrt(2.), e[2]]])
self.eel.assign(local_project(e, self.Vsig))
def startup_message(self):
if (self.rank==0):
version, date = get_version_date(package_name='gradam')
print(' ##################################################################\n',\
'########## gradam-%s ##########\n' % version,\
'########## <NAME> (C) ##########\n',\
'########## <EMAIL> ##########\n',\
'########## Installed on: %s ##########\n' % date,\
'##################################################################\n')
# def user_postprocess(self):
# if (self.niter % 10) ==0:
# intermediate_loading = self.load_steps[self.incr-1]+self.niter/float(self.staggered_solver["iter_max"])*(self.load_steps[self.incr]-self.load_steps[self.incr-1])
# self.export_damage(intermediate_loading/self.final_time)
# if self.save_all:
# self.export_all(intermediate_loading/self.final_time)
def local_project(v,V,u=None):
dv = TrialFunction(V)
v_ = TestFunction(V)
a_proj = inner(dv, v_)*dx
b_proj = inner(v, v_)*dx
solver = LocalSolver(a_proj, b_proj)
solver.factorize()
if u is None:
u = Function(V)
solver.solve_local_rhs(u)
return u
else:
solver.solve_local_rhs(u)
return
def gaussian_filter(v,sigma):
values = v.vector()[:].T #recover values of d as numpy array
values = sp.ndimage.filters.gaussian_filter(values, sigma, mode='constant')
filtered = Function(v.function_space()) # generate a new function
filtered.vector()[:] = values.T
return filtered
def filter_function(u,v,V,sigma,damage_dim,dim):
xyz = V.tabulate_dof_coordinates()
xyz = xyz.reshape((int(len(xyz)/damage_dim),damage_dim,dim))[:,0,:]
x = xyz[:,0]
y = xyz[:,1]
#z = xyz[:,2]
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
Ly, Lx = xmax-xmin, ymax-ymin # Lx and Ly are inverted volontarily
N = x.shape[0]
#zmin, zmax = z.min(), z.max()
#grid_x, grid_y, grid_z = np.mgrid[xmin:xmax:(x.shape*1j), ymin:ymax:(y.shape*1j), zmin:zmax:(z.shape*1j)]
# xi = range(x.shape[0]) #np.linspace(0,x.shape[0])
# yi = range(y.shape[0]) #np.linspace(0,y.shape[0])
grid_x, grid_y = np.mgrid[xmin:xmax:(int(((xmax-xmin)/(ymax-ymin))*np.sqrt(N*Lx/Ly))*1j),
ymin:ymax:(int(((ymax-ymin)/(xmax-xmin))*np.sqrt(N*Ly/Lx))*1j)]
#grid_x, grid_y = np.mgrid[xmin:xmax:(100*1j), ymin:ymax:(100*1j)]
field = u.vector().get_local()
field = field.reshape((int(len(field)/damage_dim),damage_dim))
field = LA.norm(field,ord=np.inf,axis=1)
values = u.vector().get_local()
values = values.reshape((int(len(values)/damage_dim),damage_dim))
values = LA.norm(values,ord=np.inf,axis=1)
for (i,value) in enumerate(field):
if (field[i]>0.5):
values[i] = max(values[i],values.max()/10.)
from scipy.interpolate import griddata
image = griddata(xyz, values, (grid_x, grid_y), method='cubic') #nearest
image = values.max()*image/image.max()
image_filtered = sp.ndimage.filters.gaussian_filter(image, sigma, mode='constant')
image_filtered = values.max()*image_filtered/image_filtered.max()
# x_coords = {value: index for index, value in list(zip(xi,x))}
# y_coords = {value: index for index, value in list(zip(yi,y))}
# plt.imshow(image)
values_filtered = np.zeros_like(values)
q,p=image_filtered.shape
for (i,(xic,yic)) in enumerate(list(zip(x,y))):
#print(int(np.round(xic*(x.shape[0]-1))),int(np.round(yic*(y.shape[0]-1))))
values_filtered[i] = image_filtered[min(max(int(np.floor(((xic-xmin)/(ymax-ymin))*np.floor(np.sqrt(N*Lx/Ly)))),0),q-1),\
min(max(int(np.floor(((yic-ymin)/(xmax-xmin))*np.floor(np.sqrt(N*Ly/Lx)))),0),p-1)]
# values_filtered[i] = image_filtered[int(np.floor(((xic-xmin)/(ymax-ymin))*np.sqrt(N-1))),\
# int(np.floor(((yic-ymin)/(xmax-xmin))*np.sqrt(N-1)))]
# if (field[i]==1.):
# values_filtered[i] = values.max()
#values_filtered = image_filtered.T
values_filtered[values_filtered<values_filtered.max()/100.]=0
# plt.imshow( griddata(xyz, values_filtered, (grid_x, grid_y), method='cubic') )
v_filtered = Function(v.function_space().sub(0).collapse()) # generate a new function
v_filtered.vector()[:] = values_filtered
return v_filtered
```
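For orientation, a minimal driving script for the classes above (hedged sketch: the mesh, the facet markers, the boundary conditions and the EXD material `mat` are application-specific and only indicated schematically; tag values and the output prefix are placeholders):

```python
from dolfin import MeshFunction, DirichletBC
from gradam.problem import FractureProblem

facets = MeshFunction("size_t", mesh, mesh.topology().dim() - 1)  # mark boundaries here
pb = FractureProblem(mesh, facets, mat, prefix="results/")
pb.bcs = [DirichletBC(pb.Vu.sub(1), pb.Uimp[0], facets, 1)]  # imposed displacement on tag 1 (assumed)
pb.bc_d = []                                                 # optional Dirichlet conditions on damage
pb.final_time, pb.max_dtime = 1e-2, 1e-3                     # the "time" is the load factor
pb.solve()
```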
#### File: src/gradam/remesher.py
```python
__author__ = "<NAME>"
__license__ = "CC BY-SA 4.0"
__email__ = "<EMAIL>"
import os
from dolfin import *
import numpy as np
import meshio
from mpi4py import MPI as pyMPI
import matplotlib.pyplot as plt
from .hybrid_linear_solver import *
# import subprocess
class Remesher:
def __init__(self,mesh_path='',mesh_file='',sol_file='',beta=None,delta=None,number_of_nodes_index=None,\
sol_min=None,sol_max=None,save_files=False):
self.mesh_path = mesh_path
self.mesh_file = mesh_file
self.sol_file = sol_file
self.beta = beta
self.delta = delta
self.number_of_nodes_index = number_of_nodes_index # index of the nodes number in msh files (depends on msh version)
self.sol_min = sol_min
self.sol_max = sol_max
self.save_files = save_files
def diffusion(self,v,V):
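# Smooth the damage field v by solving a screened-Poisson problem u - delta^2*Laplacian(u) = v; the smoothed field drives the remeshing metric.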
dv = TrialFunction(V)
v_ = TestFunction(V)
a = dv*v_*dx + self.delta**2*dot(grad(dv), grad(v_))*dx
L = v*v_*dx #inner(v, v_)*dx
u = Function(V)
solver_metric = HybridLinearSolver(a,L,u,bcs=[],parameters={"iteration_switch": 5,"user_switch": True})
#solve(a == L, u)
if (MPI.rank(MPI.comm_world)==0):
print("Solving the pseudo heat equation to compute the remeshing metric field")
solver_metric.solve()
return u
def metric(self,previous_metric,damage_dim,d,Vd,remeshing_index):
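# Build a scalar metric in [0,1] from the diffused damage components and take the max with the previous metric so that refined regions are never coarsened.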
VV = Vd
if (damage_dim>1):
VV = Vd.sub(0).collapse()
diffuse = Function(VV,name="Diffused damage")
metric_field = Function(VV,name="v:metric")
metric_field.vector()[:] = diffuse.vector()[:]
if (damage_dim>1):
for k in range(damage_dim):
diffuse = self.diffusion(d.sub(k),VV)
metric_field.vector()[:] = np.maximum(metric_field.vector()[:], diffuse.vector()[:]) #element wise maximum #or # += diffuse.compute_vertex_values() #sum
else:
diffuse = self.diffusion(d,VV)
metric_field.vector()[:] = np.maximum(metric_field.vector()[:], diffuse.vector()[:])
mini, maxi = min(metric_field.vector()), max(metric_field.vector())
pyMPI.COMM_WORLD.barrier()
mini, maxi = pyMPI.COMM_WORLD.allreduce(mini, op=pyMPI.MIN), pyMPI.COMM_WORLD.allreduce(maxi, op=pyMPI.MAX)
metric_field.vector()[:] = (metric_field.vector()[:] - mini)/(max(maxi - mini,1.e-6))
metric_field.vector()[:] = np.maximum(metric_field.vector()[:], previous_metric.vector()[:]) #/!\ prevents mesh to coarsen
xdmf = XDMFFile(pyMPI.COMM_WORLD, self.mesh_path+"metric_%s.xdmf" % remeshing_index)
xdmf.write(metric_field)
xdmf.close()
return metric_field
def write_uniform_sol(self,uniform_metric):
s = open(self.mesh_path+self.mesh_file+'_remeshed_0.sol','w')
f = open(self.mesh_path+self.mesh_file+'.msh', "r")
lines = f.readlines()
f.close()
i = 0
number_of_nodes = 0
while (number_of_nodes==0):
if lines[i].startswith("$Nodes"):
number_of_nodes = int(lines[i+1].split()[self.number_of_nodes_index])
i+=1
s.write('MeshVersionFormatted 2\n\nDimension 3\n\nSolAtVertices\n%s\n1 1\n\n' % number_of_nodes)
for i in range(number_of_nodes):
s.write('%s\n' % uniform_metric)
s.write('\nEND')
s.close()
def write_sol(self,metric_field,number_of_nodes,remeshing_index):
'''
f = open(self.mesh_path+self.mesh_file+'_remeshed_%s.msh' % (remeshing_index-1), "r")
lines = f.readlines()
f.close()
i = 0
number_of_nodes = 0
while (number_of_nodes==0):
if lines[i].startswith("$Nodes"):
number_of_nodes = int(lines[i+1].split()[self.number_of_nodes_index])
i+=1
'''
s = open(self.mesh_path+self.sol_file+'_remeshed_%s.sol' % (remeshing_index-1),'w')
s.write('MeshVersionFormatted 2\n\nDimension 3\n\nSolAtVertices\n%s\n1 1\n\n'%number_of_nodes)
for i in range(number_of_nodes):
#new_sol = min( max( self.sol_max*(1. - 2.*metric_field[i]) , self.sol_min) , self.sol_max )
#new_sol = min( max( self.sol_max*(1. - 5.*metric_field[i]) , self.sol_min) , self.sol_max )
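# Map the normalized metric to a target edge size: close to sol_min where the metric is large (damaged zones), close to sol_max elsewhere.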
new_sol = max( self.sol_max*max((1. - self.beta*metric_field[i]),0.)**0.5 , self.sol_min)
s.write( '%s\n' % format(new_sol, '.4f') )
s.write('\nEND')
s.close()
def remesh(self,dim,geo_tmpl,nbgrains,remeshing_index):
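# Run mmg2d/mmg3d on the previous .mesh/.sol pair, then use the GMSH .geo template (with !oldmesh/!newmesh/!nbgrains substituted) to convert the MMG output back to .msh.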
if (remeshing_index==1):
self.convert_msh2mesh(dim,remeshing_index-1)
oldmesh = self.mesh_file+"_remeshed_%s" % (remeshing_index-1)
newmesh = self.mesh_file+"_remeshed_%s" % (remeshing_index) #+1)
medit = ""
if (dim==2):
medit = "-3dMedit %s" % dim
command = "mmg%sd_O3 %s %s" % (dim,medit,self.mesh_path+oldmesh+'.mesh')
print("\nCalling MMG to perform remeshing: %s \n" % command )
os.system(command)
#subprocess.call(["mmg%sd_O3" % dim, "-3dMedit", "%s" % dim, "%s" % (mesh_path+oldmesh+'.mesh')] )
f = open(self.mesh_path+geo_tmpl+'.geo.tmpl','r')
lines = f.readlines()
f.close()
geo = self.mesh_path+geo_tmpl+'.geo'
out = open(geo,'w')
for line in lines:
new_line = line
if "!oldmesh" in line:
new_line = line.replace("!oldmesh",oldmesh)
if "!newmesh" in line:
new_line = line.replace("!newmesh",newmesh)
if "!nbgrains" in line:
new_line = line.replace("!nbgrains",str(nbgrains))
out.write(new_line)
out.close()
print("\nCalling GMSH inline to save MMG output as new mesh and future MMG input\n")
os.system("gmsh -%s %s" % (dim,geo))
#subprocess.call(["gmsh", "-%s" % dim, "%s" % geo])
if (not self.save_files):
self.cleanup_files(remeshing_index-1)
def convert_msh2mesh(self,dim,remeshing_index):
geo = self.mesh_path+'convert_0.geo'
c = open(geo,'w')
c.write('Merge "%s_remeshed_%s.msh";\n' % (self.mesh_file,remeshing_index))
c.write('Save "%s_remeshed_%s.mesh";' % (self.mesh_file,remeshing_index))
c.close()
os.system("gmsh -%s %s" % (dim,geo))
def cleanup_files(self,remeshing_index):
files = [f for f in os.listdir(self.mesh_path) if '_'+str(remeshing_index)+'.' in f]
if (remeshing_index==0):
files.remove(self.mesh_file+'_remeshed_0.msh')
for f in files:
os.system("rm %s%s" % (self.mesh_path,f))
#os.system("rm %s*_%s.*" % (self.mesh_path,remeshing_index))
```
#### File: gradam/tests/single_crystal_tension.py
```python
__author__ = "<NAME>"
__license__ = "CC BY-SA 4.0"
__email__ = "<EMAIL>"
import os
import numpy as np
from dolfin import *
from mshr import *
from gradam import *
set_log_level(40) # remove unnecessary console outputs
# Create mesh
mesh_folder = "meshes/"
dim=2
refinement_level = 3
hole_spacing = 0
aspect_ratio = .01
L, W, R = 50., 1., 0.01
N = 150
if(0):
domain = Rectangle(Point(0, 0), Point(L, W)) \
- Ellipse(Point(L/2-hole_spacing, 0.), R/aspect_ratio, R, 10) \
- Ellipse(Point(L/2+hole_spacing, W), R/aspect_ratio, R, 10)
mesh = generate_mesh(domain, N)
# mesh refinement
for r in range(refinement_level):
class CentralPart(SubDomain):
def inside(self, x, on_boundary):
return L/2-2.*(((1+1/aspect_ratio)*R)/2-hole_spacing) <= x[0] <= L/2+2.*(((1+1/aspect_ratio)*R)/2+hole_spacing)
to_refine = MeshFunction("bool", mesh, 2)
CentralPart().mark(to_refine, True)
mesh = refine(mesh, to_refine)
# plt.figure()
# plot(mesh)
# plt.show()
xdmf_file = XDMFFile(MPI.comm_world, mesh_folder+"single_crystal_bar.xdmf")
xdmf_file.write(mesh)
#mesh_file = File(mesh_folder+"single_crystal_bar.xml")
#mesh_file << mesh
# Load mesh
mesh_path = mesh_folder+"single_crystal_bar"
#mesh = Mesh(mesh_path+".xml")
xdmf_file = XDMFFile(MPI.comm_world, mesh_path+".xdmf")
mesh = Mesh()
xdmf_file.read(mesh)
print("number cells =", mesh.num_cells())
print("max cell size =", mesh.hmax())
# Define boundaries and boundary integration measure
facets = MeshFunction("size_t", mesh, 1)
facets.set_all(0)
class Left(SubDomain):
def inside(self, x, on_boundary):
return near(x[0], 0)
class Right(SubDomain):
def inside(self, x, on_boundary):
return near(x[0], L)
class Bottom(SubDomain):
def inside(self, x, on_boundary):
return near(x[1], 0)
class Top(SubDomain):
def inside(self, x, on_boundary):
return near(x[1], W)
class Crack(SubDomain):
def inside(self, x, on_boundary):
return True if near(x[0],L/2,0.02) and near(x[1],W/2,0.02) else False
Left().mark(facets, 1)
Right().mark(facets, 2)
Bottom().mark(facets, 3)
Top().mark(facets, 4)
Crack().mark(facets, 5)
# make MeshFunction for grain(s) subdomain(s)
one_func = MeshFunction("size_t",mesh,2)
for cell in cells(mesh):
one_func[cell] = 1
mf = one_func
dx = Measure("dx", subdomain_data=mf)
# define material parameters
damage_tensor = True
q_, p_ = 1., .5
q, p = Constant(q_), Constant(p_)
gamma_, r_ = 4.0, 0.0
gamma, r = Constant(gamma_), Constant(r_)
zener = 1.
E_, nu_ = 200., 0.3
G_ = zener*E_/(2.*(1.+nu_))
E = [E_]*1
nu = [nu_]*1
G = [G_]*1
# damage model parameters
damage_dim = 2
l0_= 4.e-1
l0 = [[l0_]*damage_dim]*1
Gc_= 1.e-1
Gc = [[Gc_]*damage_dim]*1
dub = [[.99]*damage_dim]*1
# make the fracture toughness anisotropy tensor B(3x3) in the crystal frame
I_ = np.eye(3)
I = as_tensor(I_)
if (damage_dim==1):
B_crystal = I
else:
B_crystal = []
D_crystal = []
alp = 0. # set a value > 0 to use the AFE model
alpha = Constant(alp)
M_ = [[1., 0., 0.], [0.,1.,0]]
M = [as_vector([1., 0., 0.]), as_vector([0.,1.,0])]
P = [ [[0., 1., 0.],[0., 0., 1.]], [[0., 0., 1.],[1., 0., 0.]] ]
if dim == 3:
M_.append([0., 0., 1.])
M.append(as_vector([0., 0., 1.]))
P.append([[1., 0., 0.],[0., 1., 0.]])
for n in range(damage_dim):
B_crystal.append(as_tensor(I) + alpha*(as_tensor(I) - outer(M[n],M[n])))
D_crystal.append(0.5*( np.einsum('ij,kl->ikjl',I_,I_) + np.einsum('ij,kl->iljk',I_,I_) ) -\
np.einsum('i,j,k,l->ijkl',M_[n],M_[n],M_[n],M_[n]) -\
np.einsum('i,j,k,l->ijkl',M_[n],P[n][0],M_[n],P[n][0]) -\
np.einsum('i,j,k,l->ijkl',M_[n],P[n][1],M_[n],P[n][1]) -\
np.einsum('i,j,k,l->ijkl',P[n][0],M_[n],P[n][0],M_[n]) -\
np.einsum('i,j,k,l->ijkl',P[n][1],M_[n],P[n][1],M_[n]) )
# initialize material model class
material_parameters = {"E":E, "nu":nu, "G":G, "Gc":Gc, "l0": l0,\
"B_crystal": B_crystal, "D_crystal": D_crystal, "dub": dub}
mat = EXD(dim,damage_dim,material_parameters,mesh,mf,mesh_path,damage_model="AT1",\
damage_tensor=[damage_tensor,q,p,'Lorentz',gamma,r])
suffix = mat.__class__.__name__
mat.anisotropic_degradation = True # set this to False and alp>0 to use the AFE model
#mat.tension_compression_asymmetry = False #True # not implemented yet
# initialize problem class
save_folder = "TensionLocalized/" # The path must exist
problem = FractureProblem(mesh,facets,mat,save_folder,load=Constant((0.,)*dim))
problem.max_dtime = 1.e-2
problem.final_time = 2.
problem.use_hybrid_solver = True
# setup boundary conditions
problem.Uimp = [Expression("t", t=0, degree=0)]
problem.bcs = [DirichletBC(problem.Vu.sub(0), Constant(0.), facets, 1),
DirichletBC(problem.Vu.sub(0), problem.Uimp[0], facets, 2)]
problem.bc_d = [DirichletBC(problem.Vd.sub(0), Constant(.01), facets, 5)]
# compute resultant force
problem.ds = Measure('ds')(subdomain_data=facets)
problem.resultant = problem.mat.sigma(problem.u,problem.d,problem.P1pos,problem.P2pos,problem.P3pos)[0,0]*problem.ds(2)
# Increase staggered solver accuracy
problem.staggered_solver["tol"]=1e-6
# solve problem
if (not os.path.isfile(save_folder+"output.xdmf")):
print("START")
problem.solve()
else:
print("Please remove data from %s to launch a new simulation" % save_folder)
```
|
{
"source": "jeanmichelscherer/mef90",
"score": 3
}
|
#### File: mef90/bin/exoProject2D.py
```python
def parse():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("inputFile", help = "The name of the exodus file to fix.", type = str)
parser.add_argument("outputFile", help = "The name of the exodus file to be written.", type = str)
args = parser.parse_args()
return args
def main():
import sys
if sys.version_info.major == 3:
import exodus3 as exo
else:
import exodus2 as exo
import numpy as np
options = parse()
print(options)
exoin = exo.exodus(options.inputFile,mode='r')
numNodes = exoin.num_nodes()
numElems = exoin.num_elems()
numBlocks = exoin.num_blks()
numNodeSets = exoin.num_node_sets()
exoout=exo.exodus(options.outputFile, mode='w',title=exoin.title(),numDims=2,numNodes=numNodes,
numElems=numElems,numBlocks=numBlocks,numNodeSets=numNodeSets,numSideSets=0)
exoout.put_coord_names(exoin.get_coord_names()[0:2])
coord = exoin.get_coords()
exoout.put_coords(coord[0],coord[1],coord[2])
# cell sets
blkids = exoin.get_elem_blk_ids()
for id in blkids:
blkName = exoin.get_elem_blk_name(id)
(elemType,numElemInBlock,numNodesPerElem,numAttr) = exoin.elem_blk_info(id)
exoout.put_elem_blk_info(id,elemType,numElemInBlock,numNodesPerElem,0) #ignoring attributes
exoout.put_elem_connectivity(id,exoin.get_elem_connectivity(id)[0])
# node sets
setids = exoin.get_node_set_ids()
for id in setids:
# e.get_node_set_params() -> get number of nodes and distribution factors
numNodes,numDistFactors = exoin.get_node_set_params(id)
exoout.put_node_set_params(id,numNodes,numDistFactors)
exoout.put_node_set_name(id,exoin.get_node_set_name(id))
exoout.put_node_set(id,exoin.get_node_set_nodes(id))
exoin.close()
### Adding a QA record, needed until visit fixes its exodus reader
import datetime
import os.path
import sys
QA_rec_len = 32
QA = [os.path.basename(sys.argv[0]),os.path.basename(__file__),datetime.date.today().strftime('%Y%m%d'),datetime.datetime.now().strftime("%H:%M:%S")]
exoout.put_qa_records([[ q[0:31] for q in QA],])
exoout.close()
if __name__ == "__main__":
import sys
sys.exit(main())
```
#### File: python/pymef90/energies.py
```python
def plot(energies, showwork=False):
import matplotlib.pyplot as plt
plt.plot(energies[:,1], energies[:,2], 'r-', label='Elastic energy')
if showwork:
plt.plot(energies[:,1], energies[:,3], 'k-', label='External Forces')
plt.plot(energies[:,1], energies[:,4], 'g-', label='Surface energy')
plt.plot(energies[:,1], energies[:,5], 'b-', label='Total energy', lw=2)
plt.grid()
plt.legend(loc=0)
plt.xlabel('t')
plt.ylabel('Energy')
plt.title('Energies vs normalized time')
###
return 0
def getlaststep2(fname):
### open file
f=open(fname)
### Read last line in a string
lastline = f.readlines()[-1]
laststep = lastline.rsplit()[0]
return(int(laststep))
def getlaststep(fname):
import numpy as np
### open file
ener = np.loadtxt(fname)
print(ener[-1])
return ener[-1][0]
def save(fname, energies):
import numpy as np
np.savetxt(fname, energies, fmt='%7d %13.5E %13.5E %13.5E %13.5E %13.5E %13.5E')
def fixBT(energies, laststep=None):
import numpy as np
###
if laststep == None:
laststep = int(energies[-1,0])
else:
laststep = min(int(energies[-1,0]), int(laststep))
maxstep = energies.shape[0]
###
energiesBT = np.zeros([laststep,energies.shape[1]])
###
i = 0
while True:
step = int(energies[i,0])
energiesBT[step-1,:] = energies[i,:]
i += 1
if step == laststep or i >= maxstep:
break
return energiesBT
def computeG(t, Eel, l):
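# Energy release rate by finite differences: G = -d(Eel/t^2)/dl, with the elastic energy rescaled by the squared load factor t.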
import numpy as np
###
G = -(Eel[1:]/np.power(t[1:],2)-Eel[:-1]/np.power(t[:-1],2)) / (l[1:]-l[:-1])
###
return G
def ReadCompositeEnergies(prefix, stepmin=None, stepmax=None):
import numpy as np
###
toughness = np.loadtxt(prefix+'.CST', skiprows=1, usecols=[1])
all_energies = []
for blk in range(toughness.shape[0]):
blkfile="%s-%.4i.enerblk" % (prefix, blk+1)
all_energies.append(np.loadtxt(blkfile))
###
if stepmin == None:
tmin = 0
else:
tmin = int(stepmin)
if stepmax == None:
tmax = all_energies[0].shape[0]
else:
tmax = int(stepmax)
###
Eel = np.sum(e[tmin:tmax,2] for e in all_energies)
l = np.sum(e[tmin:tmax,4]/k for (e,k) in zip(all_energies, toughness))
t = all_energies[0][tmin:tmax,1]
return Eel, l, t, toughness
def ReadCompositeEnergiesBT(prefix, laststep=None):
import numpy as np
###
toughness = np.loadtxt(prefix+'.CST', skiprows=1, usecols=[1])
all_energies = []
for blk in range(toughness.shape[0]):
blkfile="%s-%.4i.enerblk" % (prefix, blk+1)
all_energies.append(np.loadtxt(blkfile))
###
### Compute last step and larger step
###
if laststep == None:
laststep = int(all_energies[0][-1,0])
else:
laststep = min(int(laststep), int(all_energies[0][-1,0]))
maxstep = all_energies[0].shape[0]
###
### Initialize all_energies_BT
###
all_energiesBT=[]
for e in all_energies:
all_energiesBT.append(np.zeros([int(laststep),e.shape[1]]))
###
### Remove redundant computations
###
i = 0
while True:
step = int(all_energies[0][i,0])
for (energiesBT, energies) in zip(all_energiesBT, all_energies):
energiesBT[step-1,:] = energies[i,:]
i += 1
if step == int(laststep) or i >= maxstep:
break
###
### Extract Elastic Energy, length, time, toughness
##
Eel = np.sum(e[:,2] for e in all_energiesBT)
l = np.sum(e[:,4]/k for (e,k) in zip(all_energiesBT, toughness))
t = all_energiesBT[0][:,1]
return Eel, l, t, toughness
```
#### File: python/pymef90/jobs.py
```python
def HookeLawIsotropicEnu3D(E,nu):
lmbda = E * nu / (1. + nu) / (1. - 2. * nu)
mu = E / (1. + nu) * .5
A=(lmbda + 2. * mu,lmbda, lmbda, 0.,0.,0.,
lmbda + 2. * mu,lmbda, 0.,0.,0.,
lmbda + 2.*mu, 0.,0.,0.,
mu,0.,0.,
mu,0.,
mu)
return(A)
def HookeLawIsotropicEnu2DPlainStress(E,nu):
lmbda = E * nu / (1. - nu*nu)
mu = E / (1. + nu) * .5
A=(lmbda + 2. * mu,lmbda, 0.,
lmbda + 2. * mu,0.,
mu)
return(A)
def HookeLawIsotropicEnu2DPlainStrain(E,nu):
lmbda = E * nu / (1. + nu) / (1. - 2. * nu)
mu = E / (1. + nu) * .5
A=(lmbda + 2. * mu,lmbda, 0.,
lmbda + 2. * mu,0.,
mu)
return(A)
def PrepareJob(Geometry,Parameters,debug=False):
import hashlib
import shutil
import os
import sys
Parameters['hash'] = hashlib.sha1(repr(Geometry).encode('utf-8')).hexdigest()
if os.getenv('PBS_JOBID'):
Parameters['jobid'] = os.getenv('PBS_JOBID')
elif os.getenv('JOB_ID'):
Parameters['jobid'] = os.getenv('JOB_ID')
elif os.getenv('SLURM_JOB_ID'):
Parameters['jobid'] = os.getenv('SLURM_JOB_ID')
else:
Parameters['jobid'] = '0000'
###
### Create a long file prefix if necessary
###
#if Parameters['prefix']:
# for k in sorted(Geometry.keys()):
# Parameters['prefix'] +='-%s_%s'%(k,Geometry[k])
#else:
# Parameters['prefix'] = Parameters['jobid']
if not Parameters['prefix']:
Parameters['prefix'] = Parameters['jobid']
###
### Find where the script was submitted from
###
if os.getenv('PBS_O_WORKDIR'):
# We are runnning inside a PBS job
submitdir = os.getenv('PBS_O_WORKDIR')
elif os.getenv('SGE_O_WORKDIR'):
# We are running inside a SGE job
submitdir = os.getenv('SGE_O_WORKDIR')
elif os.getenv('SLURM_SUBMIT_DIR'):
# We are running inside a SLURM/SBATCH job
submitdir = os.getenv('SLURM_SUBMIT_DIR')
else:
# We are running in interactive mode
submitdir = os.getcwd()
###
### Set workdir
###
if Parameters['workdir']:
###
### Try to figure out if workdir is a relative or absolute path
###
if not Parameters['workdir'][0] == '/':
Parameters['workdir'] = os.path.join(submitdir, Parameters['workdir'])
else:
if os.getenv('PBS_O_WORKDIR'):
# We are runnning inside a PBS job
Parameters['workdir'] = os.path.join(os.getenv('PBS_O_WORKDIR'),Parameters['jobid'])
elif os.getenv('SGE_O_WORKDIR'):
# We are running inside a SGE job
Parameters['workdir'] = os.path.join(os.getenv('SGE_O_WORKDIR'),Parameters['jobid'])
elif os.getenv('SLURM_SUBMIT_DIR'):
# We are running inside a SBATCH / SRUN job
Parameters['workdir'] = os.path.join(os.getenv('SLURM_SUBMIT_DIR'),Parameters['jobid'])
else:
# We are running in interactive mode
Parameters['workdir'] = os.path.join(os.getcwd(),Parameters['jobid'])
###
### Find the argument file
### Try absolute path then submission directory, then script directory
###
for root in ['/',submitdir,os.path.dirname(os.path.abspath(__file__))]:
if debug:
print ('searching for yamlfile in {0}'.format(root))
if os.path.isfile(os.path.join(root,Parameters['yamlfile'])):
Parameters['yamlfile'] = os.path.join(root,Parameters['yamlfile'])
break
###
### Find the meshes location
### Try absolute path then submission directory, then script directory
###
if 'meshdir' in Parameters.keys():
for root in ['/',submitdir,os.path.dirname(os.path.abspath(__file__))]:
if debug:
print ('searching for meshdir in {0}'.format(root))
if os.path.isdir(os.path.join(root,Parameters['meshdir'])):
Parameters['meshdir'] = os.path.join(root,Parameters['meshdir'])
break
if not os.path.isdir(Parameters['meshdir']):
os.makedirs(Parameters['meshdir'])
sys.exit(-1)
Parameters['MEF90_DIR'] = os.getenv("MEF90_DIR")
Parameters['PETSC_DIR'] = os.getenv("PETSC_DIR")
Parameters['PETSC_ARCH'] = os.getenv("PETSC_ARCH")
Parameters['scriptdir'] = os.path.dirname(os.path.abspath(__file__))
return Parameters
```
#### File: python/pymef90/parse.py
```python
import yaml
import argparse
def parse(parser,key=None):
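# Parse command-line arguments; if 'key' points to a YAML file argument, its top-level entries override
# optional/positional arguments and its nested mappings override the matching argument groups.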
args = parser.parse_args()
if key:
arg_dict = args.__dict__
if arg_dict[key]:
print('WARNING: reading options from YAML file. Duplicate and positional command line options will be ignored. \n')
data = yaml.load(arg_dict[key],Loader=yaml.FullLoader)
# replace the file object with its name
arg_dict[key] = arg_dict[key].name
# optional and positional arguments
for group in parser._action_groups:
if group.title in ['optional arguments', 'positional arguments']:
for key, value in data.items():
if not isinstance(value, dict):
arg_dict[key] = value
# groups
for group in parser._action_groups:
if group.title in data.keys():
for key, value in data[group.title].items():
if isinstance(value, list):
for v in value:
arg_dict[key].append(v)
else:
arg_dict[key] = value
return args
def parseGroup(parser,args,groupName):
for group in parser._action_groups:
if group.title == groupName:
return argparse.Namespace(**{a.dest:getattr(args,a.dest,None) for a in group._group_actions})
return argparse.Namespace()
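# Minimal usage sketch (argument names are hypothetical, not part of this module):
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--optionsfile', type=argparse.FileType('r'))
#   parser.add_argument('--prefix', default=None)
#   args = parse(parser, key='optionsfile')  # YAML values override command-line options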
```
#### File: jeanmichelscherer/mef90/vDefTempBC.py
```python
import sys
def parse(args=None):
import argparse
### Get options from the command line
parser = argparse.ArgumentParser(description='Compute boundary displacement for a surfing computation')
parser.add_argument('-i','--inputfile',help='input file',default=None)
parser.add_argument('-o','--outputfile',help='output file',default=None)
parser.add_argument("--time_min",type=float,help="first time step",default=0.)
parser.add_argument("--time_max",type=float,help="last time step",default=1.)
parser.add_argument("--time_numstep",type=int,help="number of time step",default=11)
parser.add_argument("--tmin",type=float,help="Min. Temp.",default=20.)
parser.add_argument("--tmax",type=float,help="Max. Temp.",default=60.)
parser.add_argument("--lc",type=float,help="Characteristic width",default=.1)
return parser.parse_args()
def exoformat(e):
global_variable_name = ["Elastic Energy","Work","Surface Energy","Total Energy"]
if e.num_dimensions() == 2:
node_variable_name = ["Temperature","Damage","Displacement_X","Displacement_Y"]
element_variable_name = ["External_Temperature","Heat_Flux","Pressure_Force",
"Force_X","Force_Y",
"Stress_XX","Stress_YY","Stress_XY"]
else:
node_variable_name = ["Temperature","Damage","Displacement_X","Displacement_Y","Displacement_Z"]
element_variable_name = ["External_Temperature","Heat_Flux","Pressure_Force",
"Force_X","Force_Y","Force_Z",
"Stress_XX","Stress_YY","Stress_ZZ","Stress_YZ","Stress_XZ","Stress_XY"]
e.set_global_variable_number(0)
e.set_node_variable_number(len(node_variable_name))
for i in range(len(node_variable_name)):
e.put_node_variable_name(node_variable_name[i],i+1)
e.set_element_variable_number(len(element_variable_name))
for i in range(len(element_variable_name)):
e.put_element_variable_name(element_variable_name[i],i+1)
e.set_element_variable_truth_table([True] * e.numElemBlk.value * len(element_variable_name))
return(0)
def cart2polar(x, y):
import numpy as np
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return r, theta
def surfingBC(e,t,Xc,cslist,vslist,E,nu,ampl):
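# Prescribe the mode I crack-tip asymptotic (Williams) displacement field, centred at Xc and translated by t along x, on the given cell and vertex sets.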
import exodus as exo
import numpy as np
kappa = (3.0-nu)/(1.0+nu)
mu = E / (1. + nu) * .5
dim = e.num_dimensions()
X,Y,Z=e.get_coords()
U = np.zeros([3,len(X)],dtype=exo.c_double)
csoffset = [e.elem_blk_info(set)[1] for set in cslist]
for set in cslist:
connect = e.get_elem_connectivity(set)
for cid in range(connect[1]):
vertices = [connect[0][cid*connect[2]+c] for c in range(connect[2])]
for v in vertices:
r,theta = cart2polar(X[v-1]-Xc[0]-t,Y[v-1]-Xc[1])
z = Z[v-1]-Xc[2]
U[0,v-1] = ampl * np.sqrt(r / np.pi * .5) / mu * .5 * np.cos(theta * .5) * (kappa - np.cos(theta))
U[1,v-1] = ampl * np.sqrt(r / np.pi * .5) / mu * .5 * np.sin(theta * .5) * (kappa - np.cos(theta))
U[2,v-1] = 0.0
for set in vslist:
for v in e.get_node_set_nodes(set):
r,theta = cart2polar(X[v-1]-Xc[0]-t,Y[v-1]-Xc[1])
z = Z[v-1]
U[0,v-1] = ampl * np.sqrt(r / np.pi * .5) / mu * .5 * np.cos(theta * .5) * (kappa - np.cos(theta))
U[1,v-1] = ampl * np.sqrt(r / np.pi * .5) / mu * .5 * np.sin(theta * .5) * (kappa - np.cos(theta))
U[2,v-1] = 0.0
return U
def temperature(e,time,Tmin,Tmax,lc):
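# Prescribe a temperature field: Tmin away from the x=0 plane, ramping linearly in |x| (and in time) up to Tmax inside the band |x| < lc.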
import exodus as exo
import numpy as np
dim = e.num_dimensions()
X,Y,Z=e.get_coords()
T = np.zeros([len(X)],dtype=exo.c_double)
for set in e.get_elem_blk_ids():
connect = e.get_elem_connectivity(set)
for cid in range(connect[1]):
vertices = [connect[0][cid*connect[2]+c] for c in range(connect[2])]
for v in vertices:
x = np.abs(X[v-1])
if x > lc:
T[v-1] = Tmin
else:
T[v-1] = Tmin + (time*(Tmax - Tmin)) * (lc-x) / lc
return T
def main():
import exodus as exo
import numpy as np
options = parse()
print options.inputfile
exoin = exo.exodus(options.inputfile,mode='r')
exoout = exoin.copy(options.outputfile)
exoin.close()
exoformat(exoout)
dim = exoout.num_dimensions()
step = 0
for t in np.linspace(options.time_min,options.time_max,options.time_numstep):
print "writing step",step+1,t
exoout.put_time(step+1,t)
T = temperature(exoout,t,options.tmin,options.tmax,options.lc)
X,Y,Z=exoout.get_coords()
exoout.put_node_variable_values("Temperature",step+1,T)
step += 1
exoout.close()
###
### compute boundary displacement at vertex sets
###
if __name__ == "__main__":
sys.exit(main())
```
|
{
"source": "jeanmidevacc/french-presidential-election-2022-data-collecter",
"score": 2
}
|
#### File: jeanmidevacc/french-presidential-election-2022-data-collecter/google_trends_collecter.py
```python
import json
from datetime import datetime
from time import sleep
import pandas as pd
import argparse
import boto3
import os
from pytrends.request import TrendReq
def set_job():
parser = argparse.ArgumentParser(description='Collect tweets')
parser.add_argument('--configuration', type=str, help='Configuration file for the job', default="./configuration.json")
parser.add_argument('--candidates', type=str, help='Configuration file for the job', default="./candidates.json")
parser.add_argument('--area', type=str, help='area for study', default="fr")
parser.add_argument('--period', type=str, help='time period', default="hourly")
args = parser.parse_args()
with open(args.configuration) as f:
configuration = json.load(f)
with open(args.candidates) as f:
candidates = json.load(f)
return configuration, candidates, args.area, args.period
file_extension = ".csv.gz"
if __name__ == '__main__':
configuration, candidates, area, period = set_job()
date_collect = datetime.utcnow().strftime("%Y%m%d_%H%M")
partition = datetime.utcnow().strftime('%Y%m%d')
timeframe = "now 1-H"
prefix = ""
wait_time = 5
if period == "daily":
timeframe = "now 1-d"
prefix = "daily_"
wait_time = 60
elif period == "weekly":
timeframe = "now 7-d"
prefix = "weekly_"
wait_time = 60
s3_client = boto3.client('s3', aws_access_key_id=configuration["aws"]["key"], aws_secret_access_key=configuration["aws"]["secret"])
if area == "fr":
pytrends = TrendReq(hl='fr-FR', tz=360, timeout=(5,10))
else:
pytrends = TrendReq(tz=360, timeout=(5,10))
for key, item in candidates.items():
print(key, item["name"])
kw_list = [item["name"]]
file_name = f"{key}_{date_collect}{file_extension}"
if area == "fr":
pytrends.build_payload(kw_list, cat=0, timeframe=timeframe, geo='FR')
else:
pytrends.build_payload(kw_list, cat=0, timeframe=timeframe)
# Get the interest over time
dfp_iot = pytrends.interest_over_time()
if len(dfp_iot) > 0:
dfp_iot.columns = ["interest", "is_partial"]
dfp_iot.reset_index(inplace=True)
dfp_iot["candidate"] = key
dfp_iot["date_collect"] = date_collect
dfp_iot.to_csv("tmp_iot.csv.gz", index=None)
# Upload the file to s3
response = s3_client.upload_file("tmp_iot.csv.gz", configuration["aws"]["bucket"], f'data/raw/google_trends/{area}/{prefix}interest_over_time/{partition}/{file_name}')
# Get the interest on region
dfp_ibr = pytrends.interest_by_region(resolution='COUNTRY', inc_low_vol=True, inc_geo_code=False)
if len(dfp_ibr) > 0:
dfp_ibr.columns = ["interest"]
dfp_ibr.reset_index(inplace=True)
dfp_ibr["candidate"] = key
dfp_ibr["date_collect"] = date_collect
dfp_ibr.to_csv("tmp_ibr.csv.gz", index=None)
# Upload the file to s3
response = s3_client.upload_file("tmp_ibr.csv.gz", configuration["aws"]["bucket"], f'data/raw/google_trends/{area}/{prefix}interest_by_region/{partition}/{file_name}')
dict_related_topics = pytrends.related_topics()
for key_rt, dfp_rt in dict_related_topics[kw_list[0]].items():
if isinstance(dfp_rt, pd.DataFrame):
dfp_rt["candidate"] = key
dfp_rt["date_collect"] = date_collect
dfp_rt.to_csv("tmp_rt.csv.gz", index=None)
# Upload the file to s3
response = s3_client.upload_file("tmp_rt.csv.gz", configuration["aws"]["bucket"], f'data/raw/google_trends/{area}/{prefix}related_topics_{key_rt}/{partition}/{file_name}')
dict_related_queries = pytrends.related_queries()
for key_rq, dfp_rq in dict_related_queries[kw_list[0]].items():
if isinstance(dfp_rq, pd.DataFrame):
dfp_rq["candidate"] = key
dfp_rq["date_collect"] = date_collect
dfp_rq.to_csv("tmp_rq.csv.gz", index=None)
# Upload the file to s3
response = s3_client.upload_file("tmp_rq.csv.gz", configuration["aws"]["bucket"], f'data/raw/google_trends/{area}/{prefix}related_queries_{key_rq}/{partition}/{file_name}')
sleep(wait_time)
# break
```
|
{
"source": "jeanmira/Trabalho-sobre-metodos-numericos",
"score": 3
}
|
#### File: jeanmira/Trabalho-sobre-metodos-numericos/main.py
```python
import matplotlib.pyplot as plt
import biblioteca as bib
import numpy as np
# -----------------------------------------------------------------------------#
def f(s, a):
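# Right-hand side of the arc-length ODE system: state a = (fi, r, z) with Bond number bo (axisymmetric drop-shape / Young-Laplace type problem).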
fi, r, z = a
bo = 0.4
if(s != 0):
return np.array([2-bo*z-np.sin(fi)/r, np.cos(fi), np.sin(fi)])
else:
return np.array([2-bo*z, np.cos(fi), np.sin(fi)])
# Euler's numerical method
se, re = bib.edoEuler(f, (0, 0, 0), 0, 400, 0.01)
# Heun's numerical method
sh, rh = bib.edoHeun(f, (0, 0, 0), 0, 400, 0.01)
# Runge-Kutta numerical method
sr, rr = bib.edoRungeKutta(f, (0, 0, 0), 0, 400, 0.01)
# Runge-Kutta-Fehlberg numerical method
sf, rf = bib.edoRungeKuttaFehlberg(
f, (0, 0, 0), 0, 52, 0.01, 10 ** -3, 4, 0.1)
# bib.planilha(400, se, re, sh, rh, sr, rr, sf, rf)
bib.grafico(se, re, sh, rh, sr, rr, sf, rf)
# bib.gota(re, rh, rr, rf)
```
|
{
"source": "jeanmonet/html_form_to_dict",
"score": 3
}
|
#### File: html_form_to_dict/tests/test_html_form_to_dict.py
```python
import pytest
from html_form_to_dict import html_form_to_dict
def test_html_form_to_dict__empty():
html = '''
<form>
<input type="text" name="my-input">
<textarea name="my-textarea">\n</textarea>
</form>'''
assert html_form_to_dict(html) == {'my-input': '', 'my-textarea': ''}
def test_html_form_to_dict__with_value():
html = '''
<form>
<input type="text" name="my-input" value="my-input-value">
<textarea name="my-textarea">\nmy-textarea-value</textarea>
<input type="checkbox" name="my-checkbox" value="my-checkbox-value" checked>
</form>'''
assert html_form_to_dict(html) == {'my-input': 'my-input-value',
'my-textarea': 'my-textarea-value',
'my-checkbox': 'my-checkbox-value',
}
def test_html_form_to_dict__checkboxes_checked():
html = '''
<form>
<input type="checkbox" name="my-checkbox" value="v1" checked>
<input type="checkbox" name="my-checkbox" value="v2" checked>
</form>'''
assert html_form_to_dict(html) == {
'my-checkbox': ['v1', 'v2'],
}
def test_html_form_to_dict__checkboxes_unchecked():
html = '''
<form>
<input type="checkbox" name="my-checkbox" value="v1">
<input type="checkbox" name="my-checkbox" value="v2">
</form>'''
assert html_form_to_dict(html) == {'my-checkbox': []}
def test_html_form_to_dict__unknown_key():
html = '''
<form>
<input type="checkbox" name="name" value="value">
</form>'''
data = html_form_to_dict(html)
with pytest.raises(KeyError):
data['typo']
def test_html_form_to_dict__select_single():
html = '''
<form>
<select name="cars" id="cars">
<option value="volvo">Volvo</option>
<option value="saab" selected>Saab</option>
<option value="mercedes">Mercedes</option>
</select>
<form>
'''
assert html_form_to_dict(html) == {'cars': 'saab'}
def test_html_form_to_dict__select_multiple():
html = '''
<form>
<select name="cars" id="cars" multiple>
<option value="volvo" selected>Volvo</option>
<option value="saab">Saab</option>
<option value="mercedes" selected>Mercedes</option>
</select>
<form>
'''
assert html_form_to_dict(html) == {'cars': ['volvo', 'mercedes']}
def test_form_by_index_name_or_id():
html = '''
<form name="one" id="id1">
<input type="text" name="my_input" value="some value">
</form>
<form name="two" id="id2">
<input type="text" name="my_input" value="some other value">
</form>
'''
# by name
assert html_form_to_dict(html, name="one") == {'my_input': 'some value'}
assert html_form_to_dict(html, name="two") == {'my_input': 'some other value'}
with pytest.raises(ValueError) as excinfo:
html_form_to_dict(html, name='unknown')
assert str(excinfo.value) == '''No form with name="unknown" found. Found forms with these names: ['one', 'two']'''
# by id
assert html_form_to_dict(html, id="id1") == {'my_input': 'some value'}
assert html_form_to_dict(html, id="id2") == {'my_input': 'some other value'}
with pytest.raises(ValueError) as excinfo:
html_form_to_dict(html, id='unknown')
assert str(excinfo.value) == '''No form with id="unknown" found. Found forms with these ids: ['id1', 'id2']'''
# by index
assert html_form_to_dict(html, 1) == {'my_input': 'some other value'}
with pytest.raises(IndexError):
html_form_to_dict(html, 2)
class DummyClient:
def __init__(self):
self.calls = []
def get(self, url, data):
self.calls.append(('get', url, data))
def post(self, url, data):
self.calls.append(('post', url, data))
def test_form_data__submit():
html = '''
<form action="my-url">
<input type="text" name="my_input" value="some value">
</form>'''
data = html_form_to_dict(html)
client = DummyClient()
data.submit(client)
assert client.calls == [('get', 'my-url', {'my_input': 'some value'})]
html = '''
<form action="my-url" method=POST>
<input type="text" name="my_input" value="some value">
</form>'''
data = html_form_to_dict(html)
client = DummyClient()
data.submit(client)
assert client.calls == [('post', 'my-url', {'my_input': 'some value'})]
html = '''
<form hx-get="my-url">
<input type="text" name="my_input" value="some value">
</form>'''
data = html_form_to_dict(html)
client = DummyClient()
data.submit(client)
assert client.calls == [('get', 'my-url', {'my_input': 'some value'})]
html = '''
<form hx-post="my-url">
<input type="text" name="my_input" value="some value">
</form>'''
data = html_form_to_dict(html)
client = DummyClient()
data.submit(client)
assert client.calls == [('post', 'my-url', {'my_input': 'some value'})]
```
|
{
"source": "jeanmonet/jupyterlab-git",
"score": 2
}
|
#### File: jupyterlab_git/tests/test_ignore.py
```python
import pytest
from jupyterlab_git.git import Git
from .testutils import FakeContentManager, maybe_future
@pytest.mark.parametrize("ignore_content", [None, "dummy", "dummy\n"])
@pytest.mark.asyncio
async def test_ensure_gitignore(tmp_path, ignore_content):
# Given
ignore_file = tmp_path / ".gitignore"
if ignore_content is not None:
ignore_file.write_text(ignore_content)
# When
actual_response = await Git(FakeContentManager("/bin")).ensure_gitignore(
str(tmp_path)
)
# Then
assert {"code": 0} == actual_response
content = ignore_file.read_text()
assert len(content) == 0 or content.endswith("\n")
@pytest.mark.asyncio
async def test_ensure_gitignore_failure(tmp_path):
# Given
ignore_file = tmp_path / ".gitignore"
ignore_file.write_text("dummy")
ignore_file.chmod(200) # Set read only to generate an error
# When
response = await Git(FakeContentManager("/bin")).ensure_gitignore(str(tmp_path))
# Then
assert response["code"] == -1
@pytest.mark.asyncio
async def test_ignore(tmp_path):
# Given
ignore_file = tmp_path / ".gitignore"
ignore_file.write_text("dummy")
file_ignore = "to_ignore.txt"
# When
response = await Git(FakeContentManager("/bin")).ignore(str(tmp_path), file_ignore)
# Then
assert {"code": 0} == response
content = ignore_file.read_text()
content.endswith("{}\n".format(file_ignore))
@pytest.mark.asyncio
async def test_ignore_failure(tmp_path):
# Given
ignore_file = tmp_path / ".gitignore"
ignore_file.write_text("dummy")
ignore_file.chmod(200) # Set read only to generate an error
# When
response = await Git(FakeContentManager("/bin")).ignore(
str(tmp_path), "to_ignore.txt"
)
# Then
assert response["code"] == -1
```
|
{
"source": "jeanmonet/twisted",
"score": 2
}
|
#### File: conch/ssh/_keys_pynacl.py
```python
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ed25519
from nacl.exceptions import BadSignatureError
from nacl.signing import SigningKey, VerifyKey
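# PyNaCl-backed implementations of the "cryptography" Ed25519 public/private key interfaces.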
class Ed25519PublicKey(ed25519.Ed25519PublicKey):
def __init__(self, data: bytes):
self._key = VerifyKey(data)
def __bytes__(self) -> bytes:
return bytes(self._key)
def __hash__(self) -> int:
return hash(bytes(self))
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return False
return self._key == other._key
def __ne__(self, other: object) -> bool:
return not (self == other)
@classmethod
def from_public_bytes(cls, data: bytes) -> ed25519.Ed25519PublicKey:
return cls(data)
def public_bytes(
self,
encoding: serialization.Encoding,
format: serialization.PublicFormat,
) -> bytes:
if (
encoding is not serialization.Encoding.Raw
or format is not serialization.PublicFormat.Raw
):
raise ValueError("Both encoding and format must be Raw")
return bytes(self)
def verify(self, signature: bytes, data: bytes) -> None:
try:
self._key.verify(data, signature)
except BadSignatureError as e:
raise InvalidSignature(str(e))
class Ed25519PrivateKey(ed25519.Ed25519PrivateKey):
def __init__(self, data: bytes):
self._key = SigningKey(data)
def __bytes__(self) -> bytes:
return bytes(self._key)
def __hash__(self) -> int:
return hash(bytes(self))
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return False
return self._key == other._key
def __ne__(self, other: object) -> bool:
return not (self == other)
@classmethod
def generate(cls) -> ed25519.Ed25519PrivateKey:
return cls(bytes(SigningKey.generate()))
@classmethod
def from_private_bytes(cls, data: bytes) -> ed25519.Ed25519PrivateKey:
return cls(data)
def public_key(self) -> ed25519.Ed25519PublicKey:
return Ed25519PublicKey(bytes(self._key.verify_key))
def private_bytes(
self,
encoding: serialization.Encoding,
format: serialization.PrivateFormat,
encryption_algorithm: serialization.KeySerializationEncryption,
) -> bytes:
if (
encoding is not serialization.Encoding.Raw
or format is not serialization.PrivateFormat.Raw
or not isinstance(encryption_algorithm, serialization.NoEncryption)
):
raise ValueError(
"Encoding and format must be Raw and "
"encryption_algorithm must be NoEncryption"
)
return bytes(self)
def sign(self, data: bytes) -> bytes:
return self._key.sign(data).signature
```
|
{
"source": "jeanmonod/taxi",
"score": 2
}
|
#### File: taxi/taxi/commands.py
```python
from ConfigParser import NoOptionError
import calendar
import datetime
from taxi import remote
from taxi.exceptions import CancelException, UsageError
from taxi.projects import Project
from taxi.timesheet import (
NoActivityInProgressError, Timesheet, TimesheetCollection, TimesheetFile
)
from taxi.timesheet.entry import TimesheetEntry, EntriesCollection
from taxi.timesheet.parser import ParseError
from taxi.settings import Settings
from taxi.utils import file
from taxi.utils.structures import OrderedSet
class BaseCommand(object):
def __init__(self, app_container):
self.options = app_container.options
self.arguments = app_container.arguments
self.view = app_container.view
self.projects_db = app_container.projects_db
self.settings = app_container.settings
def setup(self):
pass
def validate(self):
pass
def run(self):
pass
class BaseTimesheetCommand(BaseCommand):
def get_timesheet_collection(self, skip_cache=False):
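# Parse the current and previous timesheet files into a cached TimesheetCollection, applying alias mappings and the auto_add direction setting.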
timesheet_collection = getattr(self, '_current_timesheet_collection',
None)
if timesheet_collection is not None and not skip_cache:
return timesheet_collection
timesheet_collection = TimesheetCollection()
timesheet_files = self.get_files(
self.options['unparsed_file'],
int(self.settings.get('nb_previous_files'))
)
self.alias_mappings = self.settings.get_aliases()
for file_path in timesheet_files:
timesheet_file = TimesheetFile(file_path)
try:
timesheet_contents = timesheet_file.read()
except IOError:
timesheet_contents = ''
t = Timesheet(
EntriesCollection(
timesheet_contents,
self.settings.get('date_format')
),
self.alias_mappings,
timesheet_file
)
# Force new entries direction if necessary
if (self.settings.get('auto_add') in [
Settings.AUTO_ADD_OPTIONS['TOP'],
Settings.AUTO_ADD_OPTIONS['BOTTOM']]):
t.entries.add_date_to_bottom = (
self.settings.get('auto_add') ==
Settings.AUTO_ADD_OPTIONS['BOTTOM']
)
timesheet_collection.timesheets.append(t)
# Fix `add_date_to_bottom` attribute of timesheet entries based on
# previous timesheets. When a new timesheet is started it won't have
# any direction defined, so we take the one from the previous
# timesheet, if any
previous_timesheet = None
for timesheet in reversed(timesheet_collection.timesheets):
if (timesheet.entries.add_date_to_bottom is None
and previous_timesheet
and previous_timesheet.entries.add_date_to_bottom
is not None):
timesheet.entries.add_date_to_bottom = (
previous_timesheet.entries.add_date_to_bottom
)
previous_timesheet = timesheet
setattr(self, '_current_timesheet_collection', timesheet_collection)
return timesheet_collection
def get_files(self, filename, nb_previous_files):
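# Expand the date placeholders (%m, %Y) in filename and return the current file plus the files for the nb_previous_files previous months/years.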
date_units = ['m', 'Y']
smallest_unit = None
for date in date_units:
if '%%%s' % date in filename:
smallest_unit = date
break
if smallest_unit is None:
return OrderedSet([filename])
files = OrderedSet()
file_date = datetime.date.today()
for i in xrange(0, nb_previous_files + 1):
files.add(file.expand_filename(filename, file_date))
if smallest_unit == 'm':
if file_date.month == 1:
file_date = file_date.replace(day=1,
month=12,
year=file_date.year - 1)
else:
file_date = file_date.replace(day=1,
month=file_date.month - 1)
elif smallest_unit == 'Y':
file_date = file_date.replace(day=1, year=file_date.year - 1)
return files
class AddCommand(BaseCommand):
"""
Usage: add search_string
Searches and prompts for project, activity and alias and adds that as a new
entry to .tksrc.
"""
def validate(self):
if len(self.arguments) < 1:
raise UsageError()
def run(self):
search = self.arguments
projects = self.projects_db.search(search, active_only=True)
projects = sorted(projects, key=lambda project: project.name)
if len(projects) == 0:
self.view.msg(
u"No active project matches your search string '%s'" %
''.join(search)
)
return
self.view.projects_list(projects, True)
try:
number = self.view.select_project(projects)
except CancelException:
return
project = projects[number]
mappings = self.settings.get_reversed_aliases()
self.view.project_with_activities(project, mappings,
numbered_activities=True)
try:
number = self.view.select_activity(project.activities)
except CancelException:
return
retry = True
while retry:
try:
alias = self.view.select_alias()
except CancelException:
return
if self.settings.activity_exists(alias):
mapping = self.settings.get_aliases()[alias]
overwrite = self.view.overwrite_alias(alias, mapping)
if not overwrite:
return
elif overwrite:
retry = False
# User chose "retry"
else:
retry = True
else:
retry = False
activity = project.activities[number]
self.settings.add_alias(alias, project.id, activity.id)
self.settings.write_config()
self.view.alias_added(alias, (project.id, activity.id))
class AliasCommand(BaseCommand):
"""
Usage: alias [alias]
alias [project_id]
alias [project_id/activity_id]
alias [alias] [project_id/activity_id]
- The first form will display the mappings whose aliases start with the
search string you entered
- The second form will display the mapping(s) you've defined for this
project and all of its activities
- The third form will display the mapping you've defined for this exact
project/activity tuple
- The last form will add a new alias in your configuration file
You can also run this command without any argument to view all your
mappings.
"""
MODE_SHOW_MAPPING = 0
MODE_ADD_ALIAS = 1
MODE_LIST_ALIASES = 2
def validate(self):
if len(self.arguments) > 2:
raise UsageError()
def setup(self):
if len(self.arguments) == 2:
self.alias = self.arguments[0]
self.mapping = self.arguments[1]
self.mode = self.MODE_ADD_ALIAS
elif len(self.arguments) == 1:
self.alias = self.arguments[0]
self.mode = self.MODE_SHOW_MAPPING
else:
self.alias = None
self.mode = self.MODE_LIST_ALIASES
def run(self):
# 2 arguments, add a new alias
if self.mode == self.MODE_ADD_ALIAS:
self._add_alias(self.alias, self.mapping)
# 1 argument, display the alias or the project id/activity id tuple
elif self.mode == self.MODE_SHOW_MAPPING:
mapping = Project.str_to_tuple(self.alias)
if mapping is not None:
for m in self.settings.search_aliases(mapping):
self.view.mapping_detail(m, self.projects_db.get(m[1][0]))
else:
self.mode = self.MODE_LIST_ALIASES
# No argument, display the mappings
if self.mode == self.MODE_LIST_ALIASES:
for m in self.settings.search_mappings(self.alias):
self.view.alias_detail(
m,
self.projects_db.get(m[1][0]) if m[1] is not None else None
)
def _add_alias(self, alias_name, mapping):
project_activity = Project.str_to_tuple(mapping)
if project_activity is None:
raise UsageError("The mapping must be in the format xxxx/yyyy")
if self.settings.activity_exists(alias_name):
existing_mapping = self.settings.get_aliases()[alias_name]
confirm = self.view.overwrite_alias(alias_name, existing_mapping,
False)
if not confirm:
return
self.settings.add_alias(alias_name, project_activity[0],
project_activity[1])
self.settings.write_config()
self.view.alias_added(alias_name, project_activity)
class AutofillCommand(BaseTimesheetCommand):
"""
Usage: autofill
Fills your timesheet up to today, for the defined auto_fill_days.
"""
def run(self):
auto_fill_days = self.settings.get_auto_fill_days()
if auto_fill_days:
today = datetime.date.today()
last_day = calendar.monthrange(today.year, today.month)
last_date = datetime.date(today.year, today.month, last_day[1])
timesheet_collection = self.get_timesheet_collection()
t = timesheet_collection.timesheets[0]
t.prefill(auto_fill_days, last_date)
t.file.write(t.entries)
self.view.msg(u"Your entries file has been filled.")
else:
self.view.err(u"The parameter `auto_fill_days` must be set to "
"use this command.")
class KittyCommand(BaseCommand):
"""
|\ _,,,---,,_
/,`.-'`' -. ;-;;,_
|,4- ) )-,_..;\ ( `'-'
'---''(_/--' `-'\_)
Soft kitty, warm kitty
Little ball of fur
Happy kitty, sleepy kitty
Purr, purr, purr
"""
def run(self):
self.view.msg(self.__doc__)
class CleanAliasesCommand(BaseCommand):
"""
Usage: clean-aliases
Removes aliases from your config file that point to inactive projects.
"""
def run(self):
aliases = self.settings.get_aliases()
inactive_aliases = []
for (alias, mapping) in aliases.iteritems():
# Ignore local aliases
if mapping is None:
continue
project = self.projects_db.get(mapping[0])
if (project is None or not project.is_active() or
(mapping[1] is not None
and project.get_activity(mapping[1]) is None)):
inactive_aliases.append(((alias, mapping), project))
if not inactive_aliases:
self.view.msg(u"No inactive aliases found.")
return
if not self.options.get('force_yes'):
confirm = self.view.clean_inactive_aliases(inactive_aliases)
if self.options.get('force_yes') or confirm:
self.settings.remove_aliases(
[item[0][0] for item in inactive_aliases]
)
self.settings.write_config()
self.view.msg(u"%d inactive aliases have been successfully"
" cleaned." % len(inactive_aliases))
class CommitCommand(BaseTimesheetCommand):
"""
Usage: commit
Commits your work to the server.
"""
def run(self):
timesheet_collection = self.get_timesheet_collection()
if (self.options.get('date', None) is None
and not self.options.get('ignore_date_error', False)):
non_workday_entries = (
timesheet_collection.get_non_current_workday_entries()
)
if non_workday_entries:
self.view.non_working_dates_commit_error(
non_workday_entries.keys()
)
return
self.view.pushing_entries()
r = remote.ZebraRemote(self.settings.get('site'),
self.settings.get('username'),
self.settings.get('password'))
all_pushed_entries = []
all_failed_entries = []
for timesheet in timesheet_collection.timesheets:
entries_to_push = timesheet.get_entries(
self.options.get('date', None), exclude_ignored=True,
exclude_local=True, exclude_unmapped=True, regroup=True
)
(pushed_entries, failed_entries) = r.send_entries(
entries_to_push, self.alias_mappings, self._entry_pushed
)
local_entries = timesheet.get_local_entries(
self.options.get('date', None)
)
local_entries_list = []
for (date, entries) in local_entries.iteritems():
local_entries_list.extend(entries)
for entry in local_entries_list + pushed_entries:
entry.commented = True
for (entry, _) in failed_entries:
entry.fix_start_time()
# Also fix start time for ignored entries. Since they won't get
# pushed, there's a chance their previous sibling gets commented
for (date, entries) in timesheet.get_ignored_entries().items():
for entry in entries:
entry.fix_start_time()
timesheet.file.write(timesheet.entries)
all_pushed_entries.extend(pushed_entries)
all_failed_entries.extend(failed_entries)
ignored_entries = timesheet_collection.get_ignored_entries(
self.options.get('date', None)
)
ignored_entries_list = []
for (date, entries) in ignored_entries.iteritems():
ignored_entries_list.extend(entries)
self.view.pushed_entries_summary(all_pushed_entries,
all_failed_entries,
ignored_entries_list)
def _entry_pushed(self, entry, error):
self.view.pushed_entry(entry, error, self.alias_mappings)
class EditCommand(BaseTimesheetCommand):
"""
Usage: edit
Opens your zebra file in your favourite editor.
"""
def run(self):
timesheet_collection = None
try:
timesheet_collection = self.get_timesheet_collection()
except ParseError:
pass
if timesheet_collection:
t = timesheet_collection.timesheets[0]
if (self.settings.get('auto_add') !=
Settings.AUTO_ADD_OPTIONS['NO']
and not self.options.get('forced_file')):
auto_fill_days = self.settings.get_auto_fill_days()
if auto_fill_days:
t.prefill(auto_fill_days, limit=None)
t.file.write(t.entries)
try:
editor = self.settings.get('editor')
except NoOptionError:
editor = None
file.spawn_editor(self.options['file'], editor)
try:
timesheet_collection = self.get_timesheet_collection(True)
except ParseError as e:
self.view.err(e)
else:
self.view.show_status(
timesheet_collection.get_entries(regroup=True),
self.alias_mappings, self.settings
)
class HelpCommand(BaseCommand):
"""
YO DAWG you asked for help for the help command. Try to search Google in
Google instead.
"""
def __init__(self, application_container):
super(HelpCommand, self).__init__(application_container)
self.commands_mapping = application_container.commands_mapping
def setup(self):
if len(self.arguments) == 0:
raise UsageError()
else:
self.command = self.arguments[0]
def run(self):
if self.command == 'help':
self.view.command_usage(self)
else:
if self.command in self.commands_mapping:
self.view.command_usage(self.commands_mapping[self.command])
else:
self.view.err(u"Command %s doesn't exist." % self.command)
class SearchCommand(BaseCommand):
"""
Usage: search search_string
Searches for a project by its name. The letter in the first column
indicates the status of the project: [N]ot started, [A]ctive, [F]inished,
[C]ancelled.
"""
def validate(self):
if len(self.arguments) < 1:
raise UsageError()
def run(self):
projects = self.projects_db.search(self.arguments)
projects = sorted(projects, key=lambda project: project.name.lower())
self.view.search_results(projects)
class ShowCommand(BaseCommand):
"""
Usage: show project_id
Shows the details of the given project_id (you can find it with the search
command).
"""
def validate(self):
if len(self.arguments) < 1:
raise UsageError()
try:
int(self.arguments[0])
except ValueError:
raise UsageError("The project id must be a number")
def setup(self):
self.project_id = int(self.arguments[0])
def run(self):
try:
project = self.projects_db.get(self.project_id)
except IOError:
raise Exception("Error: the projects database file doesn't exist. "
"Please run `taxi update` to create it")
if project is None:
self.view.err(
u"The project `%s` doesn't exist" % (self.project_id)
)
else:
mappings = self.settings.get_reversed_aliases()
self.view.project_with_activities(project, mappings)
class StartCommand(BaseTimesheetCommand):
"""
Usage: start project_name
Use it when you start working on the project project_name. This will add
the project name and the current time to your entries file. When you're
finished, use the stop command.
"""
def validate(self):
if len(self.arguments) != 1:
raise UsageError()
def setup(self):
self.project_name = self.arguments[0]
def run(self):
today = datetime.date.today()
try:
timesheet_collection = self.get_timesheet_collection()
except ParseError as e:
self.view.err(e)
return
t = timesheet_collection.timesheets[0]
# If there's a previous entry on the same date, check if we can use its
# end time as a start time for the newly started entry
today_entries = t.get_entries(today)
if(today in today_entries and today_entries[today]
and isinstance(today_entries[today][-1].duration, tuple)
and today_entries[today][-1].duration[1] is not None):
new_entry_start_time = today_entries[today][-1].duration[1]
else:
new_entry_start_time = datetime.datetime.now()
duration = (new_entry_start_time, None)
e = TimesheetEntry(self.project_name, duration, '?')
t.entries[today].append(e)
t.file.write(t.entries)
class StatusCommand(BaseTimesheetCommand):
"""
Usage: status
Shows the summary of what's going to be committed to the server.
"""
def setup(self):
self.date = self.options.get('date', None)
def run(self):
try:
timesheet_collection = self.get_timesheet_collection()
except ParseError as e:
self.view.err(e)
else:
self.view.show_status(
timesheet_collection.get_entries(self.date, regroup=True),
self.alias_mappings,
self.settings
)
class StopCommand(BaseTimesheetCommand):
"""
Usage: stop [description]
Use it when you stop working on the current task. You can add a description
to what you've done.
"""
def setup(self):
if len(self.arguments) == 0:
self.description = None
else:
self.description = ' '.join(self.arguments)
def run(self):
try:
timesheet_collection = self.get_timesheet_collection()
current_timesheet = timesheet_collection.timesheets[0]
current_timesheet.continue_entry(
datetime.date.today(),
datetime.datetime.now().time(),
self.description
)
except ParseError as e:
self.view.err(e)
except NoActivityInProgressError:
self.view.err(u"You don't have any activity in progress for today")
else:
current_timesheet.file.write(current_timesheet.entries)
class UpdateCommand(BaseCommand):
"""
Usage: update
Synchronizes your project database with the server and updates the shared
aliases.
"""
def setup(self):
self.site = self.settings.get('site')
self.username = self.settings.get('username')
self.password = self.settings.get('password')
def run(self):
self.view.updating_projects_database()
aliases_before_update = self.settings.get_aliases()
local_aliases = self.settings.get_aliases(include_shared=False)
r = remote.ZebraRemote(self.site, self.username, self.password)
projects = r.get_projects()
self.projects_db.update(projects)
# Put the shared aliases in the config file
shared_aliases = {}
for project in projects:
if project.is_active():
for alias, activity_id in project.aliases.iteritems():
self.settings.add_shared_alias(alias, project.id,
activity_id)
shared_aliases[alias] = (project.id, activity_id)
aliases_after_update = self.settings.get_aliases()
self.settings.write_config()
self.view.projects_database_update_success(aliases_before_update,
aliases_after_update,
local_aliases,
shared_aliases,
self.projects_db)
```
#### File: tests/commands/test_edit.py
```python
import os
import tempfile
from freezegun import freeze_time
from taxi.utils.file import expand_filename
from . import CommandTestCase
class EditCommandTestCase(CommandTestCase):
@freeze_time('2014-01-21')
def test_autofill_with_specified_file(self):
"""
Edit with specified date should not autofill it.
"""
config = self.default_config.copy()
options = self.default_options.copy()
config['default']['auto_fill_days'] = '0,1,2,3,4,5,6'
options['file'] = self.entries_file
self.run_command('edit', options=options)
with open(self.entries_file, 'r') as f:
self.assertEqual(f.read(), '')
@freeze_time('2014-01-21')
def test_edit_utf8_file(self):
"""
Editing a file that contains accents should not crash.
"""
self.write_entries("""20/01/2014
alias_1 2 préparation du café pour l'évènement
""")
self.run_command('edit')
def test_edit_status(self):
config = self.default_config.copy()
tmp_entries_dir = tempfile.mkdtemp()
os.remove(self.entries_file)
self.entries_file = os.path.join(tmp_entries_dir, '%m_%Y.txt')
config['default']['file'] = self.entries_file
with freeze_time('2014-01-21'):
self.write_entries("""20/01/2014
alias_1 2 hello world
""")
with freeze_time('2014-02-21'):
self.write_entries("""20/02/2014
alias_1 2 hello world
""")
stdout = self.run_command('edit', config_options=config)
self.assertIn('Monday 20 january', stdout)
def test_prefill_entries_add_to_bottom(self):
config = self.default_config.copy()
tmp_entries_dir = tempfile.mkdtemp()
os.remove(self.entries_file)
self.entries_file = os.path.join(tmp_entries_dir, '%m_%Y.txt')
config['default']['file'] = self.entries_file
with freeze_time('2014-01-21'):
self.write_entries("""20/01/2014
alias_1 2 hello world
21/01/2014
alias_1 1 foo bar
""")
with freeze_time('2014-02-21'):
self.write_entries("""20/02/2014
alias_1 2 hello world
""")
self.run_command('edit', config_options=config)
with open(expand_filename(self.entries_file), 'r') as f:
lines = f.readlines()
self.assertEqual('20/02/2014\n', lines[0])
self.assertEqual('21/02/2014\n', lines[3])
```
|
{
"source": "jean-moorman/mjrl",
"score": 2
}
|
#### File: algos/model_accel/model_accel_npg.py
```python
import numpy as np
import copy
import torch
import torch.nn as nn
import pickle
import mjrl.envs
import os
import time as timer
from torch.autograd import Variable
from mjrl.utils.gym_env import GymEnv
from mjrl.algos.model_accel.nn_dynamics import WorldModel
import mjrl.samplers.core as trajectory_sampler
# utility functions
import mjrl.utils.process_samples as process_samples
from mjrl.utils.logger import DataLog
from mjrl.algos.model_accel.sampling import policy_rollout
# Import NPG
from mjrl.algos.npg_cg import NPG
class ModelAccelNPG(NPG):
def __init__(self, learned_model=None,
refine=False,
kappa=5.0,
plan_horizon=10,
plan_paths=100,
reward_function=None,
termination_function=None,
**kwargs):
super(ModelAccelNPG, self).__init__(**kwargs)
if learned_model is None:
print("Algorithm requires a (list of) learned dynamics model")
quit()
elif isinstance(learned_model, WorldModel):
self.learned_model = [learned_model]
else:
self.learned_model = learned_model
self.refine, self.kappa, self.plan_horizon, self.plan_paths = refine, kappa, plan_horizon, plan_paths
self.reward_function, self.termination_function = reward_function, termination_function
def to(self, device):
# Convert all the networks (except policy network which is clamped to CPU)
# to the specified device
for model in self.learned_model:
model.to(device)
try: self.baseline.model.to(device)
except: pass
def is_cuda(self):
# Check if any of the networks are on GPU
model_cuda = [model.is_cuda() for model in self.learned_model]
model_cuda = any(model_cuda)
baseline_cuda = next(self.baseline.model.parameters()).is_cuda
return any([model_cuda, baseline_cuda])
def train_step(self, N,
env=None,
sample_mode='trajectories',
horizon=1e6,
gamma=0.995,
gae_lambda=0.97,
num_cpu='max',
env_kwargs=None,
init_states=None,
reward_function=None,
termination_function=None,
truncate_lim=None,
truncate_reward=0.0,
**kwargs,
):
ts = timer.time()
# get the correct env behavior
if env is None:
env = self.env
elif type(env) == str:
env = GymEnv(env)
elif isinstance(env, GymEnv):
env = env
elif callable(env):
env = env(**env_kwargs)
else:
print("Unsupported environment format")
raise AttributeError
# get correct behavior for reward and termination
reward_function = self.reward_function if reward_function is None else reward_function
termination_function = self.termination_function if termination_function is None else termination_function
if reward_function: assert callable(reward_function)
if termination_function: assert callable(termination_function)
# simulate trajectories with the learned model(s)
# we want to use the same task instances (e.g. goal locations) for each model in ensemble
paths = []
# NOTE: We can optionally specify a set of initial states to perform the rollouts from
# This is useful for starting rollouts from the states in the replay buffer
        init_states = [env.reset() for _ in range(N)] if init_states is None else init_states  # keep a list so the assertions below hold
assert type(init_states) == list
assert len(init_states) == N
for model in self.learned_model:
            # don't set the seed explicitly -- this will make rollouts follow the global seed
rollouts = policy_rollout(num_traj=N, env=env, policy=self.policy,
learned_model=model, eval_mode=False, horizon=horizon,
init_state=init_states, seed=None)
# use learned reward function if available
if model.learn_reward:
model.compute_path_rewards(rollouts)
else:
rollouts = reward_function(rollouts)
num_traj, horizon, state_dim = rollouts['observations'].shape
for i in range(num_traj):
path = dict()
obs = rollouts['observations'][i, :, :]
act = rollouts['actions'][i, :, :]
rew = rollouts['rewards'][i, :]
path['observations'] = obs
path['actions'] = act
path['rewards'] = rew
path['terminated'] = False
paths.append(path)
# NOTE: If tasks have termination condition, we will assume that the env has
# a function that can terminate paths appropriately.
# Otherwise, termination is not considered.
if callable(termination_function): paths = termination_function(paths)
# remove paths that are too short
paths = [path for path in paths if path['observations'].shape[0] >= 5]
# additional truncation based on error in the ensembles
if truncate_lim is not None and len(self.learned_model) > 1:
for path in paths:
pred_err = np.zeros(path['observations'].shape[0] - 1)
for model in self.learned_model:
s = path['observations'][:-1]
a = path['actions'][:-1]
s_next = path['observations'][1:]
pred = model.predict(s, a)
model_err = np.mean((s_next - pred)**2, axis=-1)
pred_err = np.maximum(pred_err, model_err)
violations = np.where(pred_err > truncate_lim)[0]
truncated = (not len(violations) == 0)
T = violations[0] + 1 if truncated else obs.shape[0]
T = max(4, T) # we don't want corner cases of very short truncation
path["observations"] = path["observations"][:T]
path["actions"] = path["actions"][:T]
path["rewards"] = path["rewards"][:T]
if truncated: path["rewards"][-1] += truncate_reward
path["terminated"] = False if T == obs.shape[0] else True
if self.save_logs:
self.logger.log_kv('time_sampling', timer.time() - ts)
self.seed = self.seed + N if self.seed is not None else self.seed
# compute returns
process_samples.compute_returns(paths, gamma)
# compute advantages
process_samples.compute_advantages(paths, self.baseline, gamma, gae_lambda)
# train from paths
eval_statistics = self.train_from_paths(paths)
eval_statistics.append(N)
# log number of samples
if self.save_logs:
num_samples = np.sum([p["rewards"].shape[0] for p in paths])
self.logger.log_kv('num_samples', num_samples)
# fit baseline
if self.save_logs:
ts = timer.time()
error_before, error_after = self.baseline.fit(paths, return_errors=True)
self.logger.log_kv('time_VF', timer.time()-ts)
self.logger.log_kv('VF_error_before', error_before)
self.logger.log_kv('VF_error_after', error_after)
else:
self.baseline.fit(paths)
return eval_statistics
def get_action(self, observation):
if self.refine is False:
return self.policy.get_action(observation)
else:
return self.get_refined_action(observation)
def get_refined_action(self, observation):
        # TODO(Aravind): Implement this
# This function should rollout many trajectories according to the learned
# dynamics model and the policy, and should refine around the policy by
# incorporating reward based refinement
raise NotImplementedError
```
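A hedged sketch of how `ModelAccelNPG` plugs into the rest of mjrl is given below. The environment id, network sizes and hyperparameters are illustrative assumptions, and the dynamics models are assumed to have been trained elsewhere; only the constructor and `train_step` calls follow the signatures shown above.
```python
# Illustrative wiring only; the env id, sizes and the pretrained model list are assumptions.
from mjrl.utils.gym_env import GymEnv
from mjrl.policies.gaussian_mlp import MLP
from mjrl.baselines.mlp_baseline import MLPBaseline
from mjrl.algos.model_accel.model_accel_npg import ModelAccelNPG

e = GymEnv('mjrl_swimmer-v0')                              # assumed environment id
policy = MLP(e.spec, hidden_sizes=(64, 64), seed=123)
baseline = MLPBaseline(e.spec, batch_size=64, epochs=2, learn_rate=1e-3)
models = [...]  # list of already-trained WorldModel instances (training not shown here)
agent = ModelAccelNPG(learned_model=models, env=e, policy=policy, baseline=baseline,
                      normalized_step_size=0.05, seed=123, save_logs=True)
# one policy update from model rollouts started at fresh reset states
stats = agent.train_step(N=32, init_states=[e.reset() for _ in range(32)], horizon=200)
```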
#### File: run_experiments/utils/visualize_trajectories.py
```python
import pickle
import click
import json
import numpy as np
import torch
import mjrl.envs
import trajopt.envs
import mj_envs
import mjrl.utils.tensor_utils as tensor_utils
from mjrl.utils.gym_env import GymEnv
from mjrl.algos.model_accel.sampling import evaluate_policy
DESC = '''
Helper script to visualize optimized trajectories (list of trajectories in trajopt format).\n
USAGE:\n
$ python viz_trajectories.py --file path_to_file.pickle\n
'''
@click.command(help=DESC)
@click.option('--file', type=str, help='pickle file with trajectories', required= True)
@click.option('--seed', type=int, default=123)
@click.option('--noise_level', type=float, default=0.0)
@click.option('--num_episodes', type=int, help='number of times to play trajectories', default=5)
@click.option('--config', type=str, help='if provided MPC params from here will be used.', default=None)
@click.option('--device_path', type=str, default=None)
def main(file, seed, noise_level, num_episodes, config, device_path):
exp_data = pickle.load(open(file, 'rb'))
policy = exp_data['policy']
model = exp_data['fitted_model']
model = model[-1] if type(model) == list else model
env_id = policy.env.env_id
render = True
# TODO(Aravind): Map to hardware if device_path is specified
env = GymEnv(env_id)
policy.env = env
env.set_seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if config is not None:
try:
with open(config, 'r') as f:
config = eval(f.read())
except:
with open(config, 'r') as f:
config = json.load(f)
policy.plan_horizon = config['plan_horizon']
policy.num_traj = config['plan_paths']
policy.kappa = config['kappa']
policy.filter_coefs = [config['filter_coefs'][k] for k in ['f1', 'f2', 'f3', 'f4']]
policy.omega = config['omega'] if 'omega' in config.keys() else 0.0
# TODO(Aravind): Implement capability to set predicted state for rendering purposes
# evaluate_policy(env, policy, model, noise_level, real_step=False, num_episodes=num_episodes, visualize=render)
evaluate_policy(env, policy, model, noise_level, real_step=True, num_episodes=num_episodes, visualize=render)
# final close out
env.reset()
if __name__ == '__main__':
main()
```
#### File: mjrl/envs/swimmer.py
```python
import numpy as np
from gym import utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
class SwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, 'swimmer.xml', 5)
utils.EzPickle.__init__(self)
def step(self, a):
xposbefore = self.data.qpos[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.data.qpos[0]
delta = (xposafter - xposbefore)
# make agent move in the negative x direction
reward = -10.0 * delta
done = False
ob = self.get_obs()
return ob, reward, done, self.get_env_infos()
def get_obs(self):
return np.concatenate([
self.data.qpos.flat[2:],
self.data.qvel.flat,
])
def reset_model(self):
qpos_init = self.init_qpos.copy()
qpos_init[2] = self.np_random.uniform(low=-np.pi, high=np.pi)
self.set_state(qpos_init, self.init_qvel)
self.sim.forward()
return self.get_obs()
# --------------------------------
# get and set states
# --------------------------------
def get_env_state(self):
return dict(qp=self.data.qpos.copy(), qv=self.data.qvel.copy())
def set_env_state(self, state):
self.sim.reset()
qp = state['qp'].copy()
qv = state['qv'].copy()
self.set_state(qp, qv)
self.sim.forward()
# --------------------------------
# utility functions
# --------------------------------
def get_env_infos(self):
return dict(state=self.get_env_state())
def mj_viewer_setup(self):
self.viewer = MjViewer(self.sim)
self.viewer.cam.trackbodyid = 1
self.viewer.cam.type = 1
self.sim.forward()
self.viewer.cam.distance = self.model.stat.extent*1.2
```
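Assuming the usual Gym-style `MujocoEnv` interface (`reset`, `step`, `action_space`), a short rollout with random actions would look like the sketch below; the number of steps is arbitrary.
```python
# Illustrative rollout with random actions (not part of the repository).
import numpy as np
from mjrl.envs.swimmer import SwimmerEnv

env = SwimmerEnv()
ob = env.reset()
total_reward = 0.0
for _ in range(100):
    action = np.random.uniform(-1.0, 1.0, size=env.action_space.shape)
    ob, reward, done, info = env.step(action)
    total_reward += reward
state = env.get_env_state()     # dict(qp=..., qv=...) as defined above
env.set_env_state(state)        # restore the simulator to the saved state
```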
#### File: mjrl/policies/mpc_actor.py
```python
import numpy as np
from trajopt.utils import gather_paths_parallel
class MPCActor(object):
def __init__(self, env, H, paths_per_cpu,
num_cpu=1,
kappa=1.0,
gamma=1.0,
mean=None,
filter_coefs=None,
seed=123,
):
self.env, self.seed = env, seed
self.n, self.m = env.observation_dim, env.action_dim
self.H, self.paths_per_cpu, self.num_cpu = H, paths_per_cpu, num_cpu
self.mean, self.filter_coefs, self.kappa, self.gamma = mean, filter_coefs, kappa, gamma
if mean is None:
self.mean = np.zeros(self.m)
if filter_coefs is None:
self.filter_coefs = [np.ones(self.m), 1.0, 0.0, 0.0]
self.env.reset()
self.env.set_seed(seed)
self.env.reset(seed=seed)
self.act_sequence = np.ones((self.H, self.m)) * self.mean
self.ctr = 1
def score_trajectory(self, paths):
scores = np.zeros(len(paths))
for i in range(len(paths)):
scores[i] = 0.0
for t in range(paths[i]["rewards"].shape[0]):
scores[i] += (self.gamma**t)*paths[i]["rewards"][t]
return scores
def get_action(self, env_state):
# Set to env_state
# Shoot trajectories
# Return optimal action
seed = self.seed + self.ctr * 1000
paths = gather_paths_parallel(self.env.env_id,
env_state,
self.act_sequence,
self.filter_coefs,
seed,
self.paths_per_cpu,
self.num_cpu,
)
num_traj = len(paths)
R = self.score_trajectory(paths)
S = np.exp(self.kappa*(R-np.max(R)))
act = np.sum([paths[i]["actions"][0] * S[i] for i in range(num_traj)], axis=0)
act = act / (np.sum(S) + 1e-6)
return act
```
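A hedged sketch of an MPC control loop built around this actor. The environment construction, the `get_env_state()` accessor on the `GymEnv` wrapper and the loop length are assumptions; `get_action` itself is used exactly as defined above.
```python
# Illustrative MPC loop; env id and state accessor are assumptions.
from mjrl.utils.gym_env import GymEnv

env = GymEnv('mjrl_swimmer-v0')
actor = MPCActor(env, H=16, paths_per_cpu=32, num_cpu=4, kappa=5.0, gamma=1.0)
env.reset()
for t in range(50):
    env_state = env.get_env_state()       # assumed accessor mirroring get_env_infos()
    action = actor.get_action(env_state)  # shoot trajectories, return the weighted action
    env.step(action)
```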
#### File: tests/hydra/hydra_policy_opt_job_script.py
```python
from mjrl.utils.gym_env import GymEnv
from mjrl.policies.gaussian_mlp import MLP
from mjrl.baselines.quadratic_baseline import QuadraticBaseline
from mjrl.baselines.mlp_baseline import MLPBaseline
from mjrl.algos.npg_cg import NPG
from mjrl.algos.batch_reinforce import BatchREINFORCE
from mjrl.algos.ppo_clip import PPO
from mjrl.utils.train_agent import train_agent
import os
import json
import gym
import mjrl.envs
# import mj_envs
import time as timer
import pickle
import hydra
from omegaconf import DictConfig, OmegaConf
# ===============================================================================
# Process Inputs
# ===============================================================================
def preprocess(job_data):
if not os.path.exists(job_data.job_name):
os.mkdir(job_data.job_name)
assert 'algorithm' in job_data.keys()
assert any([job_data.algorithm == a for a in ['NPG', 'NVPG', 'VPG', 'PPO']])
assert 'sample_mode' in job_data.keys()
job_data.alg_hyper_params = dict() if 'alg_hyper_params' not in job_data.keys() else job_data.alg_hyper_params
EXP_FILE = job_data.job_name + '/job_config.json'
with open(EXP_FILE, 'w') as fp:
# json.dump(job_data, f, indent=4)
OmegaConf.save(config=job_data, f=fp.name)
if job_data.sample_mode == 'trajectories':
assert 'rl_num_traj' in job_data.keys()
job_data.rl_num_samples = 0 # will be ignored
elif job_data.sample_mode == 'samples':
assert 'rl_num_samples' in job_data.keys()
job_data.rl_num_traj = 0 # will be ignored
else:
print("Unknown sampling mode. Choose either trajectories or samples")
exit()
# ===============================================================================
# Train Loop
# ===============================================================================
@hydra.main(config_name="hydra_npg_config", config_path="config")
def train_loop(job_data: DictConfig) -> None:
print("========================================")
print("Job Configuration")
print("========================================")
preprocess(job_data)
print(OmegaConf.to_yaml(job_data))
e = GymEnv(job_data.env)
policy_size = tuple(eval(job_data.policy_size))
vf_hidden_size = tuple(eval(job_data.vf_hidden_size))
policy = MLP(e.spec, hidden_sizes=policy_size, seed=job_data.seed, init_log_std=job_data.init_log_std)
baseline = MLPBaseline(e.spec, reg_coef=1e-3, batch_size=job_data.vf_batch_size, hidden_sizes=vf_hidden_size,
epochs=job_data.vf_epochs, learn_rate=job_data.vf_learn_rate)
# Construct the algorithm
if job_data.algorithm == 'NPG':
# Other hyperparameters (like number of CG steps) can be specified in config for pass through
# or default hyperparameters will be used
agent = NPG(e, policy, baseline, normalized_step_size=job_data.rl_step_size,
seed=job_data.seed, save_logs=True, **job_data.alg_hyper_params)
elif job_data.algorithm == 'VPG':
agent = BatchREINFORCE(e, policy, baseline, learn_rate=job_data.rl_step_size,
seed=job_data.seed, save_logs=True, **job_data.alg_hyper_params)
elif job_data.algorithm == 'NVPG':
agent = BatchREINFORCE(e, policy, baseline, desired_kl=job_data.rl_step_size,
seed=job_data.seed, save_logs=True, **job_data.alg_hyper_params)
elif job_data.algorithm == 'PPO':
# There are many hyperparameters for PPO. They can be specified in config for pass through
# or defaults in the PPO algorithm will be used
agent = PPO(e, policy, baseline, save_logs=True, **job_data.alg_hyper_params)
else:
        raise NotImplementedError("Algorithm not found")
print("========================================")
print("Starting policy learning")
print("========================================")
ts = timer.time()
train_agent(job_name=job_data.job_name,
agent=agent,
seed=job_data.seed,
niter=job_data.rl_num_iter,
gamma=job_data.rl_gamma,
gae_lambda=job_data.rl_gae,
num_cpu=job_data.num_cpu,
sample_mode=job_data.sample_mode,
num_traj=job_data.rl_num_traj,
num_samples=job_data.rl_num_samples,
save_freq=job_data.save_freq,
evaluation_rollouts=job_data.eval_rollouts)
print("========================================")
print("Job Finished. Time taken = %f" % (timer.time()-ts))
print("========================================")
if __name__ == "__main__":
train_loop()
```
|
{
"source": "JEanne1305/group24-flood-system",
"score": 4
}
|
#### File: group24-flood-system/floodsystem/flood.py
```python
from floodsystem.station import MonitoringStation
def stations_level_over_threshold(stations, tol):
# create an empty list for station with level over threshold
list_of_stations_over_threshold=[]
for station in stations:
# check the consistency of water level
if station.latest_level!=None:
if MonitoringStation.typical_range_consistent(station) is True:
# if the range data is validated
if station.latest_level>tol:
station_tuple=(station.name, MonitoringStation.relative_water_level(station)) # create the tuple for individual stations
list_of_stations_over_threshold.append(station_tuple)
return list_of_stations_over_threshold
#Task 2C Jeanne
def stations_highest_rel_level(stations, N):
station_level={}
n=0
for i in range(len(stations)):
name=stations[i].name
typ_range=stations[i].typical_range
        if typ_range is None:
            continue
else:
station_level[name]=typ_range[1]
a_tuple=station_level.items()
a_list=list(a_tuple)
#print(a_list)
#sort the list of tuple by the highest level
sorted_level = sorted(a_list, key=lambda tup: tup[1], reverse=True)
#create a list that contains N stations with highest relative level
outcome1 =sorted_level[:N]
outcome2=[]
for i in range(len(outcome1)):
for name in outcome1[i]:
#print(name)
for station in stations:
#print('222')
if station.name==name :
outcome2.append(station)
outcome = sorted(outcome2, key=lambda x: x.typical_range[1],reverse=True)
return outcome
```
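A short usage sketch for these two functions, assuming the companion `stationdata` module of the same exercise provides `build_station_list` and `update_water_levels`:
```python
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_level_over_threshold, stations_highest_rel_level

stations = build_station_list()
update_water_levels(stations)      # fetch the latest levels before using them

# stations whose latest level exceeds the tolerance, with their relative level
for name, relative_level in stations_level_over_threshold(stations, tol=0.8):
    print(name, relative_level)

# the 10 stations with the highest level
for station in stations_highest_rel_level(stations, 10):
    print(station.name)
```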
#### File: group24-flood-system/floodsystem/geo.py
```python
from floodsystem.utils import sorted_by_key # noqa
from haversine import haversine
def stations_by_distance(stations, p):
station_list=[] #create an empty list to store the output
for station in stations:
distance=haversine(station.coord, p) # in kilometers
station_tuple=(station.name, distance) # create the tuple for individual stations
station_list.append(station_tuple)
# sort stations by distance
return sorted_by_key(station_list, 1)
# Task 1D
# <NAME> (yx357)
def rivers_with_station(stations): # return a set of names of rivers that have at least one monitoring station
rivers_set=set()
for station in stations:
rivers_set.add(station.river)
return rivers_set
def stations_by_river(stations): # returns a dictionary of rivers (key: river, value: array of station names)
rivers_dict={}
for station in stations:
if station.river in rivers_dict:
rivers_dict[station.river].append(station.name)
rivers_dict[station.river].sort()
else:
rivers_dict[station.river]=[station.name] # create a list of station names
return rivers_dict
#Task 1C
#Jeanne
def stations_within_radius(stations,centre,r):
required=[]
for i in stations:
distance=haversine(i.coord,centre) #calculate the distance
#between the station and the centre
if distance<=r:
required.append(i.name)#if within the range, add it to the list
return sorted(required)
#Task 1E
#Jeanne - lab group 24 river
def rivers_by_station_number(stations, N):
a=0
required={}
for station in stations:
#a+=1
#if a>40:
# break
river=station.river
if river in required:
required[river]+=1
else:
required[river]=1
a_tuple=required.items()
a_list=list(a_tuple)
#print(a_list)
#sort the list of tuple by the number of station
final_version = sorted(a_list, key=lambda tup: tup[1], reverse=True)
#create a list that contains N rivers with largest no. of stations
outcome=final_version[:N]
#see if any rivers after the 'Nth' river has the same no. of stations,
#if so, add it to the outcome list
M=N-1
    while M < len(final_version)-1 and final_version[M][1]==final_version[M+1][1]:
outcome.append(final_version[M+1])
M+=1
return outcome
```
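A usage sketch for the geography helpers; `build_station_list` comes from the same project (see Task1B below) and the river name in the dictionary lookup is only illustrative:
```python
from floodsystem.stationdata import build_station_list
from floodsystem.geo import (stations_within_radius, rivers_with_station,
                             stations_by_river, rivers_by_station_number)

stations = build_station_list()
cambridge_centre = (52.2053, 0.1218)
print(stations_within_radius(stations, cambridge_centre, 10))  # station names within 10 km
print(len(rivers_with_station(stations)))                      # number of monitored rivers
print(stations_by_river(stations).get("River Cam", []))        # stations on one river
print(rivers_by_station_number(stations, 9))                   # (river, station count) tuples
```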
#### File: JEanne1305/group24-flood-system/Task1B.py
```python
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_by_distance
def run():
stations=build_station_list()
cambridge=(52.2053, 0.1218)
x=stations_by_distance(stations, cambridge)
print("The closest 10 entries:", x[:10])
print("The furthest 10 entries:", x[-10:])
if __name__ == "__main__":
print("*** Task 1B: CUED Part IA Flood Warning System ***")
run()
```
|
{
"source": "jeanne-ber/vicreg",
"score": 2
}
|
#### File: jeanne-ber/vicreg/distributed.py
```python
import torch
import os
import torch.distributed as dist
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
os.environ['RANK'] = str(args.rank)
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['WORLD_SIZE'] = str(args.world_size)
else:
print('Not using distributed mode')
return
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
```
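A minimal sketch of how a training script might call into this module. The argument names mirror the attributes read above; the import path and the `torchrun` launcher mentioned in the comment are assumptions.
```python
# Illustrative entry point; launch e.g. with `torchrun --nproc_per_node=2 main.py`
# (torchrun sets RANK/WORLD_SIZE/LOCAL_RANK, which init_distributed_mode reads).
import argparse
from distributed import init_distributed_mode, is_main_process, get_world_size

parser = argparse.ArgumentParser()
parser.add_argument('--dist-url', default='env://')
parser.add_argument('--world-size', default=1, type=int)
args = parser.parse_args()

init_distributed_mode(args)            # fills args.rank, args.gpu, args.dist_backend
if is_main_process():
    print('world size:', get_world_size())
```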
|
{
"source": "jeannechaverot/CovidForecasting",
"score": 3
}
|
#### File: jeannechaverot/CovidForecasting/funcs.py
```python
import numpy as np
from functools import reduce
import operator as op
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
#SMOOTHING#
def gauss_filter(x, K, parity='even'):
# constant
A = 2*K
#upper bound of sum
B = K
if parity == 'odd':
A += 1
B += 1
const = 1/(2**A)
x_filtered = []
# x elements that will see their value change
r_x = np.arange(K, len(x)-K)
# range of k
r_k = np.arange(-K,B+1)
for i in range(len(x)):
if i not in r_x:
x_filtered.append(x[i])
else:
# list on which we will save values to be summed to yield new x_tilde_t
ls = []
for k in r_k:
#x_{t-k}
comb = ncr(A, K+k)
#print('i: ',i,'k: ',k)
x_tk = x[i-k]
#print(comb, x_tk, comb*x_tk)
#print(ls)
ls.append(int(comb*x_tk*const))
#print(ls)
x_filtered.append(np.sum(ls))
return x_filtered
def ncr(n, r):
r = min(r, n-r)
numer = reduce(op.mul, range(n, n-r, -1), 1)
denom = reduce(op.mul, range(1, r+1), 1)
return numer / denom
def find_best_K(X, y, parity, with_validation=True, model='quadratic'):
"""Returns optimal K such that MAPE error is minimized for the gaussian smoothing"""
X_new = np.zeros(X.shape)
N = X.shape[1]
Ks = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
mapes = []
pct_80 = int(np.ceil(80*len(X)/100))
for K in Ks:
for j in range(X.shape[1]):
#print(j)
X_new[:,j]= gauss_filter(X[:,j], K, parity)
X_train, X_test = X_new[:pct_80], X_new[pct_80:]
y_train, y_test =y[:pct_80], y[pct_80:]
if model == 'quadratic':
# if we want to find the smallest MAPE error based on advancement validation
if with_validation:
mapes.append(advancement_val(X_new, y)[1])
# if we want to find the smallest MAPE error based on last 20% testing
else:
index = find_best_index(X_train, X_test, y_train, y_test, 'mape',X.shape[1])
P, q, G, h = generate_params(X_train, y_train, index,N)
gamma = cvxopt_solve_qp(P, q, G, h)
y_pred = X_test@gamma
mapes.append(mape(y_test, y_pred))
# baseline model, we do not do any validation, only select on last 20%
else:
k = find_optimum_k(X_train, X_test, y_train, y_test)[3][0]
y_pred = baseline_model_k(X_train, X_test, y_train, y_test, k)[1]
mapes.append(mape(y_test, y_pred))
return Ks[np.argmin(mapes)], min(mapes)
# ----- BASELINE ----- #
def baseline_model_k(X_train, X_test, y_train, y_test, k):
"""k is such that y_pred[i] = y_train[i-k]"""
y_acc = list(y_train)
y_pred = []
for i in range(len(y_test)):
y_pred.append(y_acc[-k])
y_acc.append(y_acc[-k])
#y_pred = y_train[-k:-k-len(y_test):-1]
return y_acc, y_pred
def plot_baseline(X_train, X_test, y_train, y_test, y, k, pct, country):
y_pred_full = baseline_model_k(X_train, X_test, y_train, y_test,k)[0]
plt.plot(y_pred_full, 'g', y, 'b')
plt.xlabel('Day')
plt.ylabel('Number of Daily Recovered')
plt.legend(['Predicted value','True value'])
plt.title('Prediction of the number of deaths in ' + country + ' using baseline model with k=' + str(k)+'\n with a MAPE of ' + str(mape(y_test,baseline_model_k(X_train, X_test, y_train, y_test,k)[1]))[:5] + ' on the last 20% of testing data')
plt.axvline(x=pct-1)
def baseline_error(X_train, X_test, y_train, y_test, k):
y_pred = baseline_model_k(X_train, X_test, y_train, y_test, k)[1]
loss = mape(y_test, y_pred)
return loss
def find_optimum_k(X_train, X_test, y_train, y_test):
K = 30
maes = {}
mapes = {}
for k in range(1,K):
y_pred = baseline_model_k(X_train, X_test, y_train, y_test, k)[1]
mapes[k] = baseline_error(X_train, X_test, y_train, y_test, k)
maes[k] = mae(y_test, y_pred)
return maes, sorted(maes, key=maes.get), mapes, sorted(mapes, key=mapes.get)
def simple_exponential_smoothing(x, alpha):
result = [x[0]] # first value is same as series
for n in range(1, len(x)):
result.append(alpha * x[n] + (1 - alpha) * x[n-1])
return result
def exponential_smoothing(x, rho, K):
const = (1-rho)/(1-rho**(K+1))
new_x = []
# range of x
r_x = np.arange(K, len(x)-K)
# range of k
r_k = np.arange(0,K)
for i in range(len(x)):
if i not in r_x:
new_x.append(x[i])
else:
ls = []
for k in r_k:
ls.append(int(const*rho**k*x[i-k]))
new_x.append(np.sum(ls))
return new_x
def find_best_alpha(X, y, N, model='simple', K=0, with_validation=True):
    """Return the alpha that minimizes the MAPE error, along with the corresponding MAPE value."""
X_new = np.zeros(X.shape)
alphas = [round(0.05*i, 2) for i in range(20)]
mapes = []
pct_80 = int(np.ceil(80*len(X)/100))
if model=='simple':
for alpha in alphas:
for j in range(X.shape[1]):
X_new[:,j]= simple_exponential_smoothing(X[:,j], alpha)
if with_validation:
mapes.append(advancement_val(X_new, y)[1])
else:
X_train, X_test = X_new[:pct_80], X_new[pct_80:]
y_train, y_test =y[:pct_80], y[pct_80:]
index = find_best_index(X_train, X_test, y_train, y_test, 'mape', N)
P, q, G, h = generate_params(X_train, y_train, index,N)
gamma = cvxopt_solve_qp(P, q, G, h)
y_pred = X_test@gamma
mapes.append(mape(y_test,y_pred))
else:
for alpha in alphas:
for j in range(X.shape[1]):
X_new[:,j]= exponential_smoothing(X[:,j], alpha,K)
if with_validation:
mapes.append(advancement_val(X_new, y)[1])
else:
X_train, X_test = X_new[:pct_80], X_new[pct_80:]
y_train, y_test =y[:pct_80], y[pct_80:]
index = find_best_index(X_train, X_test, y_train, y_test, 'mape', N)
P, q, G, h = generate_params(X_train, y_train, index,N)
gamma = cvxopt_solve_qp(P, q, G, h)
y_pred = X_test@gamma
mapes.append(mape(y_test, y_pred))
return alphas[np.argmin(mapes)], min(mapes)
def advancement_val(X, y):
# We want our train set to be of size 40, and then we shift of 10 data points at each new iteration.
# the size of our test set is the rest of the dataset points
splits = int(np.floor((X.shape[0] - 40)/10))
#print('number of splits for validation:', splits)
N = X.shape[1]
mapes = []
y_vals = []
y_preds = []
for i in range(splits):
begin = 10*i
end = 40 + 10*i
X_tr = X[begin:end,:]
y_tr = y[begin:end]
X_te = X[end:,:]
y_te = y[end:]
index = find_best_index(X_tr, X_te, y_tr, y_te, 'mape', N)
P, q, G, h = generate_params(X_tr, y_tr, index, N, 10e-5)
gamma = cvxopt_solve_qp(P, q, G, h)
y_pred = X_te@gamma
y_vals.append(y_te)
y_preds.append(y_pred)
mapes.append(mape(y_te, y_pred))
y_vals = [item for sublist in y_vals for item in sublist]
y_preds =[item for sublist in y_preds for item in sublist]
return mapes, np.mean(mapes)
def apply_smoothing(X, K, parity):
new_X = np.zeros(X.shape)
for j in range(X.shape[1]):
new_X[:,j] = gauss_filter(X[:,j], K, parity=parity)
return new_X
# ----------------------------#
# LOSS FUNCTIONS
# ----------------------------#
def mape(y_test, y_pred):
return np.mean(np.abs((y_pred-y_test)/y_test))
def mspe(y_test, y_pred):
return np.mean(np.square((y_pred-y_test)/y_test))
def maape(y_test, y_pred):
return np.mean(np.arctan(np.abs((y_pred-y_test)/y_test)))
def mae(y_test, y_pred):
return np.mean(np.abs(y_test - y_pred))
# ----------------------------------#
# QUADRATIC OPTIMIZATION
# ----------------------------------#
import cvxopt
def create_M(N):
M = np.zeros((N,N))
for i in range(N):
for j in range(N):
if i==0:
if j == 0:
M[i,j]=1
else:
M[i,j]=0
elif (i==j):
M[i,j]=1
elif (j == (i-1)):
M[i,j] = -1
else:
M[i,j]=0
return M
def generate_G(index,N):
"""index: represents k^*, gamma_{k^*} is such that gamma_0 <= gamma_1 <= ...<= gamma_{k^*} >= ... >= gamma_N
This function generates a matrix G such that either gamma_index or gamma_{index+1} is the maximum
"""
#this constraint verifies the gaussian-like distribution of the gamma
G = np.zeros((N,N))
for i in range(0, index):
for j in range(N):
if (i==j):
G[i,j] = 1
elif (j == i+1):
G[i,j] = -1
for i in range(index, N):
for j in range(N):
if (i==j):
G[i,j] = -1
elif (j == i+1):
G[i,j] = 1
# we do not put any condition on idx_th element, and use this line to verify that all gammas are superior or
# equal to zero
#G[index,:] = 0
#G[index, 0] = -1
#this constraint verifies that -gamma_i <= 0 <=> gamma_i >= 0 forall i
# for i in range(N, 2*N):
# for j in range(N):
# if (i==N+j):
# G[i,j]=-1
return G
def generate_params(X_train, y_train,k,N,lambda_=10e-15):
M = create_M(N)
M_tilde = M.T @ M
X_tilde = X_train.T @ X_train
P = X_tilde + lambda_*(M_tilde)
q = -X_train.T@y_train
G = generate_G(k,N)
h = np.zeros((N,1))
for i in range(len(h)):
h[i] = -0.0000001
return P, q, G, h
def find_best_index(X_train, X_test, y_train, y_test, loss,N):
"""Returns index of maximum gamma that minimizes the mae loss"""
loss = {}
for k in range(N):
P, q, G, h = generate_params(X_train, y_train, k, N, lambda_=10e-5)
gammas = cvxopt_solve_qp(P,q, G, h)
        if gammas is not None:
y_pred = X_test@gammas
loss[k] = mape(y_test,y_pred)
# in case optimal solution is not found
else:
loss[k] = 999999999
return min(loss, key=loss.get)
def cvxopt_solve_qp(P, q, G=None, h=None, A=None, b=None):
cvxopt.solvers.options['show_progress'] = False
P = .5 * (P + P.T) # make sure P is symmetric
args = [cvxopt.matrix(P), cvxopt.matrix(q)]
if G is not None:
args.extend([cvxopt.matrix(G), cvxopt.matrix(h)])
if A is not None:
args.extend([cvxopt.matrix(A), cvxopt.matrix(b)])
sol = cvxopt.solvers.qp(*args)
if 'optimal' not in sol['status']:
return None
return np.array(sol['x']).reshape((P.shape[1],))
```
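To make the intended flow of the constrained least-squares part explicit, here is a hedged end-to-end sketch on synthetic data; the shapes and the 80/20 split are arbitrary but match what the helpers above expect (strictly positive targets so that MAPE is defined):
```python
# Synthetic example of the quadratic-programming pipeline (data is made up).
import numpy as np

X = np.abs(np.random.rand(100, 10))        # feature matrix, one column per lag/feature
y = np.abs(np.random.rand(100)) + 1e-3     # strictly positive targets
pct_80 = int(np.ceil(80 * len(X) / 100))
X_train, X_test = X[:pct_80], X[pct_80:]
y_train, y_test = y[:pct_80], y[pct_80:]

k = find_best_index(X_train, X_test, y_train, y_test, 'mape', X.shape[1])
P, q, G, h = generate_params(X_train, y_train, k, X.shape[1])
gamma = cvxopt_solve_qp(P, q, G, h)
print('test MAPE:', mape(y_test, X_test @ gamma))
```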
|
{
"source": "Jeanne-Chris/DXCPythonBootcamp",
"score": 3
}
|
#### File: Jeanne-Chris/DXCPythonBootcamp/testing.py
```python
import mathfuncs
#import pytest
def test_add():
assert mathfuncs.add(7, 3) == 10
assert mathfuncs.add(7) == 9
def test_multiply():
assert mathfuncs.multiply(7, 3) == 21
assert mathfuncs.multiply(7, 2) == 14
```
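The `mathfuncs` module itself is not included here; a hypothetical implementation consistent with these assertions (note the default second argument of 2 implied by `add(7) == 9`) could look like:
```python
# mathfuncs.py -- hypothetical module satisfying the tests above.
def add(x, y=2):
    """Return x + y; y defaults to 2, so add(7) == 9."""
    return x + y

def multiply(x, y):
    """Return the product of x and y."""
    return x * y
```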
|
{
"source": "jeannefukumaru/multilingual-bert-as-service",
"score": 3
}
|
#### File: multilingual-bert-as-service/mbert_client/utils.py
```python
import zmq
import numpy
import torch
import json
def send_array_and_str(socket, A, sentence, flags=0, copy=True, track=False):
"""send a numpy array with metadata"""
md = dict(dtype = str(A.dtype),shape = A.shape)
socket.send_string(sentence, flags|zmq.SNDMORE)
socket.send_json(md, flags|zmq.SNDMORE)
return socket.send(A, flags, copy=copy, track=track)
def recv_array_and_str(socket, flags=0, copy=True, track=False):
"""recv a numpy array and sentence"""
sentence = socket.recv_string(flags=flags)
md = socket.recv_json(flags=flags)
msg = socket.recv(flags=flags, copy=copy, track=track)
buf = memoryview(msg)
A = numpy.frombuffer(buf, dtype=md['dtype'])
return sentence, A.reshape(md['shape'])
def preprocess(text, tokenizer):
'''tokenize text into subwords and convert to indices
:param text str: text to be preprocessed
:param tokenizer: BertTokenizer object
:output: torch tensor of vocab ids
'''
tokenized_text = tokenizer.tokenize(text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
tokens_tensor = torch.tensor([indexed_tokens]).numpy()
return tokens_tensor
```
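A hedged round-trip sketch of the two ZeroMQ helpers over an in-process PAIR socket pair; the socket topology and the import path are assumptions made only for this example.
```python
# Illustrative in-process round trip.
import numpy as np
import zmq
from mbert_client.utils import send_array_and_str, recv_array_and_str

ctx = zmq.Context()
receiver = ctx.socket(zmq.PAIR)
sender = ctx.socket(zmq.PAIR)
receiver.bind("inproc://demo")
sender.connect("inproc://demo")

embedding = np.random.rand(1, 5, 768).astype("float32")
send_array_and_str(sender, embedding, "Guten Morgen")
sentence, arr = recv_array_and_str(receiver)
assert sentence == "Guten Morgen" and arr.shape == (1, 5, 768)
```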
#### File: multilingual-bert-as-service/req-rep-architecture/resp_server.py
```python
from absl import logging
import numpy as np
import time
import zmq
import torch
from transformers import BertTokenizer, BertModel
from ..utils import send_array, recv_array
from argparse import ArgumentParser
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
model = BertModel.from_pretrained('bert-base-multilingual-cased')
model.eval()
def preprocess(text, tokenizer):
'''tokenize text into subwords and convert to indices
:param text str: text to be preprocessed
:param tokenizer: BertTokenizer object
:output: torch tensor of vocab ids
'''
tokenized_text = tokenizer.tokenize(text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
tokens_tensor = torch.tensor([indexed_tokens])
return tokens_tensor
if __name__=="__main__":
# parser = ArgumentParser('setup bert-multilingual for serving')
# parser.add_argument("model-dir", help="model where pretrained model is stored")
# args = parser.parse_args()
while True:
message = socket.recv_json()
print('received request: %s' % message)
# add generator prefetch option
tokens_tensor = preprocess(message, tokenizer)
# Predict hidden states features for each layer
with torch.no_grad():
# See the models docstrings for the detail of the inputs
outputs = model(tokens_tensor)
# Transformers models always output tuples.
# See the models docstrings for the detail of all the outputs
# In our case, the first element is the hidden state of the last layer of the Bert model
encoded_layers = outputs[0].numpy()
send_array(socket, encoded_layers)
```
|
{
"source": "JeanneGasser/basic_cypting",
"score": 4
}
|
#### File: JeanneGasser/basic_cypting/beta_version.py
```python
from nltk import RegexpTokenizer
toknizer = RegexpTokenizer(r'''\w'|\w+|[^\w\s]''')
from string import punctuation
import unidecode
#Class construction
class EncryptDecrypt:
"""
For each letter return the numerical position in alphabet
Or for each number return the corresponding letter
"""
def encrypt(self, text):
#Remove accent, caps and excess white space
text = unidecode.unidecode(text.lower().strip())
token = toknizer.tokenize(text)
#ord return ascii code of the letter. In order to have the alphabet position : -96
return " ".join(
["-".join([str(ord(l)-96) for l in word if l.isalpha()])
for word in token if word not in punctuation])
def decrypt(self, text):
#chr gives the char attached to an ascii code. Since we're using letter position, need to add +96
#Encrypted word given in format xx-xx-xx, hence the split.
to_decrypt = [word.split("-") for word in text.split(" ")]
return " ".join(
[("".join([chr(int(l)+96) for l in word]))
for word in to_decrypt])
#User input and class output
print("Bienvenue, avec ce programme vous allez pouvoir chiffrer ou déchiffrer du texte. \n \
Chiffrement : lettres = position numérique dans l'alphabet")
textfile = input("Veuillez entrer votre texte ou un nom de fichier texte avec le chemin \n")
if ".txt" in textfile:
txt = open(textfile,"r").read()
what_to_do = input("Voulez vous décrypter ou encrypter \n \n")
if unidecode.unidecode(what_to_do.lower().strip())=="encrypter":
open(textfile.split(".")[0] + "_crypted.txt","w").write(EncryptDecrypt().encrypt(txt))
print("Fichier encrypté et enregistré")
elif unidecode.unidecode(what_to_do.lower().strip())=="decrypter":
open(textfile.split(".")[0] + "_decrypted.txt","w").write(EncryptDecrypt().decrypt(txt))
print("Fichier décrypté et enregistré")
else:
print("Veuillez entrer une commande valide: Encrypter ou Decrypter")
else:
what_to_do = input("Voulez vous décrypter ou encrypter \n \n")
    if unidecode.unidecode(what_to_do.lower().strip())=="encrypter":
print(EncryptDecrypt().encrypt(textfile))
elif unidecode.unidecode(what_to_do.lower().strip())=="decrypter":
print(EncryptDecrypt().decrypt(textfile))
else:
print("Veuillez entrer une commande valide: Encrypter ou Decrypter")
```
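Used programmatically rather than through the interactive prompts, the class behaves as follows (the expected output is derived directly from the letter-position scheme above):
```python
ed = EncryptDecrypt()
code = ed.encrypt("Bonjour le monde")
print(code)              # 2-15-14-10-15-21-18 12-5 13-15-14-4-5
print(ed.decrypt(code))  # bonjour le monde
```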
|
{
"source": "JeannieDaniel/twitterati",
"score": 3
}
|
#### File: src/twitterati/query_params.py
```python
import datetime
import pytz
def get_conversation_query_params(conversation_id, max_result=100):
return {'query': 'conversation_id:{}'.format(conversation_id),
'tweet.fields': 'author_id,conversation_id,created_at,in_reply_to_user_id,lang,referenced_tweets',
'user.fields': 'id,created_at,username,name,description,location,public_metrics,url,verified,entities',
'expansions': 'author_id,in_reply_to_user_id,referenced_tweets.id',
'max_results': max_result
}
def get_tweet_query_params():
return {
'tweet.fields': 'author_id,conversation_id,created_at,in_reply_to_user_id,lang,referenced_tweets',
'user.fields': 'id,created_at,username,name,description,location,public_metrics,url,verified,entities',
'expansions': 'author_id,in_reply_to_user_id,referenced_tweets.id',
}
def get_followers_query_params(max_results = 100):
return {'user.fields': 'id,created_at,username,name,description,location,public_metrics,url,verified,entities',
            'max_results': max_results}
def get_user_query_params():
return {'user.fields':'id,created_at,username,name,description,location,public_metrics,url,verified,entities'}
def get_recent_search_query_params(search_query, period, max_results=100):
# Calculate startdate if period is provided
if period is not None:
d = datetime.datetime.utcnow() + datetime.timedelta(days=-period)
start_time = d.replace(tzinfo=pytz.UTC).isoformat()
query_params = {
'query': search_query,
'user.fields': 'id,created_at,username,name,description,location,public_metrics,url,verified,entities',
'tweet.fields': 'author_id,in_reply_to_user_id,created_at,conversation_id,public_metrics,lang,geo,referenced_tweets,entities',
'expansions': 'author_id',
'max_results': max_results,
'start_time' : start_time,
}
return query_params
else:
query_params = {
'query': search_query,
'user.fields': 'id,created_at,username,name,description,location,public_metrics,url,verified,entities',
'tweet.fields': 'author_id,in_reply_to_user_id,created_at,conversation_id,public_metrics,lang,geo,referenced_tweets,entities',
'expansions': 'author_id',
'max_results': max_results
}
return query_params
```
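A hedged sketch of pairing these parameter builders with the Twitter v2 HTTP API via `requests`; the endpoint URL, the import path and the bearer-token handling are assumptions that are not defined in this module.
```python
# Illustrative call to the v2 recent-search endpoint (URL and auth are assumptions).
import os
import requests
from twitterati.query_params import get_recent_search_query_params

bearer_token = os.environ["TWITTER_BEARER_TOKEN"]       # assumed to be set
headers = {"Authorization": "Bearer {}".format(bearer_token)}
params = get_recent_search_query_params("from:TwitterDev -is:retweet", period=7)
response = requests.get("https://api.twitter.com/2/tweets/search/recent",
                        headers=headers, params=params)
tweets = response.json().get("data", [])
```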
|
{
"source": "jeanniesarah/gann-square",
"score": 4
}
|
#### File: gann-square/gann/core.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright 2015, <NAME>'
import datetime
def f(x):
"""
Helper function to determine number on a Gann square at coords: x, 0.
If x = 0 -> 1
If x < 0 -> f(-x) - 4 * (-x)
Else -> f(x-1) + 8 * x - 3
:param x: x position
:return: value
"""
return 1 if x == 0 else (f(-x) - 4 * (-x) if x < 0 else f(x-1) + 8 * x - 3)
def get_number_by_pos(x, y):
"""
Function to determine number on a Gann square at coordinates: x, y.
Assuming Gann square coordinates system is:
____ ____ ____
|-1 1|0 1|1 1|
|-1 0|0 0|1 0|
|-1-1|0 -1|1 -1|
(0, 0) = 1
ToDo: simplify, refactor
:param x: x position
:param y: y position
:return: value
"""
if x >= 0: # x >= 0
if y <= x: # y <= x
if y >= 0: # y >= 0
val = f(x) - y
else:
if abs(y) <= x: # |y| <= x
val = f(x) + abs(y)
else:
val = f(abs(y)) + 2 * abs(y) - x
else:
val = f(y) - 2 * y + x
else:
if y >= 0: # y >= 0
if y <= abs(x): # y <= |x|
val = f(x) + y
else:
val = f(y) - 2 * y + x
else: # x < 0, y < 0
if abs(y) < abs(x): # |y| < |x|
val = f(x) + y
elif abs(y) == abs(x): # |y| == |x|
val = (abs(y) * 2 + 1) ** 2
else:
val = (abs(y) * 2 + 1) ** 2 - (abs(y) - abs(x))
return val
def get_date_from_pos(x, y, base):
"""
Function to determine date on a Gann square at coordinates: x, y with base date 'base'
Assuming Gann square coordinates system is:
____ ____ ____
|-1 1|0 1|1 1|
|-1 0|0 0|1 0|
|-1-1|0 -1|1 -1|
:param x: x position
:param y: y position
:param base: base date at position (0, 0)
:return: date for (x, y)
"""
days = get_number_by_pos(x, y)
d = base + datetime.timedelta(days=days-1) # -1 because origin is 1
return d
```
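A small worked example: the origin of the square maps to 1, and dates advance along the spiral from the base date.
```python
import datetime
from gann.core import get_number_by_pos, get_date_from_pos

base = datetime.date(2015, 1, 1)
print(get_number_by_pos(0, 0))        # 1
print(get_number_by_pos(1, 0))        # 6, since f(1) = f(0) + 8*1 - 3
print(get_date_from_pos(1, 0, base))  # 2015-01-06 (base date plus 6 - 1 days)
```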
|
{
"source": "jeanoliveira92/metaheuristicas-mosp",
"score": 3
}
|
#### File: metaheuristicas-mosp/HeuristicaPopulacional/grasp.py
```python
import random
import numpy as np
import globals as g
import math
from HeuristicaRefinamento import heuristicaRefinamento as hr
from HeuristicaConstrutiva import heuristicaConstrutiva as hc
# GRASP CONSTRUCTIVE PHASE
def gerarMatrizOrdenada():
    # COPY OF THE ORIGINAL MATRIX
matrixOriginal = g.matPaPe
    # SUM THE TOTAL WEIGHT OF EACH ROW
somaLinhas = np.sum(matrixOriginal, axis=1)
    # CREATE AN INDEX VECTOR
indices = list(range(0, g.nrows))
    # APPEND THE INDEX VECTOR TO THE ROW-WEIGHT VECTOR
matrixOrdenada = np.c_[indices, somaLinhas]
    # SORT THE VALUES IN DESCENDING ORDER
matrixOrdenada = matrixOrdenada[matrixOrdenada[:,1].argsort()[::-1]]
    # REMOVE THE WEIGHT VECTOR AND KEEP ONLY THE INDICES
#matrixOrdenada = matrixOrdenada[::,0]
    # RETURN THE NEW SORTED MATRIX
return matrixOrdenada
def construtivaGrasp(matrixOrdenada):
ALPHA = random.uniform(0.0, 1.0)
    # REMOVE THE WEIGHT VECTOR AND KEEP ONLY THE INDICES. FIND THE LAST POSITION OF THE ELEMENT CE IN THE VECTOR TO CUT AT
tempMat = matrixOrdenada[::, 1]
    # COMPUTE THE UPPER AND LOWER CUT VALUES
cMin = np.min(matrixOrdenada[::1], axis=0)[1]
cMax = np.max(matrixOrdenada[::1], axis=0)[1]
ce = math.floor(cMin + ( ALPHA * (cMax - cMin)))
while(not np.any(tempMat == ce)):
cMax = cMax-1
ce = math.floor(cMin + (ALPHA * (cMax - cMin)))
limiteSuperior = list(tempMat)[::1].index(ce)
    # INDEX VECTOR OF THE PARTITIONED ELEMENTS
indicesDasAmostras = matrixOrdenada[limiteSuperior:][::1]
    # REMOVE THE RCL FROM THE ORIGINAL MATRIX
matrixOrdenada = matrixOrdenada[:limiteSuperior]
    # SHUFFLE THE SELECTED ELEMENTS OF THE MATRIX
indicesDasAmostras = random.sample(list(indicesDasAmostras), len(indicesDasAmostras))
    # REMOVE THE WEIGHTS FROM THE INDICES
indicesDasAmostras = np.array(indicesDasAmostras)[::, 0]
matrixOrdenada = matrixOrdenada[::, 0]
    # SHUFFLE THE ELEMENTS REMAINING OUTSIDE THE RCL
random.shuffle(matrixOrdenada)
    # CONCATENATE THE RCL WITH THE INITIAL REMAINDER AND DROP THE WEIGHT COLUMN,
    # KEEPING ONLY THE INDICES OF THE ORIGINAL MATRIX
ordemPecaPadrao = np.concatenate((
indicesDasAmostras,
matrixOrdenada
))
return ordemPecaPadrao
# GRASP POPULATION HEURISTIC - FIRST IMPROVEMENT
def graspFim(ordemDasPilhas):
resultadoBom = np.max(hc.PilhasAbertas(ordemDasPilhas))
    # SORT THE MATRIX IN ASCENDING ORDER
matOrd = gerarMatrizOrdenada()
i = 0
while i < 150:
ordemDasPilhasAtual = construtivaGrasp(matOrd)
ordemDasPilhasAtual = hr.FirstImprovementMethod(ordemDasPilhasAtual)
resultadoMelhor = np.max(hc.PilhasAbertas(ordemDasPilhasAtual))
if resultadoMelhor < resultadoBom :
ordemDasPilhas = ordemDasPilhasAtual
resultadoBom = resultadoMelhor
i = -1
i = i+1
return ordemDasPilhas
# GRASP POPULATION HEURISTIC - <NAME>
def graspRum(ordemDasPilhas):
resultadoBom = np.max(hc.PilhasAbertas(ordemDasPilhas))
    # SORT THE MATRIX IN ASCENDING ORDER
matOrd = gerarMatrizOrdenada()
for counter in range(100):
ordemDasPilhasAtual = construtivaGrasp(matOrd)
ordemDasPilhasAtual = hr.RandonUpHillMethod(list(ordemDasPilhasAtual), 100)
resultadoMelhor = np.max(hc.PilhasAbertas(ordemDasPilhasAtual))
if resultadoMelhor < resultadoBom :
ordemDasPilhas = ordemDasPilhasAtual
resultadoBom = resultadoMelhor
return ordemDasPilhas
```
|
{
"source": "jeanollion/dataset_iterator",
"score": 3
}
|
#### File: dataset_iterator/datasetIO/atomic_file_handler.py
```python
import os
# code from aparamon: https://github.com/h5py/h5py/issues/934
class AtomicFileHandler:
def __init__(self, path):
self.fd = os.open(path, os.O_RDONLY)
self.pos = 0
def seek(self, pos, whence=0):
if whence == 0:
self.pos = pos
elif whence == 1:
self.pos += pos
else:
self.pos = os.lseek(self.fd, pos, whence)
return self.pos
def tell(self):
return self.pos
def read(self, size):
b = os.pread(self.fd, size, self.pos)
self.pos += len(b)
return b
```
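The intended use (per the linked h5py issue) is to hand this file-like object to h5py so that every read goes through `os.pread` and stays safe across forked worker processes. A hedged sketch, assuming a reasonably recent h5py that accepts file-like objects and using placeholder file/dataset names:
```python
# Sketch only: 'dataset.h5' and the dataset name 'images' are placeholders.
import h5py

fh = AtomicFileHandler('dataset.h5')
with h5py.File(fh, 'r') as f:
    first_batch = f['images'][:8]
```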
#### File: dataset_iterator/dataset_iterator/tile_utils.py
```python
import itertools
from math import ceil, floor
import numpy as np
from numpy.random import randint, random
from .utils import ensure_multiplicity
from scipy.ndimage import zoom
OVERLAP_MODE = ["NO_OVERLAP", "ALLOW", "FORCE"]
def extract_tile_function(tile_shape, perform_augmentation=True, overlap_mode=OVERLAP_MODE[1], min_overlap=1, n_tiles=None, random_stride=False, augmentation_rotate=True):
def func(batch, is_mask):
tiles = extract_tiles(batch, tile_shape=tile_shape, overlap_mode=overlap_mode, min_overlap=min_overlap, n_tiles=n_tiles, random_stride=random_stride, return_coords=False)
if perform_augmentation:
tiles = augment_tiles_inplace(tiles, rotate = augmentation_rotate and all([s==tile_shape[0] for s in tile_shape]), n_dims=len(tile_shape))
return tiles
return func
def extract_tiles(batch, tile_shape, overlap_mode=OVERLAP_MODE[1], min_overlap=1, n_tiles=None, random_stride=False, return_coords=False):
"""Extract tiles.
Parameters
----------
batch : numpy array
dimensions BYXC or BZYXC (B = batch)
tile_shape : tuple
        tile shape, dimensions YX or ZYX. Each of Z, Y, X must be less than or equal to the corresponding batch dimension
overlap_mode : string
one of ["NO_OVERLAP", "ALLOW", "FORCE"]
"NO_OVERLAP" maximum number of tiles so that they do not overlap
"ALLOW" maximum number of tiles that fit in the image, allowing overlap
"FORCE" maximum number of tiles that fit in the image while enforcing a minimum overlap defined by min_overlap. If min_overlap is less than zero, it enforces a distance between tiles
min_overlap : integer or tuple
min overlap along each spatial dimension. only used in mode "FORCE"
n_tiles : int
if provided overlap_mode and min_overlap are ignored
random_stride : bool
whether tile coordinates should be randomized, within the gap / overlap zone
return_coords : bool
        whether tile coordinates should be returned
Returns
-------
numpy array, ([numpy array])
tiles concatenated along first axis, (tiles coordinates)
"""
image_shape = batch[0].shape[1:-1] if isinstance(batch, (list, tuple)) else batch.shape[1:-1]
tile_shape = ensure_multiplicity(len(image_shape), tile_shape)
if n_tiles is None:
tile_coords = _get_tile_coords_overlap(image_shape, tile_shape, overlap_mode, min_overlap, random_stride)
else:
assert len(image_shape)==2, "only 2d images supported when specifying n_tiles"
_, n_tiles_yx = get_stride_2d(image_shape, tile_shape, n_tiles)
tile_coords = _get_tile_coords(image_shape, tile_shape, n_tiles_yx, random_stride)
if len(image_shape)==2:
tile_fun = lambda b : np.concatenate([b[:, tile_coords[0][i]:tile_coords[0][i] + tile_shape[0], tile_coords[1][i]:tile_coords[1][i] + tile_shape[1]] for i in range(len(tile_coords[0]))])
else:
tile_fun = lambda b : np.concatenate([b[:, tile_coords[0][i]:tile_coords[0][i] + tile_shape[0], tile_coords[1][i]:tile_coords[1][i] + tile_shape[1], tile_coords[2][i]:tile_coords[2][i] + tile_shape[2]] for i in range(len(tile_coords[0]))])
    # build the tiles first so that the tile coordinates can optionally be returned as well
    if isinstance(batch, (list, tuple)):
        tiles = [tile_fun(b) for b in batch]
    else:
        tiles = tile_fun(batch)
    if return_coords:
        return tiles, tile_coords
    else:
        return tiles
def extract_tile_random_zoom_function(tile_shape, perform_augmentation=True, overlap_mode=OVERLAP_MODE[1], min_overlap=1, n_tiles=None, random_stride=False, augmentation_rotate=True, zoom_range=[0.6, 1.6], aspect_ratio_range=[0.6, 1.6], interpolation_order=1):
def func(batch, is_mask):
if isinstance(batch, (list, tuple)):
is_mask = ensure_multiplicity(len(batch), is_mask)
order = [0 if m else interpolation_order for m in is_mask]
tiles = extract_tiles_random_zoom(batch, tile_shape=tile_shape, overlap_mode=overlap_mode, min_overlap=min_overlap, n_tiles=n_tiles, random_stride=random_stride, zoom_range=zoom_range, aspect_ratio_range=aspect_ratio_range, interpolation_order=order)
if perform_augmentation:
tiles = augment_tiles_inplace(tiles, rotate = augmentation_rotate and all([s==tile_shape[0] for s in tile_shape]), n_dims=len(tile_shape))
return tiles
return func
def extract_tiles_random_zoom(batch, tile_shape, overlap_mode=OVERLAP_MODE[1], min_overlap=1, n_tiles=None, random_stride=False, zoom_range=[0.6, 1.6], aspect_ratio_range=[0.6, 1.6], interpolation_order=1):
"""Extract tiles with random zoom.
Parameters
----------
batch : numpy array
dimensions BYXC or BZYXC (B = batch)
tile_shape : tuple
        tile shape, dimensions YX or ZYX. Each of Z, Y, X must be less than or equal to the corresponding batch dimension
overlap_mode : string
one of ["NO_OVERLAP", "ALLOW", "FORCE"]
"NO_OVERLAP" maximum number of tiles so that they do not overlap
"ALLOW" maximum number of tiles that fit in the image, allowing overlap
"FORCE" maximum number of tiles that fit in the image while enforcing a minimum overlap defined by min_overlap. If min_overlap is less than zero, it enforces a distance between tiles
min_overlap : integer or tuple
min overlap along each spatial dimension. only used in mode "FORCE"
n_tiles : int
if provided overlap_mode and min_overlap are ignored
random_stride : bool
whether tile coordinates should be randomized, within the gap / overlap zone
zoom_range : list
[min zoom ratio, max zoom ratio]
aspect_ratio_range : list
aspect ratio relative to the first axis.
[min aspect ratio, max aspect ratio]
interpolation_order : int
The order of the spline interpolation passed to scipy.ndimage.zoom
Returns
-------
numpy array
tiles concatenated along first axis
"""
image_shape = batch[0].shape[1:-1] if isinstance(batch, (list, tuple)) else batch.shape[1:-1]
rank = len(image_shape)
assert rank in [2, 3], "only 2D or 3D images are supported"
aspect_ratio_range = ensure_multiplicity(2, aspect_ratio_range)
assert aspect_ratio_range[0]<=aspect_ratio_range[1], "invalid aspect_ratio_range"
aspect_ratio_range = [1./aspect_ratio_range[1], 1./aspect_ratio_range[0]]
zoom_range = ensure_multiplicity(2, zoom_range)
assert zoom_range[0]<=zoom_range[1], "invalid zoom range"
tile_shape = ensure_multiplicity(len(image_shape), tile_shape)
if n_tiles is None:
tile_coords = _get_tile_coords_overlap(image_shape, tile_shape, overlap_mode, min_overlap, random_stride)
else:
assert len(image_shape)==2, "only 2d images supported when specifying n_tiles"
_, n_tiles_yx = get_stride_2d(image_shape, tile_shape, n_tiles)
tile_coords = _get_tile_coords(image_shape, tile_shape, n_tiles_yx, random_stride)
zoom = random(tile_coords[0].shape[0]) * (zoom_range[1] - zoom_range[0]) + zoom_range[0]
aspect_ratio = [random(tile_coords[0].shape[0]) * (aspect_ratio_range[1] - aspect_ratio_range[0]) + aspect_ratio_range[0] for ax in range(1, len(image_shape)) ]
tile_size_fun = lambda ax : np.rint(zoom * tile_shape[ax]).astype(int) if ax==0 else np.rint(zoom * aspect_ratio[ax-1] * tile_shape[ax]).astype(int)
r_tile_shape = [tile_size_fun(ax) for ax in range(len(image_shape))]
if rank==2:
tile_fun = lambda b,o : np.concatenate([_zoom(b[:, tile_coords[0][i]:tile_coords[0][i] + r_tile_shape[0][i], tile_coords[1][i]:tile_coords[1][i] + r_tile_shape[1][i]], tile_shape, o) for i in range(len(tile_coords[0]))])
else:
tile_fun = lambda b,o : np.concatenate([_zoom(b[:, tile_coords[0][i]:tile_coords[0][i] + r_tile_shape[0][i], tile_coords[1][i]:tile_coords[1][i] + r_tile_shape[1][i], tile_coords[2][i]:tile_coords[2][i] + r_tile_shape[2][i]], tile_shape, o) for i in range(len(tile_coords[0]))])
if isinstance(batch, (list, tuple)): # multi-channel case
interpolation_order= ensure_multiplicity(len(batch), interpolation_order)
return [tile_fun(b, interpolation_order[i]) for i, b in enumerate(batch)]
else:
return tile_fun(batch, interpolation_order)
def _zoom(batch, target_shape, order):
ratio = [i / j for i, j in zip(target_shape, batch.shape[1:-1])]
return zoom(batch, zoom = [1] + ratio + [1], order=order)
def get_stride_2d(image_shape, tile_shape, n_tiles):
if n_tiles == 1:
return (image_shape[0], image_shape[1]), (1, 1)
assert len(image_shape)==2, "only available for 2d images"
tile_shape = ensure_multiplicity(2, tile_shape)
Sy = image_shape[0] - tile_shape[0]
Sx = image_shape[1] - tile_shape[1]
assert Sy>=0, "tile size is too high on first axis"
assert Sx>=0, "tile size is too high on second axis"
a = - n_tiles + 1
b = Sy + Sx
c = Sx*Sy
d = b**2 - 4*a*c
d = np.sqrt(d)
r1 = (-b+d)/(2*a)
r2 = (-b-d)/(2*a)
stride = r1 if r1>r2 else r2
n_tiles_x = (Sx / stride) + 1
n_tiles_y = (Sy / stride) + 1
n_tiles_x_i = round(n_tiles_x)
n_tiles_y_i = round(n_tiles_y)
if abs(n_tiles_x_i-n_tiles_x)<abs(n_tiles_y_i-n_tiles_y):
n_tiles_x = n_tiles_x_i
n_tiles_y = n_tiles // n_tiles_x
else:
n_tiles_y = n_tiles_y_i
n_tiles_x = n_tiles // n_tiles_y
stride_x = Sx // (n_tiles_x - 1) if n_tiles_x > 1 else image_shape[1]
stride_y = Sy // (n_tiles_y - 1) if n_tiles_y > 1 else image_shape[0]
return (stride_y, stride_x), (n_tiles_y, n_tiles_x)
def _get_tile_coords(image_shape, tile_shape, n_tiles, random_stride=False):
n_dims = len(image_shape)
assert n_dims == len(tile_shape), "tile rank should be equal to image rank"
assert n_dims == len(n_tiles), "n_tiles should have same rank as image"
tile_coords_by_axis = [_get_tile_coords_axis(image_shape[i], tile_shape[i], n_tiles[i], random_stride=random_stride) for i in range(n_dims)]
return [a.flatten() for a in np.meshgrid(*tile_coords_by_axis, sparse=False, indexing='ij')]
def _get_tile_coords_overlap(image_shape, tile_shape, overlap_mode=OVERLAP_MODE[1], min_overlap=1, random_stride=False):
n_dims = len(image_shape)
min_overlap = ensure_multiplicity(n_dims, min_overlap)
assert n_dims == len(tile_shape), "tile shape should be equal to image shape"
tile_coords_by_axis = [_get_tile_coords_axis_overlap(image_shape[i], tile_shape[i], overlap_mode, min_overlap[i], random_stride) for i in range(n_dims)]
return [a.flatten() for a in np.meshgrid(*tile_coords_by_axis, sparse=False, indexing='ij')]
def _get_tile_coords_axis_overlap(size, tile_size, overlap_mode=OVERLAP_MODE[1], min_overlap=1, random_stride=False):
if tile_size==size:
return [0]
assert tile_size<size, "tile size must be inferior or equal to size"
o_mode = OVERLAP_MODE.index(overlap_mode)
assert o_mode>=0 and o_mode<=2, "invalid overlap mode"
if o_mode==0:
n_tiles = int(size/tile_size)
elif o_mode==1:
n_tiles = ceil(size/tile_size)
elif o_mode==2:
assert min_overlap<tile_size, "invalid min_overlap: value: {} should be <{}".format(min_overlap, tile_size)
if min_overlap>=0:
n_tiles = 1 + ceil((size - tile_size)/(tile_size - min_overlap)) # size = tile_size + (n-1) * (tile_size - min_overlap)
else:
n_tiles = floor((size - min_overlap)/(tile_size - min_overlap)) # n-1 gaps and n tiles: size = n * tile_size + (n-1)*-min_overlap
return _get_tile_coords_axis(size, tile_size, n_tiles, random_stride)
def _get_tile_coords_axis(size, tile_size, n_tiles, random_stride=False):
if n_tiles==1:
coords = [(size - tile_size)//2]
if random_stride and coords[0]>0:
coords += randint(-coords[0], size-(coords[0]+tile_size), size=1)
return coords
if n_tiles==2:
coords = [0, size-tile_size]
if random_stride:
gap = size - 2 * tile_size
if gap>1:
delta = randint(0, gap//2, size=2)
coords[0] += delta[0]
coords[1] -= delta[1]
return coords
sum_stride = np.abs(n_tiles * tile_size - size)
stride = np.array([0]+[sum_stride//(n_tiles-1)]*(n_tiles-1), dtype=int)
remains = sum_stride%(n_tiles-1)
stride[1:remains+1] += 1
if np.sign(n_tiles * tile_size - size)>0:
stride=-stride
stride = np.cumsum(stride)
coords = np.array([tile_size*idx + stride[idx] for idx in range(n_tiles)])
# print("before random: n_tiles: {}, tile_size: {} size: {}, stride: {}, coords: {}".format(n_tiles, tile_size, size, stride, coords))
if random_stride:
spacing = (size-tile_size)//(n_tiles-1)
if spacing >= tile_size: # no overlap
half_mean_gap = floor(0.5 * (spacing-tile_size) )
else: # overlap
half_mean_gap = ceil(0.5 * spacing )
coords += randint(-half_mean_gap, half_mean_gap+1, size=n_tiles)
coords[0] = max(coords[0], 0)
coords[-1] = min(coords[-1], size-tile_size)
# print("after random: spacing: {}, gap: {}, coords: {}".format(spacing, half_mean_gap, coords))
return coords
def augment_tiles(tiles, rotate, n_dims=2):
flip_axis = [1, 2, (1,2)] if n_dims==2 else [2, 3, (2,3)]
flips = [np.flip(tiles, axis=ax) for ax in flip_axis]
augmented = np.concatenate([tiles]+flips, axis=0)
if rotate:
rot_axis = (1, 2) if n_dims==2 else (2, 3)
augmented = np.concatenate((augmented, np.rot90(augmented, k=1, axes=rot_axis)))
return augmented
AUG_FUN_2D = [
lambda img : img,
lambda img : np.flip(img, axis=0),
lambda img : np.flip(img, axis=1),
lambda img : np.flip(img, axis=(0, 1)),
lambda img : np.rot90(img, k=1, axes=(0,1)),
lambda img : np.rot90(img, k=3, axes=(0,1)), # rot + flip0
lambda img : np.rot90(np.flip(img, axis=1), k=1, axes=(0,1)),
lambda img : np.rot90(np.flip(img, axis=(0, 1)), k=1, axes=(0,1))
]
AUG_FUN_3D = [
lambda img : img,
lambda img : np.flip(img, axis=1),
lambda img : np.flip(img, axis=2),
lambda img : np.flip(img, axis=(1, 2)),
lambda img : np.rot90(img, k=1, axes=(1,2)),
lambda img : np.rot90(img, k=3, axes=(1,2)), # rot + flip0
lambda img : np.rot90(np.flip(img, axis=2), k=1, axes=(1,2)),
lambda img : np.rot90(np.flip(img, axis=(1, 2)), k=1, axes=(1,2))
]
def augment_tiles_inplace(tiles, rotate, n_dims=2):
aug_fun = AUG_FUN_2D if n_dims==2 else AUG_FUN_3D
n_tiles = tiles[0].shape[0] if isinstance(tiles, (tuple, list)) else tiles.shape[0]
    aug = randint(0, len(aug_fun) if rotate else len(aug_fun)//2, size=n_tiles)
if isinstance(tiles, (tuple, list)):
for bidx in range(len(tiles)):
for b in range(n_tiles):
if aug[b]>0: # 0 is identity
tiles[bidx][b] = aug_fun[aug[b]](tiles[bidx][b])
else:
for b in range(n_tiles):
if aug[b]>0: # 0 is identity
tiles[b] = aug_fun[aug[b]](tiles[b])
return tiles
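# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how the helpers above are typically chained: tile coordinates are
# computed with a minimal overlap, tiles are cropped from a dummy image, then
# randomly flipped/rotated in place. All sizes below are arbitrary example values.
def _example_tile_and_augment():
    image = np.random.rand(1, 256, 256, 1)  # (batch, Y, X, channel)
    tile_shape = (64, 64)
    coords_y, coords_x = _get_tile_coords_overlap(image.shape[1:3], tile_shape, min_overlap=8)
    tiles = np.concatenate([image[:, y:y + tile_shape[0], x:x + tile_shape[1], :]
                            for y, x in zip(coords_y, coords_x)], axis=0)
    return augment_tiles_inplace(tiles, rotate=True, n_dims=2)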
```
|
{
"source": "jeanollion/dlutils",
"score": 3
}
|
#### File: distnet/keras_models/intensity_transformer.py
```python
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Conv2D, Input, Flatten, LeakyReLU
from tensorflow.keras.models import Model
def get_intensity_transformer(input_size=64, n_down=2, n_filters=4, n_filters_max=16, batch_mean=True, name="intensity_transformer", dtype="float32"):
input = Input(shape = (input_size, input_size, 1), name="{}_input".format(name), dtype=dtype)
conv = input
filters = n_filters
for l in range(n_down):
conv = Conv2D(filters, kernel_size = (3,3), strides=2, padding='valid', activation=LeakyReLU(alpha=0.5), name="{}_conv_{}".format(name, l))(conv)
filters *=2
if n_filters_max>0:
filters = min(filters, n_filters_max)
conv_flat = Flatten()(conv)
offset = Dense(1, name = "{}_offset".format(name))(conv_flat)
scale = Dense(1, activation = "relu", name = "{}_scale".format(name))(conv_flat)
if batch_mean:
offset = K.mean(offset, axis=0)
scale = K.mean(scale, axis=0)
return Model(input, [offset, scale])
def plug_intensity_transformer(model, intensity_transformer_model, shared_input=True):
if not shared_input:
input = Input(shape = model.input.shape[1:], name="input_to_transform_"+model.name)
thumb_input = intensity_transformer_model.input
offset, scale = intensity_transformer_model.outputs
scaled_input = ( input - offset ) * scale
output = model(scaled_input)
scaled_output = output / scale + offset
return Model([input, thumb_input], scaled_output)
else:
input = Input(shape = intensity_transformer_model.input.shape[1:], name="input_to_transform_"+model.name)
offset, scale = intensity_transformer_model(input)
scaled_input = ( input - offset ) * scale
output = model(scaled_input)
scaled_output = output / scale + offset
return Model(input, scaled_output)
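# --- Hedged usage sketch (not part of the original module) ---
# Shows how the two helpers above could be wired together; the downstream model
# below is a hypothetical placeholder, not a model from this project.
def _example_plug():
    main_input = Input(shape=(64, 64, 1))
    main_model = Model(main_input, Conv2D(1, 3, padding="same")(main_input))
    transformer = get_intensity_transformer(input_size=64, n_down=2)
    return plug_intensity_transformer(main_model, transformer, shared_input=True)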
```
#### File: distnet/keras_models/layers.py
```python
from tensorflow import pad
from tensorflow.keras.layers import Layer, GlobalAveragePooling2D, Reshape, Conv2D, Multiply, Conv3D
from ..utils.helpers import ensure_multiplicity, get_nd_gaussian_kernel
from tensorflow.python.keras.engine.input_spec import InputSpec
import tensorflow as tf
import numpy as np
class ReflectionPadding2D(Layer):
def __init__(self, paddingYX=(1, 1), **kwargs):
paddingYX = ensure_multiplicity(2, paddingYX)
self.padding = tuple(paddingYX)
super().__init__(**kwargs)
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[1] + 2 * self.padding[0], input_shape[2] + 2 * self.padding[1], input_shape[3])
def call(self, input_tensor, mask=None):
padding_height, padding_width = self.padding
return pad(input_tensor, [[0,0], [padding_height, padding_height], [padding_width, padding_width], [0,0] ], 'REFLECT')
def get_config(self):
config = super().get_config().copy()
config.update({"padding": self.padding})
return config
class ConstantConvolution2D(Layer):
def __init__(self, kernelYX, reflection_padding=False, **kwargs):
assert len(kernelYX.shape)==2
for ax in [0, 1]:
assert kernelYX.shape[ax]>=1 and kernelYX.shape[ax]%2==1, "invalid kernel size along axis: {}".format(ax)
self.kernelYX = kernelYX[...,np.newaxis, np.newaxis]
self.reflection_padding=reflection_padding
self.padL = ReflectionPadding2D([(dim-1)//2 for dim in self.kernelYX.shape[:-2]] ) if self.reflection_padding else None
self.n_chan = kwargs.pop("n_chan", None)
super().__init__(**kwargs)
def build(self, input_shape):
n_chan = input_shape[-1] if input_shape[-1] is not None else self.n_chan
if n_chan is None:
self.kernel=None
return
kernel = tf.constant(self.kernelYX, dtype=tf.float32)
if n_chan>1:
self.kernel = tf.tile(kernel, [1, 1, n_chan, 1])
else:
self.kernel = kernel
self.pointwise_filter = tf.eye(n_chan, batch_shape=[1, 1])
def compute_output_shape(self, input_shape):
if self.reflection_padding:
return input_shape
radY = (self.kernelYX.shape[0] - 1) // 2
radX = (self.kernelYX.shape[1] - 1) // 2
return (input_shape[0], input_shape[1] - radY * 2, input_shape[2] - radX * 2, input_shape[3])
def call(self, input_tensor, mask=None):
if self.kernel is None: #build was initiated with None shape
return input_tensor
if self.padL is not None:
input_tensor = self.padL(input_tensor)
return tf.nn.separable_conv2d(input_tensor, self.kernel, self.pointwise_filter, strides=[1, 1, 1, 1], padding='VALID')
def get_config(self):
config = super().get_config().copy()
config.update({"kernelYX": self.kernelYX, "reflection_padding":reflection_padding})
return config
class Gaussian2D(ConstantConvolution2D):
def __init__(self, radius=1, **kwargs):
        gauss_ker = get_nd_gaussian_kernel(radius=radius, ndim=2)  # ConstantConvolution2D adds the channel/filter axes itself
        super().__init__(kernelYX = gauss_ker, **kwargs)
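# --- Hedged usage sketch (not part of the original module) ---
# Applies the Gaussian2D smoothing layer to a dummy single-channel image;
# the spatial size and radius are arbitrary example values.
def _example_gaussian_smoothing():
    x = tf.random.uniform((1, 32, 32, 1))
    blur = Gaussian2D(radius=2, reflection_padding=True)
    return blur(x)  # same spatial shape thanks to reflection padding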
def channel_attention(n_filters, activation='relu'): # TODO TEST + make layer or model + set name to layers
def ca_fun(input):
gap = GlobalAveragePooling2D()(input)
gap = Reshape((1, 1, n_filters))(gap) # or use dense layers and reshape afterwards
conv1 = Conv2D(kernel_size=1, filters = n_filters, activation=activation)(gap)
key = Conv2D(kernel_size=1, filters = n_filters, activation='sigmoid')(conv1)
return Multiply()([key, input])
return ca_fun
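# --- Hedged usage sketch (not part of the original module) ---
# Runs the channel-attention closure eagerly on a dummy feature map whose
# channel count matches n_filters (values below are arbitrary).
def _example_channel_attention():
    feat = tf.random.uniform((2, 16, 16, 32))
    return channel_attention(32)(feat)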
############## TEST #####################
from tensorflow.python.framework import tensor_shape
class SplitContextCenterConv2D(Layer):
def __init__(self, filters, kernelYX, padding="same", **kwargs): #REFLECT
kernelYX=ensure_multiplicity(2, kernelYX)
for k in kernelYX:
assert k%2==1, "kernel size must be uneven on all spatial axis"
if padding=="same":
padding = "CONSTANT"
name = kwargs.pop('name', None)
self.padding_constant_value = kwargs.pop('constant_values', 0)
self.convL = Conv3D(filters=filters, kernel_size = (kernelYX[0], kernelYX[1], 2), padding="valid", name = name+"conv" if name is not None else None, **kwargs)
self.input_spec = InputSpec(ndim=4)
self.padding = padding
self.ker_center = [(k-1)//2 for k in kernelYX]
        super().__init__(name=name)
def compute_output_shape(self, input_shape):
if self.padding=="valid":
return (input_shape[0], input_shape[1] - self.convL.kernel_size[0] + 1 , input_shape[2] - self.convL.kernel_size[1] + 1, self.filters)
else:
return (input_shape[0], input_shape[1], input_shape[2], self.filters)
def build(self, input_shape):
super().build(input_shape)
input_shape = tensor_shape.TensorShape(input_shape)
self.n_channels = int(input_shape[-1])//2
conv_input_shape = input_shape[:-1] + (2, self.n_channels)
self.convL.build(conv_input_shape)
kernel_mask = np.ones(shape=(self.convL.kernel_size)+( self.n_channels, self.convL.filters )) # broadcasting
kernel_mask[self.ker_center[0],self.ker_center[1],0]=0
kernel_mask[:,:self.ker_center[1],1]=0
kernel_mask[:,(self.ker_center[1]+1):,1]=0
kernel_mask[:self.ker_center[0],self.ker_center[1],1]=0
kernel_mask[(self.ker_center[0]+1):,self.ker_center[1],1]=0
self.kernel_mask = tf.convert_to_tensor(kernel_mask, dtype=tf.bool)
def call(self, input_tensor, mask=None):
if self.padding!="valid": # we set explicitely the padding because convolution is performed with valid padding
padding_height, padding_width = self.ker_center
input_tensor = pad(input_tensor, [[0,0], [padding_height, padding_height], [padding_width, padding_width], [0,0] ], mode = self.padding, constant_values=self.padding_constant_value, name = self.name+"pad" if self.name is not None else None)
# convert to 5D tensor -> split in channel dimension to create a new spatial dimension. assumes channel last
# in : BYX[2xC], out: BYX2C
if self.n_channels==1:
conv_in = input_tensor[...,tf.newaxis]
else:
context, center = tf.split(input_tensor, 2, axis=-1)
conv_in = tf.concat([context[...,tf.newaxis, :], center[...,tf.newaxis, :]], axis=-2)
self.convL.kernel.assign(tf.where(self.kernel_mask, self.convL.kernel, tf.zeros_like(self.convL.kernel))) # set explicitely the unused weights to zero
conv = self.convL(conv_in) # BYX1F (valid padding on last conv axis -> size 1)
return conv[:, :, :, 0, :]
# TODO add periodic padding so that each color has access to the 2 other ones. test to perform the whole net in 4D. see https://stackoverflow.com/questions/39088489/tensorflow-periodic-padding
class Conv3DYXC(Layer):
def __init__(self, filters, kernelYX, padding="same", **kwargs): #padding can also be REFLECT
self.kernelYX=tuple(ensure_multiplicity(2, kernelYX))
for k in kernelYX:
assert k%2==1, "kernel size must be uneven on all spatial axis"
self.ker_center = [(k-1)//2 for k in kernelYX]
if padding=="same":
padding = "CONSTANT"
self._name = kwargs.pop('name', "Conv3DYXC")
self.padding_constant_value = kwargs.pop('constant_values', 0)
self.input_spec = InputSpec(ndim=4)
self.padding = padding
self.filters=filters
self.conv_args = kwargs
        super().__init__(name=self._name)
def compute_output_shape(self, input_shape):
if self.padding=="valid":
return (input_shape[0], input_shape[1] - self.convL.kernel_size[0] + 1 , input_shape[2] - self.convL.kernel_size[1] + 1, self.filters)
else:
return (input_shape[0], input_shape[1], input_shape[2], self.filters)
def build(self, input_shape):
super().build(input_shape)
input_shape = tensor_shape.TensorShape(input_shape)
n_channels = int(input_shape[-1])
self.convL = Conv3D(filters=self.filters, kernel_size = self.kernelYX + (n_channels,), padding="valid", name = self._name+"conv" if self._name is not None else None, **self.conv_args)
conv_input_shape = input_shape + (1,)
self.convL.build(conv_input_shape)
def call(self, input_tensor, mask=None):
if self.padding!="valid": # we set explicitely the padding because convolution is performed with valid padding
padding_height, padding_width = self.ker_center
input_tensor = pad(input_tensor, [[0,0], [padding_height, padding_height], [padding_width, padding_width], [0,0] ], mode = self.padding, constant_values=self.padding_constant_value, name = self.name+"pad" if self.name is not None else None)
conv_in = input_tensor[...,tf.newaxis] #BYXC1 (convert to 5D tensor)
conv = self.convL(conv_in) # BYX1F (valid padding on last conv axis -> size 1)
return conv[:, :, :, 0, :] # BYXF
```
#### File: distnet/keras_models/self_attention.py
```python
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D
from tensorflow.keras.models import Model
import numpy as np
class SelfAttention(Model):
def __init__(self, d_model, spatial_dims, positional_encoding=True, name="self_attention"):
'''
d_model : number of output channels
        spatial_dims : spatial dimensions of input tensor (y, x)
if positional_encoding: depth must correspond to input channel number
adapted from: https://www.tensorflow.org/tutorials/text/transformer
'''
super().__init__(name=name)
self.d_model = d_model
self.spatial_dims=spatial_dims
self.spatial_dim = np.prod(spatial_dims)
self.wq = Dense(self.d_model, name=name+"_q")
self.wk = Dense(self.d_model, name=name+"_k")
self.wv = Dense(self.d_model, name=name+"_w")
self.positional_encoding=positional_encoding
if positional_encoding:
self.pos_embedding = Embedding(self.spatial_dim, d_model, name=name+"pos_enc") # TODO test other positional encoding. in particular that encodes X and Y
def call(self, x):
'''
x : tensor with shape (batch_size, y, x, channels)
'''
shape = tf.shape(x)
batch_size = shape[0]
#spatial_dims = shape[1:-1]
#spatial_dim = tf.reduce_prod(spatial_dims)
depth_dim = shape[3]
if self.positional_encoding:
x_index = tf.range(self.spatial_dim, dtype=tf.int32)
pos_emb = self.pos_embedding(x_index) # (spa_dim, d_model)
pos_emb = tf.reshape(pos_emb, (self.spatial_dims[0], self.spatial_dims[1], self.d_model)) #for broadcasting purpose
x = x + pos_emb # broadcast
q = self.wq(x) # (batch_size, *spa_dims, d_model)
k = self.wk(x) # (batch_size, *spa_dims, d_model)
v = self.wv(x) # (batch_size, *spa_dims, d_model)
q = tf.reshape(q, (batch_size, -1, depth_dim)) # (batch_size, spa_dim, d_model)
k = tf.reshape(k, (batch_size, -1, depth_dim))
v = tf.reshape(v, (batch_size, -1, depth_dim))
# scaled_attention.shape == (batch_size, spa_dims, depth)
# attention_weights.shape == (batch_size, spa_dims, spa_dims)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v)
output = tf.reshape(scaled_attention, (batch_size, self.spatial_dims[0], self.spatial_dims[1], self.d_model))
tf.identity(attention_weights, name=self.name+"_attention_weights")
return output, attention_weights
def compute_output_shape(self, input_shape):
return input_shape[:-1]+(self.d_model,), (input_shape[0],self.spatial_dim,self.spatial_dim)
def scaled_dot_product_attention(q, k, v):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
Returns:
output, attention_weights
from : https://www.tensorflow.org/tutorials/text/transformer
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
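# --- Hedged usage sketch (not part of the original module) ---
# Runs the SelfAttention block eagerly on a dummy feature map. With
# positional_encoding=True, d_model must equal the input channel count;
# all sizes below are arbitrary example values.
def _example_self_attention():
    sa = SelfAttention(d_model=8, spatial_dims=(16, 16))
    x = tf.random.uniform((2, 16, 16, 8))
    out, weights = sa(x)
    return out.shape, weights.shape  # (2, 16, 16, 8), (2, 256, 256)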
```
|
{
"source": "jeanollion/n2v_dataset_iterator",
"score": 2
}
|
#### File: n2v_dataset_iterator/n2v_dataset_iterator/value_manipulators.py
```python
import numpy as np
# MOST CODE FROM NOISE TO VOID: https://github.com/juglab/n2v/blob/master/n2v/utils/n2v_utils.py
def pm_cst(value = 0):
def cst_fun(patch, coords, dims):
return [value] * len(coords[0])
return cst_fun
def pm_min():
def min_fun(patch, coords, dims):
vmin = patch.min()
return [vmin] * len(coords[0])
return min_fun
def pm_normal_withoutCP():
def normal_withoutCP(patch, coords, dims):
vals = []
for coord in zip(*coords):
rand_coords = random_neighbor(patch.shape, coord)
vals.append(patch[tuple(rand_coords)])
return vals
return normal_withoutCP
def pm_uniform_withCP(local_sub_patch_radius):
def random_neighbor_withCP_uniform(patch, coords, dims):
vals = []
for coord in zip(*coords):
sub_patch = get_subpatch(patch, coord,local_sub_patch_radius)
rand_coords = [np.random.randint(0, s) for s in sub_patch.shape[0:dims]]
vals.append(sub_patch[tuple(rand_coords)])
return vals
return random_neighbor_withCP_uniform
def pm_normal_additive(pixel_gauss_sigma):
def pixel_gauss(patch, coords, dims):
vals = []
for coord in zip(*coords):
vals.append(np.random.normal(patch[tuple(coord)], pixel_gauss_sigma))
return vals
return pixel_gauss
def pm_normal_fitted(local_sub_patch_radius):
def local_gaussian(patch, coords, dims):
vals = []
for coord in zip(*coords):
sub_patch = get_subpatch(patch, coord, local_sub_patch_radius)
axis = tuple(range(dims))
vals.append(np.random.normal(np.mean(sub_patch, axis=axis), np.std(sub_patch, axis=axis)))
return vals
return local_gaussian
def pm_identity(local_sub_patch_radius):
def identity(patch, coords, dims):
vals = []
for coord in zip(*coords):
vals.append(patch[coord])
return vals
return identity
def random_neighbor(shape, coord):
rand_coords = sample_coords(shape, coord)
while np.any(rand_coords == coord):
rand_coords = sample_coords(shape, coord)
return rand_coords
def sample_coords(shape, coord, sigma=4):
return [normal_int(c, sigma, s) for c, s in zip(coord, shape)]
def normal_int(mean, sigma, w):
return int(np.clip(np.round(np.random.normal(mean, sigma)), 0, w - 1))
def get_subpatch(patch, coord, local_sub_patch_radius):
start = np.maximum(0, np.array(coord) - local_sub_patch_radius)
end = start + local_sub_patch_radius*2 + 1
shift = np.minimum(0, patch.shape - end)
start += shift
end += shift
slices = [ slice(s, e) for s, e in zip(start, end)]
return patch[tuple(slices)]
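# --- Hedged usage sketch (not part of the original module) ---
# Masks a few pixels of a dummy 2D patch with values drawn uniformly from a
# local neighbourhood, the typical Noise2Void-style manipulation.
def _example_manipulate():
    patch = np.random.rand(64, 64)
    coords = (np.array([10, 20, 30]), np.array([12, 22, 32]))  # (y, x) indices
    manipulator = pm_uniform_withCP(local_sub_patch_radius=5)
    patch[coords] = manipulator(patch, coords, dims=2)
    return patch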
```
|
{
"source": "jeanollion/TaLiSSman",
"score": 2
}
|
#### File: talissman/training/training.py
```python
import edt
import numpy as np
import h5py
from dataset_iterator import MultiChannelIterator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import mean_squared_error as mse
from dataset_iterator.tile_utils import extract_tile_random_zoom_function
from .data_augmentation import *
from ..model import get_unet
def get_train_test_iterators(dataset,
center_range:list, scale_range:list,
group_keyword:list = None,
group_proportion:list = None,
batch_size:int = 4,
n_z:int=5, z_step=2,
tile_params:dict = dict(tile_shape=(256, 256), n_tiles=9, zoom_range=[0.6, 1.6], aspect_ratio_range=[0.75, 1.5] ),
elasticdeform_parameters:dict = {},
noise_intensity:float = 0.035,
raw_feature_name:str="/raw", label_feature_name:str="/regionLabels",
training_selection_name:str="train/", validation_selection_name:str="eval/" ):
"""Generates image iterators for training/validation.
    Perform data augmentation: pick random slices, random zoom / random flip / rotate90, add random iid gaussian noise / gaussian blur, elastic deformation, random normalization
Parameters
----------
dataset : datasetIO or path to dataset (str)
dataset
if group_keyword are provided, dataset must be a .h5 file. group_keyword is expected in the path of the datasets, after validation_selection_name or training_selection_name.
center_range : list
[min center, max center] for normalization in data augmentation. When several groups are defined, one array per group should be given.
scale_range : list
[min scale, max scale] for normalization in data augmentation. When several groups are defined, one array per group should be given.
group_keyword : list
list of keywords that define distinct groups (optional, can be None): usually a dataset name.
group_proportion : list
list of proportion for each group in each mini batch (statistically) (optional, can be None)
batch_size : int
size of mini batch
n_z : int
number of z-slices that will be picked from the dataset.
    z_step : int
step between picked z-slices
tile_params : dict
parameter for tiling:
tile_shape: shape (Y,X) of returned tiles
n_tiles: number of tiles per image
zoom_range: random zoom applied at cropping to Y, X
aspect_ratio_range: limits the aspect ratio distortion generated by the random zoom
elasticdeform_parameters : dict
None for no elastic deformation OR parameters for elasticdeform augmentation:
order: interpolation order
mode: out-of-bound strategy
points : number of reference points
sigma : intensity of deformation
noise_intensity : float
intensity of added iid gaussian noise
raw_feature_name : str
input images dataset name
label_feature_name : str
labeled images dataset name
training_selection_name : str
name of the training selection
validation_selection_name : str
name of the validation selection
Returns
-------
data iterator
tensor shape: batch_size * n_tiles, tile_shape[0], tile_shape[1], n_z
"""
extract_tile_function = extract_tile_random_zoom_function(**tile_params) if tile_params is not None else None
def random_channel_slice(nchan): # random offset on chosen slices to simulate focus variations
noff=nchan-1-(n_z-1)*z_step + 1
off = np.random.randint(noff)
idx = [off + z_step*i for i in range(n_z)]
return idx
if group_keyword is not None and isinstance(group_keyword, (list, tuple)):
        assert len(center_range)==len(group_keyword) and len(scale_range)==len(group_keyword), "when several groups are provided, as many center_range / scale_range arrays should be provided"
for i in range(len(group_keyword)):
assert len(center_range[i])==2 and len(scale_range[i])==2, "invalid scale_range / center_range"
assert group_proportion is None or len(group_proportion)==len(group_keyword)
scale_fun = [[get_normalization_fun(center_range[i], scale_range[i]), None] for i in range(len(group_keyword))]
validation_grp = [_join_grp_kw(validation_selection_name, kw) for kw in group_keyword]
training_grp = [_join_grp_kw(training_selection_name, kw) for kw in group_keyword]
else:
scale_fun = [[get_normalization_fun(center_range, scale_range), None]]
if group_keyword is None:
validation_grp = validation_selection_name
training_grp = training_selection_name
else:
validation_grp = _join_grp_kw(validation_selection_name, group_keyword)
training_grp = _join_grp_kw(training_selection_name, group_keyword)
params = dict(dataset=dataset,
group_scaling = scale_fun,
group_proportion=group_proportion,
channel_keywords=[raw_feature_name, label_feature_name], # channel keyword must correspond to the name of the extracted features
output_channels= [1],
mask_channels = [1],
channel_slicing_channels = {0:random_channel_slice},
elasticdeform_parameters = elasticdeform_parameters,
extract_tile_function = extract_tile_function,
channels_postprocessing_function = _channels_postpprocessing(get_illumination_aug_fun(None, noise_intensity), _get_edt_fun()),
batch_size=batch_size,
perform_data_augmentation=True,
shuffle=True)
train_it = MultiChannelIterator(group_keyword=training_grp, **params)
test_it = MultiChannelIterator(group_keyword=validation_grp, **params)
return train_it, test_it
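# --- Hedged usage sketch (not part of the original module) ---
# Example call with a single dataset group; the file path and normalization
# ranges below are hypothetical placeholder values.
# train_it, test_it = get_train_test_iterators("/path/to/dataset.h5",
#                                              center_range=[0., 0.2],
#                                              scale_range=[0.8, 1.2],
#                                              batch_size=4, n_z=5, z_step=2)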
def _get_edt_fun():
return lambda labels : np.stack([edt.edt(labels[i,...,0], black_border=False)[...,np.newaxis] for i in range(labels.shape[0])])
def _channels_postpprocessing(raw_fun, label_fun):
    def fun(batch_by_channel):
        batch_by_channel[0] = raw_fun(batch_by_channel[0])
        batch_by_channel[1] = label_fun(batch_by_channel[1])
        return batch_by_channel
    return fun
def _join_grp_kw(kw1, kw2):
if kw1 is None or len(kw1)==0:
return kw2
elif kw2 is None or len(kw2)==0:
return kw1
else:
return kw1 + '.*' + kw2 # TODO : only works with h5py datasets
def get_model(model_params:dict=dict(), learning_rate:float = 2e-4, saved_weights_file:str=None):
"""Initialize model.
Parameters
----------
model_params : dict
parameters for ..unet.get_unet
learning_rate : float
initial learning rate
saved_weights_file : str
path to saved weight
Returns
-------
compiled model
"""
model = get_unet(**model_params)
if saved_weights_file is not None:
model.load_weights(saved_weights_file)
model.compile(optimizer=Adam(learning_rate), loss=mse)
return model
```
|
{
"source": "Jeanot-Zubler/clock-out-timer",
"score": 3
}
|
#### File: Jeanot-Zubler/clock-out-timer/time_left.py
```python
import re
import datetime as dt
file_name = "ReportExport.rtf"
def get_lines_from_file(f):
matches = []
regex_string = r"\s(\d{2}.\d{2})((?:\s{1,2}\d{4})+)"
for line in f:
match = re.search(regex_string, line)
if match != None:
matches.append(match.groups())
return matches
def match_to_times(match):
year = dt.datetime.now().year
day, month = match[0].split(".")
times = match[1].split()
datetimes = [dt.datetime(year, int(month), int(day), int(
time[:2]), int(time[2:])) for time in times]
return datetimes
def calculate_leaving_time(datetimes, target_time="8:30"):
h, m = target_time.split(":")
target_time = dt.timedelta(hours=int(h), minutes=int(m))
worked = dt.timedelta(0)
now = dt.datetime.now().replace(second=0, microsecond=0)
i = 1
while i < len(datetimes):
worked += datetimes[i] - datetimes[i-1]
i += 2
if len(datetimes) % 2 == 0:
print(
f"You have worked {worked} so far and are currently off the clock.")
else:
worked += now - datetimes[-1]
print(
f"You have worked {worked} so far and are currently on the clock.")
if worked < target_time:
print(
f"You will be finished at {now+target_time-worked}.")
else:
print(f"You made {worked-target_time} of overtime so far.")
def main(file_name="ReportExport.rtf", target_time="8:30"):
with open(file_name, "r") as f:
matches = get_lines_from_file(f)
calculate_leaving_time(match_to_times(matches[-1]), target_time)
if __name__ == "__main__":
main()
```
|
{
"source": "jeanpaulrsoucy/archivist",
"score": 2
}
|
#### File: archivist/utils/common.py
```python
from datetime import datetime
import pytz
# import classes
from archivist.classes.Archivist import Archivist as a
# define functions
def get_datetime():
tz = a.config["project"]["tz"]
t = datetime.now(pytz.timezone(tz))
return t
```
|
{
"source": "jean-philippe-martin/semantic-notes",
"score": 3
}
|
#### File: jean-philippe-martin/semantic-notes/kb.py
```python
from typing import List, Iterable, Dict, Set, Union, Any
from collections import defaultdict
# Check the type annotations like this:
# mypy --py2 graph.py
# The type that goes into a KB, and what a KB
# still mostly feels like (modulo the few added features).
KBDict = Dict[str, Dict[str, List[Any]]]
class KB(dict):
"""A very simple "knowledge base".
It's organized in pages. Each page has a name and a set of key/value pairs.
The keys are called "attributes" of the page.
Each value is a list (possibly with a single element).
The KB can be used like a normal dict, but it offers a few convenience
features and includes the idea that pages may have multiple names.
To set an alternate name for a page, set its 'aka' property (see tests).
Page lookup is also case-insensitive:
if you have pages 'aaa' and 'AAA' then a lookup for 'AAA' will return
the latter, but a lookup for 'aAA' will return the former.
Two names refer to the same page if and only if normalize_page returns
the same value for both.
Examples:
>>> ppl={'John':{'eye_color': ['blue']}}
>>> k=KB(ppl)
>>> k['john']['eye_color']
['blue']
>>> ppl['Bob']={'aka': ['Bobby tables'], 'eye_color': ['brown']}
>>> k=KB(ppl)
>>> k['bobby tables']['eye_color']
['brown']
>>> k.get_attribute('bobby tables', 'eye_color')
['brown']
"""
def __init__(self, dict_of_dict):
# type: (KBDict) -> None
self.aka = {} # type: Dict[str, str]
self.update(dict_of_dict)
self._fill_aka()
def __getitem__(self, key):
# type: (str) -> Dict[str, List[Any]]
return dict.__getitem__(self,self.normalize_page(key))
def get(self, key, default=None):
# type: (str, Any) -> Dict[str, List[Any]]
return dict.get(self,self.normalize_page(key), default)
def has_key(self, page):
# type: (str) -> bool
return dict.has_key(self, self.normalize_page(page))
def normalize_page(self, key):
# type: (str) -> str
"""page name or alias -> page name"""
if self.aka.has_key(key):
return self.aka[key]
if dict.has_key(self, key):
return key
# If the page name has capitals, and we do too but in different places,
# should still find the page name.
# If the page doesn't exist, return the name unmodified.
return self.aka.get(key.lower(), key)
def is_same_page(self, a, b):
# type: (str, str) -> bool
"""true if a,b are names of the same page, even if aliases."""
return self.normalize_page(a) == self.normalize_page(b)
def get_attribute(self, key, attribute, default=None):
# type: (str, str, List[Any]) -> List[Any]
"""kb[key][attribute], or None if either's missing."""
page = self.get(key, None)
if not page: return default
return page.get(attribute, default)
def get_unique_attribute(self, key, attribute, default=None):
# type: (str, str, List[Any]) -> Any
"""kb[key][attribute][0], or None if either's missing."""
return unique(self.get_attribute(key,attribute,default))
def _fill_aka(self):
# type: () -> None
for k,v in dict.items(self):
a = v.get("aka", [])
for b in a:
if self.aka.get(b)!=None:
print(str(b)+" is defined twice: as "+self.aka[b] + " and "+k)
continue
self.aka[b]=k
# put in 'baby' as an aka for the 'Baby' page, *unless* 'baby' is already
# a registered aka for something else.
for k in dict.keys(self):
if not self.aka.has_key(k.lower()):
self.aka[k.lower()] = k
a = self.get(k).get("aka", [])
for b in a:
if not self.aka.has_key(b.lower()):
self.aka[b.lower()] = k
KB_or_Dict = Union[KB, KBDict]
def merge(kblist):
# type: (List[KBDict]) -> KBDict
"""Merges the dicts together into a single one by appending all the keys.
Example:
>>> a = {'mars': {'isa': ['planet']}}
>>> b = {'mars': {'color': ['red']}}
>>> sorted(merge([a,b])['mars'].keys())
['color', 'isa']
"""
ret = {} # type: KBDict
for kb in kblist:
for k, v in kb.items():
if k not in ret:
ret[k] = defaultdict(list)
for attrib, values in v.items():
ret[k][attrib] += values
return ret
def unique(value):
"""Check that there is only one value in the list, and return it.
>>> kb = KB({'John':{'eye_color': ['blue']}})
>>> unique(kb.get_attribute('John', 'eye_color'))
'blue'
This is handy in the context of KB, where everything's a list but
it's common to expect that there's only one value.
"""
if not value: return None
if len(value)!=1:
raise ValueError('Expected a single value, got multiple (%s)' % len(value))
return value[0]
def unlist(value_or_list):
x=value_or_list
if isinstance(x, str) or isinstance(x, unicode):
return x
try:
if len(x)==1:
return x[0]
except:
pass
return x
```
#### File: jean-philippe-martin/semantic-notes/split.py
```python
from typing import List, Iterable, Dict, Set, Union, Tuple
def strings(lines):
# type: (Iterable[str]) -> Iterable[Tuple[str, List[str]]]
"""splits the lines into (title, lines) sections."""
ret=[] # type: List[str]
title=''
ready=False
for l in lines:
if len(l)>2 and l[0]=='[' and (l[-1]==']' or l.endswith(']\n')):
if ready:
yield (title, ret)
title=l.strip()[1:-1]
ret=[]
ready=True
else:
ret.append(l)
if ready:
yield (title, ret)
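# --- Hedged usage sketch (not part of the original module) ---
# >>> list(strings(['[planets]', 'mars', 'venus', '[stars]', 'sun']))
# [('planets', ['mars', 'venus']), ('stars', ['sun'])]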
def file(filename):
# type: (str) -> Iterable[Tuple[str, List[str]]]
"""File name -> (title, lines) sections."""
with open(filename, 'rt') as f:
# we force evaluation while the file is still open
return list(strings(f))
```
#### File: jean-philippe-martin/semantic-notes/test_interpret.py
```python
import doctest
import interpret
from parse import units
import unittest
import graph
from kb import unique
from parse import unit_perhaps
class TestPlanets(unittest.TestCase):
"Tests for parse.py."
def test_planets(self):
# shorthand
kg = units.kg
# load the data
pages, kb = interpret.file('testdata/planets.txt')
# The KB holds the quantities with units, so we can combine them
# meaningfully even though one is in kg and the other in "earth_mass".
got = unique(kb['the earth']['mass']) + unique(kb['mars']['mass'])
self.assertTrue(6E24*kg < got < 7E24*kg)
def test_sum(self):
# shorthand
kg = units.kg
# load the data
pages, kb = interpret.file('testdata/planets.txt')
# grab everything that's a planet
planets = graph.ancestors(kb, 'isa', 'planet')
# sum their mass
total_mass = sum(unique(kb[x]['mass']) for x in planets)
# we don't have all the planets in the sample... once we do,
# we'll have to update this test!
self.assertTrue(6E24*kg < total_mass < 8E24*kg)
def test_split(self):
foo=['abc', 12, 'd\ne', 13]
b,a = interpret.split_contents(foo, '\n')
self.assertEquals(b, ['abc', 12, 'd'])
self.assertEquals(a, ['e', 13])
foo=['abc', 12, 'de']
b,a = interpret.split_contents(foo, '\n')
self.assertEquals(b, foo)
self.assertEquals(a, [])
foo=['abc', 12, '\n']
b,a = interpret.split_contents(foo, '\n')
self.assertEquals(b, ['abc',12,''])
self.assertEquals(a, [''])
def test_split_all_one(self):
foo=['abc', 12, 'd\ne', 13]
ret = interpret.split_all(foo, '\n')
b=ret[0]
a=ret[1]
self.assertEquals(b, ['abc', 12, 'd'])
self.assertEquals(a, ['e', 13])
foo=['abc', 12, 'de']
ret = interpret.split_all(foo, '\n')
b=ret[0]
self.assertEquals(b, foo)
self.assertEquals(len(ret), 1)
foo=['abc', 12, '\n']
ret = interpret.split_all(foo, '\n')
b=ret[0]
a=ret[1]
self.assertEquals(b, ['abc', 12, ''])
self.assertEquals(a, [''])
def test_split_all_multi(self):
foo=['abc', 12, 'd\ne', 13, '\n', 14]
parts = interpret.split_all(foo, '\n')
self.assertEquals(parts[0], ['abc', 12, 'd'])
self.assertEquals(parts[1], ['e', 13, ''])
self.assertEquals(parts[2], ['', 14])
self.assertEquals(len(parts), 3)
foo=['', 'name,parent,parent']
parts = interpret.split_all(foo, ',')
self.assertEquals(parts, [['', 'name'], ['parent'], ['parent']])
def test_table(self):
p,kb=interpret.file('testdata/table.txt')
html = p['table demo'].html()
self.assertTrue('<td>' in html)
self.assertTrue('<th>' in html)
self.assertTrue('<table' in html)
def test_image(self):
p,kb=interpret.file('testdata/img.txt')
html = p['basic image'].html()
self.assertTrue('<img' in html)
self.assertTrue('src="foo.jpg"' in html)
html = p['image with width'].html()
self.assertTrue('<img' in html)
self.assertTrue('width="50%"' in html)
self.assertTrue('src="foo.jpg"' in html)
def test_instance_table(self):
p,kb=interpret.file('testdata/instancetable.txt')
html = p['Planet'].html()
self.assertTrue('<td>' in html)
self.assertTrue('<th>' in html)
self.assertTrue('<table' in html)
self.assertTrue('earth' in kb)
self.assertTrue('aka' in kb['planet'])
# "Planet" is not a planet (it's a category).
self.assertFalse(kb.get_unique_attribute('Planet', 'isa'))
self.assertTrue(kb.get_unique_attribute('earth', 'isa') == 'Planet')
# units are parsed correctly
self.assertTrue(unit_perhaps('12000 km') < kb['earth']['diameter'][0] < unit_perhaps('13000 km'))
self.assertTrue(kb.get_unique_attribute('earth', 'color') == 'blue')
# the info is merged with that section's
self.assertTrue(kb.get_unique_attribute('earth', 'mostly') == 'water')
if __name__ == '__main__':
unittest.main()
```
#### File: jean-philippe-martin/semantic-notes/test_kb.py
```python
import doctest
import kb
import unittest
class TestKB(unittest.TestCase):
"Tests for kb.py."
def test_get(self):
d={'a': {'b': 'c'}}
x = kb.KB(d)
self.assertEqual(x['a']['b'], 'c')
self.assertEqual(x.get('a', 'default')['b'], 'c')
self.assertEqual(x.get('z', {'b': 'x'})['b'], 'x')
def test_is_same_page(self):
d={'a': {'b': 'c'}}
x = kb.KB(d)
self.assertEqual(x.is_same_page('a', 'a'), True)
self.assertEqual(x.is_same_page('a', 'z'), False)
def test_get_attribute(self):
d={'a': {'b': 'c'}}
x = kb.KB(d)
self.assertEqual(x.get_attribute('a', 'b'), 'c')
self.assertEqual(x.get_attribute('a', 'z'), None)
self.assertEqual(x.get_attribute('z', 'z'), None)
def test_keys(self):
d={'a': {'b': 'c'}, 'x': {'y': 'z'}}
x = kb.KB(d)
self.assertEqual(sorted(list(x.keys())), ['a','x'])
def test_aka(self):
d={'a': {'b': 'c', 'aka': ['alpha']}}
x = kb.KB(d)
self.assertEqual(x.get_attribute('alpha','b'), 'c')
self.assertEqual(x['alpha']['b'], 'c')
def test_has_key(self):
d={'a': {'b': 'c', 'aka': ['alpha']}}
x = kb.KB(d)
self.assertEqual(x.has_key('a'), True)
self.assertEqual(x.has_key('alpha'), True)
def test_case_insensitive(self):
d={'bob': {'w': 'guy'}, 'BOB': {'w': 'abbreviation'}, 'Bobby': {'w': 'name'},
'Ace': {'w': '1'}, 'aCe': {'w': '2'}, 'acE': {'w': '3'}}
x = kb.KB(d)
self.assertEqual(x.get_attribute('bob','w'), 'guy')
self.assertEqual(x.get_attribute('BOB','w'), 'abbreviation')
self.assertEqual(x.get_attribute('Bob','w'), 'guy')
self.assertEqual(x['bobby']['w'], 'name')
self.assertEqual(x['BOBby']['w'], 'name')
self.assertEqual(x.is_same_page('bobby', 'BobbY'), True)
self.assertEqual(x.is_same_page('bob', 'BOB'), False)
self.assertEqual(x.get_attribute('Ace','w'), '1')
self.assertEqual(x.get_attribute('aCe','w'), '2')
self.assertEqual(x.get_attribute('acE','w'), '3')
self.assertEqual(x.is_same_page('Ace', 'acE'), False)
def test_case_insensitive_and_aka(self):
d={'bob': {'w': 'guy', 'aka': ['Bobby', 'foo', 'roar']}, 'BOB': {'w': 'abbreviation'},
'bobbY': {'w': 'other guy'}, 'foO': {'w': 'Mr.T'}}
x = kb.KB(d)
self.assertEqual(x.is_same_page('bob', 'Bobby'), True)
self.assertEqual(x.is_same_page('bob', 'ROAR'), True)
self.assertEqual(x.is_same_page('bob', 'foo'), True)
self.assertEqual(x.is_same_page('bob', 'foO'), False)
# 'FOO' could be redirected to either bob or foO, spec doesn't say
class TestDocs(unittest.TestCase):
def test_docs(self):
doctest.testmod(kb)
if __name__ == '__main__':
unittest.main()
```
#### File: jean-philippe-martin/semantic-notes/test_parse.py
```python
import doctest
import parse
from parse import Tagged
import unittest
class TestParse(unittest.TestCase):
"Tests for parse.py."
def test_no_crash(self):
x = parse.string('hi')
y = parse.string('hi `b(world)')
z = parse.string('hi `b(`i(small) world)')
z = parse.string('`very(`very `deep(thoughts) `/)')
def test_backtick_legal_in_tag(self):
x = parse.string('`big`hello(world)')
self.assertEqual(len(x.contents), 1)
self.assertEqual(x.contents[0].tag, 'big`hello')
self.assertEqual(str(x),'`big`hello(world)')
def test_strings_together(self):
x = parse.string('hi')
self.assertEqual(len(x.contents), 1)
x = parse.string('hi world')
self.assertEqual(len(x.contents), 1)
x = parse.string('hi\nbig\nworld')
self.assertEqual(len(x.contents), 1)
x = parse.string('TITLE\n\nsome text\nblabla\nfinal line\n')
self.assertEqual(len(x.contents), 1)
def test_first_tag_empty(self):
x = parse.string('hello\nthere')
self.assertEqual(x.tag, '')
def test_paren_tag(self):
x = parse.string('hello `b(world)')
self.assertEqual(len(x.contents), 2)
self.assertEqual(x.contents[0], 'hello ')
self.assertEqual(x.contents[1].tag, 'b')
self.assertEqual(x.contents[1].contents[0], 'world')
def test_block_tag(self):
for s in [
'hello `b(world)',
'hello `b world`/',
'hello `b world`/ ',
'hello `b world`/\n',
'hello `b\nworld`/']:
x = parse.string(s)
self.assertEqual(len(x.contents), 2)
self.assertEqual(x.contents[0], 'hello ')
self.assertEqual(x.contents[1].tag, 'b')
self.assertEqual(x.contents[1].contents[0], 'world')
def test_strings(self):
ss=['hello', '\b(world)']
x=parse.strings(ss)
self.assertEqual(len(x.contents), 2)
ss=('hello', '\b(world)')
x=parse.strings(ss)
self.assertEqual(len(x.contents), 2)
ss=[x for x in ('hello', '\b(world)')]
x=parse.strings(ss)
self.assertEqual(len(x.contents), 2)
class TestDocs(unittest.TestCase):
def test_docs(self):
doctest.testmod(parse, globs={'parse':parse})
if __name__ == '__main__':
unittest.main()
```
#### File: jean-philippe-martin/semantic-notes/transform.py
```python
from collections import namedtuple
from kb import KB
from kb import KB_or_Dict
from typing import List, Iterable, Dict, Set, Union, Any, Tuple, Callable, NamedTuple
def addvalue(kb, page, attribute, newvalue):
# type: (KB_or_Dict, str, str, Any) -> None
if not kb.has_key(page): kb[page]={}
if not kb[page].has_key(attribute): kb[page][attribute]=[]
if not newvalue in kb[page][attribute]:
kb[page][attribute] += [newvalue]
def addsymmetricalrelation(kb, page1, page2, attribute):
# type: (KB_or_Dict, str, str, str) -> None
addvalue(kb, page1, attribute, page2)
addvalue(kb, page2, attribute, page1)
def getvalues(kb, page, attribute):
# type: (KB_or_Dict, str, str) -> List[Any]
if not kb.has_key(page): return []
if not kb[page].has_key(attribute): return []
return kb[page][attribute]
## Page selectors: given the kb and a source and target pages,
## return True or False.
PageSelector = Callable[[KB_or_Dict, str, str], bool]
def ofa(whatitmustbe):
# type: (Any) -> PageSelector
"""Page selector that picks sources that 'isa' the argument."""
return lambda kb, src, tgt: whatitmustbe in getvalues(kb, src, 'isa')
def whoisa(whatitmustbe):
# type: (Any) -> PageSelector
"""Page selector that picks targets that 'isa' the argument."""
return lambda kb, src, tgt: whatitmustbe in getvalues(kb, tgt, 'isa')
## Page rules: given the kb and a page (source).
## Return the list of matching pages (targets).
PageRule = Callable[[KB_or_Dict, str], List[Any]]
def the(attribute, pageselector=lambda kb, src, tgt: True):
# type: (str, PageSelector) -> PageRule
"""a rule that returns the value of that attribute, if any.
if pageselector is set then only return values for pages that match it."""
return lambda kb, k: [p for p in kb[k].get(attribute, []) if pageselector(kb, k, p)]
def chain_inner(kb, k, attributes):
"from a page, return the set of pages obtained by following the attributes, in order."
candidates = set([k])
for at in attributes:
next = []
for src in candidates:
next += kb[src].get(at,[])
candidates = set(next)
return candidates
def chain(attributes, pageselector=lambda kb, src, tgt: True):
# type: (List[str], PageSelector) -> PageRule
"""a rule that returns the pages that we arrive to after following the chain."""
return lambda kb, k: [p for p in chain_inner(kb, k, attributes) if pageselector(kb, k, p)]
def hasa(attribute):
# type: (str) -> PageRule
"""A rule that picks everyone who has that attribute."""
return lambda kb, k: [k] if getvalues(kb, k, attribute) else []
## Page actions: given the kb, a source page and the page being acted on.
## Returns nothing, but updates the kb.
PageAction = Callable[[KB_or_Dict, str, str], None]
def isa(whatitbecomes):
# type: (Any) -> PageAction
"""an action that adds an (isa,whatitbecomes) relationship."""
return lambda kb, src, tgt: addvalue(kb, tgt, 'isa', whatitbecomes)
def imtheir(relation):
# type: (str) -> PageAction
"""an action that adds relation(src) to the target."""
return lambda kb, src, tgt: addvalue(kb, tgt, relation, src)
def isalsomy(relation):
# type: (str) -> PageAction
"""an action that adds relation(tgt) to the source."""
return lambda kb, src, tgt: addvalue(kb, src, relation, tgt)
Rule = namedtuple('Rule', ['pagerule', 'pageactions']) # type: Tuple[PageRule, List[PageAction]]
def apply_rules(kb, rules):
# type: (KB_or_Dict, List[Rule]) -> None
"""Modify the kb by applying all the provided rules."""
for src in kb.keys():
for rule in rules:
targets = rule.pagerule(kb, src)
if not targets: continue
for action in rule.pageactions:
for tgt in targets:
action(kb, src, str(tgt))
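# --- Hedged usage sketch (not part of the original module) ---
# Two illustrative rules on a tiny KB: pages reachable through 'mother' are
# tagged as a 'parent' and given a reverse 'parent_of' link, and any page that
# has a 'mother' attribute is tagged as a 'child'.
def _example_apply_rules():
    kb = KB({'Bob': {'mother': ['Alice']}, 'Alice': {}})
    rules = [Rule(pagerule=the('mother'), pageactions=[isa('parent'), imtheir('parent_of')]),
             Rule(pagerule=hasa('mother'), pageactions=[isa('child')])]
    apply_rules(kb, rules)
    return kb  # kb['Alice'] now has isa=['parent'] and parent_of=['Bob']; kb['Bob'] has isa=['child']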
```
|
{
"source": "jeanphilippemercier/microquake",
"score": 3
}
|
#### File: core/data/write_ot.py
```python
import os
import numpy as np
from obspy.core.inventory.response import InstrumentSensitivity, PolesZerosResponseStage
from obspy.core.inventory.util import Frequency
import instResp
from instResp.libInst import get_corner_freq_from_pole, getResponse
from instResp.libNom import RC, WA, Accelerometer
from instResp.plotResp import plotResponse
from loguru import logger
from microquake.core.data.inventory import (
Inventory,
load_inventory_from_excel,
test_print_OT_xml_summary
)
from microquake.core.data.response_utils import read_cable_file, read_sensor_types_file
ns_tag = 'mq'
ns = 'MICROQUAKE'
def get_sensitivity(resistance):
if resistance == 0:
# print(" This is a most likely an Accelerometer!")
sensitivity = 1.0
elif resistance % 3500 == 0:
# print(" This is a high-gain geophone --> Use 80 V/m/s")
sensitivity = resistance/3500 * 80
elif resistance % 375 == 0:
# print(" This is a low-gain geophone --> Use 28.8 V/m/s")
sensitivity = resistance/375 * 28.8
else:
# print(" Unknown resistance [%s] --> use default sensitivity=1.0" % resistance)
sensitivity = 1.0
return sensitivity
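# --- Hedged usage sketch (not part of the original module) ---
# get_sensitivity(3500) -> 80.0 (high-gain geophone),
# get_sensitivity(750) -> 57.6 (750 = 2 x 375 ohm -> 2 x 28.8 V/m/s),
# get_sensitivity(0) -> 1.0 (accelerometer).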
def fix_OT_responses(inventory):
'''
Replace the generic (placeholder) channel responses in inventory
with calculated responses.
Response calculations are made using values in the station extras dict
like damping, coil_resistance, etc.
'''
for station in inventory.stations():
logger.info("sta:{}".format(station.code))
extras = station.extra
resistance = extras.coil_resistance.value
f0 = extras.resonance_frequency.value
damp = extras.damping.value
cable_cap = extras.cable_capacitance_pF_per_meter.value
cable_len = extras.cable_length.value
sensitivity = get_sensitivity(resistance)
extras['calculated_pz_sensitivity'] = {'namespace': ns, 'value': sensitivity}
logger.info("resistance:%f sensitivity:%f cable_cap:%f len:%f" %
(resistance, sensitivity, cable_cap, cable_len))
pz_generator = WA
input_units = "M/S"
if station.motion == "ACCELERATION":
pz_generator = Accelerometer
input_units = "M/S**2"
pzs = pz_generator(per=1/f0, damp=damp, gain=1.0, normalize=True, normalize_freq=100.)
# MTH: JP wants sensitivity set to 1.0 since OT data already scaled to velocity/accel:
pzs.sensitivity = 1.0
pzs.sensitivity_f = 100.
freqs = np.logspace(-5, 4., num=2000)
# pzs.name = station.sensor_id
pzs.name = extras.model.value
pzs.unitsIn = input_units
pzs.unitsOut = "V"
if cable_cap == 0 or cable_len == 0:
logger.info("Cable capacity or cable length set to 0 --> ignoring")
else:
# Cable capacity in pF (=10^-12 Farads):
cable_capacity = cable_cap * 1e-12 * cable_len
tau = resistance * cable_capacity
f_rc = 1./tau
# print("cap_per_m:%s x len:%f = %f x R=%f --> tau=%f fc=1/tau=%g" % \
# (cable_cap, cable_len, cable_capacity, resistance, tau, f_rc))
pz_rc = RC(tau=tau)
pzs.append_pole(pz_rc.poles[0])
pzs.normalize_to_a0(norm_freq=100)
resp = getResponse(pzs, freqs, removeZero=False, useSensitivity=False)
title = 'sta:%s f0=%.0f Hz h=%.2f sensitivity=%.2f' % \
(station.code, f0, damp, sensitivity)
logger.info("Corner freq:%f" % get_corner_freq_from_pole(pzs.poles[0]))
fc_low = -999.
if station.motion == "VELOCITY":
fc_low = get_corner_freq_from_pole(pzs.poles[0])
# elif station.motion == "ACCELERATION":
fc_high = 1e6
if pzs.poles.size == 3:
logger.info("** High-f Corner freq:%f" % get_corner_freq_from_pole(pzs.poles[2]))
fc_high = get_corner_freq_from_pole(pzs.poles[2])
extras['min_frequency'] = {'namespace': ns, 'value': float(fc_low)}
extras['max_frequency'] = {'namespace': ns, 'value': float(fc_high)}
# if station.code == '2':
# if 1:
# plotResponse(resp, freqs, title=title, xmin=1, xmax=10000., ymin=.01, ymax=6, title_font_size=8)
# exit()
response = station.channels[0].response
instrument_sensitivity = response.instrument_sensitivity
instrument_sensitivity.value = 1.
instrument_sensitivity.frequency = 100.
stages = response.response_stages
# Insert OT geophone or accelerometer response in first stage of response:
stages[0] = convert_pz_to_obspy(pzs)
# Use generic digitizer for stage 2 with output sample rate = 6KHz
stages[2].name = "Generic Digitizer = Placeholder for IMS Digitizer"
stages[2].stage_gain = 1
stages[2].decimation_input_sample_rate = Frequency(12000.)
stages[2].decimation_factor = 2
response.response_stages = stages[0:3]
for channel in station.channels:
channel.response = response
return 1
def write_OT_xml(sensor_file, sensor_type_file, cable_file, xml_outfile='OT.xml'):
'''
Deprecated - used when network metadata was spread over individual csv files
'''
# This import will fail as load_inventory not anymore here
from microquake.core.data.inventory import load_inventory
# <MV 190905>
print("write_OT_xml: xml_outfile=%s" % xml_outfile)
cables = read_cable_file(cable_file)
sensor_types = read_sensor_types_file(sensor_type_file)
inventory = load_inventory(sensor_file)
for cable in cables:
logger.info("cable:%s --> %s" % (cable, cables[cable]))
for sensor in sensor_types:
logger.info("sensor:%s --> %s" % (sensor, sensor_types[sensor]))
for station in inventory.stations():
logger.info("sta:%s sensor_id:%s" % (station.code, station.sensor_id))
logger.info(sensor_types)
sensor_type = sensor_types[station.sensor_id]
cable_cap = cables[station.cable_type]
extras = station.extra
extras['output_resistance_ohm'] = {'namespace': ns, 'value': float(sensor_type['output resistance (ohm)'])}
extras['resonance_frequency'] = {'namespace': ns, 'value': sensor_type['resonance frequency (Hz)']}
extras['damping'] = {'namespace': ns, 'value': float(sensor_type['damping'])}
extras['cable_pF_capacitance_per_m'] = {'namespace': ns, 'value': float(cable_cap)}
resistance = float(sensor_type['output resistance (ohm)'])
if resistance == 0:
# print(" This is a most likely an Accelerometer!")
sensitivity = 1.0
elif resistance % 3500 == 0:
# print(" This is a high-gain geophone --> Use 80 V/m/s")
sensitivity = resistance/3500 * 80
elif resistance % 375 == 0:
# print(" This is a low-gain geophone --> Use 28.8 V/m/s")
sensitivity = resistance/375 * 28.8
else:
# print(" Unknown resistance [%s] --> use default sensitivity=1.0" % resistance)
sensitivity = 1.0
pz_generator = WA
input_units = "M/S"
if station.motion == "ACCELERATION":
pz_generator = Accelerometer
input_units = "M/S**2"
f0 = float(sensor_type['resonance frequency (Hz)'])
damp = float(sensor_type['damping'])
pzs = pz_generator(per=1/f0, damp=damp, gain=1.0, normalize=True, normalize_freq=100.)
# MTH: JP wants sensitivity set to 1.0 since OT data already scaled to velocity/accel:
pzs.sensitivity = 1.0
pzs.sensitivity_f = 100.
extras['calculated_pz_sensitivity'] = {'namespace': ns, 'value': float(sensitivity)}
freqs = np.logspace(-5, 4., num=2000)
pzs.name = station.sensor_id
pzs.unitsIn = input_units
pzs.unitsOut = "V"
if cable_cap == "0":
# print("No cable cap set --> Skip!")
# print(pzs)
pass
else:
cable_len = float(station.cable_length)
            # Cable capacity in pF (=10^-12 Farads):
cable_capacity = float(cable_cap) * 1e-12 * cable_len
tau = resistance * cable_capacity
f_rc = 1./tau
# print("cap_per_m:%s x len:%f = %f x R=%f --> tau=%f fc=1/tau=%g" % \
# (cable_cap, cable_len, cable_capacity, resistance, tau, f_rc))
pz_rc = RC(tau=tau)
pzs.append_pole(pz_rc.poles[0])
pzs.normalize_to_a0(norm_freq=100)
resp = getResponse(pzs, freqs, removeZero=False, useSensitivity=False)
title = 'sta:%s sensor_type:%s f0=%.0f Hz h=%.2f sensitivity=%.2f' % \
(station.code, station.sensor_id, f0, damp, sensitivity)
logger.info("Corner freq:%f" % get_corner_freq_from_pole(pzs.poles[0]))
fc_low = -999.
if station.motion == "VELOCITY":
fc_low = get_corner_freq_from_pole(pzs.poles[0])
# elif station.motion == "ACCELERATION":
fc_high = 1e6
if pzs.poles.size == 3:
logger.info("** High-f Corner freq:%f" % get_corner_freq_from_pole(pzs.poles[2]))
fc_high = get_corner_freq_from_pole(pzs.poles[2])
extras['min_frequency'] = {'namespace': ns, 'value': float(fc_low)}
extras['max_frequency'] = {'namespace': ns, 'value': float(fc_high)}
# if station.code == '2':
# if 1:
# plotResponse(resp, freqs, title=title, xmin=1, xmax=10000., ymin=.01, ymax=6, title_font_size=8)
# exit()
from obspy.core.inventory.response import InstrumentSensitivity
from obspy.core.inventory.util import Frequency
"""
:type instrument_sensitivity:
:class:`~obspy.core.inventory.response.InstrumentSensitivity`
:param instrument_sensitivity: The total sensitivity for the given
channel, representing the complete acquisition system expressed as
a scalar.
def __init__(self, value, frequency, input_units,
output_units, input_units_description=None,
output_units_description=None, frequency_range_start=None,
frequency_range_end=None, frequency_range_db_variation=None):
"""
response = station.channels[0].response
instrument_sensitivity = response.instrument_sensitivity
instrument_sensitivity.value = 1.
instrument_sensitivity.frequency = 100.
stages = response.response_stages
# Insert OT geophone or accelerometer response in first stage of response:
stages[0] = convert_pz_to_obspy(pzs)
# Use generic digitizer for stage 2 with output sample rate = 6KHz
stages[2].name = "Generic Digitizer = Placeholder for IMS Digitizer"
stages[2].stage_gain = 1
stages[2].decimation_input_sample_rate = Frequency(12000.)
stages[2].decimation_factor = 2
response.response_stages = stages[0:3]
for channel in station.channels:
channel.response = response
inventory.write(xml_outfile, format='STATIONXML', nsmap={ns_tag: ns})
return 1
def convert_pz_to_obspy(pz: instResp.polezero.polezero) -> PolesZerosResponseStage:
''' Convert internal polezero object to obspy PolesZeroResponseStage
'''
stage_sequence_number = 1
stage_gain = pz.sensitivity
stage_gain_frequency = pz.sensitivity_f
normalization_factor = pz.a0
normalization_frequency = pz.sensitivity_f
zeros = pz.zeros
poles = pz.poles
if zeros is None:
logger.debug("Inside convert_pz: zeros = None")
zeros = []
if pz.type == 'A':
pz_transfer_function_type = "LAPLACE (RADIANS/SECOND)"
elif pz.type == 'B':
pz_transfer_function_type = "LAPLACE (HERTZ)"
else:
pz_transfer_function_type = "DIGITAL (Z-TRANSFORM)"
input_units = pz.unitsIn
output_units = pz.unitsOut
pz_stage = PolesZerosResponseStage(stage_sequence_number,
stage_gain,
stage_gain_frequency,
input_units,
output_units,
pz_transfer_function_type,
normalization_frequency,
zeros,
poles,
normalization_factor=normalization_factor,
name=pz.name,
)
return pz_stage
def test_read_xml(xmlfile):
    inventory = Inventory.load_from_xml(xmlfile)
for station in inventory.networks[0].stations:
print(station.code, station.loc, station.sensor_id, station.extra.damping)
for channel in station.channels:
# channel.plot(min_freq=1., output=output)
print(channel.code, channel.dip, channel.azimuth)
return
def main():
if 'SPP_COMMON' not in os.environ:
logger.error("Set your SPP envs!")
exit(2)
path = os.environ['SPP_COMMON']
xls_file = os.path.join(path, 'inventory_snapshot.xlsx')
inventory = load_inventory_from_excel(xls_file)
success = fix_OT_responses(inventory)
xml_out = os.path.join(path, 'OT.xml')
inventory.write(xml_out, format='STATIONXML', nsmap={ns_tag: ns})
exit()
sensor_file = os.path.join(path, 'sensors.csv')
sensor_types_file = os.path.join(path, 'sensor_types.csv')
cables_file = os.path.join(path, 'cables.csv')
success = write_OT_xml(sensor_file, sensor_types_file, cables_file,
xml_outfile=xml_out)
assert success == 1
exit()
# test_read_xml('OT.xml')
test_print_OT_xml_summary(xml_out)
exit()
test_read_stationxml('resources/ANMO.xml', 'ANMO2.xml')
test_read_stationxml('resources/OT.xml', 'OT2.xml')
test_read_csv_write_stationxml(sensor_csv, 'OT_new.xml')
test_print_OT_xml_summary('OT_new.xml')
return
if __name__ == "__main__":
main()
```
#### File: core/helpers/grid.py
```python
import os
import numpy as np
from loguru import logger
from numpy.linalg import norm
from obspy.core import UTCDateTime
from obspy.core.event import WaveformStreamID
from obspy.realtime.signal import kurtosis
from scipy.interpolate import interp1d
from microquake.core.stream import Trace
from microquake.core.data.grid import read_grid
from microquake.core.event import Arrival, Pick
from microquake.core.helpers.velocity import get_current_velocity_model_id
from microquake.core.settings import settings
from microquake.core.simul.eik import ray_tracer
def get_grid(station_code, phase, grid_type='time'):
"""
get a travel time grid for a given station and a given phase
:param station_code: station code
:param phase: Phase ('P' or 'S')
:param grid_type: type of grid ('time', 'take_off', 'azimuth')
:return:
"""
nll_dir = settings.nll_base
f_tt = os.path.join(nll_dir, 'time', 'OT.%s.%s.%s.buf'
% (phase.upper(), station_code, grid_type))
tt_grid = read_grid(f_tt, format='NLLOC')
return tt_grid
def get_grid_point(station_code, phase, location,
grid_coordinates=False, grid_type='time'):
"""
get value on a grid at a given point inside the grid
:param station_code: Station code
:param phase: Phase ('P' or 'S')
:param location: point where the value is interpolated
:param grid_coordinates: whether the location is expressed in grid
    coordinates or in model coordinates (default False)
:param grid_type: type of grid ('time', 'take_off', 'azimuth')
:return:
"""
tt = get_grid(station_code, phase, grid_type=grid_type)
return tt.interpolate(location, grid_coordinate=grid_coordinates)[0]
def get_ray(station_code, phase, location, grid_coordinate=False,
max_iter=100):
"""
return a ray for a given location - station pair for a given phase
:param station_code: station code
:param phase: phase ('P', 'S')
:param location: start of the ray
    :param grid_coordinate: whether start is expressed in grid
        coordinates or model coordinates (default False)
    :param max_iter: maximum number of iteration (Default=100)
:return:
"""
travel_time = get_grid(station_code, phase, grid_type='time')
return ray_tracer(travel_time, location,
grid_coordinates=grid_coordinate, max_iter=max_iter)
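# --- Hedged usage sketch (not part of the original module) ---
# Typical lookup of a travel time and the matching ray for one station; the
# station code and event location are hypothetical placeholder values and the
# NLLoc grids must exist under settings.nll_base.
# tt_p = get_grid_point('20', 'P', [651000., 4767000., -100.])
# ray_p = get_ray('20', 'P', [651000., 4767000., -100.])
# print(ray_p.length, tt_p)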
def create_arrivals_from_picks(picks, event_location, origin_time):
"""
create a set of arrivals from a list of picks
:param picks: list of microquake.core.event.Pick
:param event_location: event location list, tuple or numpy array
:param origin_time: event origin_time
:return: list of microquake.core.event.Arrival
"""
arrivals = []
for pick in picks:
station_code = pick.waveform_id.station_code
arrival = Arrival()
arrival.phase = pick.phase_hint
phase = pick.phase_hint
ray = get_ray(station_code, phase, event_location)
arrival.distance = ray.length
# for node in ray.nodes:
# print(node)
# xoff = ray.nodes[-2][0] - ray.nodes[-1][0]
# yoff = ray.nodes[-2][1] - ray.nodes[-1][1]
# zoff = ray.nodes[-2][2] - ray.nodes[-1][2]
# baz = np.arctan2(xoff,yoff)
# if baz < 0:
# baz += 2.*np.pi
# pick.backazimuth = baz*180./np.pi
predicted_tt = get_grid_point(station_code, phase,
event_location)
predicted_at = origin_time + predicted_tt
arrival.time_residual = pick.time - predicted_at
# print("create_arrivals: sta:%3s pha:%s pick.time:%s
arrival.takeoff_angle = get_grid_point(station_code, phase,
event_location,
grid_type='take_off')
arrival.azimuth = get_grid_point(station_code, phase,
event_location, grid_type='azimuth')
# MTH: arrival azimuth/takeoff should be in degrees - I'm pretty
# sure the grids store them in radians (?)
arrival.azimuth *= 180. / np.pi
if arrival.azimuth < 0:
arrival.azimuth += 360.
arrival.takeoff_angle *= 180. / np.pi
arrival.pick_id = pick.resource_id.id
arrival.earth_model_id = get_current_velocity_model_id(phase)
arrivals.append(arrival)
return arrivals
def estimate_origin_time(stream, event_location):
"""
estimate the origin time given an estimate of the event location and
a set of traces
:param stream: a microquake.core.Stream object containing a series
of traces
:param event_location: event location (list, tuple or numpy array)
:return: estimate of the origin time
"""
# import matplotlib.pyplot as plt
start_times = []
end_times = []
sampling_rates = []
stream = stream.detrend('demean')
for trace in stream:
start_times.append(trace.stats.starttime.datetime)
end_times.append(trace.stats.endtime.datetime)
sampling_rates.append(trace.stats.sampling_rate)
min_starttime = UTCDateTime(np.min(start_times)) - 1.0
max_endtime = UTCDateTime(np.max(end_times))
max_sampling_rate = np.max(sampling_rates)
shifted_traces = []
    npts = int((max_endtime - min_starttime) * max_sampling_rate)
t_i = np.arange(0, npts) / max_sampling_rate
for phase in ['P', 'S']:
for trace in stream.composite():
station = trace.stats.station
tt = get_grid_point(station, phase, event_location)
trace.stats.starttime = trace.stats.starttime - tt
data = np.nan_to_num(trace.data)
# dividing by the signal std yield stronger signal then
# dividing by the max. Dividing by the max amplifies the
# noisy traces as signal is more homogeneous on these traces
data /= np.std(data)
# data /= np.max(np.abs(data))
sr = trace.stats.sampling_rate
start_samp = int((trace.stats.starttime - min_starttime) *
trace.stats.sampling_rate)
end_samp = start_samp + trace.stats.npts
t = np.arange(start_samp, end_samp) / sr
try:
f = interp1d(t, data, bounds_error=False, fill_value=0)
            except Exception:
continue
shifted_traces.append(np.nan_to_num(f(t_i)))
shifted_traces = np.array(shifted_traces)
w_len_sec = 50e-3
w_len_samp = int(w_len_sec * max_sampling_rate)
stacked_trace = np.sum(np.array(shifted_traces) ** 2, axis=0)
stacked_trace /= np.max(np.abs(stacked_trace))
#
i_max = np.argmax(np.sum(np.array(shifted_traces) ** 2, axis=0))
if i_max - w_len_samp < 0:
pass
stacked_tr = Trace()
stacked_tr.data = stacked_trace
stacked_tr.stats.starttime = min_starttime
stacked_tr.stats.sampling_rate = max_sampling_rate
o_i = np.argmax(stacked_tr)
# k = kurtosis(stacked_tr, win=30e-3)
# diff_k = np.diff(k)
# o_i = np.argmax(np.abs(diff_k[i_max - w_len_samp: i_max + w_len_samp])) \
# + i_max - w_len_samp
origin_time = min_starttime + o_i / max_sampling_rate
# Tracer()()
return origin_time
def fix_arr_takeoff_and_azimuth(cat, vp_grid, vs_grid):
"""
Currently NLLoc is *not* calculating the takeoff angles at the source.
These default to -1 so that when microquake.nlloc reads last.hyp it
returns -1 for these values.
Here we re-create the arrivals from the picks & the NLLoc location
so that it populates the takeoff and azimuth angles.
Also, we add the relevant angles at the receiver (backazimuth and
incidence) to the arrivals.
"""
for event in cat:
origin = event.preferred_origin()
ev_loc = origin.loc
vp = vp_grid.interpolate(ev_loc)[0]
vs = vs_grid.interpolate(ev_loc)[0]
picks = []
for arr in origin.arrivals:
picks.append(arr.pick_id.get_referred_object())
# MTH: create_arrivals_from_picks will create an entirely new set of
# arrivals (new resource_ids)
# it will set arr.distance (looks exactly same as nlloc's
# arr.distance)
# it will set arr.time_residual *** DIFFERS *** from
# arr.time_residual nlloc calcs/reads from last.hypo
# it will fix the missing azim/theta that nlloc set to -1
# it will drop nlloc arr.time_weight field
arrivals = create_arrivals_from_picks(picks, ev_loc, origin.time)
# Now set the receiver angles (backazimuth and incidence angle)
for arr in arrivals:
pk = arr.pick_id.get_referred_object()
sta = pk.waveform_id.station_code
pha = arr.phase
st_loc = settings.inventory.get_station(sta).loc
xoff = ev_loc[0] - st_loc[0]
yoff = ev_loc[1] - st_loc[1]
zoff = np.abs(ev_loc[2] - st_loc[2])
H = np.sqrt(xoff * xoff + yoff * yoff)
alpha = np.arctan2(zoff, H)
beta = np.pi / 2. - alpha
takeoff_straight = alpha * 180. / np.pi + 90.
inc_straight = beta * 180. / np.pi
if pha == 'P':
v = vp
v_grid = vp_grid
elif pha == 'S':
v = vs
v_grid = vs_grid
p = np.sin(arr.takeoff_angle * np.pi / 180.) / v
v_sta = v_grid.interpolate(st_loc)[0]
inc_p = np.arcsin(p * v_sta) * 180. / np.pi
# I have the incidence angle now, need backazimuth so rotate to
# P,SV,SH
back_azimuth = np.arctan2(xoff, yoff) * 180. / np.pi
if back_azimuth < 0:
back_azimuth += 360.
arr.backazimuth = back_azimuth
arr.inc_angle = inc_p
origin.arrivals = arrivals
return
def synthetic_arrival_times(event_location, origin_time, stations=[]):
"""
calculate synthetic arrival time for all the station and returns a
list of microquake.core.event.Pick object
:param event_location: event location
:param origin_time: event origin time
:param stations: list of stations
:return: list of microquake.core.event.Pick
"""
picks = []
stations = settings.inventory.stations()
for phase in ['P', 'S']:
for station in stations:
# station = station.code
# st_loc = site.select(station=station).stations()[0].loc
st_loc = station.loc
dist = norm(st_loc - event_location)
# if (phase == 'S') and (dist < 100):
# continue
try:
at = origin_time + get_grid_point(station.code, phase,
event_location,
grid_coordinates=False)
# Catching error when grid file do not exist
except OSError as exc:
logger.warning(
f'Cannot read grid for station {station.code}'
f' ({station.site.name}), phase {phase}: {exc}')
continue
except ValueError as exc:
logger.warning(
f'Error reading grid for station {station.code}'
f' ({station.site.name}), phase {phase}: {exc}')
continue
wf_id = WaveformStreamID(
network_code=settings.get('project_code'),
station_code=station.code)
# station_code=station)
pk = Pick(time=at, method='predicted', phase_hint=phase,
evaluation_mode='automatic',
evaluation_status='preliminary', waveform_id=wf_id)
picks.append(pk)
return picks
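# --- Minimal usage sketch (not part of the original module). It assumes
# NLLoc travel-time grids exist under settings.nll_base for a hypothetical
# station '001' and that the project inventory/settings are configured.
def _example_travel_time_lookup():
    event_location = [651000., 4767000., -500.]  # hypothetical coordinates
    origin_time = UTCDateTime()
    tt_p = get_grid_point('001', 'P', event_location)
    ray_p = get_ray('001', 'P', event_location)
    logger.info('P travel time: {} s, ray length: {} m', tt_p, ray_p.length)
    # predicted picks for every station of the configured inventory
    return synthetic_arrival_times(event_location, origin_time)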
```
#### File: core/simul/eik.py
```python
def angles(travel_time):
"""
This function calculate the take off angle and azimuth for every grid point
given a travel time grid calculated using an Eikonal solver
:param travel_time: travel_time grid
:type travel_time: ~microquake.core.data.grid.GridData with seed property
(travel_time.seed).
:rparam: azimuth and takeoff angles grids
.. Note: The convention for the takeoff angle is that 0 degree is down.
"""
import numpy as np
gds_tmp = np.gradient(travel_time.data)
gds = [-gd for gd in gds_tmp]
tmp = np.arctan2(gds[0], gds[1]) # azimuth is zero northwards
azimuth = travel_time.copy()
azimuth.type = 'ANGLE'
azimuth.data = tmp
if len(travel_time.shape) == 3:
hor = np.sqrt(gds[0] ** 2 + gds[1] ** 2)
tmp = np.arctan2(hor, -gds[2])
# takeoff is zero pointing down
takeoff = travel_time.copy()
takeoff.type = 'ANGLE'
takeoff.data = tmp
return azimuth, takeoff
else:
return azimuth
def ray_tracer(travel_time, start, grid_coordinates=False, max_iter=1000):
"""
This function calculates the ray between a starting point (start) and an end
point, which should be the seed of the travel_time grid, using the gradient
descent method.
:param travel_time: travel time grid with a seed defined
:type travel_time: ~microquake.core.data.grid.GridData with an additional
seed property(travel_time.seed). Note that seed is automatically added to
the travel time grid by the Eikonal solver or when read from NLLOC grid
file.
:param start: the starting point (usually event location)
:type start: tuple, list or numpy.array
:param grid_coordinates: if true grid coordinates (indices,
not necessarily integer are used, else real world coordinates are used
(x, y, z) (Default value False)
:param max_iter: maximum number of iteration
:rtype: numpy.array
"""
import numpy as np
from microquake.core import GridData
from microquake.core.event import Ray
if grid_coordinates:
start = np.array(start)
start = travel_time.transform_from(start)
origin = travel_time.origin
spacing = travel_time.spacing
end = np.array(travel_time.seed)
start = np.array(start)
# calculating the gradient in every dimension at every grid points
gds_tmp = np.gradient(travel_time.data)
gds = [GridData(gd, origin=origin, spacing=spacing,) for gd in gds_tmp]
dist = np.linalg.norm(start - end)
cloc = start # initializing cloc "current location" to start
gamma = spacing / 2 # gamma is set to half the grid spacing. This
# should be
# sufficient. Note that gamma is fixed to reduce
# processing time.
nodes = [start]
iter_number = 0
while dist > spacing / 2:
if iter_number > max_iter:
break
if dist < spacing * 4:
gamma = spacing / 4
gvect = np.array([gd.interpolate(cloc, grid_coordinate=False,
order=1)[0] for gd in gds])
cloc = cloc - gamma * gvect / np.linalg.norm(gvect)
nodes.append(cloc)
dist = np.linalg.norm(cloc - end)
iter_number += 1
nodes.append(end)
ray = Ray(nodes=nodes)
return ray
def eikonal_solver(velocity, seed, seed_label, *args, **kwargs):
"""
Eikonal solver based of scikit fast marching solver interfaced for
microquake
:param velocity: velocity grid
:type velocity: ~microquake.core.data.grid.GridData
:param seed: numpy array location of the seed or origin of seismic wave in model coordinates
(usually location of a station or an event)
:type seed: numpy array
:param seed_label: seed label (name of station)
:type seed_label: basestring
"""
import skfmm
import numpy as np
seed = np.array(seed)
phi = -1*np.ones_like(velocity.data)
seed_coord = velocity.transform_to(seed)
phi[tuple(seed_coord.astype(int))] = 1
tt = skfmm.travel_time(phi, velocity.data, dx=velocity.spacing, *args,
**kwargs)
tt_grid = velocity.copy()
tt_grid.data = tt
tt_grid.seed = seed
tt_grid.seed_label = seed_label
return tt_grid
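# --- Minimal end-to-end sketch (illustrative only, synthetic values): solve
# the eikonal equation in a constant-velocity cube and trace a ray back to
# the seed. Grid size, spacing and coordinates are arbitrary assumptions.
def _example_eikonal_and_ray():
    import numpy as np
    from microquake.core import GridData
    velocity = GridData(5000. * np.ones((50, 50, 50)),
                        origin=[0., 0., 0.], spacing=10.)
    seed = np.array([250., 250., 0.])      # e.g. a sensor location
    travel_time = eikonal_solver(velocity, seed, seed_label='001')
    start = np.array([100., 400., 300.])   # e.g. an event location
    return ray_tracer(travel_time, start, max_iter=1000)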
def sensitivity_location(velocity, seed, location, perturbation=0.1, h=1):
"""
Calculate the sensitivity kernel for location in seed
:param velocity: a velocity grid
:type velocity: microquake.core.data.GridData
:param seed: seed for traveltime grid
:type seed: numpy.array
:param location: location at which the sensitivity is evaluated
:type location: numpy.array
    :param perturbation: perturbation to the location in the same units as loc (m, km etc)
:type perturbation: float
:param h:
:rparam: location sensitivity at the provided location
:rtype: numpy.array
"""
# creating a buffer around velocity in all dimensions
# works only in 3D ...
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
buf = 2
x = np.arange(-buf, velocity.data.shape[0] + buf)
y = np.arange(-buf, velocity.data.shape[1] + buf)
z = np.arange(-buf, velocity.data.shape[2] + buf)
Y, X, Z = np.meshgrid(y, x, z)
X1 = X.ravel()
Y1 = Y.ravel()
Z1 = Z.ravel()
coords = np.vstack((X1, Y1, Z1))
vel = velocity.copy()
vel.data = map_coordinates(velocity.data, coords, mode='nearest').reshape(X.shape)
traveltime = eikonal_solver(vel, seed)
h = float(h)
spc = traveltime.spacing
shape = np.array(traveltime.shape)
frechet = []
end = traveltime.transform_to(location) + buf
for j in range(len(seed)):
new_end1 = end.copy()
new_end1[(end[:, j] + perturbation < shape[j]) & (end[:, j] + perturbation > 0), j] += perturbation
new_end2 = end.copy()
new_end2[(end[:, j] - perturbation < shape[j]) & (end[:, j] - perturbation > 0), j] -= perturbation
perturbated_tt1 = map_coordinates(traveltime.data, new_end1.T, order=1, mode='nearest')
perturbated_tt2 = map_coordinates(traveltime.data, new_end2.T, order=1, mode='nearest')
f = (perturbated_tt1 - perturbated_tt2) / ((new_end1[:, j] - new_end2[:, j]) * spc)
frechet.append(f)
frechet = np.array(frechet)
return frechet.T
# TODO: sensitivity_velocity is broken, travel_time undefined
def sensitivity_velocity(velocity, seed, start_points, perturbation=0.1, h=1):
"""
Calculate the sensitivity kernel (Frechet derivative, dt/dV)
for every velocity element (v_i)
The sensitivity is calculated as follows for all velocity element i:
dt/dv_i = l_i * (v_i+ - v_i-) / (2 * perturbation), where
l_i = T_i / v_i, v_i+ and v_i- are is v_i +/- perturbation, respectively.
T represents the travel time grid calculated using the eikonal_solver
:param velocity: velocity grid
:type velocity: microquake.core.data.GridData
:param seed: seed for traveltime grid (usually sensor location)
:type seed: numpy.array
:param start_points: start points for the ray tracing, usually event location
:type start_points: numpy.array coordinates
:param time: time grid
:type time: microquake.core.data.GridData
:param perturbation: velocity perturbation
:param h:
:return: sensitivity kernel for velocity
"""
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.sparse import csr_matrix
# initiating the frechet derivative matrix
n_node = np.prod(velocity.shape)
n_measurement = len(start_points)
F = csr_matrix((n_measurement, n_node), dtype=np.float32)
# adding buffer to the velocity
    buf = 2  # related to the use of cubic spline with csr_matrix
x = []
for dim in range(len(velocity.data.shape)):
        x.append(np.arange(-buf, velocity.data.shape[dim] + buf))
if len(velocity.data.shape) == 3:
X, Y, Z = np.meshgrid(x[0], x[1], x[2])
X1 = X.ravel()
Y1 = Y.ravel()
Z1 = Z.ravel()
coords = np.vstack((X1, Y1, Z1))
else:
        X, Y = np.meshgrid(x[0], x[1])
X1 = X.ravel()
Y1 = Y.ravel()
coords = np.vstack((X1, Y1))
vel = velocity.copy()
vel.data = map_coordinates(velocity.data, coords, mode='nearest').reshape(X.shape)
# travel_time = EikonalSolver(vel, seed)
for start in start_points:
ray = ray_tracer(travel_time, start)
for segment in ray:
pass
```
#### File: core/util/borehole.py
```python
import pandas as pd
import numpy as np
import vtk
from dxfwrite import DXFEngine as dxf
from io import BytesIO
class Borehole:
def __init__(self, depth=None, x=None, y=None, z=None, collar=None,
toe=None, magnetic_declination=0):
"""
:param depth: depth vector
:param x: x coordinate
:param y: y coordinate
:param z: z coordinate
:param collar: collar location vector (x, y, z)
:param toe: toe location vector (x, y, z)
:param magnetic_declination: magnetic declination in degree from
true north
"""
self.depth = depth
self.x = x
self.y = y
self.z = z
self.collar = collar
self.toe = toe
self.magnetic_declination = magnetic_declination
pass
@classmethod
def from_gyro_file(cls, gyro_file, collar,
magnetic_declination=0, resolution=1, **kwargs):
gyro_df = read_gyro_file(gyro_file, **kwargs)
collar = np.array(collar)
magnetic_declination = magnetic_declination
trace_df = gyro_to_borehole_trace(gyro_df, collar,
magnetic_declination, dl=resolution)
x = trace_df['x'].values
y = trace_df['y'].values
z = trace_df['z'].values
toe = np.array([x[-1], y[-1], z[-1]])
depth = np.array(trace_df.index)
return cls(depth=depth, x=x, y=y, z=z, collar=collar, toe=toe,
magnetic_declination=magnetic_declination)
@property
def trace(self):
"""
return a dictionary containing the trace of the borehole
:return: borehole trace
"""
dict_out = {'depth': self.depth,
'x': self.x,
'y': self.y,
'z': self.z}
return dict_out
@property
def length(self):
return np.max(self.depth)
@property
def dip_azimuth(self):
        h = np.sqrt(self.x ** 2 + self.y ** 2)
        dip = np.arctan2(h, self.z)
        azimuth = np.arctan2(self.y, self.x)
return {'depth': self.depth, 'dip': dip, 'azimuth': azimuth}
def resample(self, resolution=1):
dict_data = {'depth': self.depth,
'x': self.x,
'y': self.y,
'z': self.z}
df = pd.DataFrame(dict_data)
        df = df.set_index('depth')
df = df.reindex(np.arange(np.min(np.array(df.index)),
np.max(np.array(df.index)) + resolution,
resolution))
df = df.apply(pd.Series.interpolate)
x = df['x'].values
y = df['y'].values
z = df['z'].values
depth = np.array(df.index)
return {'depth': depth, 'x': x, 'y': y, 'z': z}
def interpolate(self, depth):
"""
:param depth: depth along the borehole
:return: x, y, z at a specific depth along the borehole
"""
x_i = np.interp(depth, self.depth, self.x)
y_i = np.interp(depth, self.depth, self.y)
z_i = np.interp(depth, self.depth, self.z)
return x_i, y_i, z_i
def orientation(self, depth, collar_to_toe=True):
"""
returns the unit vector representing the orientation of a sensor
aligned along the borehole axis.
:param depth: depth or distance along the borhole
:param collar_to_toe: True if pointing towards the collar.
False if pointing towards the toe.
:return: a unit vector representing the orientation
"""
        l1 = np.array(self.interpolate(depth + 1))  # towards toe
        l2 = np.array(self.interpolate(depth - 1))  # towards collar
if collar_to_toe:
orientation = l1 - l2
else:
orientation = l2 - l1
orientation /= np.linalg.norm(orientation)
return orientation
def to_vtk_poly_data(self):
"""
:return: a vtk polydata object
"""
points = vtk.vtkPoints()
lines = vtk.vtkCellArray()
line_vtk = vtk.vtkLine()
for k in range(0, len(self.x) - 1):
x0 = self.x[k]
y0 = self.y[k]
z0 = self.z[k]
x1 = self.x[k+1]
y1 = self.y[k+1]
z1 = self.z[k+1]
id1 = points.InsertNextPoint((x0, y0, z0))
line_vtk.GetPointIds().SetId(0, id1)
id2 = points.InsertNextPoint((x1, y1, z1))
line_vtk.GetPointIds().SetId(1, id2)
lines.InsertNextCell(line_vtk)
poly_data = vtk.vtkPolyData()
poly_data.SetPoints(points)
poly_data.SetLines(lines)
return poly_data
def write_to_vtp(self, vtp_file_path):
"""
write the borehole trace to a VTP file
:param vtp_file_path:
:return:
"""
vtk_poly_data = self.to_vtk_poly_data()
writer = vtk.vtkXMLPolyDataWriter()
writer.SetFileName(vtp_file_path)
writer.SetInputData(vtk_poly_data)
return writer.Write()
def write_to_dxf(self, dxf_file_path):
drawing = dxf.drawing(dxf_file_path)
for k in range(0, len(self.x) - 1):
x0 = self.x[k]
y0 = self.y[k]
z0 = self.z[k]
x1 = self.x[k+1]
y1 = self.y[k+1]
z1 = self.z[k+1]
drawing.add(dxf.line((x0, y0, z0), (x1, y1, z1), color=7))
drawing.save()
return
def read_gyro_file(gyro_file, header=7):
"""
read a gyro survey file. The gyro survey file must contain three columns
(DEPTH, DIP, AZI (MAG))
:param gyro_file: full path to the gyro file
:param header: the number of header lines
    :return: pandas dataframe containing two columns
"""
    df = pd.read_excel(gyro_file, 'Sheet1', header=header)
gyro_df = pd.DataFrame()
gyro_df['depth'] = df['DEPTH']
gyro_df['azimuth (mag)'] = df['AZI (MAG)']
gyro_df['dip'] = df['DIP']
gyro_df = gyro_df.set_index('depth')
return gyro_df
def gyro_to_borehole_trace(gyro_df, collar, magnetic_declination,
dl=1):
"""
convert gyro survey data expressed in dip and azimuth to the x, y and z
trace
:param gyro_df: a dataframe containing containing the depth, dip and
azimuth
:param collar: list or array containing the borehole collar coordinates
:param magnetic_declination: magnetic declination in degrees from north
:return: a dataframe sampled at a resolution of dl containing the depth,
azimuth, dip, x, y an z along the borehole
"""
gyro = gyro_df[gyro_df['dip'].notnull()]
gyro['azimuth'] = gyro['azimuth (mag)'] - magnetic_declination
gyro['azimuth'] = gyro['azimuth'] / 180 * np.pi
gyro['dip'] = gyro['dip'] / 180 * np.pi
gyro['azimuth (mag)'] = gyro['azimuth (mag)'] / 180 * np.pi
gyro = gyro.reindex(np.arange(np.min(np.array(gyro.index)),
np.max(np.array(gyro.index)) + dl, dl))
gyro = gyro.apply(pd.Series.interpolate)
x = np.zeros(len(gyro))
x[0] = collar[0]
y = np.zeros(len(gyro))
y[0] = collar[1]
z = np.zeros(len(gyro))
z[0] = collar[2]
dip = gyro['dip']
azimuth = gyro['azimuth']
for k in range(0, len(gyro.index) - 1):
dy_x = dl * np.cos(gyro.iloc[k]['dip'])
dy = dy_x * np.cos(gyro.iloc[k]['azimuth'])
dx = dy_x * np.sin(gyro.iloc[k]['azimuth'])
dz = dl * np.sin(gyro.iloc[k]['dip'])
x[k + 1] = x[k] + dx
y[k + 1] = y[k] + dy
z[k + 1] = z[k] - dz
gyro['x'] = x
gyro['y'] = y
gyro['z'] = z
return gyro
def borehole_collar_toe_to_trace(collar, toe, dl=1):
"""
Return the borehole trace assuming a straight line between the borehole
collar and toe
:param collar: borehole collar coordinates
:param toe: borehole toe coordinates
:param dl: resolution of the output trace
:return:
"""
df = pd.DataFrame()
max_depth = np.linalg.norm(toe - collar)
depth = np.arange(0, max_depth, dl)
x_borehole = np.linspace(collar[0], toe[0], len(depth))
y_borehole = np.linspace(collar[1], toe[1], len(depth))
z_borehole = np.linspace(collar[2], toe[2], len(depth))
h_borehole = np.sqrt(x_borehole ** 2 + y_borehole ** 2)
dip = np.arctan2(h_borehole, z_borehole)
azimuth = np.arctan2(y_borehole, x_borehole)
df['x'] = x_borehole
df['y'] = y_borehole
df['z'] = z_borehole
df['dip'] = dip
df['azimuth'] = azimuth
df['depth'] = depth
df = df.set_index('depth')
return df
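# --- Minimal usage sketch (hypothetical coordinates, not part of the
# original module): build a straight borehole from collar to toe and query
# a point and a sensor orientation half-way down the hole.
def _example_straight_borehole():
    collar = np.array([650000., 4766000., 1500.])
    toe = np.array([650010., 4766050., 1400.])
    trace = borehole_collar_toe_to_trace(collar, toe, dl=1)
    bh = Borehole(depth=np.array(trace.index),
                  x=trace['x'].values, y=trace['y'].values,
                  z=trace['z'].values, collar=collar, toe=toe)
    midpoint = bh.interpolate(bh.length / 2)
    direction = bh.orientation(bh.length / 2, collar_to_toe=True)
    return midpoint, direction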
```
#### File: core/util/serializer.py
```python
def encode_pickle(obj):
"""Encodes any python object as a
:py:mod:`base64` string.
"""
import pickle
import bz2
buf = pickle.dumps(obj)
comp_ser_obj = bz2.compress(buf)
return comp_ser_obj
def decode_pickle(compressed_obj):
"""Decodes a :py:mod:`base64` string into a
:py:class:`obspy.core.stream.Stream`.
"""
import pickle
import bz2
ser_obj = bz2.decompress(compressed_obj)
obj = pickle.loads(ser_obj)
return obj
def encode_base64(buffered_object):
"""
    Encode a serialized object for storage in the database
    :param buffered_object: a bytes-like serialized object
    :return: bz2-compressed, base64-encoded bytes
"""
from bz2 import compress
from base64 import b64encode
return b64encode(compress(buffered_object))
def decode_base64(encoded_object):
"""
    decode an object stored in the database
    :param encoded_object: base64-encoded, bz2-compressed bytes
    :return: the original (decompressed) bytes object
"""
from bz2 import decompress
from base64 import b64decode
return decompress(b64decode(encoded_object))
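# --- Round-trip sketch (illustrative only): the two codecs are inverses of
# each other, so arbitrary python objects survive a store/load cycle.
def _example_roundtrip():
    payload = {'event_id': 'smi:local/example', 'magnitude': 1.2}
    assert decode_pickle(encode_pickle(payload)) == payload
    raw = b'some serialized bytes'
    assert decode_base64(encode_base64(raw)) == raw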
```
#### File: db/models/redis.py
```python
from io import BytesIO
from microquake.core import read, read_events
from microquake.db.connectors import connect_redis
def set_event(event_id, catalogue=None, fixed_length=None, context=None,
variable_length=None, attempt_number=0, ttl=10800):
redis_connector = connect_redis()
event = redis_connector.Hash(event_id)
if catalogue is not None:
file_out = BytesIO()
catalogue.write(file_out, format='quakeml')
event.update(catalogue=file_out.getvalue())
if fixed_length is not None:
file_out = BytesIO()
fixed_length.write(file_out, format='mseed')
event.update(fixed_length=file_out.getvalue())
if context is not None:
file_out = BytesIO()
context.write(file_out, format='mseed')
event.update(context=file_out.getvalue())
if variable_length is not None:
file_out = BytesIO()
variable_length.write(file_out, format='mseed')
event.update(variable_length=file_out.getvalue())
event.expire(ttl)
return event
def get_event(event_id):
redis_connector = connect_redis()
event = redis_connector.Hash(event_id)
dict_in = {}
if b'catalogue' in event.keys():
bytes = event[b'catalogue']
dict_in['catalogue'] = read_events(BytesIO(bytes), format='quakeml')
if b'fixed_length' in event.keys():
bytes = event[b'fixed_length']
dict_in['fixed_length'] = read(BytesIO(bytes), format='mseed')
if b'context' in event.keys():
bytes = event[b'context']
dict_in['context'] = read(BytesIO(bytes), format='mseed')
if b'variable_length' in event.keys():
bytes = event[b'variable_length']
dict_in['variable_length'] = read(BytesIO(bytes),
format='mseed')
if b'attempt_number' in event.keys():
dict_in['attempt_number'] = int(event[b'attempt_number'])
else:
dict_in['attempt_number'] = 0
return dict_in
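# --- Usage sketch (illustrative only, requires a reachable redis instance
# configured for connect_redis): cache a catalogue and its fixed-length
# waveform under a hypothetical event id, then read the bundle back.
def _example_cache_roundtrip(cat, fixed_length):
    event_id = 'smi:local/2018-11-08T10:21:49.898496Z'
    set_event(event_id, catalogue=cat, fixed_length=fixed_length, ttl=3600)
    return get_event(event_id)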
```
#### File: microquake/imaging/plot.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.text import Text
from matplotlib.transforms import offset_copy
from microquake.core import event
from microquake.core import logger
def guttenberg_richter(magnitudes, dates, bin_size=0.05, b_range=[-2.0, -0.5],
magnitude_type='Moment magnitude', xlim=[-2.0, 1.0], **kwargs):
"""
    produce a Gutenberg-Richter plot from a list of magnitudes
:param magnitudes: List of magnitudes
:type magnitudes: list of float
    :param dates: list of event times associated with the magnitudes
    :type dates: list of microquake.core.UTCDateTime
:param bin_size: the width of bins
:type bin_size: float
    :param b_range: range over which the b value is calculated and fit ([min, max])
:type b_range: list containing two floats
:param magnitude_type: Type of magnitude
:type magnitude_type: str
:param xlim: limits of the x axis
:type xlim: list containing two floats
"""
mag = np.array(magnitudes)
wdt = 15
num_years = (np.max(dates) - np.min(dates)) / (24 * 3600 * 365.25)
bins = np.arange(xlim[0], xlim[1], bin_size)
hist = np.histogram(mag, bins=bins)
hist = hist[0][::-1]
bins = bins[::-1]
bins = bins[1::]
cum_hist = hist.cumsum()
cum_hist = np.array([float(nb) for nb in cum_hist])
cum_hist /= float(num_years)
new_cum_yearly_rate = []
for i in cum_hist:
new_cum_yearly_rate.append(i)
log_cum_sum = np.log10(new_cum_yearly_rate)
cum = np.zeros(bins.shape)
min_mag = b_range[0]
max_mag = b_range[1]
indices = np.nonzero((bins >= min_mag) & (bins <= max_mag))[0]
b, a = np.polyfit(bins[indices], log_cum_sum[indices], 1)
mg = np.arange(min_mag, max_mag, 0.1)
fitmg = b * mg + a
# Tracer()()
plt.semilogy(bins, new_cum_yearly_rate, 'k.', **kwargs)
plt.semilogy(mg, 10 ** fitmg, 'k--', label='best fit ($%0.1f\,M_w + %0.1f$)' % (b, a), **kwargs)
plt.xlabel('%s' % magnitude_type)
plt.ylabel('Number of events/year')
plt.xlim(xlim)
plt.legend()
# ylim = plt.ylim()
# plt.ylim([0, 10**0])
# plt.show()
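# --- Minimal usage sketch (synthetic values, not part of the original
# module). `dates` is assumed to be numeric timestamps in seconds so the
# yearly-rate normalisation above works.
def _example_guttenberg_richter():
    rng = np.random.RandomState(42)
    magnitudes = rng.uniform(-2.0, 1.0, size=500)
    dates = rng.uniform(0, 3 * 365.25 * 24 * 3600, size=500)
    guttenberg_richter(magnitudes, dates, bin_size=0.1)
    plt.savefig('gutenberg_richter.png')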
class Plot(object):
def __init__(self, data=None, picks=None, site=None):
self.data = data
self.picks = picks
self.extraData = None
self.style = "all"
self.onlyIfHasPicks = True
self.maxNumPlots = 999
self.dataDirty = True
self._plotData = None
self.numPlots = 0
self.numTraces = 0
self.extraCaption = ""
def close(self):
# need to check that a plot has been created
plt.close()
def setData(self, data):
for curTr, tr in enumerate(data):
if curTr == 0:
xmin = tr.stats.starttime
xmax = tr.stats.endtime
else:
if tr.stats.starttime < xmin:
xmin = tr.stats.starttime
                if tr.stats.endtime > xmax:
xmax = tr.stats.endtime
data.trim(starttime=xmin, endtime=xmax, pad=True, fill_value=0)
self.data = data
self.dataDirty = True
def setPicks(self, picks):
self.picks = picks
self.dataDirty = True
def _prepare_data(self):
if not isinstance(self.picks, list):
self.picks = [self.picks]
self.data.detrend('demean')
stations = np.unique([tr.stats.station for tr in self.data])
self._plotData = []
self.numTraces = 0
for station in stations:
curPicks = []
# loop on picking stages
for cat2 in self.picks:
curStagePicks = []
if isinstance(cat2, event.Event):
evts = cat2
elif isinstance(cat2, event.Catalog):
if not cat2.events:
continue
evts = cat2.events
else:
continue
if not isinstance(evts, list):
evts = [evts]
for evt in evts:
if not evt['picks']:
continue
prevPicks = evt['picks']
# find the existing P and S picks for the current station
for pick in prevPicks:
if pick['waveform_id'].station_code != station:
continue
curStagePicks.append(pick)
curPicks.append(curStagePicks)
if self.onlyIfHasPicks:
numPicks = np.sum([len(a) for a in curPicks])
if numPicks == 0:
continue
trs = self.data.select(station=station)
# if len(trs) == 3:
# trs = trs.rotate_P_S()
self._plotData.append({'traces': trs, 'picks': curPicks})
self.numTraces += len(trs)
self.numPlots = 0
if self.style == "all":
self.numPlots = self.numTraces
if self.numPlots > self.maxNumPlots:
self.numPlots = self.maxNumPlots
self.dataDirty = False
def _make_plot(self, ax, plt_data, max_val, curPicks, starttime, cur_starttime, sr, transOffset, caption=None):
npts = len(plt_data)
t = np.array([(starttime + tmp).datetime for tmp in np.arange(0, npts) / sr])
ax.plot(t, plt_data / max_val)
plt.ylim([-1, 1])
if caption:
ax.text(.95, .9, caption, transform=ax.transAxes, va='top', ha='right', fontsize=10, backgroundcolor='white')
for curStage, pickStage in enumerate(curPicks):
for pick in pickStage:
pick_sample = pick.time.datetime
col = 'red' if pick['phase_hint'] == 'P' else 'black'
# col = 'red'
ax.axvline(pick_sample, c=col)
snr = None
for c in pick.comments:
if 'SNR=' not in c.text:
continue
snr = c.text.split('=')[1]
displayText = '%s%s' % (pick.phase_hint, curStage)
if snr:
displayText = '%s - %s' % (displayText, snr)
label = Text(pick_sample,
ax.get_ylim()[1] * .7, displayText,
color=col, backgroundcolor='white', size=10, alpha=.8, transform=transOffset)
ax.add_artist(label)
if hasattr(pick, 'tt_residual'):
pick_sample = (pick.time - pick.tt_residual).datetime
ax.axvline(pick_sample, c='green')
# This function supports plotting several stages of picking on the same graph.
# Simply pass in an array of catalogs to see the pick progression from one stage to the other.
def plot(self):
if self.dataDirty:
self._prepare_data()
if (self.style == "all" and self.numPlots == 0) \
or not self._plotData:
logger.warning('No data to plot!')
return
fig = None
if self.style == "all":
if self.extraData:
self.numPlots += 1
fig = plt.figure(figsize=(10, 2 * self.numPlots), dpi=100)
plotOffset = 0
for curSt, t in enumerate(self._plotData):
trs = t['traces']
curPicks = t['picks']
if self.style == "per_station":
self.numPlots = len(trs)
if self.extraData:
self.numPlots += 1
fig = plt.figure(figsize=(10, 5 * self.numPlots), dpi=100)
starttime = trs[0].stats.starttime
sr = trs[0].stats.sampling_rate
cur_starttime = starttime
transOffset = None
curTr = 0
for curTr, tr in enumerate(trs):
curPlot = plotOffset + curTr
if curPlot >= self.maxNumPlots:
# finished
return
ax = plt.subplot(self.numPlots, 1, curPlot + 1)
caption = '%s - %s' % (tr.stats.station, tr.stats.channel)
if self.extraCaption:
caption = '%s - %s' % (caption, self.extraCaption[curSt])
transOffset = offset_copy(ax.transData, fig=fig,
x=5, y=0, units='points')
max_val = np.max(np.abs(tr.data))
if max_val == 0:
continue
cur_starttime = tr.stats.starttime
self._make_plot(ax, tr.data, max_val, curPicks, starttime, cur_starttime, sr, transOffset,
caption=caption)
plt.title('station %s' % tr.stats.station)
if self.style == "per_station":
if self.extraData:
ax = plt.subplot(self.numPlots, 1, self.numPlots)
max_val = np.max(np.abs(self.extraData[curSt]))
self._make_plot(ax, self.extraData[curSt], max_val, curPicks, starttime, cur_starttime, sr, transOffset)
self.show()
else:
plotOffset += curTr + 1
def show(self):
plt.show()
def saveFig(self, outFile=""):
plt.tight_layout()
plt.savefig(outFile, dpi=100, bbox_inches='tight')
```
#### File: io/msgpack/core.py
```python
from io import BytesIO
import msgpack
from microquake.core.event import Catalog
from microquake.core import Stream, read, read_events
from microquake.core.event import Event
EXTYPES = {'mseed': 0,
'quakeml': 1}
def pack(data):
return msgpack.packb(data, default=_encode_one, use_bin_type=True)
def unpack(pack):
return msgpack.unpackb(pack, ext_hook=_decode_one, raw=False)
def _encode_one(obj):
if isinstance(obj, Stream):
buf = BytesIO()
obj.write(buf, format='mseed')
return msgpack.ExtType(EXTYPES['mseed'], buf.getvalue())
if isinstance(obj, Event) or isinstance(obj, Catalog):
buf = BytesIO()
obj.write(buf, format='quakeml')
return msgpack.ExtType(EXTYPES['quakeml'], buf.getvalue())
raise TypeError("Unknown type: %r" % (obj,))
def _decode_one(code, data):
if code == EXTYPES['mseed']:
return read(BytesIO(data))
if code == EXTYPES['quakeml']:
return read_events(BytesIO(data))
return msgpack.ExtType(code, data)
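# --- Round-trip sketch (illustrative only): plain python containers pass
# straight through msgpack, while Stream and Catalog/Event objects are
# transparently serialised via the ExtTypes registered above.
def _example_roundtrip():
    payload = {'event_id': 'smi:local/example', 'n_picks': 12}
    assert unpack(pack(payload)) == payload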
```
#### File: plugin/site/core.py
```python
from microquake.core import logger
def read_csv(filename, site_code='', has_header=True, **kwargs):
"""
read a csv file containing sensor information
The first line of the csv file should contain the site name
The expected file structure is as follows and should contain one header line
<network>, <sensor name>, <sensor type>, <no component>, x, y, z
where x, y and z represents the location of the sensors expressed in a local
    coordinate system. Note that the <sensor name> is limited to four characters
because of NonLinLoc limitations.
    example of file structure
1. <Network>, <sensor long name>, <sensor code>, <sensor type>, <gain>,
<sensitivity>, <sx>, <sy>, <sz>, <channel 1 code>, <azimuth>, <dip>,
<channel 2 code>, <azimuth>, <dip>, <channel 3 code>, <azimuth>, <dip>
:param filename: path to a csv file
:type filename: string
:param site_code: site code
:type site_code: string
    :param has_header: whether or not the input file has a header
:type has_header: bool
:rparam: site object
:rtype: ~microquake.core.station.Site
"""
from microquake.core.data.station import Site, Network, Station, Channel
with open(filename) as ifile:
networks = []
stations = []
for i, line in enumerate(ifile.readlines()):
if has_header and (i == 0):
continue
tmp = line.split(',')
nc = tmp[0]
long_name = tmp[1]
sc = tmp[2]
st = tmp[3]
smt = tmp[4]
gain = tmp[5]
sensitivity = tmp[6]
sx = float(tmp[7])
sy = float(tmp[8])
sz = float(tmp[9])
channels = []
for c in range(0, 3):
cc = tmp[4 * c + 10]
if not cc:
continue
x = float(tmp[4 * c + 10 + 1])
y = float(tmp[4 * c + 10 + 2])
z = float(tmp[4 * c + 10 + 3])
# az = float(tmp[3 * c + 10 + 1])
# dip = float(tmp[3 * c + 10 + 2])
channel = Channel(code=cc)
channel.orientation = [x, y, z]
# channel.dip_azimuth = (dip, az)
channels.append(channel)
station = Station(long_name=long_name, code=sc, sensor_type=st,
motion_type=smt, gain=gain,
sensitivity=sensitivity, loc=[sx, sy, sz],
channels=channels)
index = None
for j, net in enumerate(networks):
if net.code == nc:
index = j
            if index is None:
network = Network(code=nc, stations=[])
networks.append(network)
index = -1
networks[index].stations.append(station)
site = Site(code=site_code, networks=networks)
return site
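# --- Minimal usage sketch (hypothetical sensor values, not part of the
# original module): write a one-sensor csv matching the layout documented
# above and parse it back into a Site object.
def _example_read_csv(csv_path='/tmp/example_sensors.csv'):
    header = 'network,name,code,type,motion,gain,sensitivity,x,y,z,channels\n'
    row = ('OT,Example sensor,001,geophone,velocity,1.0,1.0,'
           '651000,4767000,-500,X,1,0,0,Y,0,1,0,Z,0,0,1\n')
    with open(csv_path, 'w') as ofile:
        ofile.write(header + row)
    return read_csv(csv_path, site_code='EX')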
def read_pickle(filename, **kwargs):
"""
read site saved pickle format
:param filename:
:return:
"""
from microquake.core.data.station import Site
import pickle as pickle
try:
        site = pickle.load(open(filename, 'rb'))
    except Exception:
logger.error('Not able to read %s' % filename)
return None
if not isinstance(site, Site):
logger.error(
"The pickle file does not contain and microquake.core.station.Site object")
return None
return site
def write_csv(site, filename, **kwargs):
"""
write a Site object to disk in csv format
:param filename: full path to file with extension
:type filename: str
:param site: site object to be saved
:type site: ~microquake.core.data.station.Site
:param protocol: pickling protocol level see pickle.dump documentation
for more information
:type protocol: int
:rtype: None
"""
# TODO write a function to save the site object in csv format
pass
def write_pickle(site, filename, protocol=-1, **kwargs):
"""
write a Site object to disk in pickle (.pickle or .npy extension) format
using the pickle module
:param filename: full path to file with extension
:type filename: str
:param site: site object to be saved
:type site: ~microquake.core.data.station.Site
:param protocol: pickling protocol level see pickle.dump documentation
for more information
:type protocol: int
"""
import pickle as pickle
    with open(filename, 'wb') as of:
pickle.dump(site, of, protocol=protocol)
def write_vtk(site, filename, **kwargs):
"""
write a Site object to disk in vtk format for viewing in Paraview for
example
:param filename: full path to file with extension
:type filename: str
:param site: site object to be saved
:type site: ~microquake.core.data.station.Site
:param protocol: pickling protocol level see pickle.dump documentation
for more information
:type protocol: int
"""
# TODO write a function to save the site object in vtk format for viewing
# in paraview
pass
```
#### File: microquake/processors/clean_data.py
```python
from microquake.processors.processing_unit import ProcessingUnit
from microquake.core.stream import Stream
from loguru import logger
import numpy as np
from microquake.core.settings import settings
class Processor(ProcessingUnit):
@property
def module_name(self):
return "clean_data"
def process(self, **kwargs):
"""
Process event and returns its classification.
"""
waveform = kwargs["waveform"]
black_list = settings.get('sensors').black_list
starttime = waveform[0].stats.starttime
endtime = waveform[0].stats.endtime
inventory = self.settings.inventory
for tr in waveform:
if tr.stats.starttime < starttime:
starttime = tr.stats.starttime
if tr.stats.endtime > endtime:
endtime = tr.stats.endtime
waveform.trim(starttime, endtime, pad=True, fill_value=0)
trs = []
for i, tr in enumerate(waveform):
if inventory.select(tr.stats.station) is None:
continue
if tr.stats.station not in black_list:
if np.any(np.isnan(tr.data)):
continue
if np.sum(tr.data ** 2) > 0:
trs.append(tr)
logger.info('The seismograms have been cleaned, %d trace remaining' %
len(trs))
return Stream(traces=trs)
# def legacy_pipeline_handler(self, msg_in, res):
# """
# legacy pipeline handler
# """
# cat, waveform = self.app.deserialise_message(msg_in)
# cat = self.output_catalog(cat)
# return cat, waveform
```
#### File: microquake/processors/magnitude_extractor.py
```python
from loguru import logger
import numpy as np
from microquake.processors.processing_unit import ProcessingUnit
from microquake.processors import quick_magnitude
from microquake.waveform.mag_utils import calc_static_stress_drop
class Processor(ProcessingUnit):
@property
def module_name(self):
return "extract_magnitude"
def process(
self,
**kwargs
):
logger.info("pipeline: measure_amplitudes")
cat = kwargs["cat"].copy()
stream = kwargs["stream"]
dict_out = {}
dict_keys = ['energy_joule', 'energy_p_joule', 'energy_p_std',
'energy_s_joule', 'energy_s_std', 'corner_frequency_hz',
'corner_frequency_p_hz', 'corner_frequency_s_hz',
'time_domain_moment_magnitude',
'frequency_domain_moment_magnitude',
'moment_magnitude', 'moment_magnitude_uncertainty',
'seismic_moment', 'potency_m3', 'source_volume_m3',
'apparent_stress', 'static_stress_drop_mpa']
for key in dict_keys:
dict_out[key] = None
mu = 29.5e9 # rigidity in Pa (shear-wave modulus)
# finding the index for magnitude object that contains the energy
td_magnitude = None
fd_magnitude = None
energy = None
cf = None
for magnitude in reversed(cat[0].magnitudes):
if magnitude.magnitude_type == 'E':
energy = magnitude.mag
dict_out['energy_joule'] = energy
energy_p_dict = eval(magnitude.comments[1].text)
dict_out['energy_p_joule'] = energy_p_dict['Ep']
dict_out['energy_p_std'] = energy_p_dict['std_Ep']
energy_s_dict = eval(magnitude.comments[2].text)
dict_out['energy_s_joule'] = energy_s_dict['Es']
dict_out['energy_s_std'] = energy_s_dict['std_Es']
break
for magnitude in reversed(cat[0].magnitudes):
if len(magnitude.comments) == 0:
continue
if 'time-domain' in magnitude.comments[0].text:
td_magnitude = magnitude.mag
dict_out['time_domain_moment_magnitude'] = td_magnitude
break
for magnitude in reversed(cat[0].magnitudes):
if len(magnitude.comments) == 0:
continue
if 'frequency-domain' in magnitude.comments[0].text:
fd_magnitude = magnitude.mag
dict_out['frequency_domain_moment_magnitude'] = fd_magnitude
break
cfs = []
for comment in cat[0].preferred_origin().comments:
if ('corner_frequency_p' in comment.text.lower()) or \
('corner_frequency_s' in comment.text.lower()):
cf_string = comment.text
cf = float(cf_string.split('=')[1].split(' ')[0])
if 'corner_frequency_p' in comment.text.lower():
dict_out['corner_frequency_p_hz'] = cf
elif 'corner_frequency_s' in comment.text.lower():
dict_out['corner_frequency_s_hz'] = cf
cfs.append(cf)
cfs = [cf for cf in cfs if not np.isnan(cf)]
if cfs:
cf = np.mean(cfs)
dict_out['corner_frequency_hz'] = cf
if (td_magnitude is not None) and (fd_magnitude is not None):
mw = np.mean([td_magnitude, fd_magnitude])
mw_uncertainty = np.abs(td_magnitude - fd_magnitude)
elif (td_magnitude is None) and (fd_magnitude is None):
mw, mw_uncertainty = quick_magnitude.Processor(
).process(cat=cat, stream=stream)
elif td_magnitude is None:
mw = fd_magnitude
mw_uncertainty = None
elif fd_magnitude is None:
mw = td_magnitude
mw_uncertainty = None
dict_out['moment_magnitude'] = mw
dict_out['moment_magnitude_uncertainty'] = mw_uncertainty
if mw is not None:
sm = 10 ** (3 / 2 * (mw + 6.02))
dict_out['seismic_moment'] = sm
potency = sm / mu
dict_out['potency_m3'] = potency
dict_out['source_volume_m3'] = potency
if energy is not None:
dict_out['apparent_stress'] = 2 * energy / potency
else:
dict_out['apparent_stress'] = None
if cf is not None:
ssd = calc_static_stress_drop(mw, cf)
dict_out['static_stress_drop_mpa'] = ssd
return dict_out
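# --- Worked numeric sketch (illustrative only): the scalar relations used
# above, for a hypothetical event of moment magnitude Mw = 1.0 radiating
# E = 1e4 J, with the same rigidity mu = 29.5 GPa.
def _example_source_parameters(mw=1.0, energy=1.0e4, mu=29.5e9):
    seismic_moment = 10 ** (3 / 2 * (mw + 6.02))   # N.m
    potency = seismic_moment / mu                  # m^3
    apparent_stress = 2 * energy / potency         # Pa
    return seismic_moment, potency, apparent_stress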
```
#### File: microquake/processors/magnitude.py
```python
import numpy as np
from loguru import logger
from microquake.waveform.mag import (calc_magnitudes_from_lambda,
set_new_event_mag)
from microquake.core.helpers.velocity import get_velocities
from microquake.processors.processing_unit import ProcessingUnit
class Processor(ProcessingUnit):
@property
def module_name(self):
return "magnitude"
def initializer(self):
self.vp_grid, self.vs_grid = get_velocities()
def process(
self,
**kwargs
):
"""
process(catalog)
Calculates the Magnitude in Frequency or Time domain
- various measures
- requires the arrivals
Parameters
----------
catalog: str
Returns
-------
catalog: str
        few parameters related to the magnitude
        list of magnitudes for each station
"""
logger.info("pipeline: magnitude")
cat = kwargs["cat"].copy()
density = self.params.density
min_dist = self.params.min_dist
use_sdr_rad = self.params.use_sdr_rad
use_free_surface_correction = self.params.use_free_surface_correction
make_preferred = self.params.make_preferred
phase_list = self.params.phase_list
use_smom = self.params.use_smom
if not isinstance(phase_list, list):
phase_list = [phase_list]
if use_sdr_rad and cat.preferred_focal_mechanism() is None:
logger.warning("use_sdr_rad=True but preferred focal mech = None"
"--> Setting use_sdr_rad=False")
use_sdr_rad = False
for i, event in enumerate(cat):
ev_loc = event.preferred_origin().loc
vp = self.vp_grid.interpolate(ev_loc)[0]
vs = self.vs_grid.interpolate(ev_loc)[0]
sdr = None
if use_sdr_rad:
focal_mech = event.preferred_focal_mechanism()
if focal_mech is not None:
nodal_plane = focal_mech.nodal_planes.nodal_plane_1
strike = nodal_plane.strike
dip = nodal_plane.dip
rake = nodal_plane.rake
sdr = (strike, dip, rake)
logger.info("use_sdr_rad=True (s,d,r)=(%.1f,%.1f,%.1f)" %
(strike, dip, rake))
Mws = []
station_mags = []
for phase in phase_list:
Mw, sta_mags = calc_magnitudes_from_lambda(
[event],
vp=vp,
vs=vs,
density=density,
P_or_S=phase,
use_smom=use_smom,
use_sdr_rad=use_sdr_rad,
use_free_surface_correction=use_free_surface_correction,
sdr=sdr,
min_dist=min_dist)
Mws.append(Mw)
station_mags.extend(sta_mags)
logger.info("Mw_%s=%.1f len(station_mags)=%d" %
(phase, Mws[-1], len(station_mags)))
if self.module_type == "frequency":
Mw = np.nanmean(Mws)
comment = "frequency-domain"
else:
Mw = np.mean(Mws)
comment = "time-domain"
comment = f"Average of {comment} station moment magnitudes"
if use_sdr_rad and sdr is not None:
comment += " Use_sdr_rad: sdr=(%.1f,%.1f,%.1f)" % (sdr[0], sdr[1], sdr[2])
if np.isnan(Mw):
logger.warning("Mw is nan, cannot set on event")
continue
set_new_event_mag(event, station_mags, Mw, comment,
make_preferred=make_preferred)
return cat.copy()
def legacy_pipeline_handler(
self,
msg_in,
res
):
_, stream = self.app.deserialise_message(msg_in)
return res['cat'], stream
```
#### File: microquake/waveform/parseval_utils.py
```python
import numpy as np
from scipy.fftpack import fft, fftfreq, rfft, rfftfreq
import matplotlib.pyplot as plt
"""
mag_utils - a collection of routines to assist in the moment magnitude calculation
"""
def parsevals(data, dt, nfft):
"""
Proper scaling to satisfy Parsevals:
Scale a 2-sided fft by dt
Scale a 1-sided rft by dt x sqrt(2)
The 2-sided nfft-point fft returns nfft Complex values (+ve and -ve freqs)
The 1-sided nfft-point fft returns nfft/2 + 1 Complex values (+ve freqs + DC + Nyq)
>>>parsevals(tr.data, tr.stats.sampling_rate, npow2(tr.data.size))
Parseval's: [time] ndata=11750 dt=6000.000000 sum=0.0001237450569 [time]
Parseval's: [freq] nfft=65536 df=2.54313e-09 sum=0.0001237450441 [2-sided]
Parseval's: [freq] nfft=65536 df=2.54313e-09 sum=0.0001237450441 [1-sided]
"""
tsum = np.sum(np.square(data))*dt
print("Parseval's: [time] ndata=%7d dt=%12.6f sum=%12.10g [time]" % (data.size, dt, tsum))
# 2-sided (+ve & -ve freqs) FFT:
X=fft(data,nfft)*dt
df = 1./(dt * float(X.size)) # X.size = nfft
#fsum = np.sum(X*np.conj(X))*df
# Do it this way so it doesn't spit a ComplexWarning about throwing away imag part
fsum = np.sum(np.abs(X)*np.abs(X))*df
print("Parseval's: [freq] nfft=%7d df=%12.6g sum=%12.10g [2-sided]" % (X.size, df, fsum))
# 1-sided: N/2 -1 +ve freqs + [DC + Nyq] = N/2 + 1 values:
df = 1./(dt * float(nfft)) # df is same as for 2-sided case
Y,freqs = unpack_rfft(rfft(data, n=nfft), df)
Y *= dt
'''
Note: We only have the +ve freq half, so we need to double all the power
at each frequency *except* for DC & Nyquist,
which are purely real and don't have a -ve freq.
So either scale the fsum by 2, or scale Y (minus DC/Nyq) by sqrt(2) here
'''
Y[1:-1] *= np.sqrt(2.)
fsum = np.sum(np.abs(Y)*np.abs(Y))*df
print("Parseval's: [freq] nfft=%7d df=%12.6g sum=%12.10g [1-sided]" % (nfft, df, fsum))
return
def unpack_rfft(rfft, df):
n = rfft.size
if n % 2 == 0:
n2 = int(n/2)
else:
print("n is odd!!")
exit()
#print("n2=%d" % n2)
    c_arr = np.array(np.zeros(n2+1,), dtype=np.complex128)
    freqs = np.array(np.zeros(n2+1,), dtype=np.float64)
c_arr[0] = rfft[0]
c_arr[n2] = rfft[n-1]
freqs[0] = 0.
freqs[n2] = float(n2)*df
for i in range(1, n2):
freqs[i] = float(i)*df
        c_arr[i] = complex(rfft[2*i - 1], rfft[2*i])
return c_arr, freqs
def npow2(n: int) -> int:
""" return power of 2 >= n
"""
if n <= 0:
return 0
nfft = 2
while (nfft < n):
nfft = (nfft << 1)
return nfft
if __name__ == '__main__':
    # quick self-check on a synthetic trace (the original called an
    # undefined main() here)
    _dt = 1.0 / 6000.0
    _data = np.random.randn(11750)
    parsevals(_data, _dt, npow2(_data.size))
```
|
{
"source": "jeanphilippemercier/uquake",
"score": 2
}
|
#### File: uquake/core/stream.py
```python
from abc import ABC
from io import BytesIO
import numpy as np
import obspy.core.stream as obsstream
from pkg_resources import load_entry_point
from .trace import Trace
from .util import ENTRY_POINTS, tools
from .logging import logger
from pathlib import Path
class Stream(obsstream.Stream, ABC):
__doc__ = obsstream.Stream.__doc__.replace('obspy', 'uquake')
def __init__(self, stream=None, **kwargs):
super(Stream, self).__init__(**kwargs)
if stream:
traces = []
for tr in stream.traces:
traces.append(Trace(trace=tr))
self.traces = traces
def composite(self):
"""
returns a new stream object containing composite trace for all station.
The amplitude of the composite traces are the norm of the amplitude of
the trace of all component and the phase of the trace (sign) is the
sign of the first components of a given station.
:param st: a stream object
:type st: ~uquake.core.stream.Stream
:rtype: ~uquake.core.stream.Stream
"""
return composite_traces(self)
def select(self, **kwargs):
if 'site' in kwargs.keys():
trs = [tr for tr in self.traces if tr.stats.site == kwargs['site']]
else:
return super().select(**kwargs)
st_tmp = Stream(traces=trs)
kwargs_tmp = {}
for key in kwargs.keys():
if key == 'site':
continue
kwargs_tmp[key] = kwargs[key]
return st_tmp.select(**kwargs_tmp)
def as_array(self, wlen_sec=None, taplen=0.05):
t0 = np.min([tr.stats.starttime for tr in self])
sr = self[0].stats.sampling_rate
if wlen_sec is not None:
npts_fix = int(wlen_sec * sr)
else:
npts_fix = int(np.max([len(tr.data) for tr in self]))
return tools.stream_to_array(self, t0, npts_fix, taplen=taplen), sr, t0
def chan_groups(self):
chanmap = self.channel_map()
groups = [np.where(sk == chanmap)[0] for sk in np.unique(chanmap)]
return groups
def channel_map(self):
stations = np.array([tr.stats.station for tr in self])
unique = np.unique(stations)
unique_dict = dict(zip(unique, np.arange(len(unique))))
chanmap = np.array([unique_dict[chan] for chan in stations], dtype=int)
return chanmap
def write(self, filename, format='MSEED', **kwargs):
from six import string_types
f = filename
if isinstance(filename, string_types):
if filename.endswith('gz'):
import gzip
f = gzip.open(filename, 'w')
elif filename.endswith('bz2'):
import bz2
f = bz2.BZ2File(filename, 'w')
elif filename.endswith('zip'):
print('Zip protocol is not supported')
st_out = self.copy()
return obsstream.Stream.write(st_out, f, format, **kwargs)
write.__doc__ = obsstream.Stream.write.__doc__.replace('obspy',
'uquake')
def write_bytes(self):
buf = BytesIO()
self.write(buf, format='MSEED')
return buf.getvalue()
def valid(self, **kwargs):
return is_valid(self, return_stream=True)
def concat(self, comp_st):
c = (comp_st is not None)
if c:
for i, (t1, t2) in enumerate(zip(comp_st.traces, self.traces)):
self.detrend_norm(t2)
comp_st.traces[i] = t1.__add__(t2, method=1, fill_value=0)
else:
for t in self:
self.detrend_norm(t)
comp_st = self
return comp_st
@property
def unique_stations(self):
return np.sort(np.unique([tr.stats.station for tr in self]))
@property
def unique_sites(self):
return np.sort(np.unique([tr.stats.site for tr in self]))
@property
def stations(self):
return self.unique_stations
@property
def sites(self):
return self.unique_sites
def zpad_names(self):
for tr in self.traces:
tr.stats.station = tr.stats.station.zfill(3)
self.sort()
def zstrip_names(self):
for tr in self.traces:
tr.stats.station = tr.stats.station.lstrip('0')
def distance_time_plot(self, event, site, scale=20, freq_min=100,
freq_max=1000):
"""
        plot a record section: traces offset by their distance from the event
:param event: event object
:param site: site object
:param scale: vertical size of pick markers and waveform
:return: plot handler
"""
st = self.copy()
st.detrend('demean')
st.taper(max_percentage=0.01)
st.filter('bandpass', freqmin=freq_min, freqmax=freq_max)
import matplotlib.pyplot as plt
import numpy as np
# initializing the plot
ax = plt.subplot(111)
if event.preferred_origin():
origin = event.preferred_origin()
elif event.origins:
origin = event.origins[0]
else:
return
event_location = origin.loc
# find the earliest start time and latest end time
start_time = None
end_time = None
for tr in st:
if not start_time:
start_time = tr.stats.starttime
end_time = tr.stats.endtime
if tr.stats.starttime < start_time:
start_time = tr.stats.starttime
if tr.stats.endtime > end_time:
end_time = tr.stats.endtime
for tr in st:
station_code = tr.stats.station
# search for arrival
station = site.select(station_code).stations()[0]
station_location = station.loc
distance = np.linalg.norm(event_location - station_location)
p_pick = None
s_pick = None
data = (tr.data / np.max(np.abs(tr.data))) * scale
time_delta = tr.stats.starttime - start_time
time = np.arange(0, len(data)) / tr.stats.sampling_rate + \
time_delta
for arrival in origin.arrivals:
if arrival.get_pick().waveform_id.station_code == station_code:
distance = arrival.distance
if arrival.phase == 'P':
p_pick = arrival.get_pick().time - start_time
elif arrival.phase == 'S':
s_pick = arrival.get_pick().time - start_time
ax.plot(time, data + distance, 'k')
if p_pick:
ax.vlines(p_pick, distance - scale, distance + scale, 'r')
if s_pick:
ax.vlines(s_pick, distance - scale, distance + scale, 'b')
plt.xlabel('relative time (s)')
plt.ylabel('distance from event (m)')
@staticmethod
def create_from_json_traces(traces_json_list):
traces = []
# for tr_json in traces_json_list:
for i, tr_json in enumerate(traces_json_list):
stats = tr_json['stats']
tr = Trace.create_from_json(tr_json)
traces.append(tr)
return Stream(traces=traces)
def to_traces_json(self):
traces = []
for tr in self:
trout = tr.to_json()
traces.append(trout)
return traces
def plot(self, *args, **kwargs):
"""
see Obspy stream.plot()
"""
from ..imaging.waveform import WaveformPlotting
waveform = WaveformPlotting(stream=self, *args, **kwargs)
return waveform.plotWaveform(*args, **kwargs)
# from uquake.core import read, read_events
# from spp.utils import application
# app = application.Application()
# site = app.get_stations()
# st = read('2018-11-08T10:21:49.898496Z.mseed', format='mseed')
# cat = read_events('test.xml')
# evt = cat[0]
# st = st.composite()
def is_valid(st_in, return_stream=False, STA=0.005, LTA=0.1, min_num_valid=5):
"""
Determine if an event is valid or return valid traces in a stream
:param st_in: stream
:type st_in: uquake.core.stream.Stream
:param return_stream: return stream of valid traces if true else return
true if the event is valid
:type return_stream: bool
:param STA: short term average used to determine if an event is valid
:type STA: float
:param LTA: long term average
:type LTA: float
:param min_num_valid: minimum number of valid traces to declare the
event valid
:type min_num_valid: int
:rtype: bool or uquake.core.stream.Stream
"""
from scipy.ndimage.filters import gaussian_filter1d
from obspy.signal.trigger import recursive_sta_lta
st = st_in.copy()
st.detrend('demean').detrend('linear')
trstd = []
trmax = []
trs_out = []
st_comp = composite_traces(st)
for tr in st_comp:
if not np.any(tr.data):
continue
sampling_rate = tr.stats.sampling_rate
trstd.append(np.std(tr.data))
trmax.append(np.max(np.abs(tr.data)))
nsta = int(STA * sampling_rate)
nlta = int(LTA * sampling_rate)
cft = recursive_sta_lta(np.array(tr.data), nsta, nlta)
sfreq = tr.stats['sampling_rate']
sigma = sfreq / (2 * np.pi * 100)
cft = gaussian_filter1d(cft, sigma=sigma, mode='reflect')
try:
mx = np.r_[True, cft[1:] > cft[:-1]] & \
np.r_[cft[:-1] > cft[1:], True]
except Exception as e:
logger.error(e)
continue
i1 = np.nonzero(mx)[0]
i2 = i1[cft[i1] > np.max(cft) / 2]
tspan = (np.max(i2) - np.min(i2)) / sampling_rate
ratio = np.max(np.abs(tr.data)) / np.std(tr.data)
accept = True
if len(i2) < 3:
if ratio < 4:
accept = False
elif len(i2) >= 4:
accept = False
# else:
# if ratio < 4:
# accept = False
if tspan > 0.1:
accept = False
if (len(i2) == 2) and (tspan > 0.01) and (tspan < 0.1):
if ratio > 5:
accept = True
if accept:
for tr_accepted in st_in.select(station=tr.stats.station):
trs_out.append(tr_accepted)
st_out = Stream(traces=trs_out)
if return_stream:
return st_out
else:
        if len(st_out.unique_stations) >= min_num_valid:
return True
else:
return False
def check_for_dead_trace(tr):
eps = 1e-6
data = tr.data.copy()
mean = np.mean(data)
max = np.max(data) - mean
min = np.min(data) - mean
# print('%s: mean:%f max:%f min:%f' % (tr.get_id(), mean, max, min))
if max < eps and np.abs(min) < eps:
return 1
else:
return 0
def composite_traces(st_in):
"""
Requires length and sampling_rates equal for all traces
returns a new stream object containing composite trace for all station.
The amplitude of the composite traces are the norm of the amplitude of
the trace of all component and the phase of the trace (sign) is the sign
of the first components of a given station.
:param st_in: a stream object
:type st_in: ~uquake.core.stream.Stream
:rtype: ~uquake.core.stream.Stream
"""
trsout = []
st = st_in.copy()
st.detrend('demean')
for site in st.unique_sites:
trs = st.select(site=site)
if len(trs) == 1:
trsout.append(trs[0].copy())
continue
npts = len(trs[0].data)
buf = np.zeros(npts, dtype=trs[0].data.dtype)
for tr in trs:
dat = tr.data
buf += (dat - np.mean(dat)) ** 2
buf = np.sign(trs[0].data) * np.sqrt(buf)
stats = trs[0].stats.copy()
ch = st_in.traces[0].stats.channel
if len(ch) > 1:
prefix = ch[:-1]
stats.channel = f'{prefix}C'
trsout.append(Trace(data=buf.copy(), header=stats))
return Stream(traces=trsout)
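# --- Minimal usage sketch (synthetic data, hypothetical station/site and
# channel codes): build a three-component stream for a single site and
# collapse it into one composite trace per site.
def _example_composite():
    traces = []
    for component in 'XYZ':
        tr = Trace(data=np.random.randn(1000).astype(np.float32))
        tr.stats.sampling_rate = 6000.0
        tr.stats.station = '1'
        tr.stats.site = '1'
        tr.stats.channel = 'GP' + component
        traces.append(tr)
    st = Stream(traces=traces)
    return st.composite()  # one trace per site, channel code ends in 'C'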
def read(filename, format='MSEED', **kwargs):
if isinstance(filename, Path):
filename = str(filename)
if format in ENTRY_POINTS['waveform'].keys():
format_ep = ENTRY_POINTS['waveform'][format]
read_format = load_entry_point(format_ep.dist.key,
'obspy.plugin.waveform.%s' %
format_ep.name, 'readFormat')
st = Stream(stream=read_format(filename, **kwargs))
# making sure the channel names are upper case
trs = []
for tr in st:
tr.stats.channel = tr.stats.channel.upper()
trs.append(tr.copy())
st.traces = trs
return st
else:
return Stream(stream=obsstream.read(filename, format=format, **kwargs))
read.__doc__ = obsstream.read.__doc__.replace('obspy', 'uquake')
```
#### File: core/util/base.py
```python
from subprocess import call

import numpy as np
from obspy.core.util.base import ENTRY_POINTS, _get_entry_points
from pkg_resources import load_entry_point
# appending elements to the obspy ENTRY_POINTS
ENTRY_POINTS['grid'] = _get_entry_points('uquake.io.grid', 'readFormat')
ENTRY_POINTS['grid_write'] = _get_entry_points('uquake.io.grid',
'writeFormat')
gfr_entry_points = _get_entry_points('uquake.io.waveform', 'readFormat')
gfw_entry_points = _get_entry_points('uquake.io.waveform', 'writeFormat')
wf_entry_points = _get_entry_points('uquake.io.waveform', 'readFormat')
for key in wf_entry_points.keys():
ENTRY_POINTS['waveform'][key] = wf_entry_points[key]
wfw_entry_points = _get_entry_points('uquake.io.waveform', 'writeFormat')
for key in wfw_entry_points.keys():
ENTRY_POINTS['waveform_write'][key] = wfw_entry_points[key]
evt_entry_points = _get_entry_points('uquake.io.event', 'readFormat')
for key in evt_entry_points.keys():
ENTRY_POINTS['event'][key] = evt_entry_points[key]
def proc(cmd, cwd='.', silent=True):
from ..logging import logger
try:
if silent:
cmd = '%s > /dev/null 2>&1' % cmd
retcode = call(cmd, shell=True, cwd=cwd)
if retcode < 0:
logger.error('Child was terminated by signal %d' % (retcode,))
# else:
# print >>sys.stderr, "Child returned", retcode
except OSError as e:
logger.error('Execution failed: %s' % (e,))
def align_decimal(number, left_pad=7, precision=2):
"""Format a number in a way that will align decimal points."""
outer = '{0:>%i}.{1:<%i}' % (left_pad, precision)
inner = '{:.%if}' % (precision,)
return outer.format(*(inner.format(number).split('.')))
def pretty_print_array(arr):
return '(%s)' % ''.join([align_decimal(a) for a in arr])
def np_array(arr):
new_arr = np.empty(shape=(len(arr),), dtype=object)
for i, el in enumerate(arr):
new_arr[i] = el
return new_arr
def _read_from_plugin(plugin_type, filename, format=None, **kwargs):
"""
Reads a single file from a plug-in's readFormat function.
"""
eps = ENTRY_POINTS[plugin_type]
# get format entry point
format_ep = None
if not format:
# auto detect format - go through all known formats in given sort order
for format_ep in eps.values():
# search isFormat for given entry point
is_format = load_entry_point(
format_ep.dist.key,
'obspy.plugin.%s.%s' % (plugin_type, format_ep.name),
'isFormat')
# If it is a file-like object, store the position and restore it
# later to avoid that the isFormat() functions move the file
# pointer.
if hasattr(filename, "tell") and hasattr(filename, "seek"):
position = filename.tell()
else:
position = None
# check format
is_format = is_format(filename)
if position is not None:
                filename.seek(position, 0)
if is_format:
break
else:
raise TypeError('Unknown format for file %s' % filename)
else:
# format given via argument
format = format.upper()
try:
format_ep = eps[format]
except (KeyError, IndexError):
msg = "Format \"%s\" is not supported. Supported types: %s"
raise TypeError(msg % (format, ', '.join(eps)))
# file format should be known by now
try:
# search readFormat for given entry point
read_format = load_entry_point(
format_ep.dist.key,
'obspy.plugin.%s.%s' % (plugin_type, format_ep.name),
'readFormat')
except ImportError:
msg = "Format \"%s\" is not supported. Supported types: %s"
raise TypeError(msg % (format_ep.name, ', '.join(eps)))
# read
list_obj = read_format(filename, **kwargs)
return list_obj, format_ep.name
```
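A short sketch of the number-formatting helpers defined above; the import path is inferred from the file layout and may differ in the installed package.
```python
# Assumes the helpers live in uquake.core.util.base (path inferred, not confirmed).
from uquake.core.util.base import align_decimal, pretty_print_array

values = [3.14159, 27.5, 0.007]
for v in values:
    print(align_decimal(v))        # right-padded so decimal points line up

print(pretty_print_array(values))  # e.g. '(      3.14     27.50      0.01)'
```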
#### File: uquake/grid/base.py
```python
import numpy as np
from uuid import uuid4
from ..core.logging import logger
from pkg_resources import load_entry_point
from ..core.util import ENTRY_POINTS
from pathlib import Path
from scipy.ndimage.interpolation import map_coordinates
from ..core.event import WaveformStreamID
import matplotlib.pyplot as plt
def read_grid(filename, format='PICKLE', **kwargs):
format = format.upper()
if format not in ENTRY_POINTS['grid'].keys():
raise TypeError(f'format {format} is currently not supported '
f'for Grid objects')
format_ep = ENTRY_POINTS['grid'][format]
read_format = load_entry_point(format_ep.dist.key,
f'uquake.io.grid.{format_ep.name}',
'readFormat')
return read_format(filename, **kwargs)
class Grid:
"""
Object containing a regular grid
"""
def __init__(self, data_or_dims, spacing=None, origin=None,
resource_id=None, value=0):
"""
can hold both 2 and 3 dimensional grid
:param data_or_dims: either a numpy array or a tuple/list with the grid
dimensions. If grid dimensions are specified, the grid is initialized
with value
        :param spacing: spacing between the grid nodes
        :type spacing: tuple
        :param origin: tuple, list or array containing the origin of the grid
        :type origin: tuple
        :param resource_id: unique identifier for the grid; if set to None,
        a uuid4 is used to define a unique identifier
        :type resource_id: str
        :param value: value to fill the grid should dims be specified
        :type value: float
        """
data_or_dims = np.array(data_or_dims)
if data_or_dims.ndim == 1:
self.data = np.ones(data_or_dims) * value
else:
self.data = data_or_dims
if resource_id is None:
self.resource_id = str(uuid4())
else:
self.resource_id = resource_id
if origin is None:
self.origin = np.zeros(len(self.data.shape))
else:
origin = np.array(origin)
if origin.shape[0] == len(self.data.shape):
self.origin = origin
else:
logger.error(f'origin shape should be {len(self.data.shape)}')
raise ValueError
if spacing is None:
self.spacing = np.ones(len(self.data.shape))
else:
spacing = np.array(spacing)
if spacing.shape[0] == len(self.data.shape):
self.spacing = spacing
else:
logger.error(f'spacing shape should be {len(self.data.shape)}')
raise ValueError
def __hash__(self):
return hash((tuple(self.data.ravel()), tuple(self.spacing),
tuple(self.shape), tuple(self.origin)))
def __eq__(self, other):
        return self.hash == other.hash
@property
def hash(self):
return self.__hash__()
@classmethod
def from_ods(cls, origin, dimensions, spacing, val=0):
"""
create a grid from origin, dimensions and spacing
:param origin: grid origin
:type origin: tuple
:param dimensions: grid dimension
:type dimensions: tuple
:param spacing: spacing between the grid nodes
:type spacing: float
:param val: constant value with which to fill the grid
"""
data = np.ones(tuple(dimensions)) * val
        return cls(data, spacing=spacing, origin=origin)
@classmethod
def from_ocs(cls, origin, corner, spacing, val=0):
"""
create a grid from origin, corner and spacing
:param origin: grid origin (e.g., lower left corner for 2D grid)
:type origin: tuple or list or numpy.array
:param corner: grid upper (e.g., upper right corner for 2D grid)
:type corner: tuple or list or numpy.array
:param spacing: spacing between the grid nodes
:type spacing: float
:param val: constant value with which to fill the grid
"""
        gshape = tuple([int(np.ceil((c - o) / spacing))
                        for o, c in zip(origin, corner)])
        data = np.ones(gshape) * val
        return cls(data, spacing=spacing, origin=origin)
@classmethod
def from_ocd(cls, origin, corner, dimensions, val=0):
"""
create a grid from origin, corner and dimensions
:param origin: grid origin (e.g., lower left corner for 2D grid)
:param corner: grid upper (e.g., upper right corner for 2D grid)
:param dimensions: grid dimensions
:param val: constant value with which to fill the grid
:return:
"""
        origin = np.array(origin)
        corner = np.array(corner)
        dimensions = np.array(dimensions)
        data = np.ones(tuple(dimensions)) * val
        spacing = (corner - origin) / (dimensions - 1)
        return cls(data, spacing=spacing, origin=origin)
def __repr__(self):
repr_str = """
spacing: %s
origin : %s
shape : %s
""" % (self.spacing, self.origin, self.shape)
return repr_str
def __str__(self):
return self.__repr__()
def __eq__(self, other):
return np.all((self.shape == other.shape) &
(self.spacing == other.spacing) &
np.all(self.origin == other.origin))
def __mul__(self, other):
if isinstance(other, Grid):
            if self.check_compatibility(other):
mul_data = self.data * other.data
return Grid(mul_data, spacing=self.spacing,
origin=self.origin)
else:
raise ValueError
else:
raise TypeError
def __abs__(self):
return np.abs(self.data)
def transform_to(self, values):
"""
transform model space coordinates into grid space coordinates
:param values: tuple of model space coordinates
:type values: tuple
:rtype: tuple
"""
coords = (values - self.origin) / self.spacing
return coords
def transform_to_grid(self, values):
"""
transform model space coordinates into grid space coordinates
:param values: tuple of model space coordinates
:type values: tuple
:rtype: tuple
"""
return self.transform_to(values)
def transform_from(self, values):
"""
transform grid space coordinates into model space coordinates
:param values: tuple of grid space coordinates
:type values: tuple
:rtype: tuple
"""
return values * self.spacing + self.origin
def transform_from_grid(self, values):
"""
transform grid space coordinates into model space coordinates
:param values: tuple of grid space coordinates
:type values: tuple
:rtype: tuple
"""
return self.transform_from(values)
def check_compatibility(self, other):
"""
check if two grids are compatible, i.e., have the same shape, spacing
and origin
"""
return (np.all(self.shape == other.shape) and
np.all(self.spacing == other.spacing) and
np.all(self.origin == other.origin))
def __get_shape__(self):
"""
return the shape of the object
"""
return self.data.shape
shape = property(__get_shape__)
def copy(self):
"""
copy the object using copy.deepcopy
"""
import copy
cp = copy.deepcopy(self)
return cp
def in_grid(self, point):
"""
Check if a point is inside the grid
:param point: the point to check in absolute coordinate (model)
:type point: tuple, list or numpy array
:returns: True if point is inside the grid
:rtype: bool
"""
corner1 = self.origin
corner2 = self.origin + self.spacing * np.array(self.shape)
return np.all((point >= corner1) & (point <= corner2))
def fill_homogeneous(self, value):
"""
fill the data with a constant value
:param value: the value with which to fill the array
"""
self.data.fill(value)
def generate_points(self, pt_spacing=None):
"""
Generate points within the grid
"""
# if pt_spacing is None:
ev_spacing = self.spacing
dimensions = np.array(self.shape) * self.spacing / ev_spacing
xe = np.arange(0, dimensions[0]) * ev_spacing + self.origin[0]
ye = np.arange(0, dimensions[1]) * ev_spacing + self.origin[1]
ze = np.arange(0, dimensions[2]) * ev_spacing + self.origin[2]
Xe, Ye, Ze = np.meshgrid(xe, ye, ze)
Xe = Xe.reshape(np.prod(Xe.shape))
Ye = Ye.reshape(np.prod(Ye.shape))
Ze = Ze.reshape(np.prod(Ze.shape))
return Xe, Ye, Ze
def generate_random_points_in_grid(self, n_points=1,
grid_space=False):
"""
Generate a random set of points within the grid
:param n_points: number of points to generate (default=1)
:type n_points: int
:param grid_space: whether the output is expressed in
grid coordinates (True) or model coordinates (False)
(default: False)
:type grid_space: bool
:return: an array of triplet
"""
points = np.random.rand(n_points, len(self.data.shape))
for i in range(n_points):
points[i] = points[i] * self.dimensions
if not grid_space:
return self.transform_from_grid(points)
return points
def write(self, filename, format='PICKLE', **kwargs):
"""
write the grid to disk
:param filename: full path to the file to be written
:type filename: str
:param format: output file format
:type format: str
"""
format = format.upper()
if format not in ENTRY_POINTS['grid'].keys():
logger.error('format %s is not currently supported for Grid '
'objects' % format)
return
format_ep = ENTRY_POINTS['grid'][format]
write_format = load_entry_point(format_ep.dist.key,
'uquake.plugin.grid.%s'
% format_ep.name, 'writeFormat')
write_format(self, filename, **kwargs)
def interpolate(self, coord, grid_space=True, mode='nearest',
order=1, **kwargs):
"""
This function interpolate the values at a given point expressed
either in grid or absolute coordinates
:param coord: Coordinate of the point(s) at which to interpolate
either in grid or absolute coordinates
:type coord: list, tuple, numpy.array
:param grid_space: true if the coordinates are expressed in
grid space (indices can be float) as opposed to model space
:param mode: {'reflect', 'constant', 'nearest', 'mirror', 'wrap'},
optional
The `mode` parameter determines how the input array is extended
        beyond its boundaries. Default is 'nearest'. Behavior for each valid
value is as follows:
'reflect' (`d c b a | a b c d | d c b a`)
The input is extended by reflecting about the edge of the last
pixel.
'constant' (`k k k k | a b c d | k k k k`)
The input is extended by filling all values beyond the edge with
the same constant value, defined by the `cval` parameter.
'nearest' (`a a a a | a b c d | d d d d`)
The input is extended by replicating the last pixel.
'mirror' (`d c b | a b c d | c b a`)
The input is extended by reflecting about the center of the last
pixel.
'wrap' (`a b c d | a b c d | a b c d`)
The input is extended by wrapping around to the opposite edge.
:param order: int, optional
        The order of the spline interpolation, default is 1.
The order has to be in the range 0-5.
:type order: int
:type grid_space: bool
:rtype: numpy.array
"""
coord = np.array(coord)
if not grid_space:
coord = self.transform_to(coord)
if len(coord.shape) < 2:
coord = coord[:, np.newaxis]
try:
return map_coordinates(self.data, coord, mode=mode, order=order,
**kwargs)
except Exception as e:
# logger.warning(e)
# logger.info('transposing the coordinate array')
return map_coordinates(self.data, coord.T, mode=mode, order=order,
**kwargs)
def fill_from_z_gradient(self, vals, zvals):
data = self.data
origin = self.origin
zinds = [int(self.transform_to([origin[0], origin[1], z_])[2]) for z_
in zvals]
# print(zinds, origin)
data[:, :, zinds[0]:] = vals[0]
data[:, :, :zinds[-1]] = vals[-1]
for i in range(len(zinds) - 1):
# print(i)
fill = np.linspace(vals[i + 1], vals[i], zinds[i] - zinds[i + 1])
data[:, :, zinds[i + 1]:zinds[i]] = fill
def get_grid_point_coordinates(self, mesh_grid=True):
"""
"""
x = []
for i, (dimension, spacing) in \
enumerate(zip(self.data.shape, self.spacing)):
            v = np.arange(0, dimension) * spacing + self.origin[i]
x.append(v)
if not mesh_grid:
return tuple(x)
if len(x) == 2:
return tuple(np.meshgrid(x[0], x[1]))
if len(x) == 3:
return tuple(np.meshgrid(x[0], x[1], x[2]))
def write(self, filename, format='PICKLE', **kwargs):
"""
write the grid to disk
:param filename: full path to the file to be written
:type filename: str
:param format: output file format
:type format: str
"""
format = format.upper()
        Path(filename).parent.mkdir(parents=True, exist_ok=True)
if format not in ENTRY_POINTS['grid'].keys():
raise TypeError(f'format {format} is currently not supported '
f'for Grid objects')
format_ep = ENTRY_POINTS['grid'][format]
write_format = load_entry_point(format_ep.dist.key,
f'uquake.io.grid.{format_ep.name}',
'writeFormat')
return write_format(self, filename, **kwargs)
def plot_1D(self, x, y, z_resolution, grid_space=False,
inventory=None, reverse_y=True):
"""
:param x: x location
:param y: y location
        :param z_resolution: z resolution in grid units
:param grid_space:
:return:
"""
        if grid_space:
            # convert grid indices to model coordinates so that zs below
            # (built in model space) and the interpolation stay consistent
            x, y, _ = self.transform_from([x, y, 0])
            grid_space = False
        zs = np.arange(self.origin[2], self.corner[2], z_resolution)
coords = []
for z in zs:
coords.append(np.array([x, y, z]))
values = self.interpolate(coords, grid_space=grid_space)
plt.plot(values, zs)
if reverse_y:
plt.gca().invert_yaxis()
if (inventory):
z_stas = []
for network in inventory:
for station in network:
loc = station.loc
z_stas.append(loc[2])
plt.plot([np.mean(values)] * len(z_stas), z_stas, 'kv')
plt.show()
@property
def ndim(self):
return self.data.ndim
@property
def shape(self):
return list(self.data.shape)
@property
def dims(self):
return self.shape
@property
def dimensions(self):
return self.shape
@property
def corner(self):
return np.array(self.origin) + np.array(self.shape) * \
np.array(self.spacing)
def angles(travel_time_grid):
"""
This function calculate the take off angle and azimuth for every grid point
given a travel time grid calculated using an Eikonal solver
:param travel_time_grid: travel_time grid
:type travel_time_grid: ~uquake.core.grid.Grid.
    :return: azimuth and takeoff angle grids
.. Note: The convention for the takeoff angle is that 0 degree is down.
"""
gds_tmp = np.gradient(travel_time_grid.data)
gds = [-gd for gd in gds_tmp]
tmp = np.arctan2(gds[0], gds[1]) # azimuth is zero northwards
azimuth = travel_time_grid.copy()
azimuth.type = 'ANGLE'
azimuth.data = tmp
hor = np.sqrt(gds[0] ** 2 + gds[1] ** 2)
tmp = np.arctan2(hor, -gds[2])
# takeoff is zero pointing down
takeoff = travel_time_grid.copy()
takeoff.type = 'ANGLE'
takeoff.data = tmp
return azimuth, takeoff
def ray_tracer(travel_time_grid, start, grid_space=False, max_iter=1000,
arrival_id=None, earth_model_id=None,
network: str=None):
"""
This function calculates the ray between a starting point (start) and an
end point, which should be the seed of the travel_time grid, using the
gradient descent method.
:param travel_time_grid: a travel time grid
:type travel_time_grid: TTGrid
:param start: the starting point (usually event location)
:type start: tuple, list or numpy.array
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param max_iter: maximum number of iteration
:param arrival_id: id of the arrival associated to the ray if
applicable
:type arrival_id: uquake.core.event.ResourceIdentifier
:param earth_model_id: velocity/earth model id.
:type earth_model_id: uquake.core.event.ResourceIdentifier
:param network: network information
:type network: str
:rtype: numpy.array
"""
from uquake.core.event import Ray
if grid_space:
start = np.array(start)
start = travel_time_grid.transform_from(start)
origin = travel_time_grid.origin
spacing = travel_time_grid.spacing
end = np.array(travel_time_grid.seed)
start = np.array(start)
# calculating the gradient in every dimension at every grid points
gds = [Grid(gd, origin=origin, spacing=spacing)
for gd in np.gradient(travel_time_grid.data)]
dist = np.linalg.norm(start - end)
cloc = start # initializing cloc "current location" to start
gamma = spacing / 2 # gamma is set to half the grid spacing. This
# should be
# sufficient. Note that gamma is fixed to reduce
# processing time.
nodes = [start]
iter_number = 0
while np.all(dist > spacing / 2):
if iter_number > max_iter:
break
if np.all(dist < spacing * 4):
gamma = np.min(spacing) / 4
gvect = np.array([gd.interpolate(cloc, grid_space=False,
order=1)[0] for gd in gds])
cloc = cloc - gamma * gvect / (np.linalg.norm(gvect) + 1e-8)
nodes.append(cloc)
dist = np.linalg.norm(cloc - end)
iter_number += 1
nodes.append(end)
tt = travel_time_grid.interpolate(start, grid_space=False, order=1)[0]
az = travel_time_grid.to_azimuth_point(start, grid_space=False,
order=1)
toa = travel_time_grid.to_takeoff_point(start, grid_space=False,
order=1)
ray = Ray(nodes=nodes, site_code=travel_time_grid.seed_label,
arrival_id=arrival_id, phase=travel_time_grid.phase,
azimuth=az, takeoff_angle=toa, travel_time=tt,
earth_model_id=earth_model_id, network=network)
return ray
```
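A self-contained sketch of the basic `Grid` API above: build a homogeneous grid from its dimensions, move between model and grid coordinates, and interpolate a value. All numbers are arbitrary.
```python
import numpy as np
from uquake.grid.base import Grid  # path taken from the file header above

# 10 x 10 x 10 grid, 5 m spacing, origin at (0, 0, 0), filled with 1500 (e.g. m/s)
grid = Grid([10, 10, 10], spacing=[5, 5, 5], origin=[0, 0, 0], value=1500)

point = np.array([12.5, 7.5, 20.0])               # model-space coordinates
print(grid.in_grid(point))                        # True
print(grid.transform_to(point))                   # [2.5 1.5 4. ] grid indices
print(grid.interpolate(point, grid_space=False))  # ~[1500.]
```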
#### File: uquake/grid/hdf5.py
```python
import os
from glob import glob
import numpy as np
import h5py
class H5TTable(object):
"""docstring for H5TTable"""
def __init__(self, path, dset_key=None):
self.path = path
self.hf = h5py.File(path, 'r')
self.keys = list(self.hf.keys())
self.dset = None
if dset_key is not None:
self.set_dataset(dset_key)
self.sites = self.hf['sites'][:].astype('U6')
self.stations = self.hf['stations'][:].astype('U4')
self.station_locations = self.hf['station_locations'].astype('U2')
self._sitedict = dict(zip(self.sites, np.arange(len(self.sites))))
self.locations = self.hf['locations'][:]
self.coords = self.hf['grid_locs'][:]
    def __del__(self):
        self.hf.close()
def set_dataset(self, key):
if key in self.keys:
self.dset = self.hf[key]
else:
raise KeyError('dataset %s does not exist' % key)
@property
def shape(self):
return self.hf.attrs['shape']
@property
def origin(self):
return self.hf.attrs['origin']
@property
def spacing(self):
return self.hf.attrs['spacing']
def index_sites(self, sites):
if isinstance(sites, (list, np.ndarray)):
return np.array([self._sitedict[site] for site in sites])
else:
            return self._sitedict[sites]
def icol_to_xyz(self, index):
nx, ny, nz = self.shape
iz = index % nz
iy = ((index - iz) // nz) % ny
ix = index // (nz * ny)
loc = np.array([ix, iy, iz], dtype=float) * self.spacing + self.origin
return loc
def xyz_to_icol(self, loc):
x, y, z = loc
ix, iy, iz = ((loc - self.origin) / self.spacing).astype(int)
nx, ny, nz = self.shape
# return (iz * nx * ny) + (iy * nx) + ix;
return int((ix * ny * nz) + (iy * nz) + iz)
def close(self):
self.hf.close()
def gdef_to_points(shape, origin, spacing):
maxes = origin + shape * spacing
x = np.arange(origin[0], maxes[0], spacing[0]).astype(np.float32)
y = np.arange(origin[1], maxes[1], spacing[1]).astype(np.float32)
z = np.arange(origin[2], maxes[2], spacing[2]).astype(np.float32)
points = np.zeros((np.product(shape), 3), dtype=np.float32)
ix = 0
for xv in x:
for yv in y:
for zv in z:
points[ix] = [xv, yv, zv]
ix += 1
return points
def array_from_travel_time_ensemble(tt_grids):
    # The travel time grids are grouped by phase; the original loop relied on
    # an undefined `phase` variable, so grouping per phase is the assumed
    # intent here.
    data = {}
    shape = tt_grids[0].shape
    origin = tt_grids[0].origin
    spacing = tt_grids[0].spacing
    ngrid = np.product(shape)
    for phase in ['P', 'S']:
        phase_grids = [grid for grid in tt_grids if grid.phase == phase]
        sites = np.array([grid.seed_label for grid in phase_grids])
        slocs = np.array([grid.seed for grid in phase_grids])
        tts = np.zeros((len(sites), ngrid), dtype=np.float32)
        for i, grid in enumerate(phase_grids):
            tts[i] = grid.data.reshape(ngrid).astype(np.float32)
        data[phase] = dict(ttable=tts, locations=slocs, shape=shape,
                           origin=origin, spacing=spacing, sites=sites)
    return data
def write_hdf5(fname, tt_grids):
hf = h5py.File(fname, 'w')
shape = tt_grids.shape
spacing = tt_grids.spacing
origin = tt_grids.origin
hf.attrs['shape'] = shape
hf.attrs['spacing'] = spacing
hf.attrs['origin'] = origin
sites = tt_grids.seed_labels
locations = tt_grids.seeds
hf.create_dataset('locations', data=locations.astype(np.float32))
gridlocs = gdef_to_points(shape, origin, spacing)
hf.create_dataset('grid_locs', data=gridlocs.astype(np.float32))
gdef = np.concatenate((shape, origin, spacing)).astype(np.int32)
hf.create_dataset('grid_def', data=gdef)
hf.create_dataset('sites', data=sites.astype('S6'))
stations = np.array([site[0:4] for site in sites])
station_locations = np.array([site[4:] for site in sites])
hf.create_dataset('stations', data=stations.astype('S4'))
hf.create_dataset('station_locations', data=station_locations.astype('S2'))
nsites = len(sites)
ngrid = np.product(shape)
tts = {'P': np.zeros((nsites, ngrid), dtype=np.float32),
'S': np.zeros((nsites, ngrid), dtype=np.float32)}
for i, site in enumerate(sites):
for phase in ['P', 'S']:
tt_grid = tt_grids.select(phase=phase, seed_labels=site)[0]
tts[phase][i] = tt_grid.data.reshape(ngrid).astype(np.float32)
hf.create_dataset('ttp', data=tts['P'])
hf.create_dataset('tts', data=tts['S'])
hf.close()
```
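A quick sketch of `gdef_to_points`, which expands a grid definition into the flat array of node coordinates stored alongside the HDF5 travel time table; the numbers are arbitrary.
```python
import numpy as np
from uquake.grid.hdf5 import gdef_to_points  # path taken from the file header above

shape = np.array([3, 2, 2])
origin = np.array([0.0, 0.0, 0.0])
spacing = np.array([10.0, 10.0, 10.0])

points = gdef_to_points(shape, origin, spacing)
print(points.shape)  # (12, 3): one row per grid node, z varying fastest
print(points[:3])    # [[ 0.  0.  0.], [ 0.  0. 10.], [ 0. 10.  0.]]
```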
#### File: uquake/grid/nlloc.py
```python
import numpy as np
import scipy.ndimage
from .base import Grid
from pathlib import Path
from uuid import uuid4
import matplotlib.pyplot as plt
from loguru import logger
import skfmm
from multiprocessing import Pool, cpu_count
from functools import partial
from typing import Optional
import h5py
from .base import ray_tracer
import shutil
from uquake.grid import read_grid
from .hdf5 import H5TTable, write_hdf5
from scipy.interpolate import interp1d
__cpu_count__ = cpu_count()
valid_phases = ('P', 'S')
valid_grid_types = (
'VELOCITY',
'VELOCITY_METERS',
'SLOWNESS',
'VEL2',
'SLOW2',
'SLOW2_METERS',
'SLOW_LEN',
'STACK',
'TIME',
'TIME2D',
'PROB_DENSITY',
'MISFIT',
'ANGLE',
'ANGLE2D'
)
valid_float_types = {
# NLL_type: numpy_type
'FLOAT': 'float32',
'DOUBLE': 'float64'
}
valid_grid_units = (
'METER',
'KILOMETER',
)
__velocity_grid_location__ = Path('model')
__time_grid_location__ = Path('time')
__default_grid_units__ = 'METER'
__default_float_type__ = 'FLOAT'
def validate_phase(phase):
if phase not in valid_phases:
msg = f'phase should be one of the following valid phases:\n'
for valid_phase in valid_phases:
msg += f'{valid_phase}\n'
raise ValueError(msg)
return True
def validate_grid_type(grid_type):
if grid_type.upper() not in valid_grid_types:
msg = f'grid_type = {grid_type} is not valid\n' \
f'grid_type should be one of the following valid grid ' \
f'types:\n'
for valid_grid_type in valid_grid_types:
msg += f'{valid_grid_type}\n'
raise ValueError(msg)
return True
def validate_grid_units(grid_units):
if grid_units.upper() not in valid_grid_units:
msg = f'grid_units = {grid_units} is not valid\n' \
f'grid_units should be one of the following valid grid ' \
f'units:\n'
for valid_grid_unit in valid_grid_units:
msg += f'{valid_grid_unit}\n'
raise ValueError(msg)
return True
def validate_float_type(float_type):
if float_type.upper() not in valid_float_types.keys():
msg = f'float_type = {float_type} is not valid\n' \
f'float_type should be one of the following valid float ' \
f'types:\n'
for valid_float_type in valid_float_types:
msg += f'{valid_float_type}\n'
raise ValueError(msg)
return True
def validate(value, choices):
if value not in choices:
        msg = 'value should be one of the following choices:\n'
for choice in choices:
msg += f'{choice}\n'
raise ValueError(msg)
return True
class Seeds:
__valid_measurement_units__ = ['METERS', 'KILOMETERS']
    def __init__(self, sites=None, units='METERS'):
"""
specifies a series of source location from an inventory object
:param sites: a list of sites containing at least the location,
and site label
:type sites: list of dictionary
:Example:
>>> site = {'label': 'test', 'x': 1000, 'y': 1000, 'z': 1000,
'elev': 0.0}
>>> sites = [site]
>>> seeds = Seeds(sites)
"""
        validate(units, self.__valid_measurement_units__)
        self.units = units
        self.sites = sites if sites is not None else []
@classmethod
def from_inventory(cls, inventory):
"""
create from an inventory object
:param inventory:
:type inventory: uquake.core.inventory.Inventory
"""
srces = []
for site in inventory.sites:
srce = {'label': site.code,
'x': site.x,
'y': site.y,
'z': site.z,
'elev': 0}
srces.append(srce)
return cls(srces)
@classmethod
def from_json(cls, json):
pass
def add(self, label, x, y, z, elev=0, units='METERS'):
"""
Add a single site to the source list
:param label: site label
:type label: str
:param x: x location relative to geographic origin expressed
in the units of measurements for site/source
:type x: float
:param y: y location relative to geographic origin expressed
in the units of measurements for site/source
:type y: float
:param z: z location relative to geographic origin expressed
in the units of measurements for site/source
:type z: float
:param elev: elevation above z grid position (positive UP) in
kilometers for site (Default = 0)
:type elev: float
:param units: units of measurement used to express x, y, and z
( 'METERS' or 'KILOMETERS')
"""
validate(units.upper(), self.__valid_measurement_units__)
self.sites.append({'label': label, 'x': x, 'y': y, 'z': z,
'elev': elev})
self.units = units.upper()
@classmethod
def generate_random_seeds_in_grid(cls, grid, n_seeds=1):
"""
generate n_seeds random seeds inside the grid provided. This function
is mainly used for testing purposes
:param grid: a grid
:type grid: uquake.grid.base.Grid or an object inheriting from Grid
:param n_seeds: number of seeds to generate
:return: a list of seeds
>>> from uquake.grid.base import Grid
>>> from uquake.grid.nlloc import Seeds
>>> grid_dimensions = [10, 10, 10]
>>> grid_spacing = [1, 1, 1]
>>> grid_origin = [0, 0, 0]
>>> grid = Grid(grid_dimensions, grid_spacing, grid_origin, value=1)
>>> seeds = Seeds.generate_random_seeds_in_grid(grid, n_seeds=10)
"""
        seeds = cls()
label_root = 'seed'
for i, point in enumerate(grid.generate_random_points_in_grid(
n_points=n_seeds)):
label = f'{label_root}_{i}'
seeds.add(label, point[0], point[1], point[2])
return seeds
def __repr__(self):
line = ""
for site in self.sites:
# test if site name is shorter than 6 characters
line += f'GTSRCE {site["label"]} XYZ ' \
f'{site["x"] / 1000:>15.6f} ' \
f'{site["y"] / 1000:>15.6f} ' \
f'{site["z"] / 1000:>15.6f} ' \
f'0.00\n'
return line
@property
def locs(self):
seeds = []
for site in self.sites:
seeds.append([site['x'], site['y'], site['z']])
return np.array(seeds)
@property
def labels(self):
seed_labels = []
for site in self.sites:
seed_labels.append(site['label'])
return np.array(seed_labels)
# class Srces(Seeds):
# def __init__(self, sites=[], units='METERS'):
# super().__init__(sites=sites, units=units)
class NLLocGrid(Grid):
"""
base 3D rectilinear grid object
"""
def __init__(self, data_or_dims, origin, spacing, phase,
value=0, grid_type='VELOCITY_METERS',
grid_units=__default_grid_units__,
float_type="FLOAT", model_id=None):
"""
        :param data_or_dims: data or data dimensions. If dimensions are
        provided, a homogeneous grid is created with value=value
:param origin: origin of the grid
:type origin: list
:param spacing: the spacing between grid nodes
:type spacing: list
:param phase: the uquake phase (value 'P' or 'S')
:type phase: str
:param value:
:type value: float
:param grid_type:
:type grid_type: str
:param grid_units:
:type grid_units: str
:param float_type:
:type float_type: str
:param model_id:
:type model_id: str
"""
super().__init__(data_or_dims, spacing=spacing, origin=origin,
value=value, resource_id=model_id)
if validate_phase(phase):
self.phase = phase.upper()
if validate_grid_type(grid_type):
self.grid_type = grid_type.upper()
self.extensions = ['.buf', '.mid', '.hdr']
if validate_grid_units(grid_units):
self.grid_units = grid_units.upper()
if validate_float_type(float_type):
self.float_type = float_type.upper()
def _write_grid_data(self, base_name, path='.'):
Path(path).mkdir(parents=True, exist_ok=True)
with open(Path(path) / (base_name + '.buf'), 'wb') \
as out_file:
if self.float_type == 'FLOAT':
out_file.write(self.data.astype(np.float32).tobytes())
elif self.float_type == 'DOUBLE':
out_file.write(self.data.astype(np.float64).tobytes())
def _write_grid_header(self, base_name, path='.', seed_label=None,
seed=None, seed_units=None):
# convert 'METER' to 'KILOMETER'
if self.grid_units == 'METER':
origin = self.origin / 1000
spacing = self.spacing / 1000
else:
origin = self.origin
spacing = self.spacing
line1 = f'{self.shape[0]:d} {self.shape[1]:d} {self.shape[2]:d} ' \
f'{origin[0]:f} {origin[1]:f} {origin[2]:f} ' \
f'{spacing[0]:f} {spacing[1]:f} {spacing[2]:f} ' \
f'{self.grid_type}\n'
with open(Path(path) / (base_name + '.hdr'), 'w') as out_file:
out_file.write(line1)
if self.grid_type in ['TIME', 'ANGLE']:
if seed_units is None:
                    logger.warning(f'seed_units are not defined. '
                                   f'Assuming same units as grid '
                                   f'({self.grid_units})')
if self.grid_units == 'METER':
seed = seed / 1000
line2 = u"%s %f %f %f\n" % (seed_label,
seed[0], seed[1], seed[2])
out_file.write(line2)
out_file.write(u'TRANSFORM NONE\n')
return True
def _write_grid_model_id(self, base_name, path='.'):
with open(Path(path) / (base_name + '.mid'), 'w') as out_file:
out_file.write(f'{self.model_id}')
return True
def write(self, base_name, path='.'):
self._write_grid_data(base_name, path=path)
self._write_grid_header(base_name, path=path)
self._write_grid_model_id(base_name, path=path)
return True
def mv(self, base_name, origin, destination):
"""
move a NLLoc grid with a certain base_name from an origin to a
destination
:param NLLocGridObject:
:type NLLocGridObject: uquake.grid.nlloc.NLLocGrid
:param base_name:
:type base_name: str
:param origin:
:type origin: str
:param destination:
:type destination: str
:return:
"""
self.write(base_name, destination)
for ext in self.extensions:
shutil.move(f'{origin}/{base_name}.{ext}',
f'{destination}/{base_name}.{ext}')
@property
def model_id(self):
return self.resource_id
class ModelLayer:
"""
1D model varying in Z
"""
def __init__(self, z_top, value_top):
"""
:param z_top: Top of the layer z coordinates
:param value_top: Value at the top of the layer
"""
self.z_top = z_top
self.value_top = value_top
def __repr__(self):
        return f'top - {self.z_top} | value - {self.value_top}\n'
class LayeredVelocityModel(object):
def __init__(self, model_id=None, velocity_model_layers=None,
phase='P', grid_units='METER',
float_type=__default_float_type__,
gradient=False):
"""
Initialize
:param model_id: model id, if not set the model ID is set using UUID
:type model_id: str
:param velocity_model_layers: a list of VelocityModelLayer
:type velocity_model_layers: list
:param phase: Phase either 'P' or 'S'
:type phase: str
"""
        if velocity_model_layers is None:
            velocity_model_layers = []
        self.velocity_model_layers = velocity_model_layers
if validate_phase(phase):
self.phase = phase.upper()
if validate_grid_units(grid_units):
self.grid_units = grid_units.upper()
if validate_float_type(float_type):
self.float_type = float_type.upper()
self.grid_type = 'VELOCITY'
if model_id is None:
model_id = str(uuid4())
self.model_id = model_id
self.gradient = gradient
def __repr__(self):
output = ''
for i, layer in enumerate(self.velocity_model_layers):
output += f'layer {i + 1:4d} | {layer}'
return output
def add_layer(self, layer):
"""
Add a layer to the model. The layers must be added in sequence from the
top to the bottom
:param layer: a LayeredModel object
"""
if not (type(layer) is ModelLayer):
raise TypeError('layer must be a VelocityModelLayer object')
if self.velocity_model_layers is None:
self.velocity_model_layers = [layer]
else:
self.velocity_model_layers.append(layer)
def gen_1d_model(self, z_min, z_max, spacing):
# sort the layers to ensure the layers are properly ordered
z = []
v = []
for layer in self.velocity_model_layers:
z.append(layer.z_top)
v.append(layer.value_top)
if np.max(z) < z_max:
i_z_max = np.argmax(z)
v_z_max = v[i_z_max]
z.append(z_max)
v.append(v_z_max)
if np.min(z) > z_min:
i_z_min = np.argmin(z)
v_z_min = v[i_z_min]
z.append(z_min)
v.append(v_z_min)
i_sort = np.argsort(z)
z = np.array(z)
v = np.array(v)
z = z[i_sort]
v = v[i_sort]
z_interp = np.arange(z_min, z_max, spacing[2])
kind = 'previous'
if self.gradient:
kind = 'linear'
f_interp = interp1d(z, v, kind=kind)
v_interp = f_interp(z_interp)
return z_interp, v_interp
def gen_3d_grid(self, network_code, dims, origin, spacing):
model_grid_3d = VelocityGrid3D.from_layered_model(self,
network_code,
dims, origin,
spacing)
return model_grid_3d
def plot(self, z_min, z_max, spacing, *args, **kwargs):
"""
Plot the 1D velocity model
:param z_min: lower limit of the model
:param z_max: upper limit of the model
:param spacing: plotting resolution in z
:return: matplotlib axis
"""
z_interp, v_interp = self.gen_1d_model(z_min, z_max, spacing)
x_label = None
if self.phase == 'P':
x_label = 'P-wave velocity'
elif self.phase == 'S':
            x_label = 'S-wave velocity'
if self.grid_units == 'METER':
units = 'm'
else:
units = 'km'
y_label = f'z [{units}]'
ax = plt.axes()
ax.plot(v_interp, z_interp, *args, **kwargs)
plt.xlabel(x_label)
plt.ylabel(y_label)
ax.set_aspect(2)
plt.tight_layout()
return ax
class VelocityGrid3D(NLLocGrid):
def __init__(self, network_code, data_or_dims, origin, spacing,
phase='P', value=0, float_type=__default_float_type__,
model_id=None, **kwargs):
self.network_code = network_code
        if isinstance(spacing, (int, float)):
            spacing = [spacing, spacing, spacing]
super().__init__(data_or_dims, origin, spacing, phase,
value=value, grid_type='VELOCITY_METERS',
grid_units='METER', float_type=float_type,
model_id=model_id)
@staticmethod
def get_base_name(network_code, phase):
"""
return the base name given a network code and a phase
:param network_code: Code of the network
:type network_code: str
:param phase: Phase, either P or S
:type phase: str either 'P' or 'S'
:return: the base name
"""
validate_phase(phase)
return f'{network_code.upper()}.{phase.upper()}.mod'
@classmethod
def from_ocd(cls, origin, corner, dimensions, val=0):
pass
@classmethod
def from_ocs(cls, origin, corner, spacing, val=0):
pass
@classmethod
    def from_ods(cls, origin, dimensions, spacing, val=0):
pass
@classmethod
def from_layered_model(cls, layered_model, network_code, dims, origin,
spacing, **kwargs):
"""
Generating a 3D grid model from
:param network_code:
:param layered_model:
:param dims:
:param origin:
:param spacing:
:param kwargs:
:return:
"""
z_min = origin[-1]
z_max = z_min + spacing[-1] * dims[-1]
z_interp, v_interp = layered_model.gen_1d_model(z_min, z_max,
spacing)
data = np.zeros(dims)
for i, v in enumerate(v_interp):
data[:, :, i] = v_interp[i]
return cls(network_code, data, origin, spacing,
phase=layered_model.phase,
float_type=layered_model.float_type,
model_id=layered_model.model_id, **kwargs)
def to_slow_lens(self):
data = self.spacing[0] / self.data
return NLLocGrid(data, self.origin, self.spacing,
self.phase, grid_type='SLOW_LEN',
grid_units=self.grid_units,
float_type=self.float_type,
model_id=self.model_id)
@classmethod
def from_slow_len(cls, grid: NLLocGrid, network_code: str):
data = np.mean(grid.spacing) / grid.data
return cls(network_code, data, grid.origin, grid.spacing,
phase=grid.phase, float_type=grid.float_type,
model_id=grid.model_id)
def to_time(self, seed, seed_label, sub_grid_resolution=0.1,
*args, **kwargs):
"""
Eikonal solver based on scikit fast marching solver
:param seed: numpy array location of the seed or origin of useis wave
in model coordinates
(usually location of a station or an event)
:type seed: numpy.array or list
:param seed_label: seed label (name of station)
:type seed_label: basestring
:param sub_grid_resolution: resolution of the grid around the seed.
Propagating the wavefront on a denser grid around the seed,
significantly improves the travel time accuracy. The value represents
a fraction of the grid resolution. For instance, assuming a grid with
spacing of 10m, if the sub_grid_resolution is set to 0.1, the
resolution around the grid will be 1m.
:rtype: TTGrid
"""
if isinstance(seed, list):
seed = np.array(seed)
if not self.in_grid(seed):
logger.warning(f'{seed_label} is outside the grid. '
f'The travel time grid will not be calculated')
return
origin = self.origin
shape = self.shape
spacing = self.spacing
sub_grid_spacing = spacing * sub_grid_resolution
# extent = ((4 * sub_grid_spacing) * 1.2 + sub_grid_spacing)
n_pts_inner_grid = (4 * spacing / sub_grid_spacing * 1.2).astype(int)
for i in range(0, len(n_pts_inner_grid)):
if n_pts_inner_grid[i] % 2:
n_pts_inner_grid[i] += 1
x_i = np.arange(0, n_pts_inner_grid[0]) * sub_grid_spacing[0]
y_i = np.arange(0, n_pts_inner_grid[1]) * sub_grid_spacing[1]
z_i = np.arange(0, n_pts_inner_grid[2]) * sub_grid_spacing[2]
x_i = x_i - np.mean(x_i) + seed[0]
y_i = y_i - np.mean(y_i) + seed[1]
z_i = z_i - np.mean(z_i) + seed[2]
X_i, Y_i, Z_i = np.meshgrid(x_i, y_i, z_i, indexing='ij')
coords = np.array([X_i.ravel(), Y_i.ravel(), Z_i.ravel()]).T
vel = self.interpolate(coords, grid_space=False).reshape(
X_i.shape)
phi = np.ones_like(X_i)
phi[int(np.floor(len(x_i) / 2)), int(np.floor(len(y_i) / 2)),
int(np.floor(len(z_i) / 2))] = 0
tt_tmp = skfmm.travel_time(phi, vel, dx=sub_grid_spacing)
tt_tmp_grid = TTGrid(self.network_code, tt_tmp, [x_i[0], y_i[0],
z_i[0]],
sub_grid_spacing, seed, seed_label,
phase=self.phase, float_type=self.float_type,
model_id=self.model_id,
grid_units=self.grid_units)
data = self.data
xe = origin[0] + np.arange(0, shape[0], 1) * spacing[0]
ye = origin[1] + np.arange(0, shape[1], 1) * spacing[1]
ze = origin[2] + np.arange(0, shape[2], 1) * spacing[2]
Xe, Ye, Ze = np.meshgrid(xe, ye, ze, indexing='ij')
coords = np.array([Xe.ravel(), Ye.ravel(), Ze.ravel()])
corner1 = np.array([np.min(x_i), np.min(y_i), np.min(z_i)])
corner2 = np.array([np.max(x_i), np.max(y_i), np.max(z_i)])
test = ((coords[0, :] >= corner1[0]) & (coords[0, :] <= corner2[0]) &
(coords[1, :] >= corner1[1]) & (coords[1, :] <= corner2[1]) &
(coords[2, :] >= corner1[2]) & (coords[2, :] <= corner2[2]))
Xe_grid = Xe.ravel()[test]
Ye_grid = Ye.ravel()[test]
Ze_grid = Ze.ravel()[test]
X = np.array([Xe_grid, Ye_grid, Ze_grid]).T
        # one interpolated travel time per point in X (no scalar indexing)
        tt_interp = tt_tmp_grid.interpolate(X, grid_space=False, order=3)
bias = np.max(tt_interp)
phi_out = np.ones_like(Xe).ravel()
phi_out[test] = tt_interp - bias
phi_out = phi_out.reshape(Xe.shape)
tt_out = skfmm.travel_time(phi_out, data, dx=spacing)
# tt_out = tt_out.ravel() + bias
tt_out = tt_out.ravel() + bias
tt_out[test] = tt_interp
tt_out = tt_out.reshape(Xe.shape)
tt_out_grid = TTGrid(self.network_code, tt_out, self.origin,
self.spacing, seed, seed_label, phase=self.phase,
float_type=self.float_type,
model_id=self.model_id,
grid_units=self.grid_units)
tt_out_grid.data -= tt_out_grid.interpolate(seed.T,
grid_space=False,
order=3)[0]
return tt_out_grid
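    # Illustration of the sub-grid refinement above (numbers are examples, not
    # taken from the code): with a 10 m model spacing and
    # sub_grid_resolution=0.1, the wavefront is first propagated on a local
    # ~1 m grid centred on the seed; that local solution is then used to
    # re-seed the fast-marching run on the full 10 m grid, which substantially
    # reduces the near-source travel time error compared to running skfmm on
    # the coarse grid alone.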
def to_time_multi_threaded(self, seeds, seed_labels, cpu_utilisation=0.9,
*args, **kwargs):
"""
Multi-threaded version of the Eikonal solver
based on scikit fast marching solver
:param seeds: array of seed
:type seeds: np.array
:param seed_labels: array of seed_labels
:type seed_labels: np.array
:param cpu_utilisation: fraction of the cpu core to be used for the
processing task (between 0 and 1)
:type cpu_utilisation: float between 0 and 1
:param args: arguments to be passed directly to skfmm.travel_time
function
:param kwargs: keyword arguments to be passed directly to
skfmm.travel_time function
:return: a travel time grid ensemble
:rtype: TravelTimeEnsemble
"""
num_threads = int(np.ceil(cpu_utilisation * __cpu_count__))
# ensuring that the number of threads is comprised between 1 and
# __cpu_count__
num_threads = np.max([np.min([num_threads, __cpu_count__]), 1])
data = []
for seed, seed_label in zip(seeds, seed_labels):
if not self.in_grid(seed):
logger.warning(f'{seed_label} is outside the grid. '
f'The travel time grid will not be calculated')
continue
data.append((seed, seed_label))
with Pool(num_threads) as pool:
results = pool.starmap(self.to_time, data)
tt_grid_ensemble = TravelTimeEnsemble(results)
return tt_grid_ensemble
def write(self, path='.'):
base_name = self.base_name
super().write(base_name, path=path)
def mv(self, origin, destination):
"""
        move the velocity grid files from {origin} to {destination}
:param origin: origin
:param destination:
:return:
"""
        super().mv(self.base_name, origin, destination)
@property
def base_name(self):
return self.get_base_name(self.network_code, self.phase)
class VelocityGridEnsemble:
def __init__(self, p_velocity_grid, s_velocity_grid):
"""
:param p_velocity_grid: p-wave 3D velocity grid
:type p_velocity_grid: VelocityGrid3D
:param s_velocity_grid: s-wave 3D velocity grid
:type s_velocity_grid: VelocityGrid3D
"""
self.p_velocity_grid = p_velocity_grid
self.s_velocity_grid = s_velocity_grid
self.__i__ = 0
def __getitem__(self, item):
if item.upper() == 'P':
return self.p_velocity_grid
elif item.upper() == 'S':
return self.s_velocity_grid
else:
raise ValueError(f'{item} is not a valid key. '
f'The key value must either be "P" or "S"')
def __iter__(self):
self.__i__ = 0
return self
    def __next__(self):
        if self.__i__ < 2:
            if self.__i__ == 0:
                self.__i__ += 1
                return self.p_velocity_grid
            elif self.__i__ == 1:
                self.__i__ += 1
                return self.s_velocity_grid
        raise StopIteration
# @property
# def keys(self):
# return ['P', 'S']
def keys(self):
return ['P', 'S']
def write(self, path='.'):
for key in self.keys():
self[key].write(path=path)
def to_time_multi_threaded(self, seeds, seed_labels, cpu_utilisation=0.9,
*args, **kwargs):
tt_grid_ensemble = TravelTimeEnsemble([])
for key in self.keys():
tt_grids = self[key].to_time_multi_threaded(seeds, seed_labels,
cpu_utilisation=
cpu_utilisation,
*args, **kwargs)
tt_grid_ensemble += tt_grids
return tt_grid_ensemble
def to_time(self, seeds, seed_labels, multi_threaded=False,
sub_grid_resolution=0.1, *args, **kwargs):
"""
Convert the velocity grids to travel-time
:param seeds: a list of seeds usually represents site location
:type seeds: numpy.array
:param seed_labels: a list of seed labels, usually represents site
codes
:type seed_labels: list
:param multi_threaded: if true, the travel-time grid will used
multithreading
:param sub_grid_resolution: sub grid resolution for near source
solution in fraction of grid resolution
:param args:
:param kwargs:
:return: Travel time grid ensemble
:rtype: ~uquake.grid.nlloc.TTGridEnsemble
"""
if multi_threaded:
return self.to_time_multi_threaded(seeds, seed_labels,
sub_grid_resolution=
sub_grid_resolution,
*args, **kwargs)
travel_time_grids = []
for seed, seed_label in zip(seeds, seed_labels):
for key in self.keys():
tg = self[key].to_time(seed, seed_label,
sub_grid_resolution
=sub_grid_resolution,
*args, **kwargs)
travel_time_grids.append(tg)
return TravelTimeEnsemble(travel_time_grids)
class SeededGrid(NLLocGrid):
"""
container for seeded grids (e.g., travel time, azimuth and take off angle)
"""
__valid_grid_type__ = ['TIME', 'TIME2D', 'ANGLE', 'ANGLE2D']
def __init__(self, network_code, data_or_dims, origin, spacing, seed,
seed_label, phase='P', value=0,
grid_units=__default_grid_units__,
grid_type='TIME', float_type="FLOAT", model_id=None):
self.seed = seed
self.seed_label = seed_label
self.network_code = network_code
if grid_type not in self.__valid_grid_type__:
raise ValueError()
self.grid_type = grid_type
        super().__init__(data_or_dims, origin, spacing,
                         phase=phase, value=value,
                         grid_type=grid_type, grid_units=grid_units,
                         float_type=float_type, model_id=model_id)
def __repr__(self):
line = f'Travel Time Grid\n' \
f' origin : {self.origin}\n' \
f' spacing : {self.spacing}\n' \
f' dimensions : {self.shape}\n' \
f' seed label : {self.seed_label}\n' \
f' seed location : {self.seed}'
return line
@classmethod
def get_base_name(cls, network_code, phase, seed_label, grid_type):
validate_phase(phase)
if grid_type not in cls.__valid_grid_type__:
raise ValueError(f'{grid_type} is not a valid grid type')
base_name = f'{network_code}.{phase}.{seed_label}.' \
f'{grid_type.lower()}'
return base_name
@property
def base_name(self):
base_name = self.get_base_name(self.network_code, self.phase,
self.seed_label, self.grid_type)
return base_name
def write(self, path='.'):
base_name = self.base_name
self._write_grid_data(base_name, path=path)
self._write_grid_header(base_name, path=path, seed=self.seed,
seed_label=self.seed_label,
seed_units=self.grid_units)
self._write_grid_model_id(base_name, path=path)
class TTGrid(SeededGrid):
def __init__(self, network_code, data_or_dims, origin, spacing, seed,
seed_label, phase='P', value=0, float_type="FLOAT",
model_id=None, grid_units='METER'):
super().__init__(network_code, data_or_dims, origin, spacing, seed,
seed_label, phase=phase, value=value,
grid_type='TIME', float_type=float_type,
model_id=model_id, grid_units=grid_units)
def to_azimuth(self):
"""
This function calculate the take off angle and azimuth for every
grid point given a travel time grid calculated using an Eikonal solver
:return: azimuth and takeoff angles grids
.. Note: The convention for the takeoff angle is that 0 degree is down.
"""
gds_tmp = np.gradient(self.data)
gds = [-gd for gd in gds_tmp]
azimuth = np.arctan2(gds[0], gds[1]) * 180 / np.pi
# azimuth is zero northwards
return AngleGrid(self.network_code, azimuth, self.origin, self.spacing,
self.seed, self.seed_label, 'AZIMUTH',
phase=self.phase, float_type=self.float_type,
model_id=self.model_id, grid_units=self.grid_units)
def to_takeoff(self):
gds_tmp = np.gradient(self.data)
gds = [-gd for gd in gds_tmp]
hor = np.sqrt(gds[0] ** 2 + gds[1] ** 2)
takeoff = np.arctan2(hor, -gds[2]) * 180 / np.pi
# takeoff is zero pointing down
return AngleGrid(self.network_code, takeoff, self.origin, self.spacing,
self.seed, self.seed_label, 'TAKEOFF',
phase=self.phase, float_type=self.float_type,
model_id=self.model_id, grid_units=self.grid_units)
def to_azimuth_point(self, coord, grid_space=False, mode='nearest',
order=1, **kwargs):
"""
calculate the azimuth angle at a particular point on the grid for a
given seed location
:param coord: coordinates at which to calculate the takeoff angle
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param mode: interpolation mode
:param order: interpolation order
:return: takeoff angle at the location coord
"""
return self.to_azimuth().interpolate(coord,
grid_space=grid_space,
mode=mode, order=order,
**kwargs)[0]
def to_takeoff_point(self, coord, grid_space=False, mode='nearest',
order=1, **kwargs):
"""
calculate the takeoff angle at a particular point on the grid for a
given seed location
:param coord: coordinates at which to calculate the takeoff angle
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param mode: interpolation mode
:param order: interpolation order
:return: takeoff angle at the location coord
"""
return self.to_takeoff().interpolate(coord,
grid_space=grid_space,
mode=mode, order=order,
**kwargs)[0]
def ray_tracer(self, start, grid_space=False, max_iter=1000,
arrival_id=None):
"""
This function calculates the ray between a starting point (start) and an
end point, which should be the seed of the travel_time grid, using the
gradient descent method.
:param start: the starting point (usually event location)
:type start: tuple, list or numpy.array
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param max_iter: maximum number of iteration
:param arrival_id: id of the arrival associated to the ray if
applicable
:rtype: numpy.array
"""
return ray_tracer(self, start, grid_space=grid_space,
max_iter=max_iter, arrival_id=arrival_id,
earth_model_id=self.model_id,
network=self.network_code)
@classmethod
def from_velocity(cls, seed, seed_label, velocity_grid):
return velocity_grid.to_time(seed, seed_label)
def write(self, path='.'):
return super().write(path=path)
@property
def site(self):
return self.seed_label
class TravelTimeEnsemble:
def __init__(self, travel_time_grids):
"""
Combine a list of travel time grids together providing meta
functionality (multi-threaded ray tracing, sorting, travel-time
calculation for a specific location etc.). It is assumed that
all grids are compatible, i.e., that all the grids have the same
origin, spacing and dimensions.
:param travel_time_grids: a list of TTGrid objects
"""
self.travel_time_grids = travel_time_grids
self.__i__ = 0
        for tt_grid in self.travel_time_grids:
            if not tt_grid.check_compatibility(travel_time_grids[0]):
                raise AssertionError('grids are not all compatible')
def __len__(self):
return len(self.travel_time_grids)
def __add__(self, other):
for travel_time_grid in other.travel_time_grids:
self.travel_time_grids.append(travel_time_grid)
return TravelTimeEnsemble(self.travel_time_grids)
def __iter__(self):
self.__i__ = 0
return self
def __next__(self):
if self.__i__ < len(self):
result = self.travel_time_grids[self.__i__]
self.__i__ += 1
return result
else:
raise StopIteration
def __getitem__(self, item):
if isinstance(item, int):
return self.travel_time_grids[item]
if isinstance(item, str):
tt_grid_out = None
for travel_time_grid in self.travel_time_grids:
if travel_time_grid.seed_label == item:
return travel_time_grid
raise KeyError(f'{item} not found')
def __repr__(self):
line = f'Number of travel time grids: {len(self)}'
return line
@classmethod
def from_files(cls, path):
"""
create a travel time ensemble from files located in a directory
:param path: the base path to the directory containing the travel time
files.
:return:
"""
tt_grids = []
for fle in Path(path).glob('*time*.hdr'):
path = fle.parent
base_name = '.'.join(fle.name.split('.')[:-1])
fname = str(Path(path) / base_name)
tt_grid = read_grid(fname, format='NLLOC',
float_type=__default_float_type__)
tt_grids.append(tt_grid)
return cls(tt_grids)
def select(self, seed_labels: Optional[list] = None,
phase: Optional[list] = None):
"""
        return the list of grids corresponding to seed_labels.
:param seed_labels: seed labels of the travel time grids to return
:param phase: the phase {'P' or 'S'}, both if None.
:return: a list of travel time grids
:rtype: TravelTimeEnsemble
"""
if (seed_labels is None) and (phase is None):
return self
if seed_labels is None:
seed_labels = np.unique(self.seed_labels)
if phase is None:
phase = ['P', 'S']
returned_grids = []
for travel_time_grid in self.travel_time_grids:
if travel_time_grid.seed_label in seed_labels:
if travel_time_grid.phase in phase:
returned_grids.append(travel_time_grid)
return TravelTimeEnsemble(returned_grids)
def sort(self, ascending:bool = True):
"""
sorting the travel time grid by seed_label
:param ascending: if true the grids are sorted in ascending order
:param ascending: bool
:return: sorted travel time grids.
:rtype: TravelTimeEnsemble
"""
        i = np.argsort(self.seed_labels)
if not ascending:
i = i[-1::-1]
sorted_tt_grids = np.array(self.travel_time_grids)[i]
return TravelTimeEnsemble(sorted_tt_grids)
def travel_time(self, seed, grid_space: bool = False,
seed_labels: Optional[list] = None,
phase: Optional[list] = None):
"""
calculate the travel time at a specific point for a series of site
ids
:param seed: travel time seed
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param seed_labels: a list of sites from which to calculate the
travel time.
:param phase: a list of phases for which the travel time need to be
calculated
        :return: a dictionary of travel times keyed by phase, then by site id
"""
if isinstance(seed, list):
seed = np.array(seed)
if grid_space:
seed = self.travel_time_grids[0].transform_from(seed)
if not self.travel_time_grids[0].in_grid(seed):
raise ValueError('seed is outside the grid')
tt_grids = self.select(seed_labels=seed_labels, phase=phase)
tts = []
labels = []
phases = []
for tt_grid in tt_grids:
labels.append(tt_grid.seed_label)
tts.append(tt_grid.interpolate(seed.T,
grid_space=False)[0])
phases.append(tt_grid.phase)
tts_dict = {}
for phase in np.unique(phases):
tts_dict[phase] = {}
for label, tt, phase in zip(labels, tts, phases):
tts_dict[phase][label] = tt
return tts_dict
def angles(self, seed, grid_space: bool = False,
seed_labels: Optional[list] = None,
phase: Optional[list] = None, **kwargs):
"""
calculate the azimuth at a specific point for a series of site
ids
:param seed: travel time seed
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param seed_labels: a list of sites from which to calculate the
travel time.
:param phase: a list of phases for which the travel time need to be
calculated
        :return: a dictionary with 'azimuth' and 'takeoff' entries, each keyed
        by phase and then by site id
"""
if isinstance(seed, list):
seed = np.array(seed)
if grid_space:
seed = self.travel_time_grids[0].transform_from(seed)
if not self.travel_time_grids[0].in_grid(seed):
raise ValueError('seed is outside the grid')
tt_grids = self.select(seed_labels=seed_labels, phase=phase)
azimuths = []
takeoffs = []
labels = []
phases = []
for tt_grid in tt_grids:
labels.append(tt_grid.seed_label)
azimuths.append(tt_grid.to_azimuth_point(seed.T,
grid_space=False,
**kwargs))
takeoffs.append(tt_grid.to_takeoff_point(seed.T,
grid_space=False,
**kwargs))
phases.append(tt_grid.phase)
azimuth_dict = {}
takeoff_dict = {}
for phase in np.unique(phases):
azimuth_dict[phase] = {}
takeoff_dict[phase] = {}
for label, azimuth, takeoff, phase in zip(labels, azimuths, takeoffs,
phases):
takeoff_dict[phase][label] = takeoff
azimuth_dict[phase][label] = azimuth
angle_dict = {}
angle_dict['takeoff'] = takeoff_dict
angle_dict['azimuth'] = azimuth_dict
return angle_dict
def ray_tracer(self, start, seed_labels=None, multithreading=False,
cpu_utilisation=0.9, grid_space=False, max_iter=1000):
"""
:param start: origin of the ray, usually the location of an event
:param seed_labels: a list of seed labels
:param grid_space: true if the coordinates are expressed in
grid space (indices can be fractional) as opposed to model space
(x, y, z)
:param multithreading: if True use multithreading
:param max_iter: maximum number of iteration
:param cpu_utilisation: fraction of core to use, between 0 and 1.
The number of core to be use is bound between 1 and the total number of
cores
:return: a list of ray
:rtype: list
"""
travel_time_grids = self.select(seed_labels=seed_labels)
kwargs = {'grid_space': grid_space,
'max_iter': max_iter}
if multithreading:
ray_tracer_func = partial(ray_tracer, **kwargs)
num_threads = int(np.ceil(cpu_utilisation * __cpu_count__))
# ensuring that the number of threads is comprised between 1 and
# __cpu_count__
num_threads = np.max([np.min([num_threads, __cpu_count__]), 1])
data = []
for travel_time_grid in travel_time_grids:
data.append((travel_time_grid, start))
with Pool(num_threads) as pool:
results = pool.starmap(ray_tracer_func, data)
for result in results:
result.network = self.travel_time_grids[0].network_code
else:
results = []
for travel_time_grid in travel_time_grids:
results.append(travel_time_grid.ray_tracer(start, **kwargs))
return results
@property
def seeds(self):
seeds = []
for seed_label in self.seed_labels:
seeds.append(self.select(seed_labels=seed_label)[0].seed)
return np.array(seeds)
@property
def seed_labels(self):
seed_labels = []
for grid in self.travel_time_grids:
seed_labels.append(grid.seed_label)
return np.unique(np.array(seed_labels))
@property
def shape(self):
return self.travel_time_grids[0].shape
@property
def origin(self):
return self.travel_time_grids[0].origin
@property
def spacing(self):
return self.travel_time_grids[0].spacing
def write(self, path='.'):
for travel_time_grid in self.travel_time_grids:
travel_time_grid.write(path=path)
def write_hdf5(self, file_name):
write_hdf5(file_name, self)
def to_hdf5(self, file_name):
self.write_hdf5(file_name)
return H5TTable(file_name)
class AngleGrid(SeededGrid):
def __init__(self, network_code, data_or_dims, origin, spacing, seed,
seed_label, angle_type, phase='P', value=0,
float_type="FLOAT", model_id=None, grid_units='degrees'):
self.angle_type = angle_type
super().__init__(network_code, data_or_dims, origin, spacing, seed,
seed_label, phase=phase, value=value,
grid_type='ANGLE', float_type=float_type,
model_id=model_id, grid_units=grid_units)
def write(self, path='.'):
pass
```
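A minimal usage sketch for the ensemble API above (not part of the original file). It assumes `tt_ensemble` is an already built `uquake.grid.nlloc.TravelTimeEnsemble` and that seed labels such as `STN00` exist; the query point is an arbitrary model-space location.
```python
import numpy as np

# hypothetical event location in model space (x, y, z)
event_location = np.array([650., 1200., -500.])

# travel times come back keyed by phase, then by site label
tts = tt_ensemble.travel_time(event_location, seed_labels=['STN00', 'STN01'])
print(tts['P']['STN00'], tts['S']['STN00'])

# azimuth and takeoff angles share the same phase/label structure
ang = tt_ensemble.angles(event_location)
print(ang['azimuth']['P']['STN00'], ang['takeoff']['P']['STN00'])

# rays from the event location to every seed, optionally multithreaded
rays = tt_ensemble.ray_tracer(event_location, multithreading=True)
```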
#### File: uquake/nlloc/nlloc.py
```python
from datetime import datetime
from struct import unpack
import numpy as np
from obspy import UTCDateTime
from ..core.inventory import Inventory
from ..core.logging import logger
from ..core.event import (Catalog)
from uuid import uuid4
from pathlib import Path
import json
test_station_code = 'STN'
def validate(value, choices):
if value not in choices:
        msg = 'value should be one of the following choices:\n'
for choice in choices:
msg += f'{choice}\n'
raise ValueError(msg)
return True
def validate_type(obj, expected_type):
if type(obj) is not expected_type:
raise TypeError('object is not the right type')
__valid_geographic_transformation__ = ['GLOBAL', 'SIMPLE', 'NONE', 'SDC',
'LAMBERT']
__valid_reference_ellipsoid__ = ['WGS-84', 'GRS-80', 'WGS-72', 'Australian',
                                 'Krasovsky', 'International', 'Hayford-1909',
                                 'Clarke-1880', 'Clarke-1866', 'Airy Bessel',
                                 'Hayford-1830', 'Sphere']
__valid_units__ = ['METER', 'KILOMETER']
class Grid2Time:
# def __init__(self, srces, grid_transform, base_directory, base_name,
# verbosity=1, random_seed=1000, p_wave=True, s_wave=True,
# calculate_angles=True, model_directory='models',
# grid_directory='grids'):
def __init__(self, inventory, base_directory, base_name,
verbosity=1, random_seed=1000, p_wave=True, s_wave=True,
calculate_angles=True, model_directory='models',
grid_directory='grids'):
"""
Build the control file and run the Grid2Time program.
Note that at this time the class does not support any geographic
transformation
:param inventory: inventory data
:type inventory: uquake.core.inventory.Inventory
:param base_directory: the base directory of the project
:type base_directory: str
:param base_name: the network code
:type base_name: str
:param verbosity: sets the verbosity level for messages printed to
the terminal ( -1 = completely silent, 0 = error messages only,
1 = 0 + higher-level warning and progress messages,
2 and higher = 1 + lower-level warning and progress messages +
information messages, ...) default: 1
:type verbosity: int
:param random_seed: integer seed value for generating random number
sequences (used by program NLLoc to generate Metropolis samples and
by program Time2EQ to generate noisy time picks)
:param p_wave: if True calculate the grids for the P-wave
:type p_wave: bool
:param s_wave: if True calculate the grids for the S-wave
:type s_wave: bool
:param calculate_angles: if true calculate the azimuth and the
take-off angles
:type calculate_angles: bool
:param model_directory: location of the model directory relative to
the base_directory
:type model_directory: str
:param grid_directory: location of the grid directory relative to
the base_directory
"""
self.verbosity = verbosity
self.random_seed = random_seed
self.base_name = base_name
self.base_directory = Path(base_directory)
self.velocity_model_path = self.base_directory / f'{model_directory}'
self.grid_path = self.base_directory / f'{grid_directory}'
# create the velocity_model_path and the grid_path if they do not exist
self.grid_path.mkdir(parents=True, exist_ok=True)
self.velocity_model_path.mkdir(parents=True, exist_ok=True)
self.calculate_p_wave = p_wave
self.calculate_s_wave = s_wave
self.calculate_angles = calculate_angles
        if not isinstance(inventory, Inventory):
raise TypeError('inventory must be '
'"uquake.core.inventory.Inventory" type')
self.inventory = inventory
def run(self):
if self.calculate_p_wave:
self.__write_control_file__('P')
# run
if self.calculate_s_wave:
self.__write_control_file__('S')
def __write_control_file__(self, phase):
ctrl_dir = f'{self.base_directory}/run/'
Path(ctrl_dir).mkdir(parents=True, exist_ok=True)
# create the directory if the directory does not exist
ctrl_file = ctrl_dir + f'{str(uuid4())}.ctl'
with open(ctrl_file, 'w') as ctrl:
# writing the control section
ctrl.write(f'CONTROL {self.verbosity} {self.random_seed}\n')
# writing the geographic transformation section
ctrl.write('TRANS NONE\n')
# writing the Grid2Time section
out_line = f'GTFILES ' \
f'{self.velocity_model_path}/{self.base_name} ' \
f'{self.grid_path}/{self.base_name} ' \
f'{phase} 0\n'
ctrl.write(out_line)
if self.calculate_angles:
angle_mode = 'ANGLES_YES'
else:
                angle_mode = 'ANGLES_NO'
ctrl.write(f'GTMODE GRID3D {angle_mode}\n')
for site in self.inventory.sites:
# test if site name is shorter than 6 characters
out_line = f'GTSRCE {site.code} XYZ ' \
f'{site.x / 1000:>10.6f} ' \
f'{site.y / 1000 :>10.6f} ' \
f'{site.z / 1000 :>10.6f} ' \
f'0.00\n'
ctrl.write(out_line)
ctrl.write(f'GT_PLFD 1.0e-4 {self.verbosity + 1}\n')
class Control:
def __init__(self, message_flag=-1, random_seed=1000):
"""
Control section
:param message_flag: (integer, min:-1, default:1) sets the verbosity
level for messages printed to the terminal ( -1 = completely silent,
0 = error messages only, 1 = 0 + higher-level warning and progress
messages, 2 and higher = 1 + lower-level warning and progress
messages + information messages, ...)
:param random_seed:(integer) integer seed value for generating random
number sequences (used by program NLLoc to generate Metropolis samples
and by program Time2EQ to generate noisy time picks)
"""
try:
self.message_flag = int(message_flag)
except Exception as e:
raise e
try:
self.random_seed = int(random_seed)
except Exception as e:
raise e
def __repr__(self):
return f'CONTROL {self.message_flag} {self.random_seed}'
class GeographicTransformation:
type = 'GeographicTransformation'
def __init__(self, transformation='NONE'):
validate(transformation, __valid_geographic_transformation__)
self.transformation = transformation
def __repr__(self):
line = f'TRANS {self.transformation}'
return line
class SimpleSDCGeographicTransformation(GeographicTransformation):
def __init__(self, latitude_origin, longitude_origin,
rotation_angle, simple=True):
"""
The SIMPLE or SDC transformation only corrects longitudinal
distances as a function of latitude Algorithm:
>> x = (long - longOrig) * 111.111 * cos(lat_radians);
>> y = (lat - latOrig) * 111.111;
>> lat = latOrig + y / 111.111;
>> long = longOrig + x / (111.111 * cos(lat_radians));
:param latitude_origin: (float, min:-90.0, max:90.0) latitude in
decimal degrees of the rectangular coordinates origin
:param longitude_origin: (float, min:-180.0, max:180.0) longitude in
decimal degrees of the rectangular coordinates origin
:param rotation_angle: (float, min:-360.0, max:360.0) rotation angle
of geographic north in degrees clockwise relative to the rectangular
coordinates system Y-axis
:param simple: Transformation is set to SIMPLE if simple is True.
Transformation is set to SDC if simple is set to False
"""
        if not -90 <= latitude_origin <= 90:
raise ValueError('latitude_origin must be comprised between '
'-90 and 90 degrees')
        if not -180 <= longitude_origin <= 180:
raise ValueError('longitude_origin must be comprised between '
'-180 and 180 degrees')
        if not -360 <= rotation_angle <= 360:
raise ValueError('the rotation angle must be comprised between '
'-360 and 360 degrees')
self.latitude_origin = latitude_origin
self.longitude_origin = longitude_origin
self.rotation_angle = rotation_angle
if simple:
transformation = 'SIMPLE'
else:
transformation = 'SDC'
        super().__init__(transformation=transformation)
    def __setattr__(self, key, value):
        self.__dict__[key] = value
def __repr__(self):
line = f'TRANS {self.transformation} {self.latitude_origin} ' \
f'{self.longitude_origin} {self.rotation_angle}'
return line
class LambertGeographicTransformation(GeographicTransformation):
def __init__(self, reference_ellipsoid, latitude_origin,
longitude_origin, first_standard_parallax,
second_standard_parallax, rotation_angle):
"""
Define a Lambert coordinates system for transformation from Lambert
geographic coordinates to a cartesian/rectangular system.
:param reference_ellipsoid: (choice: WGS-84 GRS-80 WGS-72
Australian Krasovsky International Hayford-1909 Clarke-1880
Clarke-1866 Airy Bessel Hayford-1830 Sphere) reference ellipsoid name
:param latitude_origin: (float, min:-90.0, max:90.0) latitude in
decimal degrees of the rectangular coordinates origin
:param longitude_origin: (float, min:-180.0, max:180.0) longitude in
decimal degrees of the rectangular coordinates origin
:param first_standard_parallax: (float, min:-90.0, max:90.0) first
standard parallels (meridians) in decimal degrees
:param second_standard_parallax: (float, min:-90.0, max:90.0)
second standard parallels (meridians) in decimal degrees
:param rotation_angle: (float, min:-360.0, max:360.0) rotation angle
of geographic north in degrees clockwise relative to the rectangular
coordinates system Y-axis
"""
validate(reference_ellipsoid, __valid_reference_ellipsoid__)
self.reference_ellipsoid = reference_ellipsoid
        if not -90 <= latitude_origin <= 90:
raise ValueError('latitude_origin must be comprised between '
'-90 and 90 degrees')
        if not -180 <= longitude_origin <= 180:
raise ValueError('longitude_origin must be comprised between '
'-180 and 180 degrees')
        if not -360 <= rotation_angle <= 360:
raise ValueError('the rotation angle must be comprised between '
'-360 and 360 degrees')
        if not -90 <= first_standard_parallax <= 90:
raise ValueError('first_standard_parallax must be comprised '
'between -90 and 90 degrees')
        if not -90 <= second_standard_parallax <= 90:
raise ValueError('second_standard_parallax must be comprised '
'between -90 and 90 degrees')
self.latitude_origin = latitude_origin
self.longitude_origin = longitude_origin
self.rotation_angle = rotation_angle
self.first_standard_parallax = first_standard_parallax
self.second_standard_parallax = second_standard_parallax
def __repr__(self):
line = f'TRANS LAMBERT {self.reference_ellipsoid} ' \
f'{self.latitude_origin} {self.longitude_origin} ' \
f'{self.first_standard_parallax} ' \
               f'{self.second_standard_parallax} {self.rotation_angle}'
        return line
class LocSearchGrid:
def __init__(self, num_sample_draw=1000):
"""
:param num_sample_draw: specifies the number of scatter samples to
draw from each saved PDF grid ( i.e. grid with gridType = PROB_DENSITY
and saveFlag = SAVE ) No samples are drawn if saveFlag < 0.
:type num_sample_draw: int
"""
self.num_sample_draw = num_sample_draw
def __repr__(self):
return f'GRID {self.num_sample_draw}\n'
@property
def type(self):
return 'LOCSEARCH'
class LocSearchMetropolis:
def __init__(self, num_samples, num_learn, num_equil, num_begin_save,
num_skip, step_init, step_min, prob_min, step_fact=8.):
"""
Container for the Metropolis Location algorithm parameters
The Metropolis-Gibbs algorithm performs a directed random walk within
a spatial, x,y,z volume to obtain a set of samples that follow the
        3D PDF for the earthquake location. The samples give an estimate of
the optimal hypocenter and an image of the posterior probability
density function (PDF) for hypocenter location.
Advantages:
1. Does not require partial derivatives, thus can be used with
complicated, 3D velocity structures
2. Accurate recovery of moderately irregular (non-ellipsoidal)
PDF's with a single minimum
        3. Only moderately slower (about 10 times slower) than linearised,
iterative location techniques, and is much faster
(about 100 times faster) than the grid-search
4. Results can be used to obtain confidence contours
Drawbacks:
1. Stochastic coverage of search region - may miss important features
2. Inconsistent recovery of very irregular (non-ellipsoidal)
PDF's with multiple minima
3. Requires careful selection of sampling parameters
4. Attempts to read full 3D travel-time grid files into memory,
thus may run very slowly with large number of observations and large
3D travel-time grids
:param num_samples: total number of accepted samples to obtain (min:0)
:type num_samples: int
:param num_learn: number of accepted samples for learning stage of
search (min:0)
:type num_learn: int
:param num_equil: number of accepted samples for equilibration stage
of search (min:0)
:type num_equil: int
:param num_begin_save: number of accepted samples after which to begin
saving stage of search, denotes end of equilibration stage (min:0)
:type num_begin_save: int
:param num_skip: number of accepted samples to skip between saves
(numSkip = 1 saves every accepted sample, min:1)
:type num_skip: int
:param step_init: initial step size in km for the learning stage
(stepInit < 0.0 gives automatic step size selection. If the search
takes too long, the initial step size may be too large;
this may be the case if the search region is very large relative
to the volume of the high confidence region for the locations.)
:type step_init: float
:param step_min: minimum step size allowed during any search stage
(This parameter should not be critical, set it to a low value. min:0)
:type step_min: float
:param prob_min: minimum value of the maximum probability (likelihood)
that must be found by the end of learning stage, if this value is not
reached the search is aborted (This parameters allows the filtering of
locations outside of the search grid and locations with large
residuals.)
:type prob_min: float
:param step_fact: step factor for scaling step size during
equilibration stage (Try a value of 8.0 to start.) Default=8.
:type step_fact: float
"""
self.num_samples = num_samples
self.num_learn = num_learn
self.num_equil = num_equil
self.num_begin_save = num_begin_save
self.num_skip = num_skip
self.step_init = step_init
self.step_min = step_min
self.prob_min = prob_min
self.step_fact = step_fact
def __repr__(self):
line = f'LOCSEARCH MET {self.num_samples} {self.num_learn} ' \
f'{self.num_equil} {self.num_begin_save} {self.num_skip} ' \
               f'{self.step_init} {self.step_min} {self.step_fact} ' \
f'{self.prob_min}\n'
return line
@classmethod
def init_with_default(cls):
pass
@property
def type(self):
return 'LOCSEARCH'
class LocSearchOctTree:
def __init__(self, init_num_cell_x, init_num_cell_y, init_num_cell_z,
min_node_size, max_num_nodes, num_scatter,
use_station_density=False, stop_on_min_node_size=True):
"""
Container for the Octree Location algorithm parameters
        Documentation: http://alomax.free.fr/nlloc/octtree/OctTree.html
Developed in collaboration with <NAME>; Schlumberger Cambridge
Research, Cambridge CB3 0EL, England; <EMAIL>
The oct-tree importance sampling algorithm gives accurate, efficient
and complete mapping of earthquake location PDFs in 3D space (x-y-z).
Advantages:
1. Much faster than grid-search (factor 1/100)
2. More global and complete than Metropolis-simulated annealing
3. Simple, with very few parameters (initial grid size, number of samples)
Drawbacks:
        1. Results are weakly dependent on initial grid size - the method may
not identify narrow, local maxima in the PDF.
2. Attempts to read full 3D travel-time grid files into memory,
thus may run very slowly with large number of observations and large
3D travel-time grids
:param init_num_cell_x: initial number of octtree cells in the x
direction
:type init_num_cell_x: int
:param init_num_cell_y: initial number of octtree cells in the y
direction
:type init_num_cell_y: int
:param init_num_cell_z: initial number of octtree cells in the z
direction
:type init_num_cell_z: int
:param min_node_size: smallest octtree node side length to process,
the octree search is terminated after a node with a side smaller
than this length is generated
:type min_node_size: float
:param max_num_nodes: total number of nodes to process
:type max_num_nodes: int
:param num_scatter: the number of scatter samples to draw from the
octtree results
:type num_scatter: int
:param use_station_density: flag, if True weights oct-tree cell
probability values used for subdivide decision in proportion to number
of stations in oct-tree cell; gives higher search priority to cells
containing stations, stablises convergence to local events when global
search used with dense cluster of local stations
(default:False)
:type use_station_density: bool
:param stop_on_min_node_size: flag, if True, stop search when first
min_node_size reached, if False stop subdividing a given cell when
min_node_size reached (default:True)
:type stop_on_min_node_size: bool
"""
self.init_num_cell_x = init_num_cell_x
self.init_num_cell_y = init_num_cell_y
self.init_num_cell_z = init_num_cell_z
self.min_node_size = min_node_size
self.max_num_nodes = max_num_nodes
self.num_scatter = num_scatter
self.use_station_density = use_station_density
self.stop_on_min_node_size = stop_on_min_node_size
@classmethod
def init_default(cls):
init_num_cell_x = 5
init_num_cell_y = 5
init_num_cell_z = 5
min_node_size = 1E-6
max_num_nodes = 5000
num_scatter = 500
use_station_density = False
stop_on_min_node_size = True
return cls(init_num_cell_x, init_num_cell_y, init_num_cell_z,
min_node_size, max_num_nodes, num_scatter,
use_station_density, stop_on_min_node_size)
def __repr__(self):
line = f'LOCSEARCH OCT {self.init_num_cell_x} ' \
f'{self.init_num_cell_y} {self.init_num_cell_z} ' \
f'{self.min_node_size} {self.max_num_nodes} ' \
f'{self.num_scatter} {self.use_station_density:d} ' \
f'{self.stop_on_min_node_size:d}\n'
return line
@property
def type(self):
return 'LOCSEARCH'
class GaussianModelErrors:
def __init__(self, sigma_time, correlation_length):
"""
container for Gaussian Error Model
:param sigma_time: typical error in seconds for travel-time to one
station due to model errors
:type sigma_time: float
:param correlation_length: correlation length that controls covariance
between stations ( i.e. may be related to a characteristic scale length
of the medium if variations on this scale are not included in the
velocity model)
:type correlation_length: float
"""
self.sigma_time = sigma_time
self.correlation_length = correlation_length
@classmethod
def init_default(cls):
sigma_time = 1E-3
correlation_length = 1E-3
return cls(sigma_time, correlation_length)
def __repr__(self):
return f'LOCGAU {self.sigma_time} {self.correlation_length}\n'
__valid_location_methods__ = ['GAU_ANALYTIC', 'EDT', 'EDT_OT_WT',
'EDT_OT_WT_ML']
class LocationMethod:
def __init__(self, method, max_dist_sta_grid, min_number_phases,
max_number_phases, min_number_s_phases, vp_vs_ratio,
max_number_3d_grid_memory, min_dist_sta_grid):
"""
container for location method
:param method: location method/algorithm ( GAU_ANALYTIC = the inversion
approach of Tarantola and Valette (1982) with L2-RMS likelihood
function. EDT = Equal Differential Time likelihood function cast into
the inversion approach of Tarantola and Valette (1982) EDT_OT_WT =
Weights EDT-sum probabilities by the variance of origin-time estimates
over all pairs of readings. This reduces the probability (PDF values)
at points with inconsistent OT estimates, and leads to more compact
location PDF's. EDT_OT_WT_ML = version of EDT_OT_WT with EDT
origin-time weighting applied using a grid-search, maximum-likelihood
estimate of the origin time. Less efficient than EDT_OT_WT which
uses simple statistical estimate of the origin time.)
:param max_dist_sta_grid: maximum distance in km between a station and the
center of the initial search grid; phases from stations beyond this
distance will not be used for event location
:param min_number_phases: minimum number of phases that must be
accepted before event will be located
:param max_number_phases: maximum number of accepted phases that will
be used for event location; only the first maxNumberPhases read from
the phase/observations file are used for location
:param min_number_s_phases: minimum number of S phases that must be
accepted before event will be located
:param vp_vs_ratio: P velocity to S velocity ratio. If VpVsRatio > 0.0
then only P phase travel-times grids are read and VpVsRatio is used to
calculate S phase travel-times. If VpVsRatio < 0.0 then S phase
travel-times grids are used.
:param max_number_3d_grid_memory: maximum number of 3D travel-time
grids to attempt to read into memory for Metropolis-Gibbs search. This
helps to avoid time-consuming memory swapping that occurs if the total
size of grids read exceeds the real memory of the computer. 3D grids
not in memory are read directly from disk. If maxNum3DGridMemory < 0
then NLLoc attempts to read all grids into memory.
:param min_dist_sta_grid: minimum distance in km between a station and
the center of the initial search grid; phases from stations closer than
this distance will not be used for event location
"""
validate(method, __valid_location_methods__)
self.method = method
self.max_dist_sta_grid = max_dist_sta_grid
self.min_number_phases = min_number_phases
self.max_number_phases = max_number_phases
self.min_number_s_phases = min_number_s_phases
self.vp_vs_ratio = vp_vs_ratio
self.max_number_3d_grid_memory = max_number_3d_grid_memory
self.min_dist_sta_grid = min_dist_sta_grid
@classmethod
def init_default(cls):
method = 'EDT_OT_WT'
max_dist_sta_grid = 9999.
min_number_phases = 6
max_number_phases = -1
min_number_s_phases = -1
vp_vs_ratio = -1
max_number_3d_grid_memory = 0
min_dist_sta_grid = 0
return cls(method, max_dist_sta_grid, min_number_phases,
max_number_phases, min_number_s_phases, vp_vs_ratio,
max_number_3d_grid_memory, min_dist_sta_grid)
def __repr__(self):
line = f'LOCMETH {self.method} {self.max_dist_sta_grid:.1f} ' \
f'{self.min_number_phases} {self.max_number_phases} ' \
f'{self.min_number_s_phases} {self.vp_vs_ratio} ' \
f'{self.max_number_3d_grid_memory}\n'
return line
class SimpleArrival:
def __init__(self, time: UTCDateTime, site: str, phase: str,
polarity: str):
pass
class Observations:
def __init__(self, picks, p_pick_error=1e-3, s_pick_error=1e-3):
"""
:param picks: a list of Pick object
:type picks: list of uquake.core.event.Pick
:param p_pick_error: p-wave picking error in second
:param s_pick_error: s-wave picking error in second
"""
self.picks = picks
self.p_pick_error = p_pick_error
self.s_pick_error = s_pick_error
@classmethod
def from_event(cls, event, p_pick_error=1e-3, s_pick_error=1e-3,
origin_index=None):
if type(event) is Catalog:
event = event[0]
logger.warning('An object type Catalog was provided. Taking the '
'first event of the catalog. This may lead to '
'unwanted behaviour')
if origin_index is None:
if event.preferred_origin() is None:
                logger.warning('The preferred origin is not defined. The '
                               'last inserted origin will be used. This '
                               'may lead to unwanted behaviour')
origin = event.origins[-1]
else:
origin = event.preferred_origin()
else:
origin = event.origins[origin_index]
picks = [arrival.get_pick() for arrival in origin.arrivals]
return cls(picks, p_pick_error=p_pick_error,
s_pick_error=s_pick_error)
@classmethod
def generate_observations_event_location(cls, tt_grids, e_loc=None):
"""
        :param tt_grids: an ensemble of travel time grids
        :type tt_grids: ~uquake.grid.nlloc.TravelTimeEnsemble
        :param e_loc: event location (default None); if not provided, a
        random location inside the grid is generated
"""
from uquake.core.event import Pick, WaveformStreamID
if e_loc is None:
e_loc = tt_grids[0].generate_random_points_in_grid()
travel_times = tt_grids.travel_time(e_loc)
picks = []
location_code = 0
for phase in travel_times.keys():
for site in travel_times[phase].keys():
time = UTCDateTime.now() + travel_times[phase][site]
waveform_id = WaveformStreamID(station_code=site[:-2],
location_code=site[-2:],
channel_code='BHZ')
pk = Pick(time=time, phase_hint=phase, waveform_id=waveform_id,
onset='impulsive', evaluation_mode='automatic',
evaluation_status='preliminary')
picks.append(pk)
location_code += 1
return cls(picks)
def __repr__(self):
lines = ''
for pick in self.picks:
if pick.evaluation_status == 'rejected':
continue
site = pick.site
instrument_identification = pick.waveform_id.channel_code[0:2]
component = pick.waveform_id.channel_code[-1]
phase_onset = 'e' if pick.onset in ['emergent', 'questionable'] \
else 'i'
phase_descriptor = pick.phase_hint.upper()
if pick.polarity is None:
first_motion = '?'
else:
first_motion = 'U' if pick.polarity.lower() == 'positive' \
else 'D'
datetime_str = pick.time.strftime('%Y%m%d %H%M %S.%f')
error_type = 'GAU'
if pick.phase_hint.upper() == 'P':
pick_error = f'{self.p_pick_error:0.2e}'
else:
pick_error = f'{self.s_pick_error:0.2e}'
# not implemented
coda_duration = -1
amplitude = -1
period = -1
phase_weight = 1
line = f'{site:<6s} {instrument_identification:<4s} ' \
f'{component:<4s} {phase_onset:1s} ' \
f'{phase_descriptor:<6s} {first_motion:1s} ' \
f'{datetime_str} {error_type} {pick_error} ' \
f'{coda_duration:.2e} {amplitude:.2e} {period:.2e} ' \
f'{phase_weight:d}\n'
lines += line
return lines
def write(self, file_name, path='.'):
with open(Path(path) / file_name, 'w') as file_out:
file_out.write(str(self))
class LocFiles:
def __init__(self, velocity_file_path, travel_time_file_path, p_wave=True,
swap_bytes_on_input=False):
"""
Specifies the directory path and file root name (no extension), and
the wave type identifier for the input velocity grid and output
time grids.
:param velocity_file_path: full or relative path and file
root name (no extension) for input velocity grid (generated by
program Vel2Grid)
:type velocity_file_path: str
:param travel_time_file_path: full or relative path and file
root name (no extension) for output travel-time and take-off angle
grids
:type travel_time_file_path: str
:param p_wave: p-wave if True, s-wave if False
:type p_wave: bool
:param swap_bytes_on_input: flag to indicate if hi and low bytes of
input velocity grid file should be swapped
:type swap_bytes_on_input: bool
"""
self.velocity_file_path = velocity_file_path
self.travel_time_file_path = travel_time_file_path
if p_wave:
self.phase = 'P'
else:
self.phase = 'S'
self.swap_bytes_on_input = int(swap_bytes_on_input)
def __repr__(self):
return f'GTFILES {self.velocity_file_path} ' \
f'{self.travel_time_file_path} {self.phase} ' \
f'{self.swap_bytes_on_input}'
class GridTimeMode:
def __init__(self, grid_3d=True, calculate_angles=True):
"""
Specifies several program run modes.
:param grid_3d: if True 3D grid if False 2D grid
:type grid_3d: bool
:param calculate_angles: if True calculate angles and not if False
:type calculate_angles: bool
"""
if grid_3d:
self.grid_mode = 'GRID3D'
else:
self.grid_mode = 'GRID2D'
if calculate_angles:
self.angle_mode = 'ANGLES_YES'
else:
self.angle_mode = 'ANGLES_NO'
def __repr__(self):
return f'GTMODE {self.grid_mode} {self.angle_mode}'
class Site:
def __init__(self, label, x, y, z, elev=None):
self.label = label
self.x = x
self.y = y
self.z = z
self.elev = elev
if elev is None:
self.elev = z
class Srces:
__valid_measurement_units__ = ['METERS', 'KILOMETERS']
def __init__(self, sites=[], units='METERS'):
"""
specifies a series of source location from an inventory object
:param sites: a list of sites containing at least the location,
and site label
:type sites: list of dictionary
:Example:
>>> site = Site(label='test', x=1000, y=1000, z=1000, elev=0.0)
>>> sites = [site]
        >>> srces = Srces(sites)
"""
validate(units, self.__valid_measurement_units__)
self.units = units
self.sites = sites
@classmethod
def from_inventory(cls, inventory):
"""
create from an inventory object
:param inventory:
:type inventory: uquake.core.inventory.Inventory
"""
sites = []
for site in inventory.sites:
sites.append(Site(site.code, site.x, site.y, site.z))
return cls(sites)
@classmethod
def generate_random_srces_in_grid(cls, gd, n_srces=1):
"""
generate n_srces random srces inside the grid provided. This function
is mainly used for testing purposes
:param gd: a grid
:type gd: uquake.grid.base.Grid or an object inheriting from Grid
:param n_srces: number of Srces to generate
:return: a list of srces
>>> from uquake.grid.base import Grid
>>> from uquake.nlloc.nlloc import Srces
>>> grid_dimensions = [10, 10, 10]
>>> grid_spacing = [1, 1, 1]
>>> grid_origin = [0, 0, 0]
>>> grid = Grid(grid_dimensions, grid_spacing, grid_origin, value=1)
        >>> srces = Srces.generate_random_srces_in_grid(grid, n_srces=10)
"""
srces = []
label_root = test_station_code
for i, point in enumerate(gd.generate_random_points_in_grid(
n_points=n_srces)):
label = f'{label_root}{i:02d}'
site = Site(label, point[0], point[1], point[2])
srces.append(site)
return cls(srces)
def add_site(self, label, x, y, z, elev=None, units='METERS'):
"""
Add a single site to the source list
:param label: site label
:type label: str
:param x: x location relative to geographic origin expressed
in the units of measurements for site/source
:type x: float
:param y: y location relative to geographic origin expressed
in the units of measurements for site/source
:type y: float
:param z: z location relative to geographic origin expressed
in the units of measurements for site/source
:type z: float
:param elev: elevation above z grid position (positive UP) in
kilometers for site (Default = None)
:type elev: float
:param units: units of measurement used to express x, y, and z
( 'METERS' or 'KILOMETERS')
"""
validate(units.upper(), self.__valid_measurement_units__)
self.sites.append(Site(label, x, y, z, elev=elev))
self.units = units.upper()
def __repr__(self):
line = ""
for site in self.sites:
# test if site name is shorter than 6 characters
line += f'GTSRCE {site.label} XYZ ' \
f'{site.x / 1000:>15.6f} ' \
f'{site.y / 1000:>15.6f} ' \
f'{site.z / 1000:>15.6f} ' \
f'0.00\n'
return line
@property
def json(self):
dict_out = vars(self)
for i, site in enumerate(dict_out['sites']):
dict_out['sites'][i] = vars(dict_out['sites'][i])
return json.dumps(dict_out)
@classmethod
def from_json(cls, json_obj):
obj = json.loads(json_obj)
sites = []
for key in obj.keys():
if key == 'sites':
for site_dict in obj[key]:
sites.append(Site(**site_dict))
obj['sites'] = sites
        return cls(**obj)
@property
def locs(self):
seeds = []
for site in self.sites:
seeds.append([site.x, site.y, site.z])
return np.array(seeds)
@property
def labels(self):
seed_labels = []
for site in self.sites:
seed_labels.append(site.label)
return np.array(seed_labels)
__valid_search_grid_type__ = ['MISFIT', 'PROB_DENSITY']
class LocGrid(object):
def __init__(self, dim_x, dim_y, dim_z, origin_x, origin_y, origin_z,
spacing_x, spacing_y, spacing_z, grid_type='PROB_DENSITY',
save=False, units='METERS'):
"""
Specifies the size and other parameters of an initial or nested 3D
search grid. The order of LOCGRID statements is critical (see Notes).
repeatable
:param dim_x: number of grid nodes in the x direction
:param dim_y: number of grid nodes in the y direction
:param dim_z: number of grid nodes in the z direction
:param origin_x: x location of the grid origin in km
relative to the geographic origin. Use a large, negative value
( i.e. -1.0e30 ) to indicate automatic positioning of grid along
corresponding direction (valid for nested grids only, may not be used
for initial grid).
:param origin_y: y location of the grid origin in km
relative to the geographic origin.
:param origin_z: z location of the grid origin in km
relative to the geographic origin.
:param spacing_x: grid node spacing in kilometers along the x axis
:param spacing_y: grid node spacing in kilometers along the y axis
:param spacing_z: grid node spacing in kilometers along the z axis
:param grid_type: (choice: MISFIT PROB_DENSITY) statistical quantity to
calculate on grid
:param save: specifies if the results of the search over this grid
should be saved to disk (Default: False)
:type save: bool
:param units: (choice: 'METERS', 'KILOMETERS') grid units
(Default 'METERS')
"""
self.dim_x = dim_x
self.dim_y = dim_y
self.dim_z = dim_z
self.origin_x = origin_x
self.origin_y = origin_y
self.origin_z = origin_z
self.spacing_x = spacing_x
self.spacing_y = spacing_y
self.spacing_z = spacing_z
validate(grid_type, __valid_search_grid_type__)
self.grid_type = grid_type
self.save = save
self.units = units
@classmethod
def init_from_grid(cls, input_grid, grid_type='PROB_DENSITY', save=True):
"""
:param input_grid:
:type input_grid: nlloc.grid.NLLocGrid
:param grid_type: (choice: MISFIT PROB_DENSITY) statistical quantity to
calculate on grid
:param save: specifies if the results of the search over this grid
should be saved to disk (Default: True)
:return:
"""
dims = input_grid.dims
origin = input_grid.origin
spacing = input_grid.spacing
units = input_grid.grid_units
return cls(dims[0], dims[1], dims[2], origin[0], origin[1], origin[2],
spacing[0], spacing[1], spacing[2], units=units,
grid_type=grid_type, save=save)
def __repr__(self):
div = 1
if self.units == 'METER':
div = 1000
if self.save:
save_flag = 'SAVE'
else:
save_flag = 'NO_SAVE'
repr = f'LOCGRID {self.dim_x} {self.dim_y} {self.dim_z} ' \
f'{self.origin_x / div:0.6f} {self.origin_y / div:0.6f} ' \
f'{self.origin_z / div:0.6f} ' \
f'{self.spacing_x / div:0.6f} {self.spacing_y / div:0.6f} ' \
f'{self.spacing_z / div:0.6f} {self.grid_type} {save_flag}\n'
return repr
class LocQual2Err(object):
def __init__(self, *args):
self.args = args
def __repr__(self):
line = 'LOCQUAL2ERR'
for arg in self.args:
line += f' {arg}'
return line + '\n'
__observation_file_types__ = ['NLLOC_OBS', 'HYPO71', 'HYPOELLIPSE',
'NEIC', 'CSEM_ALERT', 'SIMULPS', 'HYPOCENTER',
'HYPODD', 'SEISAN', 'NORDIC', 'NCSN_Y2K_5',
'NCEDC_UCB', 'ETH_LOC', 'RENASS_WWW',
'RENASS_DEP', 'INGV_BOLL', 'INGV_BOLL_LOCAL',
'INGV_ARCH']
class NllocInputFiles:
def __init__(self, observation_files, travel_time_file_root,
output_file_root, observation_file_type='NLLOC_OBS',
i_swap_bytes=False, create_missing_folders=True):
"""
Specifies the directory path and filename for the phase/observation
files, and the file root names (no extension) for the input time grids
and the output files.
        The directories where the files are located are created if missing
        when create_missing_folders is True.
:param observation_files: full or relative path and name for
        phase/observations files, multiple files may be specified with
standard UNIX "wild-card" characters ( * and ? )
:type observation_files: str
:param observation_file_type: (choice: NLLOC_OBS HYPO71 HYPOELLIPSE
NEIC CSEM_ALERT SIMULPS HYPOCENTER HYPODD SEISAN NORDIC NCSN_Y2K_5
NCEDC_UCB ETH_LOC RENASS_WWW RENASS_DEP INGV_BOLL
INGV_BOLL_LOCAL INGV_ARCH) format type for phase/observations files
(see Phase File Formats) - DEFAULT NLLOC_OBS
:type observation_file_type: str
:param travel_time_file_root: full or relative path and file root name
(no extension) for input time grids.
:type travel_time_file_root: str
:param output_file_root: full or relative path and file root name
(no extension) for output files
:type output_file_root: str
:param i_swap_bytes: flag to indicate if hi and low bytes of input
time grid files should be swapped. Allows reading of travel-time grids
from different computer architecture platforms during TRANS GLOBAL mode
location. DEFAULT=False
:type i_swap_bytes: bool
:param create_missing_folders: if True missing folder will be created
"""
        # validate that the parent directories exist; if they do not, they
        # may be created depending on create_missing_folders
observation_files = Path(observation_files)
if not observation_files.parent.exists():
if create_missing_folders:
logger.warning(f'the path <{observation_files.parent}> does '
f'not exist. missing folders will be created')
observation_files.parent.mkdir(parents=True, exist_ok=True)
else:
raise IOError(f'path <{observation_files.parent}> does not '
f'exist')
self.observation_files = observation_files
travel_time_file_root = Path(travel_time_file_root)
if not travel_time_file_root.parent.exists():
if create_missing_folders:
logger.warning(f'the path <{travel_time_file_root.parent}> '
f'does not exist. missing folders will '
f'be created')
travel_time_file_root.parent.mkdir(parents=True, exist_ok=True)
else:
raise IOError(f'path <{travel_time_file_root.parent}> does '
f'not exist')
self.travel_time_file_root = travel_time_file_root
output_file_root = Path(output_file_root)
if not output_file_root.parent.exists():
if create_missing_folders:
logger.warning(f'the path <{output_file_root.parent}> '
f'does not exist. missing folders will '
f'be created')
output_file_root.parent.mkdir(parents=True, exist_ok=True)
else:
raise IOError(f'path <{output_file_root.parent}> does '
f'not exist')
self.output_file_root = output_file_root
validate(observation_file_type, __observation_file_types__)
self.observation_file_type = observation_file_type
self.i_swap_bytes = i_swap_bytes
def __repr__(self):
line = f'LOCFILES {self.observation_files} ' \
f'{self.observation_file_type} {self.travel_time_file_root} ' \
f'{self.output_file_root} {int(self.i_swap_bytes)}\n'
return line
def read_hypocenter_file(filename, units='METER'):
validate(units, __valid_units__)
with open(filename, 'r') as hyp_file:
all_lines = hyp_file.readlines()
hyp = [line.split() for line in all_lines if 'HYPOCENTER' in line][0]
geo = [line.split() for line in all_lines if 'GEOGRAPHIC' in line][0]
s = int(np.floor(float(geo[7])))
us = int((float(geo[7]) - s) * 1e6)
if s < 0:
s = 0
if us < 0:
us = 0
tme = datetime(int(geo[2]), int(geo[3]), int(geo[4]),
int(geo[5]), int(geo[6]), s, us)
tme = UTCDateTime(tme)
hyp_x = float(hyp[2]) * 1000
hyp_y = float(hyp[4]) * 1000
hyp_z = float(hyp[6]) * 1000
return tme, hyp_x, hyp_y, hyp_z
def read_scatter_file(filename):
"""
:param filename: name of the scatter file to read
:return: a numpy array of the points in the scatter file
"""
with open(filename, 'rb') as f:
n_samples = unpack('i', f.read(4))[0]
unpack('f', f.read(4))
unpack('f', f.read(4))
unpack('f', f.read(4))
points = []
for k in range(0, n_samples):
x = unpack('f', f.read(4))[0] * 1000
y = unpack('f', f.read(4))[0] * 1000
z = unpack('f', f.read(4))[0] * 1000
pdf = unpack('f', f.read(4))[0]
points.append([x, y, z, pdf])
return np.array(points)
```
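The statement classes above each render their NLLoc control-file line through `__repr__`, so a control file can be assembled by concatenating them. The sketch below is not part of the original sources; the paths, grid size and `LOCQUAL2ERR` values are illustrative assumptions.
```python
statements = [
    Control(message_flag=1, random_seed=1000),
    GeographicTransformation('NONE'),
    NllocInputFiles('run/obs/picks.obs', 'time/TN', 'loc/TN'),
    LocSearchOctTree.init_default(),
    LocationMethod.init_default(),
    GaussianModelErrors.init_default(),
    LocQual2Err(0.1, 0.5, 1.0, 2.0, 99999.9),
    LocGrid(50, 50, 50, 0., 0., 0., 0.1, 0.1, 0.1, save=True,
            units='KILOMETERS'),
]

with open('run/nlloc.ctl', 'w') as ctl_file:
    for statement in statements:
        line = str(statement)
        ctl_file.write(line if line.endswith('\n') else line + '\n')
```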
#### File: uquake/waveform/transforms.py
```python
import numpy as np
from uquake.helpers.logging import logger
"""
Place to keep trace transforms - mainly rotations to ENZ, PSVSH, etc.
"""
def rotate_to_P_SV_SH(st, cat, debug=False):
fname = 'rotate_to_P_SV_SH'
st_new = st.copy()
event = cat[0]
for arr in event.preferred_origin().arrivals:
if arr.phase == 'S':
continue
pk = arr.pick_id.get_referred_object()
sta = pk.waveform_id.station_code
baz = arr.backazimuth
az = arr.azimuth
takeoff = arr.takeoff_angle
inc_angle = arr.inc_angle
if inc_angle is None:
baz, inc_angle = event.preferred_origin(
).get_incidence_baz_angles(sta, arr.phase)
inc_angle *= 180 / np.pi
if inc_angle is None:
logger.warning("%s: sta:%s [%s] has inc_angle=None --> skip "
"rotation!" % (fname, sta, arr.phase))
continue
trs = st_new.select(station=sta)
if len(trs) == 3:
cos_i = np.cos(inc_angle * np.pi / 180.)
sin_i = np.sin(inc_angle * np.pi / 180.)
cos_baz = np.cos(baz * np.pi / 180.)
sin_baz = np.sin(baz * np.pi / 180.)
col1 = np.array([cos_i, sin_i, 0.])
col2 = np.array([-sin_i * sin_baz, cos_i * sin_baz, -cos_baz])
col3 = np.array([-sin_i * cos_baz, cos_i * cos_baz, sin_baz])
A = np.column_stack((col1, col2, col3))
if debug:
print("sta:%s az:%.1f baz:%.1f takeoff:%.1f inc:%.1f" % (
sta, az, baz, takeoff, inc_angle))
E = trs[0].data
N = trs[1].data
Z = trs[2].data
D = np.row_stack((Z, E, N))
foo = A @ D
# if sta in ['59', '87']:
# trs.plot()
trs[0].data = foo[0, :]
trs[1].data = foo[1, :]
trs[2].data = foo[2, :]
trs[0].stats.channel = 'P'
trs[1].stats.channel = 'SV'
trs[2].stats.channel = 'SH'
'''
P = trs[0].copy().trim(starttime = pk.time -.02, endtime=pk.time +.02)
SV = trs[1].copy().trim(starttime = pk.time -.02, endtime=pk.time +.02)
SH = trs[2].copy().trim(starttime = pk.time -.02, endtime=pk.time +.02)
S = np.sqrt(SV.data**2 + SH.data**2)
print(type(S))
print(S)
PtoS = np.var(P.data)/np.var(S)
print(type(PtoS))
print(PtoS)
print("P_max:%g SV_max:%g SH_max:%g P/S:%f" % (np.max(np.abs(P.data)), np.max(np.abs(SV.data)), \
np.max(np.abs(SH.data), PtoS)))
#if sta in ['59', '87']:
#trs.plot()
#exit()
'''
else:
print("sta:%s --> only has n=%d traces --> can't rotate" % (
sta, len(trs)))
return st_new
def rotate_to_ENZ(st, inventory):
st_new = st.copy()
for sta in st_new.unique_stations():
trs = st_new.select(station=sta)
if not inventory.select(sta):
logger.warning(f'missing station "{sta}" in inventory')
continue
# catching edge case when a uniaxial sensors contains three traces
# with two traces containing only NaN.
if len(trs) == 3:
if np.any([np.all(np.isnan(trs[0].data)),
np.all(np.isnan(trs[1].data)),
np.all(np.isnan(trs[2].data))]):
continue
if len(trs) == 3 and len(inventory.select(sta).channels) == 3:
try:
col1 = inventory.get_channel(sta=sta, cha='X').cosines
col2 = inventory.get_channel(sta=sta, cha='Y').cosines
col3 = inventory.get_channel(sta=sta, cha='Z').cosines
except AttributeError as err:
logger.error(err)
continue
A = np.column_stack((col1, col2, col3))
At = A.transpose()
x = trs[0].data
y = trs[1].data
z = trs[2].data
D = np.row_stack((x, y, z))
foo = At @ D
trs[0].data = foo[0, :]
trs[1].data = foo[1, :]
trs[2].data = foo[2, :]
trs[0].stats.channel = 'E'
trs[1].stats.channel = 'N'
trs[2].stats.channel = 'Z'
return st_new
```
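A hedged usage sketch for the two rotations above (not from the original sources). `st` is assumed to be a uquake `Stream` with three-component traces, `inv` the matching `Inventory` with channel orientation cosines, and `cat` a `Catalog` whose preferred origin carries arrivals with azimuth and takeoff information.
```python
# raw X/Y/Z components -> East/North/Up using the channel cosines
st_enz = rotate_to_ENZ(st, inv)

# East/North/Up -> P/SV/SH using the arrival geometry of the preferred origin
st_psvsh = rotate_to_P_SV_SH(st_enz, cat)
```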
|
{
"source": "jeanphilippemercier/uquake-useis",
"score": 2
}
|
#### File: tests/deprecated/nlloc.py
```python
from useis.processors import nlloc
from importlib import reload
from pathlib import Path
from uquake.grid.nlloc import (ModelLayer, LayeredVelocityModel,
VelocityGridEnsemble)
from uquake.core import read_events, read_inventory
from uquake.nlloc import Observations
import numpy as np
reload(nlloc)
project_path = Path('/data_2/projects')
project_name = 'test_nlloc'
network = 'test_nlloc'
nlloc = nlloc.NLLOC(project_path, project_name, network)
settings = nlloc.settings
test_artifact_path = Path(nlloc.settings.TEST_ARTIFACTS)
inventory = read_inventory(str(test_artifact_path / 'inventory.xml'))
def get_catalog():
event_file = test_artifact_path / 'event_file.xml'
cat = read_events(str(event_file))
for i, pick in enumerate(cat[0].picks):
for site in inventory.sites:
if site.alternate_code == pick.waveform_id.station_code:
cat[0].picks[i].waveform_id.network_code = inventory[0].code
cat[0].picks[i].waveform_id.station_code = site.station.code
cat[0].picks[i].waveform_id.location_code = \
site.location_code
cat[0].picks[i].waveform_id.channel_code = \
site.channels[0].code
break
return cat
event = get_catalog()
def make_layered_model():
# The origin is the lower left corner
project_code = project_name
network_code = network
origin = np.array(settings.default.grids.origin)
dimensions = np.array(settings.default.grids.dimensions)
spacing = np.array(settings.default.grids.spacing)
z = [1168, 459, -300, -500]
vp_z = [4533, 5337, 5836, 5836]
vs_z = [2306, 2885, 3524, 3524]
p_layered_model = LayeredVelocityModel(project_code)
s_layered_model = LayeredVelocityModel(project_code, phase='S')
for (z_, vp, vs) in zip(z, vp_z, vs_z):
layer = ModelLayer(z_, vp)
p_layered_model.add_layer(layer)
layer = ModelLayer(z_, vs)
s_layered_model.add_layer(layer)
vp_grid_3d = p_layered_model.gen_3d_grid(network_code, dimensions, origin,
spacing)
vs_grid_3d = s_layered_model.gen_3d_grid(network_code, dimensions, origin,
spacing)
velocities = VelocityGridEnsemble(vp_grid_3d, vs_grid_3d)
return velocities
vels = make_layered_model()
nlloc.add_inventory(inventory, initialize_travel_time=False)
nlloc.add_velocities(vels, initialize_travel_times=False)
observations = Observations.from_event(event)
loc = nlloc.run_location(calculate_rays=True,
event=event)
```
#### File: uquake-useis/tests/nlloc.py
```python
import shutil
import unittest
from uquake.grid.nlloc import VelocityGrid3D, VelocityGridEnsemble
from loguru import logger
import numpy as np
root_dir = 'projects'
test_project = 'test_project'
test_network = 'TN'
grid_dim = [10, 10, 10]
grid_spacing = [1, 1, 1]
grid_origin = [0, 0, 0]
class NLLOC(unittest.TestCase):
def test_locate_event(self):
from uquake.nlloc.nlloc import Srces, Observations
from useis.processors.nlloc import NLLOC
nll = NLLOC('projects', 'test', 'TN')
p_velocity_grid = VelocityGrid3D(test_network,
grid_dim,
grid_origin,
grid_spacing,
phase='P', value=5000)
s_velocity_grid = VelocityGrid3D(test_network,
grid_dim,
grid_origin,
grid_spacing,
phase='S', value=3000)
velocity_grids = VelocityGridEnsemble(p_velocity_grid, s_velocity_grid)
nll.add_velocities(velocity_grids, initialize_travel_times=False)
srces = Srces.generate_random_srces_in_grid(p_velocity_grid,
n_srces=20)
nll.add_srces(srces, initialize_travel_time=False)
nll.init_travel_time_grids(multi_threaded=True)
e_loc = nll.p_velocity.generate_random_points_in_grid()
observations = Observations.generate_observations_event_location(
nll.travel_times, e_loc=e_loc)
result = nll.run_location(observations=observations)
distance = np.linalg.norm(result.loc - e_loc)
logger.info(f'\n{distance:0.1f} m - error\n'
f'{result.uncertainty * 2:0.1f} m - uncertainty (2 std) ')
shutil.rmtree(root_dir)
self.assertTrue(distance < 3 * result.uncertainty)
if __name__ == '__main__':
unittest.main()
shutil.rmtree(root_dir)
```
#### File: useis/processors/classifier.py
```python
from ..core.project_manager import ProjectManager
import pickle
from pathlib import Path
from ..ai.model import EventClassifier
import uquake
class Classifier(ProjectManager):
def __init__(self, base_projects_path: Path, project_name: str,
network_code: str, use_srces: bool=False):
"""
Object to control the classification
:param base_projects_path:
:param project_name:
:param network_code:
:param use_srces:
"""
self.event_classifier = None
super().__init__(base_projects_path, project_name, network_code,
use_srces=use_srces)
if self.files.classification_model.is_file():
self.event_classifier = EventClassifier.read(
self.files.classification_model)
def add_model(self, model: EventClassifier):
self.event_classifier = model
model.write(self.files.classification_model)
def add_model_from_file(self, filename: str):
ec = EventClassifier.from_pretrained_model_file(filename)
self.event_classifier = ec
ec.write(self.files.classification_model)
def predict(self, st: uquake.core.stream.Stream):
"""
:param st: the waveforms
:type st: uquake.core.stream.Stream
:return:
"""
return self.event_classifier.predict(st)
```
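A hedged usage sketch of the `Classifier` processor above. The project path, project name, network code and model file name are illustrative assumptions, and `st` is assumed to be a uquake `Stream` holding the waveforms to classify.
```python
from pathlib import Path

from useis.processors.classifier import Classifier

clf = Classifier(Path('/data/projects'), 'demo_project', 'DN')

# attach a pre-trained model once; it is persisted inside the project files
clf.add_model_from_file('classifier_model.pickle')

# classify the waveforms of a triggered event
print(clf.predict(st))
```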
#### File: sandbox/demo_kafka/consumer.py
```python
from confluent_kafka import Consumer
import sys
conf = {'bootstrap.servers': "localhost:29092",
'group.id': 'test',
'auto.offset.reset': 'smallest'}
c = Consumer(conf)
running = True
def basic_consume_loop(consumer, topics):
try:
consumer.subscribe(topics)
while running:
msg = consumer.poll(timeout=1.0)
if msg is None: continue
if msg.error():
print('error')
# if msg.error().code() == #KafkaError._PARTITION_EOF:
# # End of partition event
# sys.stderr.write('%% %s [%d] reached end at offset %d\n' %
# (msg.topic(), msg.partition(), msg.offset()))
# elif msg.error():
# print(msg.error())
# # raise KafkaException(msg.error())
else:
print(msg)
finally:
# Close down consumer to commit final offsets.
consumer.close()
def shutdown():
    global running
    running = False
basic_consume_loop(c, ['test'])
```
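A companion sketch (not part of the original demo): a minimal producer that feeds the `test` topic consumed by the loop above, using the same broker address as the consumer configuration.
```python
from confluent_kafka import Producer

producer = Producer({'bootstrap.servers': 'localhost:29092'})

for i in range(10):
    producer.produce('test', value=f'message {i}'.encode('utf-8'))

# wait for all queued messages to be delivered
producer.flush()
```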
#### File: services/file_server/database.py
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy import Column, Integer, String, DateTime
from uquake.core.stream import Trace
Base = declarative_base()
class Database(object):
def __init__(self, url, port, username, password, db):
        self.connection_string = f'postgresql://{username}:{password}' \
                                 f'@{url}:{port}/{db}'
self.engine = create_engine(self.connection_string)
Base.metadata.create_all(self.engine)
self.db_session = sessionmaker(bind=self.engine)
self.session = self.db_session()
def index_trace(self, tr: Trace, file_path):
csd = ContinuousSeismicData(network=tr.stats.network,
station=tr.stats.station,
location=tr.stats.location,
channel=tr.stats.channel,
start_time=tr.stats.starttime.datetime,
end_time=tr.stats.endtime.datetime,
filepath=file_path)
self.session.add(csd)
self.session.commit()
class ContinuousSeismicData(Base):
__tablename__ = "continuous_seismic_data"
id = Column(Integer, primary_key=True, index=True)
network = Column(String, index=True)
station = Column(String, index=True)
location = Column(String, index=True)
channel = Column(String, index=True)
start_time = Column(DateTime, index=True)
end_time = Column(DateTime, index=True)
filepath = Column(String)
```
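A hedged usage sketch of the `Database` indexer above. The connection parameters and the miniSEED path are illustrative assumptions; the traces are read with obspy here, but any uquake `Stream` would work the same way.
```python
from obspy import read

db = Database(url='localhost', port=5432, username='useis',
              password='secret', db='seismic')

file_path = '/data/continuous/2021-01-01T00.mseed'
for tr in read(file_path):
    db.index_trace(tr, file_path)
```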
#### File: services/models/nlloc.py
```python
from .event import SimpleArrival
from pydantic import BaseModel
from typing import Optional, List
from uquake.nlloc import nlloc
from enum import Enum
from datetime import datetime
import numpy as np
from uquake.core import UTCDateTime
from .base import Coordinates3D
from .event import Ray
import useis
from uquake.core.event import Event
class FloatType(str, Enum):
FLOAT = "FLOAT"
DOUBLE = "DOUBLE"
class Site(BaseModel):
label: str
location: Coordinates3D
class Srces(BaseModel):
sites: List[Site]
class Observations(BaseModel):
picks: List[SimpleArrival]
p_pick_error: Optional[float] = 1e-3
s_pick_error: Optional[float] = 1e-3
class Config:
orm_mode = True
@classmethod
def from_uquake(cls, observations: nlloc.Observations):
simple_arrivals = []
for pick in observations.picks:
simple_arrivals.append(SimpleArrival.from_uquake_pick(pick))
return cls(picks=simple_arrivals,
p_pick_error=observations.p_pick_error,
s_pick_error=observations.s_pick_error)
def to_uquake(self):
picks = []
for pick in self.picks:
picks.append(pick.to_uquake_pick())
return nlloc.Observations(picks=picks,
p_pick_error=self.p_pick_error,
s_pick_error=self.s_pick_error)
def to_dict(self):
picks = []
for pick in self.picks:
picks.append(pick.__dict__)
observations_dict = self.__dict__
observations_dict['picks'] = picks
return observations_dict
class NLLOCResults(BaseModel):
hypocenter: Coordinates3D
event_time: datetime
scatter_cloud: List[Coordinates3D]
rays: List[Ray]
observations: Observations
uncertainty: float
hypocenter_file: str
@classmethod
    def from_nlloc_results(cls, nlloc_results: useis.processors.NLLocResults):
hypocenter = Coordinates3D.from_array(nlloc_results.hypocenter)
scatter_cloud = []
for scatter in nlloc_results.scatter_cloud:
scatter_cloud.append(Coordinates3D.from_array(scatter))
rays = []
for ray in nlloc_results.rays:
rays.append(Ray.from_uquake(ray))
observations = Observations.from_uquake(nlloc_results.observations)
return cls(hypocenter=hypocenter, event_time=nlloc_results.event_time,
scatter_cloud=scatter_cloud, rays=rays,
observations=observations,
uncertainty=nlloc_results.uncertainty,
hypocenter_file=nlloc_results.hypocenter_file)
```
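A hedged sketch of moving picks between the uquake object and the pydantic model above, for example to ship them through a web service. `uq_observations` is assumed to be an existing `uquake.nlloc.nlloc.Observations` instance.
```python
obs_model = Observations.from_uquake(uq_observations)

payload = obs_model.to_dict()      # plain dict, e.g. for a JSON response body
restored = obs_model.to_uquake()   # back to a uquake Observations object
```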
#### File: useis/tomography/data.py
```python
__doc__ = \
"""
TODO : BIG DESCRIPTION OF EXCHANGE DATA
"""
import numpy as np
import pickle
import copy
from functools import wraps
def memoize(fct):
"""
This is a decorator which cache the result of a function based on the
given parameter.
"""
return_dict = {}
@wraps(fct)
def wrapper(*args, **kwargs):
if args not in return_dict:
return_dict[args] = fct(*args, **kwargs)
return return_dict[args]
return wrapper
# This is the data description for the input array describing the event
#
ev_dtype = [('name', 'str'),
('id', 'int'),
('position', 'float', (3,)),
('delta_t', 'float')]
st_dtype = ev_dtype
tt_dtype = [('id', 'int'),
('event_id', 'int'),
('traveltime', 'float')]
class EKTTTable(object):
"""
    This object represents a travel time table
    :param data: structured array of travel times (see tt_dtype)
    :param staid: id of the station the travel times refer to
    :param evnfile: path to the pickled event table
    :param stafile: path to the pickled station table
"""
dtype = tt_dtype
def __init__(self, data, staid, evnfile=None, stafile=None):
try:
for tname, ttype in tt_dtype:
data[tname]
import sys
sys.stderr.write(str(data.size))
sys.stderr.flush()
data = np.array(data.__getitem__([tname for tname, ttype in
tt_dtype]), dtype = self.dtype)
except ValueError as e:
data = np.asarray(data, dtype = self.dtype)
self.data = data
self.data.dtype = self.dtype
self.station_id = staid
self.__evn_file__ = evnfile
self.__sta_file__ = stafile
def __get_event_rows__(self):
return self.event_table.data[self.data['event_id']]
event_rows = property(__get_event_rows__)
@memoize
def __get_event_table__(self):
        return pickle.load(open(self.__evn_file__, 'rb'))
event_table = property(__get_event_table__)
def __get_station_row__(self):
return self.station_table.data[self.station_id]
station_row = property(__get_station_row__)
@memoize
def __get_station_table__(self):
        return pickle.load(open(self.__sta_file__, 'rb'))
station_table = property(__get_station_table__)
class EKPunctualData(object):
dtype = None
def __init__(self, data, origin=None, scale=1):
try:
for tname, ttype in self.dtype:
data[tname]
self.data = data
except ValueError as e:
self.data = np.asarray(data, dtype=self.dtype)
self.origin = tuple([0] * data['position'].shape[-1]) \
if origin is None else origin
self.scale = scale
def __get_position_zyx__(self):
return self.data['position']
def __set_position_zyx__(self, pos):
self.data['position'] = pos
position_zyx = property(__get_position_zyx__, __set_position_zyx__)
def __get_position_xyz__(self):
position_zyx = self.data['position']
position_xyz = np.empty_like(position_zyx)
for i in range(position_zyx.shape[1]):
position_xyz[:, i] = position_zyx[:, -(i + 1)]
return position_xyz
position_xyz = property(__get_position_xyz__)
def __get_size__(self):
return self.data.size
size = property(__get_size__)
def add_gaussian_noise(self, position=0, time=0):
pass
class EKEventTable(EKPunctualData):
dtype = ev_dtype
class EKStationTable(EKPunctualData):
dtype = st_dtype
class EKImageData(object):
"""
    This object represents a geometric structure made of a regular
    array of points positioned in space.
:param shape_or_data: The numpy array or the shape of the underlying data
:param spacing: The spacing of the grid
:param origin: A Tuple representing the position of the lower left corner \
of the grid
"""
def __init__(self, shape_or_data, spacing = 1, origin = None):
if isinstance(shape_or_data, np.ndarray):
self.data = shape_or_data
else:
self.data = np.zeros(shape_or_data)
self.origin = tuple([0] * self.data.ndim) if origin is None else origin
self.spacing = spacing
def transform_to(self, values):
return (values - self.origin) / self.spacing
def transform_from(self, values):
        return values * self.spacing + self.origin
def check_compatibility(self, other):
return (self.shape == other.shape) and \
(self.spacing == other.spacing) and \
(self.origin == other.origin)
def __get_shape__(self):
return self.data.shape
shape = property(__get_shape__)
def homogenous_like(self, value):
data = np.empty_like(self.data)
data.fill(value)
return EKImageData(data, self.spacing, origin=self.origin)
def copy(self):
cp = copy.deepcopy(self)
return cp
def SaveNLL(self):
pass
def LoadNLL(self):
pass
### ADDING method to save and load on data from/to NLL
```
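A short sketch of the `EKImageData` coordinate helpers defined above (values are illustrative): build a small grid, derive a homogeneous model with the same geometry, and convert a model-space point to fractional grid indices and back.
```python
import numpy as np

grid = EKImageData((50, 50, 50), spacing=10.0, origin=(0.0, 0.0, 0.0))
velocity = grid.homogenous_like(5000.0)      # same shape/spacing/origin, constant value

point = np.array([125.0, 300.0, 40.0])
indices = grid.transform_to(point)           # model space -> grid indices
recovered = grid.transform_from(indices)     # grid indices -> model space
assert np.allclose(recovered, point)
```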
|
{
"source": "Jean-Pierre-Richa/C3D-with-OpenPose-in-Tensorflow",
"score": 3
}
|
#### File: Jean-Pierre-Richa/C3D-with-OpenPose-in-Tensorflow/pose_list.py
```python
import json
import os
# Collects all the json files from the folder and assign a
# unique ID for each one
def generate_activity_list(json_dir='json/'):
files = os.listdir(json_dir)
activities_list = []
for f in files:
with open(json_dir + f) as file:
Json_dict = json.load(file)
for video in list(Json_dict.keys()):
for activity in list(Json_dict[video]):
if (activity['label'] not in activities_list):
activities_list.append(activity['label'])
activities_ids = dict(map(reversed, enumerate(activities_list)))
# print('Activity list size: ', len(activities_list))
# print('Activities IDs: ', activities_ids)
return activities_ids
```
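A hedged sketch of the JSON layout `generate_activity_list` expects: each file in the folder maps a video id to a list of labelled segments. The file name, video id and labels below are made up for illustration.
```python
import json
import os

os.makedirs('json', exist_ok=True)
example = {
    "video_001": [
        {"label": "walking", "start": 0.0, "end": 3.2},
        {"label": "sitting", "start": 3.2, "end": 7.5},
    ]
}
with open('json/example.json', 'w') as f:
    json.dump(example, f)

# each distinct label gets a unique integer id, e.g. {'walking': 0, 'sitting': 1}
print(generate_activity_list('json/'))
```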
#### File: Jean-Pierre-Richa/C3D-with-OpenPose-in-Tensorflow/tf_records.py
```python
import glob
import os
import activities
import tensorflow as tf
import sys
import cv2
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
shuffle_data = True
DATASET_DIR_TRAIN = 'images/train/'
DATASET_DIR_TEST = 'images/test/'
TFR_DIR = 'tfrecords/'
# Read addresses and labels from the 'train' folder
def create_tfRecords(DATASET_DIR, phase):
labels = []
labels = activities.activities_tfrecords
train_filename = TFR_DIR + phase + '.tfrecords'
if not os.path.exists(TFR_DIR):
os.makedirs(TFR_DIR)
if phase == 'train':
# addrs = glob.glob(DATASET_DIR_TRAIN)
addrs = DATASET_DIR_TRAIN
else:
# addrs = glob.glob(DATASET_DIR_TEST)
addrs = DATASET_DIR_TEST
# phase_addrs = phase + '_addrs'
# phase_labels = phase + '_labels'
phase_addrs = []
phase_labels = []
for classes in os.listdir(DATASET_DIR):
final_class = classes
for j, k in labels.items():
if final_class.split('.')[0] in j:
phase_addrs.append(final_class)
phase_labels.append(k)
# print(final_class)
# print(phase_addrs)
# print(phase_labels)
# Open the tfrecords file
writer = tf.python_io.TFRecordWriter(train_filename)
for i in tqdm(range(len(phase_addrs))):
# print how many images are saved every 1000 images
# if not i%1000:
# print(phase + ' data: {}/{}'.format(i, len(phase_addrs)))
# sys.stdout.flush()
img = load_image(addrs+phase_addrs[i])
lbl = phase_labels[i]
print(addrs+phase_addrs[i])
print(lbl)
# print ('label: ', lbl)
# create a feature
feature = {'label': _int64_feature(lbl),
'image': _bytes_feature(tf.compat.as_bytes(img.tostring()))}
# create an example protocol buffer
example = tf.train.Example(features=tf.train.Features(feature=feature))
# Serialize to string and write on the file
writer.write(example.SerializeToString())
# img1=mpimg.imread(addrs+phase_addrs[i])
# imgplot = plt.imshow(img1)
# plt.show()
writer.close()
sys.stdout.flush()
def load_image(addr):
# Read an image and resize to (im_size, im_size)
# cv2 load image as BGR, convert it to RGB
# print (addr)
img = cv2.imread(addr)
img = cv2.resize(img, (activities.im_size, activities.im_size), interpolation = cv2.INTER_CUBIC)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32)
# print (type(img))
return img
def _int64_feature(value):
return tf.train.Feature(int64_list = tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list = tf.train.BytesList(value=[value]))
create_tfRecords('images/train/', 'train')
```
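A hedged sketch (not part of the original repo) of reading the records back with the TensorFlow 1.x `tf.python_io` API, mirroring the `label`/`image` feature spec used by `create_tfRecords` above; the reshape assumes images were stored as float32 RGB of size `activities.im_size`, as in `load_image`.
```python
import numpy as np
import tensorflow as tf

import activities  # sibling module assumed to define im_size, as above

def read_tfrecords(path='tfrecords/train.tfrecords'):
    """Yield (image, label) pairs decoded from serialized tf.train.Example protos."""
    for record in tf.python_io.tf_record_iterator(path):
        example = tf.train.Example()
        example.ParseFromString(record)
        label = example.features.feature['label'].int64_list.value[0]
        raw = example.features.feature['image'].bytes_list.value[0]
        img = np.frombuffer(raw, dtype=np.float32)
        img = img.reshape((activities.im_size, activities.im_size, 3))
        yield img, label
```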
|
{
"source": "jeanpierrethach/Design-Patterns",
"score": 4
}
|
#### File: patterns/bridge/bridge.py
```python
from abc import ABC, abstractmethod
class Implementor(ABC):
@abstractmethod
def action(self):
pass
class ConcreteImplementorA(Implementor):
def action(self):
print("Concrete Implementor A.")
class ConcreteImplementorB(Implementor):
def action(self):
print("Concrete Implementor B.")
class Abstraction(ABC):
def __init__(self, impl):
self._implementor = impl
@abstractmethod
def operation(self):
pass
class RefinedAbstraction(Abstraction):
def __init__(self, impl):
super().__init__(impl)
def operation(self):
self._implementor.action()
if __name__ == '__main__':
abstracts = [
RefinedAbstraction(ConcreteImplementorA()),
RefinedAbstraction(ConcreteImplementorB())
]
for abstract in abstracts:
abstract.operation()
```
#### File: patterns/builder/builder.py
```python
from abc import ABC, abstractmethod
class Product(object):
def __init__(self):
self._part_a = None
self._part_b = None
def make_a(self, part):
self._part_a = part
def make_b(self, part):
self._part_b = part
def __str__(self):
return "%s %s" % (self._part_a, self._part_b)
class Builder(ABC):
@abstractmethod
def build_part_a(self):
pass
@abstractmethod
def build_part_b(self):
pass
@abstractmethod
def get_result(self):
pass
class ConcreteBuilder(Builder):
def __init__(self):
self._product = Product()
def build_part_a(self):
self._product.make_a("Part A built.")
def build_part_b(self):
self._product.make_b("Part B built.")
def get_result(self):
return self._product
class Director(object):
def __init__(self, builder):
self._builder = builder
def set(self, builder):
self._builder = builder
def construct(self):
self._builder.build_part_a()
self._builder.build_part_b()
if __name__ == '__main__':
builder = ConcreteBuilder()
director = Director(builder)
director.construct()
product1 = builder.get_result()
print(product1)
```
#### File: patterns/facade/facade.py
```python
class SubsystemA(object):
def operation_a1(self):
print("Operation a1")
def operation_a2(self):
print("Operation a2")
class SubsystemB(object):
def operation_b1(self):
print("Operation b1")
def operation_b2(self):
print("Operation b2")
class SubsystemC(object):
def operation_c1(self):
print("Operation c1")
def operation_c2(self):
print("Operation c2")
class Facade(object):
def __init__(self):
self.subsystem_a = SubsystemA()
self.subsystem_b = SubsystemB()
self.subsystem_c = SubsystemC()
def operation1(self):
self.subsystem_a.operation_a1()
self.subsystem_b.operation_b1()
self.subsystem_c.operation_c1()
def operation2(self):
self.subsystem_a.operation_a2()
self.subsystem_b.operation_b2()
self.subsystem_c.operation_c2()
if __name__ == '__main__':
facade = Facade()
facade.operation1()
facade.operation2()
```
|
{
"source": "jean-plank/rpy2html",
"score": 3
}
|
#### File: rpy2json/project/fonts.py
```python
from os import path
from utils import remove_invalid_chars, guiattr
DEFAULT_FONT = 'DejaVuSans.ttf'
DEFAULT_FONT_NAME = remove_invalid_chars(DEFAULT_FONT).lower()
def parse(GAME_BASE_DIR, RENPY_BASE_DIR, gui):
'''
Parse fonts.
'''
res = {
'definitions': {
# 'font_family': {
# 'src': str,
# 'bold': bool
# },
# ...
},
'usages': {
# 'dialog': 'font_family'
# ...
}
}
# add default font definition
file = find_font(GAME_BASE_DIR, RENPY_BASE_DIR, DEFAULT_FONT)
if file:
if not DEFAULT_FONT_NAME in res['definitions']:
res['definitions'][DEFAULT_FONT_NAME] = {
'src': file,
'bold': True
}
else:
print('Couldn\'t find default font')
add_font = partial_add_font(GAME_BASE_DIR, RENPY_BASE_DIR, gui)
add_font('dialog', 'text_font', res)
add_font('choicebtn', 'choice_button_text_font', res)
add_font('mmenu', 'interface_text_font', res)
add_font('guibtn', 'button_text_font', res)
add_font('namebox', 'name_text_font', res)
return res
def partial_add_font(GAME_BASE_DIR, RENPY_BASE_DIR, gui):
def res(usage, gui_attr, fonts):
fpath = guiattr(gui, gui_attr, DEFAULT_FONT)
full_fpath = find_font(GAME_BASE_DIR, RENPY_BASE_DIR, fpath)
if full_fpath:
fname = remove_invalid_chars(fpath).lower()
if fname in fonts['definitions']:
# definition already exists
fonts['usages'][usage] = fname
else:
fonts['definitions'][fname] = {
'src': full_fpath,
'bold': False
}
fonts['usages'][usage] = fname
else:
            print('[WARNING] couldn\'t find font \'%s\', replacing with default font' % fpath)
fonts['usages'][usage] = DEFAULT_FONT_NAME
return res
def find_font(GAME_BASE_DIR, RENPY_BASE_DIR, file):
'''
:returns: file's full path if it exists in GAME_BASE_DIR or
RENPY_BASE_DIR/common
'''
res = path.join(GAME_BASE_DIR, file)
if path.isfile(res):
return res
else:
res = path.join(RENPY_BASE_DIR, 'common', file)
if path.isfile(res):
return res
```
|
{
"source": "jeanpommier/ker51",
"score": 2
}
|
#### File: app/hivernants/views.py
```python
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.http import Http404
from django.template import loader
import json
from django.views import generic
from django_tables2 import SingleTableView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.serializers import serialize
from django.views.generic.base import TemplateView
from .models import Hivernant, Person
from .tables import HivernantTable
#
# def index(request):
# persons = Person.objects.order_by('names')
# context = {
# 'persons': persons,
# }
# return render(request, 'map/index.html', context)
# def detail(request, person_id):
# person = get_object_or_404(Person, pk=person_id)
# return render(request, 'map/detail.html', {'person': person})
class HomeView(TemplateView):
template_name = 'hivernants/index.html'
class ListView(LoginRequiredMixin, SingleTableView):
model = Hivernant
table_class = HivernantTable
# context_object_name = 'persons'
template_name = 'hivernants/liste.html'
# def get_queryset(self):
# return Person.objects.order_by('names')
class DetailView(LoginRequiredMixin, generic.DetailView):
model = Hivernant
template_name = 'hivernants/detail.html'
class MapView(LoginRequiredMixin, generic.TemplateView):
model = Hivernant
template_name = 'hivernants/map.html'
def get_context_data(self, **kwargs):
"""Return the view context data."""
context = super().get_context_data(**kwargs)
context["markers"] = json.loads(
serialize("geojson",
Person.objects.all(),
geometry_field='location',
fields=('names', 'fulladdress', 'picture')
)
)
return context
```
|
{
"source": "jeanpommier/ldap2pg",
"score": 3
}
|
#### File: ldap2pg/ldap2pg/format.py
```python
import itertools
from copy import deepcopy
from string import Formatter
from .utils import unicode
class FormatSpec(object):
# A format string to generate e.g. role names.
def __init__(self, spec):
self.spec = spec
self._fields = None
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.spec)
@property
def fields(self):
if self._fields is None:
self._fields = []
formatter = Formatter()
for _, field, _, _ in formatter.parse(self.spec):
if field is None:
continue
path = [
f for f in field.split('.')
# Exclude method call.
if '(' not in f and ')' not in f
]
self._fields.append(
FormatField(path[0], '.'.join(path[1:]))
)
return self._fields
@property
def static(self):
return 0 == len(self.fields)
@property
def attributes_map(self):
# Aggregate attributes map of all fields.
map_ = AttributesMap()
for field in self.fields:
map_.update(field.attributes_map)
return map_
def iter_combinations(self, vars_):
# Here is the core logic for combination of attributes.
#
# vars_ has the same schema as attributes_map. Top level keys define
# the entry name (either __self__ or a join name). Top level values are
# list of either string (regular value) or dict (for DN components).
#
# First, combine entries, and then, in each entry, combine attributes.
objcombinations = {}
map_ = self.attributes_map.intersection(
getattr(vars_, 'attributes_map', None)
or AttributesMap.from_dict(vars_)
)
for objname, objattrs in map_.items():
objcombinations[objname] = []
objattrs = list(set(["dn"]) | objattrs)
for entry in vars_[objname]:
subset = dict([
(k, v)
for k, v in entry.items()
if k in objattrs
])
objcombinations[objname].extend([
dict(zip(subset.keys(), combination))
for combination in
itertools.product(*subset.values())
])
for combinations in itertools.product(*objcombinations.values()):
out = dict()
for objname, attrs in zip(objcombinations.keys(), combinations):
attrs = dict([
(k, (
FormatEntry(_str=v['dn'], **v)
if isinstance(v, dict) else
v
))
for k, v in attrs.items()
])
if '__self__' == objname:
out.update(attrs)
else:
out[objname] = FormatEntry(
_str=unicode(attrs['dn']), **attrs)
yield out
def expand(self, vars_):
for combination in self.iter_combinations(vars_):
yield self.spec.format(**combination)
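# Hedged illustrative example (not part of the original module): what the
# properties above are expected to return for a spec referencing a join
# attribute. The values are inferred from the code, not from running it.
#
#   spec = FormatSpec('role_{member.cn}')
#   spec.static          # False -- the spec references an LDAP attribute
#   spec.fields          # [FormatField('member', 'cn')]
#   spec.attributes_map  # {'member': {'cn'}}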
class AttributesMap(dict):
# Mapping for format variables dict.
#
# It's a dictionnary with variable name as key and a set of attributes as
# value. The variable name are __self__ for accessing LDAPEntry attributes
# or join attribute name for children entries access.
#
# Each FormatSpec is meant to *request* attributes from LDAPEntry or
# children of it. The fusion of map from all specs of a RoleRule generates
# the schema of the final big variables dict holding all values for
# formatting.
#
# The schema of two specs on the same entry may have a conflict when using
# both {member} and {member.cn} in the same role or grant rule. {member} is
# considered attribute member of __self__ entry while {member.cn} is
# cn attribute of child member.
#
# This will conflict when feeding str.format with member value. Should it
# be the list of __self__.member or all member objects? The code choose to
# always use member entry instead of member attribute.
#
# When spreading the conflict in different FormatSpec, we can be aware of
# the conflict by always passing map_ with vars_.
@classmethod
def from_dict(cls, dct):
return cls([
(k, set(v[0].keys()))
for k, v in dct.items()
])
@classmethod
def gather(cls, *maps):
self = cls()
for map_ in maps:
self.update(map_)
return self
def __add__(self, other):
res = self.__class__()
res.update(self)
res.update(other)
return res
def intersection(self, other):
i = deepcopy(self)
for name in list(i.get("__self__", [])):
if name in other:
i["__self__"].remove(name)
i[name] = set(["dn"])
for name, attrs in list(i.items()):
if name in other.get("__self__", []):
i[name] = set(["dn"])
elif name in other:
i[name] &= other[name]
else:
del i[name]
return i
def update(self, other):
# Merge objects and their attributes
for objname, attrset in other.items():
myset = self.setdefault(objname, set())
myset.update(attrset)
# Remove joined attribute from self.
if "__self__" not in self:
return
for name in set(self.keys()):
if name not in self["__self__"]:
continue
self["__self__"].remove(name)
self[name].add("dn")
class FormatField(object):
# A single {} field from a FormatSpec.
def __init__(self, var, attribute=None):
self.var = var
self.attribute = attribute or None
def __eq__(self, other):
return self.as_tuple() == other.as_tuple()
def __hash__(self):
return hash(self.as_tuple())
def __repr__(self):
return '%s(%r, %r)' % (
self.__class__.__name__,
self.var,
self.attribute,
)
def __str__(self):
return '%s%s' % (
self.var,
'.%s' % self.attribute if self.attribute else '',
)
def as_tuple(self):
return self.var, self.attribute
@property
def attributes_map(self):
# Determine to which object the value should be fetched : parent entry
# (__self__) or a named join entry.
if self.var == 'dn':
# {dn.cn} -> __self__."dn.cn"
object_ = '__self__'
attribute = str(self)
elif self.attribute:
# {member.mail} -> member."mail"
object_ = self.var
attribute = self.attribute
else:
# {member} -> __self__."member"
object_ = "__self__"
attribute = self.var
return AttributesMap({object_: set([attribute])})
class FormatList(list):
# A list of format specs
@classmethod
def factory(cls, format_list):
self = cls()
for format_ in format_list:
self.append(FormatSpec(format_))
return self
def __repr__(self):
return '[%s]' % (', '.join(self.formats),)
@property
def attributes_map(self):
map_ = AttributesMap()
for spec in self:
map_.update(spec.attributes_map)
return map_
def expand(self, vars_):
for spec in self:
for string in spec.expand(vars_):
yield string
@property
def formats(self):
"""List plain formats as fed in factory."""
return [spec.spec for spec in self]
@property
def fields(self):
"""Gather all reference fields in all formats."""
return [
field
for spec in self
for field in spec.fields
]
@property
def has_static(self):
return bool([x for x in self if x.static])
def collect_fields(*field_lists):
return set(itertools.chain(*[
list_.fields for list_ in field_lists
]))
class FormatVars(dict):
# A dictionnary of values from LDAP, grouped for combination, and
# associated with an Attributes map.
def __init__(self, map_, *a, **kw):
self.attributes_map = map_
super(FormatVars, self).__init__(*a, **kw)
class FormatEntry(object):
# Object for dot access of attributes in format, like {member.cn}. Allows
# to render {member} and {member.cn} in the same string.
def __init__(self, **kw):
self._str = "**unset**"
self.update(kw)
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
' '.join(['%s=%s' % i for i in self.__dict__.items()])
)
def __str__(self):
return self._str
def update(self, kw):
self.__dict__.update(kw)
class FormatValue(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __repr__(self):
return 'FormatValue(%r)' % (self.value)
def __eq__(self, other):
return self.value == str(other)
def __getattr__(self, name):
if name in ['lower()', 'upper()']:
return getattr(self.value, name[:-2])()
else:
raise AttributeError(name)
```
|
{
"source": "jeanpva/pythonbirds",
"score": 4
}
|
#### File: pythonbirds/oo/pessoa.py
```python
class Pessoa:
def __init__(self, *filhos, nome = None, idade = 57):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimentar(self):
return f'olá {id(self)}'
if __name__ == '__main__':
jean = Pessoa(nome='Jeam')
luciana = Pessoa(jean, nome='Luciana')
print(Pessoa.cumprimentar(luciana))
print (id(luciana))
print (luciana.cumprimentar())
print (luciana.nome)
print (luciana.idade)
for filho in luciana.filhos:
print(filho.nome)
print(luciana.filhos)
```
|
{
"source": "jeanqasaur/jeeves",
"score": 2
}
|
#### File: conf/conf/forms.py
```python
from django.forms import Form, ModelForm, CharField, FileField, Textarea, ModelForm, HiddenInput, MultipleChoiceField, CheckboxSelectMultiple, BooleanField, ChoiceField
from models import Paper, PaperVersion, UserProfile, Review, ReviewAssignment, Comment, UserPCConflict
from django.contrib.auth.models import User
import random
from django.forms.formsets import formset_factory
class SubmitForm(Form):
coauthor1 = CharField(required=False)
coauthor2 = CharField(required=False)
coauthor3 = CharField(required=False)
title = CharField(1024, required=True)
contents = FileField(required=True)
abstract = CharField(widget=Textarea, required=True)
def __init__(self, possible_reviewers, default_conflict_reviewers, *args, **kwargs):
super(SubmitForm, self).__init__(*args, **kwargs)
choices = []
for r in possible_reviewers:
choices.append((r.username, r))
self.fields['conflicts'] = MultipleChoiceField(widget=CheckboxSelectMultiple(), required=False, choices=choices, initial=list(default_conflict_reviewers))
def is_valid(self):
if not super(SubmitForm, self).is_valid():
return False
try:
coauthors = []
for coauthor_id in ['coauthor1', 'coauthor2', 'coauthor3']:
if coauthor_id in self.cleaned_data and self.cleaned_data[coauthor_id]:
coauthor = User.objects.filter(username=self.cleaned_data[coauthor_id]).get()
coauthors.append(coauthor)
except User.DoesNotExist:
return False
self.cleaned_data['coauthors'] = coauthors
return True
def save(self, user):
d = self.cleaned_data
authors = [user]
if 'coauthor1' in d:
authors.append(d['coauthor1'])
if 'coauthor2' in d:
authors.append(d['coauthor2'])
if 'coauthor3' in d:
authors.append(d['coauthor3'])
paper = Paper()
paper.save()
paper.authors.add(user)
for coauthor in d['coauthors']:
paper.authors.add(coauthor)
paper.save()
d['contents'].name = '%030x' % random.randrange(16**30) + ".pdf"
paper_version = PaperVersion(
paper = paper,
title = d['title'],
abstract = d['abstract'],
contents = d['contents'],
)
paper_version.save()
# need to save paper twice since paper and paper_version point to each other...
paper.latest_version = paper_version
paper.save()
for conflict_username in d['conflicts']:
ra = ReviewAssignment()
ra.user = User.objects.get(username=conflict_username)
ra.paper = paper
ra.type = 'conflict'
ra.save()
return paper
class SubmitReviewForm(ModelForm):
class Meta:
model = Review
fields = ['contents', 'score_novelty', 'score_presentation', 'score_technical', 'score_confidence']
class SubmitCommentForm(ModelForm):
class Meta:
model = Comment
fields = ['contents']
class ReviewAssignmentForm(ModelForm):
class Meta:
model = ReviewAssignment
fields = ['assign_type', 'user', 'paper']
widgets = {
'user' : HiddenInput(),
'paper' : HiddenInput(),
}
ReviewAssignmentFormset = formset_factory(ReviewAssignmentForm, extra=0)
class SearchForm(Form):
# should only show accepted papers
filter_accepted = BooleanField(required=False)
# should only show papers accepted by a reviewer
# filter_reviewer (defined in __init__ below)
# should only show papers by the given author
# filter_author (defined in __init__ below)
filter_title_contains = CharField(required=False)
sort_by = ChoiceField(required=True,
choices=(('---', None),
('title', 'title'),
('score_technical', 'score_technical'),
))
def __init__(self, *args, **kwargs):
reviewers = kwargs['reviewers']
authors = kwargs['authors']
del kwargs['reviewers']
del kwargs['authors']
super(SearchForm, self).__init__(*args, **kwargs)
self.fields['filter_reviewer'] = ChoiceField(required=False,
choices=[('', '---')] + [(r.username, r) for r in reviewers])
self.fields['filter_author'] = ChoiceField(required=False,
choices=[('', '---')] + [(r.username, r) for r in authors])
def get_results(self):
d = self.cleaned_data
query = Paper.objects
# TODO enable support for accepting papers and then enable this
#if d['filter_accepted']:
# query = query.filter(
        if d.get('filter_reviewer', ''):
            query = query.filter(reviewers__username=d['filter_reviewer'])
        if d.get('filter_author', ''):
            query = query.filter(authors__username=d['filter_author'])
if d.get('filter_title_contains', ''):
query = query.filter(latest_version__title__contains=d['filter_title_contains'])
if d.get('sort_by','') == 'title':
query = query.order_by('latest_version__title')
elif d.get('sort_by','') == 'score_technical':
query = query.order_by('latest_version__score_technical')
print query.query.__str__()
results = list(query.all())
return list(results)
```
#### File: demo/courseManager/submission.py
```python
from datetime import *
import sys
sys.path.append("../../..")
import JeevesLib
from smt.Z3 import *
import macropy.activate
from users import *
from assignment import *
class Submission():
def __init__(self, submissionId, title, assignmentId, submitterId, fileRef):
self.submissionId = submissionId
self.title = title
self.assignmentId = assignmentId
self.submitterId = submitterId
self.fileRef = fileRef
self.submittedOn = ""
self.grade = None
self.submittedOn = datetime.now()
JeevesLib.init()
## Policies ##
def _isUser(context):
return isinstance(context, User)
def _isSubmitter(context):
return context.userId == self.submitterId
def _isInstructor(context):
return isinstance(context, Instructor)
## Labels ##
self._viewerL = JeevesLib.mkLabel()
self._editorL = JeevesLib.mkLabel()
self._adminL = JeevesLib.mkLabel()
## Restrict Labels ##
JeevesLib.restrict(self._viewerL, lambda oc: JeevesLib.jor(lambda :_isSubmitter(oc), lambda : _isInstructor(oc) ) )
JeevesLib.restrict(self._editorL, lambda oc: _isSubmitter(oc) )
JeevesLib.restrict(self._adminL, lambda oc: _isInstructor(oc) )
## Getter, Setters, and Show-ers ##
#Grade
def getGrade(self):
        score = JeevesLib.mkSensitive(self._viewerL, self.grade, -1)
return score
def setGrade(self,score):
# Would it be better to store score as a concretized value?
# It wouldn't work as well for a database, but possibly in simple examples
self.grade = score
def showGrade(self, context):
faceted_value = self.getGrade()
return JeevesLib.concretize(context, faceted_value)
#Submission Details (fileRef)
def getSubmissionDetails(self):
details = JeevesLib.mkSensitive(self._viewerL, self.fileRef, "N/A")
return details
def setSubmissionDetails(self, text):
self.fileRef = text
def showSubmissionDetails(self, context):
return JeevesLib.concretize(context, self.getSubmissionDetails())
#Submission Title
def getTitle(self):
details = JeevesLib.mkSensitive(self._viewerL, self.title, "N/A")
return details
def setTitle(self, title):
self.title = title
def showTitle(self, context):
return JeevesLib.concretize(context, self.getTitle())
## Magic Methods ##
def __repr__(self):
#Is there a way to integrate contexts with representation?
#Would there be a point?
return "Submisison(%d, %s, %s)" % (self.submissionId, self.title, self.fileRef)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.submissionId == other.submissionId and self.title == other.title
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
```
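A usage sketch under explicit assumptions (not from the original repo): it presumes `users.py` provides `Instructor` and `Student` classes constructible from a user id, which is all the policies above inspect (`userId` and `isinstance(..., Instructor)`); the ids and grade are illustrative.
```python
import JeevesLib
from users import Instructor, Student   # assumed API of the sibling users.py
from submission import Submission

JeevesLib.init()

submitter = Student(42)     # hypothetical constructor: a student with userId 42
teacher = Instructor(1)     # hypothetical constructor: an instructor
other = Student(7)          # some unrelated student

sub = Submission(1, "HW1", assignmentId=3, submitterId=42, fileRef="hw1.pdf")
sub.setGrade(95)

print(sub.showGrade(teacher))    # expected: 95 (instructors may view)
print(sub.showGrade(submitter))  # expected: 95 (the submitter may view)
print(sub.showGrade(other))      # expected: -1 (the public facet)
```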
#### File: demo/hipaa/tests.py
```python
from datetime import date
from django.contrib.auth.models import User
from django.db import models
from django.utils import unittest
from django.test import TestCase
import JeevesLib
from jeevesdb import JeevesModel
from jelf.models import Address, CoveredEntity, HospitalVisit, Individual, \
Treatment, UserProfile
import nose.tools as nt
class TestHealthModels(TestCase):
def setUp(self):
JeevesLib.init()
self.arielsHouse=Address.objects.create(
City="Cambridge",
State="MA",
Street="5 Y St.",
ZipCode="14830")
self.house1=Address.objects.create(
City="Boston",
State="MA",
Street="625 Frost Ln.",
ZipCode="14830")
self.jean=Individual.objects.create(
FirstName="Jean"
, LastName="Yang"
, Email="<EMAIL>"
, Sex="Female"
, BirthDate=date(1900,01,01)
, Address=self.house1)
self.ariel=Individual.objects.create(
FirstName="Ariel",
LastName="Jacobs",
Email="<EMAIL>",
Sex="Male",
BirthDate=date(1993,03,21),
Address=self.arielsHouse)
self.jeanyang=User.objects.create_user(
username="jeanyang"
, password="hi")
self.jeanyangProfile=UserProfile.objects.create(
profiletype=1
, username="jeanyang"
, email="<EMAIL>"
, name="<NAME>"
, individual=self.jean)
self.arielj=User.objects.create_user(
username="arielj321"
, email="<EMAIL>"
, password="<PASSWORD>")
self.arielProfile=UserProfile.objects.create(
profiletype=1
, username="arielj321"
, email="<EMAIL>"
, name="<NAME>"
, individual=self.ariel)
self.vision = CoveredEntity.objects.create(ein = "01GBS253DV"
, name = "Vision National")
self.visionProfile=UserProfile.objects.create(
profiletype=2
, username="vision"
, email="<EMAIL>"
, name="Vision National"
, entity=self.vision)
self.health = CoveredEntity.objects.create(ein = "0424D3294N"
, name = "Covered Health")
self.healthProfile=UserProfile.objects.create(
profiletype=2
, username="health"
, email="<EMAIL>"
, name="Covered Health"
, entity=self.health)
def test_get_sample_data(self):
jeanyang = UserProfile.objects.get(username="jeanyang")
self.assertEqual(JeevesLib.concretize(self.jeanyangProfile, jeanyang)
, self.jeanyangProfile)
def test_see_Address(self):
self.assertEqual(
JeevesLib.concretize(self.jeanyangProfile, self.jean.Address)
, self.house1)
self.assertEqual(
JeevesLib.concretize(self.arielProfile, self.jean.Address.Street)
, None)
self.assertEqual(
JeevesLib.concretize(self.arielProfile, self.jean.Address.ZipCode)
, "14800")
def test_hospital_visit_visibility(self):
actual_visit_location = "Third room on the left"
visit = HospitalVisit.objects.create(patient=self.ariel,
date_admitted=date(2003,4,1),
date_released=date(2003,9,13),
condition="Good",
location=actual_visit_location,
hospital=self.vision)
self.assertEqual(
JeevesLib.concretize(self.jeanyangProfile, visit.location)
, HospitalVisit.UNDISCLOSED_LOCATION)
self.assertEqual(
JeevesLib.concretize(self.arielProfile, visit.location)
, actual_visit_location)
self.assertEqual(
JeevesLib.concretize(self.visionProfile, visit.location)
, actual_visit_location)
def test_treatment(self):
treatment = Treatment.objects.create(patient=self.ariel,
date_performed=date(2014,1,1),
performing_entity = self.health,
prescribing_entity = self.health,
service = "W4-491")
self.assertEqual(
JeevesLib.concretize(self.arielProfile, treatment.patient)
, self.ariel)
self.assertEqual(
JeevesLib.concretize(self.jeanyangProfile, treatment.patient)
, None)
self.assertEqual(
JeevesLib.concretize(self.healthProfile, treatment.patient)
, self.ariel)
self.assertEqual(
JeevesLib.concretize(self.visionProfile, treatment.patient)
, None)
```
#### File: demo/jelf-skeleton/tests.py
```python
from datetime import datetime
from django.db import models
from django.utils import unittest
from django.test import TestCase
import JeevesLib
from jelf.models import UserProfile
from jeevesdb import JeevesModel
import nose.tools as nt
class TestJelf(TestCase):
def setUp(self):
JeevesLib.init()
self.alice = UserProfile.objects.create(
username="alice", email="<EMAIL>")
self.bob = UserProfile.objects.create(
username="bob", email="<EMAIL>")
def test_email_view(self):
self.assertEqual(JeevesLib.concretize(self.alice, self.alice.email)
, "<EMAIL>")
self.assertEqual(JeevesLib.concretize(self.bob, self.alice.email)
, "[redacted]")
self.assertEqual(
JeevesLib.concretize(self.alice
, UserProfile.objects.get(email="<EMAIL>"))
, self.alice)
self.assertEqual(
JeevesLib.concretize(self.bob
, UserProfile.objects.get(email="<EMAIL>"))
, None)
```
#### File: openmrs/openmrs/BaseClasses.py
```python
from OpenmrsClasses import *
class BaseOpenmrsObject(OpenmrsObject):
def __init__(self):
        self.uuid = str(uuid.uuid4()) #generates a random uuid
def getUuid(self):
return self.uuid
def setUuid(self, uuid):
self.uuid = uuid
def hashCode(self):
if self.getUuid() == None:
return hash(object) #im not sure if this serves the same purpose as "return super.hashCode();" in the JAVA code
return hash(self.getUuid())
def equals(self, obj):
if self is obj:
return True
if not(isinstance(obj, BaseOpenmrsObject)):
return False
other = obj
if self.getUuid() == None:
return False
return self.getUuid() == (other.getUuid())
def __str__(self):
return "ClassName{hashCode= " + str(self.hashCode()) + "," + "uuid=" + str(self.uuid) + "}"
class BaseOpenmrsData(BaseOpenmrsObject, OpenmrsData):
    def __init__(self, creator=None, dateCreated=None, changedBy=None, dateChanged=None, \
                 voided=False, dateVoided=None, voidedBy=None, voidReason=None):
self.creator = creator
self.dateCreated = dateCreated
self.changedBy = changedBy
self.dateChanged = dateChanged
self.voided = voided
self.dateVoided = dateVoided
self.voidedBy = voidedBy
self.voidReason = voidReason
def getCreator(self):
return self.creator
def setCreator(self, creator):
self.creator = creator
def getDateCreated(self):
return self.dateCreated
def setDateCreated(self, dateCreated):
self.dateCreated = dateCreated
def getChangedBy(self):
return self.changedBy
def setChangedBy(self, changedBy):
self.changedBy = changedBy
def getDateChanged(self):
return self.dateChanged
def setDateChanged(self, dateChanged):
self.dateChanged = dateChanged
def isVoided(self):
return self.voided
def getVoided(self):
return self.isVoided()
def setVoided(self, voided):
self.voided = voided
def getDateVoided(self):
return self.dateVoided
def setDateVoided(self, dateVoided):
self.dateVoided = dateVoided
def getVoidedBy(self):
return self.voidedBy
def setVoidedBy(self, voidedBy):
self.voidedBy = voidedBy
def getVoidReason(self):
return self.voidReason
def setVoidReason(self, voidReason):
self.voidReason = voidReason
class BaseOpenmrsMetadata(BaseOpenmrsObject, OpenmrsMetadata):
    def __init__(self, name=None, description=None, creator=None, dateCreated=None, \
                 changedBy=None, dateChanged=None, retired=False, dateRetired=None, \
                 retiredBy=None, retireReason=None):
self.name = name
self.description = description
self.creator = creator
self.dateCreated = dateCreated
self.changedBy = changedBy
self.dateChanged = dateChanged
self.retired = retired
self.dateRetired = dateRetired
self.retiredBy = retiredBy
self.retireReason = retireReason
```
#### File: openmrs/openmrs/Obs.py
```python
from BaseClasses import *
import sets
import logging
class Node:
def __init__(self, cargo, nextNode, previousNode):
self.cargo = cargo
self.next = nextNode
self.previous = previousNode
def __str__(self):
print str(self.cargo)
class ordered_set(set):
def __init__(self, *args, **kwargs):
set.__init__(self, *args, **kwargs)
self.elements = []
for i in self:
self.elements.append(i)
self._order = self.elements #NOT SURE IF ORDERED IN ORDER ELTS WERE ADDED
def add(self, elt):
set.add(self, elt)
if elt in self._order:
self._order.remove(elt)
self._order.append(elt)
def remove(self, elt):
set.remove(self, elt)
self._order.remove(elt)
def order(self):
return self._order[:]
def ordered_items(self):
return [(elt) for elt in self._order]
o = ordered_set(set([3, 2, 5, 4, 10]))
print o
class Obs(BaseOpenmrsData): #needs to implement Serializable
serialVersionUID = 112342333L
#log = LogFactory.getLog(Obs) #haven't defined a LogFactory yet
def __init__(self, obsId=None,question=None, obsDatetime=None, accessionNumber=None,\
obsGroup = None, groupMembers=set(), valueCoded=None, valueCodedName=None,\
valueDrug=None, valueGroupId=None,valueDatetime=None, valueNumeric=None,\
valueModifier=None, valueText=None, valueComplex=None,complexData = None,\
comment=None, personId=None,person=None, order=None, location=None,encounter=None,\
previousVersion=None, voided=None, creator = None, dateCreated=None, voidedBy= None,\
dateVoided=None, voidReason = None):
self.obsId = obsId
self.concept = question
self.obsDatetime = obsDatetime
self.accessionNumber = accessionNumber
self.obsGroup = obsGroup
self.groupMembers = groupMembers #set
self.valueCoded = valueCoded #Concept obj
self.valueCodedName = valueCodedName #ConceptName obj
self.valueDrug = valueDrug #Drug obj
self.valueGroupId = valueGroupId
self.valueDatetime = valueDatetime
self.valueNumeric = valueNumeric
self.valueModifier = valueModifier
self.valueText = valueText
self.valueComplex = valueComplex
self.complexData = complexData #transient: can't be serialized
self.comment = comment
self.person = person
if person != None:
self.personId = person.getPersonId() #transient
self.order = order
self.location = location
self.encounter = encounter
self.previousVersion = previousVersion
self.voided = voided
self.creator = creator
self.dateCreated = dateCreated
self.voidedBy = voidedBy
self.dateVoided = dateVoided
self.voidReason = voidReason
def newInstance(self, obsToCopy):
        # Obs.__init__ takes obsId first, so pass the copied fields by keyword
        newObs = Obs(person=obsToCopy.getPerson(), question=obsToCopy.getConcept(),
                     obsDatetime=obsToCopy.getObsDatetime(), location=obsToCopy.getLocation())
newObs.setObsGroup(obsToCopy.getObsGroup())
newObs.setAccessionNumber(obsToCopy.getAccessionNumber())
newObs.setValueCoded(obsToCopy.getValueCoded())
newObs.setValueDrug(obsToCopy.getValueDrug())
newObs.setValueGroupId(obsToCopy.getValueGroupId())
newObs.setValueDatetime(obsToCopy.getValueDatetime())
newObs.setValueNumeric(obsToCopy.getValueNumeric())
newObs.setValueModifier(obsToCopy.getValueModifier())
newObs.setValueText(obsToCopy.getValueText())
newObs.setComment(obsToCopy.getComment())
newObs.setOrder(obsToCopy.getOrder())
newObs.setEncounter(obsToCopy.getEncounter())
newObs.setCreator(obsToCopy.getCreator())
newObs.setDateCreated(obsToCopy.getDateCreated())
newObs.setVoided(obsToCopy.getVoided())
newObs.setVoidedBy(obsToCopy.getVoidedBy())
newObs.setDateVoided(obsToCopy.getDateVoided())
newObs.setVoidReason(obsToCopy.getVoidReason())
newObs.setValueComplex(obsToCopy.getValueComplex())
newObs.setComplexData(obsToCopy.getComplexData())
if obsToCopy.hasGroupMembers(True):
for member in obsToCopy.getGroupMembers(True):
if member.getObsId() == None:
newObs.addGroupMember(member)
else:
newObs.addGroupMember(Obs.newInstance(member))
return newObs
def getComment(self):
return self.comment
def setComment(self, comment):
self.comment = comment
def getConcept(self):
return self.concept
def setConcept(self, concept):
self.concept = concept
def getConceptDescription(self):
if self.getConcept() == None:
return None
return self.concept.getDescription()
def getEncounter(self):
return self.encounter
def setEncounter(self, encounter):
self.encounter = encounter
def getLocation(self):
return self.location
def setLocation(self, location):
self.location = location
def getObsDatetime(self):
return self.obsDatetime
def setObsDatetime(self, obsDatetime):
self.obsDatetime = obsDatetime
def getObsGroup(self):
return self.obsGroup
def setObsGroup(self,obsGroup):
self.obsGroup = obsGroup
def hasGroupMembers(self, includeVoided=False):
#uses springFramework library
pass
def isObsGrouping(self):
return self.hasGroupMembers(True)
def getGroupMembers(self, includeVoided=False):
if includeVoided:
return self.groupMembers
if self.groupMembers == None:
return None
        nonVoided = ordered_set(self.groupMembers)
        # iterate over a snapshot so removing voided members during the loop is safe
        for obs in list(nonVoided):
            if obs.isVoided():
                nonVoided.remove(obs) #not sure if this is what's required
        return nonVoided
def setGroupMembers(self, groupMembers):
self.groupMembers = groupMembers
def addGroupMember(self, member):
if member == None:
return None
if self.getGroupMembers() == None:
            self.groupMembers = sets.Set() #mutable set (HashSet equivalent); ImmutableSet would break add() below
## if member == self:
## raise APIException("An obsGroup cannot have itself as a mentor. obsGroup: " + self \
## + " obsMember attempting to add: " + member)
#I think APIException is defined in another JAVA class file; not sure if Python has this
member.setObsGroup(self)
self.groupMembers.add(member)
def removeGroupMember(self, member):
if (member == None) or self.getGroupMembers() == None:
return None
if self.groupMembers.remove(member):
member.setObsGroup(None)
def getRelatedObservations(self):
ret = sets.Set() #Supposed to be ImmutableSet but we can't add elts to that; Set isnt hashed
if self.isObsGrouping():
for i in self.getGroupMembers():
ret.add(i)
parentObs = self
while parentObs.getObsGroup() != None :
for obsSibling in parentObs.getObsGroup().getGroupMembers():
if not(obsSibling.isObsGrouping()):
ret.add(obsSibling)
parentObs = parentObs.getObsGroup()
elif self.getObsGroup() != None:
for obsSibling in self.getObsGroup().getGroupMembers():
if not(obsSibling.isObsGrouping()):
ret.add(obsSibling)
return ret
def getObsId(self):
return self.obsId
def setObsId(self,obsId):
self.obsId = obsId
def getOrder(self):
return self.order
def setOrder(self, order):
self.order = order
def getPersonId(self):
return self.personId
def setPersonId(self, personId):
self.personId = personId
def getPerson(self):
return self.person
def setPerson(self, person):
self.person = person
if person != None:
self.personId = person.getPersonId()
def setValueBoolean(self, valueBoolean):
if (valueBoolean != None) and (self.getConcept() != None) and self.getConcept().getDatatype().isBoolean():
if valueBoolean.booleanValue():
self.setValueCoded(Context().getConceptService().getTrueConcept())
else:
self.setValueCoded(Context().getConceptService().getFalseConcept())
#Context is from api directory
elif valueBoolean == None:
self.setValueCoded(None)
def getValueAsBoolean(self):
if self.getValueCoded() != None:
if self.getValueCoded() == Context().getConceptService().getTrueConcept():
return True
elif self.getValueCoded() == Context().getConceptService().getFalseConcept():
return False
elif self.getValueNumeric() != None:
if self.getValueNumeric() == 1:
return True
elif self.getValueNumeric() == 0:
return False
return None
def getValueBoolean(self):
if (self.getConcept() != None) and (self.valueCoded != None) and (self.getConcept().getDatatype().isBoolean()):
trueConcept = Context.getConceptService().getTrueConcept()
return (trueConcept != None) and (self.valueCoded.getId() == trueConcept.getId())
return None
def getValueCoded(self):
return self.valueCoded
def setValueCoded(self, valueCoded):
self.valueCoded = valueCoded
def getValueCodedName(self):
return self.valueCodedName
def setValueCodedName(self, valueCodedName):
self.valueCodedName = valueCodedName
def getValueDrug(self):
return self.valueDrug
def setValueDrug(self, valueDrug):
self.valueDrug = valueDrug
def getValueDatetime(self):
return self.valueDatetime
def setValueDatetime(self, valueDatetime):
self.valueDatetime = valueDatetime
def getValueDate(self):
return self.valueDatetime
def setValueDate(self, valueDate):
self.valueDatetime = valueDate
def getValueTime(self):
return self.valueDatetime
def setValueTime(self, valueTime):
self.valueDatetime = valueTime
def getValueGroupId(self):
return self.valueGroupId
def setValueGroupId(self, valueGroupId):
self.valueGroupId = valueGroupId
def getValueModifier(self):
return self.valueModifier
def setValueModifier(self, valueModifier):
self.valueModifier = valueModifier
def getValueNumeric(self):
return self.valueNumeric
def setValueNumeric(self, valueNumeric):
self.valueNumeric = valueNumeric
def getValueText(self):
return self.valueText
def setValueText(self, valueText):
self.valueText = valueText
def isComplex(self):
if self.getConcept() != None:
return self.getConcept().isComplex()
return False
def getValueComplex(self):
return self.valueComplex
def setValueComplex(self, valueComplex):
self.valueComplex = valueComplex
def setComplexData(self, complexData):
self.complexData = complexData
def getComplexData(self):
return self.complexData
def getAccessionNumber(self):
return self.accessionNumber
def setAccessionNumber(self, accessionNumber):
self.accessionNumber = accessionNumber
def getValueAsString(self, locale):
#Needs NumberFormat and other built in functions
pass
def setValueAsString(self, s):
#logging.Logger.debug("self.getConcept() == " + str(self.getConcept()))
if (self.getConcept() != None): #and (isBlank(s)): #isBlank(s) checks if s is whitespace, null, or empty. Need to find Python equivalent.
abbrev = self.getConcept().getDatatype().getHl7Abbreviation()
if abbrev == "BIT":
self.setValueBoolean(s) #s might be lowercase true, not True. Solve this.
elif abbrev == "CWE":
raise RuntimeException("Not Yet Implemented")
elif (abbrev == "NM") or (abbrev == "SN"):
self.setValueNumeric(s)
elif abbrev == "DT":
self.setValueDatetime(s) #dateFormat.parse(s) in JAVA. must be in da specific date format
elif abbrev == "TM":
self.setValueDatetime(s) #timeFormat.parse(s) in JAVA too
elif abbrev == "TS":
self.setValueDatetime(s) #datetimeFormat.parse(s)
elif abbrev == "ST":
self.setValueText(s)
## else:
## raise RuntimeException("Don't know how to handle " + str(abbrev))
## else:
## raise RuntimeException("concept is None for " + str(self))
def __str__(self):
if self.obsId == None:
return "obs id is None"
return "Obs #" + str(self.obsId)
def getId(self):
        return self.getObsId()
def setId(self,Id):
self.setObsId(Id)
def getPreviousVersion(self):
return self.previousVersion
def setPreviousVersion(self, previousVersion):
self.previousVersion = previousVersion
def hasPreviousVersion(self):
return self.getPreviousVersion() != None
```
#### File: openmrs/openmrs/OpenmrsClasses.py
```python
from abc import ABCMeta, abstractmethod
import uuid
#import org.python.google.common.base.objects as objects #not sure if this is the same as com.google.common.base.Objects in JAVA code
from datetime import datetime, date
import pickle
#Interfaces
class OpenmrsObject:
"""This is the base interface for all OpenMRS-defined classes"""
__metaclass__ = ABCMeta
@abstractmethod
def getId(self):
#return id - The unique Identifier for the object
pass
def setId(self, Id):
#param id - The unique Identifier for the object
pass
def getUuid(self):
#return the universally unique id for this object
pass
def setUuid(self, uuid):
#param uuid a universally unique id for this object
pass
class OpenmrsData(Auditable, Voidable):
__metaclass__ = ABCMeta
class OpenmrsMetadata(Auditable, Retireable):
__metaclass__ = ABCMeta
def getName(self):
pass
def setName(self, name):
pass
def getDescription(self):
pass
def setDescription(self, description):
pass
```
#### File: simpleRule/jelf/views.py
```python
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login, authenticate
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from models import UserProfile, individual
from sourcetrans.macro_module import macros, jeeves
import JeevesLib
# "Glue method". Right now you just write a method like `index` below.
# It returns a (faceted) tuple either of the form (template_name, template_ctxt)
# or ("redirect", redirect_url).
#
# SUPER HACKY, obviously. Ideally we would have a full object similar to the django
# HttpResponse that can be faceted. Such an object would need to support Jeeves,
# of course. And the concretized rendering should be moved to a library function
# (like render_to_response).
@jeeves
def add_to_context(context_dict, request, template_name, profile, concretize):
template_name = concretize(template_name)
context_dict['concretize'] = concretize
context_dict['is_admin'] = profile != None and profile.level == "chair"
context_dict['profile'] = profile
context_dict['is_logged_in'] = (request.user and
request.user.is_authenticated() and
(not request.user.is_anonymous()))
def request_wrapper(view_fn):
def real_view_fn(request, *args, **kwargs):
try:
profile = UserProfile.objects.get(username=request.user.username)
ans = view_fn(request, profile, *args, **kwargs)
template_name = ans[0]
context_dict = ans[1]
if template_name == "redirect":
path = context_dict
return HttpResponseRedirect(JeevesLib.concretize(profile, path))
concretizeState = JeevesLib.jeevesState.policyenv.getNewSolverState(profile)
def concretize(val):
return concretizeState.concretizeExp(val, JeevesLib.jeevesState.pathenv.getEnv())
add_to_context(context_dict, request, template_name, profile, concretize)
return render_to_response(template_name, RequestContext(request, context_dict))
except Exception:
import traceback
traceback.print_exc()
raise
real_view_fn.__name__ = view_fn.__name__
return real_view_fn
@login_required
@request_wrapper
@jeeves
def profile_view(request, user_profile):
profile = UserProfile.objects.get(username=request.user.username)
if profile == None:
profile = UserProfile(username=request.user.username)
if request.method == 'POST':
profile.email = request.POST.get('email', '')
profile.save()
return ("profile.html", {
"email": profile.email,
"which_page": "profile",
})
# An example of a really simple view.
# The argument `user_profile` is a UserProfile object (defined in models.py).
# Use this instead of `request.user` (which is the ordinary django User model).
# You can access request.POST and request.GET as normal.
@login_required
@request_wrapper
@jeeves
def index(request, user_profile):
return ( "index.html"
, { 'name' : user_profile.email } )
@login_required
@request_wrapper
@jeeves
def patients(request, user_profile, patient):
p=individual.objects.get(pk=patient)
print p.ssn
dataset={"Address":p.address}
return ( "patient.html"
, { 'patient' : p } )
def register_account(request):
if request.user.is_authenticated():
return HttpResponseRedirect("index")
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
user.save()
UserProfile.objects.create(
username=user.username,
email=request.POST.get('email', ''),
)
user = authenticate(username=request.POST['username'],
password=request.POST['<PASSWORD>'])
login(request, user)
return HttpResponseRedirect("index")
else:
form = UserCreationForm()
return render_to_response("registration/account.html", RequestContext(request,
{
'form' : form,
'which_page' : "register"
}))
```
#### File: jeeves/env/PathVars.py
```python
import JeevesLib
import fast.AST
class VarSetting:
def __init__(self, var, val):
self.var = var
self.val = val
def __eq__(self, other):
return self.var is other.var and self.val == other.val
def __str__(self):
return "(%s, %s)" % (self.var.name, self.val)
# TODO: Define your path variable environment, as well as manipulations, here.
class PathVars:
def __init__(self):
self.conditions = []
def push(self, var, value):
assert type(var) == fast.AST.Var
assert type(value) == bool
if VarSetting(var, not value) in self.conditions:
raise Exception("Path condition for '%s' already set to '%s'" % (var, not value))
self.conditions.append(VarSetting(var, value))
def pop(self):
self.conditions.pop()
def hasPosVar(self, var):
return VarSetting(var, True) in self.conditions
def hasNegVar(self, var):
return VarSetting(var, False) in self.conditions
def getPathFormula(self):
c2 = [(vs.var if vs.val else fast.AST.Not(vs.var)) for vs in self.conditions]
return reduce(fast.AST.And, c2, fast.AST.Constant(True))
def getEnv(self):
return {vs.var.name : vs.val for vs in self.conditions}
```
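A minimal usage sketch (not part of the original file), assuming `JeevesLib.mkLabel` returns a `fast.AST.Var` as it does elsewhere in this codebase; note `mkLabel` may uniquify the label name, so the key shown by `getEnv()` is illustrative.
```python
import JeevesLib
JeevesLib.init()

pc = PathVars()
label = JeevesLib.mkLabel('example_label')

pc.push(label, True)
assert pc.hasPosVar(label) and not pc.hasNegVar(label)
print(pc.getEnv())   # e.g. {'example_label': True}
pc.pop()
```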
#### File: jeeves/jeevesdb/JeevesModel.py
```python
from django.db import models
from django.db.models.query import QuerySet
from django.db.models import Manager
from django.db.models import CharField
from django.apps import apps
import django.db.models.fields.related
import JeevesLib
from JeevesLib import fexpr_cast
from fast.AST import Facet, FObject, Unassigned, FExpr, FNull
import JeevesModelUtils
class JeevesQuerySet(QuerySet):
"""The Jeeves version of Django's QuerySet.
"""
@JeevesLib.supports_jeeves
def get_jiter(self):
"""Creates an iterator for the QuerySet. Returns a list (object,
conditions) of rows and their conditions.
"""
self._fetch_all()
def acquire_label_by_name(app_label, label_name, obj=None):
"""Gets a label by name.
"""
if JeevesLib.doesLabelExist(label_name):
return JeevesLib.getLabel(label_name)
else:
label = JeevesLib.mkLabel(label_name, uniquify=False)
model_name, field_name, jeeves_id = label_name.split('__')
# Get the model that corresponds to the application label and
# model name.
# TODO: Make get_model faster?
model = apps.get_model(app_label, model_name)
# Gets the current row so we can feed it to the policy.
# TODO: Figure out why we need the faceted value here...
obj = model.objects.get(use_base_env=True, jeeves_id=jeeves_id)
restrictor = getattr(model, 'jeeves_restrict_' + field_name)
JeevesLib.restrict(label, lambda ctxt: restrictor(obj, ctxt), True)
return label
def get_env(obj, fields, env):
"""Gets the Jeeves variable environment associated with the fields.
"""
if hasattr(obj, "jeeves_vars"):
jeeves_vars = JeevesModelUtils.unserialize_vars(obj.jeeves_vars)
else:
jeeves_vars = {}
for var_name, value in jeeves_vars.iteritems():
# TODO: We only need to do this whole label thing if we don't
# know where the value is going.
# Loop through the list of variables and their assignments.
if var_name in env and env[var_name] != value:
# If we already know that this variable doesn't match with
# our program counter, we return nothing for this current
# variable.
return None
# Otherwise, we map the variable to the condition value.
# TODO: See if the value is consistent.
label = acquire_label_by_name(self.model._meta.app_label
, var_name)
env[var_name] = (label, value)
for field, subs in fields.iteritems() if fields else []:
# Do the same thing for the fields.
if field and get_env(getattr(obj, field), subs, env) is None:
return None
return env
results = []
for obj in self._result_cache:
# Get the corresponding labels for our list of conditions.
env = get_env(obj, self.query.select_related, {})
if env is not None:
results.append((obj, env))
return results
def get(self, use_base_env=False, **kwargs):
"""Fetches a JList of rows that match the conditions.
"""
matches = self.filter(**kwargs).get_jiter()
if len(matches) == 0:
return None
for (row, _) in matches:
if row.jeeves_id != matches[0][0].jeeves_id:
raise Exception("wow such error: \
get() found rows for more than one jeeves_id")
viewer = JeevesLib.get_viewer()
has_viewer = not isinstance(viewer, FNull)
pathenv = JeevesLib.jeevesState.pathenv.getEnv()
solverstate = JeevesLib.get_solverstate()
cur = None
for (row, conditions) in matches:
old = cur
cur = FObject(row)
for var_name, (label, val) in conditions.iteritems():
# TODO: Figure out if we need to make obj the faceted value.
'''
if has_viewer:
if solverstate.assignLabel(label, pathenv):
if not val:
cur = old
else:
if val:
cur = old
else:
'''
if val:
cur = Facet(label, cur, old)
else:
cur = Facet(label, old, cur)
try:
return cur.partialEval({} if use_base_env \
else JeevesLib.jeevesState.pathenv.getEnv())
except TypeError:
raise Exception("wow such error: \
could not find a row for every condition")
def filter(self, **kwargs):
"""Jelf implementation of filter.
"""
related_names = []
for argname, _ in kwargs.iteritems():
related_name = argname.split('__')
if len(related_name) > 1:
related_names.append("__".join(related_name[:-1]))
if len(related_names) > 0:
return super(
JeevesQuerySet, self).filter(
**kwargs).select_related(*related_names)
else:
return super(JeevesQuerySet, self).filter(**kwargs)
@JeevesLib.supports_jeeves
def all(self):
viewer = JeevesLib.get_viewer()
if isinstance(viewer, FNull):
# If we don't know who the viewer is, create facets.
elements = JeevesLib.JList2([])
env = JeevesLib.jeevesState.pathenv.getEnv()
for val, cond in self.get_jiter():
popcount = 0
for vname, (vlabel, vval) in cond.iteritems():
if vname not in env:
# vlabel = acquire_label_by_name(
# self.model._meta.app_label, vname, obj=val)
JeevesLib.jeevesState.pathenv.push(vlabel, vval)
popcount += 1
elif env[vname] != vval:
break
else:
elements.append(val)
for _ in xrange(popcount):
JeevesLib.jeevesState.pathenv.pop()
return elements
else:
# Otherwise concretize early.
elements = []
env = JeevesLib.jeevesState.pathenv.getEnv()
solverstate = JeevesLib.get_solverstate()
for val, cond in self.get_jiter():
# Get a list of (object, condition list).
for vname, (vlabel, vval) in cond.iteritems():
# Loop through the list of conditions to see what they
# should actually be assigned to.
if vname in env:
# If we have already assumed the current variable,
# then add the element if the assumption matches
# the condition.
if env[vname] == vval:
elements.append(val)
else:
# If the current label matches with our policy
# assumptions, then we add it to the list of results.
label = solverstate.assignLabel(vlabel, env)
env[vlabel] = label
if label == vval:
elements.append(val)
return elements
@JeevesLib.supports_jeeves
def delete(self):
# can obviously be optimized
# TODO write tests for this
for val, cond in self.get_jiter():
popcount = 0
for vname, (vlabel, vval) in cond.iteritems():
if vname not in JeevesLib.jeevesState.pathenv.getEnv():
# vlabel = acquire_label_by_name(
# self.model._meta.app_label, vname)
JeevesLib.jeevesState.pathenv.push(vlabel, vval)
popcount += 1
val.delete()
for _ in xrange(popcount):
JeevesLib.jeevesState.pathenv.pop()
@JeevesLib.supports_jeeves
def exclude(self, **kwargs):
raise NotImplementedError
# TODO: methods that return a queryset subclass of the ordinary QuerySet
# need to be overridden
def values(self, *fields):
raise NotImplementedError
def values_list(self, *fields, **kwargs):
raise NotImplementedError
def dates(self, field_name, kind, order='ASC'):
raise NotImplementedError
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
raise NotImplementedError
def none(self):
raise NotImplementedError
class JeevesManager(Manager):
"""The Jeeves version of Django's Manager class, which [...]
"""
@JeevesLib.supports_jeeves
def get_queryset(self):
return JeevesQuerySet(self.model, using=self._db).order_by('jeeves_id')
def all(self):
return super(JeevesManager, self).all().all()
@JeevesLib.supports_jeeves
def create(self, **kw):
elt = self.model(**kw)
elt.save()
return elt
def clone(old):
"""Returns a copy of an object.
"""
new_kwargs = dict([(fld.name, getattr(old, fld.name))
for fld in old._meta.fields
if not isinstance(fld, JeevesForeignKey)])
ans = old.__class__(**new_kwargs)
for fld in old._meta.fields:
if isinstance(fld, JeevesForeignKey):
setattr(ans, fld.attname, getattr(old, fld.attname))
return ans
'''
def acquire_label_by_name(app_label, label_name, obj=None):
"""Gets a label by name.
"""
if JeevesLib.doesLabelExist(label_name):
return JeevesLib.getLabel(label_name)
else:
label = JeevesLib.mkLabel(label_name, uniquify=False)
model_name, field_name, jeeves_id = label_name.split('__')
# Get the model that corresponds to the application label and model
# name.
model = apps.get_model(app_label, model_name)
# Gets the current row so we can feed it to the policy.
if obj==None:
obj = model.objects.get(use_base_env=True, jeeves_id=jeeves_id)
# print "RESTRICTING OBJECT ", obj.id, ": ", obj.jeeves_id
# print "WITH LABEL ", label
restrictor = getattr(model, 'jeeves_restrict_' + field_name)
JeevesLib.restrict(label, lambda ctxt: restrictor(obj, ctxt), True)
return label
'''
def get_one_differing_var(vars1, vars2):
"""Checks to see if two sets of variables have one differing one??
"""
if len(vars1) != len(vars2):
return None
ans = None
for var in vars1:
if var in vars2:
if vars1[var] != vars2[var]:
if ans is None:
ans = var
else:
return None
else:
return None
return ans
def label_for(*field_names):
"""The decorator for associating a label with a field.
"""
def decorator(field):
"""Definition of the decorator itself.
"""
field._jeeves_label_for = field_names
return field
return decorator
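# --- Hedged illustrative sketch (not part of the original module) ---
# How a model is expected to combine @label_for with the
# jeeves_restrict_<label> / jeeves_get_private_<field> conventions that
# JeevesModel.__init__ and JeevesModel.save (below) look up via getattr.
# The model, its fields, and the policy are hypothetical.
#
#   class Message(JeevesModel):
#       text = CharField(max_length=1024)
#       owner = CharField(max_length=64)
#
#       @staticmethod
#       def jeeves_get_private_text(message):
#           return "[redacted]"
#
#       @staticmethod
#       @label_for('text')
#       def jeeves_restrict_messagelabel(message, ctxt):
#           return ctxt is not None and ctxt.username == message.owner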
#from django.db.models.base import ModelBase
#class JeevesModelBase(ModelBase):
# def __new__(cls, name, bases, attrs):
# obj = super(ModelBase, cls).__new__(cls, name, bases, attrs)
# return obj
# Make a Jeeves Model that enhances the vanilla Django model with information
# about how labels work and that kind of thing. We'll also need to override
# some methods so that we can create records and make queries appropriately.
class JeevesModel(models.Model):
""" Jeeves version of Django's Model class.
"""
def __init__(self, *args, **kw):
self.jeeves_base_env = JeevesLib.jeevesState.pathenv.getEnv()
super(JeevesModel, self).__init__(*args, **kw)
self._jeeves_labels = {}
field_names = [f.name for f in self._meta.concrete_fields]
for attr in dir(self.__class__):
if attr.startswith('jeeves_restrict_'):
value = getattr(self.__class__, attr)
label_name = attr[len('jeeves_restrict_'):]
assert label_name not in self._jeeves_labels
if hasattr(value, '_jeeves_label_for'):
self._jeeves_labels[label_name] = value._jeeves_label_for
else:
assert label_name in field_names
self._jeeves_labels[label_name] = (label_name,)
def __setattr__(self, name, value):
field_names = [field.name for field in self._meta.concrete_fields] \
if hasattr(self, '_meta') else []
if name in field_names and \
name not in ('jeeves_vars', 'jeeves_id', 'id'):
if name in self.__dict__:
old_val = getattr(self, name)
else:
old_val = Unassigned("attribute '%s' in %s" % (name, self.__class__.__name__))
models.Model.__setattr__(self,
name, JeevesLib.jassign(old_val, value, self.jeeves_base_env))
else:
models.Model.__setattr__(self, name, value)
objects = JeevesManager()
jeeves_id = CharField(max_length=JeevesModelUtils.JEEVES_ID_LEN, null=False)
jeeves_vars = CharField(max_length=1024, null=False)
@JeevesLib.supports_jeeves
def do_delete(self, vars_env):
"""A helper for delete?
"""
if len(vars_env) == 0:
delete_query = self.__class__._objects_ordinary.filter(
jeeves_id=self.jeeves_id)
delete_query.delete()
else:
filter_query = self.__class__._objects_ordinary.filter(
jeeves_id=self.jeeves_id)
objs = list(filter_query)
for obj in objs:
eobj = JeevesModelUtils.unserialize_vars(obj.jeeves_vars)
if any(var_name in eobj and eobj[var_name] != var_value
for var_name, var_value in vars_env.iteritems()):
continue
if all(var_name in eobj and eobj[var_name] == var_value
for var_name, var_value in vars_env.iteritems()):
super(JeevesModel, obj).delete()
continue
addon = ""
for var_name, var_value in vars_env.iteritems():
if var_name not in eobj:
new_obj = clone(obj)
if addon != "":
new_obj.id = None
# so when we save a new row will be made
new_obj.jeeves_vars += addon + '%s=%d;' \
% (var_name, not var_value)
addon += '%s=%d;' % (var_name, var_value)
super(JeevesModel, new_obj).save()
@JeevesLib.supports_jeeves
def acquire_label(self, field_name):
label_name = '%s__%s__%s' % \
(self.__class__.__name__, field_name, self.jeeves_id)
if JeevesLib.doesLabelExist(label_name):
return JeevesLib.getLabel(label_name)
else:
label = JeevesLib.mkLabel(label_name, uniquify=False)
restrictor = getattr(self, 'jeeves_restrict_' + field_name)
JeevesLib.restrict(label
, lambda ctxt: restrictor(self, ctxt), True)
return label
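# For example, the label guarding the `sound` field of an AnimalWithPolicy
# row (see the testdb models later in this dump) would be named
# 'AnimalWithPolicy__sound__<jeeves_id>'.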
@JeevesLib.supports_jeeves
def save(self, *args, **kw):
"""Saves elements with the appropriate faceted labels.
"""
def full_eval(val, env):
"""Evaluating a value in the context of an environment.
"""
eval_expr = val.partialEval(env)
return eval_expr.v
# TODO: OMG why is this so long.
if not self.jeeves_id:
self.jeeves_id = JeevesModelUtils.get_random_jeeves_id()
if kw.get("update_field", None) is not None:
raise NotImplementedError("Partial saves not supported.")
# Collect the names of the concrete, non-primary-key fields (skipping m2m-style fields).
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
# Go through labels and create facets.
for label_name, field_name_list in self._jeeves_labels.iteritems():
label = self.acquire_label(label_name)
for field_name in field_name_list:
public_field_value = getattr(self, field_name)
private_field_value = getattr(self
, 'jeeves_get_private_' + \
field_name)(self)
faceted_field_value = JeevesLib.mkSensitive(label
, public_field_value
, private_field_value).partialEval(
JeevesLib.jeevesState.pathenv. \
getEnv())
setattr(self, field_name, faceted_field_value)
all_vars = []
field_dict = {}
env = JeevesLib.jeevesState.pathenv.getEnv()
for field_name in field_names:
value = getattr(self, field_name)
field_val = fexpr_cast(value).partialEval(env)
all_vars.extend(v.name for v in field_val.vars())
field_dict[field_name] = field_val
all_vars = list(set(all_vars))
for cur_vars in JeevesModelUtils.powerset(all_vars):
true_vars = list(cur_vars)
false_vars = list(set(all_vars).difference(cur_vars))
env_dict = dict(env)
env_dict.update({tv : True for tv in true_vars})
env_dict.update({fv : False for fv in false_vars})
self.do_delete(env_dict)
klass = self.__class__
obj_to_save = klass(**{
field_name : full_eval(field_value, env_dict)
for field_name, field_value in field_dict.iteritems()
})
all_jid_objs = list(
klass._objects_ordinary.filter(
jeeves_id=obj_to_save.jeeves_id).all())
all_relevant_objs = [obj for obj in all_jid_objs if
all(field_name == 'jeeves_vars' or
getattr(obj_to_save, field_name) == getattr(obj, field_name)
for field_name in field_dict)]
# Optimization.
# TODO: See how we can refactor this to shorten the function.
while True:
# check if we can collapse
# if we can, repeat; otherwise, exit
for i in xrange(len(all_relevant_objs)):
other_obj = all_relevant_objs[i]
diff_var = get_one_differing_var(env_dict
, JeevesModelUtils.unserialize_vars(
other_obj.jeeves_vars))
if diff_var is not None:
super(JeevesModel, other_obj).delete()
del env_dict[diff_var]
break
else:
break
obj_to_save.jeeves_vars = JeevesModelUtils.serialize_vars(env_dict)
super(JeevesModel, obj_to_save).save(*args, **kw)
@JeevesLib.supports_jeeves
def delete(self):
if self.jeeves_id is None:
return
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
all_vars = []
field_dict = {}
env = JeevesLib.jeevesState.pathenv.getEnv()
for field_name in field_names:
value = getattr(self, field_name)
field_fexpr = fexpr_cast(value).partialEval(env)
all_vars.extend(v.name for v in field_fexpr.vars())
field_dict[field_name] = field_fexpr
for var_set in JeevesModelUtils.powerset(all_vars):
true_vars = list(var_set)
false_vars = list(set(all_vars).difference(var_set))
env_dict = dict(env)
env_dict.update({tv : True for tv in true_vars})
env_dict.update({fv : False for fv in false_vars})
self.do_delete(env_dict)
class Meta(object):
"""Abstract class.
"""
abstract = True
_objects_ordinary = Manager()
@JeevesLib.supports_jeeves
def __eq__(self, other):
if isinstance(other, FExpr):
return other == self
return isinstance(other, self.__class__) and \
self.jeeves_id == other.jeeves_id
@JeevesLib.supports_jeeves
def __ne__(self, other):
if isinstance(other, FExpr):
return other != self
return not (isinstance(other, self.__class__) and \
self.jeeves_id == other.jeeves_id)
from django.contrib.auth.models import User
@JeevesLib.supports_jeeves
def evil_hack(self, other):
"""Hack __eq__ that checks equality if we have FExprs and checks object ID
equality otherwise.
"""
if isinstance(other, FExpr):
return other == self
return isinstance(other, self.__class__) and self.id == other.id
User.__eq__ = evil_hack
class JeevesRelatedObjectDescriptor(property):
"""WRITE SOME COMMENTS.
"""
@JeevesLib.supports_jeeves
def __init__(self, field):
self.field = field
self.cache_name = field.get_cache_name()
@JeevesLib.supports_jeeves
def get_cache(self, instance):
"""Gets the... cache?
"""
cache_attr_name = self.cache_name
if hasattr(instance, cache_attr_name):
cache = getattr(instance, cache_attr_name)
if not isinstance(cache, dict):
jid = getattr(instance, self.field.get_attname())
assert not isinstance(jid, FExpr)
cache = {jid : cache}
setattr(instance, cache_attr_name, cache)
else:
cache = {}
setattr(instance, cache_attr_name, cache)
return cache
@JeevesLib.supports_jeeves
def __get__(self, instance, instance_type):
"""??
"""
if instance is None:
return self
cache = self.get_cache(instance)
def get_obj(jeeves_id):
"""??
"""
if jeeves_id is None:
return None
if jeeves_id not in cache:
cache[jeeves_id] = self.field.to.objects.get(
**{self.field.join_field.name:jeeves_id})
return cache[jeeves_id]
if instance is None:
return self
return JeevesLib.facetMapper(
fexpr_cast(
getattr(instance, self.field.get_attname())), get_obj)
@JeevesLib.supports_jeeves
def __set__(self, instance, value):
cache = self.get_cache(instance)
def get_id(obj):
"""Gets the ID associated with an object.
"""
if obj is None:
return None
obj_jid = getattr(obj, self.field.join_field.name)
if obj_jid is None:
raise Exception("Object must be saved before it can be \
attached via JeevesForeignKey.")
cache[obj_jid] = obj
return obj_jid
ids = JeevesLib.facetMapper(fexpr_cast(value), get_id)
setattr(instance, self.field.get_attname(), ids)
from django.db.models.fields.related import ForeignObject
class JeevesForeignKey(ForeignObject):
"""Jeeves version of Django's ForeignKey.
"""
requires_unique_target = False
@JeevesLib.supports_jeeves
def __init__(self, to, *args, **kwargs):
self.to = to
if (isinstance(to,basestring)):
super(JeevesForeignKey, self).__init__(
to, kwargs.pop("on_delete",models.DO_NOTHING), kwargs.pop("from_fields",[]), kwargs.pop("to_fields",[]), *args, **kwargs)
else:
self.join_field = to._meta.pk
for field in to._meta.fields:
if field.name == 'jeeves_id':
self.join_field = field
break
else:
# support non-Jeeves tables
self.join_field = to._meta.pk
#raise Exception("Need jeeves_id field")
super(JeevesForeignKey, self).__init__(
to, models.DO_NOTHING, [self], [self.join_field], *args, **kwargs)
self.db_constraint = False
@JeevesLib.supports_jeeves
def contribute_to_class(self, cls, name, virtual_only=False):
super(JeevesForeignKey, self).contribute_to_class(
cls, name, virtual_only=virtual_only)
setattr(cls, self.name, JeevesRelatedObjectDescriptor(self))
@JeevesLib.supports_jeeves
def get_attname(self):
return '%s_id' % self.name
@JeevesLib.supports_jeeves
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def deconstruct(self):
name, path, args, kwargs = super(JeevesForeignKey, self).deconstruct()
#kwargs['to'] = self.to
kwargs.pop("from_fields",None)
kwargs.pop("to_fields",None)
kwargs.pop("on_delete",None)
return name, path, args, kwargs
'''
@JeevesLib.supports_jeeves
def db_type(self, connection):
return IntegerField().db_type(connection=connection)
'''
@JeevesLib.supports_jeeves
def get_path_info(self):
opts = self.to._meta
from_opts = self.model._meta
return [django.db.models.fields.related.PathInfo(
from_opts, opts, (self.join_field,), self, False, True)]
@JeevesLib.supports_jeeves
def get_joining_columns(self):
return ((self.column, self.join_field.column),)
@property
def foreign_related_fields(self):
return (self.join_field,)
@property
def local_related_fields(self):
return (self,)
@property
def related_fields(self):
return ((self, self.join_field),)
@property
def reverse_related_fields(self):
return ((self.join_field, self),)
@JeevesLib.supports_jeeves
def get_extra_restriction(self, where_class, alias, related_alias):
return None
@JeevesLib.supports_jeeves
def get_cache_name(self):
return '_jfkey_cache_' + self.name
def db_type(self, connection):
return "VARCHAR(%d)" % JeevesModelUtils.JEEVES_ID_LEN
```
#### File: jeeves/sourcetrans/classes.py
```python
from macropy.core.macros import *
from macropy.core.quotes import macros, ast, u
from ast import *
def classes_transform(node, gen_sym):
@Walker
def transform(tree, **k2):
if isinstance(tree, ClassDef):
self_name = gen_sym()
attr_name = gen_sym()
value_name = gen_sym()
newfunc = FunctionDef(name='__setattr__', args=arguments(args=[Name(id=self_name, ctx=Param()), Name(id=attr_name, ctx=Param()), Name(id=value_name, ctx=Param())], vararg=None, kwarg=None, defaults=[]), decorator_list=[], body=[Assign([Subscript(value=Attribute(value=Name(id=self_name, ctx=Load()), attr='__dict__', ctx=Load()), slice=Index(Name(id=attr_name, ctx=Load())), ctx=Store())], Call(func=Attribute(value=Name(id='JeevesLib', ctx=Load()), attr='jassign', ctx=Load()), args=[Call(func=Attribute(value=Attribute(value=Name(id=self_name), attr='__dict__', ctx=Load()), attr='get', ctx=Load()), args=[Name(id=attr_name), Call(func=Attribute(value=Name(id='JeevesLib', ctx=Load()), attr='Unassigned', ctx=Load()), args=[BinOp(left=Str(s="attribute '%s'"), op=Mod(), right=Name(id=attr_name))], keywords=[], starargs=None, kwargs=None)], keywords=[], starargs=None, kwargs=None), Name(id=value_name)], keywords=[], starargs=None, kwargs=None))])
return copy_location(ClassDef(name=tree.name, bases=tree.bases, body=([newfunc] + tree.body), decorator_list=tree.decorator_list), tree)
return transform.recurse(node)
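# For reference, the AST built above corresponds roughly to injecting the
# following method into every class definition -- a hand-written sketch of
# the generated code (with readable parameter names), not the transformer's
# literal output:
#
#   def __setattr__(self, attr, value):
#       self.__dict__[attr] = JeevesLib.jassign(
#           self.__dict__.get(attr,
#               JeevesLib.Unassigned("attribute '%s'" % attr)),
#           value)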
```
#### File: jeeves/sourcetrans/common.py
```python
from macropy.core.macros import *
from macropy.core.quotes import macros, ast, u
from ast import *
@Walker
def toParam(tree, **kw):
if isinstance(tree, Store):
return Param()
@Walker
def toLoad(tree, **kw):
if isinstance(tree, Store):
return Load()
def storeToParam(node):
return toParam.recurse(node)
def storeToLoad(node):
return toLoad.recurse(node)
```
#### File: jeeves/sourcetrans/namespace.py
```python
from macropy.core.macros import *
from macropy.core.quotes import macros, ast, u
from ast import *
import common
import copy
def get_params_in_arguments(node):
@Walker
def get_params(tree, collect, **kw):
if isinstance(tree, Name):
collect(tree.id)
(_, p1) = get_params.recurse_collect(node.args)
(_, p2) = get_params.recurse_collect(node.vararg)
(_, p3) = get_params.recurse_collect(node.kwarg)
return ((p1 + p2) + p3)
def get_vars_in_scope(node):
@Walker
def get_vars(tree, collect, stop, **kw):
if (isinstance(tree, Name) and isinstance(tree.ctx, Store)):
collect(tree.id)
if isinstance(tree, ClassDef):
stop()
if ((tree != node) and isinstance(tree, FunctionDef)):
collect(tree.name)
stop()
if isinstance(tree, arguments):
pass
@Walker
def get_globals(tree, collect, stop, **kw):
if isinstance(tree, Global):
for name in tree.names:
collect(name)
if ((tree != node) and (isinstance(tree, ClassDef) or isinstance(tree, FunctionDef))):
stop()
(_, v) = get_vars.recurse_collect(node)
(_, g) = get_globals.recurse_collect(node)
p = get_params_in_arguments(node.args)
return (list((set(v) - set(g))), p)
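# For example, for
#   def f(a, b):
#       x = 1
#       def g(): pass
#       global y
#       y = 2
# this returns (roughly) (['x', 'g'], ['a', 'b']): locally assigned names and
# nested function names, minus declared globals, plus the parameter list.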
def replace_local_scopes_with_namespace(node, gen_sym):
@Walker
def transform(tree, stop, ctx, set_ctx, **kw):
if isinstance(tree, FunctionDef):
(varNames, paramNames) = get_vars_in_scope(tree)
namespaceName = gen_sym()
namespaceStmt = Assign(targets=[Name(id=namespaceName, ctx=Store())], value=Call(func=Attribute(value=Name(id='JeevesLib', ctx=Load()), attr='Namespace', ctx=Load()), args=[Dict(keys=[Str(p) for p in paramNames], values=[Name(id=p, ctx=Load()) for p in paramNames]), Str(s=tree.name)], keywords=[], starargs=None, kwargs=None))
scopeMapping = dict(ctx)
for name in (varNames + paramNames):
scopeMapping[name] = namespaceName
name = tree.name
args = transform.recurse(tree.args, ctx=ctx)
body = transform.recurse(tree.body, ctx=scopeMapping)
decorator_list = transform.recurse(tree.decorator_list, ctx=ctx)
newtree = copy_location(FunctionDef(name=name, args=args, body=([namespaceStmt] + body), decorator_list=decorator_list), tree)
stop()
if ((tree.name in ctx) and (ctx[tree.name] != None)):
outerAssignStmt = copy_location(Assign(targets=[Attribute(value=Name(id=ctx[tree.name], ctx=Load()), attr=tree.name, ctx=Store())], value=Name(id=tree.name, ctx=Load())), tree)
return [newtree, outerAssignStmt]
else:
return newtree
if isinstance(tree, Lambda):
paramNames = get_params_in_arguments(tree.args)
scopeMapping = dict(ctx)
for name in paramNames:
scopeMapping[name] = None
args = transform.recurse(tree.args, ctx=ctx)
body = transform.recurse(tree.body, ctx=scopeMapping)
newlambda = copy_location(Lambda(args=args, body=body), tree)
stop()
return newlambda
if (isinstance(tree, Name) and (isinstance(tree.ctx, Load) or isinstance(tree.ctx, Store) or isinstance(tree.ctx, Del))):
if ((tree.id in ctx) and (ctx[tree.id] != None)):
return Attribute(value=Name(id=ctx[tree.id], ctx=Load()), attr=tree.id, ctx=tree.ctx)
if isinstance(tree, For):
target = tree.target
iter = tree.iter
body = tree.body
orelse = tree.orelse
stop()
assignTarget = transform.recurse(copy.deepcopy(target), ctx=ctx)
assignValue = common.storeToLoad(copy.deepcopy(target))
assignStmt = Assign([assignTarget], assignValue)
iter = transform.recurse(iter, ctx=ctx)
body = transform.recurse(body, ctx=ctx)
orelse = transform.recurse(orelse, ctx=ctx)
return copy_location(For(target=target, iter=iter, body=([assignStmt] + body), orelse=orelse), tree)
if isinstance(tree, arguments):
stop()
return arguments(args=tree.args, vararg=tree.vararg, kwarg=tree.kwarg, defaults=transform.recurse(tree.defaults, ctx=ctx))
return transform.recurse(node, ctx={})
```
#### File: gallery/authentication/testAuthConfidentiality.py
```python
from smt.Z3 import *
import unittest
from AuthConfidentiality import Authentication, Principal
import JeevesLib
class TestAuthConfidentiality(unittest.TestCase):
def setUp(self):
JeevesLib.init()
self.alicePwd = "<PASSWORD>"
self.bobPwd = "<PASSWORD>"
self.aliceUser = Principal.User(1, "Alice", self.alicePwd)
self.bobUser = Principal.User(2, "Bob", self.bobPwd)
def testUserCanSeeOwnPassword(self):
alicePwdToAlice = JeevesLib.concretize(
self.aliceUser, self.aliceUser.pwd)
self.assertEqual(alicePwdToAlice, self.alicePwd)
def testUserCannotSeeOtherPassword(self):
bobPwdToAlice = JeevesLib.concretize(
self.aliceUser, self.bobUser.pwd)
self.assertEqual(bobPwdToAlice, "")
def testLogin(self):
self.assertEqual( JeevesLib.concretize(self.aliceUser
, Authentication.login(self.aliceUser, self.alicePwd))
, self.aliceUser)
self.assertEqual( JeevesLib.concretize(self.aliceUser
, Authentication.login(self.aliceUser, "otherPwd"))
, Principal.NullUser())
def testSensitiveUserPassword(self):
# Make a sensitive user that is either Alice or Bob. Make sure it shows
# the right password based on the access level of the user.
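# A possible sketch, assuming the facet API used elsewhere in this file
# (the label and variable names below are illustrative, not from the
# original test):
#
#   label = JeevesLib.mkLabel('which_user')
#   JeevesLib.restrict(label, lambda ctxt: ctxt == self.aliceUser)
#   someUser = JeevesLib.mkSensitive(label, self.aliceUser, self.bobUser)
#   self.assertEqual(
#       JeevesLib.concretize(self.aliceUser, someUser.pwd), self.alicePwd)
#   self.assertEqual(
#       JeevesLib.concretize(self.bobUser, someUser.pwd), self.bobPwd)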
pass
if __name__ == '__main__':
unittest.main()
```
#### File: gallery/battleship/Board.py
```python
import JeevesLib
from Bomb import Bomb
from GamePiece import Carrier, Battleship, Cruiser, Destroyer, Submarine, NoShip
from sourcetrans.macro_module import macros, jeeves
from Square import Square
class Board:
class OutOfBoundsException(Exception):
pass
def __init__(self, owner):
self.owner = owner
self.boardSize = 10
# Initialize the board.
self.board = []
for i in range(0, self.boardSize):
curCol = []
for j in range(0, self.boardSize):
curCol.append(Square(self.owner))
self.board.append(curCol)
self.pieces = [ Carrier(owner), Battleship(owner), Cruiser(owner)
, Destroyer(owner), Destroyer(owner)
, Submarine(owner), Submarine(owner) ]
def getSquare(self, x, y):
return self.board[x][y]
# Places a ship on the board. Looks in the list of current pieces to mark
# it as placed. Updates the ship and the board.
@jeeves
def placeShip(self, ctxt, ship, start, end):
for cur in self.pieces:
if cur == ship and not cur.isPlaced():
# Update the relevant board pieces.
pts = cur.getPiecePoints(start, end)
if not (pts == None):
for pt in pts:
shipUpdated = self.board[pt.x][pt.y].updateShip(ctxt, cur)
squareUpdated = cur.addSquare(self.board[pt.x][pt.y])
if not (shipUpdated and squareUpdated):
return False
return cur.placePiece(ctxt)
# If the points didn't fit, then we can't place the ship.
else:
print "Piece didn't fit: "
print ship
print "\n"
return False
print "Don't have piece to play: "
print ship
print "\n"
return False
# Places a bomb. Updates the specific square on the board. If there is a
# ship at this point, this function also updates the ship with the fact that
# it has been bombed.
# NOTE: This seems to be a problematic function causing some tests to fail...
@jeeves
def placeBomb(self, ctxt, x, y):
if x < self.boardSize and y < self.boardSize:
boardShip = self.board[x][y].getShip()
bomb = Bomb(ctxt.user)
bombedPoint = self.board[x][y].bomb(ctxt, bomb)
succeeded = (bombedPoint if boardShip == NoShip()
else boardShip.bombPiece(ctxt) and JeevesLib.jall(map(lambda s: s.bomb(ctxt, bomb), boardShip.getSquares())))
print "succeeded: ", succeeded
return boardShip if succeeded else NoShip()
else:
print "Bomb location outside of board: (" + x + ", " + y + ")" + "\n"
raise OutOfBoundsException
# Determines if all of a player's pieces have been placed. This variable
# should always be concrete.
@jeeves
def allPlaced(self):
return JeevesLib.jall(map(lambda p: p.isPlaced(), self.pieces))
# Determines if all pieces on the board have been bombed. This variable
# should always be concrete.
@jeeves
def hasLost(self):
return JeevesLib.jall(map(lambda p: p.isBombed(), self.pieces))
def printBoard(self, ctxt):
for j in range(0, 10):
for i in range(0, 10):
curSquare = self.board[i][j]
if JeevesLib.concretize(ctxt, curSquare.hasBomb()):
print("X")
elif JeevesLib.concretize(ctxt, curSquare.hasShip()):
print("S")
else:
print("W")
print("\n")
def printUnplacedPieces(self):
print "Remaining unplaced pieces:\n"
for p in self.pieces:
if not p.isPlaced():
print p
print "\n"
def printRemainingPieces(self):
print "Remaining pieces:\n"
for p in self.pieces:
if not p.isBombed():
print p
print "\n"
```
#### File: gallery/fitnessprivacy/Fitness.py
```python
import datetime
import operator
import JeevesLib
from sourcetrans.macro_module import macros, jeeves
class InternalError(Exception):
def __init__(self, message):
self.message = message
# Definition of users.
@jeeves
class User:
def __init__(self, userId):
self.userId = userId
self.activity = {}
def addActivity(self, activity):
# Confidentiality policy:
self.activity[datetime.date.today()] = activity
# POLICY: If there are at least k people with a profile similar to Jean's,
# then use avgActivityLevelJean as the real value to compute the group
# average; otherwise use the existing group average avgActivityLevel instead.
# This means that adding Jean's value avgActivityLevelJean will not change the
# group's average and will not let her be picked out as an outlier.
def getAverageActivityLevel(self):
a = JeevesLib.mkLabel('activity_label')
# Compute average activity level.
activityValues = self.activity.values()
# TODO: We really want this to be the average activity level of the
# *output channel* and not the current activity level...
genericAverage = 3
averageActivity = reduce(operator.add, activityValues, 0) / len(activityValues) if len(activityValues) > 0 else genericAverage
# Can see the average activity level if there are at least 3 with averages
# within 0.2.
JeevesLib.restrict(a
, lambda oc: oc.atLeastKSimilar(averageActivity, 2, 0.2))
activityLevel = JeevesLib.mkSensitive(a, averageActivity, genericAverage)
return activityLevel
# TODO: Is this just the view context?
@jeeves
class UserNetwork:
def __init__(self, users=[]):
self.users = users
def getAverageActivityLevel(self):
userSum = reduce(lambda acc, u: acc + u.getAverageActivityLevel(), self.users, 0)
return userSum / len(self.users)
def atLeastKSimilar(self, avg, k, delta):
count = 0
for user in self.users:
userAvg = user.getAverageActivityLevel()
if (avg - delta) < userAvg and userAvg < (avg + delta):
count += 1
return count >= k
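# A minimal usage sketch (assuming, as the restrict() call above does, that
# the output channel passed to concretize is a UserNetwork; the names below
# are illustrative):
#
#   alice, bob, carol = User(1), User(2), User(3)
#   for u, lvl in zip([alice, bob, carol], [3.0, 3.1, 2.9]):
#       u.addActivity(lvl)
#   network = UserNetwork([alice, bob, carol])
#   # Reveals Alice's true average only if at least 2 users in the network
#   # have an average within 0.2 of hers; otherwise yields the generic 3.
#   JeevesLib.concretize(network, alice.getAverageActivityLevel())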
```
#### File: gallery/proteinsignal/testChemotaxis.py
```python
import JeevesLib
from smt.Z3 import *
import unittest
from RSphaeroides import RSphaeroides
import JeevesLib
class TestChemotaxis(unittest.TestCase):
def setUp(self):
JeevesLib.init()
def test_something(self):
r = RSphaeroides()
pass
```
#### File: jeevesdb_perf/testdb/models.py
```python
from django.db import models
from jeevesdb import JeevesModel
class Animal(JeevesModel.JeevesModel):
name = models.CharField(max_length=30)
sound = models.CharField(max_length=30)
@staticmethod
def jeeves_get_private_sound(animal):
return ""
@staticmethod
@JeevesModel.label_for('sound')
def jeeves_restrict_awplabel(animal, ctxt):
return ctxt == animal
class Zoo(JeevesModel.JeevesModel):
name = models.CharField(max_length=30)
inhabitant = JeevesModel.JeevesForeignKey(Animal)
'''
class AnimalWithPolicy(JeevesModel.JeevesModel):
name = models.CharField(max_length=30)
sound = models.CharField(max_length=30)
@staticmethod
def jeeves_get_private_sound(animal):
return ""
@staticmethod
def jeeves_restrict_sound(animal, ctxt):
return ctxt == animal
'''
```
#### File: jeevesdb/testdb/models.py
```python
from django.db import models
from jeevesdb import JeevesModel
class Animal(JeevesModel.JeevesModel):
name = models.CharField(max_length=30)
sound = models.CharField(max_length=30)
def speak(self):
return "The %s says \"%s\"" % (self.name, self.sound)
class Zoo(JeevesModel.JeevesModel):
name = models.CharField(max_length=30)
inhabitant = JeevesModel.JeevesForeignKey(Animal)
class AnimalWithPolicy(JeevesModel.JeevesModel):
name = models.CharField(max_length=30)
sound = models.CharField(max_length=30)
@staticmethod
def jeeves_get_private_sound(animal):
return ""
@staticmethod
def jeeves_restrict_sound(animal, ctxt):
return ctxt == animal
class AnimalWithPolicy2(JeevesModel.JeevesModel):
name = models.CharField(max_length=30)
sound = models.CharField(max_length=30)
@staticmethod
def jeeves_get_private_sound(animal):
return ""
@staticmethod
@JeevesModel.label_for('sound')
def jeeves_restrict_awplabel(animal, ctxt):
return ctxt == animal
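# A rough usage sketch (assumes a configured Django settings module and
# JeevesLib.init(), as in the jeevesdb tests):
#
#   a = AnimalWithPolicy.objects.create(name='lion', sound='roar')
#   JeevesLib.concretize(a, a.sound)      # 'roar' -- the animal itself
#   JeevesLib.concretize(None, a.sound)   # ''     -- any other viewer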
```
#### File: jeeves/test/testCaching.py
```python
import unittest
import macropy.activate
from sourcetrans.macro_module import macros, jeeves
import JeevesLib
import operator
@jeeves
class TestClass:
def __init__(self, a, b):
self.a = a
self.b = b
@jeeves
class TestClassMethod:
def __init__(self, a, b):
self.a = a
self.b = b
def add_a_to_b(self):
self.b += self.a
def return_sum(self):
return self.a + self.b
@jeeves
class TestClass1:
def __init__(self, a):
self.a = a
def __getstate__(self):
if hasattr(self.a, '__getstate__'):
return "(TestClass1:%s)" % self.a.__getstate__()
else:
return "(TestClass1:%s)" % repr(self.a)
@jeeves
class TestClass1Eq:
def __init__(self, a):
self.a = a
def __eq__(self, other):
return self.a == other.a
def __ne__(self, other):
return self.a != other.a
def __lt__(self, other):
return self.a < other.a
def __gt__(self, other):
return self.a > other.a
def __le__(self, other):
return self.a <= other.a
def __ge__(self, other):
return self.a >= other.a
class TestSourceTransform(unittest.TestCase):
def setUp(self):
# reset the Jeeves state
JeevesLib.init()
JeevesLib.start_caching()
@jeeves
def test_restrict_all_permissive(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda _: True)
self.assertTrue(JeevesLib.concretize(None, x))
# Now we test the cache.
self.assertTrue(JeevesLib.concretize(None, x))
self.assertEqual(len(JeevesLib.get_cache()), 1)
@jeeves
def test_restrict_all_restrictive(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda _: False)
self.assertFalse(JeevesLib.concretize(None, x))
self.assertFalse(JeevesLib.concretize(None, x))
@jeeves
def test_restrict_with_context(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda y: y == 2)
self.assertTrue(JeevesLib.concretize(2, x))
self.assertTrue(JeevesLib.concretize(2, x))
self.assertFalse(JeevesLib.concretize(3, x))
self.assertFalse(JeevesLib.concretize(3, x))
@jeeves
def test_restrict_with_sensitive_value(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda y: y == 2)
value = JeevesLib.mkSensitive(x, 42, 41)
self.assertEquals(JeevesLib.concretize(2, value), 42)
self.assertEquals(JeevesLib.concretize(2, value), 42)
self.assertEquals(JeevesLib.concretize(1, value), 41)
self.assertEquals(JeevesLib.concretize(1, value), 41)
self.assertEquals(len(JeevesLib.get_cache()), 2)
@jeeves
def test_restrict_with_cyclic(self):
jl = JeevesLib
jl.clear_cache()
# use the value itself as the context
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt == 42)
value = jl.mkSensitive(x, 42, 20)
self.assertEquals(jl.concretize(value, value), 42)
self.assertEquals(jl.concretize(value, value), 42)
value = jl.mkSensitive(x, 41, 20)
self.assertEquals(jl.concretize(value, value), 20)
self.assertEquals(jl.concretize(value, value), 20)
@jeeves
def test_restrict_under_conditional(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
value = JeevesLib.mkSensitive(x, 42, 0)
if value == 42:
JeevesLib.restrict(x, lambda ctxt : ctxt == 1)
self.assertEquals(JeevesLib.concretize(0, value), 0)
self.assertEquals(JeevesLib.concretize(0, value), 0)
self.assertEquals(JeevesLib.concretize(1, value), 42)
self.assertEquals(JeevesLib.concretize(1, value), 42)
y = JeevesLib.mkLabel('y')
value = JeevesLib.mkSensitive(y, 43, 0)
if value == 42:
JeevesLib.restrict(y, lambda ctxt : ctxt == 1)
self.assertEquals(JeevesLib.concretize(0, value), 43)
self.assertEquals(JeevesLib.concretize(0, value), 43)
self.assertEquals(JeevesLib.concretize(1, value), 43)
self.assertEquals(JeevesLib.concretize(1, value), 43)
@jeeves
def test_jbool_functions_fexprs(self):
jl = JeevesLib
jl.clear_cache()
x = jl.mkLabel('x')
jl.restrict(x, lambda (a,_) : a == 42)
for lh in (True, False):
for ll in (True, False):
for rh in (True, False):
for rl in (True, False):
l = jl.mkSensitive(x, lh, ll)
r = jl.mkSensitive(x, rh, rl)
self.assertEquals(
jl.concretize((42,0), l and r)
, operator.and_(lh, rh))
self.assertEquals(
jl.concretize((42,0), l and r)
, operator.and_(lh, rh))
self.assertEquals(
jl.concretize((10,0), l and r)
, operator.and_(ll, rl))
self.assertEquals(
jl.concretize((10,0), l and r)
, operator.and_(ll, rl))
@jeeves
def test_jif_with_assign(self):
jl = JeevesLib
jl.clear_cache()
y = jl.mkLabel('y')
jl.restrict(y, lambda ctxt : ctxt == 42)
value0 = jl.mkSensitive(y, 0, 1)
value2 = jl.mkSensitive(y, 2, 3)
value = value0
value = value2
self.assertEquals(jl.concretize(42, value), 2)
self.assertEquals(jl.concretize(10, value), 3)
self.assertEquals(jl.concretize(42, value), 2)
self.assertEquals(jl.concretize(10, value), 3)
value = 100
value = value2
self.assertEquals(jl.concretize(42, value), 2)
self.assertEquals(jl.concretize(10, value), 3)
self.assertEquals(jl.concretize(42, value), 2)
self.assertEquals(jl.concretize(10, value), 3)
value = value0
value = 200
self.assertEquals(jl.concretize(42, value), 200)
self.assertEquals(jl.concretize(10, value), 200)
self.assertEquals(jl.concretize(42, value), 200)
self.assertEquals(jl.concretize(10, value), 200)
value = 100
value = 200
self.assertEquals(jl.concretize(42, value), 200)
self.assertEquals(jl.concretize(10, value), 200)
self.assertEquals(jl.concretize(42, value), 200)
self.assertEquals(jl.concretize(10, value), 200)
@jeeves
def test_jif_with_assign_with_pathvars(self):
jl = JeevesLib
jl.clear_cache()
x = jl.mkLabel('x')
y = jl.mkLabel('y')
jl.restrict(x, lambda (a,_) : a)
jl.restrict(y, lambda (_,b) : b)
value0 = jl.mkSensitive(y, 0, 1)
value2 = jl.mkSensitive(y, 2, 3)
value = value0
if x:
value = value2
self.assertEquals(jl.concretize((True, True), value), 2)
self.assertEquals(jl.concretize((True, False), value), 3)
self.assertEquals(jl.concretize((False, True), value), 0)
self.assertEquals(jl.concretize((False, False), value), 1)
self.assertEquals(jl.concretize((True, True), value), 2)
self.assertEquals(jl.concretize((True, False), value), 3)
self.assertEquals(jl.concretize((False, True), value), 0)
self.assertEquals(jl.concretize((False, False), value), 1)
value = value0
if not x:
value = value2
self.assertEquals(jl.concretize((False, True), value), 2)
self.assertEquals(jl.concretize((False, False), value), 3)
self.assertEquals(jl.concretize((True, True), value), 0)
self.assertEquals(jl.concretize((True, False), value), 1)
self.assertEquals(jl.concretize((False, True), value), 2)
self.assertEquals(jl.concretize((False, False), value), 3)
self.assertEquals(jl.concretize((True, True), value), 0)
self.assertEquals(jl.concretize((True, False), value), 1)
@jeeves
def test_function_facets(self):
def add1(a):
return a+1
def add2(a):
return a+2
jl = JeevesLib
jl.clear_cache()
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt == 42)
fun = jl.mkSensitive(x, add1, add2)
value = fun(15)
self.assertEquals(jl.concretize(42, value), 16)
self.assertEquals(jl.concretize(41, value), 17)
self.assertEquals(jl.concretize(42, value), 16)
self.assertEquals(jl.concretize(41, value), 17)
@jeeves
def test_objects_faceted(self):
jl = JeevesLib
jl.clear_cache()
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
y = jl.mkSensitive(x,
TestClass(1, 2),
TestClass(3, 4))
self.assertEquals(jl.concretize(True, y.a), 1)
self.assertEquals(jl.concretize(True, y.b), 2)
self.assertEquals(jl.concretize(False, y.a), 3)
self.assertEquals(jl.concretize(False, y.b), 4)
self.assertEquals(jl.concretize(True, y.a), 1)
self.assertEquals(jl.concretize(True, y.b), 2)
self.assertEquals(jl.concretize(False, y.a), 3)
self.assertEquals(jl.concretize(False, y.b), 4)
@jeeves
def test_objects_mutate(self):
jl = JeevesLib
jl.clear_cache()
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
s = TestClass(1, None)
t = TestClass(3, None)
y = jl.mkSensitive(x, s, t)
if y.a == 1:
y.a = y.a + 100
self.assertEquals(jl.concretize(True, y.a), 101)
self.assertEquals(jl.concretize(True, s.a), 101)
self.assertEquals(jl.concretize(True, t.a), 3)
self.assertEquals(jl.concretize(False, y.a), 3)
self.assertEquals(jl.concretize(False, s.a), 1)
self.assertEquals(jl.concretize(False, t.a), 3)
self.assertEquals(jl.concretize(True, y.a), 101)
self.assertEquals(jl.concretize(True, s.a), 101)
self.assertEquals(jl.concretize(True, t.a), 3)
self.assertEquals(jl.concretize(False, y.a), 3)
self.assertEquals(jl.concretize(False, s.a), 1)
self.assertEquals(jl.concretize(False, t.a), 3)
@jeeves
def test_context_mutate(self):
jl = JeevesLib
jl.clear_cache()
test_alice = TestClass(1, 1)
test_bob = TestClass(2, 2)
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt: ctxt.a == 1)
y = jl.mkSensitive(x, 42, 0)
self.assertEquals(jl.concretize(test_alice, y), 42)
self.assertEquals(jl.concretize(test_bob, y), 0)
test_alice.a = 2
test_bob.a = 1
self.assertEquals(jl.concretize(test_alice, y), 0)
self.assertEquals(jl.concretize(test_bob, y), 42)
@jeeves
def test_objects_methodcall(self):
jl = JeevesLib
jl.clear_cache()
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
s = TestClassMethod(1, 10)
t = TestClassMethod(100, 1000)
y = jl.mkSensitive(x, s, t)
self.assertEquals(jl.concretize(True, y.return_sum()), 11)
self.assertEquals(jl.concretize(False, y.return_sum()), 1100)
self.assertEquals(jl.concretize(True, y.return_sum()), 11)
self.assertEquals(jl.concretize(False, y.return_sum()), 1100)
y.add_a_to_b()
self.assertEquals(jl.concretize(True, s.a), 1)
self.assertEquals(jl.concretize(True, s.b), 11)
self.assertEquals(jl.concretize(True, t.a), 100)
self.assertEquals(jl.concretize(True, t.b), 1000)
self.assertEquals(jl.concretize(True, y.a), 1)
self.assertEquals(jl.concretize(True, y.b), 11)
self.assertEquals(jl.concretize(False, s.a), 1)
self.assertEquals(jl.concretize(False, s.b), 10)
self.assertEquals(jl.concretize(False, t.a), 100)
self.assertEquals(jl.concretize(False, t.b), 1100)
self.assertEquals(jl.concretize(False, y.a), 100)
self.assertEquals(jl.concretize(False, y.b), 1100)
self.assertEquals(jl.concretize(True, s.a), 1)
self.assertEquals(jl.concretize(True, s.b), 11)
self.assertEquals(jl.concretize(True, t.a), 100)
self.assertEquals(jl.concretize(True, t.b), 1000)
self.assertEquals(jl.concretize(True, y.a), 1)
self.assertEquals(jl.concretize(True, y.b), 11)
self.assertEquals(jl.concretize(False, s.a), 1)
self.assertEquals(jl.concretize(False, s.b), 10)
self.assertEquals(jl.concretize(False, t.a), 100)
self.assertEquals(jl.concretize(False, t.b), 1100)
self.assertEquals(jl.concretize(False, y.a), 100)
self.assertEquals(jl.concretize(False, y.b), 1100)
@jeeves
def test_objects_eq_is(self):
jl = JeevesLib
jl.clear_cache()
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
a = TestClass1(3)
b = TestClass1(3)
c = TestClass1(2)
# Ensure that a < b and b < c (will probably be true anyway,
# just making sure)
s = sorted((a, b, c))
a = s[0]
b = s[1]
c = s[2]
a.a = 3
b.a = 3
c.a = 2
v1 = jl.mkSensitive(x, a, c)
v2 = jl.mkSensitive(x, b, c)
v3 = jl.mkSensitive(x, c, a)
self.assertEquals(jl.concretize(True, v1 == v1), True)
self.assertEquals(jl.concretize(True, v2 == v2), True)
self.assertEquals(jl.concretize(True, v3 == v3), True)
self.assertEquals(jl.concretize(True, v1 == v2), False)
self.assertEquals(jl.concretize(True, v2 == v3), False)
self.assertEquals(jl.concretize(True, v3 == v1), False)
self.assertEquals(jl.concretize(True, v1 == v1), True)
self.assertEquals(jl.concretize(True, v2 == v2), True)
self.assertEquals(jl.concretize(True, v3 == v3), True)
self.assertEquals(jl.concretize(True, v1 == v2), False)
self.assertEquals(jl.concretize(True, v2 == v3), False)
self.assertEquals(jl.concretize(True, v3 == v1), False)
self.assertEquals(jl.concretize(True, v1 != v1), False)
self.assertEquals(jl.concretize(True, v2 != v2), False)
self.assertEquals(jl.concretize(True, v3 != v3), False)
self.assertEquals(jl.concretize(True, v1 != v2), True)
self.assertEquals(jl.concretize(True, v2 != v3), True)
self.assertEquals(jl.concretize(True, v3 != v1), True)
self.assertEquals(jl.concretize(True, v1 != v1), False)
self.assertEquals(jl.concretize(True, v2 != v2), False)
self.assertEquals(jl.concretize(True, v3 != v3), False)
self.assertEquals(jl.concretize(True, v1 != v2), True)
self.assertEquals(jl.concretize(True, v2 != v3), True)
self.assertEquals(jl.concretize(True, v3 != v1), True)
self.assertEquals(jl.concretize(True, v1 < v1), False)
self.assertEquals(jl.concretize(True, v2 < v2), False)
self.assertEquals(jl.concretize(True, v3 < v3), False)
self.assertEquals(jl.concretize(True, v1 < v2), True)
self.assertEquals(jl.concretize(True, v2 < v3), True)
self.assertEquals(jl.concretize(True, v3 < v1), False)
self.assertEquals(jl.concretize(True, v1 < v1), False)
self.assertEquals(jl.concretize(True, v2 < v2), False)
self.assertEquals(jl.concretize(True, v3 < v3), False)
self.assertEquals(jl.concretize(True, v1 < v2), True)
self.assertEquals(jl.concretize(True, v2 < v3), True)
self.assertEquals(jl.concretize(True, v3 < v1), False)
self.assertEquals(jl.concretize(True, v1 > v1), False)
self.assertEquals(jl.concretize(True, v2 > v2), False)
self.assertEquals(jl.concretize(True, v3 > v3), False)
self.assertEquals(jl.concretize(True, v1 > v2), False)
self.assertEquals(jl.concretize(True, v2 > v3), False)
self.assertEquals(jl.concretize(True, v3 > v1), True)
self.assertEquals(jl.concretize(True, v1 > v1), False)
self.assertEquals(jl.concretize(True, v2 > v2), False)
self.assertEquals(jl.concretize(True, v3 > v3), False)
self.assertEquals(jl.concretize(True, v1 > v2), False)
self.assertEquals(jl.concretize(True, v2 > v3), False)
self.assertEquals(jl.concretize(True, v3 > v1), True)
self.assertEquals(jl.concretize(True, v1 <= v1), True)
self.assertEquals(jl.concretize(True, v2 <= v2), True)
self.assertEquals(jl.concretize(True, v3 <= v3), True)
self.assertEquals(jl.concretize(True, v1 <= v2), True)
self.assertEquals(jl.concretize(True, v2 <= v3), True)
self.assertEquals(jl.concretize(True, v3 <= v1), False)
self.assertEquals(jl.concretize(True, v1 <= v1), True)
self.assertEquals(jl.concretize(True, v2 <= v2), True)
self.assertEquals(jl.concretize(True, v3 <= v3), True)
self.assertEquals(jl.concretize(True, v1 <= v2), True)
self.assertEquals(jl.concretize(True, v2 <= v3), True)
self.assertEquals(jl.concretize(True, v3 <= v1), False)
self.assertEquals(jl.concretize(True, v1 >= v1), True)
self.assertEquals(jl.concretize(True, v2 >= v2), True)
self.assertEquals(jl.concretize(True, v3 >= v3), True)
self.assertEquals(jl.concretize(True, v1 >= v2), False)
self.assertEquals(jl.concretize(True, v2 >= v3), False)
self.assertEquals(jl.concretize(True, v3 >= v1), True)
self.assertEquals(jl.concretize(True, v1 >= v1), True)
self.assertEquals(jl.concretize(True, v2 >= v2), True)
self.assertEquals(jl.concretize(True, v3 >= v3), True)
self.assertEquals(jl.concretize(True, v1 >= v2), False)
self.assertEquals(jl.concretize(True, v2 >= v3), False)
self.assertEquals(jl.concretize(True, v3 >= v1), True)
self.assertEquals(jl.concretize(False, v2 == v3), False)
self.assertEquals(jl.concretize(False, v2 != v3), True)
self.assertEquals(jl.concretize(False, v2 < v3), False)
self.assertEquals(jl.concretize(False, v2 > v3), True)
self.assertEquals(jl.concretize(False, v2 <= v3), False)
self.assertEquals(jl.concretize(False, v2 >= v3), True)
self.assertEquals(jl.concretize(False, v2 == v3), False)
self.assertEquals(jl.concretize(False, v2 != v3), True)
self.assertEquals(jl.concretize(False, v2 < v3), False)
self.assertEquals(jl.concretize(False, v2 > v3), True)
self.assertEquals(jl.concretize(False, v2 <= v3), False)
self.assertEquals(jl.concretize(False, v2 >= v3), True)
a = TestClass1Eq(3)
b = TestClass1Eq(3)
c = TestClass1Eq(2)
v1 = jl.mkSensitive(x, a, c)
v2 = jl.mkSensitive(x, b, c)
v3 = jl.mkSensitive(x, c, a)
self.assertEquals(jl.concretize(True, v1 == v1), True)
self.assertEquals(jl.concretize(True, v2 == v2), True)
self.assertEquals(jl.concretize(True, v3 == v3), True)
self.assertEquals(jl.concretize(True, v1 == v2), True)
self.assertEquals(jl.concretize(True, v2 == v3), False)
self.assertEquals(jl.concretize(True, v3 == v1), False)
self.assertEquals(jl.concretize(True, v1 == v1), True)
self.assertEquals(jl.concretize(True, v2 == v2), True)
self.assertEquals(jl.concretize(True, v3 == v3), True)
self.assertEquals(jl.concretize(True, v1 == v2), True)
self.assertEquals(jl.concretize(True, v2 == v3), False)
self.assertEquals(jl.concretize(True, v3 == v1), False)
self.assertEquals(jl.concretize(True, v1 != v1), False)
self.assertEquals(jl.concretize(True, v2 != v2), False)
self.assertEquals(jl.concretize(True, v3 != v3), False)
self.assertEquals(jl.concretize(True, v1 != v2), False)
self.assertEquals(jl.concretize(True, v2 != v3), True)
self.assertEquals(jl.concretize(True, v3 != v1), True)
self.assertEquals(jl.concretize(True, v1 != v1), False)
self.assertEquals(jl.concretize(True, v2 != v2), False)
self.assertEquals(jl.concretize(True, v3 != v3), False)
self.assertEquals(jl.concretize(True, v1 != v2), False)
self.assertEquals(jl.concretize(True, v2 != v3), True)
self.assertEquals(jl.concretize(True, v3 != v1), True)
self.assertEquals(jl.concretize(True, v1 < v1), False)
self.assertEquals(jl.concretize(True, v2 < v2), False)
self.assertEquals(jl.concretize(True, v3 < v3), False)
self.assertEquals(jl.concretize(True, v1 < v2), False)
self.assertEquals(jl.concretize(True, v2 < v3), False)
self.assertEquals(jl.concretize(True, v3 < v1), True)
self.assertEquals(jl.concretize(True, v1 < v1), False)
self.assertEquals(jl.concretize(True, v2 < v2), False)
self.assertEquals(jl.concretize(True, v3 < v3), False)
self.assertEquals(jl.concretize(True, v1 < v2), False)
self.assertEquals(jl.concretize(True, v2 < v3), False)
self.assertEquals(jl.concretize(True, v3 < v1), True)
self.assertEquals(jl.concretize(True, v1 > v1), False)
self.assertEquals(jl.concretize(True, v2 > v2), False)
self.assertEquals(jl.concretize(True, v3 > v3), False)
self.assertEquals(jl.concretize(True, v1 > v2), False)
self.assertEquals(jl.concretize(True, v2 > v3), True)
self.assertEquals(jl.concretize(True, v3 > v1), False)
self.assertEquals(jl.concretize(True, v1 > v1), False)
self.assertEquals(jl.concretize(True, v2 > v2), False)
self.assertEquals(jl.concretize(True, v3 > v3), False)
self.assertEquals(jl.concretize(True, v1 > v2), False)
self.assertEquals(jl.concretize(True, v2 > v3), True)
self.assertEquals(jl.concretize(True, v3 > v1), False)
self.assertEquals(jl.concretize(True, v1 <= v1), True)
self.assertEquals(jl.concretize(True, v2 <= v2), True)
self.assertEquals(jl.concretize(True, v3 <= v3), True)
self.assertEquals(jl.concretize(True, v1 <= v2), True)
self.assertEquals(jl.concretize(True, v2 <= v3), False)
self.assertEquals(jl.concretize(True, v3 <= v1), True)
self.assertEquals(jl.concretize(True, v1 <= v1), True)
self.assertEquals(jl.concretize(True, v2 <= v2), True)
self.assertEquals(jl.concretize(True, v3 <= v3), True)
self.assertEquals(jl.concretize(True, v1 <= v2), True)
self.assertEquals(jl.concretize(True, v2 <= v3), False)
self.assertEquals(jl.concretize(True, v3 <= v1), True)
self.assertEquals(jl.concretize(True, v1 >= v1), True)
self.assertEquals(jl.concretize(True, v2 >= v2), True)
self.assertEquals(jl.concretize(True, v3 >= v3), True)
self.assertEquals(jl.concretize(True, v1 >= v2), True)
self.assertEquals(jl.concretize(True, v2 >= v3), True)
self.assertEquals(jl.concretize(True, v3 >= v1), False)
self.assertEquals(jl.concretize(True, v1 >= v1), True)
self.assertEquals(jl.concretize(True, v2 >= v2), True)
self.assertEquals(jl.concretize(True, v3 >= v3), True)
self.assertEquals(jl.concretize(True, v1 >= v2), True)
self.assertEquals(jl.concretize(True, v2 >= v3), True)
self.assertEquals(jl.concretize(True, v3 >= v1), False)
self.assertEquals(jl.concretize(False, v2 == v3), False)
self.assertEquals(jl.concretize(False, v2 != v3), True)
self.assertEquals(jl.concretize(False, v2 < v3), True)
self.assertEquals(jl.concretize(False, v2 > v3), False)
self.assertEquals(jl.concretize(False, v2 <= v3), True)
self.assertEquals(jl.concretize(False, v2 >= v3), False)
self.assertEquals(jl.concretize(False, v2 == v3), False)
self.assertEquals(jl.concretize(False, v2 != v3), True)
self.assertEquals(jl.concretize(False, v2 < v3), True)
self.assertEquals(jl.concretize(False, v2 > v3), False)
self.assertEquals(jl.concretize(False, v2 <= v3), True)
self.assertEquals(jl.concretize(False, v2 >= v3), False)
@jeeves
def test_jhasElt(self):
jl = JeevesLib
jl.clear_cache()
a = jl.mkLabel ()
jl.restrict(a, lambda x: x)
xS = jl.mkSensitive(a, 42, 1)
b = jl.mkLabel ()
jl.restrict(b, lambda x: x)
yS = jl.mkSensitive(b, 43, 3)
lst = [xS, 2, yS]
self.assertEquals(jl.concretize(True, 42 in lst) , True)
self.assertEquals(jl.concretize(False, 42 in lst) , False)
self.assertEquals(jl.concretize(True, 1 in lst) , False)
self.assertEquals(jl.concretize(False, 1 in lst) , True)
self.assertEquals(jl.concretize(True, 43 in lst) , True)
self.assertEquals(jl.concretize(False, 43 in lst) , False)
self.assertEquals(jl.concretize(True, 3 in lst) , False)
self.assertEquals(jl.concretize(False, 3 in lst) , True)
self.assertEquals(jl.concretize(True, 42 in lst) , True)
self.assertEquals(jl.concretize(False, 42 in lst) , False)
self.assertEquals(jl.concretize(True, 1 in lst) , False)
self.assertEquals(jl.concretize(False, 1 in lst) , True)
self.assertEquals(jl.concretize(True, 43 in lst) , True)
self.assertEquals(jl.concretize(False, 43 in lst) , False)
self.assertEquals(jl.concretize(True, 3 in lst) , False)
self.assertEquals(jl.concretize(False, 3 in lst) , True)
@jeeves
def test_list(self):
jl = JeevesLib
jl.clear_cache()
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
l = jl.mkSensitive(x, [40,41,42], [0,1,2,3])
self.assertEqual(jl.concretize(True, l[0]), 40)
self.assertEqual(jl.concretize(True, l[1]), 41)
self.assertEqual(jl.concretize(True, l[2]), 42)
self.assertEqual(jl.concretize(False, l[0]), 0)
self.assertEqual(jl.concretize(False, l[1]), 1)
self.assertEqual(jl.concretize(False, l[2]), 2)
self.assertEqual(jl.concretize(False, l[3]), 3)
self.assertEqual(jl.concretize(True, l[0]), 40)
self.assertEqual(jl.concretize(True, l[1]), 41)
self.assertEqual(jl.concretize(True, l[2]), 42)
self.assertEqual(jl.concretize(False, l[0]), 0)
self.assertEqual(jl.concretize(False, l[1]), 1)
self.assertEqual(jl.concretize(False, l[2]), 2)
self.assertEqual(jl.concretize(False, l[3]), 3)
self.assertEqual(jl.concretize(True, l.__len__()), 3)
self.assertEqual(jl.concretize(False, l.__len__()), 4)
self.assertEqual(jl.concretize(True, l.__len__()), 3)
self.assertEqual(jl.concretize(False, l.__len__()), 4)
l[1] = 19
self.assertEqual(jl.concretize(True, l[0]), 40)
self.assertEqual(jl.concretize(True, l[1]), 19)
self.assertEqual(jl.concretize(True, l[2]), 42)
self.assertEqual(jl.concretize(False, l[0]), 0)
self.assertEqual(jl.concretize(False, l[1]), 19)
self.assertEqual(jl.concretize(False, l[2]), 2)
self.assertEqual(jl.concretize(False, l[3]), 3)
self.assertEqual(jl.concretize(True, l[0]), 40)
self.assertEqual(jl.concretize(True, l[1]), 19)
self.assertEqual(jl.concretize(True, l[2]), 42)
self.assertEqual(jl.concretize(False, l[0]), 0)
self.assertEqual(jl.concretize(False, l[1]), 19)
self.assertEqual(jl.concretize(False, l[2]), 2)
self.assertEqual(jl.concretize(False, l[3]), 3)
@jeeves
def test_jmap(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda ctxt : ctxt)
l = JeevesLib.mkSensitive(x, [0,1,2], [3,4,5,6])
m = [x*x for x in l]
self.assertEqual(JeevesLib.concretize(True, m[0]), 0)
self.assertEqual(JeevesLib.concretize(True, m[1]), 1)
self.assertEqual(JeevesLib.concretize(True, m[2]), 4)
self.assertEqual(JeevesLib.concretize(False, m[0]), 9)
self.assertEqual(JeevesLib.concretize(False, m[1]), 16)
self.assertEqual(JeevesLib.concretize(False, m[2]), 25)
self.assertEqual(JeevesLib.concretize(False, m[3]), 36)
self.assertEqual(JeevesLib.concretize(True, m[0]), 0)
self.assertEqual(JeevesLib.concretize(True, m[1]), 1)
self.assertEqual(JeevesLib.concretize(True, m[2]), 4)
self.assertEqual(JeevesLib.concretize(False, m[0]), 9)
self.assertEqual(JeevesLib.concretize(False, m[1]), 16)
self.assertEqual(JeevesLib.concretize(False, m[2]), 25)
self.assertEqual(JeevesLib.concretize(False, m[3]), 36)
@jeeves
def test_jmap_for(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda ctxt : ctxt)
l = JeevesLib.mkSensitive(x, [0,1,2], [3,4,5,6])
m = 0
for t in l:
m = m + t*t
self.assertEqual(JeevesLib.concretize(True, m), 5)
self.assertEqual(JeevesLib.concretize(False, m), 86)
self.assertEqual(JeevesLib.concretize(True, m), 5)
self.assertEqual(JeevesLib.concretize(False, m), 86)
@jeeves
def test_jlist(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda ctxt : ctxt)
l = JeevesLib.mkSensitive(x, [0,1,2], [3,4,5,6])
if x:
l.append(10)
else:
l.append(11)
self.assertEqual(JeevesLib.concretize(True, l[0]), 0)
self.assertEqual(JeevesLib.concretize(True, l[1]), 1)
self.assertEqual(JeevesLib.concretize(True, l[2]), 2)
self.assertEqual(JeevesLib.concretize(True, l[3]), 10)
self.assertEqual(JeevesLib.concretize(False, l[0]), 3)
self.assertEqual(JeevesLib.concretize(False, l[1]), 4)
self.assertEqual(JeevesLib.concretize(False, l[2]), 5)
self.assertEqual(JeevesLib.concretize(False, l[3]), 6)
self.assertEqual(JeevesLib.concretize(False, l[4]), 11)
self.assertEqual(JeevesLib.concretize(True, l[0]), 0)
self.assertEqual(JeevesLib.concretize(True, l[1]), 1)
self.assertEqual(JeevesLib.concretize(True, l[2]), 2)
self.assertEqual(JeevesLib.concretize(True, l[3]), 10)
self.assertEqual(JeevesLib.concretize(False, l[0]), 3)
self.assertEqual(JeevesLib.concretize(False, l[1]), 4)
self.assertEqual(JeevesLib.concretize(False, l[2]), 5)
self.assertEqual(JeevesLib.concretize(False, l[3]), 6)
self.assertEqual(JeevesLib.concretize(False, l[4]), 11)
if x:
l[0] = 20
self.assertEqual(JeevesLib.concretize(True, l[0]), 20)
self.assertEqual(JeevesLib.concretize(False, l[0]), 3)
self.assertEqual(JeevesLib.concretize(True, l[0]), 20)
self.assertEqual(JeevesLib.concretize(False, l[0]), 3)
@jeeves
def test_scope(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda ctxt : ctxt)
y = 5
def awesome_function():
y = 7
if x:
return 30
y = 19
return 17
z = awesome_function()
self.assertEqual(JeevesLib.concretize(True, y), 5)
self.assertEqual(JeevesLib.concretize(False, y), 5)
self.assertEqual(JeevesLib.concretize(True, z), 30)
self.assertEqual(JeevesLib.concretize(False, z), 17)
self.assertEqual(JeevesLib.concretize(True, y), 5)
self.assertEqual(JeevesLib.concretize(False, y), 5)
self.assertEqual(JeevesLib.concretize(True, z), 30)
self.assertEqual(JeevesLib.concretize(False, z), 17)
@jeeves
def test_jfun(self):
JeevesLib.clear_cache()
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda ctxt : ctxt)
y = JeevesLib.mkSensitive(x, [1,2,3], [4,5,6,7])
z = [x*x for x in y]
self.assertEqual(JeevesLib.concretize(True, z[0]), 1)
self.assertEqual(JeevesLib.concretize(True, z[1]), 4)
self.assertEqual(JeevesLib.concretize(True, z[2]), 9)
self.assertEqual(JeevesLib.concretize(False, z[0]), 16)
self.assertEqual(JeevesLib.concretize(False, z[1]), 25)
self.assertEqual(JeevesLib.concretize(False, z[2]), 36)
self.assertEqual(JeevesLib.concretize(False, z[3]), 49)
self.assertEqual(JeevesLib.concretize(True, z[0]), 1)
self.assertEqual(JeevesLib.concretize(True, z[1]), 4)
self.assertEqual(JeevesLib.concretize(True, z[2]), 9)
self.assertEqual(JeevesLib.concretize(False, z[0]), 16)
self.assertEqual(JeevesLib.concretize(False, z[1]), 25)
self.assertEqual(JeevesLib.concretize(False, z[2]), 36)
self.assertEqual(JeevesLib.concretize(False, z[3]), 49)
```
#### File: jeeves/test/testJeevesConfidentiality.py
```python
import JeevesLib
from smt.Z3 import *
import unittest
from JeevesLib import PositiveVariable, NegativeVariable
class TestJeevesConfidentiality(unittest.TestCase):
def setUp(self):
self.s = Z3()
# reset the Jeeves state
JeevesLib.init()
def test_restrict_all_permissive(self):
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda _: True)
xConcrete = JeevesLib.concretize(None, x)
# make sure that concretizing x allows everyone to see
self.assertTrue(xConcrete)
def test_restrict_all_restrictive(self):
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda _: False)
xConcrete = JeevesLib.concretize(None, x)
self.assertFalse(xConcrete)
def test_restrict_with_context(self):
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda y: y == 2)
xConcrete = JeevesLib.concretize(2, x)
self.assertTrue(xConcrete)
xConcrete = JeevesLib.concretize(3, x)
self.assertFalse(xConcrete)
def test_restrict_with_sensitivevalue(self):
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda y: y == 2)
value = JeevesLib.mkSensitive(x, 42, 41)
valueConcrete = JeevesLib.concretize(2, value)
self.assertEquals(valueConcrete, 42)
valueConcrete = JeevesLib.concretize(1, value)
self.assertEquals(valueConcrete, 41)
def test_restrict_with_cyclic(self):
jl = JeevesLib
# use the value itself as the context
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt == 42)
value = jl.mkSensitive(x, 42, 20)
self.assertEquals(jl.concretize(value, value), 42)
value = jl.mkSensitive(x, 41, 20)
self.assertEquals(jl.concretize(value, value), 20)
def test_restrict_with_sensitive_policy(self):
jl = JeevesLib
x = jl.mkLabel()
jl.restrict(x, lambda ctxt: ctxt == 42)
y = jl.mkLabel()
jl.restrict(y, lambda ctxt: x)
value = jl.mkSensitive(y, 1, 0)
self.assertEquals(jl.concretize(42, value), 1)
self.assertEquals(jl.concretize(0, value), 0)
def test_jif_with_ints(self):
jl = JeevesLib
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt == 42)
a = jl.jif(x, lambda:13, lambda:17 )
self.assertEquals(jl.concretize(42, a), 13)
self.assertEquals(jl.concretize(-2, a), 17)
b = jl.jif(True, lambda:13, lambda:17)
self.assertEquals(jl.concretize(42, b), 13)
self.assertEquals(jl.concretize(-2, b), 13)
c = jl.jif(False, lambda:13, lambda:17)
self.assertEquals(jl.concretize(42, c), 17)
self.assertEquals(jl.concretize(-2, c), 17)
conditional = jl.mkSensitive(x, True, False)
d = jl.jif(conditional, lambda:13, lambda:17)
self.assertEquals(jl.concretize(42, d), 13)
self.assertEquals(jl.concretize(-2, d), 17)
conditional = jl.mkSensitive(x, False, True)
d = jl.jif(conditional, lambda:13, lambda:17)
self.assertEquals(jl.concretize(42, d), 17)
self.assertEquals(jl.concretize(-2, d), 13)
y = jl.mkLabel('y')
z = jl.mkLabel('z')
jl.restrict(y, lambda (a,_) : a)
jl.restrict(z, lambda (_,a) : a)
faceted_int = jl.mkSensitive(y, 10, 0)
conditional = faceted_int > 5
i1 = jl.mkSensitive(z, 101, 102)
i2 = jl.mkSensitive(z, 103, 104)
f = jl.jif(conditional, lambda:i1, lambda:i2)
self.assertEquals(jl.concretize((True, True), f),101)
self.assertEquals(jl.concretize((True, False), f), 102)
self.assertEquals(jl.concretize((False, True), f), 103)
self.assertEquals(jl.concretize((False, False), f), 104)
def test_jif_with_objects(self):
return NotImplemented
def test_restrict_under_conditional(self):
jl = JeevesLib
x = jl.mkLabel('x')
def yes_restrict():
jl.restrict(x, lambda ctxt : ctxt == 1)
def no_restrict():
pass
value = jl.mkSensitive(x, 42, 0)
jl.jif(value == 42, yes_restrict, no_restrict)
self.assertEquals(jl.concretize(0, value), 0)
self.assertEquals(jl.concretize(1, value), 42)
y = jl.mkLabel('y')
def yes_restrict():
jl.restrict(y, lambda ctxt : ctxt == 1)
def no_restrict():
pass
value = jl.mkSensitive(y, 43, 0)
jl.jif(value == 42, yes_restrict, no_restrict)
self.assertEquals(jl.concretize(0, value), 43)
self.assertEquals(jl.concretize(1, value), 43)
def test_jbool_functions_constants(self):
jl = JeevesLib
self.assertEquals(jl.jand(lambda:True, lambda:True), True)
self.assertEquals(jl.jand(lambda:True, lambda:False), False)
self.assertEquals(jl.jand(lambda:False, lambda:True), False)
self.assertEquals(jl.jand(lambda:False, lambda:False), False)
self.assertEquals(jl.jor(lambda:True, lambda:True), True)
self.assertEquals(jl.jor(lambda:True, lambda:False), True)
self.assertEquals(jl.jor(lambda:False, lambda:True), True)
self.assertEquals(jl.jor(lambda:False, lambda:False), False)
self.assertEquals(jl.jnot(True), False)
self.assertEquals(jl.jnot(False), True)
def test_jbool_functions_fexprs(self):
jl = JeevesLib
x = jl.mkLabel('x')
jl.restrict(x, lambda (a,_) : a == 42)
for lh in (True, False):
for ll in (True, False):
for rh in (True, False):
for rl in (True, False):
l = jl.mkSensitive(x, lh, ll)
r = jl.mkSensitive(x, rh, rl)
self.assertEquals(jl.concretize((42,0), jl.jand(lambda:l, lambda:r)), lh and rh)
self.assertEquals(jl.concretize((10,0), jl.jand(lambda:l, lambda:r)), ll and rl)
self.assertEquals(jl.concretize((42,0), jl.jor(lambda:l, lambda:r)), lh or rh)
self.assertEquals(jl.concretize((10,0), jl.jor(lambda:l, lambda:r)), ll or rl)
self.assertEquals(jl.concretize((42,0), jl.jnot(l)), not lh)
self.assertEquals(jl.concretize((10,0), jl.jnot(l)), not ll)
y = jl.mkLabel('y')
jl.restrict(y, lambda (_,b) : b == 42)
for lh in (True, False):
for ll in (True, False):
for rh in (True, False):
for rl in (True, False):
l = jl.mkSensitive(x, lh, ll)
r = jl.mkSensitive(y, rh, rl)
self.assertEquals(jl.concretize((42,0), jl.jand(lambda:l, lambda:r)), lh and rl)
self.assertEquals(jl.concretize((10,0), jl.jand(lambda:l, lambda:r)), ll and rl)
self.assertEquals(jl.concretize((42,42), jl.jand(lambda:l, lambda:r)), lh and rh)
self.assertEquals(jl.concretize((10,42), jl.jand(lambda:l, lambda:r)), ll and rh)
self.assertEquals(jl.concretize((42,0), jl.jor(lambda:l, lambda:r)), lh or rl)
self.assertEquals(jl.concretize((10,0), jl.jor(lambda:l, lambda:r)), ll or rl)
self.assertEquals(jl.concretize((42,42), jl.jor(lambda:l, lambda:r)), lh or rh)
self.assertEquals(jl.concretize((10,42), jl.jor(lambda:l, lambda:r)), ll or rh)
def test_nested_conditionals_no_shared_path(self):
return NotImplemented
def test_nested_conditionals_shared_path(self):
return NotImplemented
def test_jif_with_assign(self):
jl = JeevesLib
y = jl.mkLabel('y')
jl.restrict(y, lambda ctxt : ctxt == 42)
value0 = jl.mkSensitive(y, 0, 1)
value2 = jl.mkSensitive(y, 2, 3)
value = value0
value = jl.jassign(value, value2)
self.assertEquals(jl.concretize(42, value), 2)
self.assertEquals(jl.concretize(10, value), 3)
value = 100
value = jl.jassign(value, value2)
self.assertEquals(jl.concretize(42, value), 2)
self.assertEquals(jl.concretize(10, value), 3)
value = value0
value = jl.jassign(value, 200)
self.assertEquals(jl.concretize(42, value), 200)
self.assertEquals(jl.concretize(10, value), 200)
value = 100
value = jl.jassign(value, 200)
self.assertEquals(jl.concretize(42, value), 200)
self.assertEquals(jl.concretize(10, value), 200)
def test_jif_with_assign_with_pathvars(self):
jl = JeevesLib
x = jl.mkLabel('x')
y = jl.mkLabel('y')
jl.restrict(x, lambda (a,_) : a)
jl.restrict(y, lambda (_,b) : b)
value0 = jl.mkSensitive(y, 0, 1)
value2 = jl.mkSensitive(y, 2, 3)
value = value0
with PositiveVariable(x):
value = jl.jassign(value, value2)
self.assertEquals(jl.concretize((True, True), value), 2)
self.assertEquals(jl.concretize((True, False), value), 3)
self.assertEquals(jl.concretize((False, True), value), 0)
self.assertEquals(jl.concretize((False, False), value), 1)
value = value0
with NegativeVariable(x):
value = jl.jassign(value, value2)
self.assertEquals(jl.concretize((False, True), value), 2)
self.assertEquals(jl.concretize((False, False), value), 3)
self.assertEquals(jl.concretize((True, True), value), 0)
self.assertEquals(jl.concretize((True, False), value), 1)
def test_function_facets(self):
def add1(a):
return a+1
def add2(a):
return a+2
jl = JeevesLib
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt == 42)
fun = jl.mkSensitive(x, add1, add2)
value = fun(15)
self.assertEquals(jl.concretize(42, value), 16)
self.assertEquals(jl.concretize(41, value), 17)
def test_objects_faceted(self):
class TestClass:
def __init__(self, a, b):
self.a = a
self.b = b
jl = JeevesLib
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
y = jl.mkSensitive(x,
TestClass(1, 2),
TestClass(3, 4))
self.assertEquals(jl.concretize(True, y.a), 1)
self.assertEquals(jl.concretize(True, y.b), 2)
self.assertEquals(jl.concretize(False, y.a), 3)
self.assertEquals(jl.concretize(False, y.b), 4)
def test_objects_mutate(self):
class TestClass:
def __init__(self, a, b):
self.__dict__['a'] = a
self.__dict__['b'] = b
def __setattr__(self, attr, val):
self.__dict__[attr] = JeevesLib.jassign(
self.__dict__[attr], val)
jl = JeevesLib
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
s = TestClass(1, None)
t = TestClass(3, None)
y = jl.mkSensitive(x, s, t)
def mut():
y.a = y.a + 100
def nonmut():
pass
jl.jif(y.a == 1, mut, nonmut)
self.assertEquals(jl.concretize(True, y.a), 101)
self.assertEquals(jl.concretize(True, s.a), 101)
self.assertEquals(jl.concretize(True, t.a), 3)
self.assertEquals(jl.concretize(False, y.a), 3)
self.assertEquals(jl.concretize(False, s.a), 1)
self.assertEquals(jl.concretize(False, t.a), 3)
def test_objects_methodcall(self):
class TestClassMethod:
def __init__(self, a, b):
self.a = a
self.b = b
def add_a_to_b(self):
self.b = JeevesLib.jassign(self.b, self.a + self.b)
def return_sum(self):
return self.a + self.b
jl = JeevesLib
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
s = TestClassMethod(1, 10)
t = TestClassMethod(100, 1000)
y = jl.mkSensitive(x, s, t)
self.assertEquals(jl.concretize(True, y.return_sum()), 11)
self.assertEquals(jl.concretize(False, y.return_sum()), 1100)
y.add_a_to_b()
self.assertEquals(jl.concretize(True, s.a), 1)
self.assertEquals(jl.concretize(True, s.b), 11)
self.assertEquals(jl.concretize(True, t.a), 100)
self.assertEquals(jl.concretize(True, t.b), 1000)
self.assertEquals(jl.concretize(True, y.a), 1)
self.assertEquals(jl.concretize(True, y.b), 11)
self.assertEquals(jl.concretize(False, s.a), 1)
self.assertEquals(jl.concretize(False, s.b), 10)
self.assertEquals(jl.concretize(False, t.a), 100)
self.assertEquals(jl.concretize(False, t.b), 1100)
self.assertEquals(jl.concretize(False, y.a), 100)
self.assertEquals(jl.concretize(False, y.b), 1100)
def test_objects_eq_is(self):
class TestClass:
def __init__(self, a):
self.a = a
class TestClassEq:
def __init__(self, a):
self.a = a
def __eq__(self, other):
return self.a == other.a
def __ne__(self, other):
return self.a != other.a
def __lt__(self, other):
return self.a < other.a
def __gt__(self, other):
return self.a > other.a
def __le__(self, other):
return self.a <= other.a
def __ge__(self, other):
return self.a >= other.a
jl = JeevesLib
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
a = TestClass(3)
b = TestClass(3)
c = TestClass(2)
# Ensure that a < b and b < c (will probably be true anyway,
# just making sure)
a, b, c = sorted((a, b, c))
a.a, b.a, c.a = 3, 3, 2
v1 = jl.mkSensitive(x, a, c)
v2 = jl.mkSensitive(x, b, c)
v3 = jl.mkSensitive(x, c, a)
self.assertEquals(jl.concretize(True, v1 == v1), True)
self.assertEquals(jl.concretize(True, v2 == v2), True)
self.assertEquals(jl.concretize(True, v3 == v3), True)
self.assertEquals(jl.concretize(True, v1 == v2), False)
self.assertEquals(jl.concretize(True, v2 == v3), False)
self.assertEquals(jl.concretize(True, v3 == v1), False)
self.assertEquals(jl.concretize(True, v1 != v1), False)
self.assertEquals(jl.concretize(True, v2 != v2), False)
self.assertEquals(jl.concretize(True, v3 != v3), False)
self.assertEquals(jl.concretize(True, v1 != v2), True)
self.assertEquals(jl.concretize(True, v2 != v3), True)
self.assertEquals(jl.concretize(True, v3 != v1), True)
self.assertEquals(jl.concretize(True, v1 < v1), False)
self.assertEquals(jl.concretize(True, v2 < v2), False)
self.assertEquals(jl.concretize(True, v3 < v3), False)
self.assertEquals(jl.concretize(True, v1 < v2), True)
self.assertEquals(jl.concretize(True, v2 < v3), True)
self.assertEquals(jl.concretize(True, v3 < v1), False)
self.assertEquals(jl.concretize(True, v1 > v1), False)
self.assertEquals(jl.concretize(True, v2 > v2), False)
self.assertEquals(jl.concretize(True, v3 > v3), False)
self.assertEquals(jl.concretize(True, v1 > v2), False)
self.assertEquals(jl.concretize(True, v2 > v3), False)
self.assertEquals(jl.concretize(True, v3 > v1), True)
self.assertEquals(jl.concretize(True, v1 <= v1), True)
self.assertEquals(jl.concretize(True, v2 <= v2), True)
self.assertEquals(jl.concretize(True, v3 <= v3), True)
self.assertEquals(jl.concretize(True, v1 <= v2), True)
self.assertEquals(jl.concretize(True, v2 <= v3), True)
self.assertEquals(jl.concretize(True, v3 <= v1), False)
self.assertEquals(jl.concretize(True, v1 >= v1), True)
self.assertEquals(jl.concretize(True, v2 >= v2), True)
self.assertEquals(jl.concretize(True, v3 >= v3), True)
self.assertEquals(jl.concretize(True, v1 >= v2), False)
self.assertEquals(jl.concretize(True, v2 >= v3), False)
self.assertEquals(jl.concretize(True, v3 >= v1), True)
self.assertEquals(jl.concretize(False, v2 == v3), False)
self.assertEquals(jl.concretize(False, v2 != v3), True)
self.assertEquals(jl.concretize(False, v2 < v3), False)
self.assertEquals(jl.concretize(False, v2 > v3), True)
self.assertEquals(jl.concretize(False, v2 <= v3), False)
self.assertEquals(jl.concretize(False, v2 >= v3), True)
a = TestClassEq(3)
b = TestClassEq(3)
c = TestClassEq(2)
v1 = jl.mkSensitive(x, a, c)
v2 = jl.mkSensitive(x, b, c)
v3 = jl.mkSensitive(x, c, a)
self.assertEquals(jl.concretize(True, v1 == v1), True)
self.assertEquals(jl.concretize(True, v2 == v2), True)
self.assertEquals(jl.concretize(True, v3 == v3), True)
self.assertEquals(jl.concretize(True, v1 == v2), True)
self.assertEquals(jl.concretize(True, v2 == v3), False)
self.assertEquals(jl.concretize(True, v3 == v1), False)
self.assertEquals(jl.concretize(True, v1 != v1), False)
self.assertEquals(jl.concretize(True, v2 != v2), False)
self.assertEquals(jl.concretize(True, v3 != v3), False)
self.assertEquals(jl.concretize(True, v1 != v2), False)
self.assertEquals(jl.concretize(True, v2 != v3), True)
self.assertEquals(jl.concretize(True, v3 != v1), True)
self.assertEquals(jl.concretize(True, v1 < v1), False)
self.assertEquals(jl.concretize(True, v2 < v2), False)
self.assertEquals(jl.concretize(True, v3 < v3), False)
self.assertEquals(jl.concretize(True, v1 < v2), False)
self.assertEquals(jl.concretize(True, v2 < v3), False)
self.assertEquals(jl.concretize(True, v3 < v1), True)
self.assertEquals(jl.concretize(True, v1 > v1), False)
self.assertEquals(jl.concretize(True, v2 > v2), False)
self.assertEquals(jl.concretize(True, v3 > v3), False)
self.assertEquals(jl.concretize(True, v1 > v2), False)
self.assertEquals(jl.concretize(True, v2 > v3), True)
self.assertEquals(jl.concretize(True, v3 > v1), False)
self.assertEquals(jl.concretize(True, v1 <= v1), True)
self.assertEquals(jl.concretize(True, v2 <= v2), True)
self.assertEquals(jl.concretize(True, v3 <= v3), True)
self.assertEquals(jl.concretize(True, v1 <= v2), True)
self.assertEquals(jl.concretize(True, v2 <= v3), False)
self.assertEquals(jl.concretize(True, v3 <= v1), True)
self.assertEquals(jl.concretize(True, v1 >= v1), True)
self.assertEquals(jl.concretize(True, v2 >= v2), True)
self.assertEquals(jl.concretize(True, v3 >= v3), True)
self.assertEquals(jl.concretize(True, v1 >= v2), True)
self.assertEquals(jl.concretize(True, v2 >= v3), True)
self.assertEquals(jl.concretize(True, v3 >= v1), False)
self.assertEquals(jl.concretize(False, v2 == v3), False)
self.assertEquals(jl.concretize(False, v2 != v3), True)
self.assertEquals(jl.concretize(False, v2 < v3), True)
self.assertEquals(jl.concretize(False, v2 > v3), False)
self.assertEquals(jl.concretize(False, v2 <= v3), True)
self.assertEquals(jl.concretize(False, v2 >= v3), False)
def test_objects_operators(self):
return NotImplemented
def test_objects_delattr(self):
return NotImplemented
def test_objects_hasattr(self):
return NotImplemented
def test_objects_callable(self):
return NotImplemented
def test_functions_operators(self):
return NotImplemented
def test_accessing_special_attributes(self):
return NotImplemented
def test_attribute_names(self):
return NotImplemented
def test_jhasElt(self):
jl = JeevesLib
a = jl.mkLabel ()
jl.restrict(a, lambda x: x)
xS = jl.mkSensitive(a, 42, 1)
b = jl.mkLabel ()
jl.restrict(b, lambda x: x)
yS = jl.mkSensitive(b, 43, 3)
lst = [xS, 2, yS]
self.assertTrue(jl.concretize(True, jl.jhasElt(lst, lambda x: x == 42)))
self.assertFalse(jl.concretize(False, jl.jhasElt(lst, lambda x: x == 42)))
self.assertFalse(jl.concretize(True, jl.jhasElt(lst, lambda x: x == 1)))
self.assertTrue(jl.concretize(False, jl.jhasElt(lst, lambda x: x == 1)))
self.assertTrue(jl.concretize(True, jl.jhasElt(lst, lambda x: x == 43)))
self.assertFalse(jl.concretize(False, jl.jhasElt(lst, lambda x: x == 43)))
self.assertFalse(jl.concretize(True, jl.jhasElt(lst, lambda x: x == 3)))
self.assertTrue(jl.concretize(False, jl.jhasElt(lst, lambda x: x == 3)))
def test_jhas_empty(self):
jl = JeevesLib
lst = []
self.assertFalse(jl.concretize(True, jl.jhas(lst, 2)))
def test_jhas_in_policy(self):
jl = JeevesLib
a = jl.mkLabel ()
jl.restrict(a, lambda oc: jl.jhas(oc, 3))
self.assertTrue(jl.concretize([1, 2, 3], a))
self.assertTrue(jl.concretize([3], a))
self.assertFalse(jl.concretize([], a))
self.assertFalse(jl.concretize([1, 2], a))
def test_jall(self):
jl = JeevesLib
a = jl.mkLabel ()
jl.restrict(a, lambda x: x)
xS = jl.mkSensitive(a, True, False)
b = jl.mkLabel ()
jl.restrict(b, lambda x: jl.jnot(x) )
yS = jl.mkSensitive(b, False, True)
lst = [xS, True, yS]
self.assertTrue(jl.concretize(True, jl.jall(lst)))
self.assertFalse(jl.concretize(False, jl.jall(lst)))
def test_list(self):
jl = JeevesLib
x = jl.mkLabel('x')
jl.restrict(x, lambda ctxt : ctxt)
l = jl.mkSensitive(x, [40,41,42], [0,1,2,3])
self.assertEqual(jl.concretize(True, l[0]), 40)
self.assertEqual(jl.concretize(True, l[1]), 41)
self.assertEqual(jl.concretize(True, l[2]), 42)
self.assertEqual(jl.concretize(False, l[0]), 0)
self.assertEqual(jl.concretize(False, l[1]), 1)
self.assertEqual(jl.concretize(False, l[2]), 2)
self.assertEqual(jl.concretize(False, l[3]), 3)
self.assertEqual(jl.concretize(True, l.__len__()), 3)
self.assertEqual(jl.concretize(False, l.__len__()), 4)
l[1] = 19
self.assertEqual(jl.concretize(True, l[0]), 40)
self.assertEqual(jl.concretize(True, l[1]), 19)
self.assertEqual(jl.concretize(True, l[2]), 42)
self.assertEqual(jl.concretize(False, l[0]), 0)
self.assertEqual(jl.concretize(False, l[1]), 19)
self.assertEqual(jl.concretize(False, l[2]), 2)
self.assertEqual(jl.concretize(False, l[3]), 3)
def test_jmap_listcomp(self):
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda ctxt : ctxt)
l = JeevesLib.mkSensitive(x, [0,1,2], [3,4,5,6])
m = JeevesLib.jmap(l, lambda x : x*x)
self.assertEqual(JeevesLib.concretize(True, m[0]), 0)
self.assertEqual(JeevesLib.concretize(True, m[1]), 1)
self.assertEqual(JeevesLib.concretize(True, m[2]), 4)
self.assertEqual(JeevesLib.concretize(False, m[0]), 9)
self.assertEqual(JeevesLib.concretize(False, m[1]), 16)
self.assertEqual(JeevesLib.concretize(False, m[2]), 25)
self.assertEqual(JeevesLib.concretize(False, m[3]), 36)
def test_jlist(self):
x = JeevesLib.mkLabel('x')
JeevesLib.restrict(x, lambda ctxt : ctxt)
l = JeevesLib.mkSensitive(x, JeevesLib.JList([0,1,2]), JeevesLib.JList([3,4,5,6]))
def add10():
l.append(10)
def add11():
l.append(11)
JeevesLib.jif(x, add10, add11)
self.assertEqual(JeevesLib.concretize(True, l[0]), 0)
self.assertEqual(JeevesLib.concretize(True, l[1]), 1)
self.assertEqual(JeevesLib.concretize(True, l[2]), 2)
self.assertEqual(JeevesLib.concretize(True, l[3]), 10)
self.assertEqual(JeevesLib.concretize(False, l[0]), 3)
self.assertEqual(JeevesLib.concretize(False, l[1]), 4)
self.assertEqual(JeevesLib.concretize(False, l[2]), 5)
self.assertEqual(JeevesLib.concretize(False, l[3]), 6)
self.assertEqual(JeevesLib.concretize(False, l[4]), 11)
if __name__ == '__main__':
unittest.main()
```
#### File: jeeves/test/testZ3.py
```python
import macropy.activate
from smt.Z3 import *
from fast import AST
import unittest
class TestZ3(unittest.TestCase):
def setUp(self):
self.s = Z3()
def test_sat_ints(self):
x = self.s.getIntVar('x')
self.s.solverAssert(x > 0)
self.s.solverAssert(x < 2)
self.assertTrue(self.s.isSatisfiable())
def test_unsat_ints(self):
x = self.s.getIntVar('x')
self.s.solverAssert(x > 2)
self.s.solverAssert(x < 2)
self.assertFalse(self.s.isSatisfiable())
def test_multiple_vars(self):
x0 = self.s.getIntVar('x')
x1 = self.s.getIntVar('x')
self.s.solverAssert(x0 > 2)
self.s.solverAssert(x1 < 2)
self.assertFalse(self.s.isSatisfiable())
def test_multiple_vars2(self):
x0 = self.s.getIntVar('x')
x1 = self.s.getIntVar('y')
self.s.solverAssert(x0 > 2)
self.s.solverAssert(x1 < 2)
self.assertTrue(self.s.isSatisfiable())
def test_ast(self):
b1 = AST.Var('x')
b2 = AST.Var('y')
t = AST.Facet(b1, 1, 10) + AST.Facet(b2, 100, 1000)
self.assertTrue(self.s.isSatisfiable())
self.s.push()
self.s.boolExprAssert(t == 101)
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t == 1001)
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t == 110)
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t == 1010)
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t == 11)
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t == 1001)
self.s.boolExprAssert(t == 1010)
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t == 1001)
self.s.boolExprAssert(AST.Not(b1))
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t - 1 == 1009)
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t - 1 == 1008)
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t * 2 == 2002)
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(t * 2 == 2004)
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
def test_ast_bools(self):
b1 = AST.Var()
b2 = AST.Var()
b3 = AST.Var()
b4 = AST.Var()
self.s.push()
self.s.boolExprAssert(AST.And(b1, b2))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.And(b1, b2))
self.s.boolExprAssert(AST.Not(b1))
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Or(b1, b2))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Or(b1, b2))
self.s.boolExprAssert(AST.Not(b1))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Or(b1, b2))
self.s.boolExprAssert(AST.Not(b1))
self.s.boolExprAssert(AST.Not(b2))
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Or(b1, b2))
self.s.boolExprAssert(AST.And(AST.Not(b1),AST.Not(b2)))
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.And(AST.Or(b1, b2),AST.And(AST.Not(b1),AST.Not(b2))))
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Implies(b1, b2))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Implies(b1, b2))
self.s.boolExprAssert(b1)
self.s.boolExprAssert(b2)
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Implies(b1, b2))
self.s.boolExprAssert(b1)
self.s.boolExprAssert(AST.Not(b2))
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Implies(b1, b2))
self.s.boolExprAssert(AST.Not(b1))
self.s.boolExprAssert(b2)
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Implies(b1, b2))
self.s.boolExprAssert(AST.Not(b1))
self.s.boolExprAssert(AST.Not(b2))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
def test_ast_comparisons(self):
b1 = AST.Var()
b2 = AST.Var()
self.s.push()
self.s.boolExprAssert(AST.Facet(b1, 0, 1) != AST.Facet(b1, 1, 2) - 1)
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Facet(b1, 0, 1) < AST.Facet(b2, 3, 4))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Facet(b1, 0, 1) <= AST.Facet(b2, 3, 4))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Facet(b1, 0, 1) < AST.Facet(b2, -1, 0))
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Facet(b1, 0, 1) <= AST.Facet(b2, -1,0))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Facet(b1, 3, 4) > AST.Facet(b2, 0, 1))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Facet(b1, 3, 4) >= AST.Facet(b2, 0, 1))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Facet(b1, -1, 0) > AST.Facet(b2, 0, 1))
self.assertFalse(self.s.isSatisfiable())
self.s.pop()
self.s.push()
self.s.boolExprAssert(AST.Facet(b1, -1, 0) >= AST.Facet(b2, 0, 1))
self.assertTrue(self.s.isSatisfiable())
self.s.pop()
if __name__ == '__main__':
unittest.main()
```
#### File: web/calendar/test_Jcal.py
```python
import unittest
from random import random
from funkload.FunkLoadTestCase import FunkLoadTestCase
class Jcal(FunkLoadTestCase):
"""This test use a configuration file Conf.conf."""
def setUp(self):
"""Setting up test."""
self.server_url = self.conf_get('main', 'url')
def test_simple(self):
# The description should be set in the configuration file
server_url = self.server_url
# begin of test ---------------------------------------------
nb_time = self.conf_getInt('test_simple', 'nb_time')
for i in range(nb_time):
self.get(server_url, description='Get url')
# end of test -----------------------------------------------
if __name__ in ('main', '__main__'):
unittest.main()
```
#### File: web/coursemanager/test_Jcourse.py
```python
import unittest
from random import random
from funkload.FunkLoadTestCase import FunkLoadTestCase
from funkload.utils import extract_token
from funkload.utils import xmlrpc_get_credential
class Jcourse(FunkLoadTestCase):
"""This test use a configuration file Conf.conf."""
def setUp(self):
"""Setting up test."""
self.server_url = self.conf_get('main', 'url')
credential_host = self.conf_get('credential', 'host')
credential_port = self.conf_getInt('credential', 'port')
self.username, self.pwd = xmlrpc_get_credential(
credential_host, credential_port, "group1")
def login(self, page="/index"):
# The description should be set in the configuration file
server_url = self.server_url
reply = self.get(server_url + "/index",
description="Get index")
csrftoken = extract_token(self.getBody(), "name='csrfmiddlewaretoken' value='", "' />")
self.post(server_url + "/accounts/login/?next=" + page,
params=[['csrfmiddlewaretoken', csrftoken],
['redirect_to', page],
['username', self.username],
['password', self.pwd]],
description="Post /accounts/login/")
def logout(self):
self.get(self.server_url + "/accounts/logout/",
description="Get /accounts/logout/")
def test_show_all_assignments(self):
page = "/courses"
self.login(page)
self.assert_(page == self.getLastUrl(), "Error in login")
reply = self.get(self.server_url + page, description="Get assignments")
self.logout()
if __name__ in ('main', '__main__'):
unittest.main()
```
#### File: web/hipaa/test_Hipaa.py
```python
import unittest
from random import random
from funkload.FunkLoadTestCase import FunkLoadTestCase
from funkload.Lipsum import Lipsum
from funkload.utils import extract_token
from funkload.utils import xmlrpc_get_credential
class Hipaa(FunkLoadTestCase):
"""This test use a configuration file Conf.conf."""
def setUp(self):
"""Setting up test."""
self.server_url = self.conf_get('main', 'url')
self.lipsum = Lipsum()
def login_as(self, username, password):
# The description should be set in the configuration file
server_url = self.server_url
self.get(server_url + "/",
description="Get /")
reply = self.get(server_url + "/index",
description="Get index")
csrftoken = extract_token(self.getBody(), "name='csrfmiddlewaretoken' value='", "' />")
self.post(server_url + "/accounts/login/?next=/",
params=[['csrfmiddlewaretoken', csrftoken],
['redirect_to', '/index'],
['username', username],
['password', password]],
description="Post /accounts/login/")
def logout(self):
self.get(self.server_url + "/accounts/logout/",
description="Get /accounts/logout/")
def test_simple(self):
# The description should be set in the configuration file
server_url = self.server_url
# begin of test ---------------------------------------------
nb_time = self.conf_getInt('test_simple', 'nb_time')
for i in range(nb_time):
self.get(server_url, description='Get url')
# end of test -----------------------------------------------
def test_login(self):
page="/index"
self.login_as("admin", "admin")
reply = self.get(self.server_url + page, description="Get index")
self.logout()
"""
self.login_as("admin", "admin")
self.logout()
"""
def test_register(self):
username = self.lipsum.getUniqWord()
password = self.lipsum.getWord()
name = self.lipsum.getWord() + " " + self.lipsum.getWord()
email = self.lipsum.getWord() + "@example.org"
server_url = self.server_url
# self.get(server_url + "/register", description='Get url')
csrftoken = extract_token(self.getBody(), "name='csrfmiddlewaretoken' value='", "' />")
self.post(server_url + "/register",
params=[ ['csrfmiddlewaretoken', csrftoken],
['username', username],
['password1', password],
['password2', password],
['name', name],
['email', email],
['profiletype', '1']],
description="Post /register")
def test_credential(self):
credential_host = self.conf_get('credential', 'host')
credential_port = self.conf_getInt('credential', 'port')
login, pwd = xmlrpc_get_credential(credential_host, credential_port
, "group1")
self.login_as(login, pwd)
self.logout()
def test_random_register(self):
self.logout()
username = self.lipsum.getUniqWord()
password = self.lipsum.getWord()
server_url = self.server_url
# self.get(server_url + "/register", description='Get url')
csrftoken = extract_token(self.getBody(), "name='csrfmiddlewaretoken' value='", "' />")
self.post(server_url + "/register",
params=[ ['csrfmiddlewaretoken', csrftoken],
['username', username],
['password1', password],
['password2', password],
['name', 'New User'],
['email', '<EMAIL>'],
['profiletype', '1']],
description="Post /register")
# TODO: Check page after logging in.
self.logout()
self.login_as(username, password)
self.logout()
if __name__ in ('main', '__main__'):
unittest.main()
```
|
{
"source": "JeanRibes/ppc-freakout",
"score": 3
}
|
#### File: JeanRibes/ppc-freakout/gui.py
```python
import socket
import sys
from threading import Thread, Event
import pygame
import pygameMenu
from data import *
from matchmaking import FindGame
def dessiner_carte(screen, couleur, chiffre, font, y, x):
if couleur:
rgb = (0, 0, 255)
else:
rgb = (255, 0, 0)
card_rect = pygame.Rect(x, y, 40, 60)
pygame.draw.rect(screen, rgb, card_rect)
text = font.render(("R" if not couleur else "B") + str(chiffre), True, (255, 255, 255))
textrect: pygame.rect.RectType = text.get_rect()
textrect.centerx = card_rect.centerx
textrect.centery = card_rect.centery
screen.blit(text, textrect)
def dessiner_main(screen, x0, y0, cartes, font):
x = x0
y = y0
for couleur, chiffre in cartes:
dessiner_carte(screen, couleur, chiffre, font, y, x)
x += 50
def afficher_message(screen, message, font):
if message is not None:
texte = font.render(message, True, (255, 255, 255))
texte_rect = texte.get_rect()
texte_rect.centerx = screen.get_width() // 2
texte_rect.centery = screen.get_height() // 2
screen.blit(texte, texte_rect)
def highlight(screen, y, x, selected_highlight):
border = 4
pygame.draw.rect(screen, (252, 232, 3) if selected_highlight else (255, 255, 255),
pygame.Rect(x - border, y - border, 40 + 2 * border, 60 + 2 * border))
class NetworkReceiver(Thread):
hand = []
board: Board = Board()
message = None
game_finished = False
game_ready=False
def __init__(self, conn: socket.socket):
self.conn = conn
super().__init__(daemon=True)
def run(self) -> None:
while not self.game_finished or True:
buf = self.conn.recv(2048)
data: ServerMessage = ServerMessage.deserialize(buf)
self.message = data.infos
print(data) # print the type of message received
if data.type_message in [TYPE_TIMEOUT, TYPE_HAND_CHANGED]:
self.hand = [x.to_tuple() for x in data.payload]
elif data.type_message == TYPE_BOARD_CHANGED:
self.board = data.payload
if not self.game_ready:
print("debut jeu")
self.game_ready = True
elif data.type_message in STRING_TYPES:
self.message = data.payload
print("--->" + data.payload)
if data.type_message == TYPE_GAME_END:
print("**************************= JEU FINI =*******************************")
self.game_finished = True
self.message = data.payload
# self.conn.close()
def send(self, message: ClientMessage):
if not self.game_finished:
# self.conn.setblocking(False)
self.conn.send(message.serialize())
# board = [[(True, 4), (True, 4), (True, 4), (True, 4)],[(True, 5), (True, 5), (True, 5), (True, 5)],[(False, 5), (False, 5), (False, 5), ],[(False, 9), (False, 9), (False, 9), (False, 9), ],[(True, 1), (True, 1), (True, 1)],]
def dessiner_board(screen, x0, y0, board, font):
x = x0
y = y0
for cartes in board.values():
for carte in cartes:
dessiner_carte(screen, carte.color, carte.value, font, x, y)
x += 70
y += 50
x = x0
def move_selection(selected_index, i, hand):
if len(hand) > 0:
return (selected_index + i) % len(hand)
else:
return -1
screen = pygame.display.set_mode((1500, 500))
x0 = 10
y0 = 10
selected_index = 0
hy = y0
selected_highlight = False
police = "Noto Sans"
menu_running = True
if __name__ == '__main__':
server_finder = FindGame(daemon=True)
server_finder.start()
pygame.init() # initialize resources
pygame.display.set_caption("PPC Freak Out!")
font = pygame.font.SysFont(police, 20, 5)
clock = pygame.time.Clock() # initialize the FPS timer
def bg_func():
screen.fill((128, 0, 128))
# menu setup
main = pygameMenu.Menu(screen, 1000, 500, police, "Freak Out !", True, bgfun=bg_func, menu_height=400,
menu_width=900)
main._onclose = main.disable
main.add_text_input(title="nom d'utilisateur: ", textinput_id='username', default="", input_underline='_',
maxchar=15, align=pygameMenu.locals.ALIGN_LEFT)
main.add_selector("Joystick", values=[('On', 0), ('Off', 1)], selector_id='joystick', default=1)
main.add_option('Jouer', main.disable)
main.add_option('Quitter', pygameMenu.events.EXIT)
main.mainloop() # first display of the menu
main.disable(closelocked=True)
username = main.get_input_data()['username']
joystick = main.get_input_data()['joystick'][0] == 'On'
print(f"joystick : {joystick}, username={username}, input_date: {main.get_input_data()}")
try:
if len(sys.argv) > 2:
if sys.argv[1] == "-j":
joystick = True
if joystick:
j = pygame.joystick.Joystick(0)
j.init()
except:
j = None
# game configuration done
# start up the network
screen.fill((102, 102, 153))
afficher_message(screen, "Connexion au serveur de jeu", font)
pygame.display.update()
pygame.display.flip()
conn = socket.socket()
server_finder.join() # wait for the broadcast-listening timeout
conn.connect(server_finder.found_server) # listens to the local broadcast to discover the server
server_data = NetworkReceiver(conn) # initialize the connection to the game server
server_data.message = username
conn.send(ClientMessage(type_message=TYPE_JOIN, payload=username).serialize())
#
# lobby: waiting for the other players
start_menu = pygameMenu.TextMenu(screen, 700, 400, police, "Lobby", bgfun=bg_func)
def im_ready():
print('sending ready')
start_menu.disable(closelocked=True)
conn.send(ClientMessage(type_message=TYPE_READY).serialize())
start_menu.add_option("Demarrer", im_ready)
start_menu.add_option('Quitter', pygameMenu.events.EXIT)
# while waiting, initialize the in-game 'ESC' menu
game_menu = pygameMenu.Menu(screen, 800, 400, police, "Freak Out !", True, bgfun=bg_func, menu_height=300,
menu_width=700)
game_menu._onclose = game_menu.disable
game_menu.add_option('Quitter', pygameMenu.events.EXIT)
game_menu.add_option('Retour au jeu', game_menu.disable)
game_menu.disable()
server_data.start()
start_menu.mainloop()
screen.fill((153, 102, 0))
afficher_message(screen, "Attente des autres joueurs", font)
pygame.display.update()
pygame.display.flip()
print("starting game")
#server_data.game_ready_event.wait()
while not server_data.game_ready:
events=pygame.event.get()
game_menu.mainloop(events)
screen.fill((153, 102, 0))
afficher_message(screen, "Attente des autres joueurs", font)
clock.tick(60)
pygame.display.update()
pygame.display.flip()
if server_data.game_ready:
print('start game')
while True: # we exit through the menu
if selected_index >= len(server_data.hand):
selected_index -= 1
events = pygame.event.get()
game_menu.mainloop(events) # so the menu can be shown when ESC is pressed
for event in events:
if event.type == pygame.QUIT:
sys.exit(0)
elif event.type == pygame.MOUSEBUTTONDOWN:
x,y = pygame.mouse.get_pos()
if y >= 10 and y <= 70:
clicked_index= (x-10)//50
#print(f"calc:{clicked_index} index: {selected_index}")
if clicked_index < len(server_data.hand):
selected_index = clicked_index
#print(f"x={x} y={y}")
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
selected_highlight = True
elif event.key == pygame.K_ESCAPE:
game_menu.enable()
elif event.key == pygame.K_LEFT:
selected_index = move_selection(selected_index, -1, server_data.hand)
elif event.key == pygame.K_RIGHT:
selected_index = move_selection(selected_index, +1, server_data.hand)
elif event.key == pygame.K_q:
sys.exit(0)
elif event.key == pygame.K_s: # start the game
server_data.send(ClientMessage(type_message=TYPE_READY))
elif event.key == pygame.K_m:
server_data.message = None
elif event.type == pygame.JOYHATMOTION:
selected_index = move_selection(selected_index, event.value[0], server_data.hand)
elif event.type == pygame.JOYAXISMOTION:
# print("j{} h{} v{}".format(event.joy, event.axis, event.value))
selected_index = move_selection(selected_index, int(event.value), server_data.hand)
break # meant to reduce auto-repeat, but it does not actually work
elif event.type == pygame.JOYBUTTONDOWN:
# print("j{} b{}".format(event.joy, event.button))
if event.button == 5:
selected_index = move_selection(selected_index, 1, server_data.hand)
elif event.button == 4:
selected_index = move_selection(selected_index, -1, server_data.hand)
elif event.button == 2 or event.button == 7:
if not selected_highlight and len(server_data.hand) > 0:
print("séléctionné carte {} index {}".format(server_data.hand[selected_index], selected_index))
selected_highlight = True
elif event.button == 8: # select
sys.exit(0)
elif event.button == 0 and len(server_data.hand) > 0:
server_data.hand[selected_index] = (
not server_data.hand[selected_index][0], server_data.hand[selected_index][1])
elif event.button == 1 and len(server_data.hand) > 0:
server_data.hand[selected_index] = (
server_data.hand[selected_index][0], server_data.hand[selected_index][1] + 1)
elif event.button == 3 and len(server_data.hand) > 0:
server_data.hand[selected_index] = (
server_data.hand[selected_index][0], server_data.hand[selected_index][1] - 1)
if selected_highlight and not server_data.game_finished and len( # 'asynchronous' action
server_data.hand) > 0 and selected_index >= 0: # a card has been selected
server_data.send(ClientMessage(
type_message=TYPE_ACTION,
payload=Card.from_tuple(server_data.hand[selected_index])
)) # send our action to the server
print("sending action")
selected_highlight = False
if selected_index >= 0 and not server_data.game_finished:
highlight(screen, hy, x0 + 50 * selected_index, selected_highlight)
dessiner_main(screen, y0, x0, server_data.hand, font)
dessiner_board(screen, y0 + 90, x0, server_data.board, font)
afficher_message(screen, server_data.message, font)
pygame.display.update()
pygame.display.flip() # double-buffered
clock.tick(60) # wait in order to cap the FPS (instead of 100000 and a game eating 100% CPU)
screen.fill((0, 100, 0)) # reset for the next frame
```
#### File: JeanRibes/ppc-freakout/logic.py
```python
from queue import Queue
from data import *
from random import random
from copy import deepcopy
def generate_pile_random(N=20, max_value=9):
pile = Pile()
for _ in range(N):
value = int(random() * max_value) + 1
color = random() > 0.5
pile.append(Card(color, value))
return pile
def generate_pile_fixed(max_value):
pile = [Card(True, i) for i in range(1, max_value + 1)]
pile.extend(
[Card(False, i) for i in range(1, max_value + 1)]
)
return pile
def generate_pile(N, max_value):
#return generate_pile_fixed(max_value)
pile = generate_pile_random(N, max_value)
pile.extend(
generate_pile_fixed(max_value)) # every possible card plus a number of random cards
return pile
def shuffle(pile):
indexes = []
inp = deepcopy(pile)
for i, _ in enumerate(inp):
indexes.append(i)
out = []
while len(inp) > 0:
out.append(inp.pop(indexes[int(random() * len(inp))]))
return out
def move_valid(board:Board, card:Card):
ret = False
for cards in board.values():
for card_on_board in cards:
ret = True if card_on_board // card else ret
break
return ret
def broadcast(queues, item):
for q in queues:
q.put(item, block=False)
def flush(queue: Queue):
"""
thanks StackOverflow
:param queue:
:return:
"""
while not queue.empty():
queue.get()
if __name__ == '__main__':
cartes = generate_pile(5, 8)
print(List(cartes))
shuffled = List(shuffle(cartes))
print(shuffled)
for i in cartes:
if i not in shuffled:
print('erreur')
```
|
{
"source": "JeanRochCoulon/riscv-dv",
"score": 2
}
|
#### File: riscv-dv/scripts/verilator_log_to_trace_csv.py
```python
import argparse
import os
import re
import sys
import logging
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
from riscv_trace_csv import *
from lib import *
RD_RE = re.compile(r"(?P<pri>\d) 0x(?P<addr>[a-f0-9]+?) " \
"\((?P<bin>.*?)\) (?P<reg>[xf]\s*\d*?) 0x(?P<val>[a-f0-9]+)")
CORE_RE = re.compile(r"core.*0x(?P<addr>[a-f0-9]+?) \(0x(?P<bin>.*?)\) (?P<instr>.*?)$")
ILLE_RE = re.compile(r"trap_illegal_instruction")
LOGGER = logging.getLogger()
def process_instr(trace):
if trace.instr == "jal":
# Spike jal format jal rd, -0xf -> jal rd, -15
idx = trace.operand.rfind(",")
imm = trace.operand[idx+1:]
if imm[0] == "-":
imm = "-" + str(int(imm[1:], 16))
else:
imm = str(int(imm, 16))
trace.operand = trace.operand[0:idx+1] + imm
trace.operand = trace.operand.replace("(", ",")
trace.operand = trace.operand.replace(")", "")
def read_verilator_instr(match, full_trace):
'''Unpack a regex match for CORE_RE to a RiscvInstructionTraceEntry
If full_trace is true, extract operand data from the disassembled
instruction.
'''
# Extract the disassembled instruction.
disasm = match.group('instr')
# Spike's disassembler shows a relative jump as something like "j pc +
# 0x123" or "j pc - 0x123". We just want the relative offset.
disasm = disasm.replace('pc + ', '').replace('pc - ', '-')
instr = RiscvInstructionTraceEntry()
instr.pc = match.group('addr')
instr.instr_str = disasm
instr.binary = match.group('bin')
if full_trace:
opcode = disasm.split(' ')[0]
operand = disasm[len(opcode):].replace(' ', '')
instr.instr, instr.operand = \
convert_pseudo_instr(opcode, operand, instr.binary)
process_instr(instr)
return instr
def read_verilator_trace(path, full_trace):
'''Read a Spike simulation log at <path>, yielding executed instructions.
This assumes that the log was generated with the -l and --log-commits options
to Spike.
If full_trace is true, extract operands from the disassembled instructions.
Since Spike has a strange trampoline that always runs at the start, we skip
instructions up to and including the one at PC 0x1010 (the end of the
trampoline). At the end of a DV program, there's an ECALL instruction, which
we take as a signal to stop checking, so we ditch everything that follows
that instruction.
This function yields instructions as it parses them as tuples of the form
(entry, illegal). entry is a RiscvInstructionTraceEntry. illegal is a
boolean, which is true if the instruction caused an illegal instruction trap.
'''
# This loop is a simple FSM with states TRAMPOLINE, INSTR, EFFECT. The idea
# is that we're in state TRAMPOLINE until we get to the end of Spike's
# trampoline, then we switch between INSTR (where we expect to read an
# instruction) and EFFECT (where we expect to read commit information).
#
# We yield a RiscvInstructionTraceEntry object each time we leave EFFECT
# (going back to INSTR), we loop back from INSTR to itself, or we get to the
# end of the file and have an instruction in hand.
#
# On entry to the loop body, we are in state TRAMPOLINE if in_trampoline is
# true. Otherwise, we are in state EFFECT if instr is not None, otherwise we
# are in state INSTR.
end_trampoline_re = re.compile('core.*0x0000000080000000 ')
in_trampoline = True
instr = None
with open(path, 'r') as handle:
for line in handle:
if in_trampoline:
# The TRAMPOLINE state
if 'core 0: 0x0000000080000000' in line:
in_trampoline = False
continue
if instr is None:
# The INSTR state. We expect to see a line matching CORE_RE. We'll
# discard any other lines.
instr_match = CORE_RE.match(line)
if not instr_match:
continue
instr = read_verilator_instr(instr_match, full_trace)
# If instr.instr_str is 'ecall', we should stop.
if instr.instr_str == 'ecall':
break
continue
# The EFFECT state. If the line matches CORE_RE, we should have been in
# state INSTR, so we yield the instruction we had, read the new
# instruction and continue. As above, if the new instruction is 'ecall',
# we need to stop immediately.
instr_match = CORE_RE.match(line)
if instr_match:
yield (instr, False)
instr = read_verilator_instr(instr_match, full_trace)
if instr.instr_str == 'ecall':
break
continue
# The line doesn't match CORE_RE, so we are definitely on a follow-on
# line in the log. First, check for illegal instructions
if 'trap_illegal_instruction' in line:
yield (instr, True)
instr = None
continue
# The instruction seems to have been fine. Do we have commit data (from
# the --log-commits Spike option)?
commit_match = RD_RE.match(line)
if commit_match:
instr.gpr.append(gpr_to_abi(commit_match.group('reg')
.replace(' ', '')) +
':' + commit_match.group('val'))
instr.mode = commit_match.group('pri')
# At EOF, we might have an instruction in hand. Yield it if so.
if instr is not None:
yield (instr, False)
def process_verilator_sim_log(verilator_log, csv, full_trace = 0):
"""Process VERILATOR simulation log.
Extract instruction and affected register information from verilator simulation
log and write the results to a CSV file at csv. Returns the number of
instructions written.
"""
logging.info("Processing verilator log : %s" % verilator_log)
instrs_in = 0
instrs_out = 0
with open(csv, "w") as csv_fd:
trace_csv = RiscvInstructionTraceCsv(csv_fd)
trace_csv.start_new_trace()
for (entry, illegal) in read_verilator_trace(verilator_log, full_trace):
instrs_in += 1
if illegal and full_trace:
logging.debug("Illegal instruction: {}, opcode:{}"
.format(entry.instr_str, entry.binary))
# Instructions that cause no architectural update (which includes illegal
# instructions) are ignored if full_trace is false.
#
# We say that an instruction caused an architectural update if either we
# saw a commit line (in which case, entry.gpr will contain a single
# entry) or the instruction was 'wfi' or 'ecall'.
if not (full_trace or entry.gpr or entry.instr_str in ['wfi', 'ecall']):
continue
trace_csv.write_trace_entry(entry)
instrs_out += 1
logging.info("Processed instruction count : %d" % instrs_in)
logging.info("CSV saved to : %s" % csv)
return instrs_out
def main():
# Parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("--log", type=str, help="Input verilator simulation log")
parser.add_argument("--csv", type=str, help="Output trace csv_buf file")
parser.add_argument("-f", "--full_trace", dest="full_trace", action="store_true",
help="Generate the full trace")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
help="Verbose logging")
parser.set_defaults(full_trace=False)
parser.set_defaults(verbose=False)
args = parser.parse_args()
setup_logging(args.verbose)
# Process verilator log
process_verilator_sim_log(args.log, args.csv, args.full_trace)
if __name__ == "__main__":
main()
```
|
{
"source": "jeanrodriguez27/PLProject",
"score": 3
}
|
#### File: jeanrodriguez27/PLProject/CodeGenerator.py
```python
code = []
type = ""
def set_scripttype(sct):
global type
type = sct
print(type)
def initial_simple(speed):
block = "using System.Collections;\n" + \
"using System.Collections.Generic;\n" + \
"using UnityEngine;\n" + \
"\n" + \
"public class SimpleMovement : MonoBehaviour {\n" + \
"private float speed;\n" + \
"private float time; \n" + \
"void Start() \n { \n" + \
"speed = " + speed + "f; \n" + \
"time = Time.deltaTime; \n } \n"
if code != block:
code.append(block)
def initial_rigid_body(speed):
block = "using System.Collections;\n" \
"using System.Collections.Generic;\n" \
"using UnityEngine;\n" \
"public class RigidMovement: MonoBehaviour \n { \n" \
"float _speed = " + speed + "f;\n"
code.append(block)
def initial_char_cont(speed, gravity):
block = "using System.Collections;\n" \
"using System.Collections.Generic;\n" \
"using UnityEngine;\n" \
"\n" \
"public class PlayerMovement : MonoBehaviour {\n" \
"\n" \
"private float speed = " + speed + "f;\n" \
"private float jump = 1f;\n" \
"private float gravity = " + gravity + "f;\n" \
"float deltaX; \n float deltaZ; \n" \
"private Vector3 movement = Vector3.zero;\n" \
"\n" \
"private CharacterController charCont;\n" \
"\n" \
"// Use this for initialization\n" \
"void Start() {\n" \
"charCont = GetComponent<CharacterController>();\n" \
"\n" \
"if (charCont == null) {\n" \
"Debug.LogError(\"character controller could not be found.\");\n" \
"}\n} \n" \
"\n// FixedUpdate is called for physics calculations\n" \
"void FixedUpdate() { \n"
code.append(block)
def move(x, y, z):
if type == 'RIGIDBODY':
block = "string _movementX = \"" + x + "\" ; \n" \
"string _movementZ = \"" + z + "\" ; \n" \
"Rigidbody _rigidBody; \n float _moveX; \n float _moveZ; \n float dist_to_ground = 1f; \n" \
"void Start() \n { \n" \
"_rigidBody = this.GetComponent<Rigidbody>(); \n if (_rigidBody == null) \n { \n " \
"Debug.LogError(\"Rigid body could not be found.\"); \n } \n }" \
"void Update() \n { \n _moveX = Input.GetAxis(_movementX); \n _moveZ = Input.GetAxis(_movementZ); \n " \
"} \n" \
"void FixedUpdate() \n { \n Vector3 moveVector = new Vector3(_moveX,0,_moveZ)*_speed;\n"
code.append(block)
elif type == 'CHARACTERCONTROLLER':
block = "deltaX = Input.GetAxis( \"" + x + "\");\n" \
"deltaZ = Input.GetAxis( \"" + z + "\");\n" \
"\n" \
"movement = new Vector3(deltaX, 0, deltaZ); \n " \
"\n" \
"movement = transform.TransformDirection(movement);\n " \
"\n" \
"movement *= speed;\n"
code.append(block)
else:
block = "void Update() \n { \n"
code.append(block)
if x == 'NONE':
simple_noX(y, z)
elif y == 'NONE':
simple_noY(x, z)
else:
simple_noZ(x, y)
def addForce(force):
block = "_rigidBody.AddForce(moveVector,ForceMode."+force+"); \n"
code.append(block)
def set_Action(actionList):
for i in range(0, len(actionList), 2):
if actionList[i] == 'JUMP':
jump_action(actionList[i+1])
elif actionList[i] == 'DASH':
dash_action(actionList[i+1])
elif actionList[i] == 'WALK':
walk_action(actionList[i+1])
else:
jetpack_action(actionList[i+1])
def jump_action(key):
if type == 'RIGIDBODY':
block = "if(Input.GetKey(KeyCode."+key+") && isGrounded()) {\n" \
"jump();\n" \
"}\n"
code.append(block)
elif type == 'CHARACTERCONTROLLER':
block = "if (Input.GetKey(KeyCode."+key+") && charCont.isGrounded) \n { \n" \
"movement.y = jump; \n } \n"
code.append(block)
def dash_action(key):
if type == 'RIGIDBODY':
block = "if(Input.GetKey(KeyCode." + key + ")) {\n" \
"dash(moveVector);\n" \
"}\n"
code.append(block)
if type == 'CHARACTERCONTROLLER':
block = "if (Input.GetKey(KeyCode."+key+")) \n { \n" \
"movement *= 2f;\n } \n"
code.append(block)
def walk_action(key):
if type == 'RIGIDBODY':
block = "if(Input.GetKey(KeyCode." + key + ")) {\n" \
"_rigidBody.AddForce(moveVector*0.5f, ForceMode.Force);\n" \
"}\n"
code.append(block)
if type == 'CHARACTERCONTROLLER':
block = "if (Input.GetKey(KeyCode."+key+")) \n { \n" \
"movement *= 0.5f;\n } \n"
code.append(block)
def jetpack_action(key):
if type == 'RIGIDBODY':
block = "if(Input.GetKey(KeyCode." + key + ")) {\n" \
"jump();\n" \
"}\n"
code.append(block)
if type == 'CHARACTERCONTROLLER':
block = "if (Input.GetKey(KeyCode." + key + ")) \n { \n" \
"movement.y = jump; \n } \n"
code.append(block)
# Helpers for Simple movement
def simple_noY(x, z):
block = "transform.Translate(speed * Input.GetAxis(\"" + x + "\") * time, 0f, 0f); \n" \
"transform.Translate(0f, 0f, speed * Input.GetAxis(\"" + z + "\") * time); \n"
code.append(block)
def simple_noX(y, z):
block = "transform.Translate(0f,speed * Input.GetAxis(\"" + y + "\") * time, 0f); \n" \
"transform.Translate(0f, 0f, speed * Input.GetAxis(\"" + z + "\") * time); \n"
code.append(block)
def simple_noZ(x, y):
block = "transform.Translate(speed * Input.GetAxis(\"" + x + "\") * time, 0f, 0f); \n" \
"transform.Translate(0f,speed * Input.GetAxis(\"" + y + "\") * time, 0f); \n"
code.append(block)
# Script enders
def end_rigidbody():
block = "\n" \
"}\n" \
"bool isGrounded() {\n" \
"return Physics.Raycast(transform.position, Vector3.down, dist_to_ground);\n" + "}\n" \
"\n" \
"void jump() {\n" \
"_rigidBody.AddForce(new Vector3(0,1f,0), ForceMode.Impulse);\n" \
"}\n" \
"void dash(Vector3 moveVector) \n { \n " \
"_rigidBody.AddForce(moveVector*2f, ForceMode.Force); \n } \n } \n"
code.append(block)
def end_char_cont():
block = "\n" \
"movement.y -= gravity;" \
"\n" \
"charCont.Move(movement);\n" \
"}\n" \
"}\n"
code.append(block)
def end_simple():
block = "\n } \n } \n"
code.append(block)
def walk_char_cont():
block = "if (Input.GetKey(KeyCode.LeftShift))\n" \
"movement *= .5f;\n"
code.append(block)
def upload():
final_code = code[0]
for block in code:
if final_code != block:
final_code += block
if type == 'RIGIDBODY':
file = open("RigidMovement.cs", 'w')
elif type == 'CHARACTERCONTROLLER':
file = open("PlayerMovement.cs", 'w')
else:
file = open("SimpleMovement.cs", 'w')
file.write(final_code)
file.close()
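# Hedged usage sketch (assumed driver, not part of the original module): one way
# the generator functions could be chained to emit a simple movement script.
# The 'SIMPLE' tag, axis names and speed value below are illustrative guesses.
if __name__ == '__main__':
    set_scripttype('SIMPLE')
    initial_simple('5')
    move('Horizontal', 'NONE', 'Vertical')
    end_simple()
    upload()  # writes SimpleMovement.cs to the working directory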
```
|
{
"source": "Jeans212/codility-dev-training",
"score": 4
}
|
#### File: codility-dev-training/Arrays/cyclic_rotation.py
```python
from collections import deque
def solution(A, K):
# write your code in Python 3.6
deq_A = deque(A)
deq_A.rotate(K)
return list(deq_A)
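# hedged example (values assumed for illustration): rotating [3, 8, 9, 7, 6]
# three places to the right gives [9, 7, 6, 3, 8]
if __name__ == '__main__':
    print(solution([3, 8, 9, 7, 6], 3))  # expected: [9, 7, 6, 3, 8]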
```
#### File: codility-dev-training/iterations/binarygap.py
```python
def solution(N):
# write your code in Python 3.6
# converting the input N to binary number
binary = format(N, 'b')
temp = []
# case 1: there are no '0's in the binary number e.g. '11111111111'
if '0' not in binary:
return 0
# case 2a: there are less than two '1's in the binary number e.g. '10000000'
if binary.count('1') < 2:
return 0
else: # case 2b: if there are 2 or more '1's in the binary number e.g. '1100010000000'
if binary[-1] == '1':
for each_zeros in binary.split('1'):
temp.append(len(each_zeros))
return max(temp)
elif binary[0] != '1':
for each_zeros in binary.split('1'):
temp.append(len(each_zeros))
temp.remove(temp[0])
return max(temp)
else:
for each_zeros in binary.split('1'):
temp.append(len(each_zeros))
temp.remove(temp[-1])
return max(temp)
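# hedged check (values assumed): 9 is 0b1001 (gap of 2) and 529 is 0b1000010001
# (longest gap of 4), matching the classic binary-gap examples
if __name__ == '__main__':
    print(solution(9), solution(529))  # expected: 2 4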
```
#### File: codility-dev-training/Time Complexity/frog_jump.py
```python
import math
def solution(X, Y, D):
# write your code in Python 3.6
return math.ceil((Y-X) / D)
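# hedged check (values assumed): from X=10 to at least Y=85 with jumps of D=30,
# ceil(75/30) = 3 jumps are needed
if __name__ == '__main__':
    print(solution(10, 85, 30))  # expected: 3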
```
|
{
"source": "jeansaad/hello_world",
"score": 3
}
|
#### File: hello_world/tests/test_basic.py
```python
from hello_world import hello_world
from unittest import TestCase
class BasicTest(TestCase):
def test_basic_hello_world(self):
"""
Test basic hello world messaging
"""
self.assertEqual(hello_world(), 'Hello, World!')
```
|
{
"source": "jeansabety/learning_python",
"score": 4
}
|
#### File: jeansabety/learning_python/mcb185.py
```python
def read_fasta(filename):
name = None
seq = []
with open(filename) as fp:
while True:
line = fp.readline()
if line == '': break
elif line.startswith('>'):
if len(seq) > 0: # now is the time to return name, seq
yield name, ''.join(seq)
words = line.split()
name = words[0][1:]
seq = []
else:
line = line.rstrip()
seq.append(line)
yield name, ''.join(seq)
#gc content
def gc(dna):
g = dna.count('G')
c = dna.count('C')
return (g + c)/len(dna)
#n50
def n50(length):
    # N50: walk the lengths from largest to smallest until half the total is reached
    values = sorted(length, reverse=True)
    total = sum(values)
    running_sum = 0
    for value in values:
        running_sum += value
        if running_sum > total/2:
            return value
#generate random sequence of specified size and gc content
import random
def randseq(length, gc) :
seq = ''
for i in range(length):
if random.random() < gc: #has to be based on given gc content
seq += random.choice('GC')
else : seq += random.choice('AT')
return seq
#ORF:
def orf(seq):
#find ATG
lengths = []
for i in range(len(seq) -2): #all the starting positions we could possibly see
start = None
stop = None
if seq[i:i+3] == 'ATG':
start = i
#one you find an ATG, you have to go by triplets
for j in range(i, len(seq) -2, 3) : #starting at A, through the rest of the sequence, by 3s
codon = seq[j: j+3] #stop codon starts at j
if codon == 'TAA' or codon == 'TAG' or codon == 'TGA' :
stop = j
break
if stop != None: lengths.append((stop - start)//3) #only if you find a start,stop pair do you add the orf length to the list | /3 so we get the AA length
return lengths
#translate dictionary
gcode = {
'AAA' : 'K', 'AAC' : 'N', 'AAG' : 'K', 'AAT' : 'N',
'ACA' : 'T', 'ACC' : 'T', 'ACG' : 'T', 'ACT' : 'T',
'AGA' : 'R', 'AGC' : 'S', 'AGG' : 'R', 'AGT' : 'S',
'ATA' : 'I', 'ATC' : 'I', 'ATG' : 'M', 'ATT' : 'I',
'CAA' : 'Q', 'CAC' : 'H', 'CAG' : 'Q', 'CAT' : 'H',
'CCA' : 'P', 'CCC' : 'P', 'CCG' : 'P', 'CCT' : 'P',
'CGA' : 'R', 'CGC' : 'R', 'CGG' : 'R', 'CGT' : 'R',
'CTA' : 'L', 'CTC' : 'L', 'CTG' : 'L', 'CTT' : 'L',
'GAA' : 'E', 'GAC' : 'D', 'GAG' : 'E', 'GAT' : 'D',
'GCA' : 'A', 'GCC' : 'A', 'GCG' : 'A', 'GCT' : 'A',
'GGA' : 'G', 'GGC' : 'G', 'GGG' : 'G', 'GGT' : 'G',
'GTA' : 'V', 'GTC' : 'V', 'GTG' : 'V', 'GTT' : 'V',
'TAA' : '*', 'TAC' : 'Y', 'TAG' : '*', 'TAT' : 'Y',
'TCA' : 'S', 'TCC' : 'S', 'TCG' : 'S', 'TCT' : 'S',
'TGA' : '*', 'TGC' : 'C', 'TGG' : 'W', 'TGT' : 'C',
'TTA' : 'L', 'TTC' : 'F', 'TTG' : 'L', 'TTT' : 'F',
}
#translate:
def translate(seq):
seq = seq.upper() #normalize upper and lowercase letters
protein = ''
for i in range(0, len(seq) -2, 3): #each codon
#protein += gcode[seq[i:i+3]]
codon = seq[i:i+3]
if codon in gcode:
protein += gcode[codon] #if codon in dict, use it
else :
protein += 'X' #any weird codon will become an X
return protein
#reverse compliment:
def rc(seq):
rcdna = ''
for i in range(len(seq) -1, -1, -1) :
nt = seq[i]
if nt == 'A' : nt = 'T'
elif nt == 'T' : nt = 'A'
elif nt == 'C' : nt = 'G'
elif nt == 'G' : nt = 'C'
else : nt = 'N'
rcdna += nt
return rcdna
#ORF but prints the dna seq rather than the length:
def orfseq(seq):
#find ATG
for i in range(len(seq) -2): #all the starting positions we could possibly see
start = None
stop = None
if seq[i:i+3] == 'ATG':
start = i
#once you find an ATG, you have to go by triplets
for j in range(i, len(seq) -2, 3) : #starting at A, through the rest of the sequence, by 3s
codon = seq[j: j+3] #stop codon starts at j
if codon == 'TAA' or codon == 'TAG' or codon == 'TGA' :
stop = j
break
if stop != None: yield seq[start:stop] #yield returns one at a time - doesn't create the whole list, does one at a time
#entropy
import math
def entropy(prob):
assert(math.isclose(sum(prob), 1.0))
h = 0
for i in range(len(prob)):
if prob[i] != 0: h -= prob[i] * math.log2(prob[i])
return(h)
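#hedged self-check (illustrative only, not part of the original module):
#a uniform distribution over four symbols carries 2 bits of entropy, and
#reverse-complementing twice returns the original sequence
if __name__ == '__main__':
    assert math.isclose(entropy([0.25, 0.25, 0.25, 0.25]), 2.0)
    assert rc(rc('ATGCCC')) == 'ATGCCC'
    print(translate('ATGAAATAA'))  # expected: MK*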
```
|
{
"source": "jeanschmidt/poc_deeplearning_industrial",
"score": 3
}
|
#### File: poc_deeplearning_industrial/learn/test.py
```python
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import time
import torch
from torch.autograd import Variable
def chunks(l):
n = 40
for i in range(0, len(l), n):
yield l[i:i + n]
def print_test_result(set_type, test_loss, correct, test_dataset_len,
evaluate_time):
print(
'\n"{}" set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%) '
'Evaluate Time: {:.4f}s ({:.2f}ms per test)\n'.format(
set_type,
test_loss,
correct,
test_dataset_len,
100. * correct / test_dataset_len,
evaluate_time,
(evaluate_time / test_dataset_len) * 1000
)
)
def test_data(model, criterion, data, target):
start_time = time.time()
if torch.cuda.is_available():
data = data.cuda()
target = target.cuda()
model.eval()
test_dataset_len = data.size()[0]
output = model(data)
test_loss = criterion(output, target).data.sum()
pred = output.data.max(1, keepdim=True)[1]
target_pred = target.max(1, keepdim=True)[1]
correct = pred.eq(target_pred.data).sum()
test_loss /= test_dataset_len
evaluate_time = time.time() - start_time
return {
'test_loss': test_loss,
'correct': correct,
'test_dataset_len': test_dataset_len,
'evaluate_time': evaluate_time,
}
def _sum_dicts(total, inc):
for k in inc.keys():
total[k] = total.get(k, 0.0) + inc.get(k, 0.0)
return total
def test(test_dataset, model, criterion):
data_lst = list(chunks(test_dataset[0]))
target_lst = list(chunks(test_dataset[1]))
total = {}
for idx in xrange(len(data_lst)):
ret = test_data(
model,
criterion,
Variable(data_lst[idx], volatile=True),
Variable(target_lst[idx])
)
_sum_dicts(total, ret)
return total
def test_train(train_dataset, model, criterion):
total = {}
for _, get_data, get_target in train_dataset:
ret = test_data(
model,
criterion,
Variable(get_data(), volatile=True),
Variable(get_target())
)
_sum_dicts(total, ret)
return total
def test_print_save(model, criterion, epoch, train_batches, jal):
test_result = test(jal.test_dataset, model, criterion)
print_test_result('TEST', **test_result)
train_result = test_train(train_batches, model, criterion)
print_test_result('TRAIN', **train_result)
```
|
{
"source": "jeanschmidt/python_propertyutils",
"score": 3
}
|
#### File: python_propertyutils/lazyproperty/classproperty.py
```python
from __future__ import unicode_literals
class ClassPropertyDescriptor(object):
"""
Thanks StackOverflow =)
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
if fget and not isinstance(fget, (classmethod, staticmethod)):
fget = classmethod(fget)
if fset and not isinstance(fset, (classmethod, staticmethod)):
fset = classmethod(fset)
if fdel and not isinstance(fdel, (classmethod, staticmethod)):
fdel = classmethod(fdel)
if doc is None and fget is not None:
doc = fget.__doc__
self.fget = fget
self.fset = fset
self.fdel = fdel
self.__doc__ = doc
def __get__(self, obj, klass=None):
if self.fget is None:
raise AttributeError("can't get attribute")
if klass is None:
klass = type(obj)
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value):
if self.fset is None:
raise AttributeError("can't set attribute")
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
def __delete__(self, obj):
if self.fdel is None:
raise AttributeError("can't delete attribute")
type_ = type(obj)
return self.fdel.__get__(obj, type_)()
def getter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fget = func
return self
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def deleter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fdel = func
return self
def classproperty(fget, fset=None, fdel=None):
return ClassPropertyDescriptor(fget=fget, fset=fset, fdel=fdel)
```
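A short, hypothetical usage sketch of the decorator above (the Config class is made up for illustration):

```python
class Config(object):
    _env = 'production'

    @classproperty
    def env(cls):
        return cls._env

print(Config.env)    # 'production', resolved on the class without instantiating it
print(Config().env)  # the descriptor also works through an instance
```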
|
{
"source": "JeanSchnorr/votaluno",
"score": 2
}
|
#### File: votaluno/votacoes/models.py
```python
from django.db import models
from django.contrib.auth.models import User
CHOICES_BIMESTRE=[(1,'1'),(2,'2'),(3,'3'),(4,'4')]
class Disciplina(models.Model):
nome = models.TextField(max_length=30)
def __str__(self):
return self.nome
class Curso(models.Model):
nome = models.TextField(max_length=30)
def __str__(self):
return self.nome
class Turma(models.Model):
curso = models.ForeignKey(Curso, on_delete=models.CASCADE)
ano = models.IntegerField()
sala = models.TextField(max_length=1)
def __str__(self):
return f'{self.ano}º "{self.sala}" {self.curso}'
class OfertaDisciplina(models.Model):
professor = models.ForeignKey(User, on_delete=models.CASCADE,related_name="disciplinas_professor")
turma = models.ForeignKey(Turma, on_delete=models.CASCADE,related_name="disciplinas_turma")
disciplina = models.ForeignKey(Disciplina, on_delete=models.CASCADE)
def __str__(self):
return f'{self.disciplina} - {self.professor}'
class Aluno(models.Model):
nome = models.TextField(max_length=50)
cpf = models.TextField(max_length=14)
foto = models.ImageField(default='default.png',upload_to='alunos/')
turma = models.ForeignKey(Turma, on_delete=models.CASCADE,related_name="alunos_turma")
def __str__(self):
return self.nome
class AvaliacaoAluno(models.Model):
oferta_disciplina = models.ForeignKey(OfertaDisciplina, on_delete=models.CASCADE,related_name="avaliacoes_aluno_disciplina")
aluno = models.ForeignKey("Aluno", on_delete=models.CASCADE,related_name="avaliacoes_aluno")
bimestre = models.PositiveIntegerField(choices=CHOICES_BIMESTRE)
avaliacao = models.PositiveIntegerField(default=0)
outros_avaliacao = models.TextField(max_length=255,blank=True,null=True)
ano = models.PositiveIntegerField()
status = models.BooleanField(default=True)
class Meta:
verbose_name = 'Avaliação de aluno'
verbose_name_plural = 'Avaliações de alunos'
def __str__(self):
return f'{self.aluno} - {self.oferta_disciplina}'
class AvaliacaoTurma(models.Model):
oferta_disciplina = models.ForeignKey(OfertaDisciplina, on_delete=models.CASCADE)
bimestre = models.PositiveIntegerField(choices=CHOICES_BIMESTRE)
ano = models.PositiveIntegerField()
avaliacao = models.PositiveIntegerField(default=0)
outros_avaliacao = models.TextField(max_length=255,blank=True,null=True)
status = models.BooleanField(default=True)
class Meta:
verbose_name = 'Avaliação de turma'
verbose_name_plural = 'Avaliações de turmas'
def __str__(self):
return f'{self.oferta_disciplina.turma} - {self.oferta_disciplina} - {self.ano}'
class Conselho(models.Model):
turma = models.ForeignKey(Turma, on_delete=models.CASCADE)
data = models.DateField(auto_now=False)
situacao = models.BooleanField(default=False)
def __str__(self):
if self.situacao:
text = 'Iniciada'
else:
text = 'Fechada'
return f'{self.turma} - {text}'
class UsuarioConselho(models.Model):
usuario = models.ForeignKey(User, on_delete=models.CASCADE,related_name="conselhos_usuario")
conselho = models.ForeignKey(Conselho, on_delete=models.CASCADE)
def __str__(self):
return f'{self.usuario} - {self.conselho}'
class Votacao(models.Model):
aluno = models.ForeignKey(Aluno, on_delete=models.CASCADE)
situacao = models.BooleanField(default=False)
conselho = models.ForeignKey(Conselho, on_delete=models.CASCADE,related_name="votacoes_conselho")
class Meta:
verbose_name = 'Votação'
verbose_name_plural = 'Votações'
def __str__(self):
return f'{self.aluno} - {self.conselho.turma}'
class Voto(models.Model):
    SITUACAO_CHOICES = [
        ('Aprovar', 'Aprovar'),
        ('Reprovar', 'Reprovar'),
        ('Abster', 'Abster'),
    ]  # a list (not a set literal) keeps the choice order deterministic
votacao = models.ForeignKey(Votacao, on_delete=models.CASCADE,related_name="votos_votacao")
usuario = models.ForeignKey(User, on_delete=models.CASCADE, related_name="votos_usuario")
situacao = models.CharField(max_length=8, choices=SITUACAO_CHOICES, default='Abster')
votado = models.BooleanField(default=False)
def __str__(self):
return f'{self.usuario} para {self.votacao}'
```
#### File: votaluno/votacoes/views.py
```python
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.contrib.auth.models import User
from .models import Aluno, AvaliacaoTurma, AvaliacaoAluno, Turma, OfertaDisciplina, Conselho,UsuarioConselho, Votacao, Voto
from django.shortcuts import render, redirect
from datetime import datetime
from .avaliacoes import *
@login_required
def home(request):
context = {}
conselhos_professor=[]
conselhos_abertos = []
conselhos = request.user.conselhos_usuario.all()
for conselho in conselhos:
conselhos_professor.append(conselho.conselho)
for conselho in Conselho.objects.filter(situacao=True):
if conselho in conselhos_professor:
conselhos_abertos.append(conselho)
if request.user.is_superuser:
context['conselhos_abertos'] = Conselho.objects.filter(situacao=True)
else:
context['conselhos_abertos'] = conselhos_abertos
return render(request, 'home.html',context)
# Views that handle class (turma) evaluations
@login_required
def avaliacoesTurmas(request):
context = {}
avaliacoes=[]
avaliacoes_lancadas=[]
ofertas = OfertaDisciplina.objects.filter(professor=request.user)
for oferta in ofertas:
avs = AvaliacaoTurma.objects.filter(oferta_disciplina=oferta).filter(status=True)
avs_lancadas = AvaliacaoTurma.objects.filter(oferta_disciplina=oferta).filter(status=False)[:10]
if len(avs_lancadas) > 0:
for avaliacoesLancadas in avs_lancadas:
avaliacoes_lancadas.append(avaliacoesLancadas)
if len(avs) > 0:
for avaliacoesDisciplina in avs:
avaliacoes.append(avaliacoesDisciplina)
context['avaliacoes'] = avaliacoes
context['avaliacoes_lancadas'] = avaliacoes_lancadas
return render(request,'avaliacoes/avaliacoesTurmas.html',context)
@login_required
def criarAvaliacaoTurma(request, avaliacao_id):
context = {}
avaliacao = AvaliacaoTurma.objects.get(id=avaliacao_id)
context['avaliacao'] = avaliacao
context['opcoes'] = DICT_TURMA
return render(request,'avaliacoes/avaliarTurma.html', context)
@login_required
def lancarAvaliacaoTurma(request, avaliacao_id):
soma = 0
selecionadas = request.POST.getlist('checks')
for opcao in selecionadas:
soma += int(opcao)
avaliacao = AvaliacaoTurma.objects.get(pk=avaliacao_id)
avaliacao.status = False
avaliacao.avaliacao = soma
avaliacao.outros_avaliacao = request.POST.get('outros')
avaliacao.save()
return avaliacoesTurmas(request)
@login_required
def visualizarAvaliacaoTurma(request, avaliacao_id):
context = {}
avaliacao = AvaliacaoTurma.objects.get(id=avaliacao_id)
context['avaliacao'] = avaliacao
context['opcoes'] = get_array_turma(avaliacao.avaliacao)
return render(request,'avaliacoes/visualizarAvaliacaoTurma.html', context)
# Views that handle student evaluations
@login_required
def gerarAvaliacoesTurma(request):
turma = Turma.objects.get(id=request.POST.get("turma"))
bimestre = request.POST.get("bimestre")
alunos = Aluno.objects.filter(turma=turma)
ofertaDisciplinas_turma = OfertaDisciplina.objects.filter(turma=turma)
for disciplina in ofertaDisciplinas_turma:
avaliacaoTurma = AvaliacaoTurma(oferta_disciplina=disciplina,bimestre=bimestre,ano=int(datetime.now().year))
avaliacaoTurma.save()
for aluno in alunos:
avaliacaoAluno = AvaliacaoAluno(oferta_disciplina=disciplina,aluno=aluno,bimestre=bimestre,ano=int(datetime.now().year))
avaliacaoAluno.save()
return administracao(request)
@login_required
def avaliacoesAlunos(request):
context = {}
avaliacoes=[]
avaliacoes_lancadas=[]
ofertas = OfertaDisciplina.objects.filter(professor=request.user)
for oferta in ofertas:
avs = AvaliacaoAluno.objects.filter(oferta_disciplina=oferta).filter(status=True)
avs_lancadas = AvaliacaoAluno.objects.filter(oferta_disciplina=oferta).filter(status=False)[:10]
if len(avs_lancadas) > 0:
for avaliacoesLancadas in avs_lancadas:
avaliacoes_lancadas.append(avaliacoesLancadas)
if len(avs) > 0:
for avaliacoesDisciplina in avs:
avaliacoes.append(avaliacoesDisciplina)
context['avaliacoes'] = avaliacoes
context['avaliacoes_lancadas'] = avaliacoes_lancadas
return render(request,'avaliacoes/avaliacoesAlunos.html',context)
@login_required
def criarAvaliacaoAluno(request, avaliacao_id):
context = {}
avaliacao = AvaliacaoAluno.objects.get(id=avaliacao_id)
context['avaliacao'] = avaliacao
context['opcoes'] = DICT_ALUNO
return render(request,'avaliacoes/avaliarAluno.html', context)
@login_required
def lancarAvaliacaoAluno(request, avaliacao_id):
soma = 0
selecionadas = request.POST.getlist('checks')
for opcao in selecionadas:
soma += int(opcao)
avaliacao = AvaliacaoAluno.objects.get(pk=avaliacao_id)
avaliacao.status = False
avaliacao.avaliacao = soma
avaliacao.outros_avaliacao = request.POST.get('outros')
avaliacao.save()
return avaliacoesAlunos(request)
@login_required
def visualizarAvaliacaoAluno(request, avaliacao_id):
context = {}
avaliacao = AvaliacaoAluno.objects.get(id=avaliacao_id)
context['avaliacao'] = avaliacao
context['opcoes'] = get_array_aluno(avaliacao.avaliacao)
return render(request,'avaliacoes/visualizarAvaliacaoAluno.html', context)
# Views that handle administration
@login_required
def administracao(request):
context = {}
turmas = Turma.objects.all()
conselhosFechados = Conselho.objects.filter(situacao=False)
conselhosAbertos = Conselho.objects.filter(situacao=True)
context['turmas'] = turmas
context['conselhosFechados'] = conselhosFechados
context['conselhosAbertos'] = conselhosAbertos
return render(request,'administracao.html', context)
@login_required
def admin(request):
return HttpResponseRedirect('/admin')
# Views for councils (Conselhos)
@login_required
def gerarConselho(request):
    # Create the council
turma = Turma.objects.get(id=request.POST.get("turma"))
data = request.POST.get("data")
conselho = Conselho.objects.create(
turma= turma,
data= data,
situacao = False,
)
conselho.save()
    # Build and populate the list of teachers who teach this class
professores = []
for disciplina in turma.disciplinas_turma.all():
if not disciplina.professor in professores:
professores.append(disciplina.professor)
    # Create the UsuarioConselho relationships
if professores:
print(professores)
for professor in professores:
usuario_conselho = UsuarioConselho(usuario=professor,conselho=conselho)
usuario_conselho.save()
    # Create the ballots (Votacao) for the students of this council's class
alunos = Aluno.objects.filter(turma=turma)
for aluno in alunos:
votacao_aluno = Votacao(aluno=aluno,conselho=conselho)
votacao_aluno.save()
        # Create blank votes for the teachers of this council
for professor in professores:
voto_branco = Voto(usuario=professor,votacao=votacao_aluno)
voto_branco.save()
return administracao(request)
@login_required
def iniciarConselho(request):
    # Look up the council and open it
conselho_id = request.POST.get("conselho")
conselho = Conselho.objects.get(id=conselho_id)
conselho.situacao = True
conselho.save()
    # Look up and open the ballots of the students belonging to this council's class
alunos = Aluno.objects.filter(turma=conselho.turma)
for aluno in alunos:
votacao_aluno = Votacao.objects.get(aluno=aluno,conselho=conselho)
votacao_aluno.situacao = True
votacao_aluno.save()
return administracao(request)
@login_required
def encerrrarConselho(request):
    # Look up the council and close it
conselho_id = request.POST.get("select")
conselho = Conselho.objects.get(id=conselho_id)
conselho.situacao = False
conselho.save()
    # Look up and close the ballots of the students belonging to this council's class
alunos = Aluno.objects.filter(turma=conselho.turma)
for aluno in alunos:
votacao_aluno = Votacao.objects.get(aluno=aluno,conselho=conselho)
votacao_aluno.situacao = False
votacao_aluno.save()
return administracao(request)
def exibirConselho(request, conselho_id):
context = {}
conselho = Conselho.objects.get(id = conselho_id)
votacoes_conselho = conselho.votacoes_conselho.all()
votos_usuario = request.user.votos_usuario.all()
votos_conselho = []
for votacao in votacoes_conselho:
for voto in votacao.votos_votacao.filter(usuario=request.user).filter(votado=False):
votos_conselho.append(voto)
if request.user.is_superuser:
context['votacoes_conselho'] = votacoes_conselho
context['conselho'] = conselho
context['votos_conselho'] = votos_conselho
return render(request,'votacoes/exibirConselho.html',context)
# Views for ballots (Votações)
def exibirVoto(request,votacao_id):
context = {}
votacao = Votacao.objects.get(id=votacao_id)
if request.user.is_superuser:
context['votos_aprovar'] = len(votacao.votos_votacao.filter(votado=True).filter(situacao="Aprovar"))
context['votos_reprovar'] = len(votacao.votos_votacao.filter(votado=True).filter(situacao="Reprovar"))
context['votos_abster'] = len(votacao.votos_votacao.filter(votado=True).filter(situacao="Abster"))
context['votos_usuarios'] = votacao.votos_votacao.filter(votado=True)
else:
context['voto'] = votacao.votos_votacao.filter(usuario=request.user).filter(votado=False)[0]
context['votacao'] = votacao
context['conselho'] = votacao.conselho
return render(request,'votacoes/voto.html',context)
def gerarHistoricoAluno(id_aluno):
    historico = {}
    aluno = Aluno.objects.get(id=id_aluno)
    avaliacoes_aluno = aluno.avaliacoes_aluno.all()
    for avaliacao in avaliacoes_aluno:
        # TODO: aggregate each evaluation into the history (the original body was an unfinished placeholder)
        pass
    return historico
def gerarHistoricoTurma(id_turma):
historico = {}
return historico
def lancarVoto(request,voto_id):
context = {}
voto = Voto.objects.get(id=voto_id)
conselho = voto.votacao.conselho
alunos_conselho = conselho.turma.alunos_turma.all()
voto.situacao = request.POST.get('botao')
voto.votado = True
voto.save()
return exibirConselho(request, conselho.id)
# error handlers
def error404(request,exception):
return render(request, '404.html', status=404)
def error500(request):
return render(request, '500.html', status=500)
def faq(request):
return render(request, 'faq.html')
```
|
{
"source": "jeanschuchardt/FGApp",
"score": 3
}
|
#### File: Servidores/inserter/insert.py
```python
import pandas as pd
from sqlalchemy import create_engine
from unidecode import unidecode
from datetime import datetime
def insert_cadastro(cadastro,remuneracao):
chunk = 10000
    # read the remuneracao CSV file
remuneracao_result_set = read_csv(remuneracao)
    # strip Portuguese accented characters from the column names
remuneracao_result_set.columns = remove_pt_caracteres(remuneracao_result_set)
    # drop the dollar (U$) columns
remuneracao_result_set = remove_coll_dolar(remuneracao_result_set)
    # normalise column names (remove whitespace and symbols)
remove_spaces(remuneracao_result_set)
    # drop unneeded columns
remuneracao_result_set = remove_columns_remuneracao(remuneracao_result_set)
    # create a new column holding the servant's total remuneration
create_total_column(remuneracao_result_set)
    # remove in the future -> exists only for testing
    #
    # keep_col = ['ANO','MES','Id_SERVIDOR_PORTAL','CPF','NOME','total_remuneracao'] #drop the intermediate values of the sum
# y = y[keep_col]
#
#
# ##
for i, result_set in enumerate((pd.read_csv(cadastro, encoding ="ISO-8859-1", delimiter=';', skipinitialspace=True,error_bad_lines = False, engine='c',chunksize=chunk))):
result_set = remove_columns_cadastro(result_set)
result_set = remove_last_line(result_set)
result_set = define_columns_types(result_set)
        # merge holds the result of the inner join between the remuneracao and cadastro tables
merge = pd.merge(result_set,remuneracao_result_set,how='inner',on=['Id_SERVIDOR_PORTAL','NOME','CPF'])
database_insert(merge)
        # TODO
        # remove the hard-coded path from the code
# #
# merge.to_csv("D:\\Github\\FGApp\\backend\\Servidores\\test\\"+ str(i) + datetime.now().strftime("%d-%m-%Y-%H-%M-%S") +'.csv', index=False)
#
        # TODO
        # insert into the database at this point
        # instead of generating a CSV file
# #
def database_insert(result):
try:
#TODO
        # read this from config instead of hard-coding it
#
# ##
eng = create_engine('mysql://admin:example@localhost:3308/datastage')
result.to_sql('stg_servidores', eng, if_exists='append', index=False)
print('arquivo inserido')
except Exception as e:
print("insert")
print (e)
#
# set column data types and strip non-digit characters
# #
def define_columns_types(resul_set):
resul_set.CPF = resul_set.CPF.str.replace('\D','')
resul_set.MATRICULA = resul_set.MATRICULA.str.replace('\D','')
resul_set["Id_SERVIDOR_PORTAL"] = resul_set['Id_SERVIDOR_PORTAL'].astype('int')
resul_set["CPF"] = resul_set['CPF'].astype('int')
resul_set["MATRICULA"] = resul_set['MATRICULA'].astype('int')
return resul_set
def remove_last_line(x):
x = x[x.SIGLA_FUNCAO != '-1']
return x
def remove_columns_cadastro(x):
keep_col = ['Id_SERVIDOR_PORTAL','NOME','CPF','MATRICULA','SIGLA_FUNCAO',
'NIVEL_FUNCAO','FUNCAO','UORG_EXERCICIO','DATA_INICIO_AFASTAMENTO',
'DATA_TERMINO_AFASTAMENTO','DATA_INGRESSO_CARGOFUNCAO','UF_EXERCICIO']
x = x[keep_col]
return x
def create_total_column(remuneracao_result_set):
col_list = list(remuneracao_result_set)
col_list.remove('ANO')
col_list.remove('MES')
col_list.remove('Id_SERVIDOR_PORTAL')
col_list.remove('CPF')
col_list.remove('NOME')
define_types(remuneracao_result_set)
remuneracao_result_set['total_remuneracao'] = remuneracao_result_set[col_list].sum(axis=1)
def define_types(y):
y["ANO"] = y['ANO'].astype('int')
y["MES"] = y['MES'].astype('int')
y["CPF"] = y['CPF'].astype('int')
y["Id_SERVIDOR_PORTAL"] = y['Id_SERVIDOR_PORTAL'].astype('int')
y["REMUNERACAO_BASICA_BRUTA"] = y['REMUNERACAO_BASICA_BRUTA'].astype('float')
y["GRATIFICACAO_NATALINA"] = y['GRATIFICACAO_NATALINA'].astype('float')
y["FERIAS"] = y['FERIAS'].astype('float')
y["OUTRAS_REMUNERACOES_EVENTUAIS"] = y['OUTRAS_REMUNERACOES_EVENTUAIS'].astype('float')
y["TOTAL_DE_VERBAS_INDENIZATORIAS"] = y['TOTAL_DE_VERBAS_INDENIZATORIAS'].astype('float')
def remove_columns_remuneracao(y):
keep_col = ['ANO', 'MES', 'Id_SERVIDOR_PORTAL', 'CPF', 'NOME',
'REMUNERACAO_BASICA_BRUTA', 'GRATIFICACAO_NATALINA',
'FERIAS',
'OUTRAS_REMUNERACOES_EVENTUAIS',
'TOTAL_DE_VERBAS_INDENIZATORIAS']
y = y[keep_col]
y = y[:-1] #remove last row
return y
def remove_spaces(y):
y.columns = y.columns.str.replace('\D(R\$\D)','')
y.columns = y.columns.str.replace('\D(\*\D)','')
y.columns = y.columns.str.replace('\/^\s+|\s+$','')
y.columns = y.columns.str.replace(' ','_')
y.CPF = y.CPF.str.replace('\D','')
def remove_coll_dolar(y):
drop_columns = []
for x in y[:0]:
if('U$' in x):
y = y.drop(columns=[x])
return y
def remove_pt_caracteres(y):
new_header = []
for x in y[:0]:
new_header.append(unidecode(x))
return new_header
def read_csv(remuneracao):
y = pd.read_csv(remuneracao, encoding ="ISO-8859-1", delimiter=';', skipinitialspace=True,error_bad_lines = False, engine='c',decimal=",")
return y
```
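This module defines no entry point of its own; presumably it is invoked elsewhere in the repository with the two CSV paths, roughly as in the sketch below (file names are placeholders):

```python
# hypothetical invocation; the real file paths live elsewhere in the repository
insert_cadastro('Cadastro.csv', 'Remuneracao.csv')
```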
|
{
"source": "jeanschuchardt/pythonToolBox",
"score": 4
}
|
#### File: TDD/phonebook/test_phonebook.py
```python
import unittest
from phonebook import PhoneBook
class PhoneBookTest(unittest.TestCase):
def test_creat_phonebook(self):
phonebook = PhoneBook()
def test_lookup_phonebook(self):
phonebook = PhoneBook()
phonebook.add('Jean','992759779')
self.assertEqual('992759779', phonebook.lookup('Jean'))
def test_missing_entry_raises_keyErrror(self):
phonebook = PhoneBook()
with self.assertRaises(KeyError):
phonebook.lookup("missing")
```
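The repository's phonebook.py is not shown here; a minimal sketch that would satisfy the three tests above could look like this:

```python
class PhoneBook:
    def __init__(self):
        self.numbers = {}

    def add(self, name, number):
        self.numbers[name] = number

    def lookup(self, name):
        # raises KeyError for missing entries, which is exactly what the last test expects
        return self.numbers[name]
```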
|
{
"source": "Jeansding/Gather-Tensorflow-Serving",
"score": 3
}
|
#### File: Gather-Tensorflow-Serving/15.text-classification-tornado-gunicorn/app.py
```python
from tornado.web import Application, RequestHandler, asynchronous
import numpy as np
import tensorflow as tf
import json
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
return graph
with open('dictionary-test.json', 'r') as fopen:
dic = json.load(fopen)
g = load_graph('frozen_model.pb')
label = ['negative', 'positive']
X = g.get_tensor_by_name('import/Placeholder:0')
Y = g.get_tensor_by_name('import/logits:0')
sess = tf.InteractiveSession(graph = g)
maxlen = 50
UNK = 3
class MainHandler(RequestHandler):
def get(self):
self.write('Hello from Tornado')
class TextClassification(RequestHandler):
def get(self):
sentence = self.get_argument('sentence', None)
x = np.zeros((1, maxlen))
for no, k in enumerate(sentence.split()[:maxlen][::-1]):
val = dic[k] if k in dic else UNK
x[0, -1 - no] = val
index = np.argmax(sess.run(Y, feed_dict = {X: x})[0])
self.write(json.dumps({'sentiment': label[index]}))
app = Application([(r'/', MainHandler), (r'/classifier', TextClassification)])
```
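The module only builds the `app` object, so it is presumably served by an external runner (the folder name suggests gunicorn). For a quick local test without gunicorn, a standalone Tornado runner could be appended, for example:

```python
# hypothetical standalone runner; the repository itself likely starts the app via gunicorn
if __name__ == '__main__':
    from tornado.ioloop import IOLoop
    app.listen(8000)
    IOLoop.current().start()
```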
#### File: Gather-Tensorflow-Serving/16.celery-hadoop-flask-text-classification/app.py
```python
from celery import Celery
from flask import Flask, request
from werkzeug import secure_filename
import numpy as np
import subprocess
import shlex
import json
import os
import logging
import sys
import random
import time
import string
logging.basicConfig(level = logging.DEBUG)
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = os.getcwd() + '/upload'
app.config['CELERY_BROKER_URL'] = 'redis://redis:6379/0'
app.config['CELERY_RESULT_BACKEND'] = 'redis://redis:6379/0'
celery = Celery(app.name, broker = app.config['CELERY_BROKER_URL'])
dfs_location = '/user/input_text'
celery.conf.update(app.config)
def id_generator(size = 6, chars = string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def get_hadoop_script(
input_location,
output_location,
mapper,
hadoop_location = '/opt/hadoop/bin/hadoop',
hadoop_streaming = '/opt/hadoop/share/hadoop/tools/lib/hadoop-streaming-3.1.1.jar',
files = ['dictionary-test.json', 'frozen_model.pb'],
reducer = 'reducer.py',
):
files = ' '.join(['-file %s' % (file) for file in files])
reducer = '-file %s -reducer %s' % (reducer, reducer)
mapper = '-file %s -mapper %s' % (mapper, mapper)
input_location = '-input %s/*' % (input_location)
output_location = '-output %s' % (output_location)
return '%s jar %s %s %s %s %s %s' % (
hadoop_location,
hadoop_streaming,
files,
mapper,
reducer,
input_location,
output_location,
)
@celery.task(bind = True)
def classify_text(self):
output_dfs_location = '/user/' + id_generator(10)
script = get_hadoop_script(
dfs_location, output_dfs_location, 'classification.py'
)
print(script)
p = subprocess.Popen(shlex.split(script), stdout = subprocess.PIPE)
for line in p.stdout:
self.update_state(
state = 'PROGRESS', meta = {'status': line.rstrip().decode('utf-8')}
)
subprocess.Popen(
shlex.split(
'/opt/hadoop/bin/hadoop fs -get %s' % (output_dfs_location)
),
stdout = subprocess.PIPE,
)
return {'status': 'classification completed!', 'result': 42}
@celery.task(bind = True)
def upload_files_dfs(self, file_location, split_size):
with open(file_location) as fopen:
texts = list(filter(None, fopen.read().split('\n')))
splitted_list = np.array_split(texts, split_size)
for no, split in enumerate(splitted_list):
filename = '%d-%s' % (no, file_location)
joined = '\n'.join(split.tolist())
script = '/opt/hadoop/bin/hdfs dfs -put %s %s/%s' % (
filename,
dfs_location,
filename,
)
print('%d: uploading %s/%s' % (no, dfs_location, filename))
print('%d: %s' % (no, script))
with open(filename, 'w') as fopen:
fopen.write(joined)
process = subprocess.Popen(
shlex.split(script), stdout = subprocess.PIPE
)
self.update_state(
state = 'PROGRESS',
meta = {'status': 'uploaded %s/%s' % (dfs_location, filename)},
)
return {'status': 'upload completed!', 'result': 42}
@app.route('/upload', methods = ['POST'])
def upload():
f = request.files['file']
f.save(
os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(f.filename))
)
split_size = int(request.form['split_size'])
task = upload_files_dfs.apply_async([f.filename, split_size])
return json.dumps({'id': task.id, 'filename': f.filename})
@app.route('/process', methods = ['GET'])
def process():
task = classify_text.apply_async()
return json.dumps({'id': task.id})
@app.route('/upload_status/<task_id>')
def upload_status(task_id):
task = upload_files_dfs.AsyncResult(task_id)
if task.state == 'PENDING':
response = {'state': task.state, 'status': 'Pending...'}
elif task.state != 'FAILURE':
response = {'state': task.state, 'status': task.info.get('status', '')}
if 'result' in task.info:
response['result'] = task.info['result']
else:
response = {'state': task.state, 'status': str(task.info)}
return json.dumps(response)
@app.route('/classify_text_status/<task_id>')
def classify_text_status(task_id):
task = classify_text.AsyncResult(task_id)
if task.state == 'PENDING':
response = {'state': task.state, 'status': 'Pending...'}
elif task.state != 'FAILURE':
response = {'state': task.state, 'status': task.info.get('status', '')}
if 'result' in task.info:
response['result'] = task.info['result']
else:
response = {'state': task.state, 'status': str(task.info)}
return json.dumps(response)
if __name__ == '__main__':
app.run(debug = True, host = '0.0.0.0', port = 5000)
```
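A hedged client-side sketch of how the endpoints above could be exercised (host, port, file name and split size are assumptions):

```python
import requests

base = 'http://localhost:5000'

# upload a text file and split it into 4 chunks on HDFS
with open('texts.txt', 'rb') as f:
    upload = requests.post(base + '/upload', files = {'file': f}, data = {'split_size': 4}).json()
print(requests.get(base + '/upload_status/' + upload['id']).json())

# kick off the Hadoop streaming classification job and poll its status
task = requests.get(base + '/process').json()
print(requests.get(base + '/classify_text_status/' + task['id']).json())
```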
#### File: Gather-Tensorflow-Serving/18.luigi-celery-text-classification/function.py
```python
import tensorflow as tf
import numpy as np
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import luigi
import re
import json
def clearstring(string):
string = re.sub('[^A-Za-z0-9 ]+', '', string)
string = string.split(' ')
string = filter(None, string)
string = [y.strip() for y in string]
string = ' '.join(string)
return string.lower()
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
return graph
def str_idx(corpus, dic, maxlen, UNK = 3):
X = np.zeros((len(corpus), maxlen))
for i in range(len(corpus)):
for no, k in enumerate(corpus[i].split()[:maxlen][::-1]):
X[i, -1 - no] = dic.get(k, UNK)
return X
sentiment_label = ['negative', 'positive']
emotion_label = ['anger', 'fear', 'joy', 'love', 'sadness', 'surprise']
def classify_sentiment(text):
text = clearstring(text)
batch_x = str_idx([text], dict_sentiment['dictionary'], 100)
output_sentiment = sess_sentiment.run(
logits_sentiment, feed_dict = {x_sentiment: batch_x}
)
return [sentiment_label[i] for i in np.argmax(output_sentiment, 1)][0]
class Sentiment(luigi.Task):
filename = luigi.Parameter()
summary = luigi.Parameter()
batch_size = luigi.IntParameter(default = 32)
def output(self):
return luigi.LocalTarget('%s-sentiment.json' % (self.summary))
def run(self):
g_sentiment = load_graph('sentiment.pb')
x_sentiment = g_sentiment.get_tensor_by_name('import/Placeholder:0')
logits_sentiment = g_sentiment.get_tensor_by_name('import/logits:0')
sess_sentiment = tf.InteractiveSession(graph = g_sentiment)
with open('fast-text-sentiment.json') as fopen:
dict_sentiment = json.load(fopen)
with open(self.filename) as fopen:
texts = list(filter(None, fopen.read().split('\n')))
results = []
for i in range(0, len(texts), self.batch_size):
batch_x_text = texts[i : min(i + self.batch_size, len(texts))]
batch_x_text = [clearstring(t) for t in batch_x_text]
batch_x = str_idx(batch_x_text, dict_sentiment['dictionary'], 100)
output_sentiment = sess_sentiment.run(
logits_sentiment, feed_dict = {x_sentiment: batch_x}
)
labels = [
sentiment_label[l] for l in np.argmax(output_sentiment, 1)
]
for no, text in enumerate(batch_x_text):
results.append({'text': text, 'sentiment_label': labels[no]})
with self.output().open('w') as fopen:
fopen.write(json.dumps(results))
class Emotion(luigi.Task):
filename = luigi.Parameter()
summary = luigi.Parameter()
batch_size = luigi.IntParameter(default = 32)
def output(self):
return luigi.LocalTarget('%s-emotion.json' % (self.summary))
def requires(self):
return {
'Sentiment': Sentiment(
filename = self.filename,
summary = self.summary,
batch_size = self.batch_size,
)
}
def run(self):
g_emotion = load_graph('emotion.pb')
x_emotion = g_emotion.get_tensor_by_name('import/Placeholder:0')
logits_emotion = g_emotion.get_tensor_by_name('import/logits:0')
sess_emotion = tf.InteractiveSession(graph = g_emotion)
with open('fast-text-emotion.json') as fopen:
dict_emotion = json.load(fopen)
with self.input()['Sentiment'].open('r') as fopen:
outputs = json.load(fopen)
for i in range(0, len(outputs), self.batch_size):
batch_x_text = outputs[i : min(i + self.batch_size, len(outputs))]
batch_x_text = [t['text'] for t in batch_x_text]
batch_x_text = [clearstring(t) for t in batch_x_text]
batch_x = str_idx(batch_x_text, dict_emotion['dictionary'], 100)
output_emotion = sess_emotion.run(
logits_emotion, feed_dict = {x_emotion: batch_x}
)
labels = [emotion_label[l] for l in np.argmax(output_emotion, 1)]
for no, label in enumerate(labels):
outputs[i + no]['emotion_label'] = label
with self.output().open('w') as fopen:
fopen.write(json.dumps(outputs))
class Save_to_Elastic(luigi.Task):
filename = luigi.Parameter()
summary = luigi.Parameter()
index = luigi.Parameter()
batch_size = luigi.IntParameter(default = 32)
def requires(self):
return {
'Emotion': Emotion(
filename = self.filename,
summary = self.summary,
batch_size = self.batch_size,
)
}
def run(self):
with self.input()['Emotion'].open('r') as fopen:
emotions = json.load(fopen)
es = Elasticsearch()
for i in range(0, len(emotions), self.batch_size):
batch = emotions[i : min(i + self.batch_size, len(emotions))]
actions = [
{
'_index': self.index,
'_type': 'text',
'_id': '%d-%s' % (i + j, self.summary),
'_source': batch[j],
}
for j in range(len(batch))
]
helpers.bulk(es, actions)
```
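One possible way to trigger the pipeline above locally (parameter values are placeholders):

```python
# hypothetical local run; filename, summary and index are placeholder values
if __name__ == '__main__':
    luigi.build(
        [
            Save_to_Elastic(
                filename = 'texts.txt',
                summary = 'my-run',
                index = 'sentiment-emotion',
                batch_size = 32,
            )
        ],
        local_scheduler = True,
    )
```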
#### File: Gather-Tensorflow-Serving/2.object-detection-flasksocketio-opencv/camera.py
```python
import cv2
import base64
from socketIO_client import SocketIO, BaseNamespace
import numpy as np
import time
from PIL import Image
from threading import Thread, ThreadError
import io
img_np = None
socketIO = SocketIO('localhost', 5000)
live_namespace = socketIO.define(BaseNamespace, '/live')
def receive_events_thread():
socketIO.wait()
def on_camera_response(*args):
global img_np
img_bytes = base64.b64decode(args[0]['data'])
img_np = np.array(Image.open(io.BytesIO(img_bytes)))
def run_cam():
global img_np
while True:
try:
cv2.imshow('cam', img_np)
if cv2.waitKey(30) & 0xFF == ord('q'):
break
except:
continue
live_namespace.on('camera_update', on_camera_response)
receive_events_thread = Thread(target = receive_events_thread)
receive_cam_thread = Thread(target = run_cam)
receive_events_thread.daemon = True
receive_events_thread.start()
receive_cam_thread.daemon = True
receive_cam_thread.start()
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
img_b = cv2.imencode('.jpg', cv2.cvtColor(img, cv2.COLOR_BGR2RGB))[
1
].tobytes()
base64_bytes = base64.b64encode(img_b)
base64_string = base64_bytes.decode('utf-8')
live_namespace.emit('livevideo', {'data': base64_string})
time.sleep(0.05)
```
#### File: Gather-Tensorflow-Serving/4.classification-flask-gunicorn/api_dynamic.py
```python
import os
import pickle
import tensorflow as tf
import time
import model
import numpy as np
from flask import Flask, render_template, request
from werkzeug import secure_filename
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
maxlen = 50
location = os.getcwd()
num_layers = 3
size_layer = 256
learning_rate = 0.0001
output_size = 2
with open('vector-sentiment.p', 'rb') as fopen:
vectors = pickle.load(fopen)
with open('dictionary-sentiment.p', 'rb') as fopen:
dictionary = pickle.load(fopen)
sess = tf.InteractiveSession()
model = model.Model(num_layers, size_layer, vectors.shape[1], output_size, learning_rate)
sess.run(tf.global_variables_initializer())
dimension = vectors.shape[1]
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, os.getcwd() + "/model-rnn-vector-huber.ckpt")
@app.route('/dynamic', methods = ['GET'])
def get_text():
last_time = time.time()
batch_x = np.zeros((1, maxlen, dimension))
tokens = request.args.get('text').split()[:maxlen]
for no, text in enumerate(tokens[::-1]):
try:
batch_x[0, -1 - no, :] += vectors[dictionary[text], :]
except Exception as e:
pass
sess.run(tf.nn.softmax(model.logits), feed_dict = {model.X : batch_x})
return str(time.time()-last_time)
if __name__ == '__main__':
app.run(host = '0.0.0.0', threaded = True, port = 8033)
```
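A hedged example of querying the endpoint above once the server is running on port 8033 (the sample sentence is arbitrary):

```python
import requests

# the endpoint simply returns the inference wall-clock time as a string
print(requests.get('http://localhost:8033/dynamic', params = {'text': 'kerajaan sangat baik'}).text)
```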
#### File: Gather-Tensorflow-Serving/6.inception-flasksocketio/camera.py
```python
import cv2
import base64
from socketIO_client import SocketIO, BaseNamespace
import numpy as np
import time
from PIL import Image
from threading import Thread, ThreadError
import io
socketIO = SocketIO('localhost', 5000)
live_namespace = socketIO.define(BaseNamespace, '/live')
def receive_events_thread():
socketIO.wait()
def on_camera_response(*args):
print(args[0])
live_namespace.on('camera_update', on_camera_response)
receive_events_thread = Thread(target = receive_events_thread)
receive_events_thread.daemon = True
receive_events_thread.start()
cap = cv2.VideoCapture(0)
count = 0
while True:
ret, img = cap.read()
img_b = cv2.imencode('.jpg', cv2.cvtColor(img, cv2.COLOR_BGR2RGB))[
1
].tobytes()
if count % 20 == 0:
base64_bytes = base64.b64encode(img_b)
base64_string = base64_bytes.decode('utf-8')
live_namespace.emit('livevideo', {'data': base64_string})
count = 1
count += 1
```
|
{
"source": "Jeansding/Malaya",
"score": 2
}
|
#### File: malaya/_models/_sklearn_model.py
```python
import xgboost as xgb
import numpy as np
from collections import Counter
from scipy.sparse import hstack
from ..texts._text_functions import (
simple_textcleaning,
classification_textcleaning,
entities_textcleaning,
language_detection_textcleaning,
)
from .._utils._parse_dependency import DependencyGraph
from ..texts.vectorizer import features_crf, features_crf_dependency
def transitions(trans_features):
for (label_from, label_to), weight in trans_features:
print('%-6s -> %-7s %0.6f' % (label_from, label_to, weight))
def state_features(state_features):
for (attr, label), weight in state_features:
print('%0.6f %-8s %s' % (weight, label, attr))
class CRF:
def __init__(self, model, is_lower = True):
self._model = model
self._is_lower = is_lower
def predict(self, string):
"""
Tag a string
Parameters
----------
string : str
Returns
-------
string: tagged string
"""
if not isinstance(string, str):
raise ValueError('input must be a string')
string = string.lower() if self._is_lower else string
string = entities_textcleaning(string)
batch_x = [features_crf(string, index) for index in range(len(string))]
return [
(string[no], tag)
for no, tag in enumerate(self._model.predict_single(batch_x))
]
def print_transitions(self, top_k = 10):
"""
Print important top-k transitions
Parameters
----------
top_k : int
"""
if not isinstance(top_k, int):
raise ValueError('input must be an integer')
print('Top-%d likely transitions:' % (top_k))
transitions(
Counter(self._model.transition_features_).most_common(top_k)
)
print('\nTop-%d unlikely transitions:' % (top_k))
transitions(
Counter(self._model.transition_features_).most_common()[-top_k:]
)
def print_features(self, top_k = 10):
"""
Print important top-k features
Parameters
----------
top_k : int
"""
if not isinstance(top_k, int):
raise ValueError('input must be an integer')
print('Top-%d positive:' % (top_k))
state_features(Counter(self._model.state_features_).most_common(top_k))
print('\nTop-%d negative:' % (top_k))
state_features(
Counter(self._model.state_features_).most_common()[-top_k:]
)
class DEPENDENCY:
def __init__(self, tag, depend):
self._tag = tag
self._depend = depend
def predict(self, string):
"""
Tag a string
Parameters
----------
string : str
Returns
-------
string: tagged string
"""
if not isinstance(string, str):
raise ValueError('input must be a string')
string = entities_textcleaning(string)
if len(string) > 120:
raise Exception(
'Dependency parsing only able to accept string less than 120 words'
)
batch_x = [features_crf(string, index) for index in range(len(string))]
tagging = self._tag.predict_single(batch_x)
batch_x = [
features_crf_dependency(string, tagging, index)
for index in range(len(string))
]
depend = [int(i) for i in self._depend.predict_single(batch_x)]
for i in range(len(depend)):
if depend[i] == 0 and tagging[i] != 'root':
tagging[i] = 'UNK'
elif depend[i] != 0 and tagging[i] == 'root':
tagging[i] = 'UNK'
elif depend[i] > len(tagging):
depend[i] = len(tagging)
tagging = [(string[i], tagging[i]) for i in range(len(depend))]
indexing = [(string[i], depend[i]) for i in range(len(depend))]
result = []
for i in range(len(tagging)):
result.append(
'%d\t%s\t_\t_\t_\t_\t%d\t%s\t_\t_'
% (i + 1, tagging[i][0], int(indexing[i][1]), tagging[i][1])
)
d = DependencyGraph('\n'.join(result), top_relation_label = 'root')
return d, tagging, indexing
def print_features(self, top_k = 10):
"""
Print important top-k features for tagging dependency
Parameters
----------
top_k : int
"""
if not isinstance(top_k, int):
raise ValueError('input must be an integer')
print('Top-%d tagging positive:' % (top_k))
state_features(Counter(self._tag.state_features_).most_common(top_k))
print('\nTop-%d tagging negative:' % (top_k))
state_features(
Counter(self._tag.state_features_).most_common()[-top_k:]
)
def print_transitions_tag(self, top_k = 10):
"""
Print important top-k transitions for tagging dependency
Parameters
----------
top_k : int
"""
if not isinstance(top_k, int):
raise ValueError('input must be an integer')
print('Top-%d likely tagging transitions:' % (top_k))
transitions(Counter(self._tag.transition_features_).most_common(top_k))
print('\nTop-%d unlikely tagging transitions:' % (top_k))
transitions(
Counter(self._tag.transition_features_).most_common()[-top_k:]
)
def print_transitions_index(self, top_k = 10):
"""
Print important top-k transitions for indexing dependency
Parameters
----------
top_k : int
"""
if not isinstance(top_k, int):
raise ValueError('input must be an integer')
print('Top-%d likely indexing transitions:' % (top_k))
transitions(
Counter(self._depend.transition_features_).most_common(top_k)
)
print('\nTop-%d unlikely indexing transitions:' % (top_k))
transitions(
Counter(self._depend.transition_features_).most_common()[-top_k:]
)
class USER_XGB:
def __init__(self, xgb, label, vectorize, cleaning = simple_textcleaning):
self.xgb = xgb
self.label = label
self.vectorize = vectorize
self._cleaning = cleaning
def predict(self, string, get_proba = False):
"""
Classify a string
Parameters
----------
string : str
get_proba: bool, optional (default=False)
If True, it will return probability of classes.
Returns
-------
string: result
"""
if not isinstance(string, str):
raise ValueError('input must be a string')
vectors = self.vectorize.transform([self._cleaning(string)])
result = self.xgb.predict(
xgb.DMatrix(vectors), ntree_limit = self.xgb.best_ntree_limit
)[0]
if get_proba:
return {self.label[i]: result[i] for i in range(len(result))}
else:
return self.label[np.argmax(result)]
def predict_batch(self, strings, get_proba = False):
"""
Classify a list of strings
Parameters
----------
strings: list
get_proba: bool, optional (default=False)
If True, it will return probability of classes.
Returns
-------
string: list of results
"""
if not isinstance(strings, list):
raise ValueError('input must be a list')
if not isinstance(strings[0], str):
raise ValueError('input must be list of strings')
strings = [self._cleaning(string) for string in strings]
vectors = self.vectorize.transform(strings)
results = self.xgb.predict(
xgb.DMatrix(vectors), ntree_limit = self.xgb.best_ntree_limit
)
if get_proba:
outputs = []
for result in results:
outputs.append(
{self.label[i]: result[i] for i in range(len(result))}
)
return outputs
else:
return [self.label[i] for i in np.argmax(results, axis = 1)]
class USER_BAYES:
def __init__(
self, multinomial, label, vectorize, cleaning = simple_textcleaning
):
self.multinomial = multinomial
self.label = label
self.vectorize = vectorize
self._cleaning = cleaning
def predict(self, string, get_proba = False):
"""
Classify a string
Parameters
----------
string : str
get_proba: bool, optional (default=False)
If True, it will return probability of classes.
Returns
-------
string: result
"""
if not isinstance(string, str):
raise ValueError('input must be a string')
vectors = self.vectorize.transform([self._cleaning(string)])
if get_proba:
result = self.multinomial.predict_proba(vectors)[0]
return {self.label[i]: result[i] for i in range(len(result))}
else:
return self.label[self.multinomial.predict(vectors)[0]]
def predict_batch(self, strings, get_proba = False):
"""
Classify a list of strings
Parameters
----------
strings: list
get_proba: bool, optional (default=False)
If True, it will return probability of classes.
Returns
-------
string: list of results
"""
if not isinstance(strings, list):
raise ValueError('input must be a list')
if not isinstance(strings[0], str):
raise ValueError('input must be list of strings')
strings = [self._cleaning(string) for string in strings]
vectors = self.vectorize.transform(strings)
if get_proba:
results = self.multinomial.predict_proba(vectors)
outputs = []
for result in results:
outputs.append(
{self.label[i]: result[i] for i in range(len(result))}
)
return outputs
else:
return [
self.label[result]
for result in self.multinomial.predict(vectors)
]
class TOXIC:
def __init__(self, models, vectors):
self._models = models
self._vectors = vectors
self._class_names = [
'toxic',
'severe_toxic',
'obscene',
'threat',
'insult',
'identity_hate',
]
def _stack(self, strings):
char_features = self._vectors['char'].transform(strings)
word_features = self._vectors['word'].transform(strings)
return hstack([char_features, word_features])
def predict(self, string, get_proba = False):
"""
Classify a string
Parameters
----------
string : str
get_proba: bool, optional (default=False)
If True, it will return probability of classes.
Returns
-------
string: result
"""
if not isinstance(string, str):
raise ValueError('input must be a string')
stacked = self._stack([classification_textcleaning(string, True)])
result = {} if get_proba else []
for no, label in enumerate(self._class_names):
if get_proba:
result[label] = self._models[no].predict_proba(stacked)[0, 1]
else:
prob = self._models[no].predict(stacked)[0]
if prob:
result.append(label)
return result
def predict_batch(self, strings, get_proba = False):
"""
Classify a list of strings
Parameters
----------
strings: list
get_proba: bool, optional (default=False)
If True, it will return probability of classes.
Returns
-------
string: list of results
"""
if not isinstance(strings, list):
raise ValueError('input must be a list')
if not isinstance(strings[0], str):
raise ValueError('input must be list of strings')
stacked = self._stack(
[classification_textcleaning(i, True) for i in strings]
)
result = []
for no in range(len(self._class_names)):
if get_proba:
probs = self._models[no].predict_proba(stacked)[:, 1]
else:
probs = self._models[no].predict(stacked)
result.append(probs)
result = np.array(result).T
dicts = []
for row in result:
nested = {} if get_proba else []
for no, label in enumerate(self._class_names):
if get_proba:
nested[label] = row[no]
else:
if row[no]:
nested.append(label)
dicts.append(nested)
return dicts
class LANGUAGE_DETECTION:
def __init__(self, model, label, vectorizer, mode = 'sklearn'):
self._model = model
self._label = label
self._vectorizer = vectorizer
self._mode = mode
def predict(self, string, get_proba = False):
"""
Classify a string
Parameters
----------
string : str
get_proba: bool, optional (default=False)
If True, it will return probability of classes.
Returns
-------
string: result
"""
if not isinstance(string, str):
raise ValueError('input must be a string')
string = language_detection_textcleaning(string)
vectors = self._vectorizer.transform([string])
if self._mode == 'xgb':
result = self._model.predict(
xgb.DMatrix(vectors), ntree_limit = self._model.best_ntree_limit
)[0]
if get_proba:
return {self._label[i]: result[i] for i in range(len(result))}
else:
return self._label[np.argmax(result)]
else:
if get_proba:
result = self._model.predict_proba(vectors)[0]
return {self._label[i]: result[i] for i in range(len(result))}
else:
return self._label[self._model.predict(vectors)[0]]
def predict_batch(self, strings, get_proba = False):
"""
Classify a list of strings
Parameters
----------
strings: list
get_proba: bool, optional (default=False)
If True, it will return probability of classes.
Returns
-------
string: list of results
"""
if not isinstance(strings, list):
raise ValueError('input must be a list')
if not isinstance(strings[0], str):
raise ValueError('input must be list of strings')
strings = [
language_detection_textcleaning(string) for string in strings
]
vectors = self._vectorizer.transform(strings)
if self._mode == 'xgb':
results = self._model.predict(
xgb.DMatrix(vectors), ntree_limit = self._model.best_ntree_limit
)
if get_proba:
outputs = []
for result in results:
outputs.append(
{self._label[i]: result[i] for i in range(len(result))}
)
return outputs
else:
return [self._label[i] for i in np.argmax(results, axis = 1)]
else:
if get_proba:
results = self._model.predict_proba(vectors)
outputs = []
for result in results:
outputs.append(
{self._label[i]: result[i] for i in range(len(result))}
)
return outputs
else:
return [
self._label[result]
for result in self._model.predict(vectors)
]
```
#### File: Malaya/malaya/summarize.py
```python
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter('ignore')
import numpy as np
import re
import random
from scipy.linalg import svd
from operator import itemgetter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import shuffle
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
from sklearn.decomposition import NMF, LatentDirichletAllocation
from .texts._text_functions import (
summary_textcleaning,
classification_textcleaning,
STOPWORDS,
split_by_dot,
)
from .stem import sastrawi
from ._models import _skip_thought
from .cluster import cluster_words
class _DEEP_SUMMARIZER:
def __init__(
self, sess, x, logits, attention, dictionary, maxlen, model = None
):
self._sess = sess
self._X = x
self._logits = logits
self._attention = attention
self.dictionary = dictionary
self._maxlen = maxlen
self._rev_dictionary = {v: k for k, v in self.dictionary.items()}
self._model = model
def vectorize(self, corpus):
if not isinstance(corpus, list) and not isinstance(corpus, str):
raise ValueError('corpus must be a list')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
corpus = re.findall('(?=\S)[^.\n]+(?<=\S)', corpus)
corpus = [summary_textcleaning(i) for i in corpus]
sequences = _skip_thought.batch_sequence(
corpus, self.dictionary, maxlen = self._maxlen
)
return self._sess.run(
self._logits, feed_dict = {self._X: np.array(sequences)}
)
def summarize(
self, corpus, top_k = 3, important_words = 3, return_cluster = True
):
"""
Summarize list of strings / corpus
Parameters
----------
corpus: str, list
top_k: int, (default=3)
number of summarized strings
important_words: int, (default=3)
number of important words
Returns
-------
string: summarized string
"""
if not isinstance(top_k, int):
raise ValueError('top_k must be an integer')
if not isinstance(important_words, int):
raise ValueError('important_words must be an integer')
if not isinstance(return_cluster, bool):
raise ValueError('return_cluster must be a boolean')
if not isinstance(corpus, list) and not isinstance(corpus, str):
raise ValueError('corpus must be a list')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
corpus = re.findall('(?=\S)[^.\n]+(?<=\S)', corpus)
corpus = [summary_textcleaning(i) for i in corpus]
sequences = _skip_thought.batch_sequence(
corpus, self.dictionary, maxlen = self._maxlen
)
encoded, attention = self._sess.run(
[self._logits, self._attention],
feed_dict = {self._X: np.array(sequences)},
)
attention = attention.sum(axis = 0)
kmeans = KMeans(n_clusters = top_k, random_state = 0)
kmeans = kmeans.fit(encoded)
avg = []
for j in range(top_k):
idx = np.where(kmeans.labels_ == j)[0]
avg.append(np.mean(idx))
closest, _ = pairwise_distances_argmin_min(
kmeans.cluster_centers_, encoded
)
indices = np.argsort(attention)[::-1]
top_words = [self._rev_dictionary[i] for i in indices[:important_words]]
ordering = sorted(range(top_k), key = lambda k: avg[k])
summarized = '. '.join([corpus[closest[idx]] for idx in ordering])
if return_cluster:
return {
'summary': summarized,
'top-words': top_words,
'cluster-top-words': cluster_words(top_words),
}
return {'summary': summarized, 'top-words': top_words}
def deep_model_news():
"""
Load skip-thought summarization deep learning model trained on news dataset.
Returns
-------
_DEEP_SUMMARIZER: _DEEP_SUMMARIZER class
"""
sess, x, logits, attention, dictionary, maxlen = (
_skip_thought.news_load_model()
)
return _DEEP_SUMMARIZER(sess, x, logits, attention, dictionary, maxlen)
def deep_model_wiki():
"""
Load residual network with Bahdanau Attention summarization deep learning model trained on wikipedia dataset.
Returns
-------
_DEEP_SUMMARIZER: _DEEP_SUMMARIZER class
"""
print(
        'WARNING: this model is convolutional based; Tensorflow-GPU above 1.10 may run into a problem. Please downgrade to Tensorflow-GPU v1.8 if you get any cuDNN error.'
)
sess, x, logits, attention, dictionary, maxlen = (
_skip_thought.wiki_load_model()
)
return _DEEP_SUMMARIZER(sess, x, logits, attention, dictionary, maxlen)
def train_skip_thought(
corpus,
epoch = 5,
batch_size = 16,
embedding_size = 256,
maxlen = 50,
vocab_size = None,
stride = 1,
):
"""
Train a deep skip-thought network for summarization agent
Parameters
----------
corpus: str, list
epoch: int, (default=5)
iteration numbers
    batch_size: int, (default=16)
        batch size for every feed; batch size must be <= size of corpus
embedding_size: int, (default=256)
vector size representation for a word
maxlen: int, (default=50)
max length of a string to be train
vocab_size: int, (default=None)
max vocabulary size, None for no limit
stride: int, (default=1)
stride size, skipping value for sentences
Returns
-------
_DEEP_SUMMARIZER: malaya.skip_thought._DEEP_SUMMARIZER class
"""
if not isinstance(epoch, int):
raise ValueError('epoch must be an integer')
if not isinstance(batch_size, int):
raise ValueError('batch_size must be an integer')
if not isinstance(embedding_size, int):
raise ValueError('embedding_size must be an integer')
if not isinstance(maxlen, int):
raise ValueError('maxlen must be an integer')
if not isinstance(corpus, list) and not isinstance(corpus, str):
raise ValueError('corpus must be a list')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
corpus = re.findall('(?=\S)[^.\n]+(?<=\S)', corpus)
corpus = [summary_textcleaning(i) for i in corpus]
t_range = int((len(corpus) - 3) / stride + 1)
left, middle, right = [], [], []
for i in range(t_range):
slices = corpus[i * stride : i * stride + 3]
left.append(slices[0])
middle.append(slices[1])
right.append(slices[2])
if batch_size > len(left):
        raise ValueError('batch size must be smaller than corpus size')
left, middle, right = shuffle(left, middle, right)
sess, model, dictionary, _ = _skip_thought.train_model(
middle,
left,
right,
epoch = epoch,
batch_size = batch_size,
embedding_size = embedding_size,
maxlen = maxlen,
vocab_size = vocab_size,
)
return _DEEP_SUMMARIZER(
sess,
model.INPUT,
model.get_thought,
model.attention,
dictionary,
maxlen,
model = model,
)
def lsa(
corpus,
maintain_original = False,
ngram = (1, 3),
min_df = 2,
top_k = 3,
important_words = 3,
return_cluster = True,
**kwargs
):
"""
summarize a list of strings using LSA.
Parameters
----------
corpus: list
maintain_original: bool, (default=False)
If False, will apply malaya.text_functions.classification_textcleaning
ngram: tuple, (default=(1,3))
n-grams size to train a corpus
min_df: int, (default=2)
minimum document frequency for a word
top_k: int, (default=3)
number of summarized strings
important_words: int, (default=3)
number of important words
return_cluster: bool, (default=True)
if True, will cluster important_words to similar texts
Returns
-------
dictionary: result
"""
if not isinstance(maintain_original, bool):
raise ValueError('maintain_original must be a boolean')
if not isinstance(top_k, int):
raise ValueError('top_k must be an integer')
if not isinstance(important_words, int):
raise ValueError('important_words must be an integer')
if not isinstance(return_cluster, bool):
raise ValueError('return_cluster must be a boolean')
if not isinstance(ngram, tuple):
raise ValueError('ngram must be a tuple')
if not len(ngram) == 2:
raise ValueError('ngram size must equal to 2')
    if not isinstance(min_df, (int, float)):
raise ValueError('min_df must be an integer or a float')
if not isinstance(corpus, list) and not isinstance(corpus, str):
raise ValueError('corpus must be a list')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
corpus = re.findall('(?=\S)[^.\n]+(?<=\S)', corpus)
splitted_fullstop = [summary_textcleaning(i) for i in corpus]
splitted_fullstop = [
classification_textcleaning(i) if not maintain_original else i
for i in splitted_fullstop
if len(i)
]
stemmed = [sastrawi(i) for i in splitted_fullstop]
tfidf = TfidfVectorizer(
ngram_range = ngram, min_df = min_df, stop_words = STOPWORDS, **kwargs
).fit(stemmed)
U, S, Vt = svd(tfidf.transform(stemmed).todense().T, full_matrices = False)
summary = [
(splitted_fullstop[i], np.linalg.norm(np.dot(np.diag(S), Vt[:, b]), 2))
for i in range(len(splitted_fullstop))
for b in range(len(Vt))
]
summary = sorted(summary, key = itemgetter(1))
summary = dict(
(v[0], v) for v in sorted(summary, key = lambda summary: summary[1])
).values()
summarized = '. '.join([a for a, b in summary][len(summary) - (top_k) :])
indices = np.argsort(tfidf.idf_)[::-1]
features = tfidf.get_feature_names()
top_words = [features[i] for i in indices[:important_words]]
if return_cluster:
return {
'summary': summarized,
'top-words': top_words,
'cluster-top-words': cluster_words(top_words),
}
return {'summary': summarized, 'top-words': top_words}
def nmf(
corpus,
maintain_original = False,
ngram = (1, 3),
min_df = 2,
top_k = 3,
important_words = 3,
return_cluster = True,
**kwargs
):
"""
summarize a list of strings using NMF.
Parameters
----------
corpus: list
maintain_original: bool, (default=False)
If False, will apply malaya.text_functions.classification_textcleaning
ngram: tuple, (default=(1,3))
n-grams size to train a corpus
top_k: int, (default=3)
number of summarized strings
important_words: int, (default=3)
number of important words
min_df: int, (default=2)
minimum document frequency for a word
return_cluster: bool, (default=True)
if True, will cluster important_words to similar texts
Returns
-------
dictionary: result
"""
if not isinstance(maintain_original, bool):
raise ValueError('maintain_original must be a boolean')
if not isinstance(top_k, int):
raise ValueError('top_k must be an integer')
if not isinstance(important_words, int):
raise ValueError('important_words must be an integer')
if not isinstance(return_cluster, bool):
raise ValueError('return_cluster must be a boolean')
if not isinstance(ngram, tuple):
raise ValueError('ngram must be a tuple')
if not len(ngram) == 2:
raise ValueError('ngram size must equal to 2')
    if not isinstance(min_df, (int, float)):
raise ValueError('min_df must be an integer or a float')
if not isinstance(corpus, list) and not isinstance(corpus, str):
        raise ValueError('corpus must be a string or a list of strings')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
        corpus = re.findall(r'(?=\S)[^.\n]+(?<=\S)', corpus)
splitted_fullstop = [summary_textcleaning(i) for i in corpus]
splitted_fullstop = [
classification_textcleaning(i) if not maintain_original else i
for i in splitted_fullstop
if len(i)
]
stemmed = [sastrawi(i) for i in splitted_fullstop]
tfidf = TfidfVectorizer(
ngram_range = ngram, min_df = min_df, stop_words = STOPWORDS, **kwargs
).fit(stemmed)
densed_tfidf = tfidf.transform(stemmed).todense()
nmf = NMF(len(splitted_fullstop)).fit(densed_tfidf)
vectors = nmf.transform(densed_tfidf)
components = nmf.components_.mean(axis = 1)
summary = [
(
splitted_fullstop[i],
np.linalg.norm(np.dot(np.diag(components), vectors[:, b]), 2),
)
for i in range(len(splitted_fullstop))
for b in range(len(vectors))
]
summary = sorted(summary, key = itemgetter(1))
summary = dict(
(v[0], v) for v in sorted(summary, key = lambda summary: summary[1])
).values()
summarized = '. '.join([a for a, b in summary][len(summary) - (top_k) :])
indices = np.argsort(tfidf.idf_)[::-1]
features = tfidf.get_feature_names()
top_words = [features[i] for i in indices[:important_words]]
if return_cluster:
return {
'summary': summarized,
'top-words': top_words,
'cluster-top-words': cluster_words(top_words),
}
return {'summary': summarized, 'top-words': top_words}
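# A hypothetical usage sketch (`article` is an assumed raw string): a plain string is
# split on '.' internally, and return_cluster = False drops the clustered-word key
# from the result:
#   nmf(article, top_k = 3, return_cluster = False)
#   # -> {'summary': '...', 'top-words': ['...', '...', '...']}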
def lda(
corpus,
maintain_original = False,
ngram = (1, 3),
min_df = 2,
top_k = 3,
important_words = 3,
return_cluster = True,
**kwargs
):
"""
summarize a list of strings using LDA.
Parameters
----------
corpus: list
maintain_original: bool, (default=False)
If False, will apply malaya.text_functions.classification_textcleaning
ngram: tuple, (default=(1,3))
n-grams size to train a corpus
min_df: int, (default=2)
minimum document frequency for a word
top_k: int, (default=3)
number of summarized strings
important_words: int, (default=3)
number of important words
return_cluster: bool, (default=True)
if True, will cluster important_words to similar texts
Returns
-------
dictionary: result
"""
if not isinstance(maintain_original, bool):
raise ValueError('maintain_original must be a boolean')
if not isinstance(top_k, int):
raise ValueError('top_k must be an integer')
if not isinstance(important_words, int):
raise ValueError('important_words must be an integer')
if not isinstance(return_cluster, bool):
raise ValueError('return_cluster must be a boolean')
if not isinstance(ngram, tuple):
raise ValueError('ngram must be a tuple')
if not len(ngram) == 2:
raise ValueError('ngram size must equal to 2')
    if not isinstance(min_df, (int, float)):
raise ValueError('min_df must be an integer or a float')
if not isinstance(corpus, list) and not isinstance(corpus, str):
        raise ValueError('corpus must be a string or a list of strings')
if isinstance(corpus, list):
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
if isinstance(corpus, str):
corpus = corpus.replace('\n', '.')
corpus = split_by_dot(corpus)
else:
corpus = [c + '.' for c in corpus]
corpus = ' '.join(corpus)
        corpus = re.findall(r'(?=\S)[^.\n]+(?<=\S)', corpus)
splitted_fullstop = [summary_textcleaning(i) for i in corpus]
splitted_fullstop = [
classification_textcleaning(i) if not maintain_original else i
for i in splitted_fullstop
if len(i)
]
stemmed = [sastrawi(i) for i in splitted_fullstop]
tfidf = TfidfVectorizer(
ngram_range = ngram, min_df = min_df, stop_words = STOPWORDS, **kwargs
).fit(stemmed)
densed_tfidf = tfidf.transform(stemmed).todense()
lda = LatentDirichletAllocation(len(splitted_fullstop)).fit(densed_tfidf)
vectors = lda.transform(densed_tfidf)
components = lda.components_.mean(axis = 1)
summary = [
(
splitted_fullstop[i],
np.linalg.norm(np.dot(np.diag(components), vectors[:, b]), 2),
)
for i in range(len(splitted_fullstop))
for b in range(len(vectors))
]
summary = sorted(summary, key = itemgetter(1))
summary = dict(
(v[0], v) for v in sorted(summary, key = lambda summary: summary[1])
).values()
summarized = '. '.join([a for a, b in summary][len(summary) - (top_k) :])
indices = np.argsort(tfidf.idf_)[::-1]
features = tfidf.get_feature_names()
top_words = [features[i] for i in indices[:important_words]]
if return_cluster:
return {
'summary': summarized,
'top-words': top_words,
'cluster-top-words': cluster_words(top_words),
}
return {'summary': summarized, 'top-words': top_words}
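# A hypothetical usage sketch (`sentences` is an assumed list of strings): any extra
# keyword arguments are forwarded to TfidfVectorizer, e.g. to cap the vocabulary:
#   lda(sentences, important_words = 10, max_features = 500)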
```
|
{
"source": "Jeansding/PedestrianDetectionSystem",
"score": 3
}
|
#### File: PedestrianDetectionSystem/python/main.py
```python
import cv2
import numpy as np
import requests
from inference import OD
import subprocess as sp
import threading
import time
from PIL import Image
RTMP_HOST = 'xx.xx.xx.xx'
rtmpUrl = 'rtmp://' + RTMP_HOST + ':1935/live/stream'
od = OD()
use_channel = 1
shared_image = (np.ones((540, 960, 3), dtype=np.uint8) * 255).astype(np.uint8)
process_image = (np.ones((540, 960, 3), dtype=np.uint8) * 255).astype(np.uint8)
people_count = 0
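# Pipeline overview (descriptive summary of the threads below):
#   SecondThread     - pulls frames from the Android RTMP feed, resizes them to
#                      960x540 and stores the latest frame in `shared_image`
#   TFThread         - runs the object detector on `shared_image`, updating
#                      `process_image` and `people_count`
#   PushThread       - writes `process_image` into the ffmpeg pipe (re-published at
#                      rtmp://<host>/live/stream) and POSTs the people count
#   GetChannelThread - polls the web app for which input channel to use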
class SecondThread(threading.Thread):
def __init__(self):
        super(SecondThread, self).__init__()  # Note: the parent class initializer must be called explicitly.
# self.arg=arg
    def run(self):  # the function this thread runs
print('second thread is run!')
global shared_image
while True:
camera = cv2.VideoCapture('rtmp://' + RTMP_HOST + '/live/android')
if (camera.isOpened()):
print ('Open camera 1')
break
else:
print ('Fail to open camera 1!')
time.sleep(0.05)
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 864) # 2560x1920 2217x2217 2952×1944 1920x1080
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
# camera.set(cv2.CAP_PROP_FPS, 5)
size = (int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)), int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT)))
sizeStr = str(960) + 'x' + str(540)
fps = camera.get(cv2.CAP_PROP_FPS) # 30p/self
# fps = int(fps)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('res_mv.avi', fourcc, fps, size)
while True:
            ret, frame = camera.read()  # grab the video stream frame by frame
if frame is not None:
image = Image.fromarray(frame)
image = image.resize((960, 540))
frame = np.array(image)
# frame.resize((960, 540))
if use_channel == 1:
shared_image = frame
class TFThread(threading.Thread):
def __init__(self):
        super(TFThread, self).__init__()  # Note: the parent class initializer must be called explicitly.
# self.arg=arg
    def run(self):  # the function this thread runs
print('tensorflow thread is run!')
global shared_image
global process_image
global people_count
while True:
frame, pc = od.infer(shared_image)
process_image = frame
people_count = pc
time.sleep(0.05)
# print(process_image)
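# ffmpeg reads raw BGR 960x540 frames at 5 fps from stdin, encodes them with
# libx264 (yuv420p, ultrafast preset) and publishes the result as FLV to the
# local RTMP server.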
command = ['ffmpeg',
'-y',
'-f', 'rawvideo',
'-vcodec','rawvideo',
'-pix_fmt', 'bgr24',
'-s', '960x540',
'-r', str(5),
'-i', '-',
'-c:v', 'libx264',
'-pix_fmt', 'yuv420p',
'-preset', 'ultrafast',
'-f', 'flv',
rtmpUrl]
global pipe
pipe = sp.Popen(command, stdin=sp.PIPE)
class PushThread(threading.Thread):
def __init__(self):
        super(PushThread, self).__init__()  # Note: the parent class initializer must be called explicitly.
# self.arg=arg
    def run(self):  # the function this thread runs
print('push thread is run!')
global process_image
url = "http://127.0.0.1:8080/PeopleDetection/people"
count = 0
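        # frames are written at roughly 5 fps (0.198 s sleep) to match ffmpeg's -r 5;
        # the people count is POSTed about once every 25 frames (~5 s)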
while True:
            ########################### image capture
#print(process_image)
#print(pipe)
#print(pipe.stdin)
            pipe.stdin.write(process_image.tobytes())  # write the frame into the ffmpeg pipe
# print('push!')
param = {'peopleNum': str(people_count)}
count += 1
if count % 25 == 0:
try:
r = requests.post(url=url, data=param)
except:
pass
time.sleep(0.198)
class GetChannelThread(threading.Thread):
def __init__(self):
        super(GetChannelThread, self).__init__()  # Note: the parent class initializer must be called explicitly.
# self.arg=arg
    def run(self):  # the function this thread runs
print('get channel thread is run!')
global use_channel
url = 'http://127.0.0.1:8080/PeopleDetection/get_channel'
while True:
try:
r = requests.get(url=url)
use_channel = int(eval(r.content)['data'])
                print('Current channel: ' + str(use_channel))
except:
pass
time.sleep(5)
second_thread = SecondThread()
second_thread.start()
tf_thread = TFThread()
tf_thread.start()
push_thread = PushThread()
push_thread.start()
get_channel_thread = GetChannelThread()
#get_channel_thread.start()
```
|