metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "johanattia/tensorflow-saint",
"score": 3
} |
#### File: src/losses/simclr.py
```python
from typing import Any, Dict
import tensorflow as tf
LARGE_NUM = 1e9
class SimCLRLoss(tf.keras.losses.Loss):
"""SimCLR loss implementation for self-supervised learning.
Official references from Google:
* Article: `Big Self-Supervised Models are Strong Semi-Supervised Learners` (https://arxiv.org/abs/2006.10029)
* Code: https://github.com/google-research/simclr/blob/master/tf2/objective.py
Example:
```python
>>> import tensorflow as tf
>>> import structured_transformers
>>> loss_fn = structured_transformers.losses.SimCLR()
```
"""
def __init__(
self,
temperature: float = 0.05,
margin: float = 0.001,
reduction=tf.keras.losses.Reduction.AUTO,
name="SimCLR",
):
"""_summary_
Args:
temperature (float, optional): _description_. Defaults to 0.05.
margin (float, optional): _description_. Defaults to 0.001.
reduction (_type_, optional): _description_. Defaults to tf.keras.losses.Reduction.AUTO.
name (_type_, optional): _description_. Defaults to `SimCLR`.
"""
super().__init__(reduction=reduction, name=name)
self.temperature = temperature
self.margin = margin
def call(self, hidden1: tf.Tensor, hidden2: tf.Tensor) -> tf.Tensor:
"""_summary_
Args:
hidden1 (tf.Tensor): _description_
hidden2 (tf.Tensor): _description_
Returns:
tf.Tensor: _description_
"""
batch_size = tf.shape(hidden1)[0]
diag = tf.eye(batch_size)
hidden1 = tf.math.l2_normalize(hidden1, axis=1)
hidden2 = tf.math.l2_normalize(hidden2, axis=1)
return NotImplemented
def get_config(self) -> Dict[str, Any]:
base_config = super().get_config()
config = {
"temperature": self.temperature,
"margin": self.margin,
}
return dict(list(base_config.items()) + list(config.items()))
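# NOTE: `call` above is still a stub (it returns NotImplemented after normalizing the
# inputs). For orientation only, a hedged sketch of the NT-Xent computation from the
# referenced Google implementation would look roughly like:
#
#     logits_ab = tf.matmul(hidden1, hidden2, transpose_b=True) / self.temperature
#     logits_aa = tf.matmul(hidden1, hidden1, transpose_b=True) / self.temperature - diag * LARGE_NUM
#     labels = tf.one_hot(tf.range(batch_size), 2 * batch_size)
#     loss = tf.nn.softmax_cross_entropy_with_logits(
#         labels=labels, logits=tf.concat([logits_ab, logits_aa], axis=1))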
``` |
{
"source": "JohanBekker/Covid19-Dashboard",
"score": 2
} |
#### File: Covid19-Dashboard/app/app.py
```python
import dash
from dash import dcc, html, Input, Output
import dash_labs as dl
import dash_bootstrap_components as dbc
from dash_bootstrap_templates import load_figure_template
#from dash_bootstrap_templates import ThemeSwitchAIO
import pandas as pd
from datetime import datetime
import time
import numpy as np
from sqlalchemy import create_engine
import os
load_figure_template("lux")
url_theme1 = dbc.themes.LUX
app = dash.Dash(__name__, plugins=[
dl.plugins.pages], external_stylesheets=[url_theme1])
server = app.server
def last_date_per_column(df):
columns_dict = {'besmettingen': 'Total_reported', 'ziekenhuis': 'Hospital_admission',
'ic': 'IC_admission', 'overleden': 'Deceased'}
index_dict = {}
for key in columns_dict.keys():
search = 1
index = -1
while search:
if np.isnan(df[columns_dict[key]].iat[index]):
index += -1
else:
index_dict[key] = index
search = 0
return index_dict
DB_URL = os.environ.get('DATABASE_URL')
DB_URL = DB_URL.replace('postgres', 'postgresql')
# DB_URL =
def load_data(DB_URL):
#con = psycopg2.connect(DB_URL)
#cur = con.cursor()
engine = create_engine(DB_URL, echo=False)
query = """select * from final_df"""
df = pd.read_sql(query, engine)
engine.dispose()
return df
#filepath = './files/final_df.csv'
#df_data = pd.read_csv(filepath, sep=',')
df_data = load_data(DB_URL)
last_date_dict = last_date_per_column(df_data)
time_since_update = time.time()
columns_dict = {'besmettingen': 'Total_reported', 'ziekenhuis': 'Hospital_admission',
'ic': 'IC_admission', 'overleden': 'Deceased'}
def Card_generator(column, data, date_dict):
# Generate the cards in the navigation bar on the left side
card_dict = {'besmettingen': 'Besmettingen', 'ziekenhuis': 'Ziekenhuis Opnames',
'ic': 'IC Opnames', 'overleden': 'Overleden'}
data_dict = {'besmettingen': 'Total_reported', 'ziekenhuis': 'Hospital_admission',
'ic': 'IC_admission', 'overleden': 'Deceased'}
index = date_dict[column]
aantal = data[data_dict[column]].iat[index]
date = data.Date_of_statistics.iat[index]
date = datetime.strptime(date, "%Y-%m-%d")
date = date.strftime('%d-%m-%Y')
card_content = [
dbc.CardHeader(card_dict[column], className="card-header"),
dbc.CardBody(
[
html.H5(aantal, className="card-title"),
html.P(
date,
className="card-text",
),
]
),
]
return card_content
# Create contact icons in the header top right
contact = dbc.Row(
[
dbc.Col(html.A(
html.Img(src='assets/images/linkedin.png', className='img'),
href='https://www.linkedin.com/in/johan-bekker-3501a6168/'
)),
# dbc.Col(html.A(
# html.Img(src='/assets/images/twitter.png',className='img'),
# href=''
# )),
# dbc.Col(html.A(
# html.Img(src='/assets/images/kaggle.png',className='img'),
# href=''
# )),
dbc.Col(html.A(
html.Img(src='/assets/images/github.png', className='img',
id='github'),
href='https://github.com/JohanBekker'
)),
# dbc.Col(
# dbc.Button(
# "Portfolio", color="link", className="ms-2", n_clicks=0,
# href='https://www.datascienceportfol.io/JohanBekker'
# ),
# width="auto",
# ),
],
className="g-0 ms-auto flex-nowrap mt-3 mt-md-0",
align="center",
)
# Navigation bar header
navbar = dbc.Navbar(
dbc.Container(
[
html.A(
# Use row and col to control vertical alignment of logo / brand
dbc.Row(
[
#dbc.Col(html.Img(src=PLOTLY_LOGO, height="30px")),
dbc.Col(dbc.NavbarBrand(
"Covid19 Dashboard Nederland")),
],
align="center",
className="g-0",
),
# href="https://plotly.com", #It's possible to make the header title a link
style={"textDecoration": "none"},
),
dbc.NavbarToggler(id="navbar-toggler", n_clicks=0),
dbc.Collapse(
contact, # add contact icons to header
id="navbar-collapse",
is_open=False,
navbar=True,
),
], fluid=True
),
color="dark",
dark=True,
)
# the style arguments for the sidebar.
SIDEBAR_STYLE = {
# "position": "fixed",
# "position": "left",
# "top": "5rem",
"height": "150vh",
"left": 0,
"bottom": 0,
# "width": "14rem",
"padding": "2rem 1rem",
"background-color": "#f8f9fa",
# "border": "#2C3E50",
# "outline": "#2C3E50",
}
# Put the links to the pages in a list to use in the navigation sidebar
pages = [page for page in dash.page_registry.values()]
# Put together the sidebar components
sidebar = html.Div(
[
#html.H2("Sidebar", className="display-4"),
# html.Hr(),
dbc.Nav(
[dbc.NavLink("Covid Cijfers", href=pages[0]["path"], active="exact"),
dbc.NavLink("Vaccinatie Cijfers",
href=pages[2]["path"], active="exact"),
dbc.NavLink("Databronnen",
href=pages[1]["path"], active="exact"),
html.Br(),
# html.P(
# "Laatste data:", className="text-center"
# ),
dbc.Card(Card_generator('besmettingen', df_data, last_date_dict),
color="success", inverse=True, id="besmettingen-card"),
html.Hr(),
dbc.Card(Card_generator('ziekenhuis', df_data, last_date_dict),
color="info", inverse=True, id="ziekenhuis-card"),
html.Hr(),
dbc.Card(Card_generator('ic', df_data, last_date_dict),
color="warning", inverse=True, id="ic-card"),
html.Hr(),
dbc.Card(Card_generator('overleden', df_data, last_date_dict),
color="danger", inverse=True, id="overleden-card"),
],
vertical=True,
pills=True,
),
],
style=SIDEBAR_STYLE
)
# Put all components together in the layout (header, sidebar, page)
app.layout = dbc.Container([
dbc.Row(navbar),
dbc.Row(
[
dbc.Col(sidebar, width=2, style={"padding": "0rem"}),
dbc.Col(dl.plugins.page_container, width={
'size': 10, "offset": 0}, className="g-0"),
]),
dcc.Interval(id='interval1', interval=3600 * 1000, n_intervals=0),
#html.Div(id='placeholder', style={'display':'none'}),
], fluid=True)
@app.callback([Output('besmettingen-card', 'children'),
Output('ziekenhuis-card', 'children'),
Output('ic-card', 'children'),
Output('overleden-card', 'children')],
Input('interval1', 'n_intervals'))
def update_cards(n):
print('Update Cards')
global df_data, last_date_dict
df_data = load_data(DB_URL)
#df_data = pd.read_csv(filepath, sep=',')
last_date_dict = last_date_per_column(df_data)
content1 = Card_generator('besmettingen', df_data, last_date_dict)
content2 = Card_generator('ziekenhuis', df_data, last_date_dict)
content3 = Card_generator('ic', df_data, last_date_dict)
content4 = Card_generator('overleden', df_data, last_date_dict)
return content1, content2, content3, content4
# if __name__ == "__main__":
# app.run_server(debug=True, host='0.0.0.0')
# app.run_server(debug=True)
```
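A side note on `last_date_per_column` in `app.py` above: pandas can find the last non-NaN row of a column directly. A hedged, roughly equivalent sketch (it returns row labels rather than the negative offsets used above, so lookups would use `.loc` instead of `.iat`):
```python
def last_date_per_column_alt(df):
    """Sketch: map each card key to the label of its last non-NaN row."""
    columns_dict = {'besmettingen': 'Total_reported', 'ziekenhuis': 'Hospital_admission',
                    'ic': 'IC_admission', 'overleden': 'Deceased'}
    return {key: df[col].last_valid_index() for key, col in columns_dict.items()}
```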
#### File: app/pages/vaccinatiecijfers.py
```python
import pandas as pd
import dash_bootstrap_components as dbc
from dash import dcc, html, Input, Output, callback, dash_table
import dash
from sqlalchemy import create_engine
import os
dash.register_page(__name__)
DB_URL = os.environ.get('DATABASE_URL')
DB_URL = DB_URL.replace('postgres', 'postgresql')
# DB_URL =
def load_data(DB_URL):
#con = psycopg2.connect(DB_URL)
#cur = con.cursor()
engine = create_engine(DB_URL, echo=False)
query = """select * from vaccinatiegraad_per_wijk_per_week"""
df = pd.read_sql(query, engine)
engine.dispose()
return df
df = load_data(DB_URL)
df.reset_index(inplace=True, drop=True)
#filepath4 = './files/vaccinatiegraad_per_wijk_per_week.csv'
#df = pd.read_csv(filepath4)
#df.reset_index(inplace=True, drop=True)
columns = ['Date_of_statistics', 'Region_name', 'Birth_year',
'Vaccination_coverage_partly', 'Vaccination_coverage_completed', 'Age_group']
column_dict = {'Date_of_statistics': 'Datum', 'Region_name': 'Naam', 'Birth_year': 'Geboortejaar',
'Vaccination_coverage_partly': 'Gedeeltelijk Gevaccineerd', 'Vaccination_coverage_completed': 'Volledig Gevaccineerd', 'Age_group': 'Leeftijdsgroep'}
df_veiligheidsregio = df[df['Region_level'] ==
'Veiligheidsregio'].reset_index(drop=True)
df_veiligheidsregio = df_veiligheidsregio[columns]
df_gemeente = df[df['Region_level'] == 'Gemeente'].reset_index(drop=True)
df_gemeente = df_gemeente[columns]
layout = html.Div(
[
dbc.Card(
[
dbc.CardHeader(
dbc.Tabs(
[
dbc.Tab(label="Gemeente", tab_id="gemeente",
tab_style={"cursor": "pointer"},
label_style={"color": "#1A1A1A"}),
dbc.Tab(label="Veiligheidsregio", tab_id="veiligheidsregio",
tab_style={"cursor": "pointer"},
label_style={"color": "#1A1A1A"}),
],
id="tabs",
active_tab="gemeente",
)
),
dbc.CardBody([
dbc.Row([
dbc.Col(dash_table.DataTable(id='vaccination_table',
columns=[
{'id': key, 'name': column_dict[key]} for key in column_dict.keys()],
page_size=200,
style_table={
'height': '800px', 'whiteSpace': 'normal'},
fixed_rows={'headers': True},
style_cell_conditional=[{'if': {'column_id': 'Region_name'},
'width': '18%'},
{'if': {'column_id': 'Date_of_statistics'},
'width': '10%'},
{'if': {'column_id': 'Birth_year'},
'width': '11%'},
{'if': {'column_id': 'Age_group'},
'width': '14%'}],
style_data={'color': 'black',
'backgroundColor': 'white'},
style_data_conditional=[
{'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(220, 220, 220)'}
],
style_header={
'backgroundColor': '#343A40',
'color': 'white',
'fontWeight': 'bold'
}
),
id="card-content", className="card-text",
width={'size': 12, "offset": 0}),
]),
])
], style={"height": "150vh", "outline": "#2C3E50"},
),
dcc.Interval(id='interval3', interval=3600 * 1000, n_intervals=0),
html.Div(id='placeholder3', style={'display': 'none'}),
])
@callback(Output('vaccination_table', 'data'),
Input('tabs', 'active_tab'))
def update_table(tabs):
if tabs == "gemeente":
return df_gemeente.to_dict('records')
else:
return df_veiligheidsregio.to_dict('records')
@callback(Output('placeholder3', 'children'),
Input('interval3', 'n_intervals'))
def update_cards(n):
global df, df_veiligheidsregio, df_gemeente
#df = pd.read_csv(filepath4)
#df.reset_index(inplace=True, drop=True)
df = load_data(DB_URL)
df.reset_index(inplace=True, drop=True)
df_veiligheidsregio = df[df['Region_level'] ==
'Veiligheidsregio'].reset_index(drop=True)
df_veiligheidsregio = df_veiligheidsregio[columns]
df_gemeente = df[df['Region_level'] == 'Gemeente'].reset_index(drop=True)
df_gemeente = df_gemeente[columns]
return {}
``` |
{
"source": "johanbjorn/search-real-time-video-feed",
"score": 3
} |
#### File: source/captionlambda3/old.py
```python
import json
import os
import sys

import boto3

# NOTE: TMP_DIR, TRANSCRIBE_LAMBDA_ARN, make_random_string, upload_file_s3 and
# get_presigned_url_s3 are defined elsewhere in the original module.
def get_text_from_transcribe(ts_file_path):
# Check to make sure that TS file exists
if not os.path.isfile(ts_file_path):
print("EXCEPTION: ts file doesn't exist to make PCM file for Transcribe : " + ts_file_path)
sys.exit()
# Use ffmpeg to create PCM audio file for Transcribe
output_pcm = TMP_DIR + str(make_random_string()) + '.pcm'
cmd = './ffmpeg -hide_banner -nostats -loglevel error -y -i ' + ts_file_path + ' -vn -f s16le -acodec pcm_s16le -ac 1 -ar 16000 ' + output_pcm + ' > /dev/null 2>&1 '
wav_ffmpeg_response = os.popen(cmd).read()
# After FFMPEG send the file into S3 and generate presigned URL.
s3_key = 'audio_files/' + output_pcm.split('/')[-1]
upload_file_s3(output_pcm, s3_key)
presigned_url = get_presigned_url_s3(s3_key)
# Remove the file I just uploaded to s3
os.remove(output_pcm)
# Use Presigned url with the API for security.
client = boto3.client('lambda')
try:
response = client.invoke(FunctionName=TRANSCRIBE_LAMBDA_ARN, Payload=json.dumps({'body' : presigned_url}))
json_res = json.loads(json.loads(response['Payload'].read())['body'])
# Get Text
text = json_res['transcript']
print("DEBUG: Text returned from Transcribe Streaming is: " + text)
except Exception as e:
print("EXCEPTION: AWS Transcribe Streaming is throttling! Putting empty subtitle into stream. Increase Transcribe Streaming Limits: " + str(e))
# Set the text to nothing.
text = ""
return text
``` |
{
"source": "JohanBlome/micapp",
"score": 3
} |
#### File: micapp/scripts/audiocmp.py
```python
import argparse
import math
import os
import pandas as pd
import numpy as np
import soundfile as sf
import sys
default_values = {
'debug': 0,
'mode': 'safe',
'output': 'audio_compare',
}
def dBToFloat(val):
""" "
Calculates a float value ranging from -1.0 to 1.0
Where 1.0 is 0dB
"""
return 10 ** (val / 20.0)
def floatToDB(val):
"""
Calculates the dB values from a floating point representation
ranging between -1.0 to 1.0 where 1.0 is 0 dB
"""
if val <= 0:
return -100.0
else:
return 20.0 * math.log10(val)
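# Worked example for the two helpers above: floatToDB(0.5) ≈ -6.02 (i.e. 20 * log10(0.5)),
# and dBToFloat(-6.02) ≈ 0.5, so the two functions are inverses of each other.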
def amplify_and_write_file(inputfile, outputfile, gain_dB):
blocksize = inputfile.channels * inputfile.samplerate
block_counter = 0
inputfile.seek(0)
factor = dBToFloat(gain_dB)
while inputfile.tell() < inputfile.frames:
data = inputfile.read(blocksize)
for channel in range(0, inputfile.channels):
if inputfile.channels == 1:
data_ = data
else:
data_ = data[:, channel]
outputfile.write(data_ * factor)
block_counter += 1
def audio_levels(audiofile, start=0, end=-1):
"""
Calculates rms and max peak level in dB
Input: soundfile, start frame, end frame
Output: rms, peak, crest, bias, floor
where
* peak is the highest nominal value
* rms is the total average of the squared values
* crest is the ratios between peak and rms
* bias is potential dc bias (low frequency residual)
* floor is the lowest rms value in a non overlapping 250ms block
"""
blocksize = audiofile.channels * int(audiofile.samplerate/4)
peak_level = [0] * audiofile.channels
rms = [0] * audiofile.channels
peak = [0] * audiofile.channels
total_level = [0] * audiofile.channels
crest = [0] * audiofile.channels
papr = [0] * audiofile.channels
bias = [0] * audiofile.channels
floor = [0] * audiofile.channels
block_counter = 0
audiofile.seek(start)
while audiofile.tell() < audiofile.frames:
tmp = [0] * audiofile.channels
data = audiofile.read(blocksize)
for channel in range(0, audiofile.channels):
if audiofile.channels == 1:
data_ = data
else:
data_ = data[:, channel]
total_level[channel] += np.sum(data_)
rms[channel] += np.mean(np.square(data_))
peak[channel] = max(abs(data_))
if peak[channel] > peak_level[channel]:
peak_level[channel] = peak[channel]
tmp[channel] = floatToDB(np.mean(np.square(data_)))
if tmp[channel] < floor[channel]:
floor[channel] = round(tmp[channel], 2)
block_counter += 1
for channel in range(0, audiofile.channels):
rms[channel] = np.sqrt(rms[channel] / block_counter)
crest[channel] = round(peak_level[channel] / rms[channel], 2)
papr[channel] = round(floatToDB(peak_level[channel] / rms[channel]), 2)
# sign is not important now
bias[channel] = round(
floatToDB(abs(
total_level[channel] /
(block_counter * blocksize))
),
2,
)
rms[channel] = round(floatToDB(rms[channel]), 2)
peak_level[channel] = round(floatToDB(peak_level[channel]), 2)
return rms, peak_level, crest, papr, bias, floor
MODE_CHOICES = {
'safe': 'Check the highest crest factor and use that to adjust rms',
'rms': 'rms to -24dB per file, clipping allowed',
'peak': 'peak to -1 per file',
'rms_common': 'use a rms value with no clipping in any file, max -1dB',
'peak_common': 'use a peak value with no clipping in any file, max -1dB',
}
def adjust_row(row, mode, target, workdir):
if mode == 'rms':
diff = target - row.rms
elif mode == 'peak':
diff = target - row.peak
elif mode == 'safe':
diff = target - row.rms
elif mode == 'rms_common':
diff = target - row.rms
elif mode == 'peak_common':
diff = target - row.peak
adjust(row.af, mode, diff, workdir)
row.af.close()
RMS_TARGET = -24
PEAK_TARGET = -1
def get_target(mode, data):
# calculate the target
if mode == 'rms':
return RMS_TARGET
elif mode == 'peak':
return PEAK_TARGET
elif mode == 'safe':
# take the one with largest crest factor and use
# the crest to calculate a rms value (add 1 dB for safety)
return -data['papr'].max() - 1
elif mode == 'rms_common':
# take the one with highest peak and use
# that to calculate a peak adjusted value
return data['rms'].max()
elif mode == 'peak_common':
# take the one with highest peak and use
# that to calculate a peak adjusted value
return data['peak'].max()
def adjust(audiofile, suffix, adjustment_db, workdir):
sign = ''
if adjustment_db > 0:
sign = '+'
base_file_name = os.path.basename(audiofile.name).strip()
new_name = (f'{workdir}/{os.path.splitext(base_file_name)[0]}_'
f'{sign}{round(adjustment_db,2)}_{suffix}.wav')
output = sf.SoundFile(new_name, 'w', format='WAV', samplerate=48000,
channels=1, subtype='PCM_16', endian='FILE')
amplify_and_write_file(audiofile, output, adjustment_db)
output.close()
return new_name
def align(files, mode, workdir):
file_props = []
if not os.path.exists(workdir):
os.mkdir(workdir)
status_report_name = f'{workdir}/info.txt'
with open(status_report_name, 'w') as report:
report.write(f'alignment_mode: {mode}\n\n')
for fname in files:
af = sf.SoundFile(fname, 'r')
rms, peak_level, crest, papr, bias, floor = audio_levels(af)
file_props.append([af, fname, rms[0], peak_level[0], crest[0],
papr[0], bias[0], floor[0]])
report.write(f'{fname}\n')
report.write('\n rms : {0:4.1f} dB'.format(rms[0]))
report.write('\n peak : {0:4.1f} dB'.format(peak_level[0]))
report.write('\n crest: {0:4.1f}'.format(crest[0]))
report.write('\n papr: {0:4.1f} dB'.format(papr[0]))
report.write('\n bias : {0:4.1f} dB'.format(bias[0]))
report.write('\n floor: {0:4.1f} dB'.format(floor[0]))
report.write('\n____\n')
labels = ['af', 'filename', 'rms', 'peak', 'crest', 'papr', 'bias',
'floor']
data = pd.DataFrame.from_records(
file_props, columns=labels, coerce_float=True)
# do not print the SoundFile string
labels = labels[1:]
print(f'{data.to_csv(columns=labels)}')
# get the target for adjustment
target = get_target(mode, data)
# run the adjustment
data.apply(adjust_row, args=(mode, target, workdir), axis=1)
def get_options(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'-d', '--debug', action='count',
dest='debug', default=default_values['debug'],
help='Increase verbosity (use multiple times for more)',)
parser.add_argument(
'--quiet', action='store_const',
dest='debug', const=-1,
help='Zero verbosity',)
parser.add_argument(
'--mode', type=str,
default=default_values['mode'],
choices=MODE_CHOICES.keys(),
metavar='%s' % (' | '.join('{}: {}'.format(k, v) for k, v in
MODE_CHOICES.items())),
help='function arg',)
parser.add_argument(
'-o', '--output', type=str,
default=default_values['output'])
parser.add_argument(
'files', nargs='+', help='file(s) to analyze (pcm mono)')
options = parser.parse_args(argv[1:])
if len(argv) == 1:
parser.print_help()
sys.exit(0)
return options
def main(argv):
options = get_options(argv)
align(options.files, options.mode, options.output)
if __name__ == '__main__':
main(sys.argv)
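# Example invocation (file names are hypothetical):
#   python3 audiocmp.py --mode safe -o audio_compare take1.wav take2.wav
# This writes gain-adjusted copies plus an info.txt report into the output directory.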
``` |
{
"source": "johan--/botforms",
"score": 2
} |
#### File: botforms/botform/models.py
```python
from __future__ import unicode_literals
from django.db import models
from django.db.models.signals import post_save
from .tasks import generate_pdf, notify_webhook
class Forms(models.Model):
title = models.CharField(max_length=140)
description = models.TextField(blank=True, null=True)
schema = models.TextField(blank=True, null=True, help_text="JSON rep of the form schema")
# PDF Output fields
generate_pdf = models.BooleanField(default=False)
pdf_output_template = models.TextField(blank=True, null=True)
# Webhook url
webhook_url = models.URLField(blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True)
updated_at = models.DateTimeField(auto_now=True, null=True, blank=True)
class Submissions(models.Model):
form = models.ForeignKey(Forms, related_name="submissions")
data = models.TextField(blank=True, null=True, help_text="JSON rep of user input")
pdf = models.URLField(max_length=400, blank=True, null=True, help_text='PDF Output URL')
reference = models.CharField(max_length=45, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True)
updated_at = models.DateTimeField(auto_now=True, null=True, blank=True)
def on_new_submission(sender, instance, created, **kwargs):
    try:
        if created:
            submission_id = instance.pk
            if instance.form.generate_pdf:
                generate_pdf.delay({'submission_id': submission_id})
            if instance.form.webhook_url:
                notify_webhook.delay({
                    'submission_id': submission_id,
                    'webhook_url': instance.form.webhook_url
                })
except Exception as ex:
print(str(ex))
post_save.connect(on_new_submission, sender=Submissions)
```
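To illustrate how the signal above fires, here is a hypothetical sketch (for example in a Django shell); the field values are assumptions:
```python
from botform.models import Forms, Submissions

# Creating a submission triggers post_save -> on_new_submission, which queues the
# generate_pdf and notify_webhook Celery tasks when the form is configured for them.
form = Forms.objects.create(title="Demo form", generate_pdf=True,
                            webhook_url="https://example.com/hook")
Submissions.objects.create(form=form, data='{"field": "value"}')
```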
#### File: botforms/botforms/celery.py
```python
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
BASE_PATH = os.path.dirname(os.path.abspath('manage.py'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'botforms.settings')
app = Celery('botforms')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
```
#### File: botforms/botform/tasks.py
```python
from celery import task
from xhtml2pdf import pisa
from slugify import slugify
from django.conf import settings
from django.forms.models import model_to_dict
from django.template import loader, Context, Template
import json
import os
import requests
def convertHtmlToPdf(sourceHtml, outputFilename):
"""
    Utility function: renders an HTML string to a PDF file and returns True on success.
"""
pdf_generated = True
with open(outputFilename, "w+b") as resultFile:
pisaStatus = pisa.CreatePDF(sourceHtml, dest=resultFile)
if pisaStatus.err:
pdf_generated = False
return pdf_generated
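# Hypothetical standalone use of the helper above:
#   ok = convertHtmlToPdf("<h1>Invoice</h1>", "/tmp/invoice.pdf")  # True if the PDF was written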
@task
def generate_pdf(payload):
"""
Generate pdf based on submission
"""
from botform.models import Submissions
submission_id = payload.get('submission_id')
submission_obj = Submissions.objects.get(pk=submission_id)
form_obj = submission_obj.form
submission_data = json.loads(submission_obj.data)
form_slug = slugify(form_obj.title)
domain = os.environ.get('DOMAIN')
file_name = '%s/%s-%s.pdf' % (settings.MEDIA_ROOT, form_slug, submission_id)
pdf_url = 'http://%s%s/%s-%s.pdf' % (domain, settings.MEDIA_URL, form_slug, submission_id)
context = Context(
{
'FORM': model_to_dict(form_obj),
'SUBMISSION': model_to_dict(submission_obj),
'SUBMISSION_DATA': submission_data
}
)
pdf_output_template = form_obj.pdf_output_template
template = Template(str(pdf_output_template))
source_html = template.render(context)
pdf_generated = convertHtmlToPdf(source_html, file_name)
if pdf_generated:
submission_obj.pdf = pdf_url
submission_obj.save()
@task
def notify_webhook(payload):
try:
from botform.models import Submissions
from botform.serializers import SubmissionsSer
submission_id = payload.get('submission_id')
submission_obj = Submissions.objects.get(pk=submission_id)
webhook_url = payload.get('webhook_url')
req_data = SubmissionsSer(submission_obj, many=False).data
res = requests.post(webhook_url, json=req_data)
if res.status_code != 200:
raise Exception(res.text)
except Exception as ex:
print("Error: %s " % str(ex))
``` |
{
"source": "johan-boule/fastcov",
"score": 3
} |
#### File: test/unit/test_diff_parser.py
```python
import pytest
import fastcov
def test_DiffParser_parseTargetFile_usual():
actual = fastcov.DiffParser()._parseTargetFile('+++ file1/tests \n')
assert actual == 'file1/tests'
actual = fastcov.DiffParser()._parseTargetFile('+++ test2')
assert actual == 'test2'
def test_DiffParser_parseTargetFile_empty():
actual = fastcov.DiffParser()._parseTargetFile('+++ ')
assert actual == ''
def test_DiffParser_parseTargetFile_with_prefix():
actual = fastcov.DiffParser()._parseTargetFile('+++ b/dir2/file\n')
assert actual == 'dir2/file'
actual = fastcov.DiffParser()._parseTargetFile('+++ b/b/file\n')
assert actual == 'b/file'
actual = fastcov.DiffParser()._parseTargetFile('+++ bfile/b\n')
assert actual == 'bfile/b'
def test_DiffParser_parseTargetFile_with_timestamp():
actual = fastcov.DiffParser()._parseTargetFile('+++ base/test_file.txt\t2002-02-21 23:30:39.942229878 -0800')
assert actual == 'base/test_file.txt'
def test_DiffParser_parseHunkBoundaries_usual():
tstart,tlen, sstart, slen = fastcov.DiffParser()._parseHunkBoundaries('@@ -1,2 +3,4 @@ zero line \n', 1)
assert sstart == 1
assert slen == 2
assert tstart == 3
assert tlen == 4
def test_DiffParser_parseHunkBoundaries_count_missed():
tstart,tlen, sstart, slen = fastcov.DiffParser()._parseHunkBoundaries('@@ -11 +13,14 @@ zero line \n', 1)
assert sstart == 11
assert slen == 1
assert tstart == 13
assert tlen == 14
tstart, tlen, sstart, slen = fastcov.DiffParser()._parseHunkBoundaries('@@ -11 +13 @@ zero line \n', 1)
assert sstart == 11
assert slen == 1
assert tstart == 13
assert tlen == 1
tstart, tlen, sstart, slen = fastcov.DiffParser()._parseHunkBoundaries('@@ -11,12 +13 @@ zero line \n', 1)
assert sstart == 11
assert slen == 12
assert tstart == 13
assert tlen == 1
def test_DiffParser_parseHunkBoundaries_invalid_hunk():
with pytest.raises(fastcov.DiffParseError) as e:
fastcov.DiffParser()._parseHunkBoundaries('@@ 1112 @@ zero line \n', 1)
with pytest.raises(fastcov.DiffParseError) as e:
fastcov.DiffParser()._parseHunkBoundaries('@@ @@ zero line \n', 1)
def test_DiffParser_parseDiffFile_empty():
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/empty.diff', '/base')
assert not result
def test_DiffParser_parseDiffFile_no_files():
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/no_files.diff', '/base')
assert not result
def test_DiffParser_parseDiffFile_deleted_file():
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/deleted_file.diff', '/base')
assert not result
def test_DiffParser_parseDiffFile_file_deleted_added_modifiled():
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/file_del_mod_add_lines.diff', '/base')
assert result == {'/base/test.txt': {4, 8}}
def test_DiffParser_parseDiffFile_file_deleted_added_check_other_base():
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/file_del_mod_add_lines.diff', '/base/base2')
assert result == {'/base/base2/test.txt': {4, 8}}
def test_DiffParser_parseDiffFile_file_renamed_and_edited():
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/renamed_edited.diff', '/base')
assert result == {'/base/test5.txt': {8}}
def test_DiffParser_parseDiffFile_few_files_with_few_added_hunks():
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/few_files_with_few_added_hunks.diff', '/base')
assert result == {"/base/test.txt":{2,3,4,5,6,7,8,9,10,11,23,24,25,26,27,28,29,30,31,32,33},
"/base/test2.txt":{10,11,12,13},
"/base/test3.txt":{5,6,7,8,9,10,11,21,22,23,24,25}
}
def test_DiffParser_parseDiffFile_few_files_with_few_modified_hunks():
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/few_files_with_few_modified_hunks.diff', '/base')
assert result == {"/base/test.txt":{2,3,4,5,6,7,8,9,10,25,26,27,28,29,30,31,32,33},
"/base/test2.txt":{10,11,12},
"/base/test3.txt":{2,3,4,22,23,24}
}
def test_DiffParser_parseDiffFile_invalid():
with pytest.raises(fastcov.DiffParseError) as e:
fastcov.DiffParser()._parseHunkBoundaries('@@ 1112 @@ zero line \n', 1)
def test_DiffParser_parseDiffFile_hunk_longer_than_expected():
with pytest.raises(fastcov.DiffParseError) as e:
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/hunk_longer_than_expected.diff', '/base')
with pytest.raises(fastcov.DiffParseError) as e:
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/hunk_longer_than_expected2.diff', '/base')
def test_DiffParser_parseDiffFile_last_hunk_longer_than_expected():
with pytest.raises(fastcov.DiffParseError) as e:
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/last_hunk_longer_than_expected.diff', '/base')
with pytest.raises(fastcov.DiffParseError) as e:
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/last_hunk_longer_than_expected2.diff', '/base')
def test_DiffParser_parseDiffFile_hunk_shorter_than_expected():
with pytest.raises(fastcov.DiffParseError) as e:
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/hunk_shorter_than_expected.diff', '/base')
with pytest.raises(fastcov.DiffParseError) as e:
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/hunk_shorter_than_expected2.diff', '/base')
def test_DiffParser_parseDiffFile_last_hunk_shorter_than_expected():
with pytest.raises(fastcov.DiffParseError) as e:
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/last_hunk_shorter_than_expected.diff', '/base')
with pytest.raises(fastcov.DiffParseError) as e:
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/last_hunk_shorter_than_expected2.diff', '/base')
def test_DiffParser_parseDiffFile_bad_encoding():
result = fastcov.DiffParser().parseDiffFile('diff_tests_data/bad_encoding.diff', '/base')
assert result == {'/base/test.txt': {4, 8}}
```
#### File: test/unit/test_version_parse.py
```python
import fastcov
def test_ubuntu_18_04():
version_str = "gcov (Ubuntu 7.3.0-27ubuntu1~18.04) 7.3.0"
assert fastcov.parseVersionFromLine(version_str) == (7,3,0)
def test_ubuntu_test_ppa():
version_str = "gcov (Ubuntu 9.1.0-2ubuntu2~16.04) 9.1.0"
assert fastcov.parseVersionFromLine(version_str) == (9,1,0)
def test_experimental():
version_str = "gcov (GCC) 9.0.1 20190401 (experimental)"
assert fastcov.parseVersionFromLine(version_str) == (9,0,1)
def test_upstream():
version_str = "gcov (GCC) 9.1.0"
assert fastcov.parseVersionFromLine(version_str) == (9,1,0)
def test_no_version():
version_str = "gcov (GCC)"
assert fastcov.parseVersionFromLine(version_str) == (0,0,0)
```
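The tests above pin down the expected behaviour of `parseVersionFromLine`. A minimal sketch consistent with them (not necessarily fastcov's actual implementation) is:
```python
import re

def parse_version_from_line(line):
    """Return the first x.y.z version found in a gcov banner, or (0, 0, 0)."""
    match = re.search(r"(\d+)\.(\d+)\.(\d+)", line)
    if not match:
        return (0, 0, 0)
    return tuple(int(part) for part in match.groups())
```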
#### File: fastcov/utils/fastcov_summary.py
```python
SCRIPT_DESCRIPTION = """
Author: <NAME>
https://github.com/RPGillespie6/fastcov
A simple utility for summarizing a fastcov coverage file and returning non-zero rc if coverage thresholds are not met.
Sample Usage:
$ ./fastcov_summary.py coverage.json --line-coverage-threshold 70
"""
import sys
import json
import time
import argparse
from collections import defaultdict
# Interesting metrics
START_TIME = time.monotonic()
# For when things go wrong...
# Start error codes at 3 because 1-2 are special
# See https://stackoverflow.com/a/1535733/2516916
EXIT_CODE = 0
EXIT_CODES = {
"function_coverage_threshold": 3,
"line_coverage_threshold": 4,
"branch_coverage_threshold": 5,
}
def setExitCode(key):
global EXIT_CODE
EXIT_CODE = EXIT_CODES[key]
def stopwatch():
"""Return number of seconds since last time this was called."""
global START_TIME
end_time = time.monotonic()
delta = end_time - START_TIME
START_TIME = end_time
return delta
def parseArgs():
parser = argparse.ArgumentParser(description=SCRIPT_DESCRIPTION)
parser.add_argument('fastcov_coverage', help='The fastcov coverage file to summarize')
parser.add_argument('-f', '--function-coverage-threshold', type=float, default=0, help='Fail if function coverage falls below this %%')
parser.add_argument('-l', '--line-coverage-threshold', type=float, default=0, help='Fail if line coverage falls below this %%')
parser.add_argument('-b', '--branch-coverage-threshold', type=float, default=0, help='Fail if branch coverage falls below this %%')
return parser.parse_args()
def main():
args = parseArgs()
with open(args.fastcov_coverage) as f:
coverage = json.load(f)
totals = {
"function": {"percent": 0, "total": 0, "hit": 0},
"line": {"percent": 0, "total": 0, "hit": 0},
"branch": {"percent": 0, "total": 0, "hit": 0},
}
for source in coverage["sources"].values():
ss = {
"function": defaultdict(bool),
"line": defaultdict(bool),
"branch": defaultdict(bool),
}
for test in source.values():
for name, info in test["functions"].items():
ss["function"][name] = ss["function"][name] or bool(info["execution_count"])
for line, hits in test["lines"].items():
ss["line"][line] = ss["line"][line] or bool(hits)
for line, branches in test["branches"].items():
for i, hits in enumerate(branches):
key = line + "_" + str(i)
ss["branch"][key] = ss["branch"][key] or bool(hits)
for t in ["function", "line", "branch"]:
totals[t]["total"] += len(ss[t].keys())
totals[t]["hit"] += sum(ss[t].values())
for t in ["function", "line", "branch"]:
if totals[t]["total"]:
totals[t]["percent"] = 100 * totals[t]["hit"] / totals[t]["total"]
total_processing_time = stopwatch()
print("Total Processing Time: {:.3f}s".format(total_processing_time))
print()
print("Coverage Rates")
print("==============")
for t in ["function", "line", "branch"]:
if totals[t]["total"]:
print("{}: {:.1f}% ({} of {})".format(t, totals[t]["percent"], totals[t]["hit"], totals[t]["total"]))
else:
print("{}: {:.1f}% (not measured)".format(t, totals[t]["percent"]))
print()
for t in ["function", "line", "branch"]:
arg_key = t + "_coverage_threshold"
threshold = getattr(args, arg_key)
if totals[t]["percent"] < threshold:
print("{} coverage threshold failed ({:.1f}% < {:.1f}%)".format(t, totals[t]["percent"], threshold))
setExitCode(arg_key)
elif threshold > 0:
print("{} coverage threshold passed ({}%)".format(t, threshold))
# If there was an error along the way...
if EXIT_CODE:
sys.exit(EXIT_CODE)
if __name__ == '__main__':
main()
``` |
{
"source": "johanbrandhorst/grpc-web-compatibility-test",
"score": 3
} |
#### File: proxy/grpcwsgi/server.py
```python
import sys
from wsgiref.simple_server import make_server
import sonora.wsgi
import echo_pb2
import echo_pb2_grpc
import grpc
class Echo(echo_pb2_grpc.EchoServiceServicer):
def Echo(self, request, context):
return echo_pb2.EchoResponse(message=request.message)
def EchoAbort(self, request, context):
context.set_code(grpc.StatusCode.ABORTED)
return echo_pb2.EchoResponse(message=request.message)
def ServerStreamingEcho(self, request, context):
for _ in range(request.message_count):
yield echo_pb2.EchoResponse(message=request.message)
def ServerStreamingEchoAbort(self, request, context):
for _ in range(request.message_count // 2):
yield echo_pb2.EchoResponse(message=request.message)
context.set_code(grpc.StatusCode.ABORTED)
def main(args):
grpc_wsgi_app = sonora.wsgi.grpcWSGI(None)
with make_server("", 8080, grpc_wsgi_app) as httpd:
echo_pb2_grpc.add_EchoServiceServicer_to_server(Echo(), grpc_wsgi_app)
httpd.serve_forever()
if __name__ == "__main__":
sys.exit(main(sys.argv))
``` |
{
"source": "JohanBrorson/download-chromedriver",
"score": 3
} |
#### File: JohanBrorson/download-chromedriver/download_chromedriver_test.py
```python
import pytest
from unittest import mock
from download_chromedriver import get_current_platform
@mock.patch('platform.system', mock.MagicMock(return_value="Linux"))
def test_get_current_platform_linux():
current_platform = get_current_platform()
assert current_platform == 'linux64'
@mock.patch('platform.system', mock.MagicMock(return_value="Windows"))
def test_get_current_platform_windows():
current_platform = get_current_platform()
assert current_platform == 'win32'
@mock.patch('platform.system', mock.MagicMock(return_value="Darwin"))
def test_get_current_platform_macos():
current_platform = get_current_platform()
assert current_platform == 'mac64'
@mock.patch('platform.system', mock.MagicMock(return_value="Unsupported"))
def test_get_current_platform_unsupported():
with pytest.raises(SystemExit) as pytest_wrapped_e:
get_current_platform()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
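# The tests above pin down the expected contract of get_current_platform (sketch, not
# the project's code): platform.system() "Linux" -> "linux64", "Windows" -> "win32",
# "Darwin" -> "mac64"; any other value exits with status code 2.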
``` |
{
"source": "johanbso/Machine-Learning-Project",
"score": 3
} |
#### File: johanbso/Machine-Learning-Project/preProcessing.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.feature_selection import RFECV
"""
Reads a CSV file into a pandas DataFrame
@:param fileName - Name of csv file
@:return dataFrame - Data frame containing the training/testing data
"""
def getData(fileName):
# importing the dataset
dataFrame = pd.read_csv(fileName)
return dataFrame
def rfe(df):
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
x = df.iloc[:, :-1]
y = df.iloc[:, -1]
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(x, y)
#print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
"""
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
"""
features = [f for f, s in zip(x.columns, rfecv.support_) if s]
features.append('diagnosis')
return features
"""
Plots the feature importances using a number of randomized decision trees
@:param dataFrame - dataFrame containing the training/testing data
"""
def featureImportance(dataFrame):
plt.rcParams['figure.figsize'] = 15, 6
sns.set_style("darkgrid")
x = dataFrame.iloc[:, :-1]
y = dataFrame.iloc[:, -1]
model = ExtraTreesClassifier()
model.fit(x, y)
feat_importances = pd.Series(model.feature_importances_, index=x.columns)
feat_importances.sort_values().plot(kind='barh')
plt.show()
return feat_importances.sort_values().index
"""
Removes all features not selected
@:param dataFrame - Data frame containing the training/testing data
@:param dropColumns - Vector containing the name of the columns to be removed from the dataset
@:return df - Data frame with only the selected features and the target values
"""
def featureReduction(dataFrame, dropColumns):
df = dataFrame.drop(dropColumns, axis=1)
return df
"""
Split the data in to 4 vectors, one containing the training features(x_train), one containing the training target values
(y_train), one containing the test features(x_test), and one containing the test target values(y_test).
@:param df - Data frame containing the training/testing data
@:return x_train, x_test, y_train, y_test - Vectors containing the data used in training and testing
"""
def divideData(df):
x = df.iloc[:, :(len(df.columns) - 1)].values
y = df.iloc[:, -1].values
# random state = ?
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
return x_train, x_test, y_train, y_test
"""
Standardize the features
@:param x_train - Training feature vector
@:param x_test - Test feature vector
@:return x_train, x_test - Vectors containing scaled data
"""
def featureScaler(x_train, x_test):
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
return x_train, x_test
"""
Plots features in pairs, in order to analyze correlation.
@:param df - Data frame containing the training data (target column: 'diagnosis')
"""
def runPairPlot(df):
targetvalue = "Diagnosis"
df = df.rename(columns={'diagnosis': targetvalue}, inplace=False)
df[targetvalue] = df[targetvalue].map({0: 'Malignant', 1: 'Benign'})
sns.pairplot(df, hue=targetvalue)
plt.show()
"""
Perform Principal Component Analysis, reducing the dimensions of the data set.
@:param df - DataFrame containing training and test data
@:targetvar - The target variable
@:return - Transformed DataFrame with fewer dimensions
"""
def pca(df, targetvar):
features = []
for feature in df:
if feature != targetvar:
features.append(feature)
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
y = df.loc[:,[targetvar]].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
pcaa = PCA(n_components=2)
principalComponents = pcaa.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['principal component 1', 'principal component 2'])
finalDf = pd.concat([principalDf, df[[targetvar]]], axis=1)
"""
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('Principal Component 1', fontsize=15)
ax.set_ylabel('Principal Component 2', fontsize=15)
ax.set_title('2 component PCA', fontsize=20)
targets = [0, 1]
colors = ['r', 'g']
for target, color in zip(targets, colors):
indicesToKeep = finalDf[targetvar] == target
ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1']
, finalDf.loc[indicesToKeep, 'principal component 2']
, c=color
, s=50)
ax.legend(['Diagnosis = Malignant ', 'Diagnosis = Benign'])
ax.grid()
plt.show()
"""
return finalDf
"""
Create boxplot, used in outlier detection.
@:param df - DataFrame containing training and test data
@:targetvar - The target variable
"""
def boxplot(df):
targetvalue = "Diagnosis"
df = df.rename(columns={'diagnosis': targetvalue}, inplace=False)
df[targetvalue] = df[targetvalue].map({0: 'Malignant', 1: 'Benign'})
y = df[targetvalue] # M or B
x = df.drop(targetvalue, axis=1)
ax = sns.countplot(y, label="Count") # M = 212, B = 357
plt.show()
data = x
data_n_2 = (data - data.mean()) / (data.std()) # standardization
data = pd.concat([y, data_n_2.iloc[:, 0:8]], axis=1)
data = pd.melt(data, id_vars=targetvalue,
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
sns.boxplot(x="features", y="value", hue=targetvalue, data=data)
plt.xticks(rotation=90)
plt.show()
data = pd.concat([y, data_n_2.iloc[:, 8:]], axis=1)
data = pd.melt(data, id_vars=targetvalue,
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
sns.boxplot(x="features", y="value", hue=targetvalue, data=data)
plt.xticks(rotation=90)
plt.show()
"""
Create violinplot (histograms), used in feature selection .
@:param df - DataFrame containing the data set
"""
def violin_plot(dataFrame):
targetvalue = "Diagnosis"
df = dataFrame.rename(columns={'diagnosis': targetvalue}, inplace=False)
df[targetvalue] = df[targetvalue].map({0:'Malignant', 1:'Benign'})
data = df.drop([targetvalue], axis = 1)
data_n_2 = (data - data.mean()) / (data.std()) # standardization
# Plot histograms for the 10 first features
data = pd.concat([df[targetvalue], data_n_2.iloc[:, 0:10]], axis=1)
data = pd.melt(data, id_vars=targetvalue,
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
sns.violinplot(x="features", y="value", hue=targetvalue, data=data, split=True, inner="quart")
plt.xticks(rotation=90)
plt.show()
# Plot histograms for the 11-20 first features
data = pd.concat([df[targetvalue], data_n_2.iloc[:, 10:20]], axis=1)
data = pd.melt(data, id_vars=targetvalue,
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
sns.violinplot(x="features", y="value", hue=targetvalue, data=data, split=True, inner="quart")
plt.xticks(rotation=90)
plt.show()
# Plot histograms for the 21-> first features
data = pd.concat([df[targetvalue], data_n_2.iloc[:, 20:]], axis=1)
data = pd.melt(data, id_vars=targetvalue,
var_name="features",
value_name='value')
plt.figure(figsize=(10, 10))
sns.violinplot(x="features", y="value", hue=targetvalue, data=data, split=True, inner="quart")
plt.xticks(rotation=90)
plt.show()
```
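A hypothetical end-to-end use of the helpers above (the CSV file name is an assumption; the target column is expected to be `diagnosis`, stored as the last column):
```python
import preProcessing

df = preProcessing.getData('breast_cancer.csv')           # load the dataset
selected = preProcessing.rfe(df)                          # recursive feature elimination
df_reduced = df[selected]                                 # keep selected features + 'diagnosis'
x_train, x_test, y_train, y_test = preProcessing.divideData(df_reduced)
x_train, x_test = preProcessing.featureScaler(x_train, x_test)
```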
#### File: johanbso/Machine-Learning-Project/Results.py
```python
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as metrics
from sklearn.metrics import confusion_matrix, accuracy_score, plot_confusion_matrix
"""
Analyze the accuracy of a classifier.
@:param classifier - Classifier to be analyzed
@:param x_test - Test feature value vector
@:param y_test - Test target value vector
@:return cm, ac - The confusion matrix and the fraction of correctly classified samples.
"""
def accuracy(classifier, x_test, y_test):
# Predicting the test set
y_pred = classifier.predict(x_test)
# Making Confusion Matrix and calculating accuracy score
cm = confusion_matrix(y_test, y_pred)
ac = accuracy_score(y_test, y_pred)
return cm, ac
"""
Presents the performance of the methods in a diagram
@:param methodNames - String vector containing the names of the methods
@:param performances - Double vector containing the accuracy scores of the respective methods
"""
def PresentResults(methodNames, perfomances):
plt.rcParams['figure.figsize']=15,6
sns.set_style("darkgrid")
ax = sns.barplot(x=methodNames, y=perfomances, palette = "rocket", saturation =1.5)
plt.xlabel("Classifier Models", fontsize = 20 )
plt.ylabel("% of Accuracy", fontsize = 20)
plt.title("Accuracy of the Classifier Models", fontsize = 20)
plt.xticks(fontsize = 12, horizontalalignment = 'center', rotation = 8)
plt.yticks(fontsize = 13)
for p in ax.patches:
width, height = p.get_width(), p.get_height()
x, y = p.get_xy()
ax.annotate(f'{height:.2%}', (x + width/2, y + height*1.02), ha='center', fontsize = 'x-large')
plt.show()
"""
Presents the performance of the methods in a ROC plot
@:param x_test, y_test - Test features and target values
@:param x_test_pca, y_test_pca - Test features and target values for the PCA-transformed data
@:param models - Classifiers trained without PCA
@:param models_pca - Classifiers trained with PCA
"""
def roc_plot(x_test, y_test, x_test_pca, y_test_pca, models, models_pca):
names = ['KNN', "SVM"]
names_pca = ['KNN (PCA)', "SVM (PCA)"]
count = 0
plt.title('Receiver Operating Characteristic')
# Plot the models not using PCA
for model in models:
probs = model.predict_proba(x_test)
preds = probs[:, 1]
fpr, tpr, threshold = metrics.roc_curve(y_test, preds)
roc_auc = metrics.auc(fpr, tpr)
# method I: plt
plt.plot(fpr, tpr, label='AUC ' + names[count] + ' = %0.2f' % roc_auc)
count = count + 1
count = 0
# Plot the models using PCA
for model in models_pca:
probs = model.predict_proba(x_test_pca)
preds = probs[:, 1]
fpr, tpr, threshold = metrics.roc_curve(y_test_pca, preds)
roc_auc = metrics.auc(fpr, tpr)
# method I: plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, label='AUC ' + names_pca[count] + ' = %0.2f' % roc_auc)
count = count + 1
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1.1])
plt.ylim([0, 1.1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
```
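A hypothetical way to wire these helpers into the rest of the project (assuming the `x_train`/`x_test` splits produced by `preProcessing.divideData` and a fitted classifier):
```python
from sklearn.neighbors import KNeighborsClassifier

import Results

knn_clf = KNeighborsClassifier().fit(x_train, y_train)   # assumed training split
cm, ac = Results.accuracy(knn_clf, x_test, y_test)       # confusion matrix + accuracy
Results.PresentResults(['KNN'], [ac])                    # bar chart of the score
```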
#### File: johanbso/Machine-Learning-Project/SVC_optimized.py
```python
from sklearn.svm import SVC
import Results
"""
Train an optimal support vector classifier (SVC).
@:param x_train - Training feature value vector
@:param y_train - Training target value vector
@:param x - Evaluation feature value vector
@:param y - Evaluation target value vector
@:return classifier - The SVC with the highest evaluation accuracy
"""
def svc_classifier(x_train, y_train, x, y):
c = []
a = []
for d in range(1, 10):
print("training")
classifier = SVC(kernel='rbf', degree=d, gamma='scale', shrinking=True, C=4.6, probability=True)
classifier.fit(x_train, y_train)
a.append(Results.accuracy(classifier, x, y)[1])
c.append(classifier)
print("done")
index = a.index(max(a))
classifier = c[index]
return classifier
```
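One note on `SVC_optimized.py` above: scikit-learn's `SVC` ignores the `degree` parameter for every kernel except `'poly'`, so the loop over `d` trains nine essentially identical RBF models. A hedged, simpler sketch with the same settings:
```python
from sklearn.svm import SVC

def svc_classifier_rbf(x_train, y_train):
    """Single RBF fit with the same hyperparameters as the loop above."""
    clf = SVC(kernel='rbf', gamma='scale', shrinking=True, C=4.6, probability=True)
    clf.fit(x_train, y_train)
    return clf
```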
#### File: johanbso/Machine-Learning-Project/SVC.py
```python
from sklearn.metrics import plot_confusion_matrix
from sklearn.svm import SVC
import pandas as pd
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import operator
import Results
import preProcessing
import time
"""
Train an optimal support vector classifier (SVC).
@:param x_train - Training feature value vector
@:param y_train - Training target value vector
@:param x_test - Test feature value vector
@:param y_test - Test target value vector
@:return optimal_classifier - Optimized support vector classifier
"""
def svc_classifier(x_train, y_train, x_test, y_test):
tid = time.time()
    num = 0
classifier_list = []
kernel = ['linear', 'poly', 'rbf', 'sigmoid']
gammas = ['scale', 'auto']
shrink = [True, False]
ac_list = []
attributelist = []
for i in range(1, 160, 3):
for bol in shrink:
for k in kernel:
for gamma in gammas:
for d in range(1, 10):
tid2 = time.time()
print('Time: ', tid2 - tid)
print("Antall noder:", num)
num += 1
if k != 'poly' and d == 1:
classifier = SVC(kernel=k, degree=d, gamma=gamma, shrinking=bol, C=i/10.0,
probability=True)
classifier.fit(x_train, y_train)
cm, ac = Results.accuracy(classifier, x_test, y_test)
ac_list.append(ac)
classifier_list.append(classifier)
attributelist.append([ac, k, gamma, bol, i/10])
elif k == 'poly':
classifier = SVC(kernel=k, degree=d, gamma=gamma, shrinking=bol, C=i / 10.0)
classifier.fit(x_train, y_train)
cm, ac = Results.accuracy(classifier, x_test, y_test)
ac_list.append(ac)
classifier_list.append(classifier)
attributelist.append([ac, k, gamma, bol, i / 10])
sortedonacc = sorted(attributelist, key=operator.itemgetter(0), reverse=True)
df = pd.DataFrame(sortedonacc)
df.columns = ["Accuracy", "Kernel", "Gamma", "Shrink_Heuristic", 'Regularization_parameter']
print("SVM table:")
print(df.to_latex(index=False))
index = ac_list.index(max(ac_list))
optimal_classifier = classifier_list[index]
return optimal_classifier
``` |
{
"source": "johancc/CocktailHelper",
"score": 4
} |
#### File: johancc/CocktailHelper/drink_recipe_maker.py
```python
from time import sleep
from typing import Union
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from cocktail import CocktailRecipe
from drink_lookup import get_drink_by_name, get_drinks_based_on_ingredient
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('credentials.json', scope)
gc = gspread.authorize(credentials)
def write_cocktail_instructions_return_next_row(
cocktail_recipe: CocktailRecipe,
ws: gspread.Worksheet = None,
row: int = None) -> int:
"""
Writes a cocktail_recipe recipe into a google sheet worksheet.
Format:
0, 0, 0,
Cocktail name,
Instruction 1,
...,
Instruction limit
The cocktail has a separator line after the previous entry in the
worksheet, unless the worksheet is empty.
:param ws: The worksheet where to write the cocktail_recipe to
:param cocktail_recipe: The cocktail_recipe recipe to write
:param row: the row the entries should populate from
:return: The next empty row below the instructions for this cocktail.
"""
current_row = write_cocktail_header_return_next_row(cocktail_recipe, ws, row)
for instruction in cocktail_recipe.get_instructions():
ws.update_cell(current_row, 2, instruction)
current_row += 1
return current_row
def write_cocktail_header_return_next_row(cocktail_recipe: CocktailRecipe, ws: gspread.Worksheet, row: int = None):
"""
Writes a separator line and drink name into the spreadsheet at the next available row
:param cocktail_recipe: Recipe to which to write the header for.
:param ws: Worksheet that should be written to.
:param row: Row which to write the header into.
    :return: The row below the cocktail header
"""
top_margin_row = 3
current_row = row
if row is None:
current_row = find_next_empty_row_index(ws)
if current_row >= top_margin_row:
write_separator_line(ws, current_row, width=3, separator="0")
current_row += 1
ws.update_cell(current_row, 1, cocktail_recipe.get_name())
current_row += 1
return current_row
def write_ingredient_header_return_next_row(ingredient: str, ws: gspread.Worksheet, row: int = None):
"""
Writes a separator line and ingredient name into the spreadsheet at the next available row.
If no row is given, it finds the next empty row in the worksheet.
:param ingredient: The ingredient of the drinks
:param ws: Worksheet that should be written to.
:param row: Row which to write the header to.
:return: The row below the ingredient header.
"""
top_margin_row = 3
current_row = row
if current_row is None:
current_row = find_next_empty_row_index(ws)
if current_row >= top_margin_row:
write_separator_line(ws, current_row, width=2, separator="0")
current_row += 1
ws.update_cell(current_row, 1, ingredient)
current_row += 1
return current_row
def write_cocktail_ingredients_into_spreadsheet_return_next_row(cocktail_recipe: CocktailRecipe, ws: gspread.Worksheet,
next_empty_row=None) -> int:
"""
Inserts the cocktail_recipe recipe into the bottom of the spreadsheet.
    The cocktail name is written first, followed by one row per ingredient with its amount.
:param cocktail_recipe: The cocktail_recipe recipe to insert into the spreadsheet.
:param ws: The worksheet to insert the cocktail_recipe to
:param next_empty_row: If known, the next empty row where to write the ingredients to.
:return The next empty row below the current row.
"""
current_row = write_cocktail_header_return_next_row(cocktail_recipe, ws, next_empty_row)
for ingredient, amount in cocktail_recipe.get_ingredients().items():
ws.update_cell(current_row, 2, ingredient)
ws.update_cell(current_row, 3, amount)
current_row += 1
return current_row
def write_cocktail_names_based_on_ingredient_return_next_row(ingredient: str, ws: gspread.Worksheet,
next_empty_row=None, limit: int = 1) -> int:
"""
:param ingredient: The main ingredient of the drinks whose name to write.
:param ws: The worksheet to insert the drink name to
:param next_empty_row: If known, the next empty row where to write the drink names to.
:param limit: The maximum number of drink names to write for the given ingredient.
:return: The next empty row below where the drink names were written.
"""
current_row = write_ingredient_header_return_next_row(ingredient, ws, next_empty_row)
for cocktail in get_drinks_based_on_ingredient(ingredient, limit):
ws.update_cell(current_row, 2, cocktail.get_name())
current_row += 1
return current_row
def write_separator_line(ws: gspread.Worksheet, separator_row_index: int, width: int,
separator: Union[str, int]) -> None:
"""
Writes a separator line to mark the start of a new entry.
:param ws: Worksheet to write the line to
:param separator_row_index: index of the row to write the line in
:param width: How many cells should be written to in the given row
:param separator: The value which to fills the cells with
:return: None
"""
for col in range(1, width + 1):
ws.update_cell(separator_row_index, col, separator)
def find_next_empty_row_index(
ws: gspread.Worksheet,
empty_rows_below: int = 1,
min_horizontal_empty_cells: int = 3,
start_index: int = 1) -> int:
"""
Finds the next empty row index in a worksheet. It also ensures that there
are at least empty_rows_below rows below the row which are empty.
:param ws: The worksheet to look for an empty row in
:param empty_rows_below: The number of rows below the returned row that should be empty
:param min_horizontal_empty_cells: Minimum number of empty continuous cells needed, from left to right.
:param start_index: Row to start the search from
:return: The index of the row in the worksheet that is empty and has the specified number of rows below.
"""
row = start_index
while True:
if is_row_empty(ws, row, min_horizontal_empty_cells):
satisfies_requirements = True
for row_below in range(row + 1, empty_rows_below + 1):
if not is_row_empty(ws, row_below, min_horizontal_empty_cells):
satisfies_requirements = False
break
if satisfies_requirements:
return row
else:
row += 1
else:
row += 1
def is_row_empty(ws: gspread.Worksheet, row: int, min_horizontal_empty_cells: int) -> bool:
"""
Checks whether a given row in a worksheet is empty
:param ws: Worksheet to check whether the given row is empty on.
:param row: The index of the row to check
:param min_horizontal_empty_cells: Minimum number of empty continuous cells needed, from left to right.
:return: Whether there are at least min_horizontal_empty continuous empty cells in the row from left to right.
"""
for col in range(1, 1 + min_horizontal_empty_cells):
if ws.cell(row, col).value:
return False
return True
if __name__ == '__main__':
sheet_name = "EID Data" # Sheet name which the project has authorization for."
sheet = gc.open(sheet_name)
drinks_to_insert = {
"<NAME>",
"<NAME>",
"Margarita",
"Whiskey Sour",
"Mojito",
"Daiquiri",
"Martini",
"Old Fashioned",
"White Russian",
"Cuba Libre",
"Long Island Iced Tea",
}
ingredients_to_insert = {
"Gin",
"Vodka",
"Tequila",
"Whiskey",
"Rum",
"Light rum",
"Coffee liqueur",
"Lemonade",
"Scotch",
"Tea",
"Kahlua",
"Everclear",
"7-up"
}
# Pre-processing
ingredients_sheet = sheet.add_worksheet("Ingredients", 100, 100)
ingredients_sheet.insert_row(["Drink Name", "Ingredient", "Amount"])
instructions_sheet = sheet.add_worksheet("Instructions", 100, 100)
instructions_sheet.insert_row(["Drink Name", "Instruction"])
drink_by_ingredient_sheet = sheet.add_worksheet("Ingredient to Drink", 100, 100)
drink_by_ingredient_sheet.insert_row(["Ingredient", "Drink Names"])
next_empty_row_ingredient = 2 # starting row
next_empty_row_instructions = 2
next_empty_row_drink_by_ingredient = 2
    # Note: the `and False` guard leaves this loop disabled, so no drink data is written on this run.
    while drinks_to_insert and False:
name = drinks_to_insert.pop()
cocktail = get_drink_by_name(name)
try:
next_empty_row_ingredient = write_cocktail_ingredients_into_spreadsheet_return_next_row(
cocktail, ingredients_sheet, next_empty_row_ingredient)
next_empty_row_instructions = write_cocktail_instructions_return_next_row(
cocktail, instructions_sheet, next_empty_row_instructions)
except gspread.exceptions.APIError:
print("Reached rate limit. Waiting 30 seconds.")
sleep(30)
drinks_to_insert.add(name)
while ingredients_to_insert:
main_ingredient = ingredients_to_insert.pop()
try:
next_empty_row_drink_by_ingredient = write_cocktail_names_based_on_ingredient_return_next_row(
main_ingredient, drink_by_ingredient_sheet, next_empty_row_drink_by_ingredient, limit=5
)
except gspread.exceptions.APIError:
print("Reached rate limit. Waiting 30 seconds.")
sleep(30)
ingredients_to_insert.add(main_ingredient)
``` |
{
"source": "JohanChane/JohanChane.github.io",
"score": 3
} |
#### File: MyProjects/Python/python-ba-PythonCheetSheet.py
```python
import math
## ## Basic
def funcForDebug():
print('### funcForDebug')
i = 100
print(type(i))
print(type(int))
print(dir())
print(id(i))
print(isinstance(True, bool))
print(issubclass(bool, int))
    # A class is considered a subclass of itself
    # True
print(issubclass(int, int))
del i
funcForDebug()
def funcForOutput():
print('### funcForOutput')
# ### `f/F` 前缀与 `{<expression>}`
print(f'__name__ = {__name__}')
def func():
return 'ABC'
print(f'ret: {func()}')
print(f'The value of pi is approximately {math.pi:.3f}.')
table = {'Sjoerd': 4127, 'Jack': 4098, 'Dcab': 7678}
for name, phone in table.items():
print(f'{name:10} ==> {phone:10d}')
# ### str.format()
print('We are the {} who say "{}!"'.format('knights', 'Ni'))
print('{0} and {1}'.format('spam', 'eggs'))
print('{1} and {0}'.format('spam', 'eggs'))
print('The story of {0}, {1}, and {other}.'.format('Bill', 'Manfred', other='Georg'))
# ### 旧的字符串格式化方法
print('The value of pi is approximately %5.3f.' % math.pi)
funcForOutput()
def funcForStringConcaten():
print('### funcForStringConcaten')
    # Concatenated at parse time
    print('ABC' 'DEF')
    # Concatenated at run time
print('ABC' + 'DEF')
funcForStringConcaten()
def funcForEscapeChar():
print('### funcForEscapeChar')
print('\'')
print('\"')
# escape newline
str1 = 'ABC\
DEF'
print(str1)
    # ### Expressions inside parentheses, brackets, or braces may span multiple physical lines without a backslash.
str1 = ('ABC',
'DEF')
print(str1)
str1 = ['ABC',
'DEF']
print(str1)
str1 = {'ABC',
'DEF'}
print(str1)
funcForEscapeChar()
## ### data type
def funcForDataType():
print('### funcForDataType')
    # ### Numeric types
    print('#### Numeric types')
    # #### Integers
    i = 100
    b = True
    # #### Floating-point numbers
f = 1.5
    # ### complex
    cplx = 4.7 + 0.666j     # define a complex number
    print(cplx)             # print the complex number
    print(cplx.real)        # print the real part
    print(cplx.imag)        # print the imaginary part
    print(cplx.conjugate()) # print its complex conjugate
    # ### Sequence types
    print('#### Sequence types')
    # #### Immutable sequences
    str1 = 'ABC'
    tuple1 = (1, 2)
    # #### Mutable sequence types
    list1 = [1, 2]
    # ##### List comprehensions
    list2 = [2 * i for i in range(1, 100, 10)]
    print(list2)
    # ### Set types
    print('#### Set types')
    set2 = {1, 2}
    # Add elements
set2.add(3)
print(set2)
# TypeError: 'int' object is not iterable
# set2.update(4)
set2.update([4])
    # Remove elements
set2.remove(4)
set2.discard(3)
set2.pop()
print(set2)
    # Basic set operations
set3 = {1, 2, 3}
set4 = {3, 4, 5}
# TypeError: unsupported operand type(s) for +: 'set' and 'set'
# print(set3 + set4)
    # Set difference
    print(set3 - set4)
    # Union
    print(set3 | set4)
    # Intersection
    print(set3 & set4)
    # Symmetric difference: elements in exactly one of set3 and set4
print(set3 ^ set4)
# TypeError: unsupported operand type(s) for *: 'set' and 'set'
# print(set3 * set4)
# TypeError: unsupported operand type(s) for /: 'set' and 'set'
# print(set3 / set4)
set1 = {1, 2}
frozenset1 = frozenset('ABC')
print(frozenset1)
# TypeError: unhashable type: 'set'
# print(hash(set1))
print(hash(frozenset1))
    # Elements must be immutable (hashable) objects
    # set2 = {[1, 2]}
    # Elements are unique; duplicates are dropped
set3 = {1, 1, 2}
print(set3)
    # ### Mapping types
    print('#### Mapping types')
    # #### dict
    # Keys must be hashable
dict1 = {0.1: 1, 0.1+0.1j: 2, (1,2): 3, (1, (1, 2)): 4}
dict1[0.1]
dict1[0.1+0.1j]
dict1[(1,2)]
    # Error: the key is not hashable
# dict2 = {(1, [1, 2])}
funcForDataType()
## ### Branch Control
def funcForBranchControl():
print('### funcForBranchControl')
if False:
pass
elif True:
pass
else:
pass
funcForBranchControl()
## ### Flow Control
def funcForLoopControl():
print('### funcForLoopControl')
while True:
# continue
break
words = ['cat', 'window', 'defenestrate']
for w in words:
print(w, len(w))
    # range(start, end, step)
for i in range(5):
print(i, end='\t')
print('')
funcForLoopControl()
## ### Function
gVar = 100
def funcForFuncBasic():
print('### funcForFuncBasic')
localVar = 10
    # Use the global variable
global gVar
gVar = 1000
print(gVar)
funcForFuncBasic()
# x can receive a positional or a keyword argument
# y can only receive a keyword argument
# keywordOnlyArgs must be the last parameter (no parameter may follow it)
def funcForFuncArgs(x, *posOnlyArgs, y, **keywordOnlyArgs):
print('### funcForFuncArgs')
print('x = {}'.format(x))
print(posOnlyArgs)
print('y = {}'.format(y))
print(keywordOnlyArgs)
funcForFuncArgs(1, 2, 3, y = 4, key1 = 5, key2 = 6)
def funcForFuncReturn():
print('### funcForFuncReturn')
return [1, 2]
print(funcForFuncReturn())
# Arguments are passed by reference by default; for some mutable objects a copy can be passed instead. Passing a copy of an immutable object is pointless, because modifying it creates a new object anyway.
def funcForWayOfPassArgs(list1, set1, dict1):
print('### funcForWayOfPassArgs')
list1[0] = 10
set1.pop()
dict1['key1'] = 10
list1 = [1, 2]
set1 = {1, 2}
dict1 = {'key1': 1, 'key2': 2}
funcForWayOfPassArgs(list1.copy(), set1.copy(), dict1.copy())
print('list1 = {}, set1 = {}, dict1 = {}'.format(list1, set1, dict1))
funcForWayOfPassArgs(list1, set1, dict1)
print('list1 = {}, set1 = {}, dict1 = {}'.format(list1, set1, dict1))
## ### Nested functions and closures
def funcForInnerFunc():
print('### funcForInnerFunc')
    # Nested function
def innerFunc():
print('innerFunc')
innerFunc()
funcForInnerFunc()
def funcForClosure():
print('### funcForClosure')
x = 5
    # Nested function
    def closure():
        # Declare that x is not a local variable
nonlocal x
x += 1
print(x)
return closure
closure = funcForClosure()
closure()
## ### lambda
print('### lambda')
def func(a, b):
return a + b
lmbd = lambda a, b: a + b
print(lmbd(10, 20))
## ### Exceptions
def funcForException():
print('### funcForException')
    # ### Exceptions
try:
1/0
except ZeroDivisionError as err:
print(err)
finally:
print('finally')
    # ### Name errors (an undefined name raises NameError)
try:
ABC
except NameError as err:
print('err = {}'.format(err))
funcForException()
## ## Class
## ### super
print('### super')
class Base1(object):
def foo(self):
print('Base1')
class Base2(object):
def foo(self):
print('Base2')
class MyClass(Base1, Base2):
def foo(self):
print('MyClass')
myClass = MyClass()
# Returns a super proxy for MyClass's parent, bound to the myClass instance
superOfObj = super(MyClass, myClass)
# Returns a super proxy for MyClass's parent, built from the MyClass class object
superOfClass = super(MyClass, MyClass)
superOfObj.foo()
# Because superOfClass was built from the class object, i.e. it is unbound, an instance has to be passed explicitly.
superOfClass.foo(myClass)
## ### Attributes
print('### ClassForClassProperties')
class ClassForClassProperties():
classProperty = 10
def __init__(self):
self.instanceProperty = 100
classForClassProperties = ClassForClassProperties()
print(classForClassProperties.instanceProperty)
print(ClassForClassProperties.classProperty)
print(classForClassProperties.classProperty)
## ### Methods
print('### ClassForClassMethods')
class ClassForClassMethods():
# default method
def defaultMethod(self):
pass
@classmethod
def classMethod(cls):
print(cls)
@staticmethod
def staticMethod():
pass
classForClassMethods = ClassForClassMethods()
classForClassMethods.defaultMethod()
ClassForClassMethods.defaultMethod(classForClassMethods)
ClassForClassMethods.classMethod()
classForClassMethods.classMethod()
ClassForClassMethods.staticMethod()
classForClassMethods.staticMethod()
print('### ClassForClassAccessModifiers')
class ClassForClassAccessModifiers():
def __init__(self):
self.__privateProperty = 10
self.publicProperty = 100
def __privateMethod(self):
print('__privateMethod')
def publicMethod(self):
print('publicMethod')
classForClassAccessModifiers = ClassForClassAccessModifiers()
print(classForClassAccessModifiers.publicProperty)
# print(classForClassAccessModifiers.__privateProperty)
classForClassAccessModifiers.publicMethod()
# classForClassAccessModifiers.__privateMethod()
## ### Polymorphism
print('### Polymorphic')
class Base():
def method(self):
print('Base method()')
class MyClass(Base):
def method(self):
print('MyClass method()')
base = Base()
base.method()
base = MyClass()
base.method()
## ### Single inheritance
print('### SingleInheriting')
class Base():
def __init__(self):
print('Base')
class MyClass(Base):
def __init__(self):
# Base.__init__(self)
super().__init__()
myClass = MyClass()
## ### Multiple inheritance
print('### MultiInheriting')
class Base1():
def __init__(self):
print('Base1')
class Base2():
def __init__(self):
print('Base2')
class MyClass(Base1, Base2):
def __init__(self):
        # A bare super().__init__() would only run Base1.__init__ here (neither base chains to super()), so both parent constructors are called explicitly; see the cooperative sketch below.
Base1.__init__(self)
Base2.__init__(self)
myClass = MyClass()
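# Added sketch: with cooperative inheritance, every class in the MRO calls
# super().__init__(), so a single super() call in the subclass reaches both
# bases in MRO order and the explicit Base1.__init__/Base2.__init__ calls
# above become unnecessary. The Coop* names exist only for this illustration.
class CoopBase1():
    def __init__(self):
        super().__init__()
        print('CoopBase1')
class CoopBase2():
    def __init__(self):
        super().__init__()
        print('CoopBase2')
class CoopClass(CoopBase1, CoopBase2):
    def __init__(self):
        super().__init__()
        print('CoopClass')
coopClass = CoopClass()   # prints CoopBase2, CoopBase1, CoopClass
print(CoopClass.__mro__)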
## #### Subclass attributes and methods shadowing same-named parent members
print('### Overriding in inheriting')
class Base():
def __init__(self):
self.x = 100
def method(self):
print('Base.method()')
class MyClass(Base):
def __init__(self):
super().__init__()
        # The parent's instance attribute cannot be read through super()
        # AttributeError: 'super' object has no attribute 'x'
        # print(super().x)
        print(self.x)
        # This is an assignment to the existing x, not the creation of a separate attribute
        self.x = 1000
        # The parent's method() can be called through super()
super().method()
self.method()
def method(self):
print('MyClass.method()')
myClass = MyClass()
## ### Abstract classes
print('### AbstractClass')
from abc import ABC, abstractmethod
class MyAbstractClass(ABC):
@abstractmethod
def myAbstractMethod(self):
pass
@classmethod
@abstractmethod
def myAbstractClassmethod(cls):
pass
@staticmethod
@abstractmethod
def myAbstractStaticmethod():
pass
class MyClass(MyAbstractClass):
def myAbstractMethod(self):
pass
@classmethod
def myAbstractClassmethod(cls):
pass
@staticmethod
def myAbstractStaticmethod():
pass
myClass = MyClass()
myClass.myAbstractMethod()
MyClass.myAbstractClassmethod()
MyClass.myAbstractStaticmethod()
## ## `__*__` identifiers
## ### `__new__(), __init__()`
print('### `__new__(), __init__()`')
class MyClass(object):
def __new__(cls):
print('create a MyClass instance')
        # Equivalent to: return object.__new__(cls)
return super(MyClass, cls).__new__(cls)
def __init__(self):
print('__init__()')
myClass = MyClass()
print(type(myClass))
print(MyClass.__mro__)
## ### Customizing attribute access
print('### Customizing attribute access')
## #### property()
print('#### `property()`')
class MyClass():
def __init__(self, size = 10):
self.size = size
def getSize(self):
return self.size
def setSize(self, value):
self.size = value
def delSize(self):
del self.size
    # Add the managed attribute `x`
x = property(getSize, setSize, delSize)
myClass = MyClass()
print(myClass.x)
myClass.x = 100
del myClass.x
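# Added sketch: the same managed attribute written with the @property decorator,
# which is the more common spelling today. MyClassWithDecorator exists only for
# this illustration and is not part of the original cheat sheet.
class MyClassWithDecorator():
    def __init__(self, size=10):
        self._size = size
    @property
    def x(self):
        return self._size
    @x.setter
    def x(self, value):
        self._size = value
    @x.deleter
    def x(self):
        del self._size
myClassWithDecorator = MyClassWithDecorator()
print(myClassWithDecorator.x)
myClassWithDecorator.x = 100
del myClassWithDecorator.x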
## #### `__getattr__(),__getattribute__(), __setattr__(), __delattr__()`
print('#### `__getattr__(),__getattribute__(), __setattr__(), __delattr__()`')
class MyClass():
    # Defines the behaviour when a nonexistent attribute is looked up
    def __getattr__(self, name):
        print('getattr')
    # Defines the behaviour whenever any attribute is accessed
def __getattribute__(self, name):
print('getattribute')
return super().__getattribute__(name)
def __setattr__(self, name, value):
print('setattr')
super().__setattr__(name, value)
def __delattr__(self, name):
print('delattr')
super().__delattr__(name)
myClass = MyClass()
# The lookup below prints 'getattribute' then 'getattr' (x does not exist yet); the assignment prints 'setattr' and the deletion prints 'delattr'
print(myClass.x)
myClass.x = 1
del myClass.x
## ### Emulating numeric types
print('### Emulating numeric types')
class Computation():
def __init__(self,value):
self.value = value
def __add__(self,other):
return self.value + other
def __sub__(self,other):
return self.value - other
c = Computation(5)
print(c + 5)
print(c - 3)
``` |
{
"source": "johan--/commcare-hq",
"score": 2
} |
#### File: management/commands/2015_05_29_update_subscriptions.py
```python
from django.core.management import BaseCommand
from corehq.apps.accounting.models import BillingAccount, Subscription, SubscriptionType, ProBonoStatus, EntryPoint
import csv
import re
class Command(BaseCommand):
help = ("Updates service type, entry point, and pro bono status based on given CSV file")
def handle(self, *args, **options):
if len(args) != 1:
print "Invalid arguments: %s" % str(args)
return
completed = 0
total = 0
filename = args[0]
with open(filename) as f:
reader = csv.reader(f)
reader.next()
for row in reader:
total = total + 1
domain = row[0]
plan_version, subscription = Subscription.get_subscribed_plan_by_domain(domain)
if subscription is None:
print "Could not find Subscription for %s" % domain
account = BillingAccount.get_account_by_domain(domain)
if account is None:
print "Could not find BillingAccount for %s" % domain
if account is not None and subscription is not None:
'''
service_type = self.normalize(row[1]) # self service, contracted, or not set
if service_type == "selfservice":
#print "%s service_type => SELF_SERVICE" % domain
subscription.service_type = SubscriptionType.SELF_SERVICE
elif service_type == "contracted":
#print "%s service_type => CONTRACTED" % domain
subscription.service_type = SubscriptionType.CONTRACTED
elif service_type == "notset":
#print "%s service_type => NOT_SET" % domain
subscription.service_type = SubscriptionType.NOT_SET
else:
pass
#print "Skipping service type for %s" % domain
entry_point = self.normalize(row[2]) # yes if self starter, might be missing
if entry_point == "yes":
#print "%s entry_point => SELF_STARTED" % domain
account.entry_point = EntryPoint.SELF_STARTED
elif entry_point == "no":
#print "%s entry_point => CONTRACTED" % domain
account.entry_point = EntryPoint.CONTRACTED
else:
#print "Skipping entry point for %s" % domain
pass
'''
pro_bono_status = self.normalize(row[3]) # yes/no
if pro_bono_status == "yes":
#print "%s pro_bono_status => YES" % domain
subscription.pro_bono_status = ProBonoStatus.YES
elif pro_bono_status == "discounted":
#print "%s pro_bono_status => DISCOUNTED" % domain
subscription.pro_bono_status = ProBonoStatus.DISCOUNTED
else:
#print "%s pro_bono_status => NO" % domain
subscription.pro_bono_status = ProBonoStatus.NO
'''print "setting %s's service_type=%s, entry_point=%s, pro_bono=%s" % (
domain, subscription.service_type, account.entry_point, subscription.pro_bono_status
)'''
subscription.save()
account.save()
completed = completed + 1
print "Completed %i of %i domains" % (completed, total)
def normalize(self, str):
return re.sub(r'[^a-z]', "", str.lower())
```
#### File: management/commands/cchq_software_plan_bootstrap.py
```python
from __future__ import absolute_import, print_function, unicode_literals
# Standard library imports
from collections import defaultdict
from decimal import Decimal
import logging
from optparse import make_option
# Django imports
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from django_prbac.models import Role
# Use current models
DefaultProductPlan = apps.get_model('accounting', 'DefaultProductPlan')
Feature = apps.get_model('accounting', 'Feature')
SoftwareProduct = apps.get_model('accounting', 'SoftwareProduct')
FeatureRate = apps.get_model('accounting', 'FeatureRate')
SoftwarePlan = apps.get_model('accounting', 'SoftwarePlan')
SoftwarePlanVersion = apps.get_model('accounting', 'SoftwarePlanVersion')
SoftwareProductRate = apps.get_model('accounting', 'SoftwareProductRate')
Subscription = apps.get_model('accounting', 'Subscription')
from corehq.apps.accounting.models import (
SoftwareProductType, SoftwarePlanEdition, SoftwarePlanVisibility, FeatureType,
)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Populate a fresh db with standard set of Software Plans.'
option_list = BaseCommand.option_list + (
make_option('--dry-run', action='store_true', default=False,
                    help='Do not actually modify the database, just verbosely log what would happen'),
make_option('--verbose', action='store_true', default=False,
help='Enable debug output'),
make_option('--fresh-start', action='store_true', default=False,
help='Wipe all plans and start over. USE CAUTION. Also instantiate plans.'),
make_option('--flush', action='store_true', default=False,
help='Wipe all plans and start over. USE CAUTION.'),
make_option('--force-reset', action='store_true', default=False,
help='Assign latest version of all DefaultProductPlans to current '
'subscriptions and delete older versions.'),
make_option('--testing', action='store_true', default=False,
help='Run this command for testing purposes.'),
)
def handle(self, dry_run=False, verbose=False, fresh_start=False, flush=False, force_reset=False,
testing=False, *args, **options):
logger.info('Bootstrapping standard plans. Enterprise plans will have to be created via the admin UIs.')
self.for_tests = testing
if self.for_tests:
logger.info("Initializing Plans and Roles for Testing")
self.verbose = verbose
if force_reset:
            confirm_force_reset = raw_input("Are you sure you want to assign the latest default plan version to all "
"current subscriptions and remove the older versions? Type 'yes' to "
"continue.")
if confirm_force_reset == 'yes':
self.force_reset_subscription_versions()
return
if fresh_start or flush:
confirm_fresh_start = raw_input("Are you sure you want to delete all SoftwarePlans and start over? "
"You can't do this if there are any active Subscriptions."
" Type 'yes' to continue.\n")
if confirm_fresh_start == 'yes':
self.flush_plans()
if not flush:
self.product_types = [p[0] for p in SoftwareProductType.CHOICES]
self.editions = [
SoftwarePlanEdition.COMMUNITY,
SoftwarePlanEdition.STANDARD,
SoftwarePlanEdition.PRO,
SoftwarePlanEdition.ADVANCED,
SoftwarePlanEdition.ENTERPRISE,
]
self.feature_types = [f[0] for f in FeatureType.CHOICES]
self.ensure_plans(dry_run=dry_run)
def flush_plans(self):
if self.verbose:
logger.info("Flushing ALL SoftwarePlans...")
DefaultProductPlan.objects.all().delete()
SoftwarePlanVersion.objects.all().delete()
SoftwarePlan.objects.all().delete()
SoftwareProductRate.objects.all().delete()
SoftwareProduct.objects.all().delete()
FeatureRate.objects.all().delete()
Feature.objects.all().delete()
def force_reset_subscription_versions(self):
for default_plan in DefaultProductPlan.objects.all():
software_plan = default_plan.plan
latest_version = software_plan.get_version()
subscriptions_to_update = Subscription.objects.filter(plan_version__plan__pk=software_plan.pk).exclude(
plan_version=latest_version).all()
# assign latest version of software plan to all subscriptions referencing that software plan
if self.verbose:
logger.info('Updating %d subscriptions to latest version of %s.' %
(len(subscriptions_to_update), software_plan.name))
for subscription in subscriptions_to_update:
subscription.plan_version = latest_version
subscription.save()
# delete all old versions of that software plan
versions_to_remove = software_plan.softwareplanversion_set.exclude(pk=latest_version.pk).all()
if self.verbose:
logger.info("Removing %d old versions." % len(versions_to_remove))
versions_to_remove.delete()
def ensure_plans(self, dry_run=False):
edition_to_features = self.ensure_features(dry_run=dry_run)
for product_type in self.product_types:
for edition in self.editions:
role_slug = self.BOOTSTRAP_EDITION_TO_ROLE[edition]
try:
role = Role.objects.get(slug=role_slug)
except ObjectDoesNotExist:
logger.info("Could not find the role '%s'. Did you forget to run cchq_prbac_bootstrap?")
logger.info("Aborting. You should figure this out.")
return
software_plan_version = SoftwarePlanVersion(role=role)
product, product_rates = self.ensure_product_and_rate(product_type, edition, dry_run=dry_run)
feature_rates = self.ensure_feature_rates(edition_to_features[edition], edition, dry_run=dry_run)
software_plan = SoftwarePlan(
name='%s Edition' % product.name, edition=edition, visibility=SoftwarePlanVisibility.PUBLIC
)
if dry_run:
logger.info("[DRY RUN] Creating Software Plan: %s" % software_plan.name)
else:
try:
software_plan = SoftwarePlan.objects.get(name=software_plan.name)
if self.verbose:
logger.info("Plan '%s' already exists. Using existing plan to add version."
% software_plan.name)
except SoftwarePlan.DoesNotExist:
software_plan.save()
if self.verbose:
logger.info("Creating Software Plan: %s" % software_plan.name)
software_plan_version.plan = software_plan
software_plan_version.save()
for product_rate in product_rates:
product_rate.save()
software_plan_version.product_rates.add(product_rate)
for feature_rate in feature_rates:
feature_rate.save()
software_plan_version.feature_rates.add(feature_rate)
software_plan_version.save()
if edition == SoftwarePlanEdition.ADVANCED:
trials = [True, False]
else:
trials = [False]
for is_trial in trials:
default_product_plan = DefaultProductPlan(product_type=product.product_type, edition=edition, is_trial=is_trial)
if dry_run:
logger.info("[DRY RUN] Setting plan as default for product '%s' and edition '%s'." %
(product.product_type, default_product_plan.edition))
else:
try:
default_product_plan = DefaultProductPlan.objects.get(product_type=product.product_type,
edition=edition, is_trial=is_trial)
if self.verbose:
logger.info("Default for product '%s' and edition "
"'%s' already exists." % (
product.product_type, default_product_plan.edition
))
except ObjectDoesNotExist:
default_product_plan.plan = software_plan
default_product_plan.save()
if self.verbose:
logger.info("Setting plan as default for product '%s' and edition '%s'." %
(product.product_type,
default_product_plan.edition))
def ensure_product_and_rate(self, product_type, edition, dry_run=False):
"""
Ensures that all the necessary SoftwareProducts and SoftwareProductRates are created for the plan.
"""
if self.verbose:
logger.info('Ensuring Products and Product Rates')
product = SoftwareProduct(name='%s %s' % (product_type, edition), product_type=product_type)
if edition == SoftwarePlanEdition.ENTERPRISE:
product.name = "Dimagi Only %s" % product.name
product_rates = []
BOOTSTRAP_PRODUCT_RATES = {
SoftwarePlanEdition.COMMUNITY: [
SoftwareProductRate(), # use all the defaults
],
SoftwarePlanEdition.STANDARD: [
SoftwareProductRate(monthly_fee=Decimal('100.00')),
],
SoftwarePlanEdition.PRO: [
SoftwareProductRate(monthly_fee=Decimal('500.00')),
],
SoftwarePlanEdition.ADVANCED: [
SoftwareProductRate(monthly_fee=Decimal('1000.00')),
],
SoftwarePlanEdition.ENTERPRISE: [
SoftwareProductRate(monthly_fee=Decimal('0.00')),
],
}
for product_rate in BOOTSTRAP_PRODUCT_RATES[edition]:
if dry_run:
logger.info("[DRY RUN] Creating Product: %s" % product)
logger.info("[DRY RUN] Corresponding product rate of $%d created." % product_rate.monthly_fee)
else:
try:
product = SoftwareProduct.objects.get(name=product.name)
if self.verbose:
logger.info("Product '%s' already exists. Using "
"existing product to add rate."
% product.name)
except SoftwareProduct.DoesNotExist:
product.save()
if self.verbose:
logger.info("Creating Product: %s" % product)
if self.verbose:
logger.info("Corresponding product rate of $%d created."
% product_rate.monthly_fee)
product_rate.product = product
product_rates.append(product_rate)
return product, product_rates
def ensure_features(self, dry_run=False):
"""
Ensures that all the Features necessary for the plans are created.
"""
if self.verbose:
logger.info('Ensuring Features')
edition_to_features = defaultdict(list)
for edition in self.editions:
for feature_type in self.feature_types:
feature = Feature(name='%s %s' % (feature_type, edition), feature_type=feature_type)
if edition == SoftwarePlanEdition.ENTERPRISE:
feature.name = "Dimagi Only %s" % feature.name
if dry_run:
logger.info("[DRY RUN] Creating Feature: %s" % feature)
else:
try:
feature = Feature.objects.get(name=feature.name)
if self.verbose:
logger.info("Feature '%s' already exists. Using "
"existing feature to add rate."
% feature.name)
except ObjectDoesNotExist:
feature.save()
if self.verbose:
logger.info("Creating Feature: %s" % feature)
edition_to_features[edition].append(feature)
return edition_to_features
def ensure_feature_rates(self, features, edition, dry_run=False):
"""
Ensures that all the FeatureRates necessary for the plans are created.
"""
if self.verbose:
logger.info('Ensuring Feature Rates')
feature_rates = []
BOOTSTRAP_FEATURE_RATES = {
SoftwarePlanEdition.COMMUNITY: {
FeatureType.USER: FeatureRate(monthly_limit=2 if self.for_tests else 50,
per_excess_fee=Decimal('1.00')),
FeatureType.SMS: FeatureRate(monthly_limit=0), # use defaults here
},
SoftwarePlanEdition.STANDARD: {
FeatureType.USER: FeatureRate(monthly_limit=4 if self.for_tests else 100,
per_excess_fee=Decimal('1.00')),
FeatureType.SMS: FeatureRate(monthly_limit=3 if self.for_tests else 100),
},
SoftwarePlanEdition.PRO: {
FeatureType.USER: FeatureRate(monthly_limit=6 if self.for_tests else 500,
per_excess_fee=Decimal('1.00')),
FeatureType.SMS: FeatureRate(monthly_limit=5 if self.for_tests else 500),
},
SoftwarePlanEdition.ADVANCED: {
FeatureType.USER: FeatureRate(monthly_limit=8 if self.for_tests else 1000,
per_excess_fee=Decimal('1.00')),
FeatureType.SMS: FeatureRate(monthly_limit=7 if self.for_tests else 1000),
},
SoftwarePlanEdition.ENTERPRISE: {
FeatureType.USER: FeatureRate(monthly_limit=-1, per_excess_fee=Decimal('0.00')),
FeatureType.SMS: FeatureRate(monthly_limit=-1),
},
}
for feature in features:
feature_rate = BOOTSTRAP_FEATURE_RATES[edition][feature.feature_type]
feature_rate.feature = feature
if dry_run:
logger.info("[DRY RUN] Creating rate for feature '%s': %s" % (feature.name, feature_rate))
elif self.verbose:
logger.info("Creating rate for feature '%s': %s" % (feature.name, feature_rate))
feature_rates.append(feature_rate)
return feature_rates
BOOTSTRAP_EDITION_TO_ROLE = {
SoftwarePlanEdition.COMMUNITY: 'community_plan_v0',
SoftwarePlanEdition.STANDARD: 'standard_plan_v0',
SoftwarePlanEdition.PRO: 'pro_plan_v0',
SoftwarePlanEdition.ADVANCED: 'advanced_plan_v0',
SoftwarePlanEdition.ENTERPRISE: 'enterprise_plan_v0',
}
```
#### File: management/commands/hide_invoices_by_id.py
```python
from __future__ import unicode_literals, absolute_import, print_function
from optparse import make_option
from django.core.management import BaseCommand
from corehq.apps.accounting.models import Invoice, InvoiceBaseManager
class Command(BaseCommand):
help = 'Hides the specified invoice(s) from showing on reports'
option_list = BaseCommand.option_list + (
make_option('-u', '--unhide',
action='store_true',
default=False,
dest='unhide',
help="Make invoice(s) visible to the operations team"
"that were previously suppressed."),
)
def handle(self, *args, **options):
is_visible = options.get('unhide', False)
for invoice_id in args:
try:
invoice = super(InvoiceBaseManager, Invoice.objects).get_queryset().get(pk=invoice_id)
except Invoice.DoesNotExist:
print("Invoice {} was not found".format(invoice_id))
continue
invoice.is_hidden_to_ops = not is_visible
invoice.save()
print("Invoice {} is {} the operations team".format(
invoice_id,
'visible to' if is_visible else 'hidden from'
))
```
#### File: management/commands/make_domain_enterprise_level.py
```python
from django.core.management import BaseCommand
from corehq.apps.domain.models import Domain
from corehq.apps.accounting.exceptions import NewSubscriptionError
from corehq.apps.accounting.models import (
BillingAccount,
SoftwarePlanEdition,
SoftwarePlanVersion,
Subscription,
BillingAccountType)
class Command(BaseCommand):
help = ('Create a billing account and an enterprise level subscription '
'for the given domain')
def handle(self, *args, **options):
if len(args) != 1:
print "Invalid arguments: %s" % str(args)
return
domain = Domain.get_by_name(args[0])
if not domain:
print "Invalid domain name: %s" % args[0]
return
plan_version, subscription = Subscription.get_subscribed_plan_by_domain(domain.name)
if plan_version.plan.edition == SoftwarePlanEdition.ENTERPRISE:
print "Domain %s is already enterprise level" % domain.name
return
if subscription:
subscription.change_plan(self.enterprise_plan_version)
else:
try:
self.make_new_enterprise_subscription(domain)
except NewSubscriptionError as e:
print e.message
return
print 'Domain %s has been upgraded to enterprise level.' % domain.name
def make_new_enterprise_subscription(self, domain):
account, _ = BillingAccount.get_or_create_account_by_domain(
domain.name,
account_type=BillingAccountType.CONTRACT,
created_by="management command",
)
Subscription.new_domain_subscription(
account,
domain.name,
self.enterprise_plan_version,
)
@property
def enterprise_plan_version(self):
return SoftwarePlanVersion.objects.filter(
plan__edition=SoftwarePlanEdition.ENTERPRISE
)[0]
```
#### File: apps/accounting/payment_handlers.py
```python
from decimal import Decimal
import logging
import stripe
from django.conf import settings
from django.utils.translation import ugettext as _
from corehq.apps.accounting.models import (
BillingAccount,
CreditLine,
Invoice,
PaymentRecord,
SoftwareProductType,
FeatureType,
PaymentMethod,
StripePaymentMethod,
)
from corehq.apps.accounting.user_text import get_feature_name
from corehq.apps.accounting.utils import fmt_dollar_amount
from corehq.apps.domain.models import Domain
from corehq.const import USER_DATE_FORMAT
from dimagi.utils.decorators.memoized import memoized
stripe.api_key = settings.STRIPE_PRIVATE_KEY
logger = logging.getLogger('accounting')
class BaseStripePaymentHandler(object):
"""Handler for paying via Stripe's API
"""
receipt_email_template = None
receipt_email_template_plaintext = None
def __init__(self, payment_method, domain):
self.payment_method = payment_method
self.domain = domain
@property
def cost_item_name(self):
"""Returns a name for the cost item that's used in the logging messages.
"""
raise NotImplementedError("you must implement cost_item_name")
@property
@memoized
def core_product(self):
domain = Domain.get_by_name(self.domain)
return SoftwareProductType.get_type_by_domain(domain)
    def create_charge(self, amount, card=None, customer=None):
        """Creates the charge for this payment via Stripe's API.
        Returns the created charge object.
        """
        raise NotImplementedError("you must implement create_charge")
def get_charge_amount(self, request):
"""Returns a Decimal of the amount to be charged.
"""
raise NotImplementedError("you must implement get_charge_amount")
def update_credits(self, payment_record):
"""Updates any relevant Credit lines
"""
raise NotImplementedError("you must implement update_credits")
def get_amount_in_cents(self, amount):
amt_cents = amount * Decimal('100')
return int(amt_cents.quantize(Decimal(10)))
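    # Example (added for clarity, not in the original): get_amount_in_cents(Decimal('10.50'))
    # computes Decimal('1050.00'), quantizes it to a whole number of cents, and returns 1050.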
def process_request(self, request):
customer = None
amount = self.get_charge_amount(request)
card = request.POST.get('stripeToken')
remove_card = request.POST.get('removeCard')
is_saved_card = request.POST.get('selectedCardType') == 'saved'
save_card = request.POST.get('saveCard') and not is_saved_card
autopay = request.POST.get('autopayCard')
billing_account = BillingAccount.get_account_by_domain(self.domain)
generic_error = {
'error': {
'message': _(
"Something went wrong while processing your payment. "
"We're working quickly to resolve the issue. No charges "
"were issued. Please try again in a few hours."
),
},
}
try:
if remove_card:
self.payment_method.remove_card(card)
return {'success': True, 'removedCard': card, }
if save_card:
card = self.payment_method.create_card(card, billing_account, autopay=autopay)
if save_card or is_saved_card:
customer = self.payment_method.customer
charge = self.create_charge(amount, card=card, customer=customer)
except stripe.error.CardError as e:
# card was declined
return e.json_body
except (
stripe.error.AuthenticationError,
stripe.error.InvalidRequestError,
stripe.error.APIConnectionError,
stripe.error.StripeError,
) as e:
logger.error(
"[BILLING] A payment for %(cost_item)s failed due "
"to a Stripe %(error_class)s: %(error_msg)s" % {
'error_class': e.__class__.__name__,
'cost_item': self.cost_item_name,
'error_msg': e.json_body['error']
}, exc_info=True)
return generic_error
except Exception as e:
logger.error(
"[BILLING] A payment for %(cost_item)s failed due "
"to: %(error_msg)s" % {
'cost_item': self.cost_item_name,
'error_msg': e,
}, exc_info=True)
return generic_error
payment_record = PaymentRecord.create_record(
self.payment_method, charge.id, amount
)
self.update_credits(payment_record)
try:
self.send_email(payment_record)
except Exception:
logger.error(
"[BILLING] Failed to send out an email receipt for "
"payment related to PaymentRecord No. %s. "
"Everything else succeeded."
% payment_record.id, exc_info=True
)
return {
'success': True,
'card': card,
'wasSaved': save_card,
'changedBalance': amount,
}
def get_email_context(self):
return {
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
}
def send_email(self, payment_record):
additional_context = self.get_email_context()
from corehq.apps.accounting.tasks import send_purchase_receipt
send_purchase_receipt.delay(
payment_record, self.core_product, self.receipt_email_template,
self.receipt_email_template_plaintext, additional_context
)
class InvoiceStripePaymentHandler(BaseStripePaymentHandler):
receipt_email_template = 'accounting/invoice_receipt_email.html'
receipt_email_template_plaintext = 'accounting/invoice_receipt_email_plaintext.txt'
def __init__(self, payment_method, domain, invoice):
super(InvoiceStripePaymentHandler, self).__init__(payment_method, domain)
self.invoice = invoice
@property
def cost_item_name(self):
return _("Invoice #%s") % self.invoice.id
def create_charge(self, amount, card=None, customer=None):
return stripe.Charge.create(
card=card,
customer=customer,
amount=self.get_amount_in_cents(amount),
currency=settings.DEFAULT_CURRENCY,
description="Payment for Invoice %s" % self.invoice.invoice_number,
)
def get_charge_amount(self, request):
"""Returns a Decimal of the amount to be charged.
"""
if request.POST['paymentAmount'] == 'full':
return self.invoice.balance.quantize(Decimal(10) ** -2)
return Decimal(request.POST['customPaymentAmount'])
def update_credits(self, payment_record):
# record the credit to the account
CreditLine.add_credit(
payment_record.amount, account=self.invoice.subscription.account,
payment_record=payment_record,
)
CreditLine.add_credit(
-payment_record.amount,
account=self.invoice.subscription.account,
invoice=self.invoice,
)
self.invoice.update_balance()
self.invoice.save()
def get_email_context(self):
context = super(InvoiceStripePaymentHandler, self).get_email_context()
context.update({
'balance': fmt_dollar_amount(self.invoice.balance),
'is_paid': self.invoice.is_paid,
'date_due': self.invoice.date_due.strftime(USER_DATE_FORMAT) if self.invoice.date_due else 'None',
'invoice_num': self.invoice.invoice_number,
})
return context
class BulkStripePaymentHandler(BaseStripePaymentHandler):
receipt_email_template = 'accounting/bulk_payment_receipt_email.html'
receipt_email_template_plaintext = 'accounting/bulk_payment_receipt_email_plaintext.txt'
def __init__(self, payment_method, domain):
super(BulkStripePaymentHandler, self).__init__(payment_method, domain)
@property
def cost_item_name(self):
return _('Bulk Payment for project space %s' % self.domain)
def create_charge(self, amount, card=None, customer=None):
return stripe.Charge.create(
card=card,
customer=customer,
amount=self.get_amount_in_cents(amount),
currency=settings.DEFAULT_CURRENCY,
description=self.cost_item_name,
)
@property
def invoices(self):
return Invoice.objects.filter(
subscription__subscriber__domain=self.domain,
is_hidden=False,
)
@property
def balance(self):
return sum(invoice.balance for invoice in self.invoices)
def get_charge_amount(self, request):
if request.POST['paymentAmount'] == 'full':
return self.balance
return Decimal(request.POST['customPaymentAmount'])
def update_credits(self, payment_record):
amount = payment_record.amount
for invoice in self.invoices:
deduct_amount = min(amount, invoice.balance)
amount -= deduct_amount
if deduct_amount > 0:
# TODO - refactor duplicated functionality
CreditLine.add_credit(
deduct_amount, account=invoice.subscription.account,
payment_record=payment_record,
)
CreditLine.add_credit(
-deduct_amount,
account=invoice.subscription.account,
invoice=invoice,
)
invoice.update_balance()
invoice.save()
if amount:
account = BillingAccount.get_or_create_account_by_domain(self.domain)
CreditLine.add_credit(
amount, account=account,
payment_record=payment_record,
)
def get_email_context(self):
context = super(BulkStripePaymentHandler, self).get_email_context()
context.update({
'is_paid': all(invoice.is_paid for invoice in self.invoices),
'domain': self.domain,
'balance': self.balance,
})
return context
class CreditStripePaymentHandler(BaseStripePaymentHandler):
receipt_email_template = 'accounting/credit_receipt_email.html'
receipt_email_template_plaintext = 'accounting/credit_receipt_email_plaintext.txt'
def __init__(self, payment_method, domain, account, subscription=None, post_data=None):
super(CreditStripePaymentHandler, self).__init__(payment_method, domain)
self.features = [{'type': feature_type[0],
'amount': Decimal(post_data.get(feature_type[0], 0))}
for feature_type in FeatureType.CHOICES
if Decimal(post_data.get(feature_type[0], 0)) > 0]
self.products = [{'type': product_type[0],
'amount': Decimal(post_data.get(product_type[0], 0))}
for product_type in SoftwareProductType.CHOICES
if Decimal(post_data.get(product_type[0], 0)) > 0]
self.post_data = post_data
self.account = account
self.subscription = subscription
self.credit_lines = []
@property
def cost_item_name(self):
credit_types = [unicode(product['type']) for product in self._humanized_products()]
credit_types += [unicode(feature['type']) for feature in self._humanized_features()]
return _("Credits: {credit_types} for {sub_or_account}").format(
credit_types=", ".join(credit_types),
sub_or_account=("Subscription %s" % self.subscription
if self.subscription is None
else "Account %s" % self.account.id)
)
def _humanized_features(self):
return [{'type': get_feature_name(feature['type'], self.core_product),
'amount': fmt_dollar_amount(feature['amount'])}
for feature in self.features]
def _humanized_products(self):
return [{'type': product['type'],
'amount': fmt_dollar_amount(product['amount'])}
for product in self.products]
def get_charge_amount(self, request):
return Decimal(request.POST['amount'])
def create_charge(self, amount, card=None, customer=None):
return stripe.Charge.create(
card=card,
customer=customer,
amount=self.get_amount_in_cents(amount),
currency=settings.DEFAULT_CURRENCY,
description="Payment for %s" % self.cost_item_name,
)
def update_credits(self, payment_record):
for feature in self.features:
feature_amount = feature['amount']
if feature_amount >= 0.5:
self.credit_lines.append(CreditLine.add_credit(
feature_amount,
account=self.account,
subscription=self.subscription,
feature_type=feature['type'],
payment_record=payment_record,
))
else:
logger.error("[BILLING] {account} tried to make a payment for {feature} for less than $0.5."
"You should follow up with them.".format(account=self.account,
feature=feature['type']))
for product in self.products:
plan_amount = product['amount']
if plan_amount >= 0.5:
self.credit_lines.append(CreditLine.add_credit(
plan_amount,
account=self.account,
subscription=self.subscription,
product_type=product['type'],
payment_record=payment_record,
))
else:
logger.error("[BILLING] {account} tried to make a payment for {product} for less than $0.5."
"You should follow up with them.".format(account=self.account,
product=product['type']))
def process_request(self, request):
response = super(CreditStripePaymentHandler, self).process_request(request)
if self.credit_lines:
response.update({
'balances': [{'type': cline.product_type if cline.product_type else cline.feature_type,
'balance': fmt_dollar_amount(cline.balance)}
for cline in self.credit_lines]
})
return response
def get_email_context(self):
context = super(CreditStripePaymentHandler, self).get_email_context()
context.update({
'items': self._humanized_products() + self._humanized_features()
})
return context
class AutoPayInvoicePaymentHandler(object):
def pay_autopayable_invoices(self, date_due):
""" Pays the full balance of all autopayable invoices on date_due """
autopayable_invoices = Invoice.autopayable_invoices(date_due)
for invoice in autopayable_invoices:
logging.info("[Billing][Autopay] Autopaying invoice {}".format(invoice.id))
amount = invoice.balance.quantize(Decimal(10) ** -2)
auto_payer = invoice.subscription.account.auto_pay_user
payment_method = StripePaymentMethod.objects.get(web_user=auto_payer)
autopay_card = payment_method.get_autopay_card(invoice.subscription.account)
if autopay_card is None:
continue
try:
payment_record = payment_method.create_charge(autopay_card, amount_in_dollars=amount)
except stripe.error.CardError:
self._handle_card_declined(invoice, payment_method)
continue
except payment_method.STRIPE_GENERIC_ERROR as e:
self._handle_card_errors(invoice, payment_method, e)
continue
else:
invoice.pay_invoice(payment_record)
self._send_payment_receipt(invoice, payment_record)
def _send_payment_receipt(self, invoice, payment_record):
from corehq.apps.accounting.tasks import send_purchase_receipt
receipt_email_template = 'accounting/invoice_receipt_email.html'
receipt_email_template_plaintext = 'accounting/invoice_receipt_email_plaintext.txt'
try:
domain = invoice.subscription.account.created_by_domain
product = SoftwareProductType.get_type_by_domain(Domain.get_by_name(domain))
context = {
'invoicing_contact_email': settings.INVOICING_CONTACT_EMAIL,
'balance': fmt_dollar_amount(invoice.balance),
'is_paid': invoice.is_paid,
'date_due': invoice.date_due.strftime(USER_DATE_FORMAT) if invoice.date_due else 'None',
'invoice_num': invoice.invoice_number,
}
send_purchase_receipt.delay(
payment_record, product, receipt_email_template, receipt_email_template_plaintext, context,
)
except:
self._handle_email_failure(invoice, payment_record)
    def _handle_card_declined(self, invoice, payment_method):
        logger.error("[Billing][Autopay] An automatic payment failed for invoice: {} "
                     "because the card was declined. This invoice will not be automatically paid."
                     .format(invoice.id))
    def _handle_card_errors(self, invoice, payment_method, e):
        logger.error("[Billing][Autopay] An automatic payment failed for invoice: {invoice} "
                     "because of {error}. This invoice will not be automatically paid."
                     .format(invoice=invoice.id, error=e))
    def _handle_email_failure(self, invoice, payment_record):
        logger.error("[Billing][Autopay] During an automatic payment, sending a payment receipt failed"
                     " for Payment Record: {}. Everything else succeeded.".format(payment_record.id))
```
#### File: accounting/tests/base_tests.py
```python
from django.test import TestCase
from corehq.apps.accounting import generator
from corehq.apps.domain.models import Domain
from django_prbac.models import Role
class BaseAccountingTest(TestCase):
def setUp(self):
Role.get_cache().clear()
generator.instantiate_accounting_for_tests()
def tearDown(self):
for domain in Domain.get_all():
domain.delete()
```
#### File: accounting/tests/test_autopay.py
```python
import random
import mock
from stripe import Charge
from django.core import mail
from corehq.apps.accounting.tests.test_invoicing import BaseInvoiceTestCase
from corehq.apps.accounting import generator, utils, tasks
from corehq.apps.accounting.generator import FakeStripeCard, FakeStripeCustomer
from corehq.apps.accounting.models import Invoice, StripePaymentMethod, PaymentRecord
from corehq.apps.accounting.payment_handlers import AutoPayInvoicePaymentHandler
class TestBillingAutoPay(BaseInvoiceTestCase):
def setUp(self):
super(TestBillingAutoPay, self).setUp()
self.account.created_by_domain = self.domain
self.account.save()
self.currency = generator.init_default_currency()
self.web_user = generator.arbitrary_web_user()
self.dimagi_user = generator.arbitrary_web_user(is_dimagi=True)
self.fake_card = FakeStripeCard()
self.fake_stripe_customer = FakeStripeCustomer(cards=[self.fake_card])
self.account.update_autopay_user(self.web_user.username)
self.invoice_date = utils.months_from_date(self.subscription.date_start,
random.randint(2, self.subscription_length))
self.account_2 = generator.billing_account(self.dimagi_user, self.web_user)
self.domain_2 = generator.arbitrary_domain()
self.subscription_2, self.subscription_length_2 = generator.generate_domain_subscription_from_date(
generator.get_start_date(), self.account_2, self.domain_2.name,
min_num_months=self.min_subscription_length,
)
tasks.generate_invoices(self.invoice_date)
@mock.patch.object(StripePaymentMethod, 'customer')
def test_get_autopayable_invoices(self, fake_customer):
fake_customer.__get__ = mock.Mock(return_value=self.fake_stripe_customer)
self.payment_method = StripePaymentMethod(web_user=self.web_user.username,
customer_id=self.fake_stripe_customer.id)
self.payment_method.set_autopay(self.fake_card, self.account)
self.payment_method.save()
autopayable_invoice = Invoice.objects.filter(subscription=self.subscription)
date_due = autopayable_invoice.first().date_due
autopayable_invoices = Invoice.autopayable_invoices(date_due)
self.assertItemsEqual(autopayable_invoices, autopayable_invoice)
@mock.patch.object(StripePaymentMethod, 'customer')
@mock.patch.object(Charge, 'create')
def test_pay_autopayable_invoices(self, fake_charge, fake_customer):
fake_customer.__get__ = mock.Mock(return_value=self.fake_stripe_customer)
self.payment_method = StripePaymentMethod(web_user=self.web_user.username,
customer_id=self.fake_stripe_customer.id)
self.payment_method.set_autopay(self.fake_card, self.account)
self.payment_method.save()
original_outbox_length = len(mail.outbox)
autopayable_invoice = Invoice.objects.filter(subscription=self.subscription)
date_due = autopayable_invoice.first().date_due
AutoPayInvoicePaymentHandler().pay_autopayable_invoices(date_due)
self.assertAlmostEqual(autopayable_invoice.first().get_total(), 0)
self.assertEqual(len(PaymentRecord.objects.all()), 1)
self.assertEqual(len(mail.outbox), original_outbox_length + 1)
```
#### File: accounting/tests/test_invoicing.py
```python
from decimal import Decimal
import random
import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.core.management import call_command
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import (
SmsGatewayFee, SmsGatewayFeeCriteria, SmsUsageFee, SmsUsageFeeCriteria,
SmsBillable,
)
from corehq.apps.accounting import generator, tasks, utils
from corehq.apps.accounting.models import (
Invoice, FeatureType, LineItem, Subscriber, DefaultProductPlan,
CreditAdjustment, CreditLine, SubscriptionAdjustment, SoftwareProductType,
SoftwarePlanEdition, BillingRecord, BillingAccount, SubscriptionType,
InvoiceBaseManager, SMALL_INVOICE_THRESHOLD, Subscription
)
class BaseInvoiceTestCase(BaseAccountingTest):
min_subscription_length = 3
def setUp(self):
super(BaseInvoiceTestCase, self).setUp()
self.billing_contact = generator.arbitrary_web_user()
self.dimagi_user = generator.arbitrary_web_user(is_dimagi=True)
self.currency = generator.init_default_currency()
self.account = generator.billing_account(
self.dimagi_user, self.billing_contact)
self.domain = generator.arbitrary_domain()
self.subscription, self.subscription_length = generator.generate_domain_subscription_from_date(
generator.get_start_date(), self.account, self.domain.name, min_num_months=self.min_subscription_length,
)
self.community_plan = DefaultProductPlan.objects.get(
product_type=SoftwareProductType.COMMCARE,
edition=SoftwarePlanEdition.COMMUNITY
).plan.get_version()
def tearDown(self):
self.billing_contact.delete()
self.dimagi_user.delete()
self.domain.delete()
CreditAdjustment.objects.all().delete()
CreditLine.objects.all().delete()
BillingRecord.objects.all().delete()
LineItem.objects.all().delete()
SubscriptionAdjustment.objects.all().delete()
Invoice.objects.all().delete()
generator.delete_all_subscriptions()
generator.delete_all_accounts()
super(BaseInvoiceTestCase, self).tearDown()
class TestInvoice(BaseInvoiceTestCase):
"""
Tests that invoices are properly generated for the first month, last month, and a random month in the middle
of a subscription for a domain.
"""
def test_no_invoice_before_start(self):
"""
No invoice gets created if the subscription didn't start in the previous month.
"""
tasks.generate_invoices(self.subscription.date_start)
self.assertEqual(self.subscription.invoice_set.count(), 0)
def test_subscription_invoice(self):
invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
tasks.generate_invoices(invoice_date)
self.assertEqual(self.subscription.invoice_set.count(), 1)
self.assertEqual(self.subscription.subscriber.domain, self.domain.name)
invoice = self.subscription.invoice_set.latest('date_created')
num_product_line_items = invoice.lineitem_set.get_products().count()
self.assertEqual(num_product_line_items, self.subscription.plan_version.product_rates.count())
num_feature_line_items = invoice.lineitem_set.get_features().count()
self.assertEqual(num_feature_line_items, self.subscription.plan_version.feature_rates.count())
self.assertEqual(invoice.subscription, self.subscription)
self.assertGreater(invoice.balance, Decimal('0.0000'))
def test_no_invoice_after_end(self):
"""
No invoices should be generated for the months after the end date of the subscription.
"""
invoice_date = utils.months_from_date(self.subscription.date_end, 2)
tasks.generate_invoices(invoice_date)
self.assertEqual(self.subscription.invoice_set.count(), 0)
def test_community_no_charges_no_invoice(self):
"""
No invoices should be generated for domains that are not on a subscription and do not
have any per_excess charges on users or SMS messages
"""
domain = generator.arbitrary_domain()
tasks.generate_invoices()
self.assertRaises(ObjectDoesNotExist,
lambda: Invoice.objects.get(subscription__subscriber__domain=domain.name))
domain.delete()
def test_community_invoice(self):
"""
For an unsubscribed domain with any charges over the community limit for the month of invoicing,
make sure that an invoice is generated in addition to a subscription for that month to
the community plan.
"""
domain = generator.arbitrary_domain()
generator.create_excess_community_users(domain)
account = BillingAccount.get_or_create_account_by_domain(
domain, created_by=self.dimagi_user)[0]
billing_contact = generator.arbitrary_contact_info(account, self.dimagi_user)
billing_contact.save()
account.date_confirmed_extra_charges = datetime.date.today()
account.save()
tasks.generate_invoices()
subscriber = Subscriber.objects.get(domain=domain.name)
invoices = Invoice.objects.filter(subscription__subscriber=subscriber)
self.assertEqual(invoices.count(), 1)
invoice = invoices.get()
self.assertEqual(invoice.subscription.subscriber.domain, domain.name)
self.assertEqual(invoice.subscription.date_start, invoice.date_start)
self.assertEqual(
invoice.subscription.date_end - datetime.timedelta(days=1),
invoice.date_end
)
domain.delete()
def test_date_due_not_set_small_invoice(self):
"""Date Due doesn't get set if the invoice is small"""
Subscription.objects.all().delete()
subscription_length = 5 # months
plan = DefaultProductPlan.objects.get(
edition=SoftwarePlanEdition.STANDARD,
product_type=SoftwareProductType.COMMCARE,
is_trial=False
).plan.get_version()
subscription, _ = generator.generate_domain_subscription_from_date(
generator.get_start_date(),
self.account,
self.domain.name,
subscription_length=subscription_length,
plan_version=plan,
)
invoice_date_small = utils.months_from_date(subscription.date_start, 1)
tasks.generate_invoices(invoice_date_small)
small_invoice = subscription.invoice_set.first()
self.assertTrue(small_invoice.balance <= SMALL_INVOICE_THRESHOLD)
self.assertIsNone(small_invoice.date_due)
def test_date_due_set_large_invoice(self):
"""Date Due only gets set for a large invoice (> $100)"""
Subscription.objects.all().delete()
subscription_length = 5 # months
plan = DefaultProductPlan.objects.get(
edition=SoftwarePlanEdition.ADVANCED,
product_type=SoftwareProductType.COMMCARE,
is_trial=False
).plan.get_version()
subscription, _ = generator.generate_domain_subscription_from_date(
generator.get_start_date(),
self.account,
self.domain.name,
subscription_length=subscription_length,
plan_version=plan,
)
invoice_date_large = utils.months_from_date(subscription.date_start, 3)
tasks.generate_invoices(invoice_date_large)
large_invoice = subscription.invoice_set.last()
self.assertTrue(large_invoice.balance > SMALL_INVOICE_THRESHOLD)
self.assertIsNotNone(large_invoice.date_due)
def test_date_due_gets_set_autopay(self):
"""Date due always gets set for autopay """
Subscription.objects.all().delete()
subscription_length = 4
plan = DefaultProductPlan.objects.get(
edition=SoftwarePlanEdition.STANDARD,
product_type=SoftwareProductType.COMMCARE,
is_trial=False
).plan.get_version()
autopay_subscription, _ = generator.generate_domain_subscription_from_date(
generator.get_start_date(),
self.account,
self.domain.name,
subscription_length=subscription_length,
plan_version=plan
)
autopay_subscription.account.update_autopay_user(self.billing_contact.username)
invoice_date_autopay = utils.months_from_date(autopay_subscription.date_start, 1)
tasks.generate_invoices(invoice_date_autopay)
autopay_invoice = autopay_subscription.invoice_set.last()
self.assertTrue(autopay_invoice.balance <= SMALL_INVOICE_THRESHOLD)
self.assertIsNotNone(autopay_invoice.date_due)
class TestContractedInvoices(BaseInvoiceTestCase):
def setUp(self):
super(TestContractedInvoices, self).setUp()
generator.delete_all_subscriptions()
self.subscription, self.subscription_length = generator.generate_domain_subscription_from_date(
generator.get_start_date(),
self.account,
self.domain.name,
min_num_months=self.min_subscription_length,
service_type=SubscriptionType.CONTRACTED,
)
self.invoice_date = utils.months_from_date(
self.subscription.date_start,
random.randint(2, self.subscription_length)
)
def test_contracted_invoice_email_recipient(self):
"""
For contracted invoices, emails should be sent to <EMAIL>
"""
expected_recipient = ["<EMAIL>"]
tasks.generate_invoices(self.invoice_date)
self.assertEqual(Invoice.objects.count(), 1)
actual_recipient = Invoice.objects.first().email_recipients
self.assertEqual(actual_recipient, expected_recipient)
def test_contracted_invoice_email_template(self):
"""
Emails for contracted invoices should use the contracted invoices template
"""
expected_template = BillingRecord.INVOICE_CONTRACTED_HTML_TEMPLATE
tasks.generate_invoices(self.invoice_date)
self.assertEqual(BillingRecord.objects.count(), 1)
actual_template = BillingRecord.objects.first().html_template
self.assertTrue(actual_template, expected_template)
class TestProductLineItem(BaseInvoiceTestCase):
"""
Tests that the Product line item is properly generated and prorated (when applicable) in an invoice.
"""
def setUp(self):
super(TestProductLineItem, self).setUp()
self.product_rate = self.subscription.plan_version.product_rates.get()
self.prorate = Decimal("%.2f" % round(self.product_rate.monthly_fee / 30, 2))
def test_standard(self):
"""
For the Product Line Item, make sure that the Product rate is not prorated:
- base_cost uses the correct monthly fee
- base_description is not None
- unit_description is None
- unit_cost is 0.0
- quantity is 1
- subtotal = monthly fee
"""
invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
tasks.generate_invoices(invoice_date)
invoice = self.subscription.invoice_set.latest('date_created')
product_line_items = invoice.lineitem_set.filter(feature_rate__exact=None)
self.assertEqual(product_line_items.count(), 1)
product_line_item = product_line_items.get()
self.assertIsNotNone(product_line_item.base_description)
self.assertEqual(product_line_item.base_cost, self.product_rate.monthly_fee)
self.assertIsNone(product_line_item.unit_description)
self.assertEqual(product_line_item.unit_cost, Decimal('0.0000'))
self.assertEqual(product_line_item.quantity, 1)
self.assertEqual(product_line_item.subtotal, self.product_rate.monthly_fee)
# no adjustments
self.assertEqual(product_line_item.total, self.product_rate.monthly_fee)
def test_prorate(self):
"""
Make sure that the product is prorated for the first and last invoices, which fall in a partial month:
- base_cost is 0.0
- base_description is None
- unit_description is not None
- unit_cost is prorated
- quantity > 1
- subtotal = unit_cost * quantity
"""
first_invoice_date = utils.months_from_date(self.subscription.date_start, 1)
tasks.generate_invoices(first_invoice_date)
last_invoice_date = utils.months_from_date(self.subscription.date_end, 1)
tasks.generate_invoices(last_invoice_date)
for invoice in self.subscription.invoice_set.all():
product_line_items = invoice.lineitem_set.filter(feature_rate__exact=None)
self.assertEqual(product_line_items.count(), 1)
product_line_item = product_line_items.get()
self.assertGreater(product_line_item.quantity, 1)
self.assertEqual(product_line_item.unit_cost, self.prorate)
self.assertIsNotNone(product_line_item.unit_description)
self.assertEqual(product_line_item.base_cost, Decimal('0.0000'))
self.assertIsNone(product_line_item.base_description)
self.assertEqual(product_line_item.subtotal, product_line_item.unit_cost * product_line_item.quantity)
# no adjustments
self.assertEqual(product_line_item.total, product_line_item.unit_cost * product_line_item.quantity)
class TestUserLineItem(BaseInvoiceTestCase):
def setUp(self):
super(TestUserLineItem, self).setUp()
self.user_rate = self.subscription.plan_version.feature_rates.filter(feature__feature_type=FeatureType.USER)[:1].get()
def test_under_limit(self):
"""
Make sure that the line item produced for the User rate has:
- base_description is None
- base_cost is 0.0
- unit_cost is equal to the per_excess_fee
- quantity is equal to 0
- unit_description is None
- total and subtotals are 0.0
"""
invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
num_users = lambda: random.randint(0, self.user_rate.monthly_limit)
num_active = num_users()
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_active)
num_inactive = num_users()
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_inactive, is_active=False)
tasks.generate_invoices(invoice_date)
invoice = self.subscription.invoice_set.latest('date_created')
user_line_item = invoice.lineitem_set.get_feature_by_type(FeatureType.USER).get()
self.assertIsNone(user_line_item.base_description)
self.assertEqual(user_line_item.base_cost, Decimal('0.0000'))
self.assertIsNone(user_line_item.unit_description)
self.assertEqual(user_line_item.quantity, 0)
self.assertEqual(user_line_item.unit_cost, self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.subtotal, Decimal('0.0000'))
self.assertEqual(user_line_item.total, Decimal('0.0000'))
def test_over_limit(self):
"""
Make sure that the line item produced for the User rate has:
- base_description is None
- base_cost is 0.0
- unit_description is not None
- unit_cost is equal to the per_excess_fee on the user rate
- quantity is equal to number of commcare users in that domain minus the monthly_limit on the user rate
- total and subtotals are equal to number of extra users * per_excess_fee
"""
invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
num_users = lambda: random.randint(self.user_rate.monthly_limit + 1, self.user_rate.monthly_limit + 2)
num_active = num_users()
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_active)
num_inactive = num_users()
generator.arbitrary_commcare_users_for_domain(self.domain.name, num_inactive, is_active=False)
tasks.generate_invoices(invoice_date)
invoice = self.subscription.invoice_set.latest('date_created')
user_line_item = invoice.lineitem_set.get_feature_by_type(FeatureType.USER).get()
# there is no base cost
self.assertIsNone(user_line_item.base_description)
self.assertEqual(user_line_item.base_cost, Decimal('0.0000'))
num_to_charge = num_active - self.user_rate.monthly_limit
self.assertIsNotNone(user_line_item.unit_description)
self.assertEqual(user_line_item.quantity, num_to_charge)
self.assertEqual(user_line_item.unit_cost, self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.subtotal, num_to_charge * self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.total, num_to_charge * self.user_rate.per_excess_fee)
def test_community_over_limit(self):
"""
For a domain under community (no subscription) with users over the community limit, make sure that:
- base_description is None
- base_cost is 0.0
- unit_description is not None
- unit_cost is equal to the per_excess_fee on the user rate
- quantity is equal to number of commcare users in that domain minus the monthly_limit on the user rate
- total and subtotals are equal to number of extra users * per_excess_fee
"""
domain = generator.arbitrary_domain()
num_active = generator.create_excess_community_users(domain)
account = BillingAccount.get_or_create_account_by_domain(
domain, created_by=self.dimagi_user)[0]
billing_contact = generator.arbitrary_contact_info(account, self.dimagi_user)
billing_contact.save()
account.date_confirmed_extra_charges = datetime.date.today()
account.save()
tasks.generate_invoices()
subscriber = Subscriber.objects.get(domain=domain.name)
invoice = Invoice.objects.filter(subscription__subscriber=subscriber).get()
user_line_item = invoice.lineitem_set.get_feature_by_type(FeatureType.USER).get()
self.assertIsNone(user_line_item.base_description)
self.assertEqual(user_line_item.base_cost, Decimal('0.0000'))
num_to_charge = num_active - self.community_plan.user_limit
self.assertIsNotNone(user_line_item.unit_description)
self.assertEqual(user_line_item.quantity, num_to_charge)
self.assertEqual(user_line_item.unit_cost, self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.subtotal, num_to_charge * self.user_rate.per_excess_fee)
self.assertEqual(user_line_item.total, num_to_charge * self.user_rate.per_excess_fee)
domain.delete()
class TestSmsLineItem(BaseInvoiceTestCase):
def setUp(self):
super(TestSmsLineItem, self).setUp()
self.sms_rate = self.subscription.plan_version.feature_rates.filter(feature__feature_type=FeatureType.SMS).get()
def test_under_limit(self):
"""
Make sure that the Line Item for the SMS Rate has the following:
- base_description is None
- base_cost is 0.0
- unit_description is not None
- unit_cost is 0.0
- quantity is equal to 1
- total and subtotals are 0.0
"""
invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
sms_date = utils.months_from_date(invoice_date, -1)
num_sms = random.randint(0, self.sms_rate.monthly_limit/2)
generator.arbitrary_sms_billables_for_domain(
self.subscription.subscriber.domain, INCOMING, sms_date, num_sms
)
generator.arbitrary_sms_billables_for_domain(
self.subscription.subscriber.domain, OUTGOING, sms_date, num_sms
)
tasks.generate_invoices(invoice_date)
invoice = self.subscription.invoice_set.latest('date_created')
sms_line_item = invoice.lineitem_set.get_feature_by_type(FeatureType.SMS).get()
# there is no base cost
self.assertIsNone(sms_line_item.base_description)
self.assertEqual(sms_line_item.base_cost, Decimal('0.0000'))
self.assertEqual(sms_line_item.quantity, 1)
self.assertEqual(sms_line_item.unit_cost, Decimal('0.0000'))
self.assertIsNotNone(sms_line_item.unit_description)
self.assertEqual(sms_line_item.subtotal, Decimal('0.0000'))
self.assertEqual(sms_line_item.total, Decimal('0.0000'))
self._delete_sms_billables()
def test_over_limit(self):
"""
Make sure that the Line Item for the SMS Rate has the following:
- base_description is None
- base_cost is 0.0
- unit_description is not None
- unit_cost is greater than 0.0
- quantity is equal to 1
- total and subtotals are greater than zero
"""
invoice_date = utils.months_from_date(self.subscription.date_start, random.randint(2, self.subscription_length))
sms_date = utils.months_from_date(invoice_date, -1)
num_sms = random.randint(self.sms_rate.monthly_limit + 1, self.sms_rate.monthly_limit + 2)
generator.arbitrary_sms_billables_for_domain(
self.subscription.subscriber.domain, INCOMING, sms_date, num_sms
)
generator.arbitrary_sms_billables_for_domain(
self.subscription.subscriber.domain, OUTGOING, sms_date, num_sms
)
tasks.generate_invoices(invoice_date)
invoice = self.subscription.invoice_set.latest('date_created')
sms_line_item = invoice.lineitem_set.get_feature_by_type(FeatureType.SMS).get()
# there is no base cost
self.assertIsNone(sms_line_item.base_description)
self.assertEqual(sms_line_item.base_cost, Decimal('0.0000'))
self.assertEqual(sms_line_item.quantity, 1)
self.assertGreater(sms_line_item.unit_cost, Decimal('0.0000'))
self.assertIsNotNone(sms_line_item.unit_description)
self.assertGreater(sms_line_item.subtotal, Decimal('0.0000'))
self.assertGreater(sms_line_item.total, Decimal('0.0000'))
self._delete_sms_billables()
def _delete_sms_billables(self):
SmsBillable.objects.all().delete()
SmsGatewayFee.objects.all().delete()
SmsGatewayFeeCriteria.objects.all().delete()
SmsUsageFee.objects.all().delete()
SmsUsageFeeCriteria.objects.all().delete()
class TestManagementCmdInvoice(BaseInvoiceTestCase):
def test_hide_invoices(self):
"""
Tests hiding invoices via the management command
"""
invoice_date = utils.months_from_date(self.subscription.date_start,
random.randint(2, self.subscription_length))
tasks.generate_invoices(invoice_date)
invoices = self.subscription.invoice_set.all()
# Basic hide invoices
call_command('hide_invoices_by_id', *[i.pk for i in invoices])
for i in invoices:
self.assertTrue(super(
InvoiceBaseManager, Invoice.objects).get_queryset().get(pk=i.pk).is_hidden_to_ops)
# Basic unhide invoices
call_command('hide_invoices_by_id', *[i.pk for i in invoices], unhide=True)
for i in invoices:
self.assertFalse(super(
InvoiceBaseManager, Invoice.objects).get_queryset().get(pk=i.pk).is_hidden_to_ops)
```
#### File: accounting/tests/test_renew_subscription.py
```python
import datetime
from corehq.apps.domain.models import Domain
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.accounting import generator
from corehq.apps.accounting.models import (
Subscription,
BillingAccount,
DefaultProductPlan,
SoftwarePlanEdition
)
class TestRenewSubscriptions(BaseAccountingTest):
def setUp(self):
super(TestRenewSubscriptions, self).setUp()
self.domain = Domain(
name="test-domain-sub",
is_active=True,
)
self.domain.save()
self.admin_user = generator.arbitrary_web_user()
self.admin_user.add_domain_membership(self.domain.name, is_admin=True)
self.admin_user.save()
self.account = BillingAccount.get_or_create_account_by_domain(
self.domain.name, created_by=self.admin_user.username)[0]
self.standard_plan = DefaultProductPlan.get_default_plan_by_domain(
self.domain.name, edition=SoftwarePlanEdition.STANDARD)
today = datetime.date.today()
yesterday = today + datetime.timedelta(days=-1)
tomorrow = today + datetime.timedelta(days=1)
self.subscription = Subscription.new_domain_subscription(
self.account,
self.domain.name,
self.standard_plan,
web_user=self.admin_user.username,
date_start=yesterday,
date_end=tomorrow,
)
self.subscription.save()
def test_simple_renewal(self):
today = datetime.date.today()
new_end_date = today + datetime.timedelta(days=9)
renewed_subscription = self.subscription.renew_subscription(
date_end=new_end_date
)
self.assertEqual(renewed_subscription.date_end, new_end_date)
self.assertEqual(renewed_subscription.date_start, self.subscription.date_end)
self.assertEqual(renewed_subscription.plan_version, self.subscription.plan_version)
def test_change_plan_on_renewal(self):
today = datetime.date.today()
new_end_date = today + datetime.timedelta(days=9)
new_edition = SoftwarePlanEdition.ADVANCED
new_plan = DefaultProductPlan.get_default_plan_by_domain(self.domain.name, new_edition)
renewed_subscription = self.subscription.renew_subscription(
date_end=new_end_date,
new_version=new_plan
)
self.assertEqual(renewed_subscription.plan_version, new_plan)
```
#### File: apps/accounting/utils.py
```python
import calendar
from collections import namedtuple
import datetime
from decimal import Decimal
from django.conf import settings
from django.template.loader import render_to_string
from corehq.util.view_utils import absolute_reverse
from django.utils.translation import ugettext_lazy as _
from corehq import privileges
from corehq.apps.domain.models import Domain
from corehq.util.quickcache import quickcache
from corehq.apps.accounting.exceptions import (
AccountingError,
ProductPlanNotFoundError,
)
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.dates import add_months
from django_prbac.models import Role, UserRole
EXCHANGE_RATE_DECIMAL_PLACES = 9
def get_first_last_days(year, month):
last_day = calendar.monthrange(year, month)[1]
date_start = datetime.date(year, month, 1)
date_end = datetime.date(year, month, last_day)
return date_start, date_end
def get_previous_month_date_range(reference_date=None):
reference_date = reference_date or datetime.date.today()
last_month_year, last_month = add_months(reference_date.year, reference_date.month, -1)
return get_first_last_days(last_month_year, last_month)
def months_from_date(reference_date, months_from_date):
year, month = add_months(reference_date.year, reference_date.month, months_from_date)
return datetime.date(year, month, 1)
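# Illustrative behaviour of the date helpers above (dates are examples only):
#   get_first_last_days(2015, 2)                              -> (date(2015, 2, 1), date(2015, 2, 28))
#   get_previous_month_date_range(datetime.date(2015, 3, 15)) -> (date(2015, 2, 1), date(2015, 2, 28))
#   months_from_date(datetime.date(2015, 1, 31), 1)           -> date(2015, 2, 1)  # always the 1st of the month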
def ensure_domain_instance(domain):
if not isinstance(domain, Domain):
domain = Domain.get_by_name(domain)
return domain
def fmt_feature_rate_dict(feature, feature_rate=None):
"""
This will be turned into a JSON representation of this Feature and its FeatureRate
"""
if feature_rate is None:
feature_rate = feature.get_rate()
return {
'name': feature.name,
'feature_type': feature.feature_type,
'feature_id': feature.id,
'rate_id': feature_rate.id,
'monthly_fee': feature_rate.monthly_fee.__str__(),
'monthly_limit': feature_rate.monthly_limit,
'per_excess_fee': feature_rate.per_excess_fee.__str__(),
}
def fmt_product_rate_dict(product, product_rate=None):
"""
This will be turned into a JSON representation of this SoftwareProduct and its SoftwareProductRate
"""
if product_rate is None:
product_rate = product.get_rate()
return {
'name': product.name,
'product_type': product.product_type,
'product_id': product.id,
'rate_id': product_rate.id,
'monthly_fee': product_rate.monthly_fee.__str__(),
}
def get_privileges(plan_version):
role = plan_version.role.get_cached_role()
return set([grant.to_role.slug for grant in role.memberships_granted.all()])
ChangeStatusResult = namedtuple('ChangeStatusResult', ['adjustment_reason', 'downgraded_privs', 'upgraded_privs'])
def get_change_status(from_plan_version, to_plan_version):
from_privs = (
get_privileges(from_plan_version)
if from_plan_version is not None
else set(privileges.MAX_PRIVILEGES)
)
to_privs = get_privileges(to_plan_version) if to_plan_version is not None else set()
downgraded_privs = from_privs.difference(to_privs)
upgraded_privs = to_privs
from corehq.apps.accounting.models import SubscriptionAdjustmentReason as Reason
if from_plan_version is None:
adjustment_reason = Reason.CREATE
else:
adjustment_reason = Reason.SWITCH
if len(downgraded_privs) == 0 and len(upgraded_privs) > 0:
adjustment_reason = Reason.UPGRADE
elif len(upgraded_privs) == 0 and len(downgraded_privs) > 0:
adjustment_reason = Reason.DOWNGRADE
return ChangeStatusResult(adjustment_reason, downgraded_privs, upgraded_privs)
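# Classification sketch for get_change_status (privilege sets are illustrative and
# both plan versions are assumed to be non-None): if the old plan's privileges are
# a subset of the new plan's non-empty privileges, the reason is Reason.UPGRADE;
# if the new plan grants no privileges while the old one did, Reason.DOWNGRADE;
# any other combination is recorded as a plain Reason.SWITCH.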
def domain_has_privilege_cache_args(domain, privilege_slug, **assignment):
return [
domain.name if isinstance(domain, Domain) else domain,
privilege_slug
]
@quickcache(domain_has_privilege_cache_args, timeout=10)
def domain_has_privilege(domain, privilege_slug, **assignment):
from corehq.apps.accounting.models import Subscription
try:
plan_version = Subscription.get_subscribed_plan_by_domain(domain)[0]
privilege = Role.get_privilege(privilege_slug, assignment)
if privilege is None:
return False
if plan_version.role.has_privilege(privilege):
return True
except ProductPlanNotFoundError:
return False
except AccountingError:
pass
return False
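# Typical call-site sketch (the domain name is an assumption; the privilege slug
# is reused from this module only for illustration):
#   domain_has_privilege('my-project', privileges.ACCOUNTING_ADMIN)
# returns True only when the domain's subscribed plan role carries the privilege,
# and quietly returns False on ProductPlanNotFoundError or a missing privilege.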
def is_active_subscription(date_start, date_end):
today = datetime.date.today()
return ((date_start is None or date_start <= today)
and (date_end is None or today < date_end))
def has_subscription_already_ended(subscription):
return (subscription.date_end is not None
and subscription.date_end <= datetime.date.today())
def get_money_str(amount):
if amount is not None:
if amount < 0:
fmt = "-$%0.2f"
amount = abs(amount)
else:
fmt = "$%0.2f"
return fmt % amount
return ""
def get_address_from_invoice(invoice):
from corehq.apps.accounting.invoice_pdf import Address
from corehq.apps.accounting.models import BillingContactInfo
try:
contact_info = BillingContactInfo.objects.get(
account=invoice.account,
)
return Address(
name=(
"%s %s" %
(contact_info.first_name
if contact_info.first_name is not None else "",
contact_info.last_name
if contact_info.last_name is not None else "")
),
company_name=contact_info.company_name,
first_line=contact_info.first_line,
second_line=contact_info.second_line,
city=contact_info.city,
region=contact_info.state_province_region,
postal_code=contact_info.postal_code,
country=contact_info.country,
)
except BillingContactInfo.DoesNotExist:
return Address()
def get_dimagi_from_email_by_product(product):
return ("Dimagi %(product)s Accounts <%(email)s>" % {
'product': product,
'email': settings.INVOICING_CONTACT_EMAIL,
})
def quantize_accounting_decimal(decimal_value):
return decimal_value.quantize(Decimal(10) ** -2)
def fmt_dollar_amount(decimal_value):
return _("USD %s") % quantize_accounting_decimal(decimal_value)
def get_customer_cards(account, username, domain):
from corehq.apps.accounting.models import (
StripePaymentMethod, PaymentMethodType,
)
try:
payment_method = StripePaymentMethod.objects.get(
web_user=username,
method_type=PaymentMethodType.STRIPE
)
stripe_customer = payment_method.customer
return stripe_customer.cards
except StripePaymentMethod.DoesNotExist:
pass
return None
def is_accounting_admin(user):
accounting_privilege = Role.get_privilege(privileges.ACCOUNTING_ADMIN)
if accounting_privilege is None:
return False
try:
return user.prbac_role.has_privilege(accounting_privilege)
except (AttributeError, UserRole.DoesNotExist):
return False
def get_active_reminders_by_domain_name(domain_name):
from corehq.apps.reminders.models import (
CaseReminderHandler,
REMINDER_TYPE_DEFAULT,
REMINDER_TYPE_KEYWORD_INITIATED,
)
db = CaseReminderHandler.get_db()
key = [domain_name]
reminder_rules = db.view(
'reminders/handlers_by_reminder_type',
startkey=key,
endkey=(key + [{}]),
reduce=False
).all()
return [
CaseReminderHandler.wrap(reminder_doc)
for reminder_doc in iter_docs(db, [r['id'] for r in reminder_rules])
if (
reminder_doc.get('active', True)
and reminder_doc.get('reminder_type', REMINDER_TYPE_DEFAULT) != REMINDER_TYPE_KEYWORD_INITIATED
)
]
def make_anchor_tag(href, name, attrs={}):
context = {
'href': href,
'name': name,
'attrs': attrs,
}
return render_to_string('accounting/partials/anchor_tag.html', context)
def get_default_domain_url(domain):
from corehq.apps.domain.views import DefaultProjectSettingsView
return absolute_reverse(
DefaultProjectSettingsView.urlname,
args=[domain],
)
```
#### File: apps/app_manager/detail_screen.py
```python
from corehq.apps.app_manager import id_strings
from corehq.apps.app_manager.suite_xml import xml_models as sx
from corehq.apps.app_manager.suite_xml import const
from corehq.apps.app_manager.util import is_sort_only_column
from corehq.apps.app_manager.xpath import (
CaseXPath,
CommCareSession,
IndicatorXpath,
LedgerdbXpath,
LocationXpath,
XPath,
dot_interpolate,
UserCaseXPath)
from corehq.apps.hqmedia.models import CommCareMultimedia
CASE_PROPERTY_MAP = {
# IMPORTANT: if you edit this you probably want to also edit
# the corresponding map in cloudcare
# (corehq/apps/cloudcare/static/cloudcare/js/backbone/cases.js)
'external-id': 'external_id',
'date-opened': 'date_opened',
'status': '@status',
'name': 'case_name',
'owner_id': '@owner_id',
}
def get_column_generator(app, module, detail, column, sort_element=None,
order=None, detail_type=None):
cls = get_class_for_format(column.format)
return cls(app, module, detail, column, sort_element, order, detail_type=detail_type)
def get_class_for_format(slug):
return get_class_for_format._format_map.get(slug, FormattedDetailColumn)
get_class_for_format._format_map = {}
class register_format_type(object):
def __init__(self, slug):
self.slug = slug
def __call__(self, klass):
get_class_for_format._format_map[self.slug] = klass
return klass
def get_column_xpath_generator(app, module, detail, column):
cls = get_class_for_type(column.field_type)
return cls(app, module, detail, column)
def get_class_for_type(slug):
return get_class_for_type._type_map.get(slug, BaseXpathGenerator)
get_class_for_type._type_map = {}
class register_type_processor(object):
def __init__(self, slug):
self.slug = slug
def __call__(self, klass):
get_class_for_type._type_map[self.slug] = klass
return klass
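# Sketch of how the two registries above are used; the 'shouting' format is a
# hypothetical example, not a format that exists in this module:
#   @register_format_type('shouting')
#   class Shouting(FormattedDetailColumn):
#       XPATH_FUNCTION = u"upper-case({xpath})"
#   get_class_for_format('shouting')  -> Shouting
#   get_class_for_format('unknown')   -> FormattedDetailColumn (the default)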
class BaseXpathGenerator(object):
def __init__(self, app, module, detail, column):
self.app = app
self.module = module
self.detail = detail
self.column = column
self.id_strings = id_strings
@property
def xpath(self):
return self.column.field
class FormattedDetailColumn(object):
header_width = None
template_width = None
template_form = None
def __init__(self, app, module, detail, column, sort_element=None,
order=None, detail_type=None):
self.app = app
self.module = module
self.detail = detail
self.detail_type = detail_type
self.column = column
self.sort_element = sort_element
self.order = order
self.id_strings = id_strings
@property
def locale_id(self):
if not is_sort_only_column(self.column):
return self.id_strings.detail_column_header_locale(
self.module, self.detail_type, self.column,
)
else:
return None
@property
def header(self):
header = sx.Header(
text=sx.Text(locale_id=self.locale_id),
width=self.header_width
)
return header
variables = None
@property
def template(self):
template = sx.Template(
text=sx.Text(xpath_function=self.xpath_function),
form=self.template_form,
width=self.template_width,
)
if self.variables:
for key, value in sorted(self.variables.items()):
template.text.xpath.variables.node.append(
sx.XpathVariable(name=key, locale_id=value).node
)
return template
@property
def sort_node(self):
if not (self.app.enable_multi_sort and self.detail.display == 'short'):
return
sort = None
if self.sort_xpath_function:
sort = sx.Sort(
text=sx.Text(xpath_function=self.sort_xpath_function),
type='string',
)
if self.sort_element:
if not sort:
# these have to be distinguished for the UI to be able to give
# user friendly choices
if self.sort_element.type in ('date', 'plain'):
sort_type = 'string'
else:
sort_type = self.sort_element.type
sort = sx.Sort(
text=sx.Text(xpath_function=self.xpath_function),
type=sort_type,
)
sort.order = self.order
sort.direction = self.sort_element.direction
# Flag field as index by making order "-2"
# this is for the CACHE_AND_INDEX toggle
# (I know, I know, it's hacky - blame Clayton)
if sort.type == 'index':
sort.type = 'string'
sort.order = -2
return sort
@property
def xpath(self):
return get_column_xpath_generator(self.app, self.module, self.detail,
self.column).xpath
XPATH_FUNCTION = u"{xpath}"
def evaluate_template(self, template):
if template:
return template.format(
xpath=self.xpath,
app=self.app,
module=self.module,
detail=self.detail,
column=self.column
)
@property
def xpath_function(self):
return self.evaluate_template(self.XPATH_FUNCTION)
@property
def hidden_header(self):
return sx.Header(
text=sx.Text(),
width=0,
)
@property
def hidden_template(self):
return sx.Template(
text=sx.Text(xpath_function=self.sort_xpath_function),
width=0,
)
SORT_XPATH_FUNCTION = None
@property
def sort_xpath_function(self):
return self.evaluate_template(self.SORT_XPATH_FUNCTION)
@property
def fields(self):
if self.app.enable_multi_sort:
yield sx.Field(
header=self.header,
template=self.template,
sort_node=self.sort_node,
)
elif self.sort_xpath_function and self.detail.display == 'short':
yield sx.Field(
header=self.header,
template=self.hidden_template,
)
yield sx.Field(
header=self.hidden_header,
template=self.template,
)
else:
yield sx.Field(
header=self.header,
template=self.template,
)
class HideShortHeaderColumn(FormattedDetailColumn):
@property
def header(self):
if self.detail.display == 'short':
header = sx.Header(
text=sx.Text(),
width=self.template_width
)
else:
header = super(HideShortHeaderColumn, self).header
return header
class HideShortColumn(HideShortHeaderColumn):
@property
def template_width(self):
if self.detail.display == 'short':
return 0
@register_format_type('plain')
class Plain(FormattedDetailColumn):
pass
@register_format_type('date')
class Date(FormattedDetailColumn):
XPATH_FUNCTION = u"if({xpath} = '', '', format_date(date(if({xpath} = '', 0, {xpath})),'short'))"
SORT_XPATH_FUNCTION = u"{xpath}"
@register_format_type('time-ago')
class TimeAgo(FormattedDetailColumn):
XPATH_FUNCTION = u"if({xpath} = '', '', string(int((today() - date({xpath})) div {column.time_ago_interval})))"
SORT_XPATH_FUNCTION = u"{xpath}"
@register_format_type('phone')
class Phone(FormattedDetailColumn):
@property
def template_form(self):
if self.detail.display == 'long':
return 'phone'
@register_format_type('enum')
class Enum(FormattedDetailColumn):
def _make_xpath(self, type):
if type == 'sort':
xpath_fragment_template = u"if({xpath} = '{key}', {i}, "
elif type == 'display':
xpath_fragment_template = u"if({xpath} = '{key}', ${key_as_var}, "
else:
raise ValueError('type must be in sort, display')
parts = []
for i, item in enumerate(self.column.enum):
parts.append(
xpath_fragment_template.format(
key=item.key,
key_as_var=item.key_as_variable,
xpath=self.xpath,
i=i,
)
)
parts.append(u"''")
parts.append(u")" * len(self.column.enum))
return ''.join(parts)
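# Illustrative output of _make_xpath (the column and keys are assumptions): for an
# enum column whose xpath is 'status' with keys 'open' and 'closed', the 'sort'
# variant expands to  if(status = 'open', 0, if(status = 'closed', 1, ''))  while
# the 'display' variant substitutes locale variables ($key_as_variable) in place
# of the integer indices.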
@property
def xpath_function(self):
return self._make_xpath(type='display')
@property
def sort_xpath_function(self):
return self._make_xpath(type='sort')
@property
def variables(self):
variables = {}
for item in self.column.enum:
v_key = item.key_as_variable
v_val = self.id_strings.detail_column_enum_variable(
self.module, self.detail_type, self.column, v_key)
variables[v_key] = v_val
return variables
@register_format_type('enum-image')
class EnumImage(Enum):
template_form = 'image'
@property
def header_width(self):
return self.template_width
@property
def template_width(self):
'''
Set column width to accommodate widest image.
'''
width = 0
if self.app.enable_case_list_icon_dynamic_width:
for i, item in enumerate(self.column.enum):
for path in item.value.values():
map_item = self.app.multimedia_map[path]
if map_item is not None:
image = CommCareMultimedia.get(map_item.multimedia_id)
if image is not None:
for media in image.aux_media:
width = max(width, media.media_meta['size']['width'])
if width == 0:
return '13%'
return str(width)
@register_format_type('late-flag')
class LateFlag(HideShortHeaderColumn):
template_width = "11%"
XPATH_FUNCTION = u"if({xpath} = '', '*', if(today() - date({xpath}) > {column.late_flag}, '*', ''))"
@register_format_type('invisible')
class Invisible(HideShortColumn):
pass
@register_format_type('filter')
class Filter(HideShortColumn):
@property
def fields(self):
return []
@register_format_type('calculate')
class Calculate(FormattedDetailColumn):
@property
def xpath_function(self):
return dot_interpolate(self.column.calc_xpath, self.xpath)
@register_format_type('address')
class Address(HideShortColumn):
template_form = 'address'
template_width = 0
@register_format_type('picture')
class Picture(FormattedDetailColumn):
template_form = 'image'
@register_format_type('audio')
class Audio(FormattedDetailColumn):
template_form = 'audio'
@register_format_type('graph')
class Graph(FormattedDetailColumn):
template_form = "graph"
@property
def template(self):
template = sx.GraphTemplate(
form=self.template_form,
graph=sx.Graph(
type=self.column.graph_configuration.graph_type,
series=[
sx.Series(
nodeset=s.data_path,
x_function=s.x_function,
y_function=s.y_function,
radius_function=s.radius_function,
configuration=sx.ConfigurationGroup(
configs=[
# TODO: It might be worth wrapping
# these values in quotes (as appropriate)
# to prevent the user from having to
# figure out why their unquoted colors
# aren't working.
sx.ConfigurationItem(id=k, xpath_function=v)
for k, v in s.config.iteritems()]
)
)
for s in self.column.graph_configuration.series],
configuration=sx.ConfigurationGroup(
configs=(
[
sx.ConfigurationItem(id=k, xpath_function=v)
for k, v
in self.column.graph_configuration.config.iteritems()
] + [
sx.ConfigurationItem(
id=k,
locale_id=self.id_strings.graph_configuration(
self.module,
self.detail_type,
self.column,
k
)
)
for k, v
in self.column.graph_configuration.locale_specific_config.iteritems()
]
)
),
annotations=[
sx.Annotation(
x=sx.Text(xpath_function=a.x),
y=sx.Text(xpath_function=a.y),
text=sx.Text(
locale_id=self.id_strings.graph_annotation(
self.module,
self.detail_type,
self.column,
i
)
)
)
for i, a in enumerate(
self.column.graph_configuration.annotations
)]
)
)
# TODO: what are self.variables and do I need to care about them here?
# (see FormattedDetailColumn.template)
return template
@register_type_processor(const.FIELD_TYPE_ATTACHMENT)
class AttachmentXpathGenerator(BaseXpathGenerator):
@property
def xpath(self):
return const.FIELD_TYPE_ATTACHMENT + "/" + self.column.field_property
@register_type_processor(const.FIELD_TYPE_PROPERTY)
class PropertyXpathGenerator(BaseXpathGenerator):
@property
def xpath(self):
if self.column.model == 'product':
return self.column.field
parts = self.column.field.split('/')
if self.column.model == 'case':
parts[-1] = CASE_PROPERTY_MAP.get(parts[-1], parts[-1])
property = parts.pop()
indexes = parts
use_relative = property != '#owner_name'
if use_relative:
case = CaseXPath('')
else:
case = CaseXPath(u'current()')
if indexes and indexes[0] == 'user':
case = CaseXPath(UserCaseXPath().case())
else:
for index in indexes:
case = case.index_id(index).case()
if property == '#owner_name':
return self.owner_name(case.property('@owner_id'))
else:
return case.property(property)
@staticmethod
def owner_name(owner_id):
groups = XPath(u"instance('groups')/groups/group")
group = groups.select('@id', owner_id)
return XPath.if_(
group.count().neq(0),
group.slash('name'),
XPath.if_(
CommCareSession.userid.eq(owner_id),
CommCareSession.username,
XPath.string('')
)
)
@register_type_processor(const.FIELD_TYPE_INDICATOR)
class IndicatorXpathGenerator(BaseXpathGenerator):
@property
def xpath(self):
indicator_set, indicator = self.column.field_property.split('/', 1)
instance_id = self.id_strings.indicator_instance(indicator_set)
return IndicatorXpath(instance_id).instance().slash(indicator)
@register_type_processor(const.FIELD_TYPE_LOCATION)
class LocationXpathGenerator(BaseXpathGenerator):
@property
def xpath(self):
from corehq.apps.locations.util import parent_child
hierarchy = parent_child(self.app.domain)
return LocationXpath('commtrack:locations').location(self.column.field_property, hierarchy)
@register_type_processor(const.FIELD_TYPE_LEDGER)
class LedgerXpathGenerator(BaseXpathGenerator):
@property
def xpath(self):
session_case_id = 'case_id_case_{0}'.format(self.module.case_type)
section = self.column.field_property
return "if({0} = 0 or {1} = 0 or {2} = 0, '', {3})".format(
LedgerdbXpath(session_case_id).ledger().count(),
LedgerdbXpath(session_case_id).ledger().section(section).count(),
LedgerdbXpath(session_case_id).ledger().section(section).entry(u'current()/@id').count(),
LedgerdbXpath(session_case_id).ledger().section(section).entry(u'current()/@id')
)
@register_type_processor(const.FIELD_TYPE_SCHEDULE)
class ScheduleXpathGenerator(BaseXpathGenerator):
@property
def xpath(self):
return "${}".format(self.column.field_property)
```
#### File: management/commands/build_apps.py
```python
import contextlib
import json
import time
from django.core.management.base import BaseCommand
from lxml import etree
import os
from corehq.apps.app_manager.models import Application, RemoteApp
try:
from guppy import hpy
track_perf = True
except ImportError:
track_perf = False
_parser = etree.XMLParser(remove_blank_text=True)
def normalize_xml(xml):
xml = etree.fromstring(xml, parser=_parser)
return etree.tostring(xml, pretty_print=True)
@contextlib.contextmanager
def record_performance_stats(filepath, slug):
hp = hpy()
before = hp.heap()
start = time.clock()
try:
yield
finally:
end = time.clock()
after = hp.heap()
leftover = after - before
with open(filepath, 'a') as f:
f.write('{},{},{}\n'.format(slug, leftover.size, end - start))
class Command(BaseCommand):
args = '<path_to_dir> <build-slug>'
help = """
Pass in a path to a directory (dir, below) with the following layout:
dir/
src/
[app-slug].json
[app-slug].json
...
"""
def handle(self, *args, **options):
path, build_slug = args
app_slugs = []
perfpath = os.path.join(path, '{}-performance.txt'.format(build_slug))
if os.path.exists(perfpath):
os.remove(perfpath)
for name in os.listdir(os.path.join(path, 'src')):
_JSON = '.json'
if name.endswith(_JSON):
app_slugs.append(name[:-len(_JSON)])
for slug in app_slugs:
print 'Fetching %s...' % slug
source_path = os.path.join(path, 'src', '%s.json' % slug)
with open(source_path) as f:
j = json.load(f)
if j['doc_type'] == 'Application':
app = Application.wrap(j)
elif j['doc_type'] == 'RemoteApp':
app = RemoteApp.wrap(j)
app.version = 1
if not app.domain:
app.domain = "test"
build_path = os.path.join(path, build_slug, slug)
print ' Creating files...'
if track_perf:
with record_performance_stats(perfpath, slug):
files = app.create_all_files()
else:
files = app.create_all_files()
self.write_files(files, build_path)
def write_files(self, files, path):
for filename, payload in files.items():
filepath = os.path.join(path, filename)
dirpath, filename = os.path.split(filepath)
try:
os.makedirs(dirpath)
except OSError:
# directory already exists
pass
with open(filepath, 'w') as f:
if filepath.endswith('.xml'):
payload = normalize_xml(payload)
f.write(payload)
```
#### File: management/commands/find_intents.py
```python
from django.core.management import BaseCommand
from corehq.apps.domain.models import Domain
import csv
import sys
class Command(BaseCommand):
def handle(self, *args, **options):
csvWriter = csv.writer(sys.stdout)
for domain in Domain.get_all():
for app in domain.full_applications(include_builds=False):
for module in app.modules:
for form in module.forms:
intents = form.wrapped_xform().odk_intents
if len(intents):
csvWriter.writerow([domain.name, app.name,
module.name, form.name, intents])
```
#### File: management/commands/migrate_adv_form_action_parent.py
```python
from corehq.apps.app_manager.management.commands.helpers import AppMigrationCommandBase
from corehq.apps.app_manager.models import Application
class Command(AppMigrationCommandBase):
help = "Migrate single parent index to CaseIndex list in advanced form actions."
include_builds = False # AdvancedAction lazy-migrates reverted builds
def migrate_app(self, app_doc):
modules = [m for m in app_doc['modules'] if m.get('module_type', '') == 'advanced']
should_save = False
for module in modules:
for form in module['forms']:
for action_name in form.get('actions', {}):
if action_name == 'load_update_cases':
for action in form['actions'][action_name]:
if 'parent_tag' in action:
if action['parent_tag']:
action['case_index'] = {
'tag': action['parent_tag'],
'reference_id': action.get('parent_reference_id', 'parent'),
'relationship': action.get('relationship', 'child'),
}
del action['parent_tag']
action.pop('parent_reference_id', None)
action.pop('relationship', None)
should_save = True
elif action_name == 'open_cases':
for action in form['actions'][action_name]:
if 'parent_tag' in action:
if action['parent_tag']:
case_index = {
'tag': action['parent_tag'],
'reference_id': action.get('parent_reference_id', 'parent'),
'relationship': action.get('relationship', 'child'),
}
if hasattr(action.get('case_indices'), 'append'):
action['case_indices'].append(case_index)
else:
action['case_indices'] = [case_index]
del action['parent_tag']
action.pop('parent_reference_id', None)
action.pop('relationship', None)
should_save = True
return Application.wrap(app_doc) if should_save else None
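# Sketch of the per-action migration above (field values are illustrative):
#   before: {'parent_tag': 'mother', 'parent_reference_id': 'parent', 'relationship': 'child', ...}
#   after (load_update_cases): {'case_index': {'tag': 'mother', 'reference_id': 'parent', 'relationship': 'child'}, ...}
#   after (open_cases):        {'case_indices': [{'tag': 'mother', 'reference_id': 'parent', 'relationship': 'child'}], ...}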
```
#### File: management/commands/migrate_template_apps_form_ids.py
```python
import re
from corehq.apps.app_manager.management.commands.helpers import AppMigrationCommandBase
from corehq.apps.app_manager.models import Application, load_app_template, ATTACHMENT_REGEX
from corehq.apps.app_manager.util import update_unique_ids
from corehq.apps.es import AppES
def _get_first_form_id(app):
return app['modules'][0]['forms'][0]['unique_id']
class Command(AppMigrationCommandBase):
help = "Migrate apps that have been created from template apps " \
"to make sure that their form ID's are unique."
include_builds = False
def migrate_app(self, app_doc):
should_save = False
template_slug = app_doc['created_from_template']
template = load_app_template(template_slug)
if _get_first_form_id(app_doc) == _get_first_form_id(template):
should_save = True
app = Application.wrap(app_doc)
_attachments = {}
for name in app_doc.get('_attachments', {}):
if re.match(ATTACHMENT_REGEX, name):
_attachments[name] = app.fetch_attachment(name)
app_doc['_attachments'] = _attachments
app_doc = update_unique_ids(app_doc)
return Application.wrap(app_doc) if should_save else None
def get_app_ids(self):
q = AppES().created_from_template(True).is_build(False).fields('_id')
results = q.run()
return [app['_id'] for app in results.hits]
```
#### File: apps/app_manager/signals.py
```python
from django.dispatch.dispatcher import Signal
from corehq.apps.domain.models import Domain
from corehq.apps.app_manager.const import CAREPLAN_GOAL, CAREPLAN_TASK
from corehq.apps.app_manager.models import CareplanModule, CareplanConfig, CareplanAppProperties
def create_app_structure_repeat_records(sender, application, **kwargs):
from corehq.apps.receiverwrapper.models import AppStructureRepeater
domain = application.domain
if domain:
repeaters = AppStructureRepeater.by_domain(domain)
for repeater in repeaters:
repeater.register(application)
def update_careplan_config(config, parent_app_id, application):
app_props = config.app_configs.get(parent_app_id, CareplanAppProperties())
app_props.latest_release = application.get_id
for module in application.get_modules():
if isinstance(module, CareplanModule):
app_props.name = module.default_name()
app_props.case_type = module.case_type
app_props.goal_conf = {
"edit_module_id": module.id,
"edit_form_id": module.get_form_by_type(CAREPLAN_GOAL, 'update').id,
"create_module_id": module.id,
"create_form_id": module.get_form_by_type(CAREPLAN_GOAL, 'create').id,
}
app_props.task_conf = {
"edit_module_id": module.id,
"edit_form_id": module.get_form_by_type(CAREPLAN_TASK, 'update').id,
"create_module_id": module.id,
"create_form_id": module.get_form_by_type(CAREPLAN_TASK, 'create').id,
}
break
config.app_configs[parent_app_id] = app_props
config.save()
domain = Domain.get_by_name(application.domain)
if not domain.has_careplan:
domain.has_careplan = True
domain.save()
def careplan_removed(domain_name, config, app_id):
if config and app_id in config.app_configs:
del config.app_configs[app_id]
config.save()
if not config.app_configs:
domain = Domain.get_by_name(domain_name)
domain.has_careplan = False
domain.save()
def update_project_careplan_config(sender, application, **kwargs):
domain_name = application.domain
config = CareplanConfig.for_domain(domain_name)
if application.doc_type == 'Application-Deleted':
if application.has_careplan_module:
careplan_removed(domain_name, config, application.get_id)
def update_project_careplan_config_release(sender, application, **kwargs):
domain_name = application.domain
config = CareplanConfig.for_domain(domain_name)
parent_app_id = application.copy_of
latest_app = application.get_latest_app(released_only=True)
if latest_app and latest_app.is_released and latest_app.has_careplan_module:
config = config or CareplanConfig(domain=domain_name)
update_careplan_config(config, parent_app_id, latest_app)
else:
careplan_removed(domain_name, config, parent_app_id)
app_post_save = Signal(providing_args=['application'])
app_post_save.connect(create_app_structure_repeat_records)
app_post_save.connect(update_project_careplan_config)
app_post_release = Signal(providing_args=['application'])
app_post_release.connect(update_project_careplan_config_release)
```
#### File: suite_xml/sections/details.py
```python
from __future__ import absolute_import
from collections import namedtuple
import os
from xml.sax.saxutils import escape
from eulxml.xmlmap.core import load_xmlobject_from_string
from corehq.apps.app_manager.const import RETURN_TO
from corehq.apps.app_manager.suite_xml.const import FIELD_TYPE_LEDGER
from corehq.apps.app_manager.suite_xml.contributors import SectionContributor
from corehq.apps.app_manager.suite_xml.post_process.instances import EntryInstances
from corehq.apps.app_manager.suite_xml.xml_models import Text, Xpath, Locale, Id, Header, Template, Field, Lookup, Extra, \
Response, Detail, LocalizedAction, Stack, Action, Display, PushFrame, StackDatum
from corehq.apps.app_manager.suite_xml.features.scheduler import schedule_detail_variables
from corehq.apps.app_manager.util import create_temp_sort_column
from corehq.apps.app_manager import id_strings
from corehq.apps.app_manager.exceptions import SuiteError
from corehq.apps.app_manager.xpath import session_var, XPath
from dimagi.utils.decorators.memoized import memoized
class DetailContributor(SectionContributor):
section_name = 'details'
def get_section_elements(self):
r = []
if not self.app.use_custom_suite:
for module in self.modules:
for detail_type, detail, enabled in module.get_details():
if enabled:
if detail.custom_xml:
d = load_xmlobject_from_string(
detail.custom_xml,
xmlclass=Detail
)
r.append(d)
else:
detail_column_infos = get_detail_column_infos(
detail,
include_sort=detail_type.endswith('short'),
)
if detail_column_infos:
if detail.use_case_tiles:
r.append(self.build_case_tile_detail(
module, detail, detail_type
))
else:
d = self.build_detail(
module,
detail_type,
detail,
detail_column_infos,
list(detail.get_tabs()),
id_strings.detail(module, detail_type),
Text(locale_id=id_strings.detail_title_locale(
module, detail_type
)),
0,
len(detail_column_infos)
)
if d:
r.append(d)
if module.fixture_select.active:
d = Detail(
id=id_strings.fixture_detail(module),
title=Text(),
)
xpath = Xpath(function=module.fixture_select.display_column)
if module.fixture_select.localize:
template_text = Text(locale=Locale(child_id=Id(xpath=xpath)))
else:
template_text = Text(xpath_function=module.fixture_select.display_column)
fields = [Field(header=Header(text=Text()),
template=Template(text=template_text),
sort_node='')]
d.fields = fields
r.append(d)
return r
def build_detail(self, module, detail_type, detail, detail_column_infos,
tabs, id, title, start, end):
"""
Recursively builds the Detail object.
(Details can contain other details for each of their tabs)
"""
from corehq.apps.app_manager.detail_screen import get_column_generator
d = Detail(id=id, title=title)
if tabs:
tab_spans = detail.get_tab_spans()
for tab in tabs:
sub_detail = self.build_detail(
module,
detail_type,
detail,
detail_column_infos,
[],
None,
Text(locale_id=id_strings.detail_tab_title_locale(
module, detail_type, tab
)),
tab_spans[tab.id][0],
tab_spans[tab.id][1]
)
if sub_detail:
d.details.append(sub_detail)
if len(d.details):
return d
else:
return None
# Base case (has no tabs)
else:
# Add lookup
if detail.lookup_enabled and detail.lookup_action:
d.lookup = Lookup(
name=detail.lookup_name or None,
action=detail.lookup_action,
image=detail.lookup_image or None,
)
d.lookup.extras = [Extra(**e) for e in detail.lookup_extras]
d.lookup.responses = [Response(**r) for r in detail.lookup_responses]
# Add variables
variables = list(
schedule_detail_variables(module, detail, detail_column_infos)
)
if variables:
d.variables.extend(variables)
# Add fields
for column_info in detail_column_infos[start:end]:
fields = get_column_generator(
self.app, module, detail,
detail_type=detail_type, *column_info
).fields
d.fields.extend(fields)
# Add actions
if module.case_list_form.form_id and detail_type.endswith('short')\
and not module.put_in_root:
target_form = self.app.get_form(module.case_list_form.form_id)
if target_form.is_registration_form(module.case_type):
self._add_action_to_detail(d, module)
try:
if not self.app.enable_multi_sort:
d.fields[0].sort = 'default'
except IndexError:
pass
else:
# only yield the Detail if it has Fields
return d
def _add_action_to_detail(self, detail, module):
# add form action to detail
form = self.app.get_form(module.case_list_form.form_id)
if self.app.enable_localized_menu_media:
case_list_form = module.case_list_form
detail.action = LocalizedAction(
menu_locale_id=id_strings.case_list_form_locale(module),
media_image=bool(len(case_list_form.all_image_paths())),
media_audio=bool(len(case_list_form.all_audio_paths())),
image_locale_id=id_strings.case_list_form_icon_locale(module),
audio_locale_id=id_strings.case_list_form_audio_locale(module),
stack=Stack(),
for_action_menu=True,
)
else:
detail.action = Action(
display=Display(
text=Text(locale_id=id_strings.case_list_form_locale(module)),
media_image=module.case_list_form.default_media_image,
media_audio=module.case_list_form.default_media_audio,
),
stack=Stack()
)
frame = PushFrame()
frame.add_command(XPath.string(id_strings.form_command(form)))
target_form_dm = self.entries_helper.get_datums_meta_for_form_generic(form)
source_form_dm = self.entries_helper.get_datums_meta_for_form_generic(module.get_form(0))
for target_meta in target_form_dm:
if target_meta.requires_selection:
# This is true for registration forms where the case being created is a subcase
try:
[source_dm] = [
source_meta for source_meta in source_form_dm
if source_meta.case_type == target_meta.case_type
]
except ValueError:
raise SuiteError("Form selected as case list form requires a case "
"but no matching case could be found: {}".format(form.unique_id))
else:
frame.add_datum(StackDatum(
id=target_meta.datum.id,
value=session_var(source_dm.datum.id))
)
else:
s_datum = target_meta.datum
frame.add_datum(StackDatum(id=s_datum.id, value=s_datum.function))
frame.add_datum(StackDatum(id=RETURN_TO, value=XPath.string(id_strings.menu_id(module))))
detail.action.stack.add_frame(frame)
def build_case_tile_detail(self, module, detail, detail_type):
"""
Return a Detail node from an apps.app_manager.models.Detail that is
configured to use case tiles.
This method does so by injecting the appropriate strings into a template
string.
"""
from corehq.apps.app_manager.detail_screen import get_column_xpath_generator
template_args = {
"detail_id": id_strings.detail(module, detail_type),
"title_text_id": id_strings.detail_title_locale(
module, detail_type
)
}
# Get field/case property mappings
cols_by_tile = {col.case_tile_field: col for col in detail.columns}
for template_field in ["header", "top_left", "sex", "bottom_left", "date"]:
column = cols_by_tile.get(template_field, None)
if column is None:
raise SuiteError(
'No column was mapped to the "{}" case tile field'.format(
template_field
)
)
template_args[template_field] = {
"prop_name": get_column_xpath_generator(
self.app, module, detail, column
).xpath,
"locale_id": id_strings.detail_column_header_locale(
module, detail_type, column,
),
# Just using default language for now
# The right thing to do would be to reference the app_strings.txt I think
"prefix": escape(
column.header.get(self.app.default_language, "")
)
}
if column.format == "enum":
template_args[template_field]["enum_keys"] = {}
for mapping in column.enum:
template_args[template_field]["enum_keys"][mapping.key] = \
id_strings.detail_column_enum_variable(
module, detail_type, column, mapping.key_as_variable
)
# Populate the template
detail_as_string = self._case_tile_template_string.format(**template_args)
return load_xmlobject_from_string(detail_as_string, xmlclass=Detail)
@property
@memoized
def _case_tile_template_string(self):
"""
Return a string suitable for building a case tile detail node
through `str.format`.
"""
with open(os.path.join(
os.path.dirname(os.path.dirname(__file__)), "case_tile_templates", "tdh.txt"
)) as f:
return f.read().decode('utf-8')
class DetailsHelper(object):
def __init__(self, app, modules=None):
self.app = app
self._modules = modules
@property
@memoized
def modules(self):
return self._modules or list(self.app.get_modules())
@property
@memoized
def active_details(self):
return {
id_strings.detail(module, detail_type)
for module in self.modules for detail_type, detail, enabled in module.get_details()
if enabled and detail.columns
}
def get_detail_id_safe(self, module, detail_type):
detail_id = id_strings.detail(
module=module,
detail_type=detail_type,
)
return detail_id if detail_id in self.active_details else None
def get_default_sort_elements(detail):
from corehq.apps.app_manager.models import SortElement
if not detail.columns:
return []
def get_sort_params(column):
if column.field_type == FIELD_TYPE_LEDGER:
return dict(type='int', direction='descending')
else:
return dict(type='string', direction='ascending')
col_0 = detail.get_column(0)
sort_elements = [SortElement(
field=col_0.field,
**get_sort_params(col_0)
)]
for column in detail.columns[1:]:
if column.field_type == FIELD_TYPE_LEDGER:
sort_elements.append(SortElement(
field=column.field,
**get_sort_params(column)
))
return sort_elements
def get_detail_column_infos(detail, include_sort):
"""
This is not intended to be a widely used format,
just a packaging of column info into a form most convenient for rendering
"""
DetailColumnInfo = namedtuple('DetailColumnInfo',
'column sort_element order')
if not include_sort:
return [DetailColumnInfo(column, None, None) for column in detail.get_columns()]
if detail.sort_elements:
sort_elements = detail.sort_elements
else:
sort_elements = get_default_sort_elements(detail)
# order is 1-indexed
sort_elements = {s.field: (s, i + 1)
for i, s in enumerate(sort_elements)}
columns = []
for column in detail.get_columns():
sort_element, order = sort_elements.pop(column.field, (None, None))
columns.append(DetailColumnInfo(column, sort_element, order))
# sort elements is now populated with only what's not in any column
# add invisible columns for these
sort_only = sorted(sort_elements.items(),
key=lambda (field, (sort_element, order)): order)
for field, (sort_element, order) in sort_only:
column = create_temp_sort_column(field, len(columns))
columns.append(DetailColumnInfo(column, sort_element, order))
return columns
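# Sketch of the result (column and sort-element names are illustrative): for a
# detail with columns on 'name' and 'age' and a single sort element on 'dob',
# calling this with include_sort=True returns
#   [DetailColumnInfo(name_column, None, None),
#    DetailColumnInfo(age_column, None, None),
#    DetailColumnInfo(<temp sort-only column for 'dob'>, dob_sort_element, 1)]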
def get_instances_for_module(app, module, additional_xpaths=None):
"""
This method is used by CloudCare when filtering cases.
"""
modules = list(app.get_modules())
helper = DetailsHelper(app, modules)
details = DetailContributor(None, app, modules).get_section_elements()
details_by_id = {detail.id: detail for detail in details}
detail_ids = [helper.get_detail_id_safe(module, detail_type)
for detail_type, detail, enabled in module.get_details()
if enabled]
detail_ids = filter(None, detail_ids)
xpaths = set()
if additional_xpaths:
xpaths.update(additional_xpaths)
for detail_id in detail_ids:
xpaths.update(details_by_id[detail_id].get_all_xpaths())
return EntryInstances.get_required_instances(xpaths)
```
#### File: suite_xml/sections/resources.py
```python
from corehq.apps.app_manager import id_strings
from corehq.apps.app_manager.suite_xml.contributors import SectionContributor
from corehq.apps.app_manager.suite_xml.xml_models import LocaleResource, XFormResource
from corehq.apps.app_manager.templatetags.xforms_extras import trans
from corehq.apps.app_manager.util import languages_mapping
class FormResourceContributor(SectionContributor):
section_name = 'xform_resources'
def get_section_elements(self):
first = []
last = []
for form_stuff in self.app.get_forms(bare=False):
form = form_stuff["form"]
if form_stuff['type'] == 'module_form':
path = './modules-{module.id}/forms-{form.id}.xml'.format(**form_stuff)
this_list = first
else:
path = './user_registration.xml'
this_list = last
resource = XFormResource(
id=id_strings.xform_resource(form),
version=form.get_version(),
local=path,
remote=path,
)
if form_stuff['type'] == 'module_form' and self.app.build_version >= '2.9':
resource.descriptor = u"Form: (Module {module_name}) - {form_name}".format(
module_name=trans(form_stuff["module"]["name"], langs=[self.app.default_language]),
form_name=trans(form["name"], langs=[self.app.default_language])
)
elif path == './user_registration.xml':
resource.descriptor = u"User Registration Form"
this_list.append(resource)
for x in first:
yield x
for x in last:
yield x
class LocaleResourceContributor(SectionContributor):
section_name = 'locale_resources'
def get_section_elements(self):
for lang in ["default"] + self.app.build_langs:
path = './{lang}/app_strings.txt'.format(lang=lang)
resource = LocaleResource(
language=lang,
id=id_strings.locale_resource(lang),
version=self.app.version,
local=path,
remote=path,
)
if self.app.build_version >= '2.9':
unknown_lang_txt = u"Unknown Language (%s)" % lang
resource.descriptor = u"Translations: %s" % languages_mapping().get(lang, [unknown_lang_txt])[0]
yield resource
```
#### File: app_manager/tests/test_bulk_app_translation.py
```python
import codecs
import tempfile
from django.test import SimpleTestCase
from StringIO import StringIO
from corehq.util.spreadsheets.excel import WorkbookJSONReader
from couchexport.export import export_raw
from couchexport.models import Format
from corehq.apps.app_manager.const import APP_V2
from corehq.apps.app_manager.models import Application, Module
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.apps.app_manager.translations import \
process_bulk_app_translation_upload, expected_bulk_app_sheet_rows, \
expected_bulk_app_sheet_headers
class BulkAppTranslationTestBase(SimpleTestCase, TestXmlMixin):
def setUp(self):
"""
Instantiate an app from file_path + app.json
"""
super(BulkAppTranslationTestBase, self).setUp()
self.app = Application.wrap(self.get_json("app"))
def upload_raw_excel_translations(self, excel_headers, excel_data, expected_messages=None):
"""
Prepares a bulk app translation Excel file and uploads it.
The structure of the xlsx file can be specified as follows:
excel_headers:
(("employee", ("id", "name", "gender")),
("building", ("id", "name", "address")))
excel_data:
(("employee", (("1", "cory", "m"),
("2", "christian", "m"),
("3", "amelia", "f"))),
("building", (("1", "dimagi", "585 mass ave."),
("2", "old dimagi", "529 main st."))))
"""
if not expected_messages:
expected_messages = ["App Translations Updated!"]
file = StringIO()
export_raw(excel_headers, excel_data, file, format=Format.XLS_2007)
with tempfile.TemporaryFile(suffix='.xlsx') as f:
f.write(file.getvalue())
messages = process_bulk_app_translation_upload(self.app, f)
self.assertListEqual(
[m[1] for m in messages], expected_messages
)
def do_upload(self, name, expected_messages=None):
"""
Upload the bulk app translation file at file_path + upload.xlsx
Note: Use upload_raw_excel_translations() instead. It allows easy modifications
and diffs of xlsx data.
ToDo: Refactor tests using do_upload to use upload_raw_excel_translations(), use
WorkbookJSONReader.work_book_headers_as_tuples(), and
WorkbookJSONReader.work_book_data_as_tuples(), for making tuples from excel files
"""
if not expected_messages:
expected_messages = ["App Translations Updated!"]
with codecs.open(self.get_path(name, "xlsx")) as f:
messages = process_bulk_app_translation_upload(self.app, f)
self.assertListEqual(
[m[1] for m in messages], expected_messages
)
def assert_question_label(self, text, module_id, form_id, language, question_path):
"""
Assert that the given text equals the label of the given question in the given language.
:param text: expected label text
:param module_id: module index
:param form_id: form index
:param language: language code in which to look up the label
:param question_path: path to question (including "/data/")
"""
form = self.app.get_module(module_id).get_form(form_id)
labels = {}
for lang in self.app.langs:
for question in form.get_questions(
[lang], include_triggers=True, include_groups=True):
labels[(question['value'], lang)] = question['label']
self.assertEqual(
labels[(question_path, language)],
text
)
def assert_case_property_label(self, text, field, module_id, short_or_long, language):
module = self.app.get_module(module_id)
cols = module.case_details[short_or_long].columns
col = next(col for col in cols if col.field == field)
self.assertEqual(text, col.header.get(language, None))
class BulkAppTranslationBasicTest(BulkAppTranslationTestBase):
file_path = "data", "bulk_app_translation", "basic"
upload_headers = (
("Modules_and_forms", (
"Type", "sheet_name", "default_en", "default_fra", "label_for_cases_en", "label_for_cases_fra", 'icon_filepath_en', 'icon_filepath_fra', 'audio_filepath_en', 'audio_filepath_fra', "unique_id"
)),
("module1", (
"case_property", "list_or_detail", "default_en", "default_fra"
)),
("module1_form1", (
"label", "default_en", "default_fra", "audio_en", "audio_fra", "image_en", "image_fra", "video_en", "video_fra",
))
)
upload_headers_bad_column = ( # bad column is default-fra
("Modules_and_forms", (
"Type", "sheet_name", "default_en", "default_fra",
"label_for_cases_en", "label_for_cases_fra", "icon_filepath_en", "icon_filepath_fra",
"audio_filepath_en", "audio_filepath_fra" , "unique_id"
)),
("module1", (
"case_property", "list_or_detail", "default_en", "default_fra"
)),
("module1_form1", (
"label", "default_en", "default-fra", "audio_en", "audio_fra",
"image_en", "image_fra", "video_en", "video_fra",
))
)
upload_data = (
("Modules_and_forms", (
("Module", "module1", "My & awesome module", "", "Cases", "Cases", "", "", "", "", "8f4f7085a93506cba4295eab9beae8723c0cee2a"),
("Form", "module1_form1", "My more & awesome form", "", "", "", "", "", "", "", "93ea2a40df57d8f33b472f5b2b023882281722d4")
)),
("module1", (
("name", "list", "Name", "Nom"),
("name", "detail", "", "Nom"),
("other-prop (ID Mapping Text)", "detail", "Other Prop", ""),
("foo (ID Mapping Value)", "detail", "bar", "french bar"),
("baz (ID Mapping Value)", "detail", "quz", ""),
)),
("module1_form1", (
("question1-label", "in english", "it's in french", "", "", "", "", "", ""),
("question2-label", "one < two", "un < deux", "", "", "", "", "", ""),
("question2-item1-label", "item1", "item1", "", "", "", "", "", ""),
("question2-item2-label", "item2", "item2", "", "", "", "", "", ""),
("question3-label", "question3", "question3's label", "", "", "", "", "", ""),
("question3/question4-label", 'question6: <output value="/data/question6"/>', 'question6: <output value="/data/question6"/>', "", "", "", "", "", ""),
("question3/question5-label", "English Label", "English Label", "", "", "", "", "", ""),
("question7-label", 'question1: <output value="/data/question1"/> < 5', "question7", "", "", "", "", "", ""),
('add_markdown-label', 'add_markdown: ~~new \u0939\u093f markdown~~', 'add_markdown: ~~new \u0939\u093f markdown~~', '', '', '', '', '', ''),
('remove_markdown-label', 'remove_markdown', 'remove_markdown', '', '', '', '', '', ''),
('update_markdown-label', '## smaller_markdown', '## smaller_markdown', '', '', '', '', '', ''),
('vetoed_markdown-label', '*i just happen to like stars a lot*', '*i just happen to like stars a lot*', '', '', '', '', '', ''),
))
)
upload_no_change_headers = (
('Modules_and_forms', ('Type', 'sheet_name', 'default_en', 'default_fra', 'label_for_cases_en', 'label_for_cases_fra', 'icon_filepath_en', 'icon_filepath_fra', 'audio_filepath_en', 'audio_filepath_fra', 'unique_id')),
('module1', ('case_property', 'list_or_detail', 'default_en', 'default_fra')),
('module1_form1', ('label', 'default_en', 'default_fra', 'audio_en', 'audio_fra', 'image_en', 'image_fra', 'video_en', 'video_fra'))
)
upload_no_change_data = (
('Modules_and_forms',
(('Module', 'module1', 'My & awesome module', '', 'Cases', 'Cases', '', '', '', '', '8f4f7085a93506cba4295eab9beae8723c0cee2a'),
('Form', 'module1_form1', 'My more & awesome form', '', '', '', '', '', '', '', '93ea2a40df57d8f33b472f5b2b023882281722d4'))),
('module1',
(('name', 'list', 'Name', ''),
('name', 'detail', 'Name', ''),
('other-prop (ID Mapping Text)', 'detail', 'Other Prop', 'Autre Prop'),
('foo (ID Mapping Value)', 'detail', 'bar', ''),
('baz (ID Mapping Value)', 'detail', 'quz', ''))),
('module1_form1',
(('question1-label', 'question1', 'question1', '', '', '', '', '', ''),
('question2-label', 'question2', 'question2', '', '', '', '', '', ''),
('question2-item1-label', 'item1', 'item1', '', '', '', '', '', ''),
('question2-item2-label', 'item2', 'item2', '', '', '', '', '', ''),
('question3-label', 'question3', 'question3', '', '', '', '', '', ''),
('question3/question4-label', 'question4', 'question4', '', '', '', '', '', ''),
('question3/question5-label', 'question5', 'question5', '', '', '', '', '', ''),
('question7-label', 'question7', 'question7', '', '', '', '', '', ''),
('add_markdown-label', 'add_markdown', 'add_markdown', '', '', '', '', '', ''),
('remove_markdown-label', 'remove_markdown: ~~remove this~~', 'remove_markdown: ~~remove this~~', '', '', '', '', '', ''),
('update_markdown-label', '# update_markdown', '# update_markdown', '', '', '', '', '', ''),
('vetoed_markdown-label', '*i just happen to like stars*', '*i just happen to like stars*', '', '', '', '', '', ''),
))
)
def test_set_up(self):
self._shared_test_initial_set_up()
def test_no_change_upload(self):
self.upload_raw_excel_translations(self.upload_no_change_headers, self.upload_no_change_data)
self._shared_test_initial_set_up()
def _shared_test_initial_set_up(self):
self.assert_question_label("question1", 0, 0, "en", "/data/question1")
self.assert_case_property_label("Autre Prop", "other-prop", 0, "long", "fra")
def test_change_upload(self):
self.upload_raw_excel_translations(self.upload_headers, self.upload_data)
self.assert_question_label("in english", 0, 0, "en", "/data/question1")
self.assert_question_label("it's in french", 0, 0, "fra", "/data/question1")
# Test that translations can be deleted.
self.assert_question_label("English Label", 0, 0, "fra", "/data/question3/question5")
self.assert_case_property_label(None, "other-prop", 0, "long", "fra")
self.assert_case_property_label(None, "name", 0, "long", "en")
module = self.app.get_module(0)
self.assertEqual(
module.case_details.long.columns[1].enum[0].value['fra'],
'french bar'
)
self.assertEqual(
module.case_details.short.columns[0].header['fra'],
'Nom'
)
# Test special characters and output refs
self.assert_question_label("one < two", 0, 0, "en", "/data/question2")
self.assert_question_label("un < deux", 0, 0, "fra", "/data/question2")
self.assert_question_label("question3's label", 0, 0, "fra", "/data/question3")
self.assert_question_label("question6: ____", 0, 0, "en", "/data/question3/question4")
self.assert_question_label("question1: ____ < 5", 0, 0, "en", "/data/question7")
# Test markdown
self.assert_question_label("add_markdown: ~~new \u0939\u093f markdown~~", 0, 0, "en", "/data/add_markdown")
self.assert_question_label("remove_markdown", 0, 0, "en", "/data/remove_markdown")
self.assert_question_label("## smaller_markdown", 0, 0, "en", "/data/update_markdown")
self.assert_question_label("*i just happen to like stars a lot*", 0, 0, "en", "/data/vetoed_markdown")
form = self.app.get_module(0).get_form(0)
self.assertXmlEqual(self.get_xml("change_upload_form"), form.render_xform())
def test_missing_itext(self):
self.app = Application.wrap(self.get_json("app_no_itext"))
self.assert_question_label('question1', 0, 0, "en", "/data/question1")
try:
self.upload_raw_excel_translations(self.upload_no_change_headers, self.upload_no_change_data)
except Exception as e:
self.fail(e)
def test_bad_column_name(self):
self.upload_raw_excel_translations(self.upload_headers_bad_column,
self.upload_data,
expected_messages=[
u'Sheet "module1_form1" has less columns than expected. Sheet '
'will be processed but the following translations will be '
'unchanged: default_fra',
u'Sheet "module1_form1" has unrecognized columns. Sheet will '
'be processed but ignoring the following columns: default-fra',
u'App Translations Updated!'
]
)
class MismatchedItextReferenceTest(BulkAppTranslationTestBase):
"""
Test the bulk app translation upload when the itext reference in a question
in the xform body does not match the question's id/path.
The upload is an unchanged download.
"""
file_path = "data", "bulk_app_translation", "mismatched_ref"
def test_unchanged_upload(self):
self.do_upload("upload")
self.assert_question_label("question2", 0, 0, "en", "/data/foo/question2")
class BulkAppTranslationFormTest(BulkAppTranslationTestBase):
file_path = "data", "bulk_app_translation", "form_modifications"
def test_removing_form_translations(self):
self.do_upload("modifications")
form = self.app.get_module(0).get_form(0)
self.assertXmlEqual(self.get_xml("expected_form"), form.render_xform())
class BulkAppTranslationDownloadTest(SimpleTestCase, TestXmlMixin):
file_path = ('data', 'bulk_app_translation', 'download')
maxDiff = None
excel_headers = (
('Modules_and_forms', ('Type', 'sheet_name', 'default_en', 'label_for_cases_en', 'icon_filepath_en', 'audio_filepath_en', 'unique_id')),
('module1', ('case_property', 'list_or_detail', 'default_en')),
('module1_form1', ('label', 'default_en', 'audio_en', 'image_en', 'video_en'))
)
excel_data = (
('Modules_and_forms',
(('Module', 'module1', 'Stethoscope', 'Cases', 'jr://file/commcare/image/module0.png', '', '58ce5c9cf6eda401526973773ef216e7980bc6cc'),
('Form',
'module1_form1',
'Stethoscope Form',
'',
'jr://file/commcare/image/module0_form0.png',
'',
'c480ace490edc870ae952765e8dfacec33c69fec'))),
('module1', (('name', 'list', 'Name'), ('name', 'detail', 'Name'))),
('module1_form1',
(('What_does_this_look_like-label', 'What does this look like?', '', 'jr://file/commcare/image/data/What_does_this_look_like.png', ''),
('no_media-label', 'No media', '', '', ''),
('has_refs-label', 'Here is a ref <output value="/data/no_media"/> with some trailing text and "bad" < xml.', '', '', '')))
)
@classmethod
def setUpClass(cls):
cls.app = Application.wrap(cls.get_json("app"))
# Todo, refactor this into BulkAppTranslationTestBase.upload_raw_excel_translations
file = StringIO()
export_raw(cls.excel_headers, cls.excel_data, file, format=Format.XLS_2007)
with tempfile.TemporaryFile(suffix='.xlsx') as f:
f.write(file.getvalue())
wb_reader = WorkbookJSONReader(f)
cls.expected_workbook = [{'name': ws.title, 'rows': list(ws)}
for ws in wb_reader.worksheets]
def test_download(self):
actual_headers = expected_bulk_app_sheet_headers(self.app)
actual_rows = expected_bulk_app_sheet_rows(self.app)
actual_workbook = [
{'name': title,
'rows': [dict(zip(headers, row)) for row in actual_rows[title]]}
for title, headers in actual_headers
]
for actual_sheet, expected_sheet in zip(actual_workbook,
self.expected_workbook):
self.assertEqual(actual_sheet, expected_sheet)
self.assertEqual(actual_workbook, self.expected_workbook)
class RenameLangTest(SimpleTestCase):
def test_rename_lang_empty_form(self):
app = Application.new_app('domain', "Untitled Application", application_version=APP_V2)
module = app.add_module(Module.new_module('module', None))
form1 = app.new_form(module.id, "Untitled Form", None)
form1.source = '<source>'
# form with no source
form2 = app.new_form(module.id, "Empty form", None)
app.rename_lang('en', 'fra')
self.assertNotIn('en', module.name)
self.assertIn('fra', module.name)
self.assertNotIn('en', form1.name)
self.assertIn('fra', form1.name)
self.assertNotIn('en', form2.name)
self.assertIn('fra', form2.name)
```
#### File: app_manager/tests/test_media_suite.py
```python
from copy import deepcopy
from django.test import SimpleTestCase
from django.test.utils import override_settings
from lxml import etree
from corehq.apps.app_manager import id_strings
from corehq.apps.app_manager.const import APP_V2
from corehq.apps.app_manager.models import Application, Module, ReportModule, ReportAppConfig
from corehq.apps.app_manager.tests.app_factory import AppFactory
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.apps.builds.models import BuildSpec
from corehq.apps.hqmedia.models import CommCareImage, CommCareAudio
import commcare_translations
class MediaSuiteTest(SimpleTestCase, TestXmlMixin):
file_path = ('data', 'suite')
def test_all_media_paths(self):
image_path = 'jr://file/commcare/image{}.jpg'
audio_path = 'jr://file/commcare/audio{}.mp3'
app = Application.wrap(self.get_json('app'))
app.get_module(0).case_list.show = True
app.get_module(0).case_list.set_icon('en', image_path.format('4'))
app.get_module(0).case_list.set_audio('en', audio_path.format('4'))
app.get_module(0).set_icon('en', image_path.format('1'))
app.get_module(0).set_audio('en', audio_path.format('1'))
app.get_module(0).case_list_form.form_id = app.get_module(0).get_form(0).unique_id
app.get_module(0).case_list_form.set_icon('en', image_path.format('2'))
app.get_module(0).case_list_form.set_audio('en', audio_path.format('2'))
app.get_module(0).get_form(0).set_icon('en', image_path.format('3'))
app.get_module(0).get_form(0).set_audio('en', audio_path.format('3'))
should_contain_media = [image_path.format(num) for num in [1, 2, 3, 4]] + \
[audio_path.format(num) for num in [1, 2, 3, 4]]
self.assertTrue(app.get_module(0).uses_media())
self.assertEqual(app.all_media_paths, set(should_contain_media))
@override_settings(BASE_ADDRESS='192.cc.hq.1')
def test_case_list_media(self):
app = Application.wrap(self.get_json('app'))
app.get_module(0).case_list_form.form_id = app.get_module(0).get_form(0).unique_id
image_path = 'jr://file/commcare/case_list_image.jpg'
audo_path = 'jr://file/commcare/case_list_audo.mp3'
app.get_module(0).case_list_form.set_icon('en', image_path)
app.get_module(0).case_list_form.set_audio('en', audo_path)
app.create_mapping(CommCareImage(_id='123'), image_path, save=False)
app.create_mapping(CommCareAudio(_id='456'), audo_path, save=False)
app.set_media_versions(previous_version=None)
self.assertXmlEqual(self.get_xml('media_suite'), app.create_media_suite())
def test_update_image_id(self):
"""
When an image is updated, change only version number, not resource id
"""
app = Application.wrap(self.get_json('app'))
image_path = 'jr://file/commcare/case_list_image.jpg'
app.get_module(0).case_list_form.set_icon('en', image_path)
app.version = 1
app.create_mapping(CommCareImage(_id='123'), image_path, save=False)
app.set_media_versions(previous_version=None)
old_app = deepcopy(app)
app.version = 2
app.create_mapping(CommCareImage(_id='456'), image_path, save=False)
app.set_media_versions(previous_version=old_app)
old_image = old_app.multimedia_map[image_path]
new_image = app.multimedia_map[image_path]
self.assertEqual(old_image.unique_id, new_image.unique_id)
self.assertNotEqual(old_image.version, new_image.version)
def test_all_media_report_module(self):
"""
Report Modules don't support media
"""
from corehq.apps.userreports.tests import get_sample_report_config
app = Application.new_app('domain', "Untitled Application", application_version=APP_V2)
report_module = app.add_module(ReportModule.new_module('Reports', None))
report_module.unique_id = 'report_module'
report = get_sample_report_config()
report._id = 'd3ff18cd83adf4550b35db8d391f6008'
report_app_config = ReportAppConfig(report_id=report._id,
header={'en': 'CommBugz'})
report_app_config._report = report
report_module.report_configs = [report_app_config]
report_module._loaded = True
image_path = 'jr://file/commcare/case_list_image.jpg'
audio_path = 'jr://file/commcare/case_list_audo.mp3'
app.get_module(0).case_list_form.set_icon('en', image_path)
app.get_module(0).case_list_form.set_audio('en', audio_path)
self.assertFalse(app.get_module(0).uses_media())
self.assertEqual(len(app.all_media), 0)
class LocalizedMediaSuiteTest(SimpleTestCase, TestXmlMixin):
"""
For CC >= 2.21
    Tests the following for form, module, case_list_menu and case_list_form:
- suite.xml should contain correct localized media references
- app_strings should contain all of above media references
- translations should be correct for each of above app_strings
"""
file_path = ('data', 'suite')
image_path = 'jr://file/commcare/case_list_image.jpg'
audio_path = 'jr://file/commcare/case_list_audo.mp3'
hindi_image = 'jr://file/commcare/case_list_image_hin.jpg'
hindi_audio = 'jr://file/commcare/case_list_audo_hin.mp3'
def setUp(self):
self.app = Application.new_app('domain', "my app", application_version=APP_V2)
self.module = self.app.add_module(Module.new_module("Module 1", None))
self.form = self.app.new_form(0, "Form 1", None)
self.min_spec = BuildSpec.from_string('2.21/latest')
self.app.build_spec = self.min_spec
def makeXML(self, menu_locale_id, image_locale_id, audio_locale_id):
XML_template = """
<partial>
<display>
<text>
<locale id="{menu_locale_id}"/>
</text>
<text form="image">
<locale id="{image_locale_id}"/>
</text>
<text form="audio">
<locale id="{audio_locale_id}"/>
</text>
</display>
</partial>
"""
return XML_template.format(
menu_locale_id=menu_locale_id,
image_locale_id=image_locale_id,
audio_locale_id=audio_locale_id,
)
def XML_without_media(self, menu_locale_id, for_action_menu=False):
if for_action_menu:
XML_template = """
<partial>
<display>
<text>
<locale id="{menu_locale_id}"/>
</text>
</display>
</partial>
"""
else:
XML_template = """
<partial>
<text>
<locale id="{menu_locale_id}"/>
</text>
</partial>
"""
return XML_template.format(
menu_locale_id=menu_locale_id,
)
def test_form_suite(self):
no_media_xml = self.XML_without_media("forms.m0f0")
self.assertXmlPartialEqual(no_media_xml, self.app.create_suite(), "./entry/command[@id='m0-f0']/text")
self.form.set_icon('en', self.image_path)
self.form.set_audio('en', self.audio_path)
XML = self.makeXML("forms.m0f0", "forms.m0f0.icon", "forms.m0f0.audio")
self.assertXmlPartialEqual(XML, self.app.create_suite(), "./entry/command[@id='m0-f0']/display")
self._assert_app_strings_available(self.app, 'en')
icon_locale = id_strings.form_icon_locale(self.form)
audio_locale = id_strings.form_audio_locale(self.form)
self._test_correct_icon_translations(self.app, self.form, icon_locale)
self._test_correct_audio_translations(self.app, self.form, audio_locale)
def test_module_suite(self):
no_media_xml = self.XML_without_media("modules.m0")
self.assertXmlPartialEqual(no_media_xml, self.app.create_suite(), "././menu[@id='m0']/text")
self.module.set_icon('en', self.image_path)
self.module.set_audio('en', self.audio_path)
XML = self.makeXML("modules.m0", "modules.m0.icon", "modules.m0.audio")
self.assertXmlPartialEqual(XML, self.app.create_suite(), "./menu[@id='m0']/display")
self._assert_app_strings_available(self.app, 'en')
icon_locale = id_strings.module_icon_locale(self.module)
audio_locale = id_strings.module_audio_locale(self.module)
self._test_correct_icon_translations(self.app, self.module, icon_locale)
self._test_correct_audio_translations(self.app, self.module, audio_locale)
def test_case_list_form_media(self):
app = AppFactory.case_list_form_app_factory().app
app.build_spec = self.min_spec
no_media_xml = self.XML_without_media("case_list_form.m0", for_action_menu=True)
self.assertXmlPartialEqual(
no_media_xml,
app.create_suite(),
"./detail[@id='m0_case_short']/action/display"
)
app.get_module(0).case_list_form.set_icon('en', self.image_path)
app.get_module(0).case_list_form.set_audio('en', self.audio_path)
XML = self.makeXML("case_list_form.m0", "case_list_form.m0.icon", "case_list_form.m0.audio")
self.assertXmlPartialEqual(XML, app.create_suite(), "./detail[@id='m0_case_short']/action/display")
self._assert_app_strings_available(app, 'en')
icon_locale = id_strings.case_list_form_icon_locale(app.get_module(0))
audio_locale = id_strings.case_list_form_audio_locale(app.get_module(0))
self._test_correct_icon_translations(app, app.get_module(0).case_list_form, icon_locale)
self._test_correct_audio_translations(app, app.get_module(0).case_list_form, audio_locale)
def test_case_list_menu_media(self):
self.module.case_list.show = True
no_media_xml = self.XML_without_media("case_lists.m0")
self.assertXmlPartialEqual(no_media_xml, self.app.create_suite(), "./entry/command[@id='m0-case-list']/")
self.module.case_list.set_icon('en', self.image_path)
self.module.case_list.set_audio('en', self.audio_path)
XML = self.makeXML(
"case_lists.m0",
"case_lists.m0.icon",
"case_lists.m0.audio",
)
self.assertXmlPartialEqual(
XML,
self.app.create_suite(),
"./entry/command[@id='m0-case-list']/"
)
self._assert_app_strings_available(self.app, 'en')
icon_locale = id_strings.case_list_icon_locale(self.module)
audio_locale = id_strings.case_list_audio_locale(self.module)
self._test_correct_icon_translations(self.app, self.module.case_list, icon_locale)
self._test_correct_audio_translations(self.app, self.module.case_list, audio_locale)
def _assert_app_strings_available(self, app, lang):
et = etree.XML(app.create_suite())
locale_elems = et.findall(".//locale/[@id]")
locale_strings = [elem.attrib['id'] for elem in locale_elems]
app_strings = commcare_translations.loads(app.create_app_strings(lang))
for string in locale_strings:
if string not in app_strings:
raise AssertionError("App strings did not contain %s" % string)
if not app_strings.get(string, '').strip():
raise AssertionError("App strings has blank entry for %s" % string)
def _test_correct_icon_translations(self, app, menu, menu_locale_id):
# english should have right translation
self._assert_valid_media_translation(app, 'en', menu_locale_id, self.image_path)
# default should have any random translation
self._assert_valid_media_translation(app, 'default', menu_locale_id, self.image_path)
# hindi shouldn't have translation strings
with self.assertRaises(KeyError):
self._assert_valid_media_translation(app, 'hin', menu_locale_id, self.image_path)
# set media for hindi
menu.set_icon('hin', self.hindi_image)
# hindi should have right translation
self._assert_valid_media_translation(app, 'hin', menu_locale_id, self.hindi_image)
def _test_correct_audio_translations(self, app, menu, menu_locale_id):
# english should have right translation
self._assert_valid_media_translation(app, 'en', menu_locale_id, self.audio_path)
# default should have any random translation
self._assert_valid_media_translation(app, 'default', menu_locale_id, self.audio_path)
# hindi shouldn't have translation strings
with self.assertRaises(KeyError):
self._assert_valid_media_translation(app, 'hin', menu_locale_id, self.audio_path)
# set media for hindi
menu.set_audio('hin', self.hindi_audio)
# hindi should have right translation
self._assert_valid_media_translation(app, 'hin', menu_locale_id, self.hindi_audio)
def _assert_valid_media_translation(self, app, lang, media_locale_id, media_path):
# assert that <lang>/app_strings.txt contains media_locale_id=media_path
app_strings = commcare_translations.loads(app.create_app_strings(lang))
self.assertEqual(app_strings[media_locale_id], media_path)
```
#### File: app_manager/tests/test_translations.py
```python
from lxml import etree
from django.test import SimpleTestCase
from corehq.apps.app_manager.translations import escape_output_value
class AppManagerTranslationsTest(SimpleTestCase):
def test_escape_output_value(self):
test_cases = [
('hello', '<value>hello</value>'),
            ('abc < def > abc', '<value>abc &lt; def &gt; abc</value>'),
            ("bee's knees", "<value>bee's knees</value>"),
            ('unfortunate <xml expression', '<value>unfortunate &lt;xml expression</value>'),
            (u'क्लिक', '<value>&#2325;&#2381;&#2354;&#2367;&#2325;</value>'),
            ('&#39', '<value>&amp;#39</value>'),
            ('Here is a ref <output value="/data/no_media" /> with some trailing text and bad < xml.',
             '<value>Here is a ref <output value="/data/no_media"/> with some trailing text and bad &lt; xml.</value>')
]
for input, expected_output in test_cases:
self.assertEqual(expected_output, etree.tostring(escape_output_value(input)))
```
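The test above pins down the contract of `escape_output_value`: it takes a raw translation string and returns an lxml element that serializes to a `<value>` wrapper, keeping well-formed `<output .../>` references intact while escaping stray markup. A minimal usage sketch, mirroring the assertions rather than documenting any additional API:
```python
from lxml import etree

from corehq.apps.app_manager.translations import escape_output_value

# Mirrors the test cases above: stray '<' and '>' are escaped on serialization,
# while <output/> references survive as real child elements.
element = escape_output_value('abc < def > abc')
print(etree.tostring(element))  # '<value>abc &lt; def &gt; abc</value>'

ref = escape_output_value('Here is a ref <output value="/data/no_media" /> and bad < xml.')
print(etree.tostring(ref))      # the <output/> element is kept, the lone '<' is escaped
```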
#### File: app_manager/views/app_summary.py
```python
from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_noop, ugettext_lazy as _
from djangular.views.mixins import JSONResponseMixin, allow_remote_invocation
from corehq.apps.app_manager.view_helpers import ApplicationViewMixin
from corehq.apps.app_manager.xform import VELLUM_TYPES
from corehq.apps.domain.views import LoginAndDomainMixin
from corehq.apps.hqwebapp.views import BasePageView
from corehq.apps.reports.formdetails.readable import FormQuestionResponse
from corehq.apps.style.decorators import use_bootstrap3
class AppSummaryView(JSONResponseMixin, LoginAndDomainMixin, BasePageView, ApplicationViewMixin):
urlname = 'app_summary'
page_title = ugettext_noop("Summary")
template_name = 'app_manager/summary.html'
@use_bootstrap3
def dispatch(self, request, *args, **kwargs):
return super(AppSummaryView, self).dispatch(request, *args, **kwargs)
@property
def main_context(self):
context = super(AppSummaryView, self).main_context
context.update({
'domain': self.domain,
})
return context
@property
def page_context(self):
if not self.app or self.app.doc_type == 'RemoteApp':
raise Http404()
form_name_map = {}
for module in self.app.get_modules():
for form in module.get_forms():
form_name_map[form.unique_id] = {
'module_name': module.name,
'form_name': form.name
}
return {
'VELLUM_TYPES': VELLUM_TYPES,
'form_name_map': form_name_map,
'langs': self.app.langs,
}
@property
def parent_pages(self):
return [
{
'title': _("Applications"),
'url': reverse('view_app', args=[self.domain, self.app_id]),
},
{
'title': self.app.name,
'url': reverse('view_app', args=[self.domain, self.app_id]),
}
]
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain, self.app_id])
@allow_remote_invocation
def get_case_data(self, in_data):
return {
'response': self.app.get_case_metadata().to_json(),
'success': True,
}
@allow_remote_invocation
def get_form_data(self, in_data):
modules = []
for module in self.app.get_modules():
forms = []
for form in module.get_forms():
questions = form.get_questions(
self.app.langs,
include_triggers=True,
include_groups=True,
include_translations=True
)
forms.append({
'id': form.unique_id,
'name': form.name,
'questions': [FormQuestionResponse(q).to_json() for q in questions],
})
modules.append({
'id': module.unique_id,
'name': module.name,
'forms': forms
})
return {
'response': modules,
'success': True,
}
```
#### File: app_manager/views/media_utils.py
```python
def process_media_attribute(attribute, resp, val):
if val:
if val.startswith('jr://'):
pass
elif val.startswith('/file/'):
val = 'jr:/' + val
elif val.startswith('file/'):
val = 'jr://' + val
elif val.startswith('/'):
val = 'jr://file' + val
else:
val = 'jr://file/' + val
resp['corrections'][attribute] = val
else:
val = None
return val
def handle_media_edits(request, item, should_edit, resp, lang):
if 'corrections' not in resp:
resp['corrections'] = {}
for attribute in ('media_image', 'media_audio'):
if should_edit(attribute):
media_path = process_media_attribute(attribute, resp, request.POST.get(attribute))
item._set_media(attribute, lang, media_path)
```
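`process_media_attribute` normalizes whatever path the client POSTs into a `jr://file/...` reference and records the corrected value on the response. A small illustration of the branches above, using hypothetical paths:
```python
# Hypothetical paths, shown only to illustrate the normalization branches above.
resp = {'corrections': {}}

print(process_media_attribute('media_image', resp, 'commcare/image/logo.png'))
# -> 'jr://file/commcare/image/logo.png'   (bare relative path)
print(process_media_attribute('media_image', resp, '/commcare/image/logo.png'))
# -> 'jr://file/commcare/image/logo.png'   (leading slash)
print(process_media_attribute('media_audio', resp, 'jr://file/commcare/audio/hello.mp3'))
# -> returned unchanged, it is already a jr:// reference
print(resp['corrections'])
# -> the corrected value recorded per attribute
```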
#### File: app_manager/views/utils.py
```python
import json
from urllib import urlencode
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.decorators import require_deploy_apps
CASE_TYPE_CONFLICT_MSG = (
"Warning: The form's new module "
"has a different case type from the old module.<br />"
"Make sure all case properties you are loading "
"are available in the new case type"
)
@require_deploy_apps
def back_to_main(request, domain, app_id=None, module_id=None, form_id=None,
unique_form_id=None):
"""
returns an HttpResponseRedirect back to the main page for the App Manager app
with the correct GET parameters.
This is meant to be used by views that process a POST request,
which then redirect to the main page.
"""
# TODO: Refactor this function. The length of the args matters :(
page = None
params = {}
args = [domain]
if app_id is not None:
args.append(app_id)
if unique_form_id is not None:
app = get_app(domain, app_id)
obj = app.get_form(unique_form_id, bare=False)
if obj['type'] == 'user_registration':
page = 'view_user_registration'
else:
module_id = obj['module'].id
form_id = obj['form'].id
if module_id is not None:
args.append(module_id)
if form_id is not None:
args.append(form_id)
if page:
view_name = page
else:
view_name = {
1: 'view_app',
2: 'view_app',
3: 'view_module',
4: 'view_form',
}[len(args)]
return HttpResponseRedirect(
"%s%s" % (
reverse('corehq.apps.app_manager.views.%s' % view_name, args=args),
"?%s" % urlencode(params) if params else ""
)
)
def get_langs(request, app):
lang = request.GET.get(
'lang',
request.COOKIES.get('lang', app.langs[0] if hasattr(app, 'langs') and app.langs else '')
)
langs = None
if app and hasattr(app, 'langs'):
if not app.langs and not app.is_remote_app:
# lots of things fail if the app doesn't have any languages.
# the best we can do is add 'en' if there's nothing else.
app.langs.append('en')
app.save()
if not lang or lang not in app.langs:
lang = (app.langs or ['en'])[0]
langs = [lang] + app.langs
return lang, langs
def bail(request, domain, app_id, not_found=""):
if not_found:
messages.error(request, 'Oops! We could not find that %s. Please try again' % not_found)
else:
messages.error(request, 'Oops! We could not complete your request. Please try again')
return back_to_main(request, domain, app_id)
def encode_if_unicode(s):
return s.encode('utf-8') if isinstance(s, unicode) else s
def validate_langs(request, existing_langs, validate_build=True):
o = json.loads(request.body)
langs = o['langs']
rename = o['rename']
build = o['build']
assert set(rename.keys()).issubset(existing_langs)
assert set(rename.values()).issubset(langs)
# assert that there are no repeats in the values of rename
assert len(set(rename.values())) == len(rename.values())
# assert that no lang is renamed to an already existing lang
for old, new in rename.items():
if old != new:
assert(new not in existing_langs)
# assert that the build langs are in the correct order
if validate_build:
assert sorted(build, key=lambda lang: langs.index(lang)) == build
return (langs, rename, build)
```
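To make the assertions in `validate_langs` concrete, here is a hypothetical request body that passes all of its checks for an app whose existing languages are `['en', 'fra']` (illustrative only, not taken from the codebase):
```python
# Hypothetical payload for validate_langs, assuming existing_langs == ['en', 'fra']:
# - every renamed key already exists and every renamed value is in the new lang list
# - nothing is renamed onto a different, already existing language
# - the build langs keep the same relative order as "langs"
payload = {
    "langs": ["en", "hin"],    # 'fra' is being renamed to 'hin'
    "rename": {"fra": "hin"},
    "build": ["en", "hin"],
}
```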
#### File: app_manager/views/view_generic.py
```python
from django.http import Http404
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render
from corehq.apps.app_manager.views.modules import get_module_template, \
get_module_view_context
from corehq import privileges
from corehq.apps.app_manager.forms import CopyApplicationForm
from corehq.apps.app_manager.views.apps import get_apps_base_context, \
get_app_view_context
from corehq.apps.app_manager.views.forms import \
get_form_view_context_and_template
from corehq.apps.app_manager.views.utils import bail, encode_if_unicode
from corehq.apps.hqmedia.controller import (
MultimediaImageUploadController,
MultimediaAudioUploadController,
)
from corehq.apps.hqmedia.models import (
ApplicationMediaReference,
CommCareImage,
)
from corehq.apps.hqmedia.views import (
ProcessImageFileUploadView,
ProcessAudioFileUploadView,
)
from corehq.apps.app_manager.util import (
get_all_case_properties,
get_commcare_versions,
get_usercase_properties,
)
from dimagi.utils.couch.resource_conflict import retry_resource
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.models import (
ANDROID_LOGO_PROPERTY_MAPPING,
ModuleNotFoundException,
)
from django_prbac.utils import has_privilege
@retry_resource(3)
def view_generic(request, domain, app_id=None, module_id=None, form_id=None,
is_user_registration=False, copy_app_form=None):
"""
    This is the main view for the app. All other views redirect here.
"""
if form_id and not module_id:
return bail(request, domain, app_id)
app = module = form = None
try:
if app_id:
app = get_app(domain, app_id)
if is_user_registration:
if not app.show_user_registration:
raise Http404()
form = app.get_user_registration()
if module_id:
try:
module = app.get_module(module_id)
except ModuleNotFoundException:
raise Http404()
if not module.unique_id:
module.get_or_create_unique_id()
app.save()
if form_id:
try:
form = module.get_form(form_id)
except IndexError:
raise Http404()
except ModuleNotFoundException:
return bail(request, domain, app_id)
context = get_apps_base_context(request, domain, app)
if app and app.copy_of:
# don't fail hard.
return HttpResponseRedirect(reverse(
"corehq.apps.app_manager.views.view_app", args=[domain, app.copy_of]
))
# grandfather in people who set commcare sense earlier
if app and 'use_commcare_sense' in app:
if app['use_commcare_sense']:
if 'features' not in app.profile:
app.profile['features'] = {}
app.profile['features']['sense'] = 'true'
del app['use_commcare_sense']
app.save()
context.update({
'module': module,
'form': form,
})
lang = context['lang']
if app and not module and hasattr(app, 'translations'):
context.update({"translations": app.translations.get(lang, {})})
if form:
template, form_context = get_form_view_context_and_template(
request, domain, form, context['langs'], is_user_registration
)
context.update({
'case_properties': get_all_case_properties(app),
'usercase_properties': get_usercase_properties(app),
})
context.update(form_context)
elif module:
template = get_module_template(module)
# make sure all modules have unique ids
app.ensure_module_unique_ids(should_save=True)
module_context = get_module_view_context(app, module)
context.update(module_context)
elif app:
template = "app_manager/app_view.html"
context.update(get_app_view_context(request, app))
else:
from corehq.apps.dashboard.views import NewUserDashboardView
from corehq.apps.style.utils import set_bootstrap_version3
from crispy_forms.utils import set_template_pack
set_bootstrap_version3()
set_template_pack('bootstrap3')
template = NewUserDashboardView.template_name
context.update({'templates': NewUserDashboardView.templates(domain)})
# update multimedia context for forms and modules.
menu_host = form or module
if menu_host:
default_file_name = 'module%s' % module_id
if form_id:
default_file_name = '%s_form%s' % (default_file_name, form_id)
specific_media = {
'menu': {
'menu_refs': app.get_menu_media(
module, module_id, form=form, form_index=form_id, to_language=lang
),
'default_file_name': '{name}_{lang}'.format(name=default_file_name, lang=lang),
}
}
if module and module.uses_media():
def _make_name(suffix):
return "{default_name}_{suffix}_{lang}".format(
default_name=default_file_name,
suffix=suffix,
lang=lang,
)
specific_media['case_list_form'] = {
'menu_refs': app.get_case_list_form_media(module, module_id, to_language=lang),
'default_file_name': _make_name('case_list_form'),
}
specific_media['case_list_menu_item'] = {
'menu_refs': app.get_case_list_menu_item_media(module, module_id, to_language=lang),
'default_file_name': _make_name('case_list_menu_item'),
}
specific_media['case_list_lookup'] = {
'menu_refs': app.get_case_list_lookup_image(module, module_id),
'default_file_name': '{}_case_list_lookup'.format(default_file_name),
}
if hasattr(module, 'product_details'):
specific_media['product_list_lookup'] = {
'menu_refs': app.get_case_list_lookup_image(module, module_id, type='product'),
'default_file_name': '{}_product_list_lookup'.format(default_file_name),
}
context.update({
'multimedia': {
"references": app.get_references(),
"object_map": app.get_object_map(),
'upload_managers': {
'icon': MultimediaImageUploadController(
"hqimage",
reverse(ProcessImageFileUploadView.name,
args=[app.domain, app.get_id])
),
'audio': MultimediaAudioUploadController(
"hqaudio", reverse(ProcessAudioFileUploadView.name,
args=[app.domain, app.get_id])
),
},
}
})
context['multimedia'].update(specific_media)
error = request.GET.get('error', '')
context.update({
'error': error,
'app': app,
})
# Pass form for Copy Application to template:
context.update({
'copy_app_form': copy_app_form if copy_app_form is not None else CopyApplicationForm(app_id)
})
context['latest_commcare_version'] = get_commcare_versions(request.user)[-1]
if app and app.doc_type == 'Application' and has_privilege(request, privileges.COMMCARE_LOGO_UPLOADER):
uploader_slugs = ANDROID_LOGO_PROPERTY_MAPPING.keys()
from corehq.apps.hqmedia.controller import MultimediaLogoUploadController
from corehq.apps.hqmedia.views import ProcessLogoFileUploadView
context.update({
"sessionid": request.COOKIES.get('sessionid'),
'uploaders': [
MultimediaLogoUploadController(
slug,
reverse(
ProcessLogoFileUploadView.name,
args=[domain, app_id, slug],
)
)
for slug in uploader_slugs
],
"refs": {
slug: ApplicationMediaReference(
app.logo_refs.get(slug, {}).get("path", slug),
media_class=CommCareImage,
module_id=app.logo_refs.get(slug, {}).get("m_id"),
).as_dict()
for slug in uploader_slugs
},
"media_info": {
slug: app.logo_refs.get(slug)
for slug in uploader_slugs if app.logo_refs.get(slug)
},
})
response = render(request, template, context)
response.set_cookie('lang', encode_if_unicode(lang))
return response
```
#### File: callcenter/tests/test_indicators.py
```python
from collections import namedtuple
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.xml import V2
from corehq.apps.callcenter.const import DATE_RANGES, WEEK1, WEEK0, MONTH0, MONTH1
from corehq.apps.callcenter.indicator_sets import AAROHI_MOTHER_FORM, CallCenterIndicators, \
cache_key, CachedIndicators
from corehq.apps.callcenter.models import CallCenterIndicatorConfig, TypedIndicator
from corehq.apps.callcenter.utils import sync_call_center_user_case
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.callcenter.tests.sql_fixture import load_data, load_custom_data, clear_data
from corehq.apps.groups.models import Group
from corehq.apps.hqcase.utils import submit_case_blocks, get_case_by_domain_hq_user_id
from corehq.apps.users.models import CommCareUser
from django.test import TestCase
from django.core import cache
CASE_TYPE = 'cc_flw'
locmem_cache = cache.caches['locmem']
def create_domain_and_user(domain_name, username):
domain = create_domain(domain_name)
user = CommCareUser.create(domain_name, username, '***')
domain.call_center_config.enabled = True
domain.call_center_config.case_owner_id = user.user_id
domain.call_center_config.case_type = CASE_TYPE
domain.save()
sync_call_center_user_case(user)
return domain, user
def create_cases_for_types(domain, case_types):
for i, case_type in enumerate(case_types):
submit_case_blocks(
CaseBlock(
create=True,
case_id='person%s' % i,
case_type=case_type,
user_id='user%s' % i,
).as_string(), domain)
def get_indicators(prefix, values, case_type=None, is_legacy=False, limit_ranges=None):
"""
Generate indicators e.g. cases_opened_week0, cases_opened_{case_type}_week0 etc.
"""
ranges = DATE_RANGES
limit_ranges = limit_ranges or DATE_RANGES
data = {}
separator = '' if is_legacy else '_'
infix = '{}{}{}'.format(separator, case_type, separator) if case_type else separator
for i, r in enumerate(ranges):
if r in limit_ranges:
r = r.title() if is_legacy else r
indicator_name = '{prefix}{infix}{suffix}'.format(
prefix=prefix,
infix=infix,
suffix=r)
data[indicator_name] = values[i]
return data
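# Illustration of the naming scheme above (assuming DATE_RANGES orders the periods
# as week0, week1, month0, month1):
#   get_indicators('cases_opened', [1, 2, 3, 4], case_type='dog')
#   -> {'cases_opened_dog_week0': 1, 'cases_opened_dog_week1': 2,
#       'cases_opened_dog_month0': 3, 'cases_opened_dog_month1': 4}
#   get_indicators('casesUpdated', [1, 2, 3, 4], is_legacy=True)
#   -> {'casesUpdatedWeek0': 1, 'casesUpdatedWeek1': 2, ...}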
StaticIndicators = namedtuple('StaticIndicators', 'name, values, is_legacy, infix')
def expected_standard_indicators(no_data=False, include_legacy=True, include_totals=True, case_types=None, limit_ranges=None):
case_types = case_types if case_types is not None else ['person', 'dog']
expected = {}
expected_values = []
if include_totals:
expected_values.extend([
StaticIndicators('forms_submitted', [2L, 4L, 7L, 0L], False, None),
StaticIndicators('cases_total', [4L, 4L, 6L, 5L], False, None),
StaticIndicators('cases_opened', [0L, 1L, 3L, 5L], False, None),
StaticIndicators('cases_closed', [0L, 0L, 2L, 2L], False, None),
StaticIndicators('cases_active', [0L, 1L, 3L, 5L], False, None),
])
if 'dog' in case_types:
        expected_values.extend([
StaticIndicators('cases_total', [3L, 3L, 3L, 5L], False, 'dog'),
StaticIndicators('cases_opened', [0L, 0L, 0L, 5L], False, 'dog'),
StaticIndicators('cases_closed', [0L, 0L, 0L, 2L], False, 'dog'),
StaticIndicators('cases_active', [0L, 0L, 0L, 5L], False, 'dog')
])
if 'person' in case_types:
        expected_values.extend([
StaticIndicators('cases_total', [1L, 1L, 3L, 0L], False, 'person'),
StaticIndicators('cases_opened', [0L, 1L, 3L, 0L], False, 'person'),
StaticIndicators('cases_closed', [0L, 0L, 2L, 0L], False, 'person'),
StaticIndicators('cases_active', [0L, 1L, 3L, 0L], False, 'person'),
])
if include_legacy:
expected_values.extend([
StaticIndicators('formsSubmitted', [2L, 4L, 7L, 0L], True, None),
StaticIndicators('casesUpdated', [0L, 1L, 3L, 5L], True, None),
])
expected['totalCases'] = 0L if no_data else 5L
for val in expected_values:
values = [0L] * 4 if no_data else val.values
expected.update(get_indicators(val.name, values, val.infix, val.is_legacy, limit_ranges))
return expected
class BaseCCTests(TestCase):
def setUp(self):
locmem_cache.clear()
def _test_indicators(self, user, data_set, expected):
user_case = get_case_by_domain_hq_user_id(user.domain, user.user_id, CASE_TYPE)
case_id = user_case.case_id
self.assertIn(case_id, data_set)
user_data = data_set[case_id]
mismatches = []
for k, v in expected.items():
expected_value = user_data.pop(k, None)
if expected_value != v:
mismatches.append('{}: {} != {}'.format(k, v, expected_value))
if mismatches:
self.fail('Mismatching indicators:\n{}'.format('\t\n'.join(mismatches)))
if user_data:
self.fail('Additional indicators:\n{}'.format('\t\n'.join(user_data.keys())))
class CallCenterTests(BaseCCTests):
@classmethod
def setUpClass(cls):
cls.cc_domain, cls.cc_user = create_domain_and_user('callcentertest', 'user1')
load_data(cls.cc_domain.name, cls.cc_user.user_id)
cls.cc_user_no_data = CommCareUser.create(cls.cc_domain.name, 'user3', '***')
cls.aarohi_domain, cls.aarohi_user = create_domain_and_user('aarohi', 'user2')
load_custom_data(cls.aarohi_domain.name, cls.aarohi_user.user_id, xmlns=AAROHI_MOTHER_FORM)
# create one case of each type so that we get the indicators where there is no data for the period
create_cases_for_types(cls.cc_domain.name, ['person', 'dog'])
@classmethod
def tearDownClass(cls):
cls.cc_domain.delete()
cls.aarohi_domain.delete()
clear_data()
def check_cc_indicators(self, data_set, expected):
self._test_indicators(self.cc_user, data_set, expected)
expected_no_data = expected_standard_indicators(no_data=True)
self._test_indicators(self.cc_user_no_data, data_set, expected_no_data)
def test_standard_indicators(self):
indicator_set = CallCenterIndicators(
self.cc_domain.name,
self.cc_domain.default_timezone,
self.cc_domain.call_center_config.case_type,
self.cc_user,
custom_cache=locmem_cache
)
self.assertEqual(
set(indicator_set.user_to_case_map.keys()),
set([self.cc_user.get_id, self.cc_user_no_data.get_id])
)
self.assertEqual(indicator_set.users_needing_data, set([self.cc_user.get_id, self.cc_user_no_data.get_id]))
self.assertEqual(indicator_set.owners_needing_data, set([self.cc_user.get_id, self.cc_user_no_data.get_id]))
self.check_cc_indicators(indicator_set.get_data(), expected_standard_indicators())
def test_standard_indicators_no_legacy(self):
config = CallCenterIndicatorConfig.default_config(self.cc_domain.name, include_legacy=False)
indicator_set = CallCenterIndicators(
self.cc_domain.name,
self.cc_domain.default_timezone,
self.cc_domain.call_center_config.case_type,
self.cc_user,
custom_cache=locmem_cache,
indicator_config=config
)
self._test_indicators(
self.cc_user,
indicator_set.get_data(),
expected_standard_indicators(include_legacy=False))
def test_standard_indicators_case_totals_only(self):
config = CallCenterIndicatorConfig.default_config(self.cc_domain.name, include_legacy=False)
config.cases_total.all_types = False
config.cases_opened.all_types = False
config.cases_closed.all_types = False
config.cases_active.all_types = False
indicator_set = CallCenterIndicators(
self.cc_domain.name,
self.cc_domain.default_timezone,
self.cc_domain.call_center_config.case_type,
self.cc_user,
custom_cache=locmem_cache,
indicator_config=config
)
self._test_indicators(
self.cc_user,
indicator_set.get_data(),
expected_standard_indicators(
include_legacy=False,
include_totals=True,
case_types=[])
)
def test_standard_indicators_load_config_from_db(self):
config = CallCenterIndicatorConfig.default_config(self.cc_domain.name, include_legacy=False)
config.save()
self.addCleanup(config.delete)
indicator_set = CallCenterIndicators(
self.cc_domain.name,
self.cc_domain.default_timezone,
self.cc_domain.call_center_config.case_type,
self.cc_user,
custom_cache=locmem_cache,
)
self._test_indicators(
self.cc_user,
indicator_set.get_data(),
expected_standard_indicators(include_legacy=False))
def test_standard_indicators_case_dog_only(self):
config = CallCenterIndicatorConfig.default_config(self.cc_domain.name, include_legacy=False)
config.forms_submitted.active = False
def dog_only(conf):
conf.total.active = False
conf.all_types = False
conf.types = [TypedIndicator(active=True, date_ranges=[WEEK0, MONTH0], type='dog')]
dog_only(config.cases_total)
dog_only(config.cases_opened)
dog_only(config.cases_closed)
dog_only(config.cases_active)
indicator_set = CallCenterIndicators(
self.cc_domain.name,
self.cc_domain.default_timezone,
self.cc_domain.call_center_config.case_type,
self.cc_user,
custom_cache=locmem_cache,
indicator_config=config
)
self._test_indicators(
self.cc_user,
indicator_set.get_data(),
expected_standard_indicators(
include_legacy=False,
include_totals=False,
case_types=['dog'],
limit_ranges=[WEEK0, MONTH0])
)
def test_standard_indicators_case_week1_only(self):
config = CallCenterIndicatorConfig.default_config(self.cc_domain.name, include_legacy=False)
config.forms_submitted.date_ranges = [WEEK1]
config.cases_total.total.date_ranges = [WEEK1]
config.cases_opened.total.date_ranges = [WEEK1]
config.cases_closed.total.date_ranges = [WEEK1]
config.cases_active.total.date_ranges = [WEEK1]
indicator_set = CallCenterIndicators(
self.cc_domain.name,
self.cc_domain.default_timezone,
self.cc_domain.call_center_config.case_type,
self.cc_user,
custom_cache=locmem_cache,
indicator_config=config
)
self._test_indicators(
self.cc_user,
indicator_set.get_data(),
expected_standard_indicators(
include_legacy=False,
include_totals=True,
limit_ranges=[WEEK1])
)
def test_sync_log(self):
user_case = get_case_by_domain_hq_user_id(self.cc_domain.name, self.cc_user.get_id, CASE_TYPE)
indicator_set = CallCenterIndicators(
self.cc_domain.name,
self.cc_domain.default_timezone,
self.cc_domain.call_center_config.case_type,
self.cc_user,
custom_cache=locmem_cache,
override_cases=[user_case]
)
self.assertEqual(indicator_set.user_to_case_map.keys(), [self.cc_user.get_id])
self.assertEqual(indicator_set.users_needing_data, set([self.cc_user.get_id]))
self.assertEqual(indicator_set.owners_needing_data, set([self.cc_user.get_id]))
self._test_indicators(self.cc_user, indicator_set.get_data(), expected_standard_indicators())
def test_custom_indicators(self):
expected = {'totalCases': 0L}
expected.update(get_indicators('formsSubmitted', [3L, 3L, 9L, 0L], is_legacy=True))
expected.update(get_indicators('forms_submitted', [3L, 3L, 9L, 0L]))
expected.update(get_indicators('casesUpdated', [0L, 0L, 0L, 0L], is_legacy=True))
expected.update(get_indicators('cases_total', [0L, 0L, 0L, 0L]))
expected.update(get_indicators('cases_opened', [0L, 0L, 0L, 0L]))
expected.update(get_indicators('cases_closed', [0L, 0L, 0L, 0L]))
expected.update(get_indicators('cases_active', [0L, 0L, 0L, 0L]))
# custom
expected.update(get_indicators('motherForms', [3L, 3L, 9L, 0L], is_legacy=True))
expected.update(get_indicators('childForms', [0L, 0L, 0L, 0L], is_legacy=True))
expected.update(get_indicators('motherDuration', [3L, 4L, 4L, 0L], is_legacy=True))
indicator_set = CallCenterIndicators(
self.aarohi_domain.name,
self.aarohi_domain.default_timezone,
self.aarohi_domain.call_center_config.case_type,
self.aarohi_user,
custom_cache=locmem_cache
)
self._test_indicators(
self.aarohi_user,
indicator_set.get_data(),
expected
)
def test_caching(self):
user_case = get_case_by_domain_hq_user_id(self.cc_domain.name, self.cc_user._id, CASE_TYPE)
expected_indicators = {'a': 1, 'b': 2}
cached_data = CachedIndicators(
user_id=self.cc_user.get_id,
case_id=user_case.case_id,
domain=self.cc_domain.name,
indicators=expected_indicators
)
indicator_set = CallCenterIndicators(
self.cc_domain.name,
self.cc_domain.default_timezone,
self.cc_domain.call_center_config.case_type,
self.cc_user,
custom_cache=locmem_cache
)
locmem_cache.set(cache_key(self.cc_user.get_id, indicator_set.reference_date), cached_data.to_json())
self.assertEqual(
set(indicator_set.user_to_case_map.keys()),
set([self.cc_user.get_id, self.cc_user_no_data.get_id])
)
self.assertEquals(indicator_set.users_needing_data, set([self.cc_user_no_data.get_id]))
self.assertEqual(indicator_set.owners_needing_data, set([self.cc_user_no_data.get_id]))
self.check_cc_indicators(indicator_set.get_data(), expected_indicators)
def test_no_cases_owned_by_user(self):
"""
Test to verify that only data belonging to users managed by the supervisor is returned.
"""
indicator_set = CallCenterIndicators(
self.cc_domain.name,
self.cc_domain.default_timezone,
self.cc_domain.call_center_config.case_type,
self.cc_user_no_data,
custom_cache=locmem_cache
)
self.assertEqual(indicator_set.user_to_case_map.keys(), [])
self.assertEqual(indicator_set.users_needing_data, set())
self.assertEqual(indicator_set.owners_needing_data, set())
self.assertEqual(indicator_set.get_data(), {})
class CallCenterSupervisorGroupTest(BaseCCTests):
@classmethod
def setUpClass(cls):
domain_name = 'cc_test_supervisor_group'
cls.domain = create_domain(domain_name)
cls.supervisor = CommCareUser.create(domain_name, 'supervisor@' + domain_name, '***')
cls.supervisor_group = Group(
domain=domain_name,
name='supervisor group',
case_sharing=True,
users=[cls.supervisor.get_id]
)
cls.supervisor_group.save()
cls.domain.call_center_config.enabled = True
cls.domain.call_center_config.case_owner_id = cls.supervisor_group.get_id
cls.domain.call_center_config.case_type = 'cc_flw'
cls.domain.save()
cls.user = CommCareUser.create(domain_name, 'user@' + domain_name, '***')
sync_call_center_user_case(cls.user)
load_data(domain_name, cls.user.user_id)
# create one case of each type so that we get the indicators where there is no data for the period
create_cases_for_types(domain_name, ['person', 'dog'])
@classmethod
def tearDownClass(cls):
cls.domain.delete()
clear_data()
def test_users_assigned_via_group(self):
"""
        Ensure that users who are assigned to the supervisor via a group are also
        included in the final data set.
"""
indicator_set = CallCenterIndicators(
self.domain.name,
self.domain.default_timezone,
self.domain.call_center_config.case_type,
self.supervisor,
custom_cache=locmem_cache
)
self.assertEqual(indicator_set.user_to_case_map.keys(), [self.user.get_id])
self.assertEqual(indicator_set.users_needing_data, set([self.user.get_id]))
self.assertEqual(indicator_set.owners_needing_data, set([self.user.get_id]))
self._test_indicators(self.user, indicator_set.get_data(), expected_standard_indicators())
class CallCenterCaseSharingTest(BaseCCTests):
@classmethod
def setUpClass(cls):
domain_name = 'cc_test_case_sharing'
cls.domain = create_domain(domain_name)
cls.supervisor = CommCareUser.create(domain_name, 'supervisor@' + domain_name, '***')
cls.domain.call_center_config.enabled = True
cls.domain.call_center_config.case_owner_id = cls.supervisor.get_id
cls.domain.call_center_config.case_type = 'cc_flw'
cls.domain.save()
cls.user = CommCareUser.create(domain_name, 'user@' + domain_name, '***')
sync_call_center_user_case(cls.user)
cls.group = Group(
domain=domain_name,
name='case sharing group',
case_sharing=True,
users=[cls.user.user_id]
)
cls.group.save()
load_data(
domain_name,
cls.user.user_id,
'not this user',
cls.group.get_id,
case_opened_by=cls.user.user_id,
case_closed_by=cls.user.user_id)
# create one case of each type so that we get the indicators where there is no data for the period
create_cases_for_types(domain_name, ['person', 'dog'])
@classmethod
def tearDownClass(cls):
cls.domain.delete()
clear_data()
def test_cases_owned_by_group(self):
"""
Ensure that indicators include cases owned by a case sharing group the user is part of.
"""
indicator_set = CallCenterIndicators(
self.domain.name,
self.domain.default_timezone,
self.domain.call_center_config.case_type,
self.supervisor,
custom_cache=locmem_cache
)
self.assertEqual(indicator_set.user_to_case_map.keys(), [self.user.get_id])
self.assertEqual(indicator_set.users_needing_data, set([self.user.get_id]))
self.assertEqual(indicator_set.owners_needing_data, set([self.user.get_id, self.group.get_id]))
expected = expected_standard_indicators()
expected['totalCases'] = 0L # no cases with user_id = self.user.get_id
self._test_indicators(self.user, indicator_set.get_data(), expected)
class CallCenterTestOpenedClosed(BaseCCTests):
@classmethod
def setUpClass(cls):
domain_name = 'cc_test_opened_closed'
cls.domain = create_domain(domain_name)
cls.supervisor = CommCareUser.create(domain_name, 'supervisor@' + domain_name, '***')
cls.domain.call_center_config.enabled = True
cls.domain.call_center_config.case_owner_id = cls.supervisor.get_id
cls.domain.call_center_config.case_type = 'cc_flw'
cls.domain.save()
cls.user = CommCareUser.create(domain_name, 'user@' + domain_name, '***')
sync_call_center_user_case(cls.user)
load_data(domain_name, cls.user.user_id, case_opened_by='not me', case_closed_by='not me')
# create one case of each type so that we get the indicators where there is no data for the period
create_cases_for_types(domain_name, ['person', 'dog'])
@classmethod
def tearDownClass(cls):
cls.domain.delete()
clear_data()
def test_opened_closed(self):
"""
        Test that the cases_closed and cases_opened indicators are counted based on
        the user who opened or closed the case, not on the case owner.
"""
indicator_set = CallCenterIndicators(
self.domain.name,
self.domain.default_timezone,
self.domain.call_center_config.case_type,
self.supervisor,
custom_cache=locmem_cache
)
expected = expected_standard_indicators()
# cases opened / closed by another user so expect 0
for key in expected:
if key.startswith('cases_opened') or key.startswith('cases_closed'):
expected[key] = 0L
self._test_indicators(self.user, indicator_set.get_data(), expected)
```
#### File: callcenter/tests/test_location_owners.py
```python
from django.test import TestCase
from casexml.apps.case.tests import delete_all_cases
from corehq.apps.callcenter.utils import sync_call_center_user_case
from corehq.apps.domain.models import CallCenterProperties
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.hqcase.utils import get_case_by_domain_hq_user_id
from corehq.apps.locations.models import LocationType
from corehq.apps.locations.tests import make_loc
from corehq.apps.users.models import CommCareUser
TEST_DOMAIN = "cc-location-owner-test-domain"
CASE_TYPE = "cc-case-type"
LOCATION_TYPE = "my-location"
class CallCenterLocationOwnerTest(TestCase):
@classmethod
def get_call_center_config(cls):
return CallCenterProperties(
enabled=True,
use_user_location_as_owner=True,
case_owner_id=None,
case_type=CASE_TYPE
)
@classmethod
def setUpClass(cls):
cls.domain = create_domain(TEST_DOMAIN)
user = CommCareUser.create(TEST_DOMAIN, 'user1', '***')
cls.user_id = user.user_id
cls.domain.call_center_config = cls.get_call_center_config()
cls.domain.save()
LocationType.objects.get_or_create(
domain=cls.domain.name,
name=LOCATION_TYPE,
)
@classmethod
def tearDownClass(cls):
cls.domain.delete()
def setUp(self):
self.user = CommCareUser.get(self.user_id)
def tearDown(self):
delete_all_cases()
def test_no_location_sync(self):
self.user.unset_location()
self.user.save()
sync_call_center_user_case(self.user)
case = get_case_by_domain_hq_user_id(TEST_DOMAIN, self.user._id, CASE_TYPE)
self.assertEqual(case.owner_id, "")
def test_location_sync(self):
location = make_loc('loc', type=LOCATION_TYPE, domain=TEST_DOMAIN)
self.user.set_location(location)
self.user.save()
case = get_case_by_domain_hq_user_id(TEST_DOMAIN, self.user._id, CASE_TYPE)
self.assertEqual(case.owner_id, location._id)
def test_location_change_sync(self):
location = make_loc('loc', type=LOCATION_TYPE, domain=TEST_DOMAIN)
self.user.set_location(location)
self.user.save()
location_2 = make_loc('loc2', type=LOCATION_TYPE, domain=TEST_DOMAIN)
self.user.set_location(location_2)
self.user.save()
case = get_case_by_domain_hq_user_id(TEST_DOMAIN, self.user._id, CASE_TYPE)
self.assertEqual(case.owner_id, location_2._id)
```
#### File: apps/callcenter/utils.py
```python
from __future__ import absolute_import
from collections import namedtuple
from datetime import datetime, timedelta
import pytz
from casexml.apps.case.dbaccessors import get_open_case_docs_in_domain
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.xml import V2
import uuid
from xml.etree import ElementTree
from corehq.apps.app_manager.const import USERCASE_TYPE
from corehq.apps.domain.models import Domain
from corehq.apps.es.domains import DomainES
from corehq.apps.es import filters
from corehq.apps.hqcase.utils import submit_case_blocks, get_case_by_domain_hq_user_id
from corehq.feature_previews import CALLCENTER
from corehq.util.quickcache import quickcache
from corehq.util.timezones.conversions import UserTime, ServerTime
from dimagi.utils.couch import CriticalSection
class DomainLite(namedtuple('DomainLite', 'name default_timezone cc_case_type use_fixtures')):
def midnights(self, utcnow=None):
"""Returns a list containing two datetimes in UTC that corresponds to midnight
in the domains timezone on either side of the current UTC datetime.
i.e. [<previous midnight in TZ>, <next midnight in TZ>]
>>> d = DomainLite('', 'Asia/Kolkata', '', True)
>>> d.midnights(datetime(2015, 8, 27, 18, 30, 0 ))
[datetime.datetime(2015, 8, 26, 18, 30), datetime.datetime(2015, 8, 27, 18, 30)]
>>> d.midnights(datetime(2015, 8, 27, 18, 31, 0 ))
[datetime.datetime(2015, 8, 27, 18, 30), datetime.datetime(2015, 8, 28, 18, 30)]
"""
utcnow = utcnow or datetime.utcnow()
tz = pytz.timezone(self.default_timezone)
current_time_tz = ServerTime(utcnow).user_time(tz).done()
midnight_tz1 = current_time_tz.replace(hour=0, minute=0, second=0, microsecond=0)
midnight_tz_utc1 = UserTime(midnight_tz1).server_time().done()
midnight_tz_utc2 = midnight_tz_utc1 + timedelta(days=(1 if midnight_tz_utc1 < utcnow else -1))
return sorted([midnight_tz_utc1, midnight_tz_utc2])
CallCenterCase = namedtuple('CallCenterCase', 'case_id hq_user_id')
def sync_user_case(commcare_user, case_type, owner_id):
"""
Each time a CommCareUser is saved this method gets called and creates or updates
a case associated with the user, populated with the user's details.
This is also called to create user cases when the usercase is used for the
first time.
"""
with CriticalSection(['user_case_%s_for_%s' % (case_type, commcare_user._id)]):
domain = commcare_user.project
def valid_element_name(name):
try:
ElementTree.fromstring('<{}/>'.format(name))
return True
except ElementTree.ParseError:
return False
# remove any keys that aren't valid XML element names
fields = {k: v for k, v in commcare_user.user_data.items() if valid_element_name(k)}
# language or phone_number can be null and will break
# case submission
fields.update({
'name': commcare_user.name or commcare_user.raw_username,
'username': commcare_user.raw_username,
'email': commcare_user.email,
'language': commcare_user.language or '',
'phone_number': commcare_user.phone_number or ''
})
case = get_case_by_domain_hq_user_id(domain.name, commcare_user._id, case_type)
close = commcare_user.to_be_deleted() or not commcare_user.is_active
caseblock = None
if case:
props = dict(case.dynamic_case_properties())
changed = close != case.closed
changed = changed or case.type != case_type
changed = changed or case.name != fields['name']
changed = changed or case.owner_id != owner_id
if not changed:
for field, value in fields.items():
if field != 'name' and props.get(field) != value:
changed = True
break
if changed:
caseblock = CaseBlock(
create=False,
case_id=case._id,
owner_id=owner_id,
case_type=case_type,
close=close,
update=fields
)
else:
fields['hq_user_id'] = commcare_user._id
caseblock = CaseBlock(
create=True,
case_id=uuid.uuid4().hex,
owner_id=owner_id,
user_id=owner_id,
case_type=case_type,
update=fields
)
if caseblock:
casexml = ElementTree.tostring(caseblock.as_xml())
submit_case_blocks(casexml, domain.name)
def sync_call_center_user_case(user):
domain = user.project
if domain and domain.call_center_config.enabled:
owner_id = domain.call_center_config.case_owner_id
if domain.call_center_config.use_user_location_as_owner:
owner_id = user.location_id
sync_user_case(
user,
domain.call_center_config.case_type,
owner_id
)
def sync_usercase(user):
domain = user.project
if domain and domain.usercase_enabled:
sync_user_case(
user,
USERCASE_TYPE,
user.get_id
)
def is_midnight_for_domain(midnight_form_domain, error_margin=15, current_time=None):
current_time = current_time or datetime.utcnow()
diff = current_time - midnight_form_domain
return diff.days >= 0 and diff < timedelta(minutes=error_margin)
def get_call_center_domains():
result = (
DomainES()
.is_active()
.is_snapshot(False)
.filter(filters.term('call_center_config.enabled', True))
.fields(['name', 'default_timezone', 'call_center_config.case_type', 'call_center_config.use_fixtures'])
.run()
)
def to_domain_lite(hit):
return DomainLite(
name=hit['name'],
default_timezone=hit['default_timezone'],
cc_case_type=hit.get('call_center_config.case_type', ''),
use_fixtures=hit.get('call_center_config.use_fixtures', True)
)
return [to_domain_lite(hit) for hit in result.hits]
def get_call_center_cases(domain_name, case_type, user=None):
all_cases = []
if user:
docs = (doc for owner_id in user.get_owner_ids()
for doc in get_open_case_docs_in_domain(domain_name, case_type,
owner_id=owner_id))
else:
docs = get_open_case_docs_in_domain(domain_name, case_type)
for case_doc in docs:
hq_user_id = case_doc.get('hq_user_id', None)
if hq_user_id:
all_cases.append(CallCenterCase(
case_id=case_doc['_id'],
hq_user_id=hq_user_id
))
return all_cases
@quickcache(['domain'])
def get_call_center_case_type_if_enabled(domain):
if CALLCENTER.enabled(domain):
return Domain.get_by_name(domain).call_center_config.case_type
```
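A quick sketch of how `is_midnight_for_domain` behaves. This is a hypothetical usage example, assuming the module is importable as `corehq.apps.callcenter.utils` in a configured CommCare HQ environment; the datetimes are illustrative only.
```python
from datetime import datetime
from corehq.apps.callcenter.utils import is_midnight_for_domain

# Suppose the domain's midnight, expressed in UTC, is 18:30 (e.g. Asia/Kolkata).
midnight_utc = datetime(2015, 8, 27, 18, 30)

# Within the 15-minute error margin after midnight -> True
is_midnight_for_domain(midnight_utc, current_time=datetime(2015, 8, 27, 18, 40))

# More than 15 minutes past midnight -> False
is_midnight_for_domain(midnight_utc, current_time=datetime(2015, 8, 27, 18, 50))

# Before midnight (negative difference) -> False
is_midnight_for_domain(midnight_utc, current_time=datetime(2015, 8, 27, 18, 0))
```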
#### File: management/commands/reprocess_error_forms.py
```python
from collections import defaultdict
from django.core.management.base import BaseCommand, CommandError, LabelCommand
from corehq.apps.cleanup.management.commands.reprocess_error_form import reprocess_form_cases
from corehq.apps.cleanup.xforms import iter_problem_forms
from optparse import make_option
from dimagi.utils.parsing import string_to_datetime
class Command(BaseCommand):
args = '<domain> <since>'
help = ('Reprocesses all documents tagged as errors and tries to '
'regenerate the appropriate case blocks for them. Can pass in '
'a domain and date to process forms received after that date or '
'just a domain to process all problem forms in the domain.')
option_list = LabelCommand.option_list + \
(make_option('--dryrun', action='store_true', dest='dryrun', default=False,
help="Don't do the actual reprocessing, just print the ids that would be affected"),)
def handle(self, *args, **options):
domain = since = None
if len(args) == 1:
domain = args[0]
elif len(args) == 2:
domain = args[0]
since = string_to_datetime(args[1])
else:
raise CommandError('Usage: %s\n%s' % (self.args, self.help))
succeeded = []
failed = []
error_messages = defaultdict(lambda: 0)
for form in iter_problem_forms(domain, since):
print "%s\t%s\t%s\t%s\t%s" % (form._id, form.received_on,
form.xmlns,
form.xpath('form/meta/username'),
form.problem.strip())
if not options["dryrun"]:
try:
reprocess_form_cases(form)
except Exception, e:
failed.append(form._id)
error_messages[str(e)] += 1
else:
succeeded.append(form._id)
print "%s / %s forms successfully processed, %s failures" % \
(len(succeeded), len(succeeded) + len(failed), len(failed))
if error_messages:
print "The following errors were seen: \n%s" % \
("\n".join("%s: %s" % (v, k) for k, v in error_messages.items()))
```
#### File: apps/cleanup/xforms.py
```python
from couchforms.dbaccessors import get_form_ids_by_type
from couchforms.models import XFormError
from dimagi.utils.couch.database import iter_docs
def iter_problem_forms(domain, since=None):
problem_ids = get_form_ids_by_type(domain, 'XFormError', start=since)
for doc in iter_docs(XFormError.get_db(), problem_ids):
yield XFormError.wrap(doc)
```
#### File: commtrack/dbaccessors/supply_point_case_by_domain_external_id.py
```python
from corehq.apps.commtrack.models import SupplyPointCase
def get_supply_point_case_by_domain_external_id(domain, external_id):
return SupplyPointCase.view('hqcase/by_domain_external_id',
key=[domain, str(external_id)],
reduce=False,
include_docs=True,
limit=1).first()
```
#### File: management/commands/update_supply_point_locations.py
```python
from xml.etree import ElementTree
from django.core.management.base import BaseCommand
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from dimagi.utils.chunked import chunked
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domain.models import Domain
from corehq.apps.hqcase.utils import submit_case_blocks
def needs_update(case):
return (case.get('location_id', None) and
case['owner_id'] != case['location_id'])
def case_block(case):
return ElementTree.tostring(CaseBlock(
create=False,
case_id=case['_id'],
owner_id=case['location_id'],
).as_xml())
def get_cases(domain):
supply_point_ids = (case['id'] for case in CommCareCase.get_db().view(
'commtrack/supply_point_by_loc',
startkey=[domain],
endkey=[domain, {}],
reduce=False,
include_docs=False,
).all())
return iter_docs(CommCareCase.get_db(), supply_point_ids)
def update_supply_points(domain):
case_blocks = (case_block(c) for c in get_cases(domain) if needs_update(c))
if case_blocks:
for chunk in chunked(case_blocks, 100):
submit_case_blocks(chunk, domain)
print "updated {} cases on domain {}".format(len(chunk), domain)
class Command(BaseCommand):
help = ("Make sure all supply point cases have their owner_id set "
"to the location_id")
def handle(self, *args, **options):
all_domains = Domain.get_all_names()
total = len(all_domains)
finished = 0
for domain in all_domains:
update_supply_points(domain)
finished += 1
if finished % 100 == 0:
print "Processed {} of {} domains".format(finished, total)
```
#### File: commtrack/resources/v0_1.py
```python
from tastypie import fields
from corehq.apps.api.resources.v0_1 import CustomResourceMeta, DomainAdminAuthentication
from corehq.apps.products.models import Product
from corehq.apps.api.util import get_object_or_not_exist
from corehq.apps.api.resources import HqBaseResource
"""
Implementation of the CommCare Supply APIs. For more information see:
https://confluence.dimagi.com/display/lmis/API
"""
class ProductResource(HqBaseResource):
type = "product"
id = fields.CharField(attribute='_id', readonly=True, unique=True)
code = fields.CharField(attribute='code', readonly=True, unique=True)
name = fields.CharField(attribute='name', readonly=True)
unit = fields.CharField(attribute='unit', readonly=True, null=True)
description = fields.CharField(attribute='description', readonly=True, null=True)
category = fields.CharField(attribute='category', readonly=True, null=True)
last_modified = fields.DateTimeField(attribute='last_modified', readonly=True, null=True)
# TODO:
# price?
def obj_get(self, request, **kwargs):
return get_object_or_not_exist(Product, kwargs['pk'], kwargs['domain'])
def obj_get_list(self, request, **kwargs):
return Product.by_domain(kwargs['domain'])
class Meta(CustomResourceMeta):
authentication = DomainAdminAuthentication()
resource_name = 'product'
limit = 0
```
#### File: commtrack/tests/test_dbaccessors.py
```python
from django.test import TestCase
from casexml.apps.case.models import CommCareCase
from corehq.apps.commtrack.dbaccessors import \
get_supply_point_ids_in_domain_by_location, \
get_supply_points_json_in_domain_by_location, \
get_supply_point_case_by_location_id, get_supply_point_case_by_location
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.locations.models import Location
class SupplyPointDBAccessorsTest(TestCase):
@classmethod
def setUpClass(cls):
cls.domain = 'supply-point-dbaccessors'
cls.locations = [
Location(domain=cls.domain),
Location(domain=cls.domain),
Location(domain=cls.domain),
]
Location.get_db().bulk_save(cls.locations)
cls.supply_points = [
CommCareCase(domain=cls.domain, type='supply-point',
location_id=cls.locations[0]._id),
CommCareCase(domain=cls.domain, type='supply-point',
location_id=cls.locations[1]._id),
CommCareCase(domain=cls.domain, type='supply-point',
location_id=cls.locations[2]._id),
]
locations_by_id = {location._id: location
for location in cls.locations}
cls.location_supply_point_pairs = [
(locations_by_id[supply_point.location_id], supply_point)
for supply_point in cls.supply_points
]
CommCareCase.get_db().bulk_save(cls.supply_points)
@classmethod
def tearDownClass(cls):
pass
def test_get_supply_point_ids_in_domain_by_location(self):
self.assertEqual(
get_supply_point_ids_in_domain_by_location(self.domain),
{location._id: supply_point._id
for location, supply_point in self.location_supply_point_pairs}
)
def test_get_supply_points_json_in_domain_by_location(self):
self.assertItemsEqual(
get_supply_points_json_in_domain_by_location(self.domain),
[(location._id, supply_point.to_json())
for location, supply_point in self.location_supply_point_pairs]
)
def test_get_supply_point_case_by_location_id(self):
actual = get_supply_point_case_by_location_id(
self.domain, self.locations[0]._id)
expected = SupplyPointCase.wrap(self.supply_points[0].to_json())
self.assertEqual(type(actual), type(expected))
self.assertEqual(actual.to_json(), expected.to_json())
def test_get_supply_point_case_by_location(self):
actual = get_supply_point_case_by_location(self.locations[0])
expected = SupplyPointCase.wrap(self.supply_points[0].to_json())
self.assertEqual(type(actual), type(expected))
self.assertEqual(actual.to_json(), expected.to_json())
```
#### File: apps/commtrack/util.py
```python
from xml.etree import ElementTree
from casexml.apps.case.models import CommCareCase
from corehq import toggles, feature_previews
from corehq.apps.commtrack import const
from corehq.apps.commtrack.const import RequisitionActions
from corehq.apps.commtrack.models import CommtrackConfig, SupplyPointCase, CommtrackActionConfig, \
CommtrackRequisitionConfig
from corehq.apps.products.models import Product
from corehq.apps.programs.models import Program
from corehq.apps.locations.models import Location
import itertools
from datetime import date, timedelta
from calendar import monthrange
from corehq.apps.hqcase.utils import submit_case_blocks
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.xml import V2
from django.utils.text import slugify
from unidecode import unidecode
from django.utils.translation import ugettext as _
import re
def all_sms_codes(domain):
config = CommtrackConfig.for_domain(domain)
actions = dict((action.keyword, action) for action in config.actions)
products = dict((p.code, p) for p in Product.by_domain(domain))
commands = {
config.multiaction_keyword: {'type': 'stock_report_generic', 'caption': 'Stock Report'},
}
sms_codes = zip(('action', 'product', 'command'), (actions, products, commands))
return dict(itertools.chain(*([(k.lower(), (type, v)) for k, v in codes.iteritems()] for type, codes in sms_codes)))
def get_supply_point(domain, site_code=None, loc=None):
if loc is None:
loc = Location.view('commtrack/locations_by_code',
key=[domain, site_code.lower()],
include_docs=True).first()
if loc:
case = SupplyPointCase.get_by_location(loc)
else:
case = None
return {
'case': case,
'location': loc,
}
def make_program(domain, name, code, default=False):
p = Program()
p.domain = domain
p.name = name
p.code = code.lower()
p.default = default
p.save()
return p
def get_or_create_default_program(domain):
program = Program.default_for_domain(domain)
if program:
return program
else:
return make_program(
domain,
_('Uncategorized'),
_('uncategorized'),
default=True
)
def _create_commtrack_config_if_needed(domain):
if CommtrackConfig.for_domain(domain):
return
CommtrackConfig(
domain=domain,
multiaction_enabled=True,
multiaction_keyword='report',
actions=[
CommtrackActionConfig(
action='receipts',
keyword='r',
caption='Received',
),
CommtrackActionConfig(
action='consumption',
keyword='c',
caption='Consumed',
),
CommtrackActionConfig(
action='consumption',
subaction='loss',
keyword='l',
caption='Losses',
),
CommtrackActionConfig(
action='stockonhand',
keyword='soh',
caption='Stock on hand',
),
CommtrackActionConfig(
action='stockout',
keyword='so',
caption='Stock-out',
),
],
).save()
def _enable_commtrack_previews(domain):
for toggle_class in (
toggles.COMMTRACK,
toggles.VELLUM_TRANSACTION_QUESTION_TYPES,
toggles.VELLUM_ADVANCED_ITEMSETS,
toggles.STOCK_TRANSACTION_EXPORT,
):
toggle_class.set(domain, True, toggles.NAMESPACE_DOMAIN)
def make_domain_commtrack(domain_object):
domain_object.commtrack_enabled = True
domain_object.locations_enabled = True
domain_object.save()
_create_commtrack_config_if_needed(domain_object.name)
get_or_create_default_program(domain_object.name)
_enable_commtrack_previews(domain_object.name)
def get_default_requisition_config():
return CommtrackRequisitionConfig(
enabled=True,
actions=[
CommtrackActionConfig(
action=RequisitionActions.REQUEST,
keyword='req',
caption='Request',
),
# TODO not tested yet, so not included
# CommtrackActionConfig(
# action=RequisitionActions.APPROVAL,
# keyword='approve',
# caption='Approved',
# ),
CommtrackActionConfig(
action=RequisitionActions.FULFILL,
keyword='fulfill',
caption='Fulfilled',
),
CommtrackActionConfig(
action=RequisitionActions.RECEIPTS,
keyword='rec',
caption='Requisition Receipts',
),
],
)
def due_date_weekly(dow, past_period=0): # 0 == sunday
"""compute the next due date on a weekly schedule, where reports are
due on 'dow' day of the week (0:sunday, 6:saturday). 'next' due date
is the first due date that occurs today or in the future. if past_period
is non-zero, return the due date that occured N before the next due date
"""
cur_weekday = date.today().isoweekday()
days_till_due = (dow - cur_weekday) % 7
return date.today() + timedelta(days=days_till_due - 7 * past_period)
def due_date_monthly(day, from_end=False, past_period=0):
"""compute the next due date on a monthly schedule, where reports are
due on 'day' day of the month. (if from_end is true, due date is 'day' days
before the end of the month, where 0 is the last day of the month). 'next' due date
is the first due date that occurs today or in the future. if past_period
is non-zero, return the due date that occured N before the next due date
"""
if from_end:
assert False, 'not supported yet'
month_diff = -past_period
if date.today().day > day:
month_diff += 1
month_seq = date.today().year * 12 + (date.today().month - 1)
month_seq += month_diff
y = month_seq // 12
m = month_seq % 12 + 1
return date(y, m, min(day, monthrange(y, m)[1]))
def submit_mapping_case_block(user, index):
mapping = user.get_location_map_case()
if mapping:
caseblock = CaseBlock(
create=False,
case_id=mapping._id,
index=index
)
else:
caseblock = CaseBlock(
create=True,
case_type=const.USER_LOCATION_OWNER_MAP_TYPE,
case_id=location_map_case_id(user),
owner_id=user._id,
index=index,
case_name=const.USER_LOCATION_OWNER_MAP_TYPE.replace('-', ' '),
user_id=const.COMMTRACK_USERNAME,
)
submit_case_blocks(
ElementTree.tostring(
caseblock.as_xml()
),
user.domain,
)
def location_map_case_id(user):
return 'user-owner-mapping-' + user._id
def get_commtrack_location_id(user, domain):
if (
user and
user.get_domain_membership(domain.name) and
user.get_domain_membership(domain.name).location_id and
domain.commtrack_enabled
):
return user.get_domain_membership(domain.name).location_id
else:
return None
def get_case_wrapper(data):
return {
const.SUPPLY_POINT_CASE_TYPE: SupplyPointCase,
}.get(data.get('type'), CommCareCase)
def unicode_slug(text):
return slugify(unicode(unidecode(text)))
def encode_if_needed(val):
return val.encode("utf8") if isinstance(val, unicode) else val
def _fetch_ending_numbers(s):
matcher = re.compile("\d*$")
return matcher.search(s).group()
def generate_code(object_name, existing_codes):
if not object_name:
object_name = 'no name'
matcher = re.compile("[\W\d]+")
name_slug = matcher.sub(
'_',
unicode_slug(object_name.lower())
).strip('_')
postfix = _fetch_ending_numbers(object_name)
while name_slug + postfix in existing_codes:
if postfix:
postfix = str(int(postfix) + 1)
else:
postfix = '1'
return name_slug + postfix
```
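A minimal usage sketch of two of the pure helpers above, `generate_code` and `due_date_monthly`. It assumes the module is importable as `corehq.apps.commtrack.util` in a configured CommCare HQ environment, and the `due_date_monthly` results assume "today" is 2015-08-27, since the function reads `date.today()` internally.
```python
from corehq.apps.commtrack.util import generate_code, due_date_monthly

# generate_code slugifies the name and bumps a numeric postfix until the code is unique.
generate_code("Test Facility", set())                # -> 'test_facility'
generate_code("Test Facility", {"test_facility"})    # -> 'test_facility1'

# due_date_monthly: if today were 2015-08-27, the 15th has already passed this month,
# so the next due date rolls over to the following month.
due_date_monthly(15)                 # -> date(2015, 9, 15)  (with "today" as above)
due_date_monthly(15, past_period=1)  # -> date(2015, 8, 15)
```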
#### File: management/commands/convert_custom_location_data.py
```python
from django.core.management.base import BaseCommand
from corehq.apps.custom_data_fields import models as cdm
from corehq.apps.locations.models import Location
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
"""
Create a CustomDataFieldsDefinition based on existing custom location
information on each domain
"""
help = ''
def handle(self, *args, **options):
for domain in Domain.get_all_names():
fields_definition = cdm.CustomDataFieldsDefinition.get_or_create(
domain,
'LocationFields'
)
had_fields = bool(fields_definition.fields)
existing_field_slugs = set([field.slug for field in fields_definition.fields])
for location in Location.by_domain(domain):
location_data = location.metadata
for key in location_data.keys():
if (key and key not in existing_field_slugs
and not cdm.is_system_key(key)):
existing_field_slugs.add(key)
fields_definition.fields.append(cdm.CustomDataField(
slug=key,
label=key,
is_required=False,
))
for field in fields_definition.fields:
if cdm.is_system_key(field.slug):
fields_definition.fields.remove(field)
# Only save a definition for domains which use custom location data
if fields_definition.fields or had_fields:
fields_definition.save()
print 'finished domain "{}"'.format(domain)
```
#### File: apps/data_analytics/malt_generator.py
```python
import logging
from corehq.apps.app_manager.const import AMPLIFIES_NOT_SET
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.data_analytics.models import MALTRow
from corehq.apps.domain.models import Domain
from corehq.apps.smsforms.app import COMMCONNECT_DEVICE_ID
from corehq.apps.sofabed.models import FormData, MISSING_APP_ID
from corehq.apps.users.util import DEMO_USER_ID, JAVA_ADMIN_USERNAME
from corehq.util.quickcache import quickcache
from django.db import IntegrityError
from django.db.models import Count
from django.http.response import Http404
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MALTTableGenerator(object):
"""
Populates the SQL table with data for a given list of monthly datespans.
See .models.MALTRow
"""
def __init__(self, datespan_object_list):
self.monthspan_list = datespan_object_list
def build_table(self):
for domain in Domain.get_all():
malt_rows_to_save = []
logger.info("Building MALT for {}".format(domain.name))
all_users_by_id = {user._id: user for user in domain.all_users()}
for monthspan in self.monthspan_list:
try:
malt_rows_to_save.extend(self._get_malt_row_dicts(domain.name, monthspan, all_users_by_id))
except Exception as ex:
logger.error("Failed to get rows for domain {name}. Exception is {ex}".format
(name=domain.name, ex=str(ex)), exc_info=True)
self._save_to_db(malt_rows_to_save, domain._id)
def _get_malt_row_dicts(self, domain_name, monthspan, all_users_by_id):
malt_row_dicts = []
forms_query = self._get_forms_queryset(domain_name, monthspan)
apps_submitted_for = forms_query.values('app_id', 'user_id', 'username').annotate(
num_of_forms=Count('instance_id')
)
for app_row_dict in apps_submitted_for:
app_id = app_row_dict['app_id']
num_of_forms = app_row_dict['num_of_forms']
try:
wam, pam, is_app_deleted = self._app_data(domain_name, app_id)
user_id, username, user_type, email = self._user_data(
app_row_dict['user_id'],
app_row_dict['username'],
all_users_by_id
)
except Exception as ex:
logger.error("Failed to get rows for user {id}, app {app_id}. Exception is {ex}".format
(id=app_row_dict['user_id'], app_id=app_id, ex=str(ex)), exc_info=True)
continue
malt_dict = {
'month': monthspan.startdate,
'user_id': user_id,
'username': username,
'email': email,
'user_type': user_type,
'domain_name': domain_name,
'num_of_forms': num_of_forms,
'app_id': app_id,
'wam': MALTRow.AMPLIFY_COUCH_TO_SQL_MAP.get(wam, MALTRow.NOT_SET),
'pam': MALTRow.AMPLIFY_COUCH_TO_SQL_MAP.get(pam, MALTRow.NOT_SET),
'is_app_deleted': is_app_deleted,
}
malt_row_dicts.append(malt_dict)
return malt_row_dicts
@classmethod
def _save_to_db(cls, malt_rows_to_save, domain_id):
try:
MALTRow.objects.bulk_create(
[MALTRow(**malt_dict) for malt_dict in malt_rows_to_save]
)
except IntegrityError:
# no update_or_create in django-1.6
for malt_dict in malt_rows_to_save:
cls._update_or_create(malt_dict)
except Exception as ex:
logger.error("Failed to insert rows for domain with id {id}. Exception is {ex}".format(
id=domain_id, ex=str(ex)), exc_info=True)
@classmethod
def _update_or_create(cls, malt_dict):
try:
# try update
unique_field_dict = {k: v
for (k, v) in malt_dict.iteritems()
if k in MALTRow.get_unique_fields()}
prev_obj = MALTRow.objects.get(**unique_field_dict)
for k, v in malt_dict.iteritems():
setattr(prev_obj, k, v)
prev_obj.save()
except MALTRow.DoesNotExist:
# create
try:
MALTRow(**malt_dict).save()
except Exception as ex:
logger.error("Failed to insert malt-row {}. Exception is {}".format(
str(malt_dict),
str(ex)
), exc_info=True)
except Exception as ex:
logger.error("Failed to insert malt-row {}. Exception is {}".format(
str(malt_dict),
str(ex)
), exc_info=True)
def _get_forms_queryset(self, domain_name, monthspan):
start_date = monthspan.computed_startdate
end_date = monthspan.computed_enddate
return FormData.objects.exclude(
device_id=COMMCONNECT_DEVICE_ID,
).filter(
domain=domain_name,
received_on__range=(start_date, end_date)
)
@classmethod
@quickcache(['domain', 'app_id'])
def _app_data(cls, domain, app_id):
try:
app = get_app(domain, app_id)
except Http404:
if app_id != MISSING_APP_ID:  # compare by value, not identity
logger.debug("App not found %s" % app_id)
return (AMPLIFIES_NOT_SET, AMPLIFIES_NOT_SET, False)
return (getattr(app, 'amplifies_workers', AMPLIFIES_NOT_SET),
getattr(app, 'amplifies_project', AMPLIFIES_NOT_SET),
app.is_deleted())
@classmethod
def _user_data(cls, user_id, username, all_users_by_id):
if user_id in all_users_by_id:
user = all_users_by_id[user_id]
return (user._id, user.username, user.doc_type, user.email)
elif user_id == DEMO_USER_ID:
return (user_id, username, 'DemoUser', '')
elif username == JAVA_ADMIN_USERNAME:
return (user_id, username, 'AdminUser', '')
else:
return (user_id, username, 'UnknownUser', '')
```
#### File: apps/domain/auth.py
```python
import base64
import re
from django.contrib.auth import authenticate
from django.http import HttpResponse
from tastypie.authentication import ApiKeyAuthentication
J2ME = 'j2me'
ANDROID = 'android'
def determine_authtype_from_header(request, default=None):
"""
Guess the auth type, based on the headers found in the request.
"""
auth_header = (request.META.get('HTTP_AUTHORIZATION') or '').lower()
if auth_header.startswith('basic '):
return 'basic'
elif auth_header.startswith('digest '):
return 'digest'
elif all(ApiKeyAuthentication().extract_credentials(request)):
return 'api_key'
return default
def determine_authtype_from_request(request, default='basic'):
"""
Guess the auth type, based on the (phone's) user agent or the
headers found in the request.
"""
user_agent = request.META.get('HTTP_USER_AGENT')
type_to_auth_map = {
J2ME: 'digest',
ANDROID: 'basic',
}
user_type = guess_phone_type_from_user_agent(user_agent)
if user_type is not None:
return type_to_auth_map.get(user_type, default)
else:
return determine_authtype_from_header(request, default=default)
def guess_phone_type_from_user_agent(user_agent):
"""
A really dumb utility that guesses the phone type based on the user-agent header.
"""
j2me_pattern = '[Nn]okia|NOKIA|CLDC|cldc|MIDP|midp|Series60|Series40|[Ss]ymbian|SymbOS|[Mm]aemo'
if user_agent:
if re.search(j2me_pattern, user_agent):
return J2ME
elif 'Android' in user_agent:
return ANDROID
return None
def basicauth(realm=''):
# stolen and modified from: https://djangosnippets.org/snippets/243/
def real_decorator(view):
def wrapper(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == "basic":
uname, passwd = base64.b64decode(auth[1]).split(':', 1)
user = authenticate(username=uname, password=passwd)
if user is not None and user.is_active:
request.user = user
return view(request, *args, **kwargs)
# Either they did not provide an authorization header or
# something in the authorization attempt failed. Send a 401
# back to them to ask them to authenticate.
response = HttpResponse(status=401)
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
return wrapper
return real_decorator
```
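A small usage sketch of the phone-type guessing above. This is a hypothetical example, assuming the module is importable as `corehq.apps.domain.auth`; the user-agent strings are illustrative only.
```python
from corehq.apps.domain.auth import guess_phone_type_from_user_agent, J2ME, ANDROID

# Nokia / MIDP style user agents match the j2me pattern -> J2ME (mapped to digest auth)
guess_phone_type_from_user_agent("Nokia6300/2.0 Profile/MIDP-2.0 Configuration/CLDC-1.1")  # -> J2ME

# Android user agents are detected by the literal 'Android' substring -> ANDROID (basic auth)
guess_phone_type_from_user_agent("Dalvik/1.6.0 (Linux; U; Android 4.4.2; Nexus 5 Build)")  # -> ANDROID

# Anything else, including a missing header, falls through to None
guess_phone_type_from_user_agent("Mozilla/5.0 (Windows NT 6.1)")  # -> None
guess_phone_type_from_user_agent(None)                            # -> None
```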
#### File: management/commands/bootstrap.py
```python
from django.core.management.base import LabelCommand, CommandError
from corehq.apps.domain.models import Domain
from django.conf import settings
class Command(LabelCommand):
help = "Bootstrap a domain and user who owns it."
args = "<domain> <email> <password>"
label = ""
def handle(self, *args, **options):
from corehq.apps.users.models import WebUser
if len(args) != 3:
raise CommandError('Usage: manage.py bootstrap <domain> <email> <password>')
domain_name, username, passwd = args
domain = Domain.get_or_create_with_name(domain_name, is_active=True)
couch_user = WebUser.create(domain_name, username, passwd)
couch_user.add_domain_membership(domain_name, is_admin=True)
couch_user.is_superuser = True
couch_user.is_staff = True
couch_user.save()
print "user %s created and added to domain %s" % (couch_user.username, domain)
if not getattr(settings, 'BASE_ADDRESS', None):
print ("Warning: You must set BASE_ADDRESS setting "
"in your localsettings.py file in order for commcare-hq "
"to be able to generate absolute urls. "
"This is necessary for a number of features.")
```
#### File: management/commands/migrate_domain_countries.py
```python
from django.core.management.base import LabelCommand
from django_countries.data import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {v.lower(): k for k, v in COUNTRIES.iteritems()}
# Special cases (keys must be lowercase, since the lookup below lowercases the country name)
country_lookup["usa"] = country_lookup["united states"]
country_lookup["california"] = country_lookup["united states"]
country_lookup["wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
if domain.deployment._doc.get('countries', None):
continue
try:
country = None
if domain.deployment._doc.get('country', None):
country = domain.deployment._doc['country']
elif domain._doc.get('country', None):
country = domain._doc['country']
if country:
if ',' in country:
countries = country.split(',')
elif ' and ' in country:
countries = country.split(' and ')
else:
countries = [country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s" % e
```
#### File: management/commands/pre_auth_submissions.py
```python
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import LabelCommand
from corehq.apps.domain.models import Domain
from corehq.apps.app_manager.models import ApplicationBase
class Command(LabelCommand):
def handle(self, *args, **options):
db = Domain.get_db()
def get_doc_ids():
for result in db.view(
'domain/domains',
reduce=False).all():
yield result['id']
for result in ApplicationBase.get_db().view(
'app_manager/applications',
startkey=[None],
endkey=[None, {}],
reduce=False):
yield result['id']
for doc in iter_docs(db, get_doc_ids()):
if 'secure_submissions' not in doc:
print 'Updated', doc.get('doc_type'), doc.get('_id')
doc['secure_submissions'] = False
db.save_doc(doc)
```
#### File: management/commands/copy_doc.py
```python
import os
from couchdbkit import Database
from dimagi.utils.couch.database import get_db
from django.core.management.base import LabelCommand, CommandError
from corehq.apps.domainsync.config import DocumentTransform, save
class Command(LabelCommand):
help = ("Copy couch docs given as comma-separated list of IDs or path to file containing one ID per line. "
"If domain is supplied save the doc with the given domain instead of its original domain.")
args = '<sourcedb> <doc_ids_or_file_path> (<domain>)'
label = ""
def handle(self, *args, **options):
if len(args) < 2 or len(args) > 3:
raise CommandError('Usage is copy_doc %s' % self.args)
sourcedb = Database(args[0])
doc_ids_or_file = args[1]
domain = args[2] if len(args) == 3 else None
if os.path.isfile(doc_ids_or_file):
with open(doc_ids_or_file) as f:
doc_ids = f.read().splitlines()
else:
doc_ids = doc_ids_or_file.split(',')
print "Starting copy of {} docs".format(len(doc_ids))
for doc_id in doc_ids:
print 'Copying doc: {}'.format(doc_id)
doc_json = sourcedb.get(doc_id)
if domain:
doc_json['domain'] = domain
dt = DocumentTransform(doc_json, sourcedb)
save(dt, get_db())
```
#### File: management/commands/copy_utils.py
```python
from casexml.apps.stock.models import StockReport, StockTransaction, DocDomainMapping
from corehq.apps.products.models import SQLProduct
from phonelog.models import DeviceReportEntry
def copy_postgres_data_for_docs(remote_postgres_slug, doc_ids, simulate=False):
"""
Copies a set of data associated with a list of doc-ids from a remote postgres
database to the locally configured one.
"""
# can make this more configurable or less hard coded eventually
# also note that ordering here is important for foreign key dependencies
postgres_models = [
(SQLProduct, 'product_id'),
(StockReport, 'form_id'),
(StockTransaction, 'case_id'),
(DocDomainMapping, 'doc_id'),
# StockState objects are "derived" and get created by StockTransaction post_save signal.
# We may want to directly port these over in the future.
# (StockState, 'case_id'),
(DeviceReportEntry, 'xform_id'),
]
for model, doc_field in postgres_models:
query_set = model.objects.using(remote_postgres_slug).filter(
**{'{}__in'.format(doc_field): doc_ids}
)
count = query_set.count()
print "Copying {} models ({})".format(model.__name__, count)
if not simulate:
for i, item in enumerate(query_set):
# this can cause primary key conflicts to overwrite local data I think. Oh well?
item.save(using='default')
print 'Synced {}/{} {}'.format(i + 1, count, model.__name__)
```
#### File: domainsync/tests/test_deidentification.py
```python
from django.test import TestCase
from corehq.apps.hqadmin.dbaccessors import get_all_forms_in_all_domains
import os
import json
from corehq.form_processor.interfaces import FormProcessorInterface
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import get_db
from ..config import DocumentTransform
from ..deidentification.forms import deidentify_form
class FormDeidentificationTestCase(TestCase):
def setUp(self):
for item in get_all_forms_in_all_domains():
item.delete()
def testCRSReg(self):
file_path = os.path.join(os.path.dirname(__file__), "data", "crs_reg.xml")
with open(file_path, "rb") as f:
xml_data = f.read()
instance = FormProcessorInterface.post_xform(xml_data)
instance = XFormInstance.get(instance.id)
transform = DocumentTransform(instance._doc, get_db())
self.assertTrue("IDENTIFIER" in json.dumps(transform.doc))
self.assertTrue("IDENTIFIER" in transform.attachments["form.xml"])
deidentified = deidentify_form(transform)
self.assertTrue("IDENTIFIER" not in json.dumps(deidentified.doc))
self.assertTrue("IDENTIFIER" not in deidentified.attachments["form.xml"])
def testCRSChecklist(self):
file_path = os.path.join(os.path.dirname(__file__), "data", "crs_checklist.xml")
with open(file_path, "rb") as f:
xml_data = f.read()
instance = FormProcessorInterface.post_xform(xml_data)
instance = XFormInstance.get(instance.id)
transform = DocumentTransform(instance._doc, get_db())
self.assertTrue("IDENTIFIER" in json.dumps(transform.doc))
self.assertTrue("IDENTIFIER" in transform.attachments["form.xml"])
self.assertTrue("YESNO" in json.dumps(transform.doc))
self.assertTrue("YESNO" in transform.attachments["form.xml"])
deidentified = deidentify_form(transform)
self.assertTrue("IDENTIFIER" not in json.dumps(deidentified.doc))
self.assertTrue("IDENTIFIER" not in deidentified.attachments["form.xml"])
self.assertTrue("YESNO" not in json.dumps(deidentified.doc))
self.assertTrue("YESNO" not in deidentified.attachments["form.xml"])
```
#### File: apps/domain/tasks.py
```python
from celery.schedules import crontab
from celery.task import periodic_task
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from corehq.apps.domain.views import EditInternalDomainInfoView
from corehq.apps.es.domains import DomainES
from corehq.apps.es.forms import FormES
from corehq.apps.users.models import WebUser
from dimagi.utils.django.email import send_HTML_email
from dimagi.utils.web import get_url_base
def _domains_over_x_forms(num_forms=200, domains=None):
form_domains = FormES().domain_facet().size(0)
if domains:
form_domains = form_domains.domain(domains)
form_domains = form_domains.run().facet('domain', 'terms')
return {x['term'] for x in form_domains if x['count'] > num_forms}
def _real_incomplete_domains():
incomplete_domains = (
DomainES()
.fields(["name"])
.non_test_domains()
.incomplete_domains()
.run()
.raw_hits
)
return {x['fields']['name'] for x in incomplete_domains}
def incomplete_domains_to_email():
domains = _real_incomplete_domains()
domains = _domains_over_x_forms(domains=list(domains))
email_domains = []
for domain in domains:
users = list(WebUser.get_dimagi_emails_by_domain(domain))
if users:
email_domains.append(
{
"domain_name": domain,
"email_to": users,
"settings_link": get_url_base() + reverse(
EditInternalDomainInfoView.urlname,
args=[domain]
)
}
)
return email_domains
@periodic_task(
run_every=crontab(minute=0, hour=0, day_of_week="monday", day_of_month="15-21"),
queue='background_queue'
)
def fm_reminder_email():
"""
Reminds FMs to update their domains with up to date information
"""
email_domains = incomplete_domains_to_email()
for domain in email_domains:
email_content = render_to_string(
'domain/email/fm_outreach.html', domain)
email_content_plaintext = render_to_string(
'domain/email/fm_outreach.txt', domain)
send_HTML_email(
"Please update your project settings for " + domain['domain_name'],
domain['email_to'],
email_content,
email_from=settings.MASTER_LIST_EMAIL,
text_content=email_content_plaintext,
cc=[settings.MASTER_LIST_EMAIL],
)
def incomplete_self_started_domains():
"""
Returns domains that have submitted more than 200 forms but haven't filled out any
project information.
"""
domains = _real_incomplete_domains()
domains = _domains_over_x_forms(domains=list(domains))
email_domains = []
for domain in domains:
users = list(WebUser.get_dimagi_emails_by_domain(domain))
if not users:
email_domains.append(domain)
return email_domains
@periodic_task(
run_every=crontab(minute=0, hour=0, day_of_week="monday", day_of_month="15-21"),
queue='background_queue',
)
def self_starter_email():
"""
Emails MASTER_LIST_EMAIL a list of incomplete self-started domains.
Doesn't actually look at a self-started attribute.
"""
domains = incomplete_self_started_domains()
if len(domains) > 0:
email_content = render_to_string(
'domain/email/self_starter.html', {'domains': domains})
email_content_plaintext = render_to_string(
'domain/email/self_starter.txt', {'domains': domains})
send_HTML_email(
"Incomplete Self Started Domains",
settings.MASTER_LIST_EMAIL,
email_content,
text_content=email_content_plaintext,
)
```
#### File: domain/tests/test_delete_domain.py
```python
from datetime import datetime
from django.test import TestCase
from casexml.apps.stock.models import DocDomainMapping, StockReport, StockTransaction
from corehq.apps.domain.models import Domain
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.locations.models import Location, LocationType, SQLLocation
from corehq.apps.products.models import Product, SQLProduct
class TestDeleteDomain(TestCase):
def _create_data(self, domain_name, i):
product = Product(domain=domain_name, name='test-{}'.format(i))
product.save()
location = Location(
domain=domain_name,
site_code='testcode-{}'.format(i),
name='test-{}'.format(i),
location_type='facility'
)
location.save()
SupplyPointCase.create_from_location(domain_name, location)
report = StockReport.objects.create(
type='balance',
domain=domain_name,
form_id='fake',
date=datetime.utcnow()
)
StockTransaction.objects.create(
report=report,
product_id=product.get_id,
sql_product=SQLProduct.objects.get(product_id=product.get_id),
section_id='stock',
type='stockonhand',
case_id=location.linked_supply_point().get_id,
stock_on_hand=100
)
def setUp(self):
self.domain = Domain(name="test", is_active=True)
self.domain.save()
self.domain2 = Domain(name="test2", is_active=True)
self.domain2.save()
LocationType.objects.create(
domain='test',
name='facility',
)
LocationType.objects.create(
domain='test2',
name='facility',
)
LocationType.objects.create(
domain='test',
name='facility2',
)
LocationType.objects.create(
domain='test2',
name='facility2',
)
for i in xrange(2):
self._create_data('test', i)
self._create_data('test2', i)
def _assert_sql_counts(self, domain, number):
self.assertEqual(StockTransaction.objects.filter(report__domain=domain).count(), number)
self.assertEqual(StockReport.objects.filter(domain=domain).count(), number)
self.assertEqual(SQLLocation.objects.filter(domain=domain).count(), number)
self.assertEqual(SQLProduct.objects.filter(domain=domain).count(), number)
self.assertEqual(DocDomainMapping.objects.filter(domain_name=domain).count(), number)
self.assertEqual(LocationType.objects.filter(domain=domain).count(), number)
def test_sql_objects_deletion(self):
self._assert_sql_counts('test', 2)
self.domain.delete()
self._assert_sql_counts('test', 0)
self._assert_sql_counts('test2', 2)
def tearDown(self):
self.domain2.delete()
```
#### File: apps/es/apps.py
```python
from .es_query import HQESQuery
from . import filters
class AppES(HQESQuery):
index = 'apps'
@property
def builtin_filters(self):
return [
is_build,
is_released,
created_from_template,
uses_case_sharing,
cloudcare_enabled,
] + super(AppES, self).builtin_filters
def is_build(build=True):
filter = filters.empty('copy_of')
if build:
return filters.NOT(filter)
return filter
def is_released(released=True):
return filters.term('is_released', released)
def created_from_template(from_template=True):
filter = filters.empty('created_from_template')
if from_template:
return filters.NOT(filter)
return filter
def uses_case_sharing(case_sharing=True):
return filters.term('case_sharing', case_sharing)
def cloudcare_enabled(cloudcare_enabled):
return filters.term('cloudcare_enabled', cloudcare_enabled)
```
#### File: apps/es/cases.py
```python
from .es_query import HQESQuery
from . import filters
class CaseES(HQESQuery):
index = 'cases'
@property
def builtin_filters(self):
return [
opened_range,
closed_range,
is_closed,
case_type,
owner,
active_in_range,
] + super(CaseES, self).builtin_filters
def opened_range(gt=None, gte=None, lt=None, lte=None):
return filters.date_range('opened_on', gt, gte, lt, lte)
def closed_range(gt=None, gte=None, lt=None, lte=None):
return filters.date_range('closed_on', gt, gte, lt, lte)
def is_closed(closed=True):
return filters.term('closed', closed)
def case_type(type_):
return filters.term('type.exact', type_)
def owner(owner_id):
return filters.term('owner_id', owner_id)
def active_in_range(gt=None, gte=None, lt=None, lte=None):
"""Restricts cases returned to those with actions during the range"""
return filters.nested(
"actions",
filters.date_range("actions.date", gt, gte, lt, lte)
)
```
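A hypothetical sketch of composing the builtin filters defined above. It assumes the standard HQESQuery behavior in which each builtin filter is exposed as a chainable method on the query object, and an Elasticsearch-backed HQ environment for actually running it; the case type and owner id are placeholders.
```python
from datetime import datetime
from corehq.apps.es.cases import CaseES

# Compose the builtin filters defined above; each call narrows the query.
query = (
    CaseES()
    .case_type('pregnancy')
    .is_closed(False)
    .opened_range(gte=datetime(2015, 1, 1), lt=datetime(2015, 7, 1))
    .owner('some-owner-id')
)

# Executing the query requires a configured Elasticsearch backend.
# results = query.run()
```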
#### File: export/tests/test_form_schema.py
```python
import os
from couchdbkit.exceptions import ResourceConflict
from django.test.testcases import SimpleTestCase
from fakecouch import FakeCouchDb
from jsonobject.exceptions import BadValueError
from corehq.apps.app_manager.models import Application
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.apps.reports.models import FormQuestionSchema
class FormQuestionSchemaTest(SimpleTestCase, TestXmlMixin):
file_path = ['data']
root = os.path.dirname(__file__)
def test(self):
app = Application.wrap(self.get_json('question_schema_test_app'))
app._id = '123'
app.version = 1
xmlns = 'http://openrosa.org/formdesigner/284D3F7C-9C10-48E6-97AC-C37927CBA89A'
schema = FormQuestionSchema(xmlns=xmlns)
schema.update_for_app(app)
self.assertIn(app.get_id, schema.processed_apps)
self.assertEqual(app.version, schema.last_processed_version)
self.assertEqual(schema.question_schema['form.multi_root'].options, ['item1', 'item2', 'item3'])
self.assertEqual(schema.question_schema['form.group1.multi_level1'].options, ['item1', 'item2'])
self.assertEqual(schema.question_schema['form.group1.question6.multi_level_2'].options, ['item1', 'item2'])
self.assertEqual(schema.question_schema['form.repeat_1.multi_level_1_repeat'].options, ['item1', 'item2'])
self.assertEqual(schema.question_schema['form.repeat_1.multi_level_1_repeat'].repeat_context, 'form.repeat_1')
updated_form_xml = self.get_xml('question_schema_update_form')
app.get_form_by_xmlns(xmlns).source = updated_form_xml
app.version = 2
schema.update_for_app(app)
self.assertEqual(1, len(schema.processed_apps))
self.assertIn(app.get_id, schema.processed_apps)
self.assertEqual(app.version, schema.last_processed_version)
self.assertEqual(schema.question_schema['form.new_multi'].options, ['z_first', 'a_last'])
self.assertEqual(schema.question_schema['form.group1.multi_level1'].options, ['item1', 'item2', '1_item'])
class TestGetOrCreateSchema(SimpleTestCase):
def setUp(self):
self.db = FormQuestionSchema.get_db()
self.fakedb = FakeCouchDb()
FormQuestionSchema.set_db(self.fakedb)
self.domain = 'test'
self.app_id = '123'
self.xmlns = 'this_xmlns'
self.schema = FormQuestionSchema(domain=self.domain, app_id=self.app_id, xmlns=self.xmlns)
def tearDown(self):
FormQuestionSchema.set_db(self.db)
def test_required_props(self):
with self.assertRaises(BadValueError):
schema = FormQuestionSchema(app_id=self.app_id, xmlns=self.xmlns)
schema.save()
# with self.assertRaises(BadValueError):
# schema = FormQuestionSchema(domain=self.domain, xmlns=self.xmlns)
# schema.save()
with self.assertRaises(BadValueError):
schema = FormQuestionSchema(domain=self.domain, app_id=self.app_id)
schema.save()
def test_unique_key(self):
self.schema.save()
dupe_schema = FormQuestionSchema(domain=self.domain, app_id=self.app_id, xmlns=self.xmlns)
with self.assertRaises(ResourceConflict):
dupe_schema.save()
def test_get_existing(self):
self.schema.save()
schema = FormQuestionSchema.get_or_create(self.domain, self.app_id, self.xmlns)
self.assertIsNotNone(schema)
self.assertEqual(schema._rev, self.schema._rev)
def test_get_new(self):
self.schema.save()
schema = FormQuestionSchema.get_or_create('new_domain', self.app_id, self.xmlns)
self.assertIsNotNone(schema)
def test_migrate_old(self):
self.schema._id = '123'
self.schema.last_processed_version = 12
self.schema.save()
second_schema = FormQuestionSchema(domain=self.domain, app_id=self.app_id, xmlns=self.xmlns, _id='1234')
second_schema.save()
self.assertEqual(len(self.fakedb.mock_docs), 2)
self.fakedb.add_view(
'form_question_schema/by_xmlns',
[(
{'key': [self.domain, self.app_id, self.xmlns], 'include_docs': True},
[
self.schema.to_json(), second_schema.to_json()
]
)]
)
schema = FormQuestionSchema.get_or_create(self.domain, self.app_id, self.xmlns)
self.assertEqual(schema.last_processed_version, self.schema.last_processed_version)
self.assertNotEqual(schema.get_id, self.schema.get_id)
self.assertNotEqual(schema.get_id, second_schema.get_id)
self.assertEqual(len(self.fakedb.mock_docs), 1)
self.assertTrue(schema.get_id in self.fakedb.mock_docs)
```
#### File: apps/fixtures/utils.py
```python
import re
BAD_SLUG_PATTERN = r"([/\\<>\s])"
def clean_fixture_field_name(field_name):
"""Effectively slugifies a fixture's field name so that we don't send
bad XML back from the phone. Ideally, the fixture name should be
verified as a good slug before using it.
"""
subbed_string = re.sub(BAD_SLUG_PATTERN, '_', field_name)
if subbed_string.startswith('xml'):
subbed_string = subbed_string.replace('xml', '_', 1)
return subbed_string
def is_field_name_invalid(field_name):
return bool(re.search(BAD_SLUG_PATTERN, field_name))
```
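A short usage sketch of the two helpers above, assuming the module is importable as `corehq.apps.fixtures.utils`; the expected outputs in the comments follow from the regex logic shown.
```python
from corehq.apps.fixtures.utils import clean_fixture_field_name, is_field_name_invalid

# Whitespace and the characters / \ < > are replaced with underscores.
clean_fixture_field_name("first name")    # -> 'first_name'
clean_fixture_field_name("price/unit")    # -> 'price_unit'

# A leading 'xml' is also rewritten, since XML element names may not start with it.
clean_fixture_field_name("xmlns attr")    # -> '_ns_attr'

is_field_name_invalid("first name")       # -> True
is_field_name_invalid("first_name")       # -> False
```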
#### File: management/commands/groups_last_modified.py
```python
from django.core.management.base import BaseCommand
from corehq.apps.groups.models import Group
from dimagi.utils.couch.database import iter_docs
from datetime import datetime
class Command(BaseCommand):
help = 'Populate last_modified field for groups'
def handle(self, *args, **options):
self.stdout.write("Processing groups...\n")
relevant_ids = set([r['id'] for r in Group.get_db().view(
'groups/all_groups',
reduce=False,
).all()])
to_save = []
for group in iter_docs(Group.get_db(), relevant_ids):
if 'last_modified' not in group or not group['last_modified']:
print group['_id']
group['last_modified'] = datetime.utcnow().isoformat()
to_save.append(group)
if len(to_save) > 500:
Group.get_db().bulk_save(to_save)
to_save = []
if to_save:
Group.get_db().bulk_save(to_save)
```
#### File: apps/hqadmin/escheck.py
```python
import logging
from datetime import datetime
import time
from couchdbkit import ResourceNotFound
import itertools
from casexml.apps.case.models import CommCareCase
from corehq.apps.domain.models import Domain
from corehq.apps.hqadmin.dbaccessors import iter_all_forms_most_recent_first, \
iter_all_cases_most_recent_first
from corehq.elastic import get_es
from corehq.pillows.mappings.case_mapping import CASE_INDEX
from corehq.pillows.mappings.reportcase_mapping import REPORT_CASE_INDEX
from corehq.pillows.mappings.reportxform_mapping import REPORT_XFORM_INDEX
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX
from couchforms.models import XFormInstance
from django.conf import settings
from dimagi.utils.logging import notify_error
CLUSTER_HEALTH = 'cluster_health'
def check_es_cluster_health():
"""
The color state of the cluster health is just a simple indicator of how a cluster is running.
It'll mainly be useful for finding out if shards are in a good/bad state (red).
There are better realtime tools for monitoring ES clusters which should probably be looked at, specifically paramedic or bigdesk.
"""
ret = {}
es = get_es()
cluster_health = es.get('_cluster/health')
ret[CLUSTER_HEALTH] = cluster_health['status']
return ret
def check_index_by_doc(es_index, db, doc_id, interval=10):
"""
Given a doc, update it in couch (meaningless save that updates rev)
and check to make sure that ES will eventually see it after some arbitrary delay
"""
target_rev = None
try:
couch_doc = db.open_doc(doc_id if doc_id else "")
# due to a way that long polling works we have to save it twice because the pillow
# doesn't seem to pick up on the last line until there is a new one available.
target_revs = []
for i in range(2):
save_results = db.save_doc(couch_doc)
target_revs.append(save_results['rev'])
except ResourceNotFound:
pass
time.sleep(interval)
return _check_es_rev(es_index, doc_id, target_revs)
def is_real_submission(xform_view_row):
"""
helper filter function for filtering hqadmin/forms_over_time
just filters out devicereports
"""
return xform_view_row['doc']['xmlns'] != 'http://code.javarosa.org/devicereport'
def check_reportxform_es_index(doc_id=None, interval=10):
do_check = False
for domain in settings.ES_XFORM_FULL_INDEX_DOMAINS:
domain_doc = Domain.get_by_name(domain)
if domain_doc is not None:
do_check = True
break
if do_check:
db = XFormInstance.get_db()
es_index = REPORT_XFORM_INDEX
check_doc_id = doc_id if doc_id else _get_latest_doc_from_index(es_index, 'received_on')
return check_index_by_doc(es_index, db, check_doc_id, interval=interval)
else:
return {}
def check_xform_es_index(interval=10):
db = XFormInstance.get_db()
forms = iter_all_forms_most_recent_first()
check_doc_id = _get_first_id_or_none(forms, skipfunc=is_real_submission)
return check_index_by_doc(XFORM_INDEX, db, check_doc_id, interval=interval)
def is_case_recent(case_view_row):
"""
Helper filter function for filtering hqadmin/cases_over_time.
The view emits a key of [YYYY, MM]; this just sanity-checks that it's a recent case
and not one wrongly emitted with a future date.
"""
if case_view_row['key'] > [datetime.utcnow().year, datetime.utcnow().month]:
return False
else:
return True
def check_reportcase_es_index(doc_id=None, interval=10):
do_check = False
for domain in settings.ES_CASE_FULL_INDEX_DOMAINS:
domain_doc = Domain.get_by_name(domain)
if domain_doc is not None:
do_check = True
break
if do_check:
db = CommCareCase.get_db()
es_index = REPORT_CASE_INDEX
check_doc_id = doc_id if doc_id else _get_latest_doc_from_index(es_index, sort_field='opened_on')
return check_index_by_doc(es_index, db, check_doc_id, interval=interval)
else:
return {}
def check_case_es_index(interval=10):
db = CommCareCase.get_db()
cases = iter_all_cases_most_recent_first()
check_doc_id = _get_first_id_or_none(cases, skipfunc=is_case_recent)
return check_index_by_doc(CASE_INDEX, db, check_doc_id, interval=interval)
def _get_latest_doc_from_index(es_index, sort_field):
"""
Query the elasticsearch index, sorted descending by the sort field,
and get a doc_id back so we can then do a rev-update check.
This is because there's no view known ahead of time for what's inside the report* indices,
so just get a doc directly from the index and run the modify-and-check workflow on it.
"""
recent_query = {
"filter": {
"match_all": {}
},
"sort": {sort_field: "desc"},
"size": 1
}
es = get_es()
try:
res = es[es_index].get('_search', data=recent_query)
if 'hits' in res:
if 'hits' in res['hits']:
result = res['hits']['hits'][0]
return result['_source']['_id']
except Exception, ex:
logging.error("Error querying get_latest_doc_from_index[%s]: %s" % (es_index, ex))
return None
def _get_first_id_or_none(docs, skipfunc=None):
# don't check more than 5000 docs, not worth it
for doc in itertools.islice(docs, 0, 5000):
if skipfunc(doc):
return doc['id']
return None
def _check_es_rev(index, doc_id, couch_revs):
"""
Specific docid and rev checker.
index: rawes index
doc_id: id to query in ES
couch_revs: list of target couch revs, any one of which counts as a match
"""
es = get_es()
doc_id_query = {
"filter": {
"ids": {"values": [doc_id]}
},
"fields": ["_id", "_rev"]
}
try:
res = es[index].get('_search', data=doc_id_query)
status = False
message = "Not in sync"
if 'hits' in res:
if res['hits'].get('total', 0) == 0:
status = False
# if doc doesn't exist it's def. not in sync
message = "Not in sync %s" % index
elif 'hits' in res['hits']:
fields = res['hits']['hits'][0]['fields']
if fields['_rev'] in couch_revs:
status = True
message = "%s OK" % index
else:
status = False
# less likely, but if it's there but the rev is off
message = "Not in sync - %s stale" % index
else:
status = False
message = "Not in sync - query failed"
notify_error("%s: %s" % (message, str(res)))
except Exception, ex:
message = "ES Error: %s" % ex
status = False
return {index: {"index": index, "status": status, "message": message}}
```
#### File: management/commands/record_deploy_success.py
```python
import json
from datadog import api as datadog_api
import requests
from django.core.management import call_command
from django.template.loader import render_to_string
from dimagi.utils import gitinfo
from django.core.management.base import BaseCommand
from corehq.apps.hqadmin.models import HqDeploy
from datetime import datetime
from optparse import make_option
from django.conf import settings
from pillow_retry.models import PillowError
STYLE_MARKDOWN = 'markdown'
STYLE_SLACK = 'slack'
def diff_link(style, url):
if style == STYLE_MARKDOWN:
return '[here]({})'.format(url)
elif style == STYLE_SLACK:
return '<{}|here>'.format(url)
class Command(BaseCommand):
help = "Creates an HqDeploy document to record a successful deployment."
args = "[user]"
option_list = BaseCommand.option_list + (
make_option('--user', help='User', default=False),
make_option('--environment', help='Environment {production|staging etc...}', default=settings.SERVER_ENVIRONMENT),
make_option('--mail_admins', help='Mail Admins', default=False, action='store_true'),
make_option('--url', help='A link to a URL for the deploy', default=False),
)
def handle(self, *args, **options):
root_dir = settings.FILEPATH
git_snapshot = gitinfo.get_project_snapshot(root_dir, submodules=True)
git_snapshot['diff_url'] = options.get('url', None)
deploy = HqDeploy(
date=datetime.utcnow(),
user=options['user'],
environment=options['environment'],
code_snapshot=git_snapshot,
)
deploy.save()
# reset PillowTop errors in the hope that a fix has been deployed
rows_updated = PillowError.bulk_reset_attempts(datetime.utcnow())
if rows_updated:
print "\n---------------- Pillow Errors Reset ----------------\n" \
"{} pillow errors queued for retry\n".format(rows_updated)
deploy_notification_text = (
"CommCareHQ has been successfully deployed to *{}* by *{}*. "
"Find the diff {{diff_link}}".format(
options['environment'],
options['user'],
)
)
if hasattr(settings, 'MIA_THE_DEPLOY_BOT'):
link = diff_link(STYLE_SLACK, git_snapshot['diff_url'])
requests.post(settings.MIA_THE_DEPLOY_BOT, data=json.dumps({
"channel": "#dev",
"username": "Mia the Deploy Bot",
"text": deploy_notification_text.format(diff_link=link),
"icon_emoji": ":see_no_evil:"
}))
if settings.DATADOG_API_KEY:
tags = ['environment:{}'.format(options['environment'])]
link = diff_link(STYLE_MARKDOWN, git_snapshot['diff_url'])
datadog_api.Event.create(
title="Deploy Success",
text=deploy_notification_text.format(diff_link=link),
tags=tags
)
if options['mail_admins']:
snapshot_table = render_to_string('hqadmin/partials/project_snapshot.html', dictionary={'snapshot': git_snapshot})
message = "Deployed by %s, cheers!" % options['user']
snapshot_body = "<html><head><title>Deploy Snapshot</title></head><body><h2>%s</h2>%s</body></html>" % (message, snapshot_table)
call_command('mail_admins', snapshot_body, **{'subject': 'Deploy successful', 'html': True})
```
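A quick sketch of what `diff_link` produces for each style; the compare URL is illustrative and the import path assumes the command module lives under `corehq.apps.hqadmin`:
```python
from corehq.apps.hqadmin.management.commands.record_deploy_success import (
    diff_link, STYLE_MARKDOWN, STYLE_SLACK)

url = 'https://github.com/dimagi/commcare-hq/compare/abc123...def456'  # illustrative
assert diff_link(STYLE_MARKDOWN, url) == '[here](%s)' % url  # markdown link
assert diff_link(STYLE_SLACK, url) == '<%s|here>' % url      # slack link markup
```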
#### File: apps/hqadmin/models.py
```python
from django.db import models
from dimagi.ext.couchdbkit import *
from dimagi.utils.parsing import json_format_datetime
from pillowtop.utils import get_pillow_by_name
class HqDeploy(Document):
date = DateTimeProperty()
user = StringProperty()
environment = StringProperty()
code_snapshot = DictProperty()
@classmethod
def get_latest(cls, environment, limit=1):
result = HqDeploy.view(
'hqadmin/deploy_history',
startkey=[environment, {}],
endkey=[environment],
reduce=False,
limit=limit,
descending=True,
include_docs=True
)
return result.all()
@classmethod
def get_list(cls, environment, startdate, enddate, limit=50):
return HqDeploy.view(
'hqadmin/deploy_history',
startkey=[environment, json_format_datetime(startdate)],
endkey=[environment, json_format_datetime(enddate)],
reduce=False,
limit=limit,
include_docs=False
).all()
class PillowCheckpointSeqStore(models.Model):
seq = models.TextField()
checkpoint_id = models.CharField(max_length=255, db_index=True)
date_updated = models.DateTimeField(auto_now=True)
@classmethod
def get_by_pillow_name(cls, pillow_name):
try:
pillow = get_pillow_by_name(pillow_name)
except ValueError:
# Could not find the pillow
return None
if not pillow:
return None
try:
store = cls.objects.get(checkpoint_id=pillow.get_checkpoint()['_id'])
except cls.DoesNotExist:
return None
return store
```
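A minimal usage sketch for the two models above, assuming a configured CommCare HQ environment; the environment name and pillow path are illustrative:
```python
from corehq.apps.hqadmin.models import HqDeploy, PillowCheckpointSeqStore

# Most recent deploy recorded for an environment (empty list if none yet)
latest = HqDeploy.get_latest('production', limit=1)
last_deploy_date = latest[0].date if latest else None

# Stored checkpoint sequence for a pillow, or None if it cannot be resolved
store = PillowCheckpointSeqStore.get_by_pillow_name(
    'corehq.apps.hqadmin.tests.test_utils.DummyPillow')
current_seq = store.seq if store is not None else None
```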
#### File: hqadmin/tests/test_utils.py
```python
from django.test import TestCase, override_settings
from pillowtop.listener import BasicPillow
from corehq.apps.domain.models import Domain
from ..utils import pillow_seq_store, EPSILON
from ..models import PillowCheckpointSeqStore
class DummyPillow(BasicPillow):
document_class = Domain
def run(self):
pass
@override_settings(PILLOWTOPS={'test': ['corehq.apps.hqadmin.tests.test_utils.DummyPillow']})
class TestPillowCheckpointSeqStore(TestCase):
def setUp(self):
self.pillow = DummyPillow()
def test_basic_cloudant_seq(self):
seq = '1-blahblah'
self.pillow.set_checkpoint({'seq': seq})
pillow_seq_store()
store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.checkpoint.checkpoint_id)
self.assertEquals(store.seq, seq)
def test_basic_couchdb_seq(self):
seq = 100
self.pillow.set_checkpoint({'seq': seq})
pillow_seq_store()
store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.checkpoint.checkpoint_id)
self.assertEquals(store.seq, str(seq))
def test_small_rewind(self):
"""
We should not notify if the seq is not significantly less than the previous
"""
seq = '10-blahblah'
self.pillow.set_checkpoint({'seq': seq})
pillow_seq_store()
seq_rewind = '9-blahblah'
self.pillow.set_checkpoint({'seq': seq_rewind})
pillow_seq_store()
store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.checkpoint.checkpoint_id)
self.assertEquals(store.seq, seq_rewind)
def test_large_rewind(self):
"""
We should notify if the seq is significantly less than the previous and not update the seq
"""
seq = '{}-blahblah'.format(EPSILON + 10)
self.pillow.set_checkpoint({'seq': seq})
pillow_seq_store()
seq_rewind = '9-blahblah'
self.pillow.set_checkpoint({'seq': seq_rewind})
pillow_seq_store()
store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.checkpoint.checkpoint_id)
self.assertEquals(store.seq, seq)
def test_get_by_pillow_name(self):
seq = '10-blahblah'
self.pillow.set_checkpoint({'seq': seq})
pillow_seq_store()
store = PillowCheckpointSeqStore.get_by_pillow_name('corehq.apps.hqadmin.tests.test_utils.DummyPillow')
self.assertIsNotNone(store)
store = PillowCheckpointSeqStore.get_by_pillow_name('DummyPillowThatDoesNotExist')
self.assertIsNone(store)
```
#### File: hqcase/tests/test_dbaccessors.py
```python
from django.test import TestCase
from casexml.apps.case.dbaccessors import get_open_case_docs_in_domain, \
get_open_case_ids_in_domain
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.util import create_real_cases_from_dummy_cases
from corehq.apps.hqcase.dbaccessors import get_number_of_cases_in_domain, \
get_case_ids_in_domain, get_case_types_for_domain, get_cases_in_domain, \
get_case_ids_in_domain_by_owner, get_number_of_cases_in_domain_by_owner, \
get_all_case_owner_ids, get_case_properties
from couchforms.models import XFormInstance
class DBAccessorsTest(TestCase):
@classmethod
def setUpClass(cls):
cls.domain = 'lalksdjflakjsdf'
cases = [
CommCareCase(domain=cls.domain, type='type1', name='Alice', user_id='XXX',
prop_a=True, prop_b=True),
CommCareCase(domain=cls.domain, type='type2', name='Bob', user_id='XXX',
prop_a=True, prop_c=True),
CommCareCase(domain=cls.domain, type='type1', name='Candice', user_id='ZZZ'),
CommCareCase(domain=cls.domain, type='type1', name='Derek', user_id='XXX', closed=True),
CommCareCase(domain='maleficent', type='type1', name='Mallory', user_id='YYY',
prop_y=True)
]
cls.forms, cls.cases = create_real_cases_from_dummy_cases(cases)
assert len(cls.cases) == len(cases)
@classmethod
def tearDownClass(cls):
CommCareCase.get_db().bulk_delete(cls.cases)
XFormInstance.get_db().bulk_delete(cls.forms)
def test_get_number_of_cases_in_domain(self):
self.assertEqual(
get_number_of_cases_in_domain(self.domain),
len([case for case in self.cases if case.domain == self.domain])
)
def test_get_number_of_cases_in_domain__type(self):
self.assertEqual(
get_number_of_cases_in_domain(self.domain, type='type1'),
len([case for case in self.cases
if case.domain == self.domain and case.type == 'type1'])
)
def test_get_case_ids_in_domain(self):
self.assertEqual(
set(get_case_ids_in_domain(self.domain)),
{case.get_id for case in self.cases if case.domain == self.domain}
)
def test_get_case_ids_in_domain__type(self):
self.assertEqual(
set(get_case_ids_in_domain(self.domain, type='type1')),
{case.get_id for case in self.cases
if case.domain == self.domain and case.type == 'type1'}
)
def assert_doc_list_equal(self, doc_list_1, doc_list_2, raw_json=False):
if not raw_json:
doc_list_1 = [doc.to_json() for doc in doc_list_1]
doc_list_2 = [doc.to_json() for doc in doc_list_2]
doc_list_1 = sorted(doc_list_1, key=lambda doc: doc['_id'])
doc_list_2 = sorted(doc_list_2, key=lambda doc: doc['_id'])
self.assertEqual(doc_list_1, doc_list_2)
def test_get_cases_in_domain(self):
self.assert_doc_list_equal(
get_cases_in_domain(self.domain),
[case for case in self.cases if case.domain == self.domain]
)
def test_get_cases_in_domain__type(self):
self.assert_doc_list_equal(
get_cases_in_domain(self.domain, type='type1'),
[case for case in self.cases
if case.domain == self.domain and case.type == 'type1'],
)
def test_get_open_case_ids_in_domain(self):
# this is actually in the 'case' app, but testing here
self.assertEqual(
set(get_open_case_ids_in_domain(self.domain, 'type1')),
{case.get_id for case in self.cases
if case.domain == self.domain and case.type == 'type1'
and not case.closed},
)
def test_get_open_case_ids_in_domain__owner_id(self):
# this is actually in the 'case' app, but testing here
self.assertEqual(
set(get_open_case_ids_in_domain(self.domain, 'type1', owner_id='XXX')),
{case.get_id for case in self.cases
if case.domain == self.domain and case.type == 'type1'
and not case.closed and case.user_id == 'XXX'},
)
self.assertEqual(
set(get_open_case_ids_in_domain(self.domain, owner_id='XXX')),
{case.get_id for case in self.cases
if case.domain == self.domain
and not case.closed and case.user_id == 'XXX'},
)
def test_get_open_case_docs_by_type(self):
# this is actually in the 'case' app, but testing here
self.assert_doc_list_equal(
get_open_case_docs_in_domain(self.domain, 'type1'),
[case.to_json() for case in self.cases
if case.domain == self.domain and case.type == 'type1'
and not case.closed],
raw_json=True
)
def test_get_open_case_docs_by_type__owner_id(self):
# this is actually in the 'case' app, but testing here
self.assert_doc_list_equal(
get_open_case_docs_in_domain(self.domain, 'type1', owner_id='XXX'),
[case.to_json() for case in self.cases
if case.domain == self.domain and case.type == 'type1'
and not case.closed and case.user_id == 'XXX'],
raw_json=True
)
def test_get_case_types_for_domain(self):
self.assertEqual(
set(get_case_types_for_domain(self.domain)),
{case.type for case in self.cases if case.domain == self.domain}
)
def test_get_case_ids_in_domain_by_owner(self):
self.assertEqual(
set(get_case_ids_in_domain_by_owner(self.domain, owner_id='XXX')),
{case.get_id for case in self.cases
if case.domain == self.domain and case.user_id == 'XXX'}
)
self.assertEqual(
set(get_case_ids_in_domain_by_owner(
self.domain, owner_id__in=['XXX'])),
{case.get_id for case in self.cases
if case.domain == self.domain and case.user_id == 'XXX'}
)
self.assertEqual(
set(get_case_ids_in_domain_by_owner(self.domain, owner_id='XXX',
closed=False)),
{case.get_id for case in self.cases
if case.domain == self.domain and case.user_id == 'XXX'
and case.closed is False}
)
self.assertEqual(
set(get_case_ids_in_domain_by_owner(self.domain, owner_id='XXX',
closed=True)),
{case.get_id for case in self.cases
if case.domain == self.domain and case.user_id == 'XXX'
and case.closed is True}
)
def test_get_number_of_cases_in_domain_by_owner(self):
self.assertEqual(
get_number_of_cases_in_domain_by_owner(self.domain, owner_id='XXX'),
len([case for case in self.cases
if case.domain == self.domain and case.user_id == 'XXX'])
)
def test_get_all_case_owner_ids(self):
self.assertEqual(
get_all_case_owner_ids(self.domain),
set(case.user_id for case in self.cases
if case.domain == self.domain)
)
# sanity check!
self.assertEqual(
get_all_case_owner_ids(self.domain),
{'XXX', 'ZZZ'},
)
def test_get_case_properties(self):
self.assertItemsEqual(
get_case_properties(self.domain),
{prop
for case in self.cases if case.domain == self.domain
for action in case.actions
for prop in (action.updated_known_properties.keys() +
action.updated_unknown_properties.keys())}
)
```
#### File: apps/hqcase/utils.py
```python
import datetime
import uuid
from xml.etree import ElementTree
import xml.etree.ElementTree as ET
import re
from couchdbkit import ResourceNotFound
from django.core.files.uploadedfile import UploadedFile
from django.template.loader import render_to_string
from casexml.apps.phone.xml import get_case_xml
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from dimagi.utils.parsing import json_format_datetime
from casexml.apps.case.xml import V2
from casexml.apps.phone.caselogic import get_related_cases
from corehq.apps.hqcase.exceptions import CaseAssignmentError
from corehq.apps.receiverwrapper import submit_form_locally
from casexml.apps.case import const
ALLOWED_CASE_IDENTIFIER_TYPES = [
"contact_phone_number",
"external_id",
]
def submit_case_blocks(case_blocks, domain, username="system", user_id="",
xmlns='http://commcarehq.org/case', attachments=None,
form_id=None):
"""
    Submits case XML blocks in a manner similar to how they would be submitted from a phone.
    Returns the id of the resulting form.
"""
attachments = attachments or {}
now = json_format_datetime(datetime.datetime.utcnow())
if not isinstance(case_blocks, basestring):
case_blocks = ''.join(case_blocks)
form_id = form_id or uuid.uuid4().hex
form_xml = render_to_string('hqcase/xml/case_block.xml', {
'xmlns': xmlns,
'case_block': case_blocks,
'time': now,
'uid': form_id,
'username': username,
'user_id': user_id,
})
submit_form_locally(
instance=form_xml,
domain=domain,
attachments=attachments,
)
return form_id
def get_case_wrapper(data):
from corehq.apps.commtrack.util import get_case_wrapper as commtrack_wrapper
def pact_wrapper(data):
if data['domain'] == 'pact' and data['type'] == 'cc_path_client':
from pact.models import PactPatientCase
return PactPatientCase
wrapper_funcs = [pact_wrapper, commtrack_wrapper]
wrapper = None
for wf in wrapper_funcs:
wrapper = wf(data)
if wrapper is not None:
break
return wrapper
def get_case_by_domain_hq_user_id(domain, user_id, case_type):
"""
Get the 'user case' for user_id. User cases are part of the call center feature.
"""
cases = CommCareCase.view(
'hqcase/by_domain_hq_user_id',
key=[domain, user_id],
reduce=False,
include_docs=True
).all()
for case in cases:
if case.type == case_type:
return case
def get_callcenter_case_mapping(domain, user_ids):
"""
Get the mapping from user_id to 'user case id' for each user in user_ids.
"""
keys = [[domain, user_id] for user_id in user_ids]
rows = CommCareCase.view(
'hqcase/by_domain_hq_user_id',
keys=keys,
reduce=False,
include_docs=False
)
return {r['key'][1]: r['id'] for r in rows}
def get_case_by_identifier(domain, identifier):
# circular import
from corehq.apps.api.es import CaseES
case_es = CaseES(domain)
def _query_by_type(i_type):
q = case_es.base_query(
terms={
i_type: identifier,
},
fields=['_id', i_type],
size=1
)
response = case_es.run_query(q)
raw_docs = response['hits']['hits']
if raw_docs:
return CommCareCase.get(raw_docs[0]['_id'])
# Try by any of the allowed identifiers
for identifier_type in ALLOWED_CASE_IDENTIFIER_TYPES:
case = _query_by_type(identifier_type)
if case is not None:
return case
# Try by case id
try:
case_by_id = CommCareCase.get(identifier)
if case_by_id.domain == domain:
return case_by_id
except (ResourceNotFound, KeyError):
pass
return None
def assign_case(case_or_case_id, owner_id, acting_user=None, include_subcases=True,
include_parent_cases=False, exclude_function=None, update=None):
"""
Assigns a case to an owner. Optionally traverses through subcases and parent cases
and reassigns those to the same owner.
"""
if isinstance(case_or_case_id, basestring):
primary_case = CommCareCase.get(case_or_case_id)
else:
primary_case = case_or_case_id
cases_to_assign = [primary_case]
if include_subcases:
cases_to_assign.extend(get_related_cases([primary_case], primary_case.domain, search_up=False).values())
if include_parent_cases:
cases_to_assign.extend(get_related_cases([primary_case], primary_case.domain, search_up=True).values())
if exclude_function:
cases_to_assign = [c for c in cases_to_assign if not exclude_function(c)]
return assign_cases(cases_to_assign, owner_id, acting_user, update=update)
def assign_cases(caselist, owner_id, acting_user=None, update=None):
"""
Assign all cases in a list to an owner. Won't update if the owner is already
set on the case. Doesn't touch parent cases or subcases.
Returns the list of ids of cases that were reassigned.
"""
if not caselist:
return
    def _assert(condition, msg):
        if not condition:
raise CaseAssignmentError(msg)
from corehq.apps.users.cases import get_wrapped_owner
# "security"
unique_domains = set([c.domain for c in caselist])
_assert(len(unique_domains) == 1, 'case list had cases spanning multiple domains')
[domain] = unique_domains
_assert(domain, 'domain for cases was empty')
owner = get_wrapped_owner(owner_id)
_assert(owner, 'no owner with id "%s" found' % owner_id)
_assert(owner.domain == domain, 'owner was not in domain %s for cases' % domain)
username = acting_user.username if acting_user else 'system'
user_id = acting_user._id if acting_user else 'system'
filtered_cases = set([c for c in caselist if c.owner_id != owner_id])
if filtered_cases:
caseblocks = [ElementTree.tostring(CaseBlock(
create=False,
case_id=c._id,
owner_id=owner_id,
update=update,
).as_xml()) for c in filtered_cases
]
# todo: this should check whether the submit_case_blocks call actually succeeds
submit_case_blocks(caseblocks, domain, username=username,
user_id=user_id)
return [c._id for c in filtered_cases]
def make_creating_casexml(case, new_case_id, new_parent_ids=None):
new_parent_ids = new_parent_ids or {}
old_case_id = case._id
case._id = new_case_id
local_move_back = {}
for index in case.indices:
new = new_parent_ids[index.referenced_id]
old = index.referenced_id
local_move_back[new] = old
index.referenced_id = new
try:
case_block = get_case_xml(case, (const.CASE_ACTION_CREATE, const.CASE_ACTION_UPDATE), version='2.0')
case_block, attachments = _process_case_block(case_block, case.case_attachments, old_case_id)
finally:
case._id = old_case_id
for index in case.indices:
index.referenced_id = local_move_back[index.referenced_id]
return case_block, attachments
def _process_case_block(case_block, attachments, old_case_id):
def get_namespace(element):
        m = re.match(r'\{.*\}', element.tag)
return m.group(0)[1:-1] if m else ''
def local_attachment(attachment, old_case_id, tag):
mime = attachment['server_mime']
size = attachment['attachment_size']
src = attachment['attachment_src']
attachment_meta, attachment_stream = CommCareCase.fetch_case_attachment(old_case_id, tag)
return UploadedFile(attachment_stream, src, size=size, content_type=mime)
# Remove namespace because it makes looking up tags a pain
root = ET.fromstring(case_block)
xmlns = get_namespace(root)
case_block = re.sub(' xmlns="[^"]+"', '', case_block, count=1)
root = ET.fromstring(case_block)
tag = "attachment"
xml_attachments = root.find(tag)
ret_attachments = {}
    if xml_attachments is not None:
for attach in xml_attachments:
attach.attrib['from'] = 'local'
attach.attrib['src'] = attachments[attach.tag]['attachment_src']
ret_attachments[attach.attrib['src']] = local_attachment(attachments[attach.tag], old_case_id, attach.tag)
# Add namespace back in without { } added by ET
root.attrib['xmlns'] = xmlns
return ET.tostring(root), ret_attachments
```
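A minimal sketch of driving `submit_case_blocks` directly, mirroring the `CaseBlock` usage in `assign_cases`; it assumes a configured HQ environment, and the case id, domain, and property are illustrative:
```python
from xml.etree import ElementTree
from casexml.apps.case.mock import CaseBlock
from corehq.apps.hqcase.utils import submit_case_blocks

case_block = ElementTree.tostring(CaseBlock(
    create=False,
    case_id='existing-case-id',          # illustrative
    update={'followup_status': 'done'},  # illustrative case property
).as_xml())

form_id = submit_case_blocks([case_block], 'example-domain', username='system')
```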
#### File: hqwebapp/templatetags/menu_tags.py
```python
from django import template
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from corehq.apps.domain.models import Domain
import corehq.apps.style.utils as style_utils
from corehq.apps.hqwebapp.models import MaintenanceAlert
from corehq.tabs import MENU_TABS
register = template.Library()
def get_active_tab(visible_tabs, request_path):
for is_active_tab_fn in [
lambda t: t.is_active_fast,
lambda t: t.is_active,
lambda t: t.url and request_path.startswith(t.url),
]:
for tab in visible_tabs:
if is_active_tab_fn(tab):
tab.is_active_tab = True
return tab
class MainMenuNode(template.Node):
def render(self, context):
request = context['request']
current_url_name = context['current_url_name']
couch_user = getattr(request, 'couch_user', None)
project = getattr(request, 'project', None)
domain = context.get('domain')
org = context.get('org')
try:
module = Domain.get_module_by_name(domain)
except (ValueError, AttributeError):
module = None
tabs = getattr(module, 'TABS', MENU_TABS)
visible_tabs = []
for tab_class in tabs:
t = tab_class(
request, current_url_name, domain=domain,
couch_user=couch_user, project=project, org=org)
t.is_active_tab = False
if t.real_is_viewable:
visible_tabs.append(t)
# set the context variable in the highest scope so it can be used in
# other blocks
context.dicts[0]['active_tab'] = get_active_tab(visible_tabs,
request.get_full_path())
template = {
style_utils.BOOTSTRAP_2: 'style/bootstrap2/partials/menu_main.html',
style_utils.BOOTSTRAP_3: 'style/bootstrap3/partials/menu_main.html',
}[style_utils.get_bootstrap_version()]
return mark_safe(render_to_string(template, {
'tabs': visible_tabs,
}))
@register.tag(name="format_main_menu")
def format_main_menu(parser, token):
return MainMenuNode()
@register.simple_tag(takes_context=True)
def format_subtab_menu(context):
active_tab = context.get('active_tab', None)
if active_tab and active_tab.subtabs:
subtabs = [t for t in active_tab.subtabs if t.is_viewable]
else:
subtabs = None
return mark_safe(render_to_string("style/bootstrap2/partials/subtab_menu.html", {
'subtabs': subtabs if subtabs and len(subtabs) > 1 else None
}))
@register.simple_tag(takes_context=True)
def format_sidebar(context):
current_url_name = context['current_url_name']
active_tab = context.get('active_tab', None)
request = context['request']
sections = None
if active_tab and active_tab.subtabs:
# if active_tab is active then at least one of its subtabs should have
# is_active == True, but we guard against the possibility of this not
# being the case by setting sections = None above
for s in active_tab.subtabs:
if s.is_active:
sections = s.sidebar_items
break
if sections is None:
for s in active_tab.subtabs:
if s.url and request.get_full_path().startswith(s.url):
sections = s.sidebar_items
break
else:
sections = active_tab.sidebar_items if active_tab else None
if sections:
# set is_active on active sidebar item by modifying nav by reference
# and see if the nav needs a subnav for the current contextual item
for section_title, navs in sections:
for nav in navs:
if (request.get_full_path().startswith(nav['url']) or
request.build_absolute_uri().startswith(nav['url'])):
nav['is_active'] = True
else:
nav['is_active'] = False
if 'subpages' in nav:
for subpage in nav['subpages']:
if subpage['urlname'] == current_url_name:
if callable(subpage['title']):
actual_context = {}
for d in context.dicts:
actual_context.update(d)
subpage['title'] = subpage['title'](**actual_context)
nav['subpage'] = subpage
break
template = {
style_utils.BOOTSTRAP_2: 'style/bootstrap2/partials/navigation_left_sidebar.html',
style_utils.BOOTSTRAP_3: 'style/bootstrap3/partials/navigation_left_sidebar.html',
}[style_utils.get_bootstrap_version()]
return mark_safe(render_to_string(template, {
'sections': sections
}))
@register.simple_tag
def maintenance_alert():
try:
alert = (MaintenanceAlert.objects
.filter(active=True)
.order_by('-modified'))[0]
except IndexError:
return ''
else:
return format_html(
'<div class="alert alert-warning" style="text-align: center; margin-bottom: 0;">{}</div>',
mark_safe(alert.html),
)
```
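A minimal sketch of exercising the `maintenance_alert` tag from a template string; it assumes Django is configured and that this tag library is loadable as `menu_tags`:
```python
from django.template import Context, Template

html = Template("{% load menu_tags %}{% maintenance_alert %}").render(Context({}))
# html is '' when no active MaintenanceAlert exists, otherwise a styled <div>
# containing the alert's html
```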
#### File: hqwebapp/templatetags/proptable_tags.py
```python
import collections
import datetime
import itertools
import types
from corehq.util.dates import iso_string_to_datetime
from dimagi.ext.jsonobject import DateProperty
from jsonobject.exceptions import BadValueError
from dimagi.utils.chunked import chunked
import pytz
from django import template
from django.template.defaultfilters import yesno
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.html import escape, conditional_escape
from corehq.apps.hqwebapp.doc_info import get_doc_info_by_id
from corehq.apps.hqwebapp.templatetags.hq_shared_tags import pretty_doc_info
from corehq.const import USER_DATETIME_FORMAT, USER_DATE_FORMAT
from corehq.util.timezones.conversions import ServerTime, PhoneTime
from dimagi.utils.dates import safe_strftime
register = template.Library()
def _is_list_like(val):
return (isinstance(val, collections.Iterable) and
not isinstance(val, basestring))
def _parse_date_or_datetime(val):
def parse():
if not val:
return None
# datetime is a subclass of date
if isinstance(val, datetime.date):
return val
try:
dt = iso_string_to_datetime(val)
except BadValueError:
try:
return DateProperty().wrap(val)
except BadValueError:
return val
else:
if not any([dt.hour, dt.minute, dt.second, dt.microsecond]):
return dt.date()
else:
return dt
result = parse()
if isinstance(result, datetime.datetime):
assert result.tzinfo is None
return result
def _format_slug_string_for_display(key):
return key.replace('_', ' ').replace('-', ' ')
def _to_html(val, key=None, level=0, timeago=False):
"""
Recursively convert a value to its HTML representation using <dl>s for
dictionaries and <ul>s for lists.
"""
recurse = lambda k, v: _to_html(v, key=k, level=level + 1, timeago=timeago)
def _key_format(k, v):
if not _is_list_like(v):
return _format_slug_string_for_display(k)
else:
return ""
if isinstance(val, types.DictionaryType):
ret = "".join(
["<dl %s>" % ("class='well'" if level == 0 else '')] +
["<dt>%s</dt><dd>%s</dd>" % (_key_format(k, v), recurse(k, v))
for k, v in val.items()] +
["</dl>"])
elif _is_list_like(val):
ret = "".join(
["<dl>"] +
["<dt>%s</dt><dd>%s</dd>" % (key, recurse(None, v)) for v in val] +
["</dl>"])
elif isinstance(val, datetime.date):
if isinstance(val, datetime.datetime):
fmt = USER_DATETIME_FORMAT
else:
fmt = USER_DATE_FORMAT
iso = val.isoformat()
ret = mark_safe("<time %s title='%s' datetime='%s'>%s</time>" % (
"class='timeago'" if timeago else "", iso, iso, safe_strftime(val, fmt)))
else:
if val is None or val == '':
val = '---'
ret = escape(val)
return mark_safe(ret)
def get_display_data(data, prop_def, processors=None, timezone=pytz.utc):
# when prop_def came from a couchdbkit document, it will be a LazyDict with
# a broken pop method. This conversion also has the effect of a shallow
# copy, which we want.
prop_def = dict(prop_def)
default_processors = {
'yesno': yesno,
'doc_info': lambda value: pretty_doc_info(
get_doc_info_by_id(data['domain'], value)
)
}
processors = processors or {}
processors.update(default_processors)
expr = prop_def.pop('expr')
name = prop_def.pop('name', _format_slug_string_for_display(expr))
format = prop_def.pop('format', None)
process = prop_def.pop('process', None)
timeago = prop_def.get('timeago', False)
# todo: nested attributes, jsonpath, indexing into related documents
val = data.get(expr, None)
if prop_def.pop('parse_date', None):
val = _parse_date_or_datetime(val)
# is_utc is deprecated in favor of is_phone_time
# but preserving here for backwards compatibility
# is_utc = False is just reinterpreted as is_phone_time = True
is_phone_time = prop_def.pop('is_phone_time',
not prop_def.pop('is_utc', True))
if isinstance(val, datetime.datetime):
if not is_phone_time:
val = ServerTime(val).user_time(timezone).done()
else:
val = PhoneTime(val, timezone).user_time(timezone).done()
try:
val = conditional_escape(processors[process](val))
except KeyError:
val = mark_safe(_to_html(val, timeago=timeago))
if format:
val = mark_safe(format.format(val))
return {
"expr": expr,
"name": name,
"value": val
}
def get_tables_as_rows(data, definition, processors=None, timezone=pytz.utc):
"""
Return a low-level definition of a group of tables, given a data object and
a high-level declarative definition of the table rows and value
calculations.
"""
sections = []
for section in definition:
rows = [[get_display_data(data, prop, timezone=timezone, processors=processors)
for prop in row]
for row in section['layout']]
max_row_len = max(map(len, rows)) if rows else 0
for row in rows:
if len(row) < max_row_len:
row.append({
"colspan": 2 * (max_row_len - len(row))
})
sections.append({
"name": section.get('name') or '',
"rows": rows
})
return sections
def get_tables_as_columns(*args, **kwargs):
sections = get_tables_as_rows(*args, **kwargs)
for section in sections:
section['columns'] = list(itertools.izip_longest(*section['rows']))
del section['rows']
return sections
@register.simple_tag
def render_tables(tables, options=None):
options = options or {}
id = options.get('id')
style = options.get('style', 'dl')
assert style in ('table', 'dl')
if id is None:
import uuid
id = "a" + str(uuid.uuid4())
if style == 'table':
return render_to_string("hqwebapp/proptable/property_table.html", {
"tables": tables,
"id": id
})
else:
adjust_heights = options.get('adjust_heights', True)
put_loners_in_wells = options.get('put_loners_in_wells', True)
return render_to_string("hqwebapp/proptable/dl_property_table.html", {
"tables": tables,
"id": id,
"adjust_heights": adjust_heights,
"put_loners_in_wells": put_loners_in_wells
})
def get_default_definition(keys, num_columns=1, name=None, assume_phonetimes=True):
"""
Get a default single table layout definition for `keys` split across
`num_columns` columns.
All datetimes will be treated as "phone times".
(See corehq.util.timezones.conversions.PhoneTime for more context.)
"""
# is_phone_time isn't necessary on non-datetime columns,
# but doesn't hurt either, and is easier than trying to detect.
# I believe no caller uses this on non-phone-time datetimes
    # but if something does, we'll have to do this in a more targeted way
layout = chunked([{"expr": prop, "is_phone_time": assume_phonetimes}
for prop in keys], num_columns)
return [
{
"name": name,
"layout": layout
}
]
```
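A minimal sketch of building a default property-table definition and resolving it against a data dict, assuming a configured HQ environment; the keys and values are illustrative:
```python
from corehq.apps.hqwebapp.templatetags.proptable_tags import (
    get_default_definition, get_tables_as_rows)

data = {
    'name': 'Alice',                        # illustrative values
    'date_opened': '2015-02-01T10:30:00Z',
}
definition = get_default_definition(['name', 'date_opened'], num_columns=1)
sections = get_tables_as_rows(data, definition)
# sections -> [{'name': '', 'rows': [[{'expr': 'name', ...}], [{'expr': 'date_opened', ...}]]}]
```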
#### File: apps/ivr/api.py
```python
from datetime import datetime
from corehq.apps.sms.models import (CallLog, INCOMING, OUTGOING,
MessagingSubEvent, MessagingEvent)
from corehq.apps.sms.mixin import VerifiedNumber, MobileBackend
from corehq.apps.sms.util import strip_plus
from corehq.apps.smsforms.app import start_session, _get_responses
from corehq.apps.smsforms.models import XFORMS_SESSION_IVR, get_session_by_session_id
from corehq.apps.app_manager.models import Form
from corehq.apps.hqmedia.models import HQMediaMapItem
from django.http import HttpResponse
from django.conf import settings
from dimagi.utils.web import get_url_base
from touchforms.formplayer.api import current_question, TouchformsError
from corehq.apps.smsforms.app import submit_unfinished_form
from corehq.apps.smsforms.util import form_requires_input
IVR_EVENT_NEW_CALL = "NEW_CALL"
IVR_EVENT_INPUT = "INPUT"
IVR_EVENT_DISCONNECT = "DISCONNECT"
class GatewayConnectionError(Exception):
pass
class IVRResponseData(object):
def __init__(self, ivr_responses, input_length, session):
self.ivr_responses = ivr_responses
self.input_length = input_length
self.session = session
def convert_media_path_to_hq_url(path, app):
media = app.multimedia_map.get(path, None)
if media is None:
return None
else:
url_base = get_url_base()
return url_base + HQMediaMapItem.format_match_map(path, media_type=media.media_type, media_id=media.multimedia_id)["url"] + "foo.wav"
def validate_answer(answer, question):
"""
Return True if answer is a valid response to question, False if not.
(question is expected to be the XFormsResponse object for the question)
"""
if question.event.datatype == "select":
try:
assert answer is not None
answer = int(answer)
assert answer >= 1 and answer <= len(question.event.choices)
return True
except (ValueError, AssertionError):
return False
else:
try:
assert answer is not None
if isinstance(answer, basestring):
assert len(answer.strip()) > 0
return True
except AssertionError:
return False
def format_ivr_response(text, app):
return {
"text_to_say" : text,
"audio_file_url" : convert_media_path_to_hq_url(text, app) if text.startswith("jr://") else None,
}
def get_input_length(question):
if question.event.type == "question" and question.event.datatype == "select":
return 1
else:
return None
def hang_up_response(gateway_session_id, backend_module=None):
if backend_module:
return HttpResponse(backend_module.get_http_response_string(
gateway_session_id,
[],
collect_input=False,
hang_up=True
))
else:
return HttpResponse("")
def add_metadata(call_log_entry, duration=None):
try:
call_log_entry.duration = int(round(float(duration)))
call_log_entry.save()
except (TypeError, ValueError):
pass
def get_app_module_form(call_log_entry, logged_subevent):
"""
Returns (app, module, form, error)
"""
try:
form = Form.get_form(call_log_entry.form_unique_id)
app = form.get_app()
module = form.get_module()
return (app, module, form, False)
    except Exception:
log_error(MessagingEvent.ERROR_CANNOT_FIND_FORM,
call_log_entry, logged_subevent)
return (None, None, None, True)
def start_call_session(recipient, call_log_entry, logged_subevent, app, module, form):
"""
Returns (session, responses, error)
"""
try:
session, responses = start_session(recipient.domain, recipient, app,
module, form, call_log_entry.case_id, yield_responses=True,
session_type=XFORMS_SESSION_IVR,
case_for_case_submission=call_log_entry.case_for_case_submission)
if logged_subevent:
logged_subevent.xforms_session = session
logged_subevent.save()
if len(responses) == 0:
log_error(MessagingEvent.ERROR_FORM_HAS_NO_QUESTIONS,
call_log_entry, logged_subevent)
return (session, responses, True)
return (session, responses, False)
except TouchformsError as e:
additional_error_text = e.response_data.get('human_readable_message', None)
log_error(MessagingEvent.ERROR_TOUCHFORMS_ERROR,
call_log_entry, logged_subevent, additional_error_text=additional_error_text)
return (None, None, True)
def get_ivr_responses_from_touchforms_responses(call_log_entry, responses, app):
"""
responses is a list of XFormsResponse objects
app is the app from which the form came
"""
ivr_responses = []
question_constraint_failed = False
hang_up = False
for response in responses:
if response.status == 'validation-error':
question_constraint_failed = True
call_log_entry.current_question_retry_count += 1
ivr_responses.append(format_ivr_response(response.text_prompt, app))
elif response.status == 'http-error':
ivr_responses = []
hang_up = True
break
elif response.event.type == "question":
ivr_responses.append(format_ivr_response(response.event.caption, app))
elif response.event.type == "form-complete":
hang_up = True
return (ivr_responses, question_constraint_failed, hang_up)
def process_disconnect(call_log_entry):
if call_log_entry.xforms_session_id is not None:
session = get_session_by_session_id(call_log_entry.xforms_session_id)
if session.is_open:
if call_log_entry.submit_partial_form:
submit_unfinished_form(session.session_id,
call_log_entry.include_case_side_effects)
else:
session.end(completed=False)
session.save()
def answer_question(call_log_entry, recipient, input_data, logged_subevent=None):
"""
    Returns a tuple (responses, answer_is_valid), where responses is the
list of XFormsResponse objects from touchforms and answer_is_valid is
True if input_data passes validation and False if not.
Returning an empty list for responses will end up forcing a hangup
later on in the workflow.
"""
if call_log_entry.xforms_session_id is None:
return ([], None)
try:
current_q = current_question(call_log_entry.xforms_session_id)
except TouchformsError as e:
log_touchforms_error(e, call_log_entry, logged_subevent)
return ([], None)
if current_q.status == 'http-error':
log_error(MessagingEvent.ERROR_TOUCHFORMS_ERROR, call_log_entry,
logged_subevent)
return ([], None)
if validate_answer(input_data, current_q):
answer_is_valid = True
try:
responses = _get_responses(recipient.domain, recipient._id,
input_data, yield_responses=True,
session_id=call_log_entry.xforms_session_id)
except TouchformsError as e:
log_touchforms_error(e, call_log_entry, logged_subevent)
return ([], None)
else:
answer_is_valid = False
call_log_entry.current_question_retry_count += 1
responses = [current_q]
return (responses, answer_is_valid)
def handle_known_call_session(call_log_entry, backend_module, ivr_event,
input_data=None, logged_subevent=None):
if (ivr_event == IVR_EVENT_NEW_CALL and
call_log_entry.use_precached_first_response):
# This means we precached the first IVR response when we
# initiated the call, so all we need to do is return that
# response.
return HttpResponse(call_log_entry.first_response)
app, module, form, error = get_app_module_form(call_log_entry, logged_subevent)
if error:
return hang_up_response(call_log_entry.gateway_session_id,
backend_module=backend_module)
recipient = call_log_entry.recipient
answer_is_valid = True
if ivr_event == IVR_EVENT_NEW_CALL:
session, responses, error = start_call_session(recipient,
call_log_entry, logged_subevent, app, module, form)
if error:
return hang_up_response(call_log_entry.gateway_session_id,
backend_module=backend_module)
call_log_entry.xforms_session_id = session.session_id
elif ivr_event == IVR_EVENT_INPUT:
responses, answer_is_valid = answer_question(call_log_entry, recipient,
input_data, logged_subevent=logged_subevent)
else:
responses = []
ivr_responses, question_constraint_failed, hang_up = \
get_ivr_responses_from_touchforms_responses(call_log_entry, responses, app)
if answer_is_valid and not question_constraint_failed:
        # If there were no validation errors (including question constraint errors),
# then reset the current question retry count to 0.
call_log_entry.current_question_retry_count = 0
if (call_log_entry.max_question_retries is not None and
call_log_entry.current_question_retry_count > call_log_entry.max_question_retries):
# We have retried to current question too many times without
# getting a valid answer, so force a hang-up.
ivr_responses = []
if len(ivr_responses) == 0:
hang_up = True
input_length = None
if hang_up:
process_disconnect(call_log_entry)
else:
# Set input_length to let the ivr gateway know how many digits we need to collect.
        # If the latest XFormsResponse we have was a response to a constraint error, then
# it won't have an event, so in that case we have to get the current question again.
if question_constraint_failed:
current_q = current_question(call_log_entry.xforms_session_id)
else:
current_q = responses[-1]
input_length = get_input_length(current_q)
call_log_entry.save()
return HttpResponse(
backend_module.get_http_response_string(call_log_entry.gateway_session_id,
ivr_responses, collect_input=(not hang_up), hang_up=hang_up,
input_length=input_length))
def log_call(phone_number, gateway_session_id, backend_api=None):
cleaned_number = strip_plus(phone_number)
v = VerifiedNumber.by_extensive_search(cleaned_number)
call = CallLog(
phone_number=cleaned_number,
direction=INCOMING,
date=datetime.utcnow(),
backend_api=backend_api,
gateway_session_id=gateway_session_id,
)
if v:
call.domain = v.domain
call.couch_recipient_doc_type = v.owner_doc_type
call.couch_recipient = v.owner_id
call.save()
def incoming(phone_number, backend_module, gateway_session_id, ivr_event, input_data=None,
duration=None):
"""
The main entry point for all incoming IVR requests.
"""
call_log_entry = CallLog.get_call_by_gateway_session_id(gateway_session_id)
logged_subevent = None
if call_log_entry and call_log_entry.messaging_subevent_id:
logged_subevent = MessagingSubEvent.objects.get(
pk=call_log_entry.messaging_subevent_id)
if call_log_entry:
add_metadata(call_log_entry, duration)
if call_log_entry and call_log_entry.form_unique_id is None:
# If this request is for a call with no form,
# then just short circuit everything and hang up
return hang_up_response(gateway_session_id, backend_module=backend_module)
if call_log_entry and backend_module:
return handle_known_call_session(call_log_entry, backend_module, ivr_event,
input_data=input_data, logged_subevent=logged_subevent)
else:
if not call_log_entry:
log_call(phone_number, gateway_session_id,
backend_api=(backend_module.API_ID if backend_module else None))
return hang_up_response(gateway_session_id, backend_module=backend_module)
def get_ivr_backend(recipient, verified_number=None, unverified_number=None):
if verified_number and verified_number.ivr_backend_id:
return MobileBackend.get(verified_number.ivr_backend_id)
else:
phone_number = (verified_number.phone_number if verified_number
else unverified_number)
phone_number = strip_plus(str(phone_number))
prefixes = settings.IVR_BACKEND_MAP.keys()
prefixes = sorted(prefixes, key=lambda x: len(x), reverse=True)
for prefix in prefixes:
if phone_number.startswith(prefix):
return MobileBackend.get(settings.IVR_BACKEND_MAP[prefix])
return None
def log_error(error, call_log_entry=None, logged_subevent=None,
additional_error_text=None):
if call_log_entry:
call_log_entry.error = True
call_log_entry.error_message = dict(MessagingEvent.ERROR_MESSAGES).get(error)
if additional_error_text:
call_log_entry.error_message += ' %s' % additional_error_text
call_log_entry.save()
if logged_subevent:
logged_subevent.error(error, additional_error_text=additional_error_text)
def log_touchforms_error(touchforms_error, call_log_entry=None, logged_subevent=None):
"""
touchforms_error should be an instance of TouchformsError
"""
additional_error_text = touchforms_error.response_data.get('human_readable_message', None)
log_error(MessagingEvent.ERROR_TOUCHFORMS_ERROR,
call_log_entry, logged_subevent, additional_error_text)
def get_first_ivr_response_data(recipient, call_log_entry, logged_subevent):
"""
As long as the form has at least one question in it (i.e., it
doesn't consist of all labels), then we can start the touchforms
session now and cache the first IVR response, so that all we
need to do later is serve it up. This makes for less time ringing
when the user is on the phone, waiting for the line to pick up.
If the form consists of all labels, we don't do anything here,
because then we would end up submitting the form right away
regardless of whether the user actually got the call.
Returns (ivr_data, error) where ivr_data is an instance of IVRResponseData
"""
app, module, form, error = get_app_module_form(call_log_entry,
logged_subevent)
if error:
return (None, True)
if form_requires_input(form):
session, responses, error = start_call_session(recipient, call_log_entry,
logged_subevent, app, module, form)
if error:
return (None, True)
ivr_responses = []
for response in responses:
ivr_responses.append(format_ivr_response(response.event.caption, app))
ivr_data = IVRResponseData(ivr_responses, get_input_length(responses[-1]),
session)
return (ivr_data, False)
return (None, False)
def set_first_ivr_response(call_log_entry, gateway_session_id, ivr_data, get_response_function):
call_log_entry.xforms_session_id = ivr_data.session.session_id
call_log_entry.use_precached_first_response = True
call_log_entry.first_response = get_response_function(
gateway_session_id, ivr_data.ivr_responses, collect_input=True,
hang_up=False, input_length=ivr_data.input_length)
def initiate_outbound_call(recipient, form_unique_id, submit_partial_form,
include_case_side_effects, max_question_retries, messaging_event_id,
verified_number=None, unverified_number=None, case_id=None,
case_for_case_submission=False, timestamp=None):
"""
Returns False if an error occurred and the call should be retried.
Returns True if the call should not be retried (either because it was
queued successfully or because an unrecoverable error occurred).
"""
call_log_entry = None
logged_event = MessagingEvent.objects.get(pk=messaging_event_id)
logged_subevent = logged_event.create_ivr_subevent(recipient,
form_unique_id, case_id=case_id)
if not verified_number and not unverified_number:
log_error(MessagingEvent.ERROR_NO_PHONE_NUMBER,
logged_subevent=logged_subevent)
return True
backend = get_ivr_backend(recipient, verified_number, unverified_number)
if not backend:
log_error(MessagingEvent.ERROR_NO_SUITABLE_GATEWAY,
logged_subevent=logged_subevent)
return True
phone_number = (verified_number.phone_number if verified_number
else unverified_number)
call_log_entry = CallLog(
couch_recipient_doc_type=recipient.doc_type,
couch_recipient=recipient.get_id,
phone_number='+%s' % str(phone_number),
direction=OUTGOING,
date=timestamp or datetime.utcnow(),
domain=recipient.domain,
form_unique_id=form_unique_id,
submit_partial_form=submit_partial_form,
include_case_side_effects=include_case_side_effects,
max_question_retries=max_question_retries,
current_question_retry_count=0,
case_id=case_id,
case_for_case_submission=case_for_case_submission,
messaging_subevent_id=logged_subevent.pk,
)
ivr_data, error = get_first_ivr_response_data(recipient,
call_log_entry, logged_subevent)
if error:
return True
if ivr_data:
logged_subevent.xforms_session = ivr_data.session
logged_subevent.save()
try:
kwargs = backend.get_cleaned_outbound_params()
module = backend.backend_module
call_log_entry.backend_api = module.API_ID
call_log_entry.save()
result = module.initiate_outbound_call(call_log_entry,
logged_subevent, ivr_data=ivr_data, **kwargs)
logged_subevent.completed()
return result
except GatewayConnectionError:
log_error(MessagingEvent.ERROR_GATEWAY_ERROR,
call_log_entry, logged_subevent)
raise
except Exception:
log_error(MessagingEvent.ERROR_INTERNAL_SERVER_ERROR,
call_log_entry, logged_subevent)
raise
```
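A minimal sketch of how a gateway-specific view might hand an inbound request off to `incoming()`; the request parameter names are illustrative, since each real backend maps its own gateway parameters onto these arguments:
```python
from corehq.apps.ivr.api import incoming, IVR_EVENT_NEW_CALL, IVR_EVENT_INPUT

def example_gateway_view(request, backend_module):
    phone_number = request.GET.get('caller_id')   # illustrative param name
    session_id = request.GET.get('session_id')    # illustrative param name
    digits = request.GET.get('digits')            # illustrative param name
    ivr_event = IVR_EVENT_INPUT if digits else IVR_EVENT_NEW_CALL
    return incoming(phone_number, backend_module, session_id, ivr_event,
                    input_data=digits)
```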
#### File: apps/locations/permissions.py
```python
from django_prbac.decorators import requires_privilege_raise404
from corehq import privileges
from functools import wraps
from django.http import Http404
from corehq import toggles
from corehq.apps.domain.models import Domain
from corehq.apps.domain.decorators import (login_and_domain_required,
domain_admin_required)
from corehq.apps.users.models import CommCareUser
from .models import SQLLocation
from .util import get_xform_location
def locations_access_required(view_fn):
"""
Decorator controlling domain-level access to locations.
"""
return login_and_domain_required(
requires_privilege_raise404(privileges.LOCATIONS)(view_fn)
)
def is_locations_admin(view_fn):
"""
Decorator controlling write access to locations.
"""
return locations_access_required(domain_admin_required(view_fn))
def user_can_edit_any_location(user, project):
return user.is_domain_admin(project.name) or not project.location_restriction_for_users
def can_edit_any_location(view_fn):
"""
Decorator determining whether a user has permission to edit all locations in a project
"""
@wraps(view_fn)
def _inner(request, domain, *args, **kwargs):
if user_can_edit_any_location(request.couch_user, request.project):
return view_fn(request, domain, *args, **kwargs)
raise Http404()
return locations_access_required(_inner)
def user_can_edit_location(user, sql_location, project):
if user_can_edit_any_location(user, project):
return True
user_loc = user.get_sql_location(sql_location.domain)
if not user_loc:
return False
return user_loc.is_direct_ancestor_of(sql_location)
def user_can_view_location(user, sql_location, project):
if (user.is_domain_admin(project.name) or
not project.location_restriction_for_users):
return True
user_loc = user.get_location(sql_location.domain)
if not user_loc:
return True
if user_can_edit_location(user, sql_location, project):
return True
return sql_location.location_id in user_loc.lineage
def can_edit_location(view_fn):
"""
Decorator controlling a user's access to a specific location.
The decorated function must be passed a loc_id arg (eg: from urls.py)
"""
@wraps(view_fn)
def _inner(request, domain, loc_id, *args, **kwargs):
try:
# pass to view?
location = SQLLocation.objects.get(location_id=loc_id)
except SQLLocation.DoesNotExist:
raise Http404()
else:
if user_can_edit_location(request.couch_user, location, request.project):
return view_fn(request, domain, loc_id, *args, **kwargs)
raise Http404()
return locations_access_required(_inner)
def user_can_edit_location_types(user, project):
if (user.is_domain_admin(project.name) or
not project.location_restriction_for_users):
return True
return not user.get_domain_membership(project.name).location_id
def can_edit_location_types(view_fn):
"""
    Decorator controlling a user's access to location types.
"""
@wraps(view_fn)
def _inner(request, domain, *args, **kwargs):
if user_can_edit_location_types(request.couch_user, request.project):
return view_fn(request, domain, *args, **kwargs)
raise Http404()
return locations_access_required(_inner)
def can_edit_form_location(domain, web_user, form):
# Domain admins can always edit locations. If the user isn't an admin and
# the location restriction is enabled, they can only edit forms that are
# explicitly at or below them in the location tree.
domain_obj = Domain.get_by_name(domain)
if (not toggles.RESTRICT_FORM_EDIT_BY_LOCATION.enabled(domain)
or user_can_edit_any_location(web_user, domain_obj)):
return True
if domain_obj.supports_multiple_locations_per_user:
user_id = getattr(form.metadata, 'userID', None)
if not user_id:
return False
form_user = CommCareUser.get(user_id)
for location in form_user.locations:
if user_can_edit_location(web_user, location.sql_location, domain_obj):
return True
return False
else:
form_location = get_xform_location(form)
if not form_location:
return False
return user_can_edit_location(web_user, form_location, domain_obj)
```
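A minimal sketch of applying `can_edit_location` to a view; the view itself is illustrative, but the signature matches what the decorator passes through (`request, domain, loc_id`):
```python
from django.http import HttpResponse
from corehq.apps.locations.permissions import can_edit_location

@can_edit_location
def rename_location(request, domain, loc_id):
    # illustrative body; a real view would look up and update the SQLLocation here
    return HttpResponse("ok")
```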
#### File: locations/tests/test_location_groups.py
```python
from mock import patch
from corehq.apps.locations.models import LOCATION_REPORTING_PREFIX
from corehq.apps.locations.fixtures import location_fixture_generator
from corehq.apps.locations.tests.util import make_loc
from corehq.apps.locations.tests.test_locations import LocationTestBase
from corehq import toggles
from corehq.apps.groups.exceptions import CantSaveException
from corehq.apps.users.models import CommCareUser
class LocationGroupTest(LocationTestBase):
def setUp(self):
super(LocationGroupTest, self).setUp()
self.test_state = make_loc(
'teststate',
type='state',
domain=self.domain.name
)
self.test_village = make_loc(
'testvillage',
type='village',
parent=self.test_state,
domain=self.domain.name
)
self.test_outlet = make_loc(
'testoutlet',
type='outlet',
parent=self.test_village,
domain=self.domain.name
)
toggles.MULTIPLE_LOCATIONS_PER_USER.set("domain:{}".format(self.domain.name), True)
def test_group_name(self):
# just location name for top level
self.assertEqual(
'teststate-Cases',
self.test_state.sql_location.case_sharing_group_object().name
)
# locations combined by forward slashes otherwise
self.assertEqual(
'teststate/testvillage/testoutlet-Cases',
self.test_outlet.sql_location.case_sharing_group_object().name
)
# reporting group is similar but has no ending
self.assertEqual(
'teststate/testvillage/testoutlet',
self.test_outlet.sql_location.reporting_group_object().name
)
def test_id_assignment(self):
# each should have the same id, but with a different prefix
self.assertEqual(
self.test_outlet._id,
self.test_outlet.sql_location.case_sharing_group_object()._id
)
self.assertEqual(
LOCATION_REPORTING_PREFIX + self.test_outlet._id,
self.test_outlet.sql_location.reporting_group_object()._id
)
def test_group_properties(self):
# case sharing groups should ... be case sharing
self.assertTrue(
self.test_outlet.sql_location.case_sharing_group_object().case_sharing
)
self.assertFalse(
self.test_outlet.sql_location.case_sharing_group_object().reporting
)
# and reporting groups reporting
self.assertFalse(
self.test_outlet.sql_location.reporting_group_object().case_sharing
)
self.assertTrue(
self.test_outlet.sql_location.reporting_group_object().reporting
)
# both should set domain properly
self.assertEqual(
self.domain.name,
self.test_outlet.sql_location.reporting_group_object().domain
)
self.assertEqual(
self.domain.name,
self.test_outlet.sql_location.case_sharing_group_object().domain
)
def test_accessory_methods(self):
# we need to expose group id without building the group sometimes
# so lets make sure those match up
expected_id = self.loc.sql_location.case_sharing_group_object()._id
self.assertEqual(
expected_id,
self.loc.group_id
)
def test_not_real_groups(self):
# accessing a group object should not cause it to save
# in the DB
group_obj = self.test_outlet.sql_location.case_sharing_group_object()
self.assertNotEqual(group_obj.doc_type, 'Group')
def test_cant_save_wont_save(self):
group_obj = self.test_outlet.sql_location.case_sharing_group_object()
with self.assertRaises(CantSaveException):
group_obj.save()
def test_get_owner_ids(self):
loc_type = self.loc.location_type_object
self.assertFalse(loc_type.shares_cases)
owner_ids = self.user.get_owner_ids()
self.assertEqual(1, len(owner_ids))
self.assertEqual(self.user._id, owner_ids[0])
# change it so case sharing is enabled and make sure it is now included
loc_type.shares_cases = True
loc_type.save()
# we have to re-create the user object because various things are cached
user = CommCareUser.wrap(self.user.to_json())
owner_ids = user.get_owner_ids()
self.assertEqual(2, len(owner_ids))
self.assertEqual(self.loc._id, owner_ids[1])
# set it back to false in case other tests needed that
loc_type.shares_cases = False
loc_type.save()
def test_custom_data(self):
# need to put the location data on the
# group with a special prefix
self.loc.metadata = {
'foo': 'bar',
'fruit': 'banana'
}
self.loc.save()
self.assertDictEqual(
{
'commcare_location_type': self.loc.location_type,
'commcare_location_name': self.loc.name,
'commcare_location_foo': 'bar',
'commcare_location_fruit': 'banana'
},
self.loc.sql_location.case_sharing_group_object().metadata
)
self.assertDictEqual(
{
'commcare_location_type': self.loc.location_type,
'commcare_location_name': self.loc.name,
'commcare_location_foo': 'bar',
'commcare_location_fruit': 'banana'
},
self.loc.sql_location.reporting_group_object().metadata
)
@patch('corehq.apps.domain.models.Domain.uses_locations', lambda: True)
def test_location_fixture_generator(self):
"""
This tests the location XML fixture generator. It specifically ensures that no duplicate XML
nodes are generated when all locations have a parent and multiple locations are enabled.
"""
self.domain.commtrack_enabled = True
self.domain.save()
self.loc.delete()
state = make_loc(
'teststate1',
type='state',
domain=self.domain.name
)
district = make_loc(
'testdistrict1',
type='district',
domain=self.domain.name,
parent=state
)
block = make_loc(
'testblock1',
type='block',
domain=self.domain.name,
parent=district
)
village = make_loc(
'testvillage1',
type='village',
domain=self.domain.name,
parent=block
)
outlet1 = make_loc(
'testoutlet1',
type='outlet',
domain=self.domain.name,
parent=village
)
outlet2 = make_loc(
'testoutlet2',
type='outlet',
domain=self.domain.name,
parent=village
)
outlet3 = make_loc(
'testoutlet3',
type='outlet',
domain=self.domain.name,
parent=village
)
self.user.set_location(outlet2)
self.user.add_location_delegate(outlet1)
self.user.add_location_delegate(outlet2)
self.user.add_location_delegate(outlet3)
self.user.add_location_delegate(state)
self.user.save()
fixture = location_fixture_generator(self.user, '2.0')
self.assertEquals(len(fixture[0].findall('.//state')), 1)
self.assertEquals(len(fixture[0].findall('.//outlet')), 3)
```
#### File: locations/tests/test_location_utils.py
```python
from ..models import LocationType
from ..util import get_locations_and_children
from .util import LocationHierarchyTestCase
class MassachusettsTestCase(LocationHierarchyTestCase):
location_type_names = ['state', 'county', 'city']
location_structure = [
('Massachusetts', [
('Middlesex', [
('Cambridge', []),
('Somerville', []),
]),
('Suffolk', [
('Boston', []),
])
])
]
class TestLocationsSetup(MassachusettsTestCase):
def test_location_types(self):
for lt_name in self.location_type_names:
in_db = LocationType.objects.get(domain=self.domain, name=lt_name)
in_dict = self.location_types[lt_name]
self.assertEqual(lt_name, in_db.name, in_dict.name)
def test_locations_created(self):
location_names = ['Massachusetts', 'Middlesex', 'Cambridge',
'Somerville', 'Suffolk', 'Boston']
for name in location_names:
self.assertIn(name, self.locations)
def test_parentage(self):
cambridge = self.locations['Cambridge']
self.assertEqual(cambridge.parent.name, 'Middlesex')
self.assertEqual(cambridge.parent.parent.name, 'Massachusetts')
class TestGetLocationsAndChildren(MassachusettsTestCase):
def test_get_locations_and_children(self):
names = ['Middlesex', 'Somerville', 'Suffolk']
result = get_locations_and_children([self.locations[name].location_id
for name in names])
self.assertItemsEqual(
[loc.name for loc in result],
['Middlesex', 'Cambridge', 'Somerville', 'Suffolk', 'Boston']
)
def test_get_locations_and_children2(self):
names = ['Middlesex', 'Boston']
result = get_locations_and_children([self.locations[name].location_id
for name in names])
self.assertItemsEqual(
[loc.name for loc in result],
['Middlesex', 'Cambridge', 'Somerville', 'Boston']
)
```
#### File: apps/mobile_auth/utils.py
```python
import base64
import os
from datetime import timedelta, datetime
from corehq.apps.mobile_auth.xml import AuthKeys, KeyRecord, OpenRosaResponse
from django.utils.translation import ugettext as _
def generate_aes_key():
# get 32 byte key
bin_key = os.urandom(32)
return base64.b64encode(bin_key)
def new_key_record(domain, user_id, now=None, valid=None):
"""
return initialized but unsaved MobileAuthKeyRecord
"""
from corehq.apps.mobile_auth.models import MobileAuthKeyRecord
now = now or datetime.utcnow()
valid = valid or now
record = MobileAuthKeyRecord(
domain=domain,
user_id=user_id,
valid=valid,
)
bump_expiry(record, now=now)
return record
def bump_expiry(record, now=None):
"""
    Initialize or extend expiry to after now,
    in 30-day increments.
"""
now = now or datetime.utcnow()
record.expires = record.expires or now
while record.expires <= now:
record.expires += timedelta(days=30)
def get_mobile_auth_payload(key_records, domain, issued=None, now=None):
"""
    Formats a list of key record documents in the XML format outlined in
    https://github.com/dimagi/commcare/wiki/CentralAuthAPI.
    Sets XML object properties in a standard order for ease of testing.
"""
now = now or datetime.utcnow()
issued = issued or now
def _OpenRosaResponse():
x = OpenRosaResponse()
x.auth_keys = _auth_keys()
x.message = _('Here are your keys!')
return x
def _auth_keys():
x = AuthKeys(
key_records=list(_key_record())
)
x.domain = domain
x.issued = issued
return x
def _key_record():
for key_record in key_records:
x = KeyRecord()
for attr in ['valid', 'expires', 'uuid', 'type', 'key']:
setattr(x, attr, getattr(key_record, attr))
yield x
return _OpenRosaResponse().serializeDocument(pretty=True)
```
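A minimal sketch tying the helpers above together, assuming a configured HQ environment; the domain and user id are illustrative:
```python
from corehq.apps.mobile_auth.utils import new_key_record, get_mobile_auth_payload

record = new_key_record('example-domain', 'some-user-id')  # returned unsaved
record.save()
payload_xml = get_mobile_auth_payload([record], 'example-domain')
```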
#### File: apps/ota/views.py
```python
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_noop
from casexml.apps.case.xml import V2
from corehq import toggles
from corehq.apps.domain.decorators import domain_admin_required, login_or_digest_or_basic
from corehq.apps.domain.models import Domain
from corehq.apps.domain.views import DomainViewMixin, EditMyProjectSettingsView
from corehq.apps.hqwebapp.models import ProjectSettingsTab
from corehq.apps.ota.forms import PrimeRestoreCacheForm
from corehq.apps.ota.tasks import prime_restore
from corehq.apps.style.views import BaseB3SectionPageView
from corehq.apps.users.models import CouchUser, CommCareUser
from corehq.util.view_utils import json_error
from dimagi.utils.decorators.memoized import memoized
from casexml.apps.phone.restore import RestoreConfig, RestoreParams, RestoreCacheSettings
from django.http import HttpResponse
from soil import DownloadBase
@json_error
@login_or_digest_or_basic()
def restore(request, domain):
"""
We override restore because we have to supply our own
user model (and have the domain in the url)
"""
user = request.user
couch_user = CouchUser.from_django_user(user)
return get_restore_response(domain, couch_user, **get_restore_params(request))
def get_restore_params(request):
"""
Given a request, get the relevant restore parameters out with sensible defaults
"""
# not a view just a view util
return {
'since': request.GET.get('since'),
'version': request.GET.get('version', "1.0"),
'state': request.GET.get('state'),
'items': request.GET.get('items') == 'true',
'force_restore_mode': request.GET.get('mode', None)
}
def get_restore_response(domain, couch_user, since=None, version='1.0',
state=None, items=False, force_cache=False,
cache_timeout=None, overwrite_cache=False,
force_restore_mode=None):
# not a view just a view util
if not couch_user.is_commcare_user():
return HttpResponse("No linked chw found for %s" % couch_user.username,
status=401) # Authentication Failure
elif domain != couch_user.domain:
return HttpResponse("%s was not in the domain %s" % (couch_user.username, domain),
status=401)
project = Domain.get_by_name(domain)
restore_config = RestoreConfig(
project=project,
user=couch_user.to_casexml_user(),
params=RestoreParams(
sync_log_id=since,
version=version,
state_hash=state,
include_item_count=items,
force_restore_mode=force_restore_mode,
),
cache_settings=RestoreCacheSettings(
force_cache=force_cache,
cache_timeout=cache_timeout,
overwrite_cache=overwrite_cache
),
)
return restore_config.get_response()
class PrimeRestoreCacheView(BaseB3SectionPageView, DomainViewMixin):
page_title = ugettext_noop("Prime Restore Cache")
section_name = ugettext_noop("Project Settings")
urlname = 'prime_restore_cache'
template_name = "ota/prime_restore_cache.html"
@method_decorator(domain_admin_required)
@toggles.PRIME_RESTORE.required_decorator()
def dispatch(self, *args, **kwargs):
return super(PrimeRestoreCacheView, self).dispatch(*args, **kwargs)
@property
def main_context(self):
main_context = super(PrimeRestoreCacheView, self).main_context
main_context.update({
'domain': self.domain,
})
main_context.update({
'active_tab': ProjectSettingsTab(
self.request,
self.urlname,
domain=self.domain,
couch_user=self.request.couch_user,
project=self.request.project
),
'is_project_settings': True,
})
return main_context
@property
@memoized
def page_url(self):
if self.urlname:
return reverse(self.urlname, args=[self.domain])
@property
@memoized
def section_url(self):
return reverse(EditMyProjectSettingsView.urlname, args=[self.domain])
@property
@memoized
def form(self):
if self.request.method == 'POST':
return PrimeRestoreCacheForm(self.request.POST)
return PrimeRestoreCacheForm()
@property
def page_context(self):
return {
'form': self.form,
}
def post(self, request, *args, **kwargs):
if self.form.is_valid():
return self.form_valid()
return self.get(request, *args, **kwargs)
def form_valid(self):
if self.form.cleaned_data['all_users']:
user_ids = CommCareUser.ids_by_domain(self.domain)
else:
user_ids = self.form.user_ids
download = DownloadBase()
res = prime_restore.delay(
self.domain,
user_ids,
version=V2,
cache_timeout_hours=24,
overwrite_cache=self.form.cleaned_data['overwrite_cache'],
check_cache_only=self.form.cleaned_data['check_cache_only']
)
download.set_task(res)
return redirect('hq_soil_download', self.domain, download.download_id)
```
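Since `get_restore_params` above pulls everything from the query string, a mobile client (or a debugging script) drives a restore purely through GET parameters. The sketch below uses the `requests` library; the host, the `/a/<domain>/phone/restore/` URL pattern, the sync log id, and the credentials are all assumptions for illustration, not values taken from this file:
```python
import requests

# Hypothetical host, domain and credentials -- adjust for a real deployment.
DOMAIN = "example-domain"
RESTORE_URL = "https://commcare.example.com/a/%s/phone/restore/" % DOMAIN  # assumed URL pattern

params = {
    "version": "2.0",                 # restore payload version
    "since": "previous-sync-log-id",  # hypothetical sync log id for an incremental restore
    "items": "true",                  # ask the server to include item counts
}

# Basic/digest auth for the mobile worker; get_restore_response() rejects
# anyone who is not a CommCare user on this domain with a 401.
response = requests.get(RESTORE_URL, params=params, auth=("mobile.worker", "secret"))
print(response.status_code)
```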
#### File: performance_sms/tests/test_dbaccessors.py
```python
import uuid
from django.test import TestCase
from corehq.apps.performance_sms import dbaccessors
from corehq.apps.performance_sms.models import PerformanceConfiguration
class TestPerformanceDbaccessors(TestCase):
def test_by_domain(self):
domain = uuid.uuid4().hex
config = _make_performance_config(domain)
try:
results = dbaccessors.by_domain(domain)
self.assertEqual(1, len(results))
self.assertEqual(config._id, results[0]._id)
# check no results for some other domain
no_results = dbaccessors.by_domain(uuid.uuid4().hex)
self.assertEqual(0, len(no_results))
finally:
config.delete()
def _make_performance_config(domain):
config = PerformanceConfiguration(domain=domain, recipient_id=uuid.uuid4().hex, template='test')
config.save()
return config
```
#### File: apps/products/bulk.py
```python
from corehq.apps.products.models import Product, SQLProduct
from django.utils.translation import ugettext as _
from corehq.apps.programs.models import Program
def import_products(domain, importer):
from corehq.apps.products.views import ProductFieldsView
results = {'errors': [], 'messages': []}
to_save = []
product_count = 0
seen_codes = set()
program_ids = [program._id for program in Program.by_domain(domain)]
codes = {
row['code']: row['product_id']
for row in SQLProduct.objects.filter(domain=domain, is_archived=False).values('code', 'product_id')
}
custom_data_validator = ProductFieldsView.get_validator(domain)
for row in importer.worksheet:
try:
p = Product.from_excel(row, custom_data_validator)
except Exception, e:
results['errors'].append(
_(u'Failed to import product {name}: {ex}'.format(
name=row['name'] or '',
ex=e,
))
)
continue
importer.add_progress()
if not p:
# skip if no product is found (or the row is blank)
continue
if not p.domain:
# if product doesn't have domain, use from context
p.domain = domain
elif p.domain != domain:
# don't let user import against another domains products
results['errors'].append(
_(u"Product {product_name} belongs to another domain and was not updated").format(
product_name=p.name
)
)
continue
if p.code:
if (p.code in codes and codes[p.code] != p.get_id) or (p.code in seen_codes):
results['errors'].append(_(
u"Product {product_name} could not be imported \
since its product ID is already assigned to another product"
).format(
product_name=p.name
))
continue
if p.code not in codes:
seen_codes.add(p.code)
if p.program_id and p.program_id not in program_ids:
results['errors'].append(_(
u"Product {product_name} references a program that doesn't exist: {program_id}"
).format(
product_name=p.name,
program_id=p.program_id
))
continue
product_count += 1
to_save.append(p)
if len(to_save) > 500:
Product.bulk_save(to_save)
for couch_product in to_save:
couch_product.sync_to_sql()
to_save = []
if to_save:
Product.bulk_save(to_save)
for couch_product in to_save:
couch_product.sync_to_sql()
if product_count:
results['messages'].insert(
0,
_('Successfully updated {number_of_products} products with {errors} '
'errors.').format(
number_of_products=product_count, errors=len(results['errors'])
)
)
return results
```
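One reusable idea in `import_products` above is the batching pattern: rows are accumulated and flushed through `Product.bulk_save` once roughly 500 pile up, with a final flush for the remainder. A standalone sketch of just that pattern, with a stub `bulk_save` standing in for the real bulk write:
```python
def bulk_save(batch):
    # Stub for Product.bulk_save: a real implementation would write the whole
    # batch to the database in a single round trip.
    print("saving %d items" % len(batch))

def import_in_batches(rows, batch_size=500):
    to_save = []
    for row in rows:
        to_save.append(row)
        if len(to_save) >= batch_size:
            # Flush a full batch so memory stays bounded on large imports.
            bulk_save(to_save)
            to_save = []
    if to_save:
        # Flush whatever is left after the loop, mirroring the final
        # `if to_save:` block in import_products above.
        bulk_save(to_save)

import_in_batches(range(1200))  # prints "saving 500 items" twice, then "saving 200 items"
```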
#### File: apps/products/fixtures.py
```python
from corehq.apps.products.models import Product
from corehq.apps.commtrack.fixtures import _simple_fixture_generator
from corehq.apps.products.models import SQLProduct
from corehq.apps.custom_data_fields.dbaccessors import get_by_domain_and_type
PRODUCT_FIELDS = [
'name',
'unit',
'code',
'description',
'category',
'program_id',
'cost',
'product_data'
]
CUSTOM_DATA_SLUG = 'product_data'
def product_fixture_generator_json(domain):
if not SQLProduct.objects.filter(domain=domain).exists():
return None
fields = filter(lambda x: x != CUSTOM_DATA_SLUG, PRODUCT_FIELDS)
fields.append('@id')
custom_fields = get_by_domain_and_type(domain, 'ProductFields')
if custom_fields:
for f in custom_fields.fields:
fields.append(CUSTOM_DATA_SLUG + '/' + f.slug)
uri = 'jr://fixture/{}'.format(ProductFixturesProvider.id)
return {
'id': 'products',
'uri': uri,
'path': '/products/product',
'name': 'Products',
'structure': {
f: {
'name': f,
'no_option': True
} for f in fields},
# DEPRECATED PROPERTIES
'sourceUri': uri,
'defaultId': 'products',
'initialQuery': "instance('products')/products/product",
}
class ProductFixturesProvider(object):
id = 'commtrack:products'
def __call__(self, user, version, last_sync=None):
def get_products():
return sorted(
Product.by_domain(user.domain, include_archived=True),
key=lambda product: product.code
)
return _simple_fixture_generator(
user, self.id, "product", PRODUCT_FIELDS, get_products, last_sync
)
product_fixture_generator = ProductFixturesProvider()
```
#### File: reports/filters/dates.py
```python
import json
from django.utils.translation import ugettext_lazy, ugettext as _
from corehq.util.dates import iso_string_to_date
from dimagi.utils.dates import DateSpan
from corehq.apps.reports.filters.base import BaseReportFilter
import datetime
class DatespanFilter(BaseReportFilter):
"""
A filter that returns a startdate and an enddate.
This is the standard datespan filter that gets pulled into request with the decorator
@datespan_in_request
"""
template = "reports/filters/datespan.html"
label = ugettext_lazy("Date Range")
slug = "datespan"
inclusive = True
default_days = 30
@property
def datespan(self):
datespan = DateSpan.since(self.default_days, timezone=self.timezone, inclusive=self.inclusive)
if self.request.datespan.is_valid() and self.slug == 'datespan':
datespan.startdate = self.request.datespan.startdate
datespan.enddate = self.request.datespan.enddate
return datespan
@property
def filter_context(self):
return {
'datespan': self.datespan,
'report_labels': self.report_labels,
'separator': _(' to '),
'timezone': self.timezone.zone,
}
@property
def report_labels(self):
return json.dumps({
'last_7_days': _('Last 7 Days'),
'last_month': _('Last Month'),
'last_30_days': _('Last 30 Days')
})
class SingleDateFilter(BaseReportFilter):
"""
A filter that returns a single date
"""
template = "reports/filters/date_selector.html"
label = ugettext_lazy("Date")
slug = "date"
@property
def date(self):
from_req = self.request.GET.get('date')
if from_req:
try:
return iso_string_to_date(from_req)
except ValueError:
pass
return datetime.date.today()
@property
def filter_context(self):
return {
'date': self.date,
}
```
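`SingleDateFilter.date` above is a small parse-with-fallback: take the `date` GET parameter if it parses, otherwise use today. A standalone equivalent, with `datetime.strptime` standing in for corehq's `iso_string_to_date` helper:
```python
import datetime

def parse_date_or_today(raw):
    # Stand-in for iso_string_to_date(): accept YYYY-MM-DD, otherwise fall
    # back to today's date, just like SingleDateFilter.date above.
    if raw:
        try:
            return datetime.datetime.strptime(raw, "%Y-%m-%d").date()
        except ValueError:
            pass
    return datetime.date.today()

print(parse_date_or_today("2015-06-01"))   # 2015-06-01
print(parse_date_or_today("not-a-date"))   # today's date
print(parse_date_or_today(None))           # today's date
```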
#### File: reports/tests/test_filters.py
```python
from django.test import SimpleTestCase
from corehq.apps.reports.filters.api import paginate_options
class TestEmwfPagination(SimpleTestCase):
def make_data_source(self, options):
def matching_objects(query):
if not query:
return options
return [o for o in options if query.lower() in o.lower()]
def get_size(query):
return len(matching_objects(query))
def get_objects(query, start, size):
return matching_objects(query)[start:start+size]
return (get_size, get_objects)
@property
def data_sources(self):
return [
self.make_data_source(["<NAME>", "<NAME>", "Queen"]),
self.make_data_source(["Oslo", "Baldwin", "Perth", "Quito"]),
self.make_data_source([]),
self.make_data_source(["Jdoe", "Rumpelstiltskin"]),
]
def test_first_page(self):
count, options = paginate_options(self.data_sources, "", 0, 5)
self.assertEqual(count, 9)
self.assertEqual(
options,
["<NAME>", "<NAME>", "Queen", "Oslo", "Baldwin"],
)
def test_second_page(self):
count, options = paginate_options(self.data_sources, "", 5, 10)
self.assertEqual(count, 9)
self.assertEqual(
options,
["Perth", "Quito", "Jdoe", "Rumpelstiltskin"],
)
def test_query_first_page(self):
query = "o"
count, options = paginate_options(self.data_sources, query, 0, 5)
self.assertEqual(count, 4)
self.assertEqual(options, ["<NAME>", "Oslo", "Quito", "Jdoe"])
def test_query_no_matches(self):
query = "Waldo"
count, options = paginate_options(self.data_sources, query, 0, 5)
self.assertEqual(count, 0)
self.assertEqual(options, [])
```
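The tests above fully pin down the contract of `paginate_options`: each data source is a `(get_size, get_objects)` pair, the total is the sum of per-source match counts, and the requested page is sliced across the sources in order. The sketch below is one implementation that satisfies these tests; it illustrates the contract and is not the actual corehq implementation:
```python
def paginate_options(data_sources, query, start, size):
    """Return (total_count, page_of_options) across several data sources.

    Each data source is a (get_size, get_objects) pair, exactly as built by
    make_data_source() in the tests above.
    """
    total = sum(get_size(query) for get_size, _ in data_sources)
    options = []
    offset, remaining = start, size
    for get_size, get_objects in data_sources:
        if remaining <= 0:
            break
        source_size = get_size(query)
        if offset >= source_size:
            # The whole source lies before the requested page; skip past it.
            offset -= source_size
            continue
        chunk = get_objects(query, offset, remaining)
        options.extend(chunk)
        remaining -= len(chunk)
        offset = 0
    return total, options
```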
#### File: reports/tests/test_form_export.py
```python
from StringIO import StringIO
import json
import os
from django.core.urlresolvers import reverse
from django.test import TestCase
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.reports.models import FormExportSchema
from corehq.apps.users.models import CommCareUser
from couchexport.models import Format
from django_digest.test import Client
XMLNS = 'http://www.commcarehq.org/example/hello-world'
XFORM_ID = '50fa6deb-91f3-4f9b-9d4c-f5ed312457fa'
XML_DATA = """<?xml version='1.0' ?>
<data uiVersion="1" version="63" name="Hello World" xmlns:jrm="http://dev.commcarehq.org/jr/xforms" xmlns="{xmlns}">
<name>S</name>
<color>1</color>
<date>2012-10-02</date>
<n1:meta xmlns:n1="http://openrosa.org/jr/xforms">
<n1:deviceID>cloudcare</n1:deviceID>
<n1:timeStart>2012-10-15T15:26:02.386-04</n1:timeStart>
<n1:timeEnd>2012-10-15T15:26:14.745-04</n1:timeEnd>
<n1:username>user1</n1:username>
<n1:userID>{user_id}</n1:userID>
<n1:instanceID>{xform_id}</n1:instanceID>
<n2:appVersion xmlns:n2="http://commcarehq.org/xforms">2.0</n2:appVersion>
</n1:meta>
</data>
"""
class FormExportTest(TestCase):
def setUp(self):
self.app_id = 'kasdlfkjsldfkjsdlkjf'
self.domain_name = 'form-export-test'
self.domain = create_domain(self.domain_name)
self.username = 'danny'
self.couch_user = CommCareUser.create(self.domain_name, self.username,
password='<PASSWORD>')
self.couch_user.save()
self.client = Client()
self.client.login(username=self.couch_user.username, password='<PASSWORD>')
self.url = reverse("receiver_post_with_app_id",
args=[self.domain_name, self.app_id])
self.custom_export = FormExportSchema.wrap({
'type': 'form',
'app_id': self.app_id,
'default_format': Format.JSON,
'index': json.dumps([self.domain_name, XMLNS]),
'tables': [{
'index': '#',
'display': 'Export',
'columns': [{'index': 'form.name', 'display': 'Name'}],
}]
})
def tearDown(self):
self.couch_user.delete()
def post_it(self, user_id=None, form_id=XFORM_ID):
user_id = user_id or self.couch_user._id
f = StringIO(XML_DATA.format(
user_id=user_id,
xmlns=XMLNS,
xform_id=form_id,
))
f.name = 'form.xml'
return self.client.post(self.url, {'xml_submission_file': f})
def test_include_duplicates(self):
self.post_it()
self.post_it()
self.custom_export.include_errors = True
files = self.custom_export.get_export_files()
data = json.loads(files.file.payload)
self.assertEqual(data['Export']['headers'], ['Name'])
self.assertEqual(len(data['Export']['rows']), 2)
self.custom_export.include_errors = False
files = self.custom_export.get_export_files()
data = json.loads(files.file.payload)
self.assertEqual(data['Export']['headers'], ['Name'])
self.assertEqual(len(data['Export']['rows']), 1)
def test_exclude_unknown_users(self):
self.post_it(form_id='good', user_id=self.couch_user._id)
files = self.custom_export.get_export_files()
data = json.loads(files.file.payload)
self.assertEqual(len(data['Export']['rows']), 1)
# posting from a non-real user shouldn't update
self.post_it(form_id='bad', user_id='notarealuser')
files = self.custom_export.get_export_files()
data = json.loads(files.file.payload)
self.assertEqual(len(data['Export']['rows']), 1)
# posting from the real user should update
self.post_it(form_id='stillgood', user_id=self.couch_user._id)
files = self.custom_export.get_export_files()
data = json.loads(files.file.payload)
self.assertEqual(len(data['Export']['rows']), 2)
```
#### File: apps/settings/views.py
```python
import re
from django.views.decorators.debug import sensitive_post_parameters
from corehq.apps.hqwebapp.models import MySettingsTab
from corehq.apps.style.decorators import use_bootstrap3, use_select2
from dimagi.utils.couch.resource_conflict import retry_resource
from django.contrib import messages
from django.contrib.auth.forms import PasswordChangeForm
from django.views.decorators.http import require_POST
import langcodes
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_noop, ugettext_lazy
from corehq.apps.domain.decorators import (login_and_domain_required, require_superuser,
login_required)
from django.core.urlresolvers import reverse
from corehq.apps.domain.views import BaseDomainView
from corehq.apps.hqwebapp.views import BaseSectionPageView
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import json_response
from dimagi.utils.couch import CriticalSection
import corehq.apps.style.utils as style_utils
from tastypie.models import ApiKey
@login_and_domain_required
def default(request, domain):
return HttpResponseRedirect(reverse("users_default", args=[domain]))
@login_and_domain_required
def redirect_users(request, domain, old_url=""):
return HttpResponseRedirect(reverse("users_default", args=[domain]))
@login_and_domain_required
def redirect_domain_settings(request, domain, old_url=""):
return HttpResponseRedirect(reverse("domain_forwarding", args=[domain]))
@require_superuser
def project_id_mapping(request, domain):
from corehq.apps.users.models import CommCareUser
from corehq.apps.groups.models import Group
users = CommCareUser.by_domain(domain)
groups = Group.by_domain(domain)
return json_response({
'users': dict([(user.raw_username, user.user_id) for user in users]),
'groups': dict([(group.name, group.get_id) for group in groups]),
})
class BaseMyAccountView(BaseSectionPageView):
section_name = ugettext_lazy("My Account")
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
# this is only here to add the login_required decorator
return super(BaseMyAccountView, self).dispatch(request, *args, **kwargs)
@property
def page_url(self):
return reverse(self.urlname)
@property
def main_context(self):
context = super(BaseMyAccountView, self).main_context
context.update({
'active_tab': MySettingsTab(
self.request,
self.urlname,
couch_user=self.request.couch_user
),
'is_my_account_settings': True,
})
return context
@property
def section_url(self):
return reverse(MyAccountSettingsView.urlname)
class DefaultMySettingsView(BaseMyAccountView):
urlname = "default_my_settings"
def get(self, request, *args, **kwargs):
return HttpResponseRedirect(reverse(MyAccountSettingsView.urlname))
class MyAccountSettingsView(BaseMyAccountView):
urlname = 'my_account_settings'
page_title = ugettext_lazy("My Information")
api_key = None
template_name = 'settings/edit_my_account.b2.html'
def get_or_create_api_key(self):
if not self.api_key:
with CriticalSection(['get-or-create-api-key-for-%d' % self.request.user.id]):
api_key, _ = ApiKey.objects.get_or_create(user=self.request.user)
self.api_key = api_key.key
return self.api_key
@property
@memoized
def settings_form(self):
language_choices = langcodes.get_all_langs_for_select()
api_key = self.get_or_create_api_key()
from corehq.apps.users.forms import UpdateMyAccountInfoForm
if self.request.method == 'POST':
form = UpdateMyAccountInfoForm(
self.request.POST, username=self.request.couch_user.username,
api_key=api_key
)
else:
form = UpdateMyAccountInfoForm(
username=self.request.couch_user.username,
api_key=api_key
)
try:
domain = self.request.domain
except AttributeError:
domain = ''
form.initialize_form(domain, existing_user=self.request.couch_user)
form.load_language(language_choices)
return form
@property
def page_context(self):
user = self.request.couch_user
return {
'form': self.settings_form,
'api_key': self.get_or_create_api_key(),
'phonenumbers': user.phone_numbers_extended(user),
'user_type': 'mobile' if user.is_commcare_user() else 'web',
}
def phone_number_is_valid(self):
return (
isinstance(self.phone_number, basestring) and
re.compile('^\d+$').match(self.phone_number) is not None
)
def process_add_phone_number(self):
if self.phone_number_is_valid():
user = self.request.couch_user
user.add_phone_number(self.phone_number)
user.save()
messages.success(self.request, _("Phone number added."))
else:
messages.error(self.request, _("Invalid phone number format entered. "
"Please enter number, including country code, in digits only."))
return HttpResponseRedirect(reverse(MyAccountSettingsView.urlname))
def process_delete_phone_number(self):
self.request.couch_user.delete_phone_number(self.phone_number)
messages.success(self.request, _("Phone number deleted."))
return HttpResponseRedirect(reverse(MyAccountSettingsView.urlname))
def process_make_phone_number_default(self):
self.request.couch_user.set_default_phone_number(self.phone_number)
messages.success(self.request, _("Primary phone number updated."))
return HttpResponseRedirect(reverse(MyAccountSettingsView.urlname))
@property
@memoized
def phone_number(self):
return self.request.POST.get('phone_number')
@property
@memoized
def form_actions(self):
return {
'add-phonenumber': self.process_add_phone_number,
'delete-phone-number': self.process_delete_phone_number,
'make-phone-number-default': self.process_make_phone_number_default,
}
@property
@memoized
def form_type(self):
return self.request.POST.get('form_type')
def post(self, request, *args, **kwargs):
if self.form_type and self.form_type in self.form_actions:
return self.form_actions[self.form_type]()
if self.settings_form.is_valid():
old_lang = self.request.couch_user.language
self.settings_form.update_user(existing_user=self.request.couch_user)
new_lang = self.request.couch_user.language
# set language in the session so it takes effect immediately
if new_lang != old_lang:
request.session['django_language'] = new_lang
return self.get(request, *args, **kwargs)
class MyProjectsList(BaseMyAccountView):
urlname = 'my_projects'
page_title = ugettext_lazy("My Projects")
template_name = 'settings/my_projects.html'
@property
def all_domains(self):
all_domains = self.request.couch_user.get_domains()
for d in all_domains:
yield {
'name': d,
'is_admin': self.request.couch_user.is_domain_admin(d)
}
@property
def page_context(self):
return {
'domains': self.all_domains
}
@property
@memoized
def domain_to_remove(self):
if self.request.method == 'POST':
return self.request.POST['domain']
def post(self, request, *args, **kwargs):
if self.request.couch_user.is_domain_admin(self.domain_to_remove):
messages.error(request, _("Unable remove membership because you are the admin of %s")
% self.domain_to_remove)
else:
try:
self.request.couch_user.delete_domain_membership(self.domain_to_remove, create_record=True)
self.request.couch_user.save()
messages.success(request, _("You are no longer part of the project %s") % self.domain_to_remove)
except Exception:
messages.error(request, _("There was an error removing you from this project."))
return self.get(request, *args, **kwargs)
class ChangeMyPasswordView(BaseMyAccountView):
urlname = 'change_my_password'
template_name = 'settings/change_my_password.html'
page_title = ugettext_lazy("Change My Password")
@property
@memoized
def password_change_form(self):
if self.request.method == 'POST':
return PasswordChangeForm(user=self.request.user, data=self.request.POST)
return PasswordChangeForm(user=self.request.user)
@property
def page_context(self):
return {
'form': self.password_change_form,
}
@method_decorator(sensitive_post_parameters())
def post(self, request, *args, **kwargs):
if self.password_change_form.is_valid():
self.password_change_form.save()
messages.success(request, _("Your password was successfully changed!"))
return self.get(request, *args, **kwargs)
class BaseProjectDataView(BaseDomainView):
section_name = ugettext_noop("Data")
@property
def section_url(self):
return reverse('data_interfaces_default', args=[self.domain])
@require_POST
@retry_resource(3)
def keyboard_config(request):
request.couch_user.keyboard_shortcuts["enabled"] = bool(request.POST.get('enable'))
request.couch_user.keyboard_shortcuts["main_key"] = request.POST.get('main-key', 'option')
request.couch_user.save()
return HttpResponseRedirect(request.GET.get('next'))
@require_POST
@login_required
def new_api_key(request):
api_key = ApiKey.objects.get(user=request.user)
api_key.key = api_key.generate_key()
api_key.save()
return HttpResponse(api_key.key)
```
#### File: sms/backend/test.py
```python
from couchdbkit import ResourceNotFound
from corehq.apps.sms.forms import BackendForm
from corehq.apps.sms.mixin import SMSBackend
from dimagi.utils.couch.database import get_safe_write_kwargs
from dimagi.ext.couchdbkit import *
# TODO: What uses this? There already is a test backend
class TestBackend(SMSBackend):
to_console = BooleanProperty(default=False)
@classmethod
def get_api_id(cls):
return "TEST"
def send(self, msg, *args, **kwargs):
"""
The test backend does very little.
"""
if self.to_console:
print msg
@classmethod
def get_form_class(cls):
return BackendForm
@classmethod
def get_generic_name(cls):
return "Test Backend"
def bootstrap(id=None, to_console=True):
"""
Create an instance of the test backend in the database
"""
if id:
try:
return TestBackend.get(id)
except ResourceNotFound:
pass
backend = TestBackend(
description='test backend',
is_global=True,
to_console=to_console,
)
if id:
backend._id = id
backend.name = id.strip().upper()
backend.save(**get_safe_write_kwargs())
return backend
```
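A short usage sketch for the `bootstrap` helper above. The import path is inferred from the file header, the id is arbitrary, and actually saving the backend requires a configured CouchDB connection:
```python
# Assumes a configured corehq/CouchDB environment; the id below is arbitrary.
from corehq.apps.sms.backend.test import bootstrap

backend = bootstrap(id='console-test-backend', to_console=True)
print(backend.name)       # 'CONSOLE-TEST-BACKEND' -- the id, stripped and upper-cased
print(backend.is_global)  # True
```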
#### File: management/commands/bootstrap_grapevine_gateway_update.py
```python
from decimal import Decimal
import logging
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.messaging.smsbackends.grapevine.api import GrapevineBackend
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import SmsGatewayFee, SmsGatewayFeeCriteria
logger = logging.getLogger('accounting')
def bootstrap_grapevine_gateway_update(apps):
currency_class = apps.get_model('accounting', 'Currency') if apps else Currency
sms_gateway_fee_class = apps.get_model('smsbillables', 'SmsGatewayFee') if apps else SmsGatewayFee
sms_gateway_fee_criteria_class = apps.get_model('smsbillables', 'SmsGatewayFeeCriteria') if apps else SmsGatewayFeeCriteria
currency = currency_class.objects.get_or_create(code="ZAR")[0]
# Incoming message to South Africa
SmsGatewayFee.create_new(
GrapevineBackend.get_api_id(), INCOMING, Decimal('0.65'),
country_code='27',
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
# Outgoing message from South Africa
SmsGatewayFee.create_new(
GrapevineBackend.get_api_id(), OUTGOING, Decimal('0.22'),
country_code='27',
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
# Explicitly include Lesotho fees for pricing table UI.
# Incoming message to Lesotho
SmsGatewayFee.create_new(
GrapevineBackend.get_api_id(), INCOMING, Decimal('0.90'),
country_code='266',
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
# Outgoing message from Lesotho
SmsGatewayFee.create_new(
GrapevineBackend.get_api_id(), OUTGOING, Decimal('0.90'),
country_code='266',
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
# Incoming message to arbitrary country
SmsGatewayFee.create_new(
GrapevineBackend.get_api_id(), INCOMING, Decimal('0.90'),
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
# Outgoing message from arbitrary country
SmsGatewayFee.create_new(
GrapevineBackend.get_api_id(), OUTGOING, Decimal('0.90'),
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
logger.info("Updated Global Grapevine gateway fees.")
class Command(LabelCommand):
help = "update Grapevine gateway fees"
args = ""
label = ""
def handle(self, *args, **options):
bootstrap_grapevine_gateway_update(None)
```
#### File: management/commands/bootstrap_tropo_gateway.py
```python
import logging
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.messaging.smsbackends.tropo.api import TropoBackend
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import SmsGatewayFee, SmsGatewayFeeCriteria
logger = logging.getLogger('accounting')
def bootstrap_tropo_gateway(apps):
currency = (apps.get_model('accounting', 'Currency') if apps else Currency).objects.get(code="USD")
sms_gateway_fee_class = apps.get_model('smsbillables', 'SmsGatewayFee') if apps else SmsGatewayFee
sms_gateway_fee_criteria_class = apps.get_model('smsbillables', 'SmsGatewayFeeCriteria') if apps else SmsGatewayFeeCriteria
SmsGatewayFee.create_new(
TropoBackend.get_api_id(),
INCOMING,
0.01,
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
rates_csv = open('corehq/apps/smsbillables/management/'
'pricing_data/tropo_international_rates_2013-12-19.csv', 'r')
for line in rates_csv.readlines():
data = line.split(',')
if data[1] == 'Fixed Line' and data[4] != '\n':
SmsGatewayFee.create_new(
TropoBackend.get_api_id(),
OUTGOING,
float(data[4].rstrip()),
country_code=int(data[2]),
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
rates_csv.close()
# Fee for invalid phonenumber
SmsGatewayFee.create_new(
TropoBackend.get_api_id(), OUTGOING, 0.01,
country_code=None,
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
logger.info("Updated Tropo gateway fees.")
class Command(LabelCommand):
help = "bootstrap Tropo gateway fees"
args = ""
label = ""
def handle(self, *args, **options):
bootstrap_tropo_gateway(None)
```
#### File: management/commands/bootstrap_unicel_gateway.py
```python
import logging
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import SmsGatewayFee, SmsGatewayFeeCriteria
from corehq.messaging.smsbackends.unicel.api import UnicelBackend
logger = logging.getLogger('accounting')
def bootstrap_unicel_gateway(apps):
currency = (apps.get_model('accounting.Currency') if apps else Currency).objects.get(code="INR")
sms_gateway_fee_class = apps.get_model('smsbillables.SmsGatewayFee') if apps else SmsGatewayFee
sms_gateway_fee_criteria_class = apps.get_model('smsbillables.SmsGatewayFeeCriteria') if apps else SmsGatewayFeeCriteria
SmsGatewayFee.create_new(UnicelBackend.get_api_id(), INCOMING, 0.50,
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class)
SmsGatewayFee.create_new(UnicelBackend.get_api_id(), OUTGOING, 0.50,
currency=currency,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class)
logger.info("Updated Unicel gateway fees.")
class Command(LabelCommand):
help = "bootstrap Unicel gateway fees"
args = ""
label = ""
def handle(self, *labels, **options):
bootstrap_unicel_gateway(None)
```
#### File: apps/smsbillables/models.py
```python
import logging
from decimal import Decimal
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from corehq.apps.accounting import models as accounting
from corehq.apps.accounting.models import Currency
from corehq.apps.accounting.utils import EXCHANGE_RATE_DECIMAL_PLACES
from corehq.apps.sms.mixin import SMSBackend
from corehq.apps.sms.models import DIRECTION_CHOICES
from corehq.apps.sms.phonenumbers_helper import get_country_code_and_national_number
from corehq.messaging.smsbackends.test.api import TestSMSBackend
from corehq.apps.sms.util import clean_phone_number
from corehq.apps.smsbillables.exceptions import AmbiguousPrefixException
from corehq.util.quickcache import quickcache
smsbillables_logging = logging.getLogger("smsbillables")
class SmsGatewayFeeCriteria(models.Model):
"""
These are the parameters we'll use to try and calculate the cost of sending a message through
our gateways. We configure the SMS fee criteria based on parameters given to us by specific
gateway providers.
Nullable fields indicate criteria that can be applied globally to all messages with no specific matches
for that field.
"""
backend_api_id = models.CharField(max_length=100, db_index=True)
backend_instance = models.CharField(max_length=255, db_index=True, null=True)
direction = models.CharField(max_length=10, db_index=True, choices=DIRECTION_CHOICES)
country_code = models.IntegerField(max_length=5, null=True, blank=True, db_index=True)
prefix = models.CharField(max_length=10, blank=True, default="", db_index=True)
class Meta:
app_label = 'smsbillables'
@classmethod
def get_most_specific(cls, backend_api_id, direction,
backend_instance=None, country_code=None, national_number=None):
"""
Gets the most specific criteria available based on (and in order of preference for optional):
- backend_api_id
- direction
- backend_instance (optional)
- country_code and prefix (optional)
"""
all_possible_criteria = cls.objects.filter(backend_api_id=backend_api_id, direction=direction)
if all_possible_criteria.count() == 0:
return None
national_number = national_number or ""
def get_criteria_with_longest_matching_prefix(criteria_list):
if len(set(criteria.prefix for criteria in criteria_list)) != len(criteria_list):
raise AmbiguousPrefixException(
", ".join(
"%(country_code)d, '%(prefix)s'" % {
"country_code": criteria.country_code,
"prefix": criteria.prefix,
} for criteria in criteria_list
)
)
criteria_list.sort(key=(lambda criteria: len(criteria.prefix)), reverse=True)
for criteria in criteria_list:
if national_number.startswith(criteria.prefix):
return criteria
raise ObjectDoesNotExist
try:
return get_criteria_with_longest_matching_prefix(
list(all_possible_criteria.filter(country_code=country_code, backend_instance=backend_instance))
)
except ObjectDoesNotExist:
pass
try:
return all_possible_criteria.get(country_code=None, backend_instance=backend_instance)
except ObjectDoesNotExist:
pass
try:
return get_criteria_with_longest_matching_prefix(
list(all_possible_criteria.filter(country_code=country_code, backend_instance=None))
)
except ObjectDoesNotExist:
pass
try:
return all_possible_criteria.get(country_code=None, backend_instance=None)
except ObjectDoesNotExist:
pass
return None
class SmsGatewayFee(models.Model):
"""
The fee for sending or receiving an SMS Message based on gateway.
When an SmsBillable is calculated, it will use the most recent SmsFee available from the criteria
to determine the gateway_charge.
Once an SmsFee is created, it cannot be modified.
"""
criteria = models.ForeignKey(SmsGatewayFeeCriteria, on_delete=models.PROTECT)
amount = models.DecimalField(default=0.0, max_digits=10, decimal_places=4)
currency = models.ForeignKey(accounting.Currency, on_delete=models.PROTECT)
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = 'smsbillables'
@classmethod
def create_new(cls, backend_api_id, direction, amount,
currency=None, backend_instance=None, country_code=None, prefix=None,
save=True, fee_class=None, criteria_class=None):
fee_class = fee_class or cls
criteria_class = criteria_class or SmsGatewayFeeCriteria
currency = currency or Currency.get_default()
if 'prefix' in [
field.name
for field, _ in criteria_class._meta.get_fields_with_model()
]:
prefix = prefix or ''
criteria, _ = criteria_class.objects.get_or_create(
backend_api_id=backend_api_id,
direction=direction,
backend_instance=backend_instance,
country_code=country_code,
prefix=prefix,
)
else:
criteria, _ = criteria_class.objects.get_or_create(
backend_api_id=backend_api_id,
direction=direction,
backend_instance=backend_instance,
country_code=country_code,
)
new_fee = fee_class(
currency=currency,
amount=amount,
criteria=criteria
)
if save:
new_fee.save()
return new_fee
@classmethod
def get_by_criteria(cls, backend_api_id, direction,
backend_instance=None, country_code=None, national_number=None):
criteria = SmsGatewayFeeCriteria.get_most_specific(
backend_api_id,
direction,
backend_instance=backend_instance,
country_code=country_code,
national_number=national_number,
)
return cls.get_by_criteria_obj(criteria)
@classmethod
def get_by_criteria_obj(cls, criteria):
if not criteria:
return None
return cls.objects.filter(criteria=criteria.id).latest('date_created')
class SmsUsageFeeCriteria(models.Model):
"""
Criteria for determining a usage fee applied for each SMS message sent or received.
Nullable fields indicate criteria that can be applied globally to all messages with no specific matches
for that field.
"""
direction = models.CharField(max_length=10, db_index=True, choices=DIRECTION_CHOICES)
domain = models.CharField(max_length=25, db_index=True, null=True)
class Meta:
app_label = 'smsbillables'
@classmethod
def get_most_specific(cls, direction, domain=None):
"""
Gets the most specific criteria available based on (and in order of preference for optional):
- direction
- domain (optional)
"""
all_possible_criteria = cls.objects.filter(direction=direction)
if all_possible_criteria.count() == 0:
return None
try:
return all_possible_criteria.get(domain=domain)
except ObjectDoesNotExist:
pass
try:
return all_possible_criteria.get(domain=None)
except ObjectDoesNotExist:
pass
return None
class SmsUsageFee(models.Model):
"""
The usage fee, with version information, based on domain or globally.
When an SmsBillable is calculated, it will use the most recent SmsUsageFee available from the
criteria to determine the usage_charge.
Currency is always in USD since this is something we control.
Once an SmsUsageFee is created, it cannot be modified.
"""
criteria = models.ForeignKey(SmsUsageFeeCriteria, on_delete=models.PROTECT)
amount = models.DecimalField(default=0.0, max_digits=10, decimal_places=4)
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
app_label = 'smsbillables'
@classmethod
def create_new(cls, direction, amount, domain=None, save=True):
criteria, _ = SmsUsageFeeCriteria.objects.get_or_create(
domain=domain, direction=direction,
)
new_fee = SmsUsageFee(
amount=amount,
criteria=criteria
)
if save:
new_fee.save()
return new_fee
@classmethod
def get_by_criteria(cls, direction, domain=None):
criteria = SmsUsageFeeCriteria.get_most_specific(direction, domain=domain)
if not criteria:
return None
return cls.objects.filter(criteria=criteria.id).latest('date_created')
@quickcache(['sms_backend_id'])
def _sms_backend_is_global(sms_backend_id):
return SMSBackend.get(sms_backend_id).is_global
class SmsBillable(models.Model):
"""
A record of matching a fee to a particular MessageLog (or SMSLog).
If on closer inspection we determine a particular SmsBillable is invalid (whether something is
awry with the api_response, or we used the incorrect fee and want to recalculate) we can set
this billable to is_valid = False and it will not be used toward calculating the SmsLineItem in
the monthly Invoice.
"""
gateway_fee = models.ForeignKey(SmsGatewayFee, null=True, on_delete=models.PROTECT)
gateway_fee_conversion_rate = models.DecimalField(default=Decimal('1.0'), null=True, max_digits=20,
decimal_places=EXCHANGE_RATE_DECIMAL_PLACES)
usage_fee = models.ForeignKey(SmsUsageFee, null=True, on_delete=models.PROTECT)
log_id = models.CharField(max_length=50, db_index=True)
phone_number = models.CharField(max_length=50)
api_response = models.TextField(null=True, blank=True)
is_valid = models.BooleanField(default=True, db_index=True)
domain = models.CharField(max_length=25, db_index=True)
direction = models.CharField(max_length=10, db_index=True, choices=DIRECTION_CHOICES)
date_sent = models.DateField()
date_created = models.DateField(auto_now_add=True)
class Meta:
app_label = 'smsbillables'
@property
def gateway_charge(self):
if self.gateway_fee is not None:
try:
charge = SmsGatewayFee.objects.get(id=self.gateway_fee.id)
if self.gateway_fee_conversion_rate is not None:
return charge.amount / self.gateway_fee_conversion_rate
return charge.amount
except ObjectDoesNotExist:
pass
return Decimal('0.0')
@property
def usage_charge(self):
if self.usage_fee is not None:
try:
charge = SmsUsageFee.objects.get(id=self.usage_fee.id)
return charge.amount
except ObjectDoesNotExist:
pass
return Decimal('0.0')
@classmethod
def create(cls, message_log, api_response=None):
phone_number = clean_phone_number(message_log.phone_number)
direction = message_log.direction
billable = cls(
log_id=message_log._id,
phone_number=phone_number,
direction=direction,
date_sent=message_log.date,
domain=message_log.domain,
)
# Fetch gateway_fee
backend_api_id = message_log.backend_api
backend_instance = message_log.backend_id
country_code, national_number = get_country_code_and_national_number(phone_number)
if backend_instance is None or _sms_backend_is_global(backend_instance):
billable.gateway_fee = SmsGatewayFee.get_by_criteria(
backend_api_id,
direction,
backend_instance=backend_instance,
country_code=country_code,
national_number=national_number,
)
if billable.gateway_fee is not None:
conversion_rate = billable.gateway_fee.currency.rate_to_default
if conversion_rate != 0:
billable.gateway_fee_conversion_rate = conversion_rate
else:
smsbillables_logging.error("Gateway fee conversion rate for currency %s is 0",
billable.gateway_fee.currency.code)
else:
smsbillables_logging.error(
"No matching gateway fee criteria for SMSLog %s" % message_log._id
)
# Fetch usage_fee todo
domain = message_log.domain
billable.usage_fee = SmsUsageFee.get_by_criteria(
direction, domain=domain
)
if billable.usage_fee is None:
smsbillables_logging.error("Did not find usage fee for direction %s and domain %s"
% (direction, domain))
if api_response is not None:
billable.api_response = api_response
if backend_api_id == TestSMSBackend.get_api_id():
billable.is_valid = False
billable.save()
return billable
```
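The most intricate part of `SmsGatewayFeeCriteria.get_most_specific` above is the prefix selection: among the candidate rows, duplicate prefixes are an error, and otherwise the longest prefix that the national number starts with wins (an empty prefix acts as a catch-all). A standalone sketch of just that step, with plain strings in place of criteria objects and illustrative exception names:
```python
class AmbiguousPrefixError(Exception):
    """Illustrative stand-in for AmbiguousPrefixException."""

def longest_matching_prefix(prefixes, national_number):
    # Mirror of get_criteria_with_longest_matching_prefix(): duplicate
    # prefixes are ambiguous; otherwise sort longest-first and return the
    # first prefix the number starts with.
    if len(set(prefixes)) != len(prefixes):
        raise AmbiguousPrefixError(", ".join(repr(p) for p in prefixes))
    for prefix in sorted(prefixes, key=len, reverse=True):
        if national_number.startswith(prefix):
            return prefix
    raise LookupError("no prefix matches %r" % national_number)  # stands in for ObjectDoesNotExist

print(repr(longest_matching_prefix(["", "7", "70"], "700000000")))  # '70' beats '7' and the catch-all ''
print(repr(longest_matching_prefix(["", "75"], "800000000")))       # '' -- the catch-all matches anything
```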
#### File: apps/sms/dbaccessors.py
```python
from corehq.apps.sms.models import ForwardingRule
def get_forwarding_rules_for_domain(domain):
return ForwardingRule.view(
"domain/docs",
startkey=[domain, 'ForwardingRule'],
endkey=[domain, 'ForwardingRule', {}],
include_docs=True,
reduce=False,
).all()
```
#### File: management/commands/migrate_sms_to_sql.py
```python
from corehq.apps.sms.models import SMSLog, SMS
from custom.fri.models import FRISMSLog
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
args = ""
help = ("Migrates SMSLog to SMS")
option_list = BaseCommand.option_list + (
make_option("--balance_only",
action="store_true",
dest="balance_only",
default=False,
help="Include this option to only run the balancing step."),
)
def get_sms_couch_ids(self):
result = SMSLog.view(
'sms/by_domain',
include_docs=False,
reduce=False,
).all()
return [row['id'] for row in result if row['key'][1] == 'SMSLog']
def clean_doc(self, doc):
"""
Some old docs apparently have +00:00Z at the end of the date string,
which is not a valid timezone specification.
        Also, because of http://manage.dimagi.com/default.asp?111189, there are
        9 docs with very long phone numbers that should just be replaced
        with null because there was no recipient for those SMS.
"""
date = doc.get('date')
if isinstance(date, basestring) and date.endswith('+00:00Z'):
date = date[:-7] + 'Z'
doc['date'] = date
phone_number = doc.get('phone_number')
if isinstance(phone_number, basestring) and len(phone_number) > 126:
doc['phone_number'] = None
def run_migration(self):
count = 0
ids = self.get_sms_couch_ids()
total_count = len(ids)
for doc in iter_docs(FRISMSLog.get_db(), ids):
try:
self.clean_doc(doc)
couch_sms = FRISMSLog.wrap(doc)
couch_sms._migration_do_sync()
except Exception as e:
print 'Could not sync SMSLog %s: %s' % (doc['_id'], e)
count += 1
if (count % 10000) == 0:
print 'Processed %s / %s documents' % (count, total_count)
def balance(self):
sql_count = SMS.objects.count()
couch_count = len(self.get_sms_couch_ids())
print "SQL Count: %s, Couch Count: %s" % (sql_count, couch_count)
def handle(self, *args, **options):
if not options['balance_only']:
self.run_migration()
self.balance()
```
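`clean_doc` above quietly repairs two known data problems before wrapping each document: a spurious `+00:00Z` suffix on the date string, and impossibly long phone numbers that are nulled out. The same clean-up on a plain dict, shown standalone (`str` replaces the Python 2 `basestring` check so the sketch also runs on Python 3):
```python
def clean_doc(doc):
    # Strip the invalid '+00:00Z' timezone suffix some old docs carry.
    date = doc.get('date')
    if isinstance(date, str) and date.endswith('+00:00Z'):
        doc['date'] = date[:-7] + 'Z'
    # Null out phone numbers far too long to be real recipients.
    phone_number = doc.get('phone_number')
    if isinstance(phone_number, str) and len(phone_number) > 126:
        doc['phone_number'] = None

doc = {'date': '2013-05-01T12:00:00+00:00Z', 'phone_number': '1' * 200}
clean_doc(doc)
print(doc['date'])          # 2013-05-01T12:00:00Z
print(doc['phone_number'])  # None
```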
#### File: sms/tests/__init__.py
```python
from .opt_tests import *
from .migration import *
from .test_dbaccessors import *
from .test_all_backends import *
from corehq.apps.domain.calculations import num_mobile_users
from corehq.apps.domain.models import Domain
from corehq.apps.sms.api import (send_sms, send_sms_to_verified_number,
    send_sms_with_backend, send_sms_with_backend_name)
from corehq.apps.sms.mixin import (SMSBackend, BackendMapping,  # BackendMapping is used below; assumed to live in sms.mixin
    BadSMSConfigException, MobileBackend, apply_leniency)
from corehq.apps.sms.models import CommConnectCase
from corehq.apps.sms.util import get_contact
from corehq.apps.sms.tests.util import BaseSMSTest
from dimagi.ext.couchdbkit import *
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.case.models import CommCareCase
from corehq.apps.users.models import CommCareUser
from django.contrib.sites.models import Site
from corehq.apps.users.util import format_username
from django.conf import settings
from django.test import TestCase
from corehq.apps.accounting import generator
class BackendInvocationDoc(Document):
pass
class TestCaseBackend(SMSBackend):
@classmethod
def get_api_id(cls):
return "TEST_CASE_BACKEND"
def send(self, msg, *args, **kwargs):
self.create_invoke_doc()
print "***************************************************"
print "Backend: %s" % self.name
print "Message To: %s" % msg.phone_number
print "Message Content: %s" % msg.text
print "***************************************************"
def get_invoke_doc_id(self):
return "SEND-INVOKED-FROM-%s" % self._id
def create_invoke_doc(self):
if not self.invoke_doc_exists():
doc = BackendInvocationDoc(_id=self.get_invoke_doc_id())
doc.save()
def delete_invoke_doc(self):
try:
doc = BackendInvocationDoc.get(self.get_invoke_doc_id())
doc.delete()
except ResourceNotFound:
pass
def invoke_doc_exists(self):
try:
BackendInvocationDoc.get(self.get_invoke_doc_id())
return True
except ResourceNotFound:
return False
class BackendTestCase(BaseSMSTest):
def get_or_create_site(self):
site, created = Site.objects.get_or_create(id=settings.SITE_ID)
if created:
site.domain = 'localhost'
site.name = 'localhost'
site.save()
return (site, created)
def setUp(self):
super(BackendTestCase, self).setUp()
self.domain = "test-domain"
self.domain2 = "test-domain2"
self.site, self.site_created = self.get_or_create_site()
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
self.create_account_and_subscription(self.domain_obj.name)
self.domain_obj = Domain.get(self.domain_obj._id) # Prevent resource conflict
self.backend1 = TestCaseBackend(name="BACKEND1",is_global=True)
self.backend1.save()
self.backend2 = TestCaseBackend(name="BACKEND2",is_global=True)
self.backend2.save()
self.backend3 = TestCaseBackend(name="BACKEND3",is_global=True)
self.backend3.save()
self.backend4 = TestCaseBackend(name="BACKEND4",is_global=True)
self.backend4.save()
self.backend5 = TestCaseBackend(name="BACKEND5",domain=self.domain,is_global=False,authorized_domains=[])
self.backend5.save()
self.backend6 = TestCaseBackend(name="BACKEND6",domain=self.domain2,is_global=False,authorized_domains=[self.domain])
self.backend6.save()
self.backend7 = TestCaseBackend(name="BACKEND7",domain=self.domain2,is_global=False,authorized_domains=[])
self.backend7.save()
self.backend8 = TestCaseBackend(name="BACKEND",domain=self.domain,is_global=False,authorized_domains=[])
self.backend8.save()
self.backend9 = TestCaseBackend(name="BACKEND",domain=self.domain2,is_global=False,authorized_domains=[self.domain])
self.backend9.save()
self.backend10 = TestCaseBackend(name="BACKEND",is_global=True)
self.backend10.save()
self.backend_mapping1 = BackendMapping(is_global=True,prefix="*",backend_id=self.backend1._id)
self.backend_mapping1.save()
self.backend_mapping2 = BackendMapping(is_global=True,prefix="1",backend_id=self.backend2._id)
self.backend_mapping2.save()
self.backend_mapping3 = BackendMapping(is_global=True,prefix="91",backend_id=self.backend3._id)
self.backend_mapping3.save()
self.backend_mapping4 = BackendMapping(is_global=True,prefix="265",backend_id=self.backend4._id)
self.backend_mapping4.save()
self.backend_mapping5 = BackendMapping(is_global=True, prefix="256", backend_id=self.backend5._id)
self.backend_mapping5.save()
self.backend_mapping6 = BackendMapping(is_global=True, prefix="25670", backend_id=self.backend6._id)
self.backend_mapping6.save()
self.backend_mapping7 = BackendMapping(is_global=True, prefix="25675", backend_id=self.backend7._id)
self.backend_mapping7.save()
self.case = CommCareCase(domain=self.domain)
self.case.set_case_property("contact_phone_number","15551234567")
self.case.set_case_property("contact_phone_number_is_verified", "1")
self.case.save()
self.contact = CommConnectCase.wrap(self.case.to_json())
settings.SMS_LOADED_BACKENDS.append("corehq.apps.sms.tests.TestCaseBackend")
def tearDown(self):
self.backend1.delete_invoke_doc()
self.backend1.delete()
self.backend_mapping1.delete()
self.backend2.delete_invoke_doc()
self.backend2.delete()
self.backend_mapping2.delete()
self.backend3.delete_invoke_doc()
self.backend3.delete()
self.backend_mapping3.delete()
self.backend4.delete_invoke_doc()
self.backend4.delete()
self.backend_mapping4.delete()
self.backend5.delete_invoke_doc()
self.backend5.delete()
self.backend_mapping5.delete()
self.backend6.delete_invoke_doc()
self.backend6.delete()
self.backend_mapping6.delete()
self.backend7.delete_invoke_doc()
self.backend7.delete()
self.backend_mapping7.delete()
self.contact.delete_verified_number()
self.case.delete()
self.domain_obj.delete()
if self.site_created:
self.site.delete()
settings.SMS_LOADED_BACKENDS.pop()
super(BackendTestCase, self).tearDown()
def test_multiple_country_prefixes(self):
self.assertEqual(MobileBackend.auto_load('256800000000')._id, self.backend5._id)
self.assertEqual(MobileBackend.auto_load('256700000000')._id, self.backend6._id)
self.assertEqual(MobileBackend.auto_load('256750000000')._id, self.backend7._id)
def test_backend(self):
# Test the backend map
self.assertTrue(send_sms(self.domain, None, "15551234567", "Test for BACKEND2"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertTrue(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertFalse(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend2.delete_invoke_doc()
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertTrue(send_sms(self.domain, None, "9100000000", "Test for BACKEND3"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertTrue(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertFalse(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend3.delete_invoke_doc()
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertTrue(send_sms(self.domain, None, "26500000000", "Test for BACKEND4"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertTrue(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertFalse(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend4.delete_invoke_doc()
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertTrue(send_sms(self.domain, None, "25800000000", "Test for BACKEND1"))
self.assertTrue(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertFalse(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend1.delete_invoke_doc()
self.assertFalse(self.backend1.invoke_doc_exists())
# Test overriding with a domain-level backend
self.domain_obj = Domain.get(self.domain_obj._id) # Prevent resource conflict
self.domain_obj.default_sms_backend_id = self.backend5._id
self.domain_obj.save()
self.assertTrue(send_sms(self.domain, None, "15551234567", "Test for BACKEND5"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertTrue(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertFalse(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend5.delete_invoke_doc()
self.assertFalse(self.backend5.invoke_doc_exists())
# Test use of backend that another domain owns but has granted access
self.domain_obj.default_sms_backend_id = self.backend6._id
self.domain_obj.save()
self.assertTrue(send_sms(self.domain, None, "25800000000", "Test for BACKEND6"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertTrue(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertFalse(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend6.delete_invoke_doc()
self.assertFalse(self.backend6.invoke_doc_exists())
# Test backend access control
self.domain_obj.default_sms_backend_id = self.backend7._id
self.domain_obj.save()
self.assertFalse(send_sms(self.domain, None, "25800000000", "Test for BACKEND7"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertFalse(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
# Test sending to verified number with backend map
self.domain_obj.default_sms_backend_id = None
self.domain_obj.save()
verified_number = self.contact.get_verified_number()
self.assertTrue(verified_number is not None)
self.assertTrue(verified_number.backend_id is None)
self.assertEqual(verified_number.phone_number, "15551234567")
self.assertTrue(send_sms_to_verified_number(verified_number, "Test for BACKEND2"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertTrue(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertFalse(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend2.delete_invoke_doc()
self.assertFalse(self.backend2.invoke_doc_exists())
# Test sending to verified number with default domain backend
self.domain_obj.default_sms_backend_id = self.backend5._id
self.domain_obj.save()
self.assertTrue(send_sms_to_verified_number(verified_number, "Test for BACKEND5"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertTrue(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertFalse(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend5.delete_invoke_doc()
self.assertFalse(self.backend5.invoke_doc_exists())
# Test sending to verified number with a contact-level backend owned by the domain
self.case.set_case_property("contact_backend_id", "BACKEND")
self.case.save()
self.contact = CommConnectCase.wrap(self.case.to_json())
verified_number = self.contact.get_verified_number()
self.assertTrue(verified_number is not None)
self.assertEqual(verified_number.backend_id, "BACKEND")
self.assertEqual(verified_number.phone_number, "15551234567")
self.assertTrue(send_sms_to_verified_number(verified_number, "Test for BACKEND"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertTrue(self.backend8.invoke_doc_exists())
self.assertFalse(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend8.delete_invoke_doc()
self.assertFalse(self.backend8.invoke_doc_exists())
# Test sending to verified number with a contact-level backend granted to the domain by another domain
self.backend8.delete()
self.assertTrue(send_sms_to_verified_number(verified_number, "Test for BACKEND"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertTrue(self.backend9.invoke_doc_exists())
self.assertFalse(self.backend10.invoke_doc_exists())
self.backend9.delete_invoke_doc()
self.assertFalse(self.backend9.invoke_doc_exists())
# Test sending to verified number with a contact-level global backend
self.backend9.delete()
self.assertTrue(send_sms_to_verified_number(verified_number, "Test for BACKEND"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertFalse(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.assertTrue(self.backend10.invoke_doc_exists())
self.backend10.delete_invoke_doc()
self.assertFalse(self.backend10.invoke_doc_exists())
# Test raising exception if contact-level backend is not found
self.backend10.delete()
try:
self.assertTrue(send_sms_to_verified_number(verified_number, "Test for BACKEND"))
except BadSMSConfigException:
pass
else:
self.assertTrue(False)
# Test send_sms_with_backend
self.assertTrue(send_sms_with_backend(self.domain, "+15551234567", "Test for BACKEND3", self.backend3._id))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertTrue(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.backend3.delete_invoke_doc()
self.assertFalse(self.backend3.invoke_doc_exists())
# Test send_sms_with_backend_name
self.assertTrue(send_sms_with_backend_name(self.domain, "+15551234567", "Test for BACKEND3", "BACKEND3"))
self.assertFalse(self.backend1.invoke_doc_exists())
self.assertFalse(self.backend2.invoke_doc_exists())
self.assertTrue(self.backend3.invoke_doc_exists())
self.assertFalse(self.backend4.invoke_doc_exists())
self.assertFalse(self.backend5.invoke_doc_exists())
self.assertFalse(self.backend6.invoke_doc_exists())
self.assertFalse(self.backend7.invoke_doc_exists())
self.backend3.delete_invoke_doc()
self.assertFalse(self.backend3.invoke_doc_exists())
def test_sms_registration(self):
formatted_username = format_username("tester", self.domain)
incoming("+9991234567", "JOIN {} WORKER tester".format(self.domain), "TEST_CASE_BACKEND")
# Test without mobile worker registration enabled
self.assertIsNone(CommCareUser.get_by_username(formatted_username))
# Enable mobile worker registration
setattr(self.domain_obj, "sms_mobile_worker_registration_enabled", True)
self.domain_obj.save()
incoming("+9991234567", "JOIN {} WORKER tester".format(self.domain), "TEST_CASE_BACKEND")
self.assertIsNotNone(CommCareUser.get_by_username(formatted_username))
# Test a duplicate registration
prev_num_users = num_mobile_users(self.domain)
incoming("+9991234568", "JOIN {} WORKER tester".format(self.domain), "TEST_CASE_BACKEND")
current_num_users = num_mobile_users(self.domain)
self.assertEqual(prev_num_users, current_num_users)
class TestUtilFunctions(TestCase):
def setUp(self):
self.case = CommCareCase(domain='test-domain', name='test-case')
self.case.save()
self.user = CommCareUser.create('test-domain', 'test-user', '123')
def test_get_contact(self):
contact = get_contact(self.case.get_id)
self.assertEqual(contact.get_id, self.case.get_id)
self.assertTrue(isinstance(contact, CommConnectCase))
contact = get_contact(self.user.get_id)
self.assertEqual(contact.get_id, self.user.get_id)
self.assertTrue(isinstance(contact, CommCareUser))
try:
get_contact('this-id-should-not-be-found')
except Exception:
pass
else:
self.assertTrue(False)
def test_apply_leniency(self):
self.assertEqual('16175551234', apply_leniency(' 1 (617) 555-1234 '))
self.assertEqual('16175551234', apply_leniency(' 1.617.555.1234 '))
self.assertEqual('16175551234', apply_leniency(' +1 617 555 1234 '))
def tearDown(self):
self.case.delete()
self.user.delete()
```
#### File: sofabed/tests/test_formdata.py
```python
from django.test import TestCase
from corehq.apps.hqadmin.dbaccessors import get_all_forms_in_all_domains
from corehq.apps.receiverwrapper.util import submit_form_locally
from couchforms.models import XFormInstance
import os
from corehq.apps.sofabed.models import FormData
from datetime import date, datetime
class FormDataTestCase(TestCase):
def setUp(self):
for item in get_all_forms_in_all_domains():
item.delete()
for item in FormData.objects.all():
item.delete()
file_path = os.path.join(os.path.dirname(__file__), "data", "meta.xml")
with open(file_path, "rb") as f:
xml_data = f.read()
submit_form_locally(xml_data, 'sofabed', app_id='12345', received_on=datetime.utcnow())
self.instance = XFormInstance.get('THIS_IS_THE_INSTANCEID')
def testFromInstance(self):
formdata = FormData.from_instance(self.instance)
self.assertEqual(date(2010, 7, 22), formdata.time_start.date())
self.assertEqual(date(2010, 7, 23), formdata.time_end.date())
self.assertEqual("THIS_IS_THE_INSTANCEID", formdata.instance_id)
self.assertEqual("THIS_IS_THE_DEVICEID", formdata.device_id)
self.assertEqual("THIS_IS_THE_USERID", formdata.user_id)
def testMatches(self):
formdata = FormData.from_instance(self.instance)
self.assertTrue(formdata.matches_exact(self.instance))
formdata.device_id = "UPDATED_DEVICEID"
self.assertFalse(formdata.matches_exact(self.instance))
def testUpdate(self):
formdata = FormData.from_instance(self.instance)
self.instance["form"]["meta"]["deviceID"] = "UPDATED_DEVICEID"
formdata.update(self.instance)
self.assertEqual("UPDATED_DEVICEID", formdata.device_id)
self.assertTrue(formdata.matches_exact(self.instance))
def testCreateOrUpdate(self):
self.assertEqual(0, FormData.objects.count())
FormData.create_or_update_from_instance(self.instance)
self.assertEqual(1, FormData.objects.count())
self.assertTrue(FormData.objects.all()[0].matches_exact(self.instance))
FormData.create_or_update_from_instance(self.instance)
self.assertEqual(1, FormData.objects.count())
self.assertTrue(FormData.objects.all()[0].matches_exact(self.instance))
self.instance["form"]["meta"]["deviceID"] = "UPDATED_DEVICEID"
FormData.create_or_update_from_instance(self.instance)
self.assertEqual(1, FormData.objects.count())
self.assertTrue(FormData.objects.all()[0].matches_exact(self.instance))
self.instance["form"]["meta"]["instanceID"] = "UPDATED_INSTANCEID"
self.instance._id = "UPDATED_INSTANCEID"
FormData.create_or_update_from_instance(self.instance)
self.assertEqual(2, FormData.objects.count())
self.assertTrue(FormData.objects.get(instance_id="UPDATED_INSTANCEID").matches_exact(self.instance))
```
#### File: tzmigration/tests/test_timezone_migration_progress.py
```python
from django.test import TestCase
from corehq.apps.tzmigration import get_migration_complete, \
get_migration_status, set_migration_complete
from corehq.apps.tzmigration.api import set_migration_started, \
set_migration_not_started
from corehq.apps.tzmigration.models import MigrationStatus
class TimezoneMigrationProgressTest(TestCase):
def test_not_started(self):
self.assertFalse(get_migration_complete('red'))
self.assertEqual(get_migration_status('red'),
MigrationStatus.NOT_STARTED)
def test_in_progress(self):
set_migration_started('yellow')
self.assertFalse(get_migration_complete('yellow'))
self.assertEqual(get_migration_status('yellow'),
MigrationStatus.IN_PROGRESS)
def test_complete(self):
set_migration_complete('green')
self.assertEqual(get_migration_status('green'),
MigrationStatus.COMPLETE)
self.assertTrue(get_migration_complete('green'))
def test_abort(self):
set_migration_started('yellow')
self.assertFalse(get_migration_complete('yellow'))
self.assertEqual(get_migration_status('yellow'),
MigrationStatus.IN_PROGRESS)
set_migration_not_started('yellow')
self.assertFalse(get_migration_complete('yellow'))
self.assertEqual(get_migration_status('yellow'),
MigrationStatus.NOT_STARTED)
```
#### File: apps/userreports/app_manager.py
```python
from corehq.apps.app_manager.util import get_case_properties
from corehq.apps.app_manager.xform import XForm
from corehq.apps.userreports.models import DataSourceConfiguration
from corehq.apps.userreports.reports.builder import (
DEFAULT_CASE_PROPERTY_DATATYPES,
FORM_METADATA_PROPERTIES,
make_case_data_source_filter,
make_case_property_indicator,
make_form_data_source_filter,
make_form_meta_block_indicator,
make_form_question_indicator,
)
from corehq.apps.userreports.sql import get_column_name
import unidecode
def get_case_data_sources(app):
"""
Returns a dict mapping case types to DataSourceConfiguration objects that have
the default set of case properties built in.
"""
return {case_type: get_case_data_source(app, case_type) for case_type in app.get_case_types() if case_type}
def get_case_data_source(app, case_type):
prop_map = get_case_properties(app, [case_type], defaults=DEFAULT_CASE_PROPERTY_DATATYPES.keys())
return DataSourceConfiguration(
domain=app.domain,
referenced_doc_type='CommCareCase',
table_id=_clean_table_name(app.domain, case_type),
display_name=case_type,
configured_filter=make_case_data_source_filter(case_type),
configured_indicators=[
make_case_property_indicator(property) for property in prop_map[case_type]
]
)
def get_form_data_sources(app):
"""
Returns a dict mapping forms to DataSourceConfiguration objects.
This is currently only used in tests, to verify that each form in an app can back a data source.
"""
forms = {}
for module in app.modules:
for form in module.forms:
forms[form.xmlns] = get_form_data_source(app, form)
return forms
def get_form_data_source(app, form):
xform = XForm(form.source)
form_name = form.default_name()
questions = xform.get_questions([])
return DataSourceConfiguration(
domain=app.domain,
referenced_doc_type='XFormInstance',
table_id=_clean_table_name(app.domain, form_name),
display_name=form_name,
configured_filter=make_form_data_source_filter(xform.data_node.tag_xmlns),
configured_indicators=[
make_form_question_indicator(q, column_id=get_column_name(q['value']))
for q in questions
] + [
make_form_meta_block_indicator(field)
for field in FORM_METADATA_PROPERTIES
],
)
def _clean_table_name(domain, readable_name):
"""
Slugifies and truncates readable name to make a valid configurable report table name.
"""
name_slug = '_'.join(unidecode.unidecode(readable_name).lower().split(' '))
# 63 = max postgres table name, 24 = table name prefix + hash overhead
max_length = 63 - len(domain) - 24
return name_slug[:max_length]
```
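The data-source helpers above are normally driven by the report builder UI, but they can also be called directly. The sketch below is illustrative only: `app` is assumed to be an existing `Application`, and the helper name `build_default_case_data_sources` is made up here.

```python
# Illustrative usage sketch (not part of app_manager.py). Assumes an existing
# Application instance `app`; get_case_data_sources() returns unsaved
# DataSourceConfiguration objects keyed by case type.
from corehq.apps.userreports.app_manager import get_case_data_sources

def build_default_case_data_sources(app):
    configs = get_case_data_sources(app)
    for case_type, config in configs.items():
        config.validate()  # surfaces spec problems before anything is persisted
        config.save()      # persist so the indicator table can be built
    return configs
```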
#### File: userreports/expressions/getters.py
```python
from datetime import date, datetime
from decimal import Decimal, InvalidOperation
from corehq.util.dates import iso_string_to_date, iso_string_to_datetime
class TransformedGetter(object):
"""
Getter that takes in another getter and a transform function.
Returns the result of calling the transform function on result of the getter.
"""
def __init__(self, getter, transform=None):
self.getter = getter
self.transform = transform
def __call__(self, item, context=None):
extracted = self.getter(item, context)
if self.transform:
return self.transform(extracted)
return extracted
class DictGetter(object):
def __init__(self, property_name):
self.property_name = property_name
def __call__(self, item, context=None):
if not isinstance(item, dict):
return None
try:
return item[self.property_name]
except KeyError:
return None
class NestedDictGetter(object):
"""
Gets a property from a series of nested dicts. Takes in a fully qualified
path to the value in question in the form of a list. Returns None if the path
does not exist in the dict.
"""
def __init__(self, property_path):
self.property_path = property_path
def __call__(self, item, context=None):
if not isinstance(item, dict):
return None
try:
return recursive_lookup(item, self.property_path)
except (KeyError, TypeError):
# key errors are missing keys
# type errors are valid keys that return the wrong type
return None
def recursive_lookup(dict_object, keys):
"""
Given a dict object and list of keys, nest into those keys.
Raises KeyError if the path isn't found.
>>> recursive_lookup({'foo': 1}, ['foo'])
1
>>> recursive_lookup({'foo': {'bar': 1}}, ['foo'])
{'bar': 1}
>>> recursive_lookup({'foo': {'bar': 1}}, ['foo', 'bar'])
1
"""
if not keys or not isinstance(keys, list):
raise ValueError('Keys must be a non-empty list!')
if len(keys) == 1:
return dict_object[keys[0]]
else:
return recursive_lookup(dict_object[keys[0]], keys[1:])
def transform_date(item):
# postgres crashes on empty strings, but is happy to take null dates
if item:
if isinstance(item, basestring):
try:
return iso_string_to_date(item)
except ValueError:
try:
return iso_string_to_datetime(item, strict=True).date()
except ValueError:
return None
elif isinstance(item, date):
return item
elif isinstance(item, datetime):
return item.date()
return None
def transform_datetime(item):
if item:
if isinstance(item, basestring):
try:
return iso_string_to_datetime(item, strict=True)
except ValueError:
pass
elif isinstance(item, datetime):
return item
return None
def transform_int(item):
try:
return int(item)
except (ValueError, TypeError):
try:
return int(float(item))
except (ValueError, TypeError):
return None
def transform_decimal(item):
try:
return Decimal(item)
except (ValueError, TypeError, InvalidOperation):
return None
def transform_unicode(item):
if item is None:
return None
try:
return unicode(item)
except (ValueError, TypeError):
return None
def transform_from_datatype(datatype):
"""
Given a datatype, return a transform for that type.
"""
identity = lambda x: x
return {
'date': transform_date,
'datetime': transform_datetime,
'decimal': transform_decimal,
'integer': transform_int,
'string': transform_unicode,
}.get(datatype) or identity
def getter_from_property_reference(spec):
if spec.property_name:
assert not spec.property_path, \
'indicator {} has both a name and path specified! you must only pick one.'.format(spec.property_name)
return DictGetter(property_name=spec.property_name)
else:
assert spec.property_path, spec.property_name
return NestedDictGetter(property_path=spec.property_path)
```
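As a rough illustration of how these getters compose, the snippet below exercises them on a hand-made document; the field names and values are invented, and the exact date parsing depends on `iso_string_to_date`.

```python
# Illustrative only: composing the getters above the way an indicator spec would,
# i.e. a property_name/property_path getter wrapped with a datatype transform.
from corehq.apps.userreports.expressions.getters import (
    DictGetter,
    NestedDictGetter,
    TransformedGetter,
    transform_from_datatype,
)

doc = {'age': '42', 'form': {'meta': {'birth_date': '2015-03-14'}}}

age = TransformedGetter(DictGetter('age'), transform_from_datatype('integer'))
born = TransformedGetter(
    NestedDictGetter(['form', 'meta', 'birth_date']),
    transform_from_datatype('date'),
)

age(doc)   # -> 42
born(doc)  # -> date(2015, 3, 14), assuming iso_string_to_date handles plain ISO dates
age({})    # -> None; missing keys and failed casts fall back to None rather than raising
```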
#### File: userreports/expressions/specs.py
```python
import json
from couchdbkit.exceptions import ResourceNotFound
from corehq.apps.userreports.exceptions import BadSpecError
from corehq.util.couch import get_db_by_doc_type
from dimagi.ext.jsonobject import JsonObject, StringProperty, ListProperty, DictProperty
from jsonobject.base_properties import DefaultProperty
from corehq.apps.userreports.expressions.getters import (
DictGetter,
NestedDictGetter,
TransformedGetter,
transform_from_datatype)
from corehq.apps.userreports.indicators.specs import DataTypeProperty
from corehq.apps.userreports.specs import TypeProperty, EvaluationContext
from corehq.util.quickcache import quickcache
class IdentityExpressionSpec(JsonObject):
type = TypeProperty('identity')
def __call__(self, item, context=None):
return item
class ConstantGetterSpec(JsonObject):
type = TypeProperty('constant')
constant = DefaultProperty()
@classmethod
def wrap(cls, obj):
if 'constant' not in obj:
raise BadSpecError('"constant" property is required!')
return super(ConstantGetterSpec, cls).wrap(obj)
def __call__(self, item, context=None):
return self.constant
class PropertyNameGetterSpec(JsonObject):
type = TypeProperty('property_name')
property_name = StringProperty(required=True)
datatype = DataTypeProperty(required=False)
@property
def expression(self):
transform = transform_from_datatype(self.datatype)
getter = DictGetter(self.property_name)
return TransformedGetter(getter, transform)
def __call__(self, item, context=None):
return self.expression(item, context)
class PropertyPathGetterSpec(JsonObject):
type = TypeProperty('property_path')
property_path = ListProperty(unicode, required=True)
datatype = DataTypeProperty(required=False)
@property
def expression(self):
transform = transform_from_datatype(self.datatype)
getter = NestedDictGetter(self.property_path)
return TransformedGetter(getter, transform)
def __call__(self, item, context=None):
return self.expression(item, context)
class ConditionalExpressionSpec(JsonObject):
type = TypeProperty('conditional')
test = DictProperty(required=True)
expression_if_true = DictProperty(required=True)
expression_if_false = DictProperty(required=True)
def configure(self, test_function, true_expression, false_expression):
self._test_function = test_function
self._true_expression = true_expression
self._false_expression = false_expression
def __call__(self, item, context=None):
if self._test_function(item, context):
return self._true_expression(item, context)
else:
return self._false_expression(item, context)
class ArrayIndexExpressionSpec(JsonObject):
type = TypeProperty('array_index')
array_expression = DictProperty(required=True)
index_expression = DefaultProperty(required=True)
def configure(self, array_expression, index_expression):
self._array_expression = array_expression
self._index_expression = index_expression
def __call__(self, item, context=None):
array_value = self._array_expression(item, context)
if not isinstance(array_value, list):
return None
index_value = self._index_expression(item, context)
if not isinstance(index_value, int):
return None
try:
return array_value[index_value]
except IndexError:
return None
class SwitchExpressionSpec(JsonObject):
type = TypeProperty('switch')
switch_on = DictProperty(required=True)
cases = DictProperty(required=True)
default = DictProperty(required=True)
def configure(self, switch_on_expression, case_expressions, default_expression):
self._switch_on_expression = switch_on_expression
self._case_expressions = case_expressions
self._default_expression = default_expression
def __call__(self, item, context=None):
switch_value = self._switch_on_expression(item, context)
for c in self.cases:
if switch_value == c:
return self._case_expressions[c](item, context)
return self._default_expression(item, context)
class IteratorExpressionSpec(JsonObject):
type = TypeProperty('iterator')
expressions = ListProperty(required=True)
# an optional filter to test the values on - if they don't match they won't be included in the iteration
test = DictProperty()
def configure(self, expressions, test):
self._expression_fns = expressions
if test:
self._test = test
else:
# if not defined then all values should be returned
self._test = lambda *args, **kwargs: True
def __call__(self, item, context=None):
values = []
for expression in self._expression_fns:
value = expression(item, context)
if self._test(value):
values.append(value)
return values
class RootDocExpressionSpec(JsonObject):
type = TypeProperty('root_doc')
expression = DictProperty(required=True)
def configure(self, expression):
self._expression_fn = expression
def __call__(self, item, context=None):
if context is None:
return None
return self._expression_fn(context.root_doc, context)
class RelatedDocExpressionSpec(JsonObject):
type = TypeProperty('related_doc')
related_doc_type = StringProperty()
doc_id_expression = DictProperty(required=True)
value_expression = DictProperty(required=True)
def configure(self, doc_id_expression, value_expression):
if get_db_by_doc_type(self.related_doc_type) is None:
raise BadSpecError(u'Cannot determine database for document type {}!'.format(self.related_doc_type))
self._doc_id_expression = doc_id_expression
self._value_expression = value_expression
# used in caching
self._vary_on = json.dumps(self.value_expression, sort_keys=True)
def __call__(self, item, context=None):
doc_id = self._doc_id_expression(item, context)
if doc_id:
return self.get_value(doc_id, context)
@quickcache(['self._vary_on', 'doc_id'])
def get_value(self, doc_id, context):
try:
doc = get_db_by_doc_type(self.related_doc_type).get(doc_id)
# ensure no cross-domain lookups of different documents
assert context.root_doc['domain']
if context.root_doc['domain'] != doc.get('domain'):
return None
# explicitly use a new evaluation context since this is a new document
return self._value_expression(doc, EvaluationContext(doc, 0))
except ResourceNotFound:
return None
class NestedExpressionSpec(JsonObject):
type = TypeProperty('nested')
argument_expression = DictProperty(required=True)
value_expression = DictProperty(required=True)
def configure(self, argument_expression, value_expression):
self._argument_expression = argument_expression
self._value_expression = value_expression
def __call__(self, item, context=None):
argument = self._argument_expression(item, context)
return self._value_expression(argument, context)
class DictExpressionSpec(JsonObject):
type = TypeProperty('dict')
properties = DictProperty(required=True)
def configure(self, compiled_properties):
for key in compiled_properties:
if not isinstance(key, basestring):
raise BadSpecError("Properties in a dict expression must be strings!")
self._compiled_properties = compiled_properties
def __call__(self, item, context=None):
ret = {}
for property_name, expression in self._compiled_properties.items():
ret[property_name] = expression(item, context)
return ret
```
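Most of these spec classes are compiled and wired together by an expression factory that is not part of this file, but the simplest ones can be wrapped and called directly, as this small illustrative snippet shows.

```python
# Illustrative only; the specs that define configure() need a factory to
# supply their sub-expressions, which is out of scope here.
from corehq.apps.userreports.expressions.specs import (
    ConstantGetterSpec,
    IdentityExpressionSpec,
)

constant = ConstantGetterSpec.wrap({'type': 'constant', 'constant': 42})
constant({'any': 'doc'})   # -> 42

identity = IdentityExpressionSpec.wrap({'type': 'identity'})
identity({'any': 'doc'})   # -> {'any': 'doc'}

# Omitting the required key fails at wrap time:
# ConstantGetterSpec.wrap({'type': 'constant'})  # raises BadSpecError
```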
#### File: userreports/filters/__init__.py
```python
class Filter(object):
"""
Base filter class
"""
def __call__(self, item, context=None):
return True
class NOTFilter(Filter):
def __init__(self, filter):
self._filter = filter
def __call__(self, item, context=None):
return not self._filter(item)
class ANDFilter(Filter):
"""
Lets you construct AND operations on filters.
"""
def __init__(self, filters):
self.filters = filters
assert len(self.filters) > 0
def __call__(self, item, context=None):
return all(filter(item, context) for filter in self.filters)
class ORFilter(Filter):
"""
Lets you construct OR operations on filters.
"""
def __init__(self, filters):
self.filters = filters
assert len(self.filters) > 0
def __call__(self, item, context=None):
return any(filter(item, context) for filter in self.filters)
class CustomFilter(Filter):
"""
This filter allows you to pass in a function reference to use as the filter
e.g. CustomFilter(lambda f, context: f['gender'] in ['male', 'female'])
"""
def __init__(self, filter):
self._filter = filter
def __call__(self, item, context=None):
return self._filter(item, context)
class SinglePropertyValueFilter(Filter):
def __init__(self, expression, operator, reference_value):
self.expression = expression
self.operator = operator
self.reference_value = reference_value
def __call__(self, item, context=None):
return self.operator(self.expression(item, context), self.reference_value)
```
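Because each filter is just a callable wrapper, they can be composed directly. The example below is illustrative; the documents and predicates are made up.

```python
# Illustrative composition of the filter classes above.
from corehq.apps.userreports.filters import ANDFilter, NOTFilter, CustomFilter

is_case = CustomFilter(lambda doc, context=None: doc.get('doc_type') == 'CommCareCase')
is_closed = CustomFilter(lambda doc, context=None: doc.get('closed') is True)
open_cases = ANDFilter([is_case, NOTFilter(is_closed)])

open_cases({'doc_type': 'CommCareCase', 'closed': False})  # -> True
open_cases({'doc_type': 'XFormInstance'})                  # -> False
```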
#### File: userreports/filters/specs.py
```python
from dimagi.ext.jsonobject import JsonObject, StringProperty, ListProperty, DictProperty
from jsonobject.base import DefaultProperty
from corehq.apps.userreports.exceptions import BadSpecError
from corehq.apps.userreports.expressions.getters import getter_from_property_reference
from corehq.apps.userreports.operators import OPERATORS
from corehq.apps.userreports.specs import TypeProperty
from django.utils.translation import ugettext as _
class BaseFilterSpec(JsonObject):
_allow_dynamic_properties = False
class BooleanExpressionFilterSpec(BaseFilterSpec):
type = TypeProperty('boolean_expression')
operator = StringProperty(choices=OPERATORS.keys(), required=True)
property_value = DefaultProperty()
expression = DictProperty(required=True)
@classmethod
def wrap(cls, obj):
_assert_prop_in_obj('property_value', obj)
return super(BooleanExpressionFilterSpec, cls).wrap(obj)
class PropertyMatchFilterSpec(BaseFilterSpec):
type = TypeProperty('property_match')
property_name = StringProperty()
property_path = ListProperty()
property_value = DefaultProperty()
@property
def getter(self):
return getter_from_property_reference(self)
@classmethod
def wrap(cls, obj):
_assert_prop_in_obj('property_value', obj)
return super(PropertyMatchFilterSpec, cls).wrap(obj)
class NotFilterSpec(BaseFilterSpec):
type = TypeProperty('not')
filter = DictProperty() # todo: validators=FilterFactory.validate_spec
class NamedFilterSpec(BaseFilterSpec):
type = TypeProperty('named')
name = StringProperty(required=True)
def _assert_prop_in_obj(property_name, obj):
if property_name not in obj:
raise BadSpecError(_('{} is required!').format(property_name))
```
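For reference, a property_match spec can be wrapped directly; the dict below is a hypothetical example, and the missing-key behaviour follows from `_assert_prop_in_obj` above.

```python
# Hypothetical spec dict, shown only to illustrate wrap-time validation
# and the derived getter.
from corehq.apps.userreports.filters.specs import PropertyMatchFilterSpec

spec = PropertyMatchFilterSpec.wrap({
    'type': 'property_match',
    'property_name': 'gender',
    'property_value': 'female',
})
spec.getter({'gender': 'female'})  # -> 'female' (a DictGetter on property_name)

# Leaving out property_value raises BadSpecError at wrap time:
# PropertyMatchFilterSpec.wrap({'type': 'property_match', 'property_name': 'gender'})
```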
#### File: apps/userreports/fixtures.py
```python
from datetime import datetime
from xml.etree import ElementTree
from corehq import toggles
from corehq.apps.app_manager.dbaccessors import get_apps_in_domain
from corehq.apps.app_manager.models import (
Application,
AutoFilter,
CustomDataAutoFilter,
ReportModule,
StaticChoiceFilter,
StaticChoiceListFilter,
StaticDatespanFilter,
)
from corehq.apps.userreports.exceptions import UserReportsError
from corehq.apps.userreports.reports.factory import ReportFactory
from corehq.apps.userreports.util import localize
from corehq.util.xml import serialize
from .models import ReportConfiguration
def wrap_by_filter_type(report_app_filter):
doc_type_to_filter_class = {
'AutoFilter': AutoFilter,
'CustomDataAutoFilter': CustomDataAutoFilter,
'StaticChoiceFilter': StaticChoiceFilter,
'StaticChoiceListFilter': StaticChoiceListFilter,
'StaticDatespanFilter': StaticDatespanFilter,
}
filter_class = doc_type_to_filter_class.get(report_app_filter.doc_type)
if not filter_class:
raise Exception("Unknown saved filter type: %s " % report_app_filter.doc_type)
return filter_class.wrap(report_app_filter.to_json())
class ReportFixturesProvider(object):
id = 'commcare:reports'
def __call__(self, user, version, last_sync=None):
"""
Generates a report fixture for mobile that can be used by a report module
"""
if not toggles.MOBILE_UCR.enabled(user.domain):
return []
report_configs = [
report_config
for app in get_apps_in_domain(user.domain) if isinstance(app, Application)
# TODO: pass app_id to reduce size of fixture
for module in app.modules if isinstance(module, ReportModule)
for report_config in module.report_configs
]
if not report_configs:
return []
root = ElementTree.Element('fixture', attrib={'id': self.id})
reports_elem = ElementTree.Element(
'reports',
attrib={
'last_sync': datetime.utcnow().isoformat(),
},
)
for report_config in report_configs:
try:
reports_elem.append(self._report_config_to_fixture(report_config, user))
except UserReportsError:
pass
root.append(reports_elem)
return [root]
def _report_config_to_fixture(self, report_config, user):
report_elem = ElementTree.Element('report', attrib={'id': report_config.uuid})
report = ReportConfiguration.get(report_config.report_id)
report_elem.append(self._element('name', localize(report_config.header, user.language)))
report_elem.append(self._element('description', localize(report_config.description, user.language)))
data_source = ReportFactory.from_spec(report)
data_source.set_filter_values({
filter_slug: wrap_by_filter_type(filter).get_filter_value(user)
for filter_slug, filter in report_config.filters.items()
})
rows_elem = ElementTree.Element('rows')
for i, row in enumerate(data_source.get_data()):
row_elem = ElementTree.Element('row', attrib={'index': str(i)})
for k in sorted(row.keys()):
row_elem.append(self._element('column', serialize(row[k]), attrib={'id': k}))
rows_elem.append(row_elem)
report_elem.append(rows_elem)
return report_elem
@staticmethod
def _element(name, text, attrib=None):
attrib = attrib or {}
element = ElementTree.Element(name, attrib=attrib)
element.text = text
return element
report_fixture_generator = ReportFixturesProvider()
```
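For orientation, the fixture produced by `ReportFixturesProvider` has roughly the shape sketched below; it is reconstructed from the element names used above, with placeholder ids and values.

```python
# Approximate shape of the generated fixture (placeholders, not real output):
#
# <fixture id="commcare:reports">
#   <reports last_sync="2015-01-01T00:00:00">
#     <report id="{report_config.uuid}">
#       <name>...</name>
#       <description>...</description>
#       <rows>
#         <row index="0">
#           <column id="{column_key}">...</column>
#         </row>
#       </rows>
#     </report>
#   </reports>
# </fixture>
```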
#### File: apps/userreports/specs.py
```python
from dimagi.ext.jsonobject import StringProperty
from datetime import datetime
def TypeProperty(value):
"""
Shortcut for making a required property and restricting it to a single specified
value. This adds additional validation that the objects are being wrapped as expected
according to the type.
"""
return StringProperty(required=True, choices=[value])
class EvaluationContext(object):
"""
An evaluation context. Necessary for repeats, so that expressions have access to the root
document and the iteration number in addition to the individual repeat row.
"""
def __init__(self, root_doc, iteration=0):
self.root_doc = root_doc
self.iteration = iteration
self.inserted_timestamp = datetime.utcnow()
```
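The sketch below shows the intended use of `TypeProperty` inside a spec class and the way an `EvaluationContext` is constructed; `MySpec` is invented for illustration.

```python
# Illustrative only: MySpec is not a real spec class.
from dimagi.ext.jsonobject import JsonObject, StringProperty
from corehq.apps.userreports.specs import TypeProperty, EvaluationContext

class MySpec(JsonObject):
    type = TypeProperty('my_type')
    name = StringProperty(required=True)

MySpec.wrap({'type': 'my_type', 'name': 'ok'})   # wraps fine
# MySpec.wrap({'type': 'other', 'name': 'ok'})   # fails validation: 'other' is not an allowed choice

# EvaluationContext simply carries the root document and the repeat iteration:
context = EvaluationContext({'_id': 'some-doc', 'domain': 'example'}, iteration=0)
```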
#### File: userreports/tests/test_columns.py
```python
import uuid
from jsonobject.exceptions import BadValueError
from sqlagg import SumWhen
from django.test import SimpleTestCase, TestCase
from corehq.apps.userreports import tasks
from corehq.apps.userreports.app_manager import _clean_table_name
from corehq.apps.userreports.models import (
DataSourceConfiguration,
ReportConfiguration,
)
from corehq.apps.userreports.reports.factory import ReportFactory, ReportColumnFactory
from corehq.apps.userreports.reports.specs import FieldColumn, PercentageColumn, AggregateDateColumn
from corehq.apps.userreports.sql import IndicatorSqlAdapter
from corehq.apps.userreports.sql.columns import (
_expand_column,
_get_distinct_values,
DEFAULT_MAXIMUM_EXPANSION,
)
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.tests.util import delete_all_cases
from casexml.apps.case.xml import V2
from corehq.form_processor.interfaces import FormProcessorInterface
class TestFieldColumn(SimpleTestCase):
def testColumnSetFromAlias(self):
field = ReportColumnFactory.from_spec({
"aggregation": "simple",
"field": "doc_id",
"alias": "the_right_answer",
"type": "field",
})
self.assertTrue(isinstance(field, FieldColumn))
self.assertEqual('the_right_answer', field.column_id)
def testColumnDefaultsToField(self):
field = ReportColumnFactory.from_spec({
"aggregation": "simple",
"field": "doc_id",
"type": "field",
})
self.assertEqual('doc_id', field.column_id)
def testBadAggregation(self):
with self.assertRaises(BadValueError):
ReportColumnFactory.from_spec({
"aggregation": "simple_",
"field": "doc_id",
"type": "field",
})
def testGoodFormat(self):
for format in [
'default',
'percent_of_total',
]:
self.assertEquals(FieldColumn, type(
ReportColumnFactory.from_spec({
"aggregation": "simple",
"field": "doc_id",
"format": format,
"type": "field",
})
))
def testBadFormat(self):
with self.assertRaises(BadValueError):
ReportColumnFactory.from_spec({
"aggregation": "simple",
"field": "doc_id",
"format": "default_",
"type": "field",
})
class ChoiceListColumnDbTest(TestCase):
def test_column_uniqueness_when_truncated(self):
problem_spec = {
"display_name": "practicing_lessons",
"property_name": "long_column",
"choices": [
"duplicate_choice_1",
"duplicate_choice_2",
],
"select_style": "multiple",
"column_id": "a_very_long_base_selection_column_name_with_limited_room",
"type": "choice_list",
}
data_source_config = DataSourceConfiguration(
domain='test',
display_name='foo',
referenced_doc_type='CommCareCase',
table_id=uuid.uuid4().hex,
configured_filter={},
configured_indicators=[problem_spec],
)
adapter = IndicatorSqlAdapter(data_source_config)
adapter.rebuild_table()
# ensure we can save data to the table.
adapter.save({
'_id': uuid.uuid4().hex,
'domain': 'test',
'doc_type': 'CommCareCase',
'long_column': 'duplicate_choice_1',
})
# and query it back
q = adapter.get_query_object()
self.assertEqual(1, q.count())
class TestExpandedColumn(TestCase):
domain = 'foo'
case_type = 'person'
def _new_case(self, properties):
id = uuid.uuid4().hex
case_block = CaseBlock(
create=True,
case_id=id,
case_type=self.case_type,
update=properties,
).as_xml()
FormProcessorInterface.post_case_blocks([case_block], {'domain': self.domain})
return CommCareCase.get(id)
def _build_report(self, vals, field='my_field', build_data_source=True):
"""
Build a new report, and populate it with cases.
Return a ConfigurableReportDataSource and a FieldColumn
:param vals: List of values to populate the given report field with.
:param field: The name of a field in the data source/report
:return: Tuple containing a ConfigurableReportDataSource and FieldColumn.
The column is a column mapped to the given field.
"""
# Create Cases
for v in vals:
self._new_case({field: v}).save()
# Create report
data_source_config = DataSourceConfiguration(
domain=self.domain,
display_name='foo',
referenced_doc_type='CommCareCase',
table_id=_clean_table_name(self.domain, str(uuid.uuid4().hex)),
configured_filter={
"type": "boolean_expression",
"operator": "eq",
"expression": {
"type": "property_name",
"property_name": "type"
},
"property_value": self.case_type,
},
configured_indicators=[{
"type": "expression",
"expression": {
"type": "property_name",
"property_name": field
},
"column_id": field,
"display_name": field,
"datatype": "string"
}],
)
data_source_config.validate()
data_source_config.save()
if build_data_source:
tasks.rebuild_indicators(data_source_config._id)
report_config = ReportConfiguration(
domain=self.domain,
config_id=data_source_config._id,
title='foo',
aggregation_columns=['doc_id'],
columns=[{
"type": "expanded",
"field": field,
"display": field,
"format": "default",
}],
filters=[],
configured_charts=[]
)
report_config.save()
data_source = ReportFactory.from_spec(report_config)
return data_source, data_source.column_configs[0]
def setUp(self):
delete_all_cases()
def test_getting_distinct_values(self):
data_source, column = self._build_report([
'apple',
'apple',
'banana',
'blueberry'
])
vals = _get_distinct_values(data_source.config, column)[0]
self.assertSetEqual(set(vals), set(['apple', 'banana', 'blueberry']))
def test_no_distinct_values(self):
data_source, column = self._build_report([])
distinct_vals, too_many_values = _get_distinct_values(data_source.config, column)
self.assertListEqual(distinct_vals, [])
def test_too_large_expansion(self):
vals = ['foo' + str(i) for i in range(DEFAULT_MAXIMUM_EXPANSION + 1)]
data_source, column = self._build_report(vals)
distinct_vals, too_many_values = _get_distinct_values(data_source.config, column)
self.assertTrue(too_many_values)
self.assertEqual(len(distinct_vals), DEFAULT_MAXIMUM_EXPANSION)
def test_allowed_expansion(self):
num_columns = DEFAULT_MAXIMUM_EXPANSION + 1
vals = ['foo' + str(i) for i in range(num_columns)]
data_source, column = self._build_report(vals)
column.max_expansion = num_columns
distinct_vals, too_many_values = _get_distinct_values(
data_source.config,
column,
expansion_limit=num_columns,
)
self.assertFalse(too_many_values)
self.assertEqual(len(distinct_vals), num_columns)
def test_unbuilt_data_source(self):
data_source, column = self._build_report(['apple'], build_data_source=False)
distinct_vals, too_many_values = _get_distinct_values(data_source.config, column)
self.assertListEqual(distinct_vals, [])
self.assertFalse(too_many_values)
def test_expansion(self):
column = ReportColumnFactory.from_spec(dict(
type="expanded",
field="lab_result",
display="Lab Result",
format="default",
description="foo"
))
cols = _expand_column(column, ["positive", "negative"], "en")
self.assertEqual(len(cols), 2)
self.assertEqual(type(cols[0].view), SumWhen)
self.assertEqual(cols[1].view.whens, {'negative': 1})
class TestAggregateDateColumn(SimpleTestCase):
def setUp(self):
self._spec = {
'type': 'aggregate_date',
'column_id': 'a_date',
'field': 'a_date',
}
def test_wrap(self):
wrapped = ReportColumnFactory.from_spec(self._spec)
self.assertTrue(isinstance(wrapped, AggregateDateColumn))
self.assertEqual('a_date', wrapped.column_id)
def test_group_by(self):
wrapped = ReportColumnFactory.from_spec(self._spec)
self.assertEqual(['a_date_year', 'a_date_month'], wrapped.get_group_by_columns())
def test_format(self):
wrapped = ReportColumnFactory.from_spec(self._spec)
self.assertEqual('2015-03', wrapped.get_format_fn()({'year': 2015, 'month': 3}))
def test_format_missing(self):
wrapped = ReportColumnFactory.from_spec(self._spec)
self.assertEqual('Unknown Date', wrapped.get_format_fn()({'year': None, 'month': None}))
class TestPercentageColumn(SimpleTestCase):
def test_wrap(self):
wrapped = ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
'numerator': {
"aggregation": "sum",
"field": "has_danger_signs",
"type": "field",
},
'denominator': {
"aggregation": "sum",
"field": "is_pregnant",
"type": "field",
},
})
self.assertTrue(isinstance(wrapped, PercentageColumn))
self.assertEqual('pct', wrapped.column_id)
self.assertEqual('has_danger_signs', wrapped.numerator.field)
self.assertEqual('is_pregnant', wrapped.denominator.field)
self.assertEqual('percent', wrapped.format)
def test_missing_fields(self):
field_spec = {
"aggregation": "simple",
"field": "is_pregnant",
"type": "field",
}
with self.assertRaises(BadValueError):
ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
})
with self.assertRaises(BadValueError):
ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
'numerator': field_spec,
})
with self.assertRaises(BadValueError):
ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
'denominator': field_spec,
})
def test_wrong_field_type(self):
# can't put a percent in another percent
field_spec = {
"aggregation": "simple",
"field": "is_pregnant",
"type": "percent",
}
with self.assertRaises(BadValueError):
ReportColumnFactory.from_spec({
'type': 'percent',
'column_id': 'pct',
'numerator': field_spec,
'denominator': field_spec,
})
def test_format_pct(self):
spec = self._test_spec()
spec['format'] = 'percent'
wrapped = ReportColumnFactory.from_spec(spec)
self.assertEqual('33%', wrapped.get_format_fn()({'num': 1, 'denom': 3}))
def test_format_pct_denom_0(self):
spec = self._test_spec()
spec['format'] = 'percent'
wrapped = ReportColumnFactory.from_spec(spec)
for empty_value in [0, 0.0, None, '']:
self.assertEqual('--', wrapped.get_format_fn()({'num': 1, 'denom': empty_value}))
def test_format_fraction(self):
spec = self._test_spec()
spec['format'] = 'fraction'
wrapped = ReportColumnFactory.from_spec(spec)
self.assertEqual('1/3', wrapped.get_format_fn()({'num': 1, 'denom': 3}))
def test_format_both(self):
spec = self._test_spec()
spec['format'] = 'both'
wrapped = ReportColumnFactory.from_spec(spec)
self.assertEqual('33% (1/3)', wrapped.get_format_fn()({'num': 1, 'denom': 3}))
def test_format_pct_non_numeric(self):
spec = self._test_spec()
spec['format'] = 'percent'
wrapped = ReportColumnFactory.from_spec(spec)
for unexpected_value in ['hello', object()]:
self.assertEqual('?', wrapped.get_format_fn()({'num': 1, 'denom': unexpected_value}),
'non-numeric value failed for denominator {}'.format(unexpected_value))
self.assertEqual('?', wrapped.get_format_fn()({'num': unexpected_value, 'denom': 1}))
def test_format_numeric_pct(self):
spec = self._test_spec()
spec['format'] = 'numeric_percent'
wrapped = ReportColumnFactory.from_spec(spec)
self.assertEqual(33, wrapped.get_format_fn()({'num': 1, 'denom': 3}))
def test_format_float(self):
spec = self._test_spec()
spec['format'] = 'decimal'
wrapped = ReportColumnFactory.from_spec(spec)
self.assertEqual(.333, wrapped.get_format_fn()({'num': 1, 'denom': 3}))
self.assertEqual(.25, wrapped.get_format_fn()({'num': 1, 'denom': 4}))
def _test_spec(self):
return {
'type': 'percent',
'column_id': 'pct',
'denominator': {
"aggregation": "simple",
"field": "is_pregnant",
"type": "field",
},
'numerator': {
"aggregation": "simple",
"field": "has_danger_signs",
"type": "field",
}
}
```
#### File: userreports/tests/test_data_source_repeats.py
```python
import json
import os
import datetime
from django.test import SimpleTestCase, TestCase
from corehq.apps.userreports.models import DataSourceConfiguration
from corehq.apps.userreports.sql import IndicatorSqlAdapter
DOC_ID = 'repeat-id'
DAY_OF_WEEK = 'monday'
class RepeatDataSourceTestMixin(object):
def setUp(self):
folder = os.path.join(os.path.dirname(__file__), 'data', 'configs')
sample_file = os.path.join(folder, 'data_source_with_repeat.json')
with open(sample_file) as f:
self.config = DataSourceConfiguration.wrap(json.loads(f.read()))
class RepeatDataSourceConfigurationTest(RepeatDataSourceTestMixin, SimpleTestCase):
def test_test_doc_matches(self):
self.assertTrue(self.config.filter(_test_doc()))
def test_empty_doc_no_rows(self):
self.assertEqual([], self.config.get_all_values(_test_doc()))
def test_missing_property_no_rows(self):
self.assertEqual([], self.config.get_all_values(_test_doc(form={})))
def test_null_property_no_rows(self):
self.assertEqual([], self.config.get_all_values(_test_doc(form={"time_logs": None})))
def test_empty_list_property_no_rows(self):
self.assertEqual([], self.config.get_all_values(_test_doc(form={"time_logs": []})))
def test_dict_property(self):
start = datetime.datetime.utcnow()
end = start + datetime.timedelta(minutes=30)
rows = self.config.get_all_values(_test_doc(form={"time_logs": {
"start_time": start, "end_time": end, "person": "al"
}}))
self.assertEqual(1, len(rows))
doc_id_ind, inserted_at, repeat_iteration, start_ind, end_ind, person_ind, created_base_ind = rows[0]
self.assertEqual(DOC_ID, doc_id_ind.value)
self.assertEqual(0, repeat_iteration.value)
self.assertEqual(start, start_ind.value)
self.assertEqual(end, end_ind.value)
self.assertEqual('al', person_ind.value)
self.assertEqual(DAY_OF_WEEK, created_base_ind.value)
def test_list_property(self):
now = datetime.datetime.utcnow()
one_hour = datetime.timedelta(hours=1)
logs = [
{"start_time": now, "end_time": now + one_hour, "person": "al"},
{"start_time": now + one_hour, "end_time": now + (one_hour * 2), "person": "chris"},
{"start_time": now + (one_hour * 2), "end_time": now + (one_hour * 3), "person": "katie"},
]
rows = self.config.get_all_values(_test_doc(form={"time_logs": logs}))
self.assertEqual(len(logs), len(rows))
for i, row in enumerate(rows):
doc_id_ind, inserted_at, repeat_iteration, start_ind, end_ind, person_ind, created_base_ind = row
self.assertEqual(DOC_ID, doc_id_ind.value)
self.assertEqual(logs[i]['start_time'], start_ind.value)
self.assertEqual(i, repeat_iteration.value)
self.assertEqual(logs[i]['end_time'], end_ind.value)
self.assertEqual(logs[i]['person'], person_ind.value)
self.assertEqual(DAY_OF_WEEK, created_base_ind.value)
class RepeatDataSourceBuildTest(RepeatDataSourceTestMixin, TestCase):
def test_table_population(self):
adapter = IndicatorSqlAdapter(self.config)
# Delete and create table
adapter.rebuild_table()
# Create a doc
now = datetime.datetime.now()
one_hour = datetime.timedelta(hours=1)
logs = [
{"start_time": now, "end_time": now + one_hour, "person": "al"},
{"start_time": now + one_hour, "end_time": now + (one_hour * 2), "person": "chris"},
{"start_time": now + (one_hour * 2), "end_time": now + (one_hour * 3), "person": "katie"},
]
doc = _test_doc(form={'time_logs': logs})
# Save this document into the table
adapter.save(doc)
# Get rows from the table
rows = adapter.get_query_object()
retrieved_logs = [
{
'start_time': r.start_time,
'end_time': r.end_time,
'person': r.person,
} for r in rows
]
# Check those rows against the expected result
self.assertItemsEqual(
retrieved_logs,
logs,
"The repeat data saved in the data source table did not match the expected data!"
)
def _test_doc(**extras):
test_doc = {
"_id": DOC_ID,
"domain": "user-reports",
"doc_type": "XFormInstance",
"created": DAY_OF_WEEK
}
test_doc.update(extras)
return test_doc
```
#### File: userreports/tests/test_multi_db.py
```python
import uuid
from django.conf import settings
from django.test import TestCase
from mock import patch
from sqlalchemy import create_engine
from sqlalchemy.exc import ProgrammingError
from corehq.apps.userreports.models import DataSourceConfiguration, ReportConfiguration
from corehq.apps.userreports.pillow import ConfigurableIndicatorPillow
from corehq.apps.userreports.reports.factory import ReportFactory
from corehq.apps.userreports.sql.connection import get_engine_id
from corehq.apps.userreports.tests.utils import get_sample_data_source, get_sample_doc_and_indicators, \
get_sample_report_config
from corehq.apps.userreports.sql import IndicatorSqlAdapter
from corehq import db
class UCRMultiDBTest(TestCase):
@classmethod
def setUpClass(cls):
cls.db2_name = 'cchq_ucr_tests'
db_conn_parts = settings.SQL_REPORTING_DATABASE_URL.split('/')
db_conn_parts[-1] = cls.db2_name
cls.db2_url = '/'.join(db_conn_parts)
# setup patches
cls.connection_string_patch = patch('corehq.db.connection_manager.get_connection_string')
def connection_string_for_engine(engine_id):
if engine_id == 'engine-1':
return settings.SQL_REPORTING_DATABASE_URL
else:
return cls.db2_url
mock_manager = cls.connection_string_patch.start()
mock_manager.side_effect = connection_string_for_engine
# setup data sources
data_source_template = get_sample_data_source()
cls.ds_1 = DataSourceConfiguration.wrap(data_source_template.to_json())
cls.ds_1.engine_id = 'engine-1'
cls.ds_1.save()
cls.ds_2 = DataSourceConfiguration.wrap(data_source_template.to_json())
cls.ds_2.engine_id = 'engine-2'
cls.ds_2.save()
# use db1 engine to create db2 http://stackoverflow.com/a/8977109/8207
cls.root_engine = create_engine(settings.SQL_REPORTING_DATABASE_URL)
conn = cls.root_engine.connect()
conn.execute('commit')
try:
conn.execute('CREATE DATABASE {}'.format(cls.db2_name))
except ProgrammingError:
# optimistically assume it failed because the database was already created.
pass
conn.close()
cls.ds1_adapter = IndicatorSqlAdapter(cls.ds_1)
cls.ds2_adapter = IndicatorSqlAdapter(cls.ds_2)
def setUp(self):
# initialize the tables
self.ds1_adapter.rebuild_table()
self.ds2_adapter.rebuild_table()
self.assertEqual(0, self.ds1_adapter.get_query_object().count())
self.assertEqual(0, self.ds2_adapter.get_query_object().count())
@classmethod
def tearDownClass(cls):
# unpatch
cls.connection_string_patch.stop()
# delete data sources
cls.ds_1.delete()
cls.ds_2.delete()
# dispose secondary engine
cls.ds2_adapter.session_helper.engine.dispose()
# drop the secondary database
conn = cls.root_engine.connect()
conn.execute('rollback')
try:
conn.execute('DROP DATABASE {}'.format(cls.db2_name))
finally:
conn.close()
cls.root_engine.dispose()
def tearDown(self):
self.ds1_adapter.session_helper.Session.remove()
self.ds2_adapter.session_helper.Session.remove()
self.ds1_adapter.drop_table()
self.ds2_adapter.drop_table()
def test_patches_and_setup(self):
self.assertEqual('engine-1', get_engine_id(self.ds_1))
self.assertEqual('engine-2', get_engine_id(self.ds_2))
self.assertEqual(settings.SQL_REPORTING_DATABASE_URL,
db.connection_manager.get_connection_string('engine-1'))
self.assertEqual(self.db2_url,
db.connection_manager.get_connection_string('engine-2'))
self.assertNotEqual(str(self.ds1_adapter.engine.url), str(self.ds2_adapter.engine.url))
self.assertEqual(settings.SQL_REPORTING_DATABASE_URL, str(self.ds1_adapter.engine.url))
self.assertEqual(self.db2_url, str(self.ds2_adapter.engine.url))
def test_pillow_save_to_multiple_databases(self):
self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
pillow = ConfigurableIndicatorPillow()
pillow.bootstrap(configs=[self.ds_1, self.ds_2])
self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
sample_doc, _ = get_sample_doc_and_indicators()
pillow.change_transport(sample_doc)
self.assertNotEqual(self.ds1_adapter.engine.url, self.ds2_adapter.engine.url)
self.assertEqual(1, self.ds1_adapter.get_query_object().count())
self.assertEqual(1, self.ds2_adapter.get_query_object().count())
def test_pillow_save_to_one_database_at_a_time(self):
pillow = ConfigurableIndicatorPillow()
pillow.bootstrap(configs=[self.ds_1])
sample_doc, _ = get_sample_doc_and_indicators()
pillow.change_transport(sample_doc)
self.assertEqual(1, self.ds1_adapter.get_query_object().count())
self.assertEqual(0, self.ds2_adapter.get_query_object().count())
# save to the other
pillow.bootstrap(configs=[self.ds_2])
sample_doc['_id'] = uuid.uuid4().hex
pillow.change_transport(sample_doc)
self.assertEqual(1, self.ds1_adapter.get_query_object().count())
self.assertEqual(1, self.ds2_adapter.get_query_object().count())
self.assertEqual(1, self.ds1_adapter.get_query_object().filter_by(doc_id='some-doc-id').count())
self.assertEqual(1, self.ds2_adapter.get_query_object().filter_by(doc_id=sample_doc['_id']).count())
def test_report_data_source(self):
# bootstrap report data sources against indicator data sources
report_config_template = get_sample_report_config()
report_config_1 = ReportConfiguration.wrap(report_config_template.to_json())
report_config_1.config_id = self.ds_1._id
report_config_2 = ReportConfiguration.wrap(report_config_template.to_json())
report_config_2.config_id = self.ds_2._id
# save a few docs to ds 1
sample_doc, _ = get_sample_doc_and_indicators()
num_docs = 3
for i in range(num_docs):
sample_doc['_id'] = uuid.uuid4().hex
self.ds1_adapter.save(sample_doc)
# ds 1 should have data, ds2 should not
ds1_rows = ReportFactory.from_spec(report_config_1).get_data()
self.assertEqual(1, len(ds1_rows))
self.assertEqual(num_docs, ds1_rows[0]['count'])
ds2_rows = ReportFactory.from_spec(report_config_2).get_data()
self.assertEqual(0, len(ds2_rows))
# save one doc to ds 2
sample_doc['_id'] = uuid.uuid4().hex
self.ds2_adapter.save(sample_doc)
# ds 1 should still have same data, ds2 should now have one row
ds1_rows = ReportFactory.from_spec(report_config_1).get_data()
self.assertEqual(1, len(ds1_rows))
self.assertEqual(num_docs, ds1_rows[0]['count'])
ds2_rows = ReportFactory.from_spec(report_config_2).get_data()
self.assertEqual(1, len(ds2_rows))
self.assertEqual(1, ds2_rows[0]['count'])
```
#### File: userreports/tests/test_report_builder.py
```python
import os
from django.test import TestCase
from corehq.apps.app_manager.const import APP_V2
from corehq.apps.app_manager.models import Application, Module
from corehq.apps.userreports.models import DataSourceConfiguration, ReportConfiguration
from corehq.apps.userreports.reports.builder.forms import ConfigureListReportForm
def read(rel_path):
path = os.path.join(os.path.dirname(__file__), *rel_path)
with open(path) as f:
return f.read()
class ReportBuilderTest(TestCase):
@classmethod
def setUpClass(cls):
cls.app = Application.new_app('domain', 'Untitled Application', application_version=APP_V2)
module = cls.app.add_module(Module.new_module('Untitled Module', None))
cls.form = cls.app.new_form(module.id, "Untitled Form", 'en', read(['data', 'forms', 'simple.xml']))
cls.app.save()
@classmethod
def tearDownClass(cls):
cls.app.delete()
for config in DataSourceConfiguration.all():
config.delete()
for config in ReportConfiguration.all():
config.delete()
def test_updating_out_of_date_report(self):
"""
Test that editing a report for an outdated data source creates a new data source.
Data sources are tied to app version.
"""
# Make report
builder_form = ConfigureListReportForm(
"Test Report",
self.app._id,
"form",
self.form.unique_id,
existing_report=None,
data={
'filters': '[]',
'columns': '[]',
}
)
self.assertTrue(builder_form.is_valid())
report = builder_form.create_report()
first_data_source_id = report.config_id
# Bump version of app by saving it
self.app.save()
# Modify the report
builder_form = ConfigureListReportForm(
"Test Report",
self.app._id,
"form",
self.form.unique_id,
existing_report=report,
data={
'filters': '[]',
'columns': '[]'
}
)
self.assertTrue(builder_form.is_valid())
report = builder_form.update_report()
second_data_source_id = report.config_id
self.assertNotEqual(first_data_source_id, second_data_source_id)
```
#### File: userreports/tests/test_static_reports.py
```python
import os
from django.test import SimpleTestCase
from django.test.utils import override_settings
from corehq.util.test_utils import TestFileMixin
from corehq.apps.userreports.models import StaticDataSourceConfiguration, StaticReportConfiguration
class TestStaticReportConfig(SimpleTestCase, TestFileMixin):
file_path = ('data', 'static_reports')
root = os.path.dirname(__file__)
def test_wrap(self):
wrapped = StaticReportConfiguration.wrap(self.get_json('static_report_config'))
self.assertEqual(["example", "dimagi"], wrapped.domains)
def test_get_all(self):
with override_settings(STATIC_UCR_REPORTS=[self.get_path('static_report_config', 'json')]):
all = list(StaticReportConfiguration.all())
self.assertEqual(2, len(all))
example, dimagi = all
self.assertEqual('example', example.domain)
self.assertEqual('dimagi', dimagi.domain)
for config in all:
self.assertEqual('Custom Title', config.title)
def test_production_config(self):
for data_source in StaticDataSourceConfiguration.all():
data_source.validate()
```
#### File: userreports/ui/forms.py
```python
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from crispy_forms import layout as crispy
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from corehq import toggles
from corehq.apps.app_manager.fields import ApplicationDataSourceUIHelper
from corehq.apps.userreports.sql import get_table_name
from corehq.apps.userreports.ui import help_text
from corehq.apps.userreports.ui.fields import ReportDataSourceField, JsonField
class DocumentFormBase(forms.Form):
"""
HQ specific document base form. Loosely modeled off of Django's ModelForm
"""
def __init__(self, instance=None, read_only=False, *args, **kwargs):
self.instance = instance
object_data = instance._doc if instance is not None else {}
self.helper = FormHelper()
self.helper.form_method = 'post'
if not read_only:
self.helper.add_input(Submit('submit', _('Save Changes')))
super(DocumentFormBase, self).__init__(initial=object_data, *args, **kwargs)
def save(self, commit=False):
self.populate_instance(self.instance, self.cleaned_data)
if commit:
self.instance.save()
return self.instance
def populate_instance(self, instance, cleaned_data):
for field in self.fields:
setattr(instance, field, cleaned_data[field])
return instance
VISIBILITY_CHOICES = (
(True, _('all users')),
(False, _('feature flag only'))
)
class ConfigurableReportEditForm(DocumentFormBase):
config_id = forms.ChoiceField() # gets overridden on instantiation
title = forms.CharField()
visible = forms.ChoiceField(label=_('Visible to:'), choices=VISIBILITY_CHOICES)
description = forms.CharField(required=False)
aggregation_columns = JsonField(expected_type=list)
filters = JsonField(expected_type=list)
columns = JsonField(expected_type=list)
configured_charts = JsonField(expected_type=list)
sort_expression = JsonField(expected_type=list)
def __init__(self, domain, instance=None, read_only=False, *args, **kwargs):
super(ConfigurableReportEditForm, self).__init__(instance, read_only, *args, **kwargs)
self.fields['config_id'] = ReportDataSourceField(domain=domain)
def clean_visible(self):
return self.cleaned_data['visible'] == 'True'
def clean(self):
cleaned_data = super(ConfigurableReportEditForm, self).clean()
# only call additional validation if initial validation has passed for all fields
for field in self.fields:
if field not in cleaned_data:
return
try:
config = self.populate_instance(self.instance, cleaned_data)
config.validate()
except Exception, e:
raise ValidationError(_(u'Problem with report spec: {}').format(e))
return cleaned_data
DOC_TYPE_CHOICES = (
('CommCareCase', _('cases')),
('XFormInstance', _('forms'))
)
class ConfigurableDataSourceEditForm(DocumentFormBase):
table_id = forms.CharField(label=_("Table ID"),
help_text=help_text.TABLE_ID)
referenced_doc_type = forms.ChoiceField(
choices=DOC_TYPE_CHOICES,
label=_("Source Type"))
display_name = forms.CharField(label=_("Report Title"),
help_text=help_text.DISPLAY_NAME)
description = forms.CharField(required=False,
help_text=help_text.DESCRIPTION)
base_item_expression = JsonField(expected_type=dict,
help_text=help_text.BASE_ITEM_EXPRESSION)
configured_filter = JsonField(expected_type=dict,
help_text=help_text.CONFIGURED_FILTER)
configured_indicators = JsonField(
expected_type=list, help_text=help_text.CONFIGURED_INDICATORS)
named_filters = JsonField(required=False, expected_type=dict,
label=_("Named filters (optional)"),
help_text=help_text.NAMED_FILTER)
def __init__(self, domain, *args, **kwargs):
self.domain = domain
super(ConfigurableDataSourceEditForm, self).__init__(*args, **kwargs)
if toggles.LOCATIONS_IN_UCR.enabled(domain):
choices = self.fields['referenced_doc_type'].choices
choices.append(
('Location', _('locations'))
)
self.fields['referenced_doc_type'].choices = choices
def clean_table_id(self):
# todo: validate table_id as [a-z][a-z0-9_]*
table_id = self.cleaned_data['table_id']
table_name = get_table_name(self.domain, table_id)
if len(table_name) > 63: # max table name length for postgres
raise ValidationError(
_('Table id is too long. Your table id and domain name must add up to fewer than 40 characters')
)
for src in self.instance.by_domain(self.domain):
if src.table_id == table_id and src.get_id != self.instance.get_id:
raise ValidationError(
_('A data source with this table id already exists. Table'
' ids must be unique')
)
return table_id
def clean(self):
cleaned_data = super(ConfigurableDataSourceEditForm, self).clean()
# only call additional validation if initial validation has passed for all fields
for field in self.fields:
if field not in cleaned_data:
return
try:
config = self.populate_instance(self.instance, cleaned_data)
config.validate()
except Exception, e:
if settings.DEBUG:
raise
raise ValidationError(_(u'Problem with data source spec: {}').format(e))
return cleaned_data
def save(self, commit=False):
self.instance.meta.build.finished = False
self.instance.meta.build.initiated = None
return super(ConfigurableDataSourceEditForm, self).save(commit)
class ConfigurableDataSourceFromAppForm(forms.Form):
def __init__(self, domain, *args, **kwargs):
super(ConfigurableDataSourceFromAppForm, self).__init__(*args, **kwargs)
self.app_source_helper = ApplicationDataSourceUIHelper()
self.app_source_helper.bootstrap(domain)
report_source_fields = self.app_source_helper.get_fields()
self.fields.update(report_source_fields)
self.helper = FormHelper()
self.helper.form_id = "data-source-config"
self.helper.layout = crispy.Layout(
crispy.Div(
*report_source_fields.keys() + [Submit('submit', _('Save Changes'))]
)
)
```
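A hedged sketch of how a view might drive `ConfigurableReportEditForm`, since `DocumentFormBase` mirrors Django's `ModelForm` flow (validate, copy the cleaned data onto the couch document, save). The names `request`, `domain` and `report_config` are illustrative assumptions, not part of the module above.
```python
# Hypothetical view-level usage; `request`, `domain` and `report_config` are
# illustrative stand-ins, not names defined in the module above.
from corehq.apps.userreports.ui.forms import ConfigurableReportEditForm

def edit_report_form(request, domain, report_config):
    if request.method == 'POST':
        form = ConfigurableReportEditForm(domain, instance=report_config,
                                          data=request.POST)
        if form.is_valid():
            # populate_instance() copies each cleaned field onto the couch
            # document; commit=True then persists it
            form.save(commit=True)
    else:
        form = ConfigurableReportEditForm(domain, instance=report_config)
    return form
```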
#### File: apps/users/forms.py
```python
from crispy_forms.bootstrap import FormActions, StrictButton
from crispy_forms.helper import FormHelper
from crispy_forms import layout as crispy
from crispy_forms.layout import Div, Fieldset, HTML, Layout, Submit
import datetime
from dimagi.utils.django.fields import TrimmedCharField
from django import forms
from django.core.validators import EmailValidator, validate_email
from django.core.urlresolvers import reverse
from django.forms.widgets import PasswordInput, HiddenInput
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _, ugettext_lazy, ugettext_noop
from django.template.loader import get_template
from django.template import Context
from django_countries.data import COUNTRIES
from corehq import toggles
from corehq.apps.domain.forms import EditBillingAccountInfoForm
from corehq.apps.domain.models import Domain
from corehq.apps.locations.models import Location
from corehq.apps.users.models import CouchUser
from corehq.apps.users.util import format_username, cc_user_domain
from corehq.apps.app_manager.models import validate_lang
from corehq.apps.programs.models import Program
# Bootstrap 3 Crispy Forms
from crispy_forms import layout as cb3_layout
from crispy_forms import helper as cb3_helper
from crispy_forms import bootstrap as twbscrispy
from corehq.apps.style import crispy as hqcrispy
import re
# required to translate inside of a mark_safe tag
from django.utils.functional import lazy
import six # Python 3 compatibility
mark_safe_lazy = lazy(mark_safe, six.text_type)
UNALLOWED_MOBILE_WORKER_NAMES = ('admin', 'demo_user')
def get_mobile_worker_max_username_length(domain):
"""
The auth_user table only allows for usernames up to 128 characters long.
The code used to allow for usernames up to 80 characters, but that
didn't properly take into consideration the fact that the domain and
site name vary.
"""
return min(128 - len(cc_user_domain(domain)) - 1, 80)
def clean_mobile_worker_username(domain, username, name_too_long_message=None,
name_reserved_message=None, name_exists_message=None):
max_username_length = get_mobile_worker_max_username_length(domain)
if len(username) > max_username_length:
raise forms.ValidationError(name_too_long_message or
_('Username %(username)s is too long. Must be under %(max_length)s characters.')
% {'username': username, 'max_length': max_username_length})
if username in UNALLOWED_MOBILE_WORKER_NAMES:
raise forms.ValidationError(name_reserved_message or
_('The username "%(username)s" is reserved for CommCare.')
% {'username': username})
username = format_username(username, domain)
validate_username(username)
if CouchUser.username_exists(username):
raise forms.ValidationError(name_exists_message or
_('This Mobile Worker already exists.'))
return username
def wrapped_language_validation(value):
try:
validate_lang(value)
except ValueError:
raise forms.ValidationError("%s is not a valid language code! Please "
"enter a valid two or three digit code." % value)
class LanguageField(forms.CharField):
"""
Adds language code validation to a field
"""
def __init__(self, *args, **kwargs):
super(LanguageField, self).__init__(*args, **kwargs)
self.min_length = 2
self.max_length = 3
default_error_messages = {
        'invalid': ugettext_lazy(u'Please enter a valid two or three letter language code.'),
}
default_validators = [wrapped_language_validation]
class BaseUpdateUserForm(forms.Form):
@property
def direct_properties(self):
return []
def clean_email(self):
return self.cleaned_data['email'].lower()
def update_user(self, existing_user=None, save=True, **kwargs):
is_update_successful = False
if not existing_user and 'email' in self.cleaned_data:
from django.contrib.auth.models import User
django_user = User()
django_user.username = self.cleaned_data['email']
django_user.save()
existing_user = CouchUser.from_django_user(django_user)
existing_user.save()
is_update_successful = True
for prop in self.direct_properties:
setattr(existing_user, prop, self.cleaned_data[prop])
is_update_successful = True
if is_update_successful and save:
existing_user.save()
return is_update_successful
def initialize_form(self, domain, existing_user=None):
if existing_user is None:
return
for prop in self.direct_properties:
self.initial[prop] = getattr(existing_user, prop, "")
class UpdateUserRoleForm(BaseUpdateUserForm):
role = forms.ChoiceField(choices=(), required=False)
def update_user(self, existing_user=None, domain=None, **kwargs):
is_update_successful = super(UpdateUserRoleForm, self).update_user(existing_user, save=False)
if domain and 'role' in self.cleaned_data:
role = self.cleaned_data['role']
try:
existing_user.set_role(domain, role)
existing_user.save()
is_update_successful = True
except KeyError:
pass
elif is_update_successful:
existing_user.save()
return is_update_successful
def load_roles(self, role_choices=None, current_role=None):
if role_choices is None:
role_choices = []
self.fields['role'].choices = role_choices
if current_role:
self.initial['role'] = current_role
class UpdateUserPermissionForm(forms.Form):
super_user = forms.BooleanField(label=ugettext_lazy('System Super User'), required=False)
def update_user_permission(self, couch_user=None, editable_user=None, is_super_user=None):
is_update_successful = False
if editable_user and couch_user.is_superuser:
editable_user.is_superuser = is_super_user
editable_user.save()
is_update_successful = True
return is_update_successful
class BaseUserInfoForm(forms.Form):
first_name = forms.CharField(label=ugettext_lazy('First Name'), max_length=50, required=False)
last_name = forms.CharField(label=ugettext_lazy('Last Name'), max_length=50, required=False)
email = forms.EmailField(label=ugettext_lazy("E-Mail"), max_length=75, required=False)
language = forms.ChoiceField(
choices=(),
initial=None,
required=False,
help_text=mark_safe_lazy(
ugettext_lazy(
"<i class=\"icon-info-sign\"></i> "
"Becomes default language seen in CloudCare and reports (if applicable), "
"but does not affect mobile applications. "
"Supported languages for reports are en, fr (partial), and hin (partial)."
)
)
)
def load_language(self, language_choices=None):
if language_choices is None:
language_choices = []
self.fields['language'].choices = [('', '')] + language_choices
class UpdateMyAccountInfoForm(BaseUpdateUserForm, BaseUserInfoForm):
email_opt_out = forms.BooleanField(
required=False,
label=ugettext_lazy("Opt out of emails about CommCare updates."),
)
def __init__(self, *args, **kwargs):
self.username = kwargs.pop('username') if 'username' in kwargs else None
self.user = kwargs.pop('user') if 'user' in kwargs else None
api_key = kwargs.pop('api_key') if 'api_key' in kwargs else None
super(UpdateMyAccountInfoForm, self).__init__(*args, **kwargs)
username_controls = []
if self.username:
username_controls.append(hqcrispy.StaticField(
_('Username'), self.username)
)
api_key_controls = [
hqcrispy.StaticField(_('API Key'), api_key),
hqcrispy.FormActions(
twbscrispy.StrictButton(
_('Generate API Key'),
type="button",
id='generate-api-key',
),
css_class="form-group"
),
]
self.fields['language'].label = _("My Language")
self.new_helper = cb3_helper.FormHelper()
self.new_helper.form_method = 'POST'
self.new_helper.form_class = 'form-horizontal'
self.new_helper.attrs = {
'name': 'user_information',
}
self.new_helper.label_class = 'col-sm-3 col-md-2 col-lg-2'
self.new_helper.field_class = 'col-sm-9 col-md-8 col-lg-6'
self.new_helper.layout = cb3_layout.Layout(
cb3_layout.Fieldset(
_("Basic"),
cb3_layout.Div(*username_controls),
hqcrispy.Field('first_name'),
hqcrispy.Field('last_name'),
hqcrispy.Field('email'),
hqcrispy.Field('email_opt_out'),
),
cb3_layout.Fieldset(
_("Other Options"),
hqcrispy.Field('language'),
cb3_layout.Div(*api_key_controls),
),
hqcrispy.FormActions(
twbscrispy.StrictButton(
_("Update My Information"),
type='submit',
css_class='btn-primary',
)
)
)
@property
def direct_properties(self):
return self.fields.keys()
class UpdateCommCareUserInfoForm(BaseUserInfoForm, UpdateUserRoleForm):
loadtest_factor = forms.IntegerField(
required=False, min_value=1, max_value=50000,
help_text=ugettext_lazy(u"Multiply this user's case load by a number for load testing on phones. "
u"Leave blank for normal users."),
widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
super(UpdateCommCareUserInfoForm, self).__init__(*args, **kwargs)
self.fields['role'].help_text = _(mark_safe(
"<i class=\"icon-info-sign\"></i> "
"Only applies to mobile workers that will be entering data using "
"<a href='https://help.commcarehq.org/display/commcarepublic/CloudCare+-+Web+Data+Entry'>"
"CloudCare</a>"
))
@property
def direct_properties(self):
indirect_props = ['role']
return [k for k in self.fields.keys() if k not in indirect_props]
def initialize_form(self, domain, existing_user=None):
if toggles.ENABLE_LOADTEST_USERS.enabled(domain):
self.fields['loadtest_factor'].widget = forms.TextInput()
super(UpdateCommCareUserInfoForm, self).initialize_form(domain, existing_user)
class RoleForm(forms.Form):
def __init__(self, *args, **kwargs):
if kwargs.has_key('role_choices'):
role_choices = kwargs.pop('role_choices')
else:
role_choices = ()
super(RoleForm, self).__init__(*args, **kwargs)
self.fields['role'].choices = role_choices
class CommCareAccountForm(forms.Form):
"""
Form for CommCareAccounts
"""
username = forms.CharField(required=True)
password = forms.CharField(widget=PasswordInput(), required=True, min_length=1)
password_2 = forms.CharField(label='Password (reenter)', widget=PasswordInput(), required=True, min_length=1)
phone_number = forms.CharField(max_length=80, required=False)
def __init__(self, *args, **kwargs):
if 'domain' not in kwargs:
raise Exception('Expected kwargs: domain')
self.domain = kwargs.pop('domain', None)
super(forms.Form, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.layout = Layout(
Fieldset(
'Create new Mobile Worker account',
'username',
'password',
'password_2',
'phone_number',
Div(
Div(HTML("Please enter number, including international code, in digits only."),
css_class="controls"),
css_class="control-group"
)
)
)
def clean_username(self):
return clean_mobile_worker_username(
self.domain,
self.cleaned_data.get('username')
)
def clean_phone_number(self):
phone_number = self.cleaned_data['phone_number']
phone_number = re.sub('\s|\+|\-', '', phone_number)
if phone_number == '':
return None
elif not re.match(r'\d+$', phone_number):
raise forms.ValidationError(_("%s is an invalid phone number." % phone_number))
return phone_number
def clean(self):
try:
password = self.cleaned_data['password']
password_2 = self.cleaned_data['password_2']
except KeyError:
pass
else:
if password != password_2:
raise forms.ValidationError("Passwords do not match")
return self.cleaned_data
import django
if django.VERSION < (1, 6):
from django.core.validators import email_re
validate_username = EmailValidator(email_re,
ugettext_lazy(u'Username contains invalid characters.'), 'invalid')
else:
validate_username = EmailValidator(message=ugettext_lazy(u'Username contains invalid characters.'))
_username_help = """
<span ng-if="usernameAvailabilityStatus === 'pending'">
<i class="fa fa-circle-o-notch fa-spin"></i>
%(checking)s
</span>
<span ng-if="usernameAvailabilityStatus === 'taken'"
style="word-wrap:break-word;">
<i class="fa fa-remove"></i>
{{ usernameStatusMessage }}
</span>
<span ng-if="usernameAvailabilityStatus === 'available'"
style="word-wrap:break-word;">
<i class="fa fa-check"></i>
{{ usernameStatusMessage }}
</span>
<span ng-if="usernameAvailabilityStatus === 'error'">
<i class="fa fa-exclamation-triangle"></i>
%(server_error)s
</span>
""" % {
'checking': ugettext_noop('Checking Availability...'),
'server_error': ugettext_noop('Issue connecting to server. Check Internet connection.')
}
class NewMobileWorkerForm(forms.Form):
username = forms.CharField(
max_length=50,
required=True,
help_text=_username_help,
label=ugettext_noop("Username"),
)
first_name = forms.CharField(
max_length=50,
required=False,
label=ugettext_noop("First Name")
)
last_name = forms.CharField(
max_length=50,
required=False,
label=ugettext_noop("Last Name")
)
password = forms.CharField(
widget=PasswordInput(),
required=True,
min_length=1,
label=ugettext_noop("Password")
)
def __init__(self, domain, *args, **kwargs):
super(NewMobileWorkerForm, self).__init__(*args, **kwargs)
email_string = u"@{}.<EMAIL>".format(domain)
max_chars_username = 80 - len(email_string)
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.label_class = 'col-sm-4'
self.helper.field_class = 'col-sm-8'
self.helper.layout = Layout(
Fieldset(
_('Basic Information'),
crispy.Field(
'username',
ng_required="true",
validate_username="",
# What this says is, update as normal or when the element
# loses focus. If the update is normal, wait 300 ms to
# send the request again. If the update is on blur,
# send the request.
ng_model_options="{ "
" updateOn: 'default blur', "
" debounce: {'default': 300, 'blur': 0} "
"}",
ng_model='mobileWorker.username',
ng_maxlength=max_chars_username,
maxlength=max_chars_username,
),
crispy.Field(
'first_name',
ng_required="false",
ng_model='mobileWorker.first_name',
ng_maxlength="50",
),
crispy.Field(
'last_name',
ng_required="false",
ng_model='mobileWorker.last_name',
ng_maxlength="50",
),
crispy.Field(
'password',
ng_required="true",
ng_model='mobileWorker.password'
),
)
)
def clean_username(self):
username = self.cleaned_data['username']
if username == 'admin' or username == 'demo_user':
raise forms.ValidationError("The username %s is reserved for CommCare." % username)
return username
class MultipleSelectionForm(forms.Form):
"""
Form for selecting groups (used by the group UI on the user page)
Usage::
# views.py
@property
@memoized
def users_form(self):
form = MultipleSelectionForm(
initial={'selected_ids': self.users_at_location},
submit_label=_("Update Users at this Location"),
)
form.fields['selected_ids'].choices = self.all_users
return form
# template.html
<script src="{% static 'hqwebapp/js/ui-element.js' %}"></script>
<script src="{% static 'hqwebapp/js/lib/jquery-ui/jquery-ui-1.9.2.multiselect-deps.custom.min.js' %}"></script>
<script src="{% static 'hqwebapp/js/lib/jquery-ui/multiselect/ui.multiselect.js' %}"></script>
<script type="text/javascript">
$(function () {
$("#id_selected_ids").width(800).height(400).multiselect();
});
</script>
<form class="form disable-on-submit" id="edit_users" action="" method='post'>
<legend>{% trans 'Specify Users At This Location' %}</legend>
{% crispy users_per_location_form %}
</form>
To display multiple forms on the same page, you'll need to pass a prefix to
the MultipleSelectionForm constructor, like ``prefix="users"`` This will
change the css id to ``"#id_users-selected_ids"``, and the returned list of
ids to ``request.POST.getlist('users-selected_ids', [])``
"""
selected_ids = forms.MultipleChoiceField(
label="",
required=False,
)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_tag = False
submit_label = kwargs.pop('submit_label', "Update")
self.helper.add_input(Submit('submit', submit_label))
super(MultipleSelectionForm, self).__init__(*args, **kwargs)
class SupplyPointSelectWidget(forms.Widget):
def __init__(self, attrs=None, domain=None, id='supply-point', multiselect=False):
super(SupplyPointSelectWidget, self).__init__(attrs)
self.domain = domain
self.id = id
self.multiselect = multiselect
def render(self, name, value, attrs=None):
return get_template('locations/manage/partials/autocomplete_select_widget.html').render(Context({
'id': self.id,
'name': name,
'value': value or '',
'query_url': reverse('corehq.apps.locations.views.child_locations_for_select2', args=[self.domain]),
'multiselect': self.multiselect,
}))
class CommtrackUserForm(forms.Form):
location = forms.CharField(label='Location:', required=False)
program_id = forms.ChoiceField(label="Program", choices=(), required=False)
def __init__(self, *args, **kwargs):
domain = None
if 'domain' in kwargs:
domain = kwargs['domain']
del kwargs['domain']
super(CommtrackUserForm, self).__init__(*args, **kwargs)
self.fields['location'].widget = SupplyPointSelectWidget(domain=domain)
if Domain.get_by_name(domain).commtrack_enabled:
programs = Program.by_domain(domain, wrap=False)
choices = list((prog['_id'], prog['name']) for prog in programs)
choices.insert(0, ('', ''))
self.fields['program_id'].choices = choices
else:
self.fields['program_id'].widget = forms.HiddenInput()
def save(self, user):
location_id = self.cleaned_data['location']
# This means it will clear the location associations set in a domain
# with multiple locations configured. It is acceptable for now because
# multi location config is a not really supported special flag for IPM.
if location_id:
if location_id != user.location_id:
user.set_location(Location.get(location_id))
else:
user.unset_location()
class DomainRequestForm(forms.Form):
full_name = forms.CharField(label=ugettext_lazy('Full Name'), required=True)
email = forms.CharField(
label=ugettext_lazy('Email Address'),
required=True,
help_text=ugettext_lazy('You will use this email to log in.'),
)
domain = forms.CharField(widget=forms.HiddenInput(), required=True)
@property
def form_actions(self):
return FormActions(
crispy.ButtonHolder(
crispy.Submit(
'submit',
ugettext_lazy('Request Access')
)
)
)
def __init__(self, *args, **kwargs):
super(DomainRequestForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.show_form_errors = True
self.helper.layout = crispy.Layout(
crispy.Field('full_name'),
crispy.Field('email'),
crispy.Field('domain'),
self.form_actions,
)
def clean_email(self):
data = self.cleaned_data['email'].strip().lower()
validate_email(data)
return data
class ConfirmExtraUserChargesForm(EditBillingAccountInfoForm):
confirm_product_agreement = forms.BooleanField(
required=True,
)
def __init__(self, account, domain, creating_user, data=None, *args, **kwargs):
super(ConfirmExtraUserChargesForm, self).__init__(account, domain, creating_user, data=data, *args, **kwargs)
self.fields['confirm_product_agreement'].label = _(
'I have read and agree to the <a href="%(pa_url)s" target="_blank">'
'Software Product Subscription Agreement</a>.'
) % {'pa_url': reverse('product_agreement')}
from corehq.apps.users.views.mobile import MobileWorkerListView
self.helper.layout = crispy.Layout(
crispy.Fieldset(
_("Basic Information"),
'company_name',
'first_name',
'last_name',
crispy.Field('emails', css_class='input-xxlarge'),
'phone_number',
),
crispy.Fieldset(
_("Mailing Address"),
'first_line',
'second_line',
'city',
'state_province_region',
'postal_code',
crispy.Field('country', css_class="input-large",
data_countryname=COUNTRIES.get(self.current_country, '')),
),
crispy.Field('confirm_product_agreement'),
FormActions(
crispy.HTML(
'<a href="%(user_list_url)s" class="btn">%(text)s</a>' % {
'user_list_url': reverse(MobileWorkerListView.urlname, args=[self.domain]),
'text': _("Back to Mobile Workers List")
}
),
StrictButton(
_("Confirm Billing Information"),
type="submit",
css_class='btn btn-primary disabled',
disabled="disabled",
css_id="submit-button-pa",
),
crispy.HTML(
'<p class="help-inline" id="submit-button-help-qa" style="vertical-align: '
'top; margin-top: 5px; margin-bottom: 0px;">%s</p>' % _("Please agree to the Product Subscription "
"Agreement above before continuing.")
),
),
)
def save(self, commit=True):
account_save_success = super(ConfirmExtraUserChargesForm, self).save(commit=False)
if not account_save_success:
return False
self.account.date_confirmed_extra_charges = datetime.datetime.today()
self.account.save()
return True
class SelfRegistrationForm(forms.Form):
def __init__(self, *args, **kwargs):
if 'domain' not in kwargs:
raise Exception('Expected kwargs: domain')
self.domain = kwargs.pop('domain')
super(SelfRegistrationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form form-horizontal'
layout_fields = [
crispy.Fieldset(
_('Register'),
crispy.Field('username'),
crispy.Field('password'),
crispy.Field('password2'),
),
FormActions(
StrictButton(
_('Register'),
css_class='btn-primary',
type='submit',
)
),
]
self.helper.layout = crispy.Layout(*layout_fields)
username = TrimmedCharField(
required=True,
label=ugettext_lazy('Username (create a username)'),
)
password = forms.CharField(
required=True,
label=ugettext_lazy('Password (create a password)'),
widget=PasswordInput(),
)
password2 = forms.CharField(
required=True,
label=ugettext_lazy('Re-enter Password'),
widget=PasswordInput(),
)
def clean_username(self):
return clean_mobile_worker_username(
self.domain,
self.cleaned_data.get('username')
)
def clean_password2(self):
if self.cleaned_data.get('password') != self.cleaned_data.get('password2'):
raise forms.ValidationError(_('Passwords do not match.'))
```
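The docstring of `get_mobile_worker_max_username_length` above pins the username budget to the 128-character `auth_user.username` column. A standalone illustration of the same arithmetic follows; the helper is purely illustrative and assumes `cc_user_domain()` yields something like `'<domain>.commcarehq.org'`.
```python
# Standalone illustration of the budget computed by
# get_mobile_worker_max_username_length(); `domain_part` plays the role of
# cc_user_domain(domain), e.g. "demo.commcarehq.org" (an assumption here).
def max_username_length(domain_part):
    # the stored value is "<worker>@<domain_part>", capped at 128 characters,
    # and the worker part itself is additionally capped at 80
    return min(128 - len(domain_part) - 1, 80)

assert max_username_length('demo.commcarehq.org') == 80   # short domain: the 80 cap wins
assert max_username_length('a' * 60) == 67                # long domain: 128 - 60 - 1
```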
#### File: management/commands/make_emails_lowercase.py
```python
from django.core.management import BaseCommand
from corehq.apps.users.models import CouchUser
class Command(BaseCommand):
help = "Makes emails into lowercase"
def handle(self, *args, **options):
db = CouchUser.get_db()
# This view includes users with base_doc == CouchUser-Deleted
for res in db.view("users/by_default_phone", include_docs=True, reduce=False):
doc = res['doc']
# if this condition is met, the doc can't be wrapped
if doc['email'] and not doc['email'].islower():
print doc['email']
doc['email'] = doc['email'].lower()
try:
user = CouchUser.wrap_correctly(doc)
user.save()
except:
print doc['_id'], "failed to save"
```
#### File: users/tests/test_analytics.py
```python
from django.test import TestCase
from corehq.apps.users.analytics import update_analytics_indexes, get_count_of_active_commcare_users_in_domain, \
get_count_of_inactive_commcare_users_in_domain, get_active_commcare_users_in_domain, \
get_inactive_commcare_users_in_domain
from corehq.apps.users.dbaccessors.all_commcare_users import delete_all_users
from corehq.apps.users.models import CommCareUser, WebUser
class UserAnalyticsTest(TestCase):
@classmethod
def setUpClass(cls):
delete_all_users()
cls.active_user = CommCareUser.create(
domain='test',
username='active',
password='<PASSWORD>',
is_active=True,
)
cls.active_user_2 = CommCareUser.create(
domain='test',
username='active2',
password='<PASSWORD>',
is_active=True,
)
cls.inactive_user = CommCareUser.create(
domain='test',
username='inactive',
password='<PASSWORD>',
is_active=False
)
cls.web_user = WebUser.create(
domain='test',
username='web',
password='<PASSWORD>',
)
update_analytics_indexes()
def test_get_count_of_active_commcare_users_in_domain(self):
self.assertEqual(2, get_count_of_active_commcare_users_in_domain('test'))
def test_get_count_of_active_commcare_users_in_domain_no_results(self):
self.assertEqual(0, get_count_of_active_commcare_users_in_domain('missing'))
def test_get_count_of_inactive_commcare_users_in_domain(self):
self.assertEqual(1, get_count_of_inactive_commcare_users_in_domain('test'))
def test_get_count_of_inactive_commcare_users_in_domain_no_results(self):
self.assertEqual(0, get_count_of_inactive_commcare_users_in_domain('missing'))
def test_get_active_commcare_users_in_domain(self):
users = get_active_commcare_users_in_domain('test')
self.assertEqual(2, len(users))
self.assertEqual(set(['active', 'active2']), set([u.username for u in users]))
def test_get_inactive_commcare_users_in_domain(self):
users = get_inactive_commcare_users_in_domain('test')
self.assertEqual(1, len(users))
self.assertEqual('inactive', users[0].username)
def test_get_active_commcare_users_in_domain_no_results(self):
self.assertEqual(0, len(get_active_commcare_users_in_domain('missing')))
def test_get_inactive_commcare_users_in_domain_no_results(self):
self.assertEqual(0, len(get_inactive_commcare_users_in_domain('missing')))
```
#### File: users/tests/test_db_accessors.py
```python
from django.test import TestCase
from corehq.apps.users.models import WebUser, CommCareUser
from corehq.apps.users.dbaccessors.all_commcare_users import (
get_all_commcare_users_by_domain,
get_user_docs_by_username,
delete_all_users, get_all_user_ids)
from corehq.apps.domain.models import Domain
class AllCommCareUsersTest(TestCase):
@classmethod
def setUpClass(cls):
delete_all_users()
cls.ccdomain = Domain(name='cc_user_domain')
cls.ccdomain.save()
cls.other_domain = Domain(name='other_domain')
cls.other_domain.save()
cls.ccuser_1 = CommCareUser.create(
domain=cls.ccdomain.name,
username='ccuser_1',
password='<PASSWORD>',
email='<EMAIL>',
)
cls.ccuser_2 = CommCareUser.create(
domain=cls.ccdomain.name,
username='ccuser_2',
password='<PASSWORD>',
email='<EMAIL>',
)
cls.web_user = WebUser.create(
domain=cls.ccdomain.name,
username='webuser',
password='<PASSWORD>',
email='<EMAIL>',
)
cls.ccuser_other_domain = CommCareUser.create(
domain=cls.other_domain.name,
username='cc_user_other_domain',
password='<PASSWORD>',
email='<EMAIL>',
)
@classmethod
def tearDownClass(cls):
delete_all_users()
def test_get_all_commcare_users_by_domain(self):
expected_users = [self.ccuser_2, self.ccuser_1]
expected_usernames = [user.username for user in expected_users]
actual_usernames = [user.username for user in get_all_commcare_users_by_domain(self.ccdomain.name)]
self.assertItemsEqual(actual_usernames, expected_usernames)
def test_exclude_retired_users(self):
deleted_user = CommCareUser.create(
domain=self.ccdomain.name,
username='deleted_user',
password='<PASSWORD>',
email='<EMAIL>',
)
deleted_user.retire()
self.assertNotIn(
deleted_user.username,
[user.username for user in
get_all_commcare_users_by_domain(self.ccdomain.name)]
)
deleted_user.delete()
def test_get_user_docs_by_username(self):
users = [self.ccuser_1, self.web_user, self.ccuser_other_domain]
usernames = [u.username for u in users] + ['<EMAIL>']
self.assertItemsEqual(
get_user_docs_by_username(usernames),
[u.to_json() for u in users]
)
def test_get_all_ids(self):
all_ids = get_all_user_ids()
self.assertEqual(4, len(all_ids))
for id in [self.ccuser_1._id, self.ccuser_2._id, self.web_user._id, self.ccuser_other_domain._id]:
self.assertTrue(id in all_ids)
```
#### File: couchapps/tests/test_all_docs.py
```python
from corehq.dbaccessors.couchapps.all_docs import \
get_all_doc_ids_for_domain_grouped_by_db, get_doc_count_by_type, \
delete_all_docs_by_doc_type
from dimagi.utils.couch.database import get_db
from django.test import TestCase
class AllDocsTest(TestCase):
@classmethod
def setUpClass(cls):
cls.main_db = get_db(None)
cls.users_db = get_db('users')
delete_all_docs_by_doc_type(cls.main_db, ('Application', 'CommCareUser'))
delete_all_docs_by_doc_type(cls.users_db, ('Application', 'CommCareUser'))
cls.domain = 'all-docs-domain'
cls.main_db_doc = {'_id': 'main_db_doc', 'domain': cls.domain,
'doc_type': 'Application'}
cls.users_db_doc = {'_id': 'users_db_doc', 'domain': cls.domain,
'doc_type': 'CommCareUser'}
cls.main_db.save_doc(cls.main_db_doc)
cls.users_db.save_doc(cls.users_db_doc)
@classmethod
def tearDownClass(cls):
cls.main_db.delete_doc(cls.main_db_doc)
cls.users_db.delete_doc(cls.users_db_doc)
def test_get_all_doc_ids_for_domain_grouped_by_db(self):
self.assertEqual(
{key.uri: list(value) for key, value in
get_all_doc_ids_for_domain_grouped_by_db(self.domain)},
{get_db(None).uri: ['main_db_doc'],
get_db('users').uri: ['users_db_doc'],
get_db('fixtures').uri: []}
)
def test_get_doc_count_by_type(self):
self.assertEqual(get_doc_count_by_type(get_db(None), 'Application'), 1)
self.assertEqual(get_doc_count_by_type(get_db('users'), 'CommCareUser'), 1)
self.assertEqual(get_doc_count_by_type(get_db(None), 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_type(get_db('users'), 'Application'), 0)
```
#### File: dbaccessors/couchapps/all_docs.py
```python
from corehq.preindex import get_preindex_plugin
from corehq.util.couch_helpers import paginate_view
from dimagi.utils.chunked import chunked
from dimagi.utils.couch.database import get_db
def _get_all_docs_dbs():
return get_preindex_plugin('domain').get_dbs('domain') + [get_db(None)]
def get_all_doc_ids_for_domain_grouped_by_db(domain):
"""
This function has the limitation that it only gets docs from the main db
and extra dbs that are listed for the 'domain' design doc
in corehq/apps/domain/__init__.py
"""
# todo: move view to all_docs/by_domain_doc_type as in this original commit:
# todo: https://github.com/dimagi/commcare-hq/commit/400d3878afc5e9f5118ffb30d22b8cebe9afb4a6
for db in _get_all_docs_dbs():
results = db.view(
'domain/related_to_domain',
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
reduce=False,
)
yield (db, (result['id'] for result in results))
def get_doc_count_by_type(db, doc_type):
key = [doc_type]
result = db.view(
'all_docs/by_doc_type', startkey=key, endkey=key + [{}], reduce=True,
group_level=1).one()
if result:
return result['value']
else:
return 0
def get_all_docs_with_doc_types(db, doc_types):
for doc_type in doc_types:
results = paginate_view(
db, 'all_docs/by_doc_type',
chunk_size=100, startkey=[doc_type], endkey=[doc_type, {}],
attachments=True, include_docs=True, reduce=False)
for result in results:
yield result['doc']
def delete_all_docs_by_doc_type(db, doc_types):
for chunk in chunked(get_all_docs_with_doc_types(db, doc_types), 100):
db.bulk_delete(chunk)
```
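The counting and iteration helpers above lean on the `all_docs/by_doc_type` view. A minimal sketch of how they might be called; the `'users'` db name and the `CommCareUser` doc type are taken from the tests earlier in this section, everything else is illustrative.
```python
# Illustrative sketch only; assumes the helpers above are importable and that
# a 'users' couch db exists (as in the tests earlier in this section).
from dimagi.utils.couch.database import get_db
from corehq.dbaccessors.couchapps.all_docs import (
    get_doc_count_by_type, get_all_docs_with_doc_types)

users_db = get_db('users')
print(get_doc_count_by_type(users_db, 'CommCareUser'))  # total CommCareUser docs
usernames = [doc.get('username')
             for doc in get_all_docs_with_doc_types(users_db, ['CommCareUser'])]
```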
#### File: corehq/doctypemigrations/bulk_migrate.py
```python
import json
from corehq.dbaccessors.couchapps.all_docs import get_all_docs_with_doc_types
from corehq.util.couch import IterDB
def bulk_migrate(source_db, target_db, doc_types, filename):
with open(filename, 'w') as f:
for doc in get_all_docs_with_doc_types(source_db, doc_types):
f.write('{}\n'.format(json.dumps(doc)))
with open(filename, 'r') as f:
with IterDB(target_db, new_edits=False) as iter_db:
for line in f:
doc = json.loads(line)
iter_db.save(doc)
```
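`bulk_migrate` streams every matching doc into a newline-delimited JSON file and then replays that file into the target db with `new_edits=False`, which keeps the original revisions. A hedged usage sketch; the db handles, doc types and file path are placeholders.
```python
# Hedged usage sketch; the db handles, doc types and filename are placeholders.
from dimagi.utils.couch.database import get_db
from corehq.doctypemigrations.bulk_migrate import bulk_migrate

bulk_migrate(
    source_db=get_db(None),          # main couch db
    target_db=get_db('users'),       # destination db
    doc_types=('CommCareUser', 'WebUser'),
    filename='/tmp/users_migration.jsonl',
)
```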
#### File: corehq/doctypemigrations/continuous_migrate.py
```python
import datetime
from corehq.util.couch import IterDB
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.chunked import chunked
import logging
def filter_doc_ids_by_doc_type(db, doc_ids, doc_types):
for doc_ids_chunk in chunked(doc_ids, 100):
keys = [[doc_type, doc_id]
for doc_id in doc_ids_chunk
for doc_type in doc_types]
results = db.view('all_docs/by_doc_type', keys=keys, reduce=False)
for result in results:
yield result['id']
def copy_docs(source_db, target_db, doc_ids):
"""
copy docs from source_db to target_db
by doc_id
"""
if not doc_ids:
return
with IterDB(target_db, new_edits=False) as iter_db:
for doc in iter_docs(source_db, doc_ids, attachments=True):
iter_db.save(doc)
if iter_db.errors_by_type:
logging.error('errors bulk saving in copy_docs: {!r}'
.format(iter_db.errors_by_type))
def _bulk_get_revs(target_db, doc_ids):
"""
return (_id, _rev) for every existing doc in doc_ids
if a doc id is not found in target_db, it is excluded from the result
"""
result = target_db.all_docs(keys=list(doc_ids)).all()
return [(row['id'], row['value']['rev']) for row in result if not row.get('error')]
def delete_docs(target_db, doc_ids):
"""
delete docs from database by doc _id and _rev
"""
if not doc_ids:
return
doc_id_rev_pairs = _bulk_get_revs(target_db, doc_ids)
with IterDB(target_db, new_edits=False) as iter_db:
for doc_id, doc_rev in doc_id_rev_pairs:
iter_db.delete({'_id': doc_id, '_rev': doc_rev})
if iter_db.errors_by_type:
logging.error('errors bulk saving in delete_docs: {!r}'
.format(iter_db.errors_by_type))
class ContinuousReplicator(object):
def __init__(self, source_db, target_db, doc_types,
max_changes_before_commit=100,
max_time_before_commit=datetime.timedelta(seconds=5)):
self.source_db = source_db
self.target_db = target_db
self.doc_types = doc_types
self.max_changes_before_commit = max_changes_before_commit
self.max_time_before_commit = max_time_before_commit
self._ids_to_save = None
self._ids_to_delete = None
self._reset()
def _reset(self):
self._last_commit_time = datetime.datetime.utcnow()
self._uncommitted_changes_count = 0
self._ids_to_save = set()
self._ids_to_delete = set()
def replicate_change(self, change):
if change.deleted:
self._ids_to_delete.add(change.id)
else:
self._ids_to_save.add(change.id)
self._uncommitted_changes_count += 1
def commit(self):
ids_to_save = filter_doc_ids_by_doc_type(
self.source_db, self._ids_to_save, self.doc_types)
copy_docs(self.source_db, self.target_db, ids_to_save)
delete_docs(self.target_db, self._ids_to_delete)
self._reset()
def _get_time_since_last_commit(self):
return datetime.datetime.utcnow() - self._last_commit_time
def should_commit(self):
return (self._uncommitted_changes_count > self.max_changes_before_commit or
self._get_time_since_last_commit() > self.max_time_before_commit)
```
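`ContinuousReplicator` buffers changed doc ids and commits a batch once either the change-count or the time threshold is crossed. A sketch of the intended driving loop; the db names and the hand-rolled `changes_feed` are placeholders (in HQ the changes would come from the couch `_changes` listener), and `replicate_change()` only needs objects with `.id` and `.deleted` attributes.
```python
# Sketch of the driving loop for ContinuousReplicator; dbs and feed are placeholders.
from collections import namedtuple
from dimagi.utils.couch.database import get_db
from corehq.doctypemigrations.continuous_migrate import ContinuousReplicator

Change = namedtuple('Change', ['id', 'deleted'])
changes_feed = [Change('some-user-doc-id', False), Change('some-stale-doc-id', True)]

replicator = ContinuousReplicator(
    source_db=get_db(None),
    target_db=get_db('users'),
    doc_types=('CommCareUser', 'WebUser'),
)
for change in changes_feed:
    replicator.replicate_change(change)
    if replicator.should_commit():
        replicator.commit()
replicator.commit()  # flush whatever is still buffered when the feed ends
```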
#### File: corehq/doctypemigrations/migrator_instances.py
```python
from django.conf import settings
from corehq.doctypemigrations.migrator import Migrator
users_migration = Migrator(
slug='user_db_migration',
source_db_name=None,
target_db_name=settings.NEW_USERS_GROUPS_DB,
doc_types=(
'Group',
'DeleteGroupRecord',
'UserRole',
'AdminUserRole',
'CommCareUser',
'WebUser',
'Invitation',
'DomainInvitation',
'DomainRemovalRecord',
'OrgRemovalRecord',
)
)
fixtures_migration = Migrator(
slug='fixtures',
source_db_name=None,
target_db_name=settings.NEW_FIXTURES_DB,
doc_types=(
'FixtureDataType',
'FixtureDataItem',
'FixtureOwnership',
)
)
def get_migrator_by_slug(slug):
return Migrator.instances[slug]
def get_migrator_slugs():
return sorted(Migrator.instances.keys())
```
#### File: apps/case/mock.py
```python
from __future__ import absolute_import
import copy
from datetime import datetime, date
import uuid
from xml.etree import ElementTree
from corehq.form_processor.interfaces import FormProcessorInterface
from dimagi.utils.parsing import json_format_datetime
from casexml.apps.case.xml import V1, NS_VERSION_MAP, V2
from casexml.apps.case.const import DEFAULT_CASE_INDEX_IDENTIFIERS, CASE_INDEX_CHILD
class CaseBlock(dict):
"""
Doctests:
>>> NOW = datetime(year=2012, month=1, day=24)
>>> FIVE_DAYS_FROM_NOW = datetime(year=2012, month=1, day=29)
>>> CASE_ID = 'test-case-id'
# Basic
>>> ElementTree.tostring(CaseBlock(
... case_id=CASE_ID,
... date_opened=NOW,
... date_modified=NOW,
... ).as_xml())
'<case case_id="test-case-id" date_modified="2012-01-24T00:00:00.000000Z" xmlns="http://commcarehq.org/case/transaction/v2"><update><date_opened>2012-01-24T00:00:00.000000Z</date_opened></update></case>'
# Doesn't let you specify a keyword twice (here 'case_name')
>>> try:
... CaseBlock(
... case_id=CASE_ID,
... case_name='Johnny',
... update={'case_name': 'Johnny'},
... ).as_xml()
... except CaseBlockError, e:
... print "%s" % e
Key 'case_name' specified twice
# The following is a BUG; should fail!! Should fix and change tests
>>> ElementTree.tostring(CaseBlock(
... case_id=CASE_ID,
... date_opened=NOW,
... date_modified=NOW,
... update={
... 'date_opened': FIVE_DAYS_FROM_NOW,
... },
... ).as_xml())
'<case case_id="test-case-id" date_modified="2012-01-24T00:00:00.000000Z" xmlns="http://commcarehq.org/case/transaction/v2"><update><date_opened>2012-01-24T00:00:00.000000Z</date_opened></update></case>'
"""
undefined = object()
def __init__(self,
case_id,
date_modified=None,
user_id=undefined,
owner_id=undefined,
external_id=undefined,
case_type=undefined,
case_name=undefined,
create=False,
date_opened=undefined,
update=None,
close=False,
index=None,
strict=True,
):
"""
https://github.com/dimagi/commcare/wiki/casexml20
<case xmlns="http://commcarehq.org/case/transaction/v2" case_id="" user_id="" date_modified="" >
<!-- user_id - At Most One: the GUID of the user responsible for this transaction -->
<!-- case_id - Exactly One: The id of the abstract case to be modified (even in the case of creation) -->
<!-- date_modified - Exactly One: The date and time of this operation -->
<create> <!-- At Most One: Create action -->
<case_type/> <!-- Exactly One: The ID for the type of case represented -->
<owner_id/> <!-- At Most One: The GUID of the current owner of this case -->
<case_name/> <!-- Exactly One: A semantically meaningless but human readable name associated with the case -->
</create>
<update> <!-- At Most One: Updates data for the case -->
<case_type/> <!-- At Most One: Modifies the Case Type for the case -->
<case_name/> <!-- At Most One: A semantically meaningless but human readable name associated with the case -->
<date_opened/> <!-- At Most One: Modifies the Date the case was opened -->
<owner_id/> <!-- At Most One: Modifies the owner of this case -->
<*/> <-- An Arbitrary Number: Creates or mutates a value identified by the key provided -->
</update>
<index/> <!-- At Most One: Contains a set of referenced GUID's to other cases -->
<close/> <!-- At Most One: Closes the case -->
</case>
"""
super(CaseBlock, self).__init__()
self._id = case_id
date_modified = date_modified or datetime.utcnow()
update = copy.copy(update) if update else {}
index = copy.copy(index) if index else {}
self.XMLNS = NS_VERSION_MAP.get(V2)
self.VERSION = V2
self.CASE_TYPE = "case_type"
if create:
self['create'] = {}
# make case_type
case_type = "" if case_type is CaseBlock.undefined else case_type
case_name = "" if case_name is CaseBlock.undefined else case_name
owner_id = "" if owner_id is CaseBlock.undefined else owner_id
self['update'] = update
self['update'].update({
'date_opened': date_opened
})
create_or_update = {
self.CASE_TYPE: case_type,
'case_name': case_name,
}
self.update({
'_attrib': {
'case_id': case_id,
'date_modified': date_modified,
'user_id': user_id,
'xmlns': self.XMLNS,
}
})
if owner_id is not None:
create_or_update.update({
'owner_id': owner_id,
})
self['update'].update({
'external_id': external_id,
})
# fail if user specifies both, say, case_name='Johnny' and update={'case_name': 'Johnny'}
if strict:
for key in create_or_update:
if create_or_update[key] is not CaseBlock.undefined and key in self['update']:
raise CaseBlockError("Key %r specified twice" % key)
if create:
self['create'].update(create_or_update)
else:
self['update'].update(create_or_update)
if close:
self['close'] = {}
if not ['' for val in self['update'].values() if val is not CaseBlock.undefined]:
self['update'] = CaseBlock.undefined
if index:
self['index'] = {}
for name in index.keys():
case_type = index[name][0]
case_id = index[name][1]
# relationship = "child" for index to a parent case (default)
# relationship = "extension" for index to a host case
relationship = index[name][2] if len(index[name]) > 2 else 'child'
if relationship not in ('child', 'extension'):
raise CaseBlockError('Valid values for an index relationship are "child" and "extension"')
_attrib = {'case_type': case_type}
if relationship != 'child':
_attrib['relationship'] = relationship
self['index'][name] = {
'_attrib': _attrib,
'_text': case_id
}
def as_xml(self, format_datetime=None):
format_datetime = format_datetime or json_format_datetime
case = ElementTree.Element('case')
order = ['case_id', 'date_modified', 'create', 'update', 'close',
self.CASE_TYPE, 'user_id', 'case_name', 'external_id', 'date_opened', 'owner_id']
def sort_key(item):
word, _ = item
try:
i = order.index(word)
return 0, i
except ValueError:
return 1, word
def fmt(value):
if value is None:
return ''
if isinstance(value, datetime):
return unicode(format_datetime(value))
elif isinstance(value, (basestring, int, date)):
return unicode(value)
else:
raise CaseBlockError("Can't transform to XML: {}; unexpected type {}.".format(value, type(value)))
def dict_to_xml(block, dct):
if dct.has_key('_attrib'):
for (key, value) in dct['_attrib'].items():
if value is not CaseBlock.undefined:
block.set(key, fmt(value))
if dct.has_key('_text'):
block.text = unicode(dct['_text'])
for (key, value) in sorted(dct.items(), key=sort_key):
if value is not CaseBlock.undefined and not key.startswith('_'):
elem = ElementTree.Element(key)
block.append(elem)
if isinstance(value, dict):
dict_to_xml(elem, value)
else:
elem.text = fmt(value)
dict_to_xml(case, self)
return case
def as_string(self, format_datetime=None):
return ElementTree.tostring(self.as_xml(format_datetime))
class CaseBlockError(Exception):
pass
class CaseStructure(object):
"""
A structure representing a case and its related cases.
Can recursively nest parents/grandparents inside here.
"""
def __init__(self, case_id=None, indices=None, attrs=None, walk_related=True):
self.case_id = case_id or uuid.uuid4().hex
self.indices = indices if indices is not None else []
self.attrs = attrs if attrs is not None else {}
self.walk_related = walk_related # whether to walk related cases in operations
@property
def index(self):
return {
r.identifier: (r.related_type, r.related_id, r.relationship)
for r in self.indices
}
def walk_ids(self):
yield self.case_id
if self.walk_related:
for relationship in self.indices:
for id in relationship.related_structure.walk_ids():
yield id
class CaseIndex(object):
DEFAULT_RELATIONSHIP = CASE_INDEX_CHILD
DEFAULT_RELATED_CASE_TYPE = 'default_related_case_type'
def __init__(self, related_structure=None, relationship=DEFAULT_RELATIONSHIP, related_type=None,
identifier=None):
self.related_structure = related_structure or CaseStructure()
self.relationship = relationship
if related_type is None:
related_type = self.related_structure.attrs.get('case_type', self.DEFAULT_RELATED_CASE_TYPE)
self.related_type = related_type
if identifier is None:
self.identifier = DEFAULT_CASE_INDEX_IDENTIFIERS[relationship]
else:
self.identifier = identifier
@property
def related_id(self):
return self.related_structure.case_id
class CaseFactory(object):
"""
A case factory makes and updates cases for you using CaseStructures.
The API is a wrapper around the CaseBlock utility and is designed to be
easier to work with to setup parent/child structures or default properties.
"""
def __init__(self, domain=None, case_defaults=None, form_extras=None):
self.domain = domain
self.case_defaults = case_defaults if case_defaults is not None else {}
self.form_extras = form_extras if form_extras is not None else {}
def get_case_block(self, case_id, **kwargs):
for k, v in self.case_defaults.items():
if k not in kwargs:
kwargs[k] = v
return CaseBlock(
case_id=case_id,
**kwargs
).as_xml()
def post_case_blocks(self, caseblocks, form_extras=None):
submit_form_extras = copy.copy(self.form_extras)
if form_extras is not None:
submit_form_extras.update(form_extras)
return FormProcessorInterface.post_case_blocks(
caseblocks,
form_extras=submit_form_extras,
domain=self.domain,
)
def create_case(self, **kwargs):
"""
Shortcut to create a simple case without needing to make a structure for it.
"""
kwargs['create'] = True
return self.create_or_update_case(CaseStructure(case_id=uuid.uuid4().hex, attrs=kwargs))[0]
def close_case(self, case_id):
"""
Shortcut to close a case (and do nothing else)
"""
return self.create_or_update_case(CaseStructure(case_id=case_id, attrs={'close': True}))[0]
def create_or_update_case(self, case_structure, form_extras=None):
return self.create_or_update_cases([case_structure], form_extras)
def create_or_update_cases(self, case_structures, form_extras=None):
from corehq.form_processor.interfaces import FormProcessorInterface
def _get_case_block(substructure):
return self.get_case_block(substructure.case_id, index=substructure.index, **substructure.attrs)
def _get_case_blocks(substructure):
blocks = [_get_case_block(substructure)]
if substructure.walk_related:
blocks += [
block for relationship in substructure.indices
for block in _get_case_blocks(relationship.related_structure)
]
return blocks
self.post_case_blocks(
[block for structure in case_structures for block in _get_case_blocks(structure)],
form_extras,
)
return FormProcessorInterface.get_cases(
[id for structure in case_structures for id in structure.walk_ids()]
)
```
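A hedged example of the parent/child setup the `CaseFactory` docstring alludes to; the domain name and case types are arbitrary placeholders.
```python
# Arbitrary domain and case types, purely for illustration.
from casexml.apps.case.mock import CaseFactory, CaseStructure, CaseIndex

factory = CaseFactory(domain='example-domain')
parent = CaseStructure(attrs={'create': True, 'case_type': 'household',
                              'case_name': 'parent'})
child = CaseStructure(
    attrs={'create': True, 'case_type': 'person', 'case_name': 'child'},
    indices=[CaseIndex(parent, related_type='household')],
)
# one form submission creates both cases plus the child -> parent index
cases = factory.create_or_update_case(child)
```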
#### File: case/tests/test_from_xform.py
```python
from django.test import TestCase
from django.test.utils import override_settings
from casexml.apps.case import const
from casexml.apps.case.tests.test_const import *
from casexml.apps.case.tests.util import bootstrap_case_from_xml
from corehq.form_processor.generic import GenericCommCareCase
@override_settings(CASEXML_FORCE_DOMAIN_CHECK=False)
class CaseFromXFormTest(TestCase):
def testCreate(self):
case = bootstrap_case_from_xml(self, "create.xml")
self._check_static_properties(case)
self.assertEqual(False, case.closed)
self.assertEqual(1, len(case.actions))
create_action = case.actions[0]
self.assertEqual(const.CASE_ACTION_CREATE, create_action.action_type)
self.assertEqual("http://openrosa.org/case/test/create", create_action.xform_xmlns)
self.assertEqual("test create", create_action.xform_name)
def testCreateThenUpdateInSeparateForms(self):
# recycle our previous test's form
original_case = bootstrap_case_from_xml(self, "create_update.xml")
self.assertEqual(original_case.type, "test_case_type")
self.assertEqual(original_case.name, "test case name")
# we don't need to bother checking all the properties because this is
# the exact same workflow as above.
case = bootstrap_case_from_xml(self, "update.xml", original_case.id)
self.assertEqual(False, case.closed)
self.assertEqual(3, len(case.actions))
new_update_action = case.actions[2]
self.assertEqual(const.CASE_ACTION_UPDATE, new_update_action.action_type)
self.assertEqual("http://openrosa.org/case/test/update", new_update_action.xform_xmlns)
self.assertEqual("", new_update_action.xform_name)
# some properties didn't change
self.assertEqual("123", str(case["someotherprop"]))
# but some should have
self.assertEqual("abcd", case["someprop"])
self.assertEqual("abcd", new_update_action.updated_unknown_properties["someprop"])
# and there are new ones
self.assertEqual("efgh", case["somenewprop"])
self.assertEqual("efgh", new_update_action.updated_unknown_properties["somenewprop"])
# we also changed everything originally in the case
self.assertEqual("a_new_type", case.type)
self.assertEqual("a_new_type", new_update_action.updated_known_properties["type"])
self.assertEqual("a new name", case.name)
self.assertEqual("a new name", new_update_action.updated_known_properties["name"])
self.assertEqual(UPDATE_DATE, case.opened_on)
self.assertEqual(UPDATE_DATE, new_update_action.updated_known_properties["opened_on"])
# case should have a new modified date
self.assertEqual(MODIFY_DATE, case.modified_on)
def testCreateThenClose(self):
case = bootstrap_case_from_xml(self, "create.xml")
# now close it
case = bootstrap_case_from_xml(self, "close.xml", case.id)
self.assertEqual(True, case.closed)
self.assertEqual(3, len(case.actions))
update_action = case.actions[1]
close_action = case.actions[2]
self.assertEqual(const.CASE_ACTION_UPDATE, update_action.action_type)
self.assertEqual(const.CASE_ACTION_CLOSE, close_action.action_type)
self.assertEqual("http://openrosa.org/case/test/close", close_action.xform_xmlns)
self.assertEqual("", close_action.xform_name)
self.assertEqual("abcde", case["someprop"])
self.assertEqual("abcde", update_action.updated_unknown_properties["someprop"])
self.assertEqual("case closed", case["someclosedprop"])
self.assertEqual("case closed", update_action.updated_unknown_properties["someclosedprop"])
self.assertEqual(CLOSE_DATE, close_action.date)
self.assertEqual(CLOSE_DATE, case.modified_on)
def testCreateMultiple(self):
# TODO: test creating multiple cases from a single form
pass
def testCreateAndUpdateInDifferentCaseBlocks(self):
# TODO: two case blocks, one that creates, another that updates
pass
def _check_static_properties(self, case):
self.assertEqual(GenericCommCareCase, type(case))
self.assertEqual('CommCareCase', case.doc_type)
self.assertEqual("test_case_type", case.type)
self.assertEqual("test case name", case.name)
self.assertEqual("someuser", case.user_id)
self.assertEqual(ORIGINAL_DATE, case.opened_on)
self.assertEqual(ORIGINAL_DATE, case.modified_on)
self.assertEqual("someexternal", case.external_id)
```
#### File: case/tests/test_multimedia.py
```python
from datetime import datetime, timedelta
import time
import uuid
import os
import hashlib
from django.template import Template, Context
from django.test import TestCase
import lxml
from django.core.files.uploadedfile import UploadedFile
from mock import patch
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.tests.util import delete_all_cases, delete_all_xforms, TEST_DOMAIN_NAME
from casexml.apps.case.xml import V2
from casexml.apps.phone.models import SyncLog
from corehq.apps.receiverwrapper.util import submit_form_locally
import couchforms
from couchforms.models import XFormInstance
from dimagi.utils.parsing import json_format_datetime
TEST_CASE_ID = "EOL9FIAKIQWOFXFOH0QAMWU64"
CREATE_XFORM_ID = "6RGAZTETE3Z2QC0PE2DKM88MO"
media_path = os.path.join(os.path.dirname(__file__), "data", "attachments")
MEDIA_FILES = {
"fruity_file": os.path.join(media_path, "fruity.jpg"), # first
"dimagi_logo_file": os.path.join(media_path, "dimagi_logo.jpg"),
"commcare_logo_file": os.path.join(media_path, "commcare-logo.png"),
"globe_file": os.path.join(media_path, "globe.pdf"),
"house_file": os.path.join(media_path, "house.jpg"),
}
class BaseCaseMultimediaTest(TestCase):
def setUp(self):
delete_all_cases()
delete_all_xforms()
def _getXFormString(self, filename):
file_path = os.path.join(os.path.dirname(__file__), "data", "multimedia", filename)
with open(file_path, "rb") as f:
xml_data = f.read()
return xml_data
def _formatXForm(self, doc_id, raw_xml, attachment_block, date=None):
if date is None:
date = datetime.utcnow()
final_xml = Template(raw_xml).render(Context({
"attachments": attachment_block,
"time_start": json_format_datetime(date - timedelta(minutes=4)),
"time_end": json_format_datetime(date),
"date_modified": json_format_datetime(date),
"doc_id": doc_id
}))
return final_xml
def _prepAttachments(self, new_attachments, removes=[]):
attachment_block = ''.join([self._singleAttachBlock(x) for x in new_attachments] + [self._singleAttachRemoveBlock(x) for x in removes])
dict_attachments = dict((MEDIA_FILES[attach_name], self._attachmentFileStream(attach_name)) for attach_name in new_attachments)
return attachment_block, dict_attachments
def _singleAttachBlock(self, key):
return '<n0:%s src="%s" from="local"/>' % (key, MEDIA_FILES[key])
def _singleAttachRemoveBlock(self, key):
return '<n0:%s />' % key
def _attachmentFileStream(self, key):
attachment_path = MEDIA_FILES[key]
attachment = open(attachment_path, 'rb')
uf = UploadedFile(attachment, key)
return uf
def _calc_file_hash(self, key):
with open(MEDIA_FILES[key], 'rb') as attach:
return hashlib.md5(attach.read()).hexdigest()
def _do_submit(self, xml_data, dict_attachments, sync_token=None, date=None):
"""
RequestFactory submitter - simulates direct submission to server directly (no need to call process case after fact)
"""
response, xform, cases = submit_form_locally(
xml_data,
TEST_DOMAIN_NAME,
attachments=dict_attachments,
last_sync_token=sync_token,
received_on=date
)
self.assertEqual(set(dict_attachments.keys()),
set(xform.attachments.keys()))
[case] = cases
self.assertEqual(case.case_id, TEST_CASE_ID)
def _submit_and_verify(self, doc_id, xml_data, dict_attachments,
sync_token=None, date=None):
self._do_submit(xml_data, dict_attachments, sync_token, date=date)
time.sleep(2)
form = XFormInstance.get(doc_id)
self.assertEqual(len(dict_attachments), len(form.attachments))
for k, vstream in dict_attachments.items():
fileback = form.fetch_attachment(k)
# rewind the pointer before comparing
orig_attachment = vstream
orig_attachment.seek(0)
self.assertEqual(hashlib.md5(fileback).hexdigest(), hashlib.md5(orig_attachment.read()).hexdigest())
return form
def _doCreateCaseWithMultimedia(self, attachments=['fruity_file']):
xml_data = self._getXFormString('multimedia_create.xml')
attachment_block, dict_attachments = self._prepAttachments(attachments)
final_xml = self._formatXForm(CREATE_XFORM_ID, xml_data, attachment_block)
self._submit_and_verify(CREATE_XFORM_ID, final_xml, dict_attachments)
def _doSubmitUpdateWithMultimedia(self, new_attachments=None, removes=None,
sync_token=None, date=None):
new_attachments = new_attachments if new_attachments is not None \
else ['commcare_logo_file', 'dimagi_logo_file']
removes = removes if removes is not None else ['fruity_file']
attachment_block, dict_attachments = self._prepAttachments(new_attachments, removes=removes)
raw_xform = self._getXFormString('multimedia_update.xml')
doc_id = uuid.uuid4().hex
final_xform = self._formatXForm(doc_id, raw_xform, attachment_block, date)
self._submit_and_verify(doc_id, final_xform, dict_attachments,
sync_token, date=date)
class CaseMultimediaTest(BaseCaseMultimediaTest):
"""
Tests new attachments for cases and case properties
Spec: https://github.com/dimagi/commcare/wiki/CaseAttachmentAPI
"""
def tearDown(self):
delete_all_xforms()
def testAttachInCreate(self):
single_attach = 'fruity_file'
self._doCreateCaseWithMultimedia(attachments=[single_attach])
case = CommCareCase.get(TEST_CASE_ID)
self.assertEqual(1, len(case.case_attachments))
self.assertTrue(single_attach in case.case_attachments)
self.assertEqual(1, len(filter(lambda x: x['action_type'] == 'attachment', case.actions)))
self.assertEqual(self._calc_file_hash(single_attach), hashlib.md5(case.get_attachment(single_attach)).hexdigest())
def testArchiveAfterAttach(self):
single_attach = 'fruity_file'
self._doCreateCaseWithMultimedia(attachments=[single_attach])
case = CommCareCase.get(TEST_CASE_ID)
for xform in case.xform_ids:
form = XFormInstance.get(xform)
form.archive()
self.assertEqual('XFormArchived', form.doc_type)
form.unarchive()
self.assertEqual('XFormInstance', form.doc_type)
def testAttachRemoveSingle(self):
self.testAttachInCreate()
new_attachments = []
removes = ['fruity_file']
self._doSubmitUpdateWithMultimedia(new_attachments=new_attachments, removes=removes)
case = CommCareCase.get(TEST_CASE_ID)
        # the single attachment was removed and nothing added, so none remain
self.assertEqual(0, len(case.case_attachments))
self.assertIsNone(case._attachments)
attach_actions = filter(lambda x: x['action_type'] == 'attachment', case.actions)
self.assertEqual(2, len(attach_actions))
last_action = attach_actions[-1]
self.assertEqual(sorted(removes), sorted(last_action['attachments'].keys()))
def testAttachRemoveMultiple(self):
self.testAttachInCreate()
new_attachments = ['commcare_logo_file', 'dimagi_logo_file']
removes = ['fruity_file']
self._doSubmitUpdateWithMultimedia(new_attachments=new_attachments, removes=removes)
case = CommCareCase.get(TEST_CASE_ID)
        # removed the original attachment and added 2 new ones, so 2 remain
self.assertEqual(2, len(case.case_attachments))
self.assertEqual(2, len(case._attachments))
attach_actions = filter(lambda x: x['action_type'] == 'attachment', case.actions)
self.assertEqual(2, len(attach_actions))
last_action = attach_actions[-1]
self.assertEqual(sorted(new_attachments), sorted(case._attachments.keys()))
def testOTARestoreSingle(self):
self.testAttachInCreate()
restore_attachments = ['fruity_file']
self._validateOTARestore(TEST_CASE_ID, restore_attachments)
def testOTARestoreMultiple(self):
self.testAttachRemoveMultiple()
restore_attachments = ['commcare_logo_file', 'dimagi_logo_file']
self._validateOTARestore(TEST_CASE_ID, restore_attachments)
def _validateOTARestore(self, case_id, restore_attachments):
case = CommCareCase.get(TEST_CASE_ID)
case_xml = case.to_xml(V2)
root_node = lxml.etree.fromstring(case_xml)
attaches = root_node.find('{http://commcarehq.org/case/transaction/v2}attachment')
self.assertEqual(len(restore_attachments), len(attaches))
for attach in attaches:
url = attach.values()[1]
case_id = url.split('/')[-2]
attach_key_from_url = url.split('/')[-1]
tag = attach.tag
clean_tag = tag.replace('{http://commcarehq.org/case/transaction/v2}', '')
self.assertEqual(clean_tag, attach_key_from_url)
self.assertEqual(case_id, TEST_CASE_ID)
self.assertIn(attach_key_from_url, restore_attachments)
restore_attachments.remove(clean_tag)
self.assertEqual(0, len(restore_attachments))
def testAttachInUpdate(self, new_attachments=['commcare_logo_file', 'dimagi_logo_file']):
self.testAttachInCreate()
self._doSubmitUpdateWithMultimedia(new_attachments=new_attachments, removes=[])
case = CommCareCase.get(TEST_CASE_ID)
        # the 1 attachment we had plus the ones we just added
self.assertEqual(len(new_attachments)+1, len(case.case_attachments))
attach_actions = filter(lambda x: x['action_type'] == 'attachment', case.actions)
self.assertEqual(2, len(attach_actions))
last_action = attach_actions[-1]
self.assertEqual(sorted(new_attachments), sorted(last_action['attachments'].keys()))
for attach_name in new_attachments:
self.assertTrue(attach_name in case.case_attachments)
self.assertEqual(self._calc_file_hash(attach_name), hashlib.md5(case.get_attachment(attach_name)).hexdigest())
def testUpdateWithNoNewAttachment(self):
self.testAttachInCreate()
bulk_save = XFormInstance.get_db().bulk_save
bulk_save_attachments = []
# pull out and record attachments to docs being bulk saved
def new_bulk_save(docs, *args, **kwargs):
for doc in docs:
if doc['_id'] == TEST_CASE_ID:
bulk_save_attachments.append(doc['_attachments'])
bulk_save(docs, *args, **kwargs)
self._doSubmitUpdateWithMultimedia(
new_attachments=[], removes=[])
with patch('couchforms.models.XFormInstance._db.bulk_save', new_bulk_save):
            # submit from 2 minutes in the past to trigger a rebuild
self._doSubmitUpdateWithMultimedia(
new_attachments=[], removes=[],
date=datetime.utcnow() - timedelta(minutes=2))
# make sure there's exactly one bulk save recorded
self.assertEqual(len(bulk_save_attachments), 1)
# make sure none of the attachments were re-saved in rebuild
self.assertEqual(
[key for key, value in bulk_save_attachments[0].items()
if value.get('data')], [])
def test_sync_log_invalidation_bug(self):
sync_log = SyncLog(user_id='6dac4940-913e-11e0-9d4b-005056aa7fb5')
sync_log.save()
self.testAttachInCreate()
# this used to fail before we fixed http://manage.dimagi.com/default.asp?158373
self._doSubmitUpdateWithMultimedia(new_attachments=['commcare_logo_file'], removes=[],
sync_token=sync_log._id)
sync_log.delete()
```
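For orientation, a small hedged sketch of the XML fragments the two attach/remove helpers near the top of this test module produce; `MEDIA_FILES` is assumed to map attachment keys to local file paths, and the `src` value below is made up:

```python
# Sketch only, not part of the test module: the attach and remove fragments
# that _singleAttachBlock / _singleAttachRemoveBlock build for the case
# attachment block of a submitted form.
key = 'fruity_file'
attach_fragment = '<n0:%s src="%s" from="local"/>' % (key, '/tmp/fruity_file.jpg')
remove_fragment = '<n0:%s />' % key
```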
#### File: apps/phone/cache_utils.py
```python
from collections import namedtuple
import os
import shutil
import tempfile
import uuid
import re
from casexml.apps.phone.exceptions import SyncLogCachingError
from casexml.apps.phone.models import get_properly_wrapped_sync_log
FileReference = namedtuple('FileReference', ['file', 'path'])
def copy_payload_and_synclog_and_get_new_file(filelike_payload):
"""
Given a restore payload, extracts the sync log id and sync log from the payload,
makes a copy of the sync log, and then returns a new FileReference with the same contents
except using the new sync log ID.
"""
synclog_id, end_position = extract_synclog_id_from_filelike_payload(filelike_payload)
old_sync_log = get_properly_wrapped_sync_log(synclog_id)
new_sync_log_doc = old_sync_log.to_json()
new_sync_log_id = uuid.uuid4().hex
new_sync_log_doc['_id'] = new_sync_log_id
del new_sync_log_doc['_rev']
old_sync_log.get_db().save_doc(new_sync_log_doc)
return replace_sync_log_id_in_filelike_payload(
filelike_payload, old_sync_log._id, new_sync_log_id, end_position
)
def extract_synclog_id_from_filelike_payload(filelike_payload):
filelike_payload.seek(0)
try:
beginning_of_log = filelike_payload.read(500)
# i know, regex parsing xml is bad. not sure what to do since this is arbitrarily truncated
match = re.search('<restore_id>([\w_-]+)</restore_id>', beginning_of_log)
if not match:
raise SyncLogCachingError("Couldn't find synclog ID from beginning of restore!")
groups = match.groups()
if len(groups) != 1:
raise SyncLogCachingError("Found more than one synclog ID from beginning of restore! {}".format(
', '.join(groups))
)
return groups[0], beginning_of_log.index(groups[0])
finally:
filelike_payload.seek(0)
def replace_sync_log_id_in_filelike_payload(filelike_payload, old_id, new_id, position):
filelike_payload.seek(0)
try:
beginning = filelike_payload.read(position)
extracted_id = filelike_payload.read(len(old_id))
if extracted_id != old_id:
raise SyncLogCachingError('Error putting sync log back together. Expected ID {} but was {}'.format(
old_id, extracted_id,
))
# write the result to a new file
fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'wb') as outfile:
outfile.write(beginning)
outfile.write(new_id)
shutil.copyfileobj(filelike_payload, outfile)
return FileReference(open(path, 'r'), path)
finally:
filelike_payload.seek(0)
```
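A minimal usage sketch of the extraction helper above, run against an in-memory payload; the XML wrapper here is purely illustrative, not the real restore response format:

```python
# Sketch only: locate the synclog ID inside a restore payload.
from StringIO import StringIO  # the module above targets Python 2

fake_id = 'a1b2c3d4e5f60718293a4b5c6d7e8f90'
payload = StringIO('<restore><restore_id>%s</restore_id> rest of payload' % fake_id)

synclog_id, position = extract_synclog_id_from_filelike_payload(payload)
assert synclog_id == fake_id
# position marks where the ID starts, so it can later be swapped in place
assert payload.getvalue()[position:position + len(fake_id)] == fake_id
```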
#### File: data_providers/case/batched.py
```python
from collections import defaultdict
import itertools
import logging
from casexml.apps.case.models import CommCareCase
from casexml.apps.phone.caselogic import get_footprint
from casexml.apps.phone.data_providers.case.load_testing import append_update_to_response
from casexml.apps.phone.data_providers.case.stock import get_stock_payload
from casexml.apps.phone.data_providers.case.utils import get_case_sync_updates, CaseStub
from casexml.apps.phone.models import CaseState
from corehq.apps.hqcase.dbaccessors import iter_lite_cases_json, \
get_n_case_ids_in_domain_by_owner
from corehq.util.dates import iso_string_to_datetime
from dimagi.utils.parsing import string_to_utc_datetime
logger = logging.getLogger(__name__)
def get_case_payload_batched(restore_state):
response = restore_state.restore_class()
sync_operation = BatchedCaseSyncOperation(restore_state)
for update in sync_operation.get_all_case_updates():
append_update_to_response(response, update, restore_state)
sync_state = sync_operation.global_state
restore_state.current_sync_log.cases_on_phone = sync_state.actual_owned_cases
restore_state.current_sync_log.dependent_cases_on_phone = sync_state.actual_extended_cases
# commtrack ledger sections
commtrack_elements = get_stock_payload(
restore_state.project, restore_state.stock_settings, sync_state.all_synced_cases
)
response.extend(commtrack_elements)
return response, sync_operation.batch_count
class GlobalSyncState(object):
"""
Object containing global state for a BatchedCaseSyncOperation.
Used within the batches to ensure uniqueness of cases being synced.
Also used after the sync is complete to provide list of CaseState objects
"""
def __init__(self, last_sync, case_sharing=False):
self.actual_relevant_cases_dict = {}
self.actual_owned_cases_dict = {}
self.all_synced_cases_dict = {}
self.minimal_cases = {}
if last_sync and not case_sharing:
def state_to_case_doc(state):
doc = state.to_json()
doc['_id'] = state.case_id
return doc
self.minimal_cases = {
state.case_id: state_to_case_doc(state) for state in itertools.chain(
last_sync.cases_on_phone, last_sync.dependent_cases_on_phone
)
}
@property
def actual_owned_cases(self):
"""
Cases directly owned by the user or one of the user's groups.
"""
return self.actual_owned_cases_dict.values()
@property
def actual_extended_cases(self):
"""
        Cases that are indexed by any cases owned by the user (but not owned directly)
"""
return list(set(self.actual_relevant_cases) - set(self.actual_owned_cases))
@property
def actual_relevant_cases(self):
"""
All cases relevant to the user (owned and linked to)
"""
return self.actual_relevant_cases_dict.values()
@property
def all_synced_cases(self):
"""
All cases that were included in the restore response i.e. cases that have updates
which the phone doesn't know about
"""
return self.all_synced_cases_dict.values()
def update_owned_cases(self, cases):
self.actual_owned_cases_dict.update(
{case['_id']: CaseState.from_case(case) for case in cases}
)
def update_relevant_cases(self, cases):
new_cases = []
for case in cases:
state = CaseState.from_case(case)
if state.case_id not in self.actual_relevant_cases_dict:
self.actual_relevant_cases_dict[state.case_id] = state
new_cases.append(case)
return new_cases
def update_synced_cases(self, case_updates):
self.all_synced_cases_dict.update(
{update.case.case_id: CaseStub(update.case._id, update.case.type) for update in case_updates}
)
class BatchedCaseSyncOperation(object):
"""
Case Sync Operation that produces a list of CaseSyncBatch objects
each representing a batch of CaseSyncUpdates.
Global sync state is also available via the 'global_state' field.
Usage:
        op = BatchedCaseSyncOperation(restore_state, chunk_size)
case_updates_generator = op.get_all_case_updates()
list(case_updates_generator) # consume case updates generator to update global state
global_state = op.global_state
Throughout this process any case should be assumed to only contain the following properties:
'_id', 'type', 'indices', 'doc_type'.
If 'doc_type' = CommCareCase then the case is a real case but if it is CaseState then it is
a 'minimal case'.
"""
# use class variable to allow patching in tests
chunk_size = 1000
def __init__(self, restore_state, chunk_size=None):
self.restore_state = restore_state
self.user = restore_state.user
self.last_synclog = restore_state.last_sync_log
if chunk_size:
self.chunk_size = chunk_size
self.domain = self.restore_state.domain
try:
self.owner_ids = list(self.restore_state.owner_ids)
except AttributeError:
self.owner_ids = [self.user.user_id]
self.case_sharing = len(self.owner_ids) > 1
self.global_state = GlobalSyncState(self.last_synclog, self.case_sharing)
self.batch_count = 0
def batches(self):
for owner_id in self.owner_ids:
batch = CaseSyncCouchBatch(
self.global_state,
self.domain,
self.last_synclog,
self.chunk_size,
owner_id,
case_sharing=self.case_sharing
)
yield batch
while batch.next_batch:
batch = batch.next_batch
yield batch
if self.last_synclog:
yield CaseSyncPhoneBatch(
self.global_state,
self.domain,
self.last_synclog,
self.chunk_size,
case_sharing=self.case_sharing
)
def get_all_case_updates(self):
"""
Returns a generator that yields the case updates for this user.
Iterating through the updates also has the effect of updating this object's GlobalSyncState.
"""
def get_updates(batch):
self.batch_count += 1
return batch.case_updates_to_sync()
return itertools.chain.from_iterable(get_updates(batch) for batch in self.batches())
class CaseSyncBatch(object):
"""
Object representing a batch of case updates to sync.
"""
def __init__(self, global_state, domain, last_sync, chunksize, case_sharing):
self.global_state = global_state
self.domain = domain
self.last_sync = last_sync
self.chunksize = chunksize
self.case_sharing = case_sharing
self.next_batch = None
    def case_updates_to_sync(self):
"""
Override this to return the list of cases to sync
"""
return []
def _get_potential_cases(self, cases):
return filter_cases_modified_elsewhere_since_sync(list(cases), self.last_sync)
def _case_sync_updates(self, all_potential_to_sync):
return get_case_sync_updates(self.domain, all_potential_to_sync, self.last_sync)
def _fetch_missing_cases_and_wrap(self, casedoc_list):
cases = []
to_fetch = []
for doc in casedoc_list:
if doc['doc_type'] == 'CommCareCase':
cases.append(CommCareCase.wrap(doc))
else:
to_fetch.append(doc['_id'])
cases.extend(CommCareCase.bulk_get_lite(to_fetch, wrap=True, chunksize=self.chunksize))
return cases
class CaseSyncPhoneBatch(CaseSyncBatch):
"""
Batch of updates representing all cases that are on the phone
but aren't part of the 'owned' cases of the user.
"""
def __init__(self, global_state, domain, last_sync, chunksize, case_sharing=False):
super(CaseSyncPhoneBatch, self).__init__(global_state, domain, last_sync, chunksize, case_sharing)
        # when case sharing is in use we have to fetch the cases from the DB, since
        # they may have been modified by another user or reference cases owned by another user
self.use_minimal_cases = not self.case_sharing
def case_updates_to_sync(self):
other_case_ids_on_phone = set([
case_id
for case_id in self.last_sync.get_footprint_of_cases_on_phone()
if case_id not in self.global_state.actual_relevant_cases_dict
])
logger.debug("%s other cases on phone", len(other_case_ids_on_phone))
if not other_case_ids_on_phone:
return []
if self.use_minimal_cases:
other_cases_on_phone = [
self.global_state.minimal_cases[case_id] for case_id in other_case_ids_on_phone
]
else:
other_cases_on_phone = CommCareCase.bulk_get_lite(
other_case_ids_on_phone,
wrap=False,
chunksize=len(other_case_ids_on_phone)
)
potential_to_sync = self._get_potential_cases(other_cases_on_phone)
cases_to_sync = self._fetch_missing_cases_and_wrap(potential_to_sync)
case_sync_updates = self._case_sync_updates(cases_to_sync)
self.global_state.update_synced_cases(case_sync_updates)
return case_sync_updates
def __repr__(self):
return "CaseSyncPhoneBatch(use_minimal_cases={})".format(
self.use_minimal_cases
)
class CaseSyncCouchBatch(CaseSyncBatch):
"""
Batch of case updates for cases 'owned' by the user.
"""
def __init__(self, global_state, domain, last_sync, chunksize,
owner_id, case_sharing=False, startkey_docid=None):
super(CaseSyncCouchBatch, self).__init__(global_state, domain, last_sync, chunksize, case_sharing)
self.owner_id = owner_id
self.startkey_docid = startkey_docid
# We can only use minimal cases if:
# * there is a SyncLog which we can use that has cases in it
# * the user is not part of any case sharing groups
self.use_minimal_cases = self.last_sync and not case_sharing
def case_updates_to_sync(self):
actual_owned_cases = self._actual_owned_cases()
if not actual_owned_cases:
return []
self.global_state.update_owned_cases(actual_owned_cases)
all_relevant_cases_dict = self._all_relevant_cases_dict(actual_owned_cases)
actual_relevant_cases = self.global_state.update_relevant_cases(all_relevant_cases_dict.values())
potential_to_sync = self._get_potential_cases(actual_relevant_cases)
cases_to_sync = self._fetch_missing_cases_and_wrap(potential_to_sync)
case_sync_updates = self._case_sync_updates(cases_to_sync)
self.global_state.update_synced_cases(case_sync_updates)
return case_sync_updates
def _get_case_ids(self):
case_ids = get_n_case_ids_in_domain_by_owner(
self.domain, self.owner_id, self.chunksize, self.startkey_docid)
for case_id in case_ids:
yield case_id
if len(case_ids) >= self.chunksize:
self.next_batch = CaseSyncCouchBatch(
self.global_state,
self.domain,
self.last_sync,
self.chunksize,
self.owner_id,
self.case_sharing,
startkey_docid=case_ids[-1]
)
def _actual_owned_cases(self):
"""
This returns a list of case dicts. Each dict will either be an actual case dict or else
a dict containing only these keys: '_id', 'type', 'indices'. These 'minimal cases' are
created from CaseState objects from the previous SyncLog.
"""
def _case_domain_match(case):
return not self.domain or self.domain == case.get('domain')
case_ids = self._get_case_ids()
if self.use_minimal_cases:
# First we check to see if there is a case state available that we can use
# rather than fetching the whole case.
minimal_cases = []
cases_to_fetch = []
for case_id in case_ids:
minimal_case = self.global_state.minimal_cases.get(case_id)
if minimal_case:
minimal_cases.append(minimal_case)
else:
cases_to_fetch.append(case_id)
logger.debug(
"%s cases found in previous SyncLog. %s still to fetch",
len(minimal_cases), len(cases_to_fetch)
)
if cases_to_fetch:
cases = CommCareCase.bulk_get_lite(cases_to_fetch, wrap=False, chunksize=self.chunksize)
minimal_cases.extend(
case_doc for case_doc in cases
if _case_domain_match(case_doc)
)
return minimal_cases
else:
lite_cases = list(iter_lite_cases_json(case_ids, self.chunksize))
logger.debug("No previous SyncLog. Fetched %s cases", len(lite_cases))
return lite_cases
def _all_relevant_cases_dict(self, cases):
return get_footprint(cases, domain=self.domain, strip_history=True)
def __repr__(self):
return "CaseSyncCouchBatch(startkey={}, startkey_docid={}, chunksize={}, use_minimal_cases={})".format(
self.owner_id,
self.startkey_docid,
self.chunksize,
self.use_minimal_cases
)
def filter_cases_modified_elsewhere_since_sync(cases, last_sync_token):
"""
This function takes in a list of unwrapped case dicts and a last_sync token and
returns the set of cases that should be applicable to be sent down on top of that
sync token.
This includes:
1. All cases that were modified since the last sync date by any phone other
than the phone that is associated with the sync token.
2. All cases that were not on the phone at the time of last sync that are
now on the phone.
"""
# todo: this function is pretty ugly and is heavily optimized to reduce the number
# of queries to couch.
if not last_sync_token:
return cases
else:
# we can start by filtering out our base set of cases to check for only
# things that have been modified since we last synced
def _is_relevant(case_or_case_state_dict):
if case_or_case_state_dict:
# only case-like things have this.
if 'server_modified_on' in case_or_case_state_dict:
                    return string_to_utc_datetime(case_or_case_state_dict['server_modified_on']) >= last_sync_token.date
# for case states default to always checking for recent updates
return True
recently_modified_case_ids = [case['_id'] for case in cases if _is_relevant(case)]
# create a mapping of all cases to sync logs for all cases that were modified
# in the appropriate ranges.
# todo: this should really have a better way to filter out updates from sync logs
# that we already have in a better way.
# todo: if this recently modified case list is huge i'm guessing this query is
# pretty expensive
case_log_map = CommCareCase.get_db().view(
'phone/cases_to_sync_logs',
keys=recently_modified_case_ids,
reduce=False,
)
unique_combinations = set((row['key'], row['value']) for row in case_log_map)
# todo: and this one is also going to be very bad. see note above about how we might
# be able to reduce it - by finding a way to only query for sync tokens that are more
# likely to be relevant.
modification_dates = CommCareCase.get_db().view(
'phone/case_modification_status',
keys=[list(combo) for combo in unique_combinations],
reduce=True,
group=True,
)
# we'll build a structure that looks like this for efficiency:
# { case_id: [{'token': 'token value', 'date': 'date value'}, ...]}
all_case_updates_by_sync_token = defaultdict(list)
for row in modification_dates:
# format from couch is a list of objects that look like this:
# {
# 'value': '2012-08-22T08:55:14Z', (most recent date updated)
# 'key': ['case-id', 'sync-token-id']
# }
if row['value']:
modification_date = iso_string_to_datetime(row['value'])
if modification_date >= last_sync_token.date:
case_id, sync_token_id = row['key']
all_case_updates_by_sync_token[case_id].append(
{'token': sync_token_id, 'date': modification_date}
)
def case_modified_elsewhere_since_sync(case_id):
# NOTE: uses closures
return any([row['date'] >= last_sync_token.date and row['token'] != last_sync_token._id
for row in all_case_updates_by_sync_token[case_id]])
def relevant(case):
case_id = case['_id']
return (case_modified_elsewhere_since_sync(case_id)
or not last_sync_token.phone_is_holding_case(case_id))
return filter(relevant, cases)
```
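Putting the pieces above together, a hedged usage sketch that mirrors the class docstring; `restore_state` is assumed to be an already-initialised restore state object from the restore pipeline:

```python
# Sketch only: drive a batched case sync and inspect the accumulated global state.
operation = BatchedCaseSyncOperation(restore_state, chunk_size=500)
case_updates = list(operation.get_all_case_updates())  # consuming the generator fills global_state
logger.debug(
    'synced %s updates across %s batches (%s owned, %s extended cases)',
    len(case_updates),
    operation.batch_count,
    len(operation.global_state.actual_owned_cases),
    len(operation.global_state.actual_extended_cases),
)
```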
#### File: data_providers/case/load_testing.py
```python
from copy import deepcopy
from casexml.apps.case.models import CommCareCase
from casexml.apps.phone.data_providers.case.utils import CaseSyncUpdate
from casexml.apps.phone.xml import get_case_element
from corehq.toggles import ENABLE_LOADTEST_USERS
def get_loadtest_factor(domain, user):
"""
Gets the loadtest factor for a domain and user. Is always 1 unless
both the toggle is enabled for the domain, and the user has a non-zero,
non-null factor set.
"""
if domain and ENABLE_LOADTEST_USERS.enabled(domain):
return getattr(user, 'loadtest_factor', 1) or 1
return 1
def transform_loadtest_update(update, factor):
"""
Returns a new CaseSyncUpdate object (from an existing one) with all the
case IDs and names mapped to have the factor appended.
"""
def _map_id(id, count):
return '{}-{}'.format(id, count)
case = CommCareCase.wrap(deepcopy(update.case._doc))
case._id = _map_id(case._id, factor)
for index in case.indices:
index.referenced_id = _map_id(index.referenced_id, factor)
case.name = '{} ({})'.format(case.name, factor)
return CaseSyncUpdate(case, update.sync_token, required_updates=update.required_updates)
def append_update_to_response(response, update, restore_state):
"""
Adds the XML from the case_update to the restore response.
If factor is > 1 it will append that many updates to the response for load testing purposes.
"""
current_count = 0
original_update = update
while current_count < restore_state.loadtest_factor:
element = get_case_element(update.case, update.required_updates, restore_state.version)
response.append(element)
current_count += 1
if current_count < restore_state.loadtest_factor:
update = transform_loadtest_update(original_update, current_count)
```
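A small hedged example of the ID and name mapping performed above, assuming `update` is an existing `CaseSyncUpdate` whose case ID happens to be `'abc'`:

```python
# Sketch only: the synthetic copy produced for loadtest slot 2.
clone = transform_loadtest_update(update, 2)
assert clone.case._id == 'abc-2'        # '<original id>-<count>'
assert clone.case.name.endswith('(2)')  # '<original name> (2)'
# index referenced_ids on the clone get the same '-2' suffix
```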
#### File: data_providers/case/stock.py
```python
from collections import defaultdict
from casexml.apps.stock.consumption import compute_consumption_or_default
from casexml.apps.stock.utils import get_current_ledger_state
from dimagi.utils.parsing import json_format_datetime
from datetime import datetime
from casexml.apps.stock.const import COMMTRACK_REPORT_XMLNS
def get_stock_payload(project, stock_settings, case_stub_list):
if project and not project.commtrack_enabled:
return
from lxml.builder import ElementMaker
E = ElementMaker(namespace=COMMTRACK_REPORT_XMLNS)
def entry_xml(id, quantity):
return E.entry(
id=id,
quantity=str(int(quantity)),
)
def state_to_xml(state):
return entry_xml(state.product_id, state.stock_on_hand)
def consumption_entry(case_id, product_id, section_id):
consumption_value = compute_consumption_or_default(
case_id,
product_id,
datetime.utcnow(),
section_id,
stock_settings.consumption_config
)
if consumption_value is not None:
return entry_xml(product_id, consumption_value)
case_ids = [case.case_id for case in case_stub_list]
all_current_ledgers = get_current_ledger_state(case_ids)
for commtrack_case_stub in case_stub_list:
case_id = commtrack_case_stub.case_id
current_ledgers = all_current_ledgers[case_id]
section_product_map = defaultdict(lambda: [])
section_timestamp_map = defaultdict(lambda: json_format_datetime(datetime.utcnow()))
for section_id in sorted(current_ledgers.keys()):
state_map = current_ledgers[section_id]
sorted_product_ids = sorted(state_map.keys())
stock_states = [state_map[p] for p in sorted_product_ids]
as_of = json_format_datetime(max(txn.last_modified_date for txn in stock_states))
section_product_map[section_id] = sorted_product_ids
section_timestamp_map[section_id] = as_of
yield E.balance(*(state_to_xml(e) for e in stock_states),
**{'entity-id': case_id, 'date': as_of, 'section-id': section_id})
for section_id, consumption_section_id in stock_settings.section_to_consumption_types.items():
if (section_id in current_ledgers or
stock_settings.force_consumption_case_filter(commtrack_case_stub)):
consumption_product_ids = stock_settings.default_product_list \
if stock_settings.default_product_list \
else section_product_map[section_id]
consumption_entries = filter(lambda e: e is not None, [
consumption_entry(case_id, p, section_id)
for p in consumption_product_ids
])
if consumption_entries:
yield E.balance(
*consumption_entries,
**{
'entity-id': case_id,
'date': section_timestamp_map[section_id],
'section-id': consumption_section_id,
}
)
```
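For orientation, a hedged sketch of the balance element shape yielded above, built with the same ElementMaker pattern; the case ID, product IDs and values are made up:

```python
# Sketch only: one ledger section serialized the way get_stock_payload does it.
from lxml import etree
from lxml.builder import ElementMaker

E = ElementMaker(namespace=COMMTRACK_REPORT_XMLNS)
balance = E.balance(
    E.entry(id='product-a', quantity='12'),
    E.entry(id='product-b', quantity='3'),
    **{'entity-id': 'case-123', 'date': '2015-08-01T00:00:00Z', 'section-id': 'stock'}
)
print etree.tostring(balance, pretty_print=True)
```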
#### File: apps/phone/fixtures.py
```python
from collections import namedtuple
from casexml.apps.case.xml import V1
from django.conf import settings
from corehq.apps.users.models import CommCareUser
from dimagi.utils.modules import to_function
import itertools
class FixtureGenerator(object):
"""
The generator object, which gets fixtures from your config file that should
be included when OTA restoring.
See: https://bitbucket.org/javarosa/javarosa/wiki/externalinstances
To use, add the following to your settings.py
FIXTURE_GENERATORS = {
'group1': [
"myapp.fixturegenerators.gen1",
"myapp.fixturegenerators.gen2",
...
],
...
}
The values in the file should be paths to objects that
implement the following API:
provider(user, version, last_sync) --> [list of fixture objects]
provider.id --> the ID of the fixture
If the provider generates multiple fixtures it should use an ID format as follows:
"prefix:dynamic"
In this case 'provider.id' should just be the ID prefix.
The function should return an empty list if there are no fixtures
"""
def __init__(self):
self._generator_providers = {}
if hasattr(settings, "FIXTURE_GENERATORS"):
for group, func_paths in settings.FIXTURE_GENERATORS.items():
self._generator_providers[group] = filter(None, [
to_function(func_path) for func_path in func_paths
])
def _get_fixtures(self, group, fixture_id, user, version, last_sync):
if version == V1:
return [] # V1 phones will never use or want fixtures
if getattr(user, "_hq_user", False):
user = user._hq_user
if not isinstance(user, CommCareUser):
return []
if group:
providers = self._generator_providers.get(group, [])
else:
providers = itertools.chain(*self._generator_providers.values())
if fixture_id:
full_id = fixture_id
prefix = fixture_id.split(':', 1)[0]
def provider_matches(provider):
# some providers generate fixtures with dynamic ID's e.g. item-list:my-item-list
# in which case provider.id is just the prefix.
return provider.id == full_id or provider.id == prefix
providers = [provider for provider in providers if provider_matches(provider)]
return itertools.chain(*[provider(user, version, last_sync)
for provider in providers])
def get_fixture_by_id(self, fixture_id, user, version, last_sync=None):
"""
Only get fixtures with the specified ID.
"""
fixtures = self._get_fixtures(None, fixture_id, user, version, last_sync)
for fixture in fixtures:
if fixture.attrib.get("id") == fixture_id:
return fixture
def get_fixtures(self, user, version, last_sync=None, group=None):
"""
Gets all fixtures associated with an OTA restore operation
"""
return self._get_fixtures(group, None, user, version, last_sync)
generator = FixtureGenerator()
```
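A minimal provider sketch matching the API described in the docstring above; the module path, fixture id and contents are invented for illustration:

```python
# Sketch only: a fixture provider as described in the FixtureGenerator docstring.
# It must be callable as provider(user, version, last_sync) and expose an `id`.
from lxml.builder import E

def example_fixture_provider(user, version, last_sync=None):
    # must return a list of fixture elements (or an empty list)
    return [E.fixture(E.example_list(E.value('hello')), id='example-fixture')]

example_fixture_provider.id = 'example-fixture'

# settings.py (illustrative):
# FIXTURE_GENERATORS = {
#     'custom': ['myapp.fixturegenerators.example_fixture_provider'],
# }
```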
#### File: apps/phone/models.py
```python
from collections import defaultdict, namedtuple
from copy import copy
from datetime import datetime
import json
from couchdbkit.exceptions import ResourceConflict, ResourceNotFound
from casexml.apps.phone.exceptions import IncompatibleSyncLogType
from corehq.toggles import LEGACY_SYNC_SUPPORT
from corehq.util.global_request import get_request
from corehq.util.soft_assert import soft_assert
from dimagi.ext.couchdbkit import *
from django.db import models
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.mixins import UnicodeMixIn
from dimagi.utils.couch import LooselyEqualDocumentSchema
from casexml.apps.case import const
from casexml.apps.case.sharedmodels import CommCareCaseIndex, IndexHoldingMixIn
from casexml.apps.phone.checksum import Checksum, CaseStateHash
import logging
logger = logging.getLogger('phone.models')
class User(object):
"""
This is a basic user model that's used for OTA restore to properly
find cases and generate the user XML.
"""
# todo: this model is now useless since casexml and HQ are no longer separate repos.
# we should remove this abstraction layer and switch all the restore code to just
# work off CouchUser objects
def __init__(self, user_id, username, password, date_joined, first_name=None,
last_name=None, phone_number=None, user_data=None,
additional_owner_ids=None, domain=None, loadtest_factor=1):
self.user_id = user_id
self.username = username
self.first_name = first_name
self.last_name = last_name
self.phone_number = phone_number
self.password = password
self.date_joined = date_joined
self.user_data = user_data or {}
self.additional_owner_ids = additional_owner_ids or []
self.domain = domain
self.loadtest_factor = loadtest_factor
@property
def user_session_data(self):
# todo: this is redundant with the implementation in CouchUser.
# this will go away when the two are reconciled
from corehq.apps.custom_data_fields.models import SYSTEM_PREFIX
session_data = copy(self.user_data)
session_data.update({
'{}_first_name'.format(SYSTEM_PREFIX): self.first_name,
'{}_last_name'.format(SYSTEM_PREFIX): self.last_name,
'{}_phone_number'.format(SYSTEM_PREFIX): self.phone_number,
})
return session_data
def get_owner_ids(self):
ret = [self.user_id]
ret.extend(self.additional_owner_ids)
return list(set(ret))
@classmethod
def from_django_user(cls, django_user):
return cls(user_id=str(django_user.pk), username=django_user.username,
                   password=django_user.password, date_joined=django_user.date_joined,
user_data={})
class CaseState(LooselyEqualDocumentSchema, IndexHoldingMixIn):
"""
Represents the state of a case on a phone.
"""
case_id = StringProperty()
type = StringProperty()
indices = SchemaListProperty(CommCareCaseIndex)
@classmethod
def from_case(cls, case):
if isinstance(case, dict):
return cls.wrap({
'case_id': case['_id'],
'type': case['type'],
'indices': case['indices'],
})
return cls(
case_id=case.get_id,
type=case.type,
indices=case.indices,
)
def __repr__(self):
return "case state: %s (%s)" % (self.case_id, self.indices)
class SyncLogAssertionError(AssertionError):
def __init__(self, case_id, *args, **kwargs):
self.case_id = case_id
super(SyncLogAssertionError, self).__init__(*args, **kwargs)
LOG_FORMAT_LEGACY = 'legacy'
LOG_FORMAT_SIMPLIFIED = 'simplified'
class AbstractSyncLog(SafeSaveDocument, UnicodeMixIn):
date = DateTimeProperty()
# domain = StringProperty()
user_id = StringProperty()
previous_log_id = StringProperty() # previous sync log, forming a chain
duration = IntegerProperty() # in seconds
log_format = StringProperty()
# owner_ids_on_phone stores the ids the phone thinks it's the owner of.
# This typically includes the user id,
# as well as all groups that that user is a member of.
owner_ids_on_phone = StringListProperty()
# for debugging / logging
previous_log_rev = StringProperty() # rev of the previous log at the time of creation
last_submitted = DateTimeProperty() # last time a submission caused this to be modified
rev_before_last_submitted = StringProperty() # rev when the last submission was saved
last_cached = DateTimeProperty() # last time this generated a cached response
hash_at_last_cached = StringProperty() # the state hash of this when it was last cached
# save state errors and hashes here
had_state_error = BooleanProperty(default=False)
error_date = DateTimeProperty()
error_hash = StringProperty()
strict = True # for asserts
def _assert(self, conditional, msg="", case_id=None):
if not conditional:
logger.warn("assertion failed: %s" % msg)
if self.strict:
raise SyncLogAssertionError(case_id, msg)
else:
self.has_assert_errors = True
@classmethod
def wrap(cls, data):
ret = super(AbstractSyncLog, cls).wrap(data)
if hasattr(ret, 'has_assert_errors'):
ret.strict = False
return ret
def case_count(self):
"""
How many cases are associated with this. Used in reports.
"""
raise NotImplementedError()
def phone_is_holding_case(self, case_id):
raise NotImplementedError()
def get_footprint_of_cases_on_phone(self):
"""
Gets the phone's flat list of all case ids on the phone,
owned or not owned but relevant.
"""
raise NotImplementedError()
def get_state_hash(self):
return CaseStateHash(Checksum(self.get_footprint_of_cases_on_phone()).hexdigest())
def update_phone_lists(self, xform, case_list):
"""
        Given a form and a list of touched cases, update this sync log to reflect the updated
state on the phone.
"""
raise NotImplementedError()
def get_payload_attachment_name(self, version):
return 'restore_payload_{version}.xml'.format(version=version)
def has_cached_payload(self, version):
return self.get_payload_attachment_name(version) in self._doc.get('_attachments', {})
def get_cached_payload(self, version, stream=False):
try:
return self.fetch_attachment(self.get_payload_attachment_name(version), stream=stream)
except ResourceNotFound:
return None
def set_cached_payload(self, payload, version):
self.put_attachment(payload, name=self.get_payload_attachment_name(version),
content_type='text/xml')
def invalidate_cached_payloads(self):
for name in copy(self._doc.get('_attachments', {})):
self.delete_attachment(name)
@classmethod
def from_other_format(cls, other_sync_log):
"""
Convert to an instance of a subclass from another subclass. Subclasses can
override this to provide conversion functions.
"""
raise IncompatibleSyncLogType('Unable to convert from {} to {}'.format(
type(other_sync_log), cls,
))
# anything prefixed with 'tests_only' is only used in tests
def tests_only_get_cases_on_phone(self):
raise NotImplementedError()
def test_only_clear_cases_on_phone(self):
raise NotImplementedError()
def test_only_get_dependent_cases_on_phone(self):
raise NotImplementedError()
class SyncLog(AbstractSyncLog):
"""
A log of a single sync operation.
"""
log_format = StringProperty(default=LOG_FORMAT_LEGACY)
last_seq = StringProperty() # the last_seq of couch during this sync
# we need to store a mapping of cases to indices for generating the footprint
# cases_on_phone represents the state of all cases the server
# thinks the phone has on it and cares about.
cases_on_phone = SchemaListProperty(CaseState)
    # dependent_cases_on_phone represents the possible list of cases
    # also on the phone because they are referenced by a real case's index
    # (or a dependent case's index).
    # This list is not necessarily a perfect reflection
    # of what's on the phone, but is guaranteed to be one after pruning
dependent_cases_on_phone = SchemaListProperty(CaseState)
@classmethod
def wrap(cls, data):
# last_seq used to be int, but is now string for cloudant compatibility
if isinstance(data.get('last_seq'), (int, long)):
data['last_seq'] = unicode(data['last_seq'])
return super(SyncLog, cls).wrap(data)
@classmethod
def last_for_user(cls, user_id):
from casexml.apps.phone.dbaccessors.sync_logs_by_user import get_last_synclog_for_user
return get_last_synclog_for_user(user_id)
def case_count(self):
return len(self.cases_on_phone)
def get_previous_log(self):
"""
Get the previous sync log, if there was one. Otherwise returns nothing.
"""
if not hasattr(self, "_previous_log_ref"):
self._previous_log_ref = SyncLog.get(self.previous_log_id) if self.previous_log_id else None
return self._previous_log_ref
def phone_has_case(self, case_id):
"""
Whether the phone currently has a case, according to this sync log
"""
return self.get_case_state(case_id) is not None
def get_case_state(self, case_id):
"""
Get the case state object associated with an id, or None if no such
object is found
"""
filtered_list = self._case_state_map()[case_id]
if filtered_list:
self._assert(len(filtered_list) == 1,
"Should be exactly 0 or 1 cases on phone but were %s for %s" %
(len(filtered_list), case_id))
return CaseState.wrap(filtered_list[0])
return None
def phone_has_dependent_case(self, case_id):
"""
Whether the phone currently has a dependent case, according to this sync log
"""
return self.get_dependent_case_state(case_id) is not None
def get_dependent_case_state(self, case_id):
"""
Get the dependent case state object associated with an id, or None if no such
object is found
"""
filtered_list = self._dependent_case_state_map()[case_id]
if filtered_list:
self._assert(len(filtered_list) == 1,
"Should be exactly 0 or 1 dependent cases on phone but were %s for %s" %
(len(filtered_list), case_id))
return CaseState.wrap(filtered_list[0])
return None
@memoized
def _dependent_case_state_map(self):
return self._build_state_map('dependent_cases_on_phone')
@memoized
def _case_state_map(self):
return self._build_state_map('cases_on_phone')
def _build_state_map(self, list_name):
state_map = defaultdict(list)
# referencing the property via self._doc is because we don't want to needlessly call wrap
# (which couchdbkit does not make any effort to cache on repeated calls)
# deterministically this change shaved off 10 seconds from an ota restore
# of about 300 cases.
for case in self._doc[list_name]:
state_map[case['case_id']].append(case)
return state_map
def _get_case_state_from_anywhere(self, case_id):
return self.get_case_state(case_id) or self.get_dependent_case_state(case_id)
def archive_case(self, case_id):
state = self.get_case_state(case_id)
if state:
self.cases_on_phone.remove(state)
self._case_state_map.reset_cache(self)
all_indices = [i for case_state in self.cases_on_phone + self.dependent_cases_on_phone
for i in case_state.indices]
if any([i.referenced_id == case_id for i in all_indices]):
self.dependent_cases_on_phone.append(state)
self._dependent_case_state_map.reset_cache(self)
return state
else:
state = self.get_dependent_case_state(case_id)
if state:
all_indices = [i for case_state in self.cases_on_phone + self.dependent_cases_on_phone
for i in case_state.indices]
if not any([i.referenced_id == case_id for i in all_indices]):
self.dependent_cases_on_phone.remove(state)
self._dependent_case_state_map.reset_cache(self)
return state
def _phone_owns(self, action):
# whether the phone thinks it owns an action block.
# the only way this can't be true is if the block assigns to an
# owner id that's not associated with the user on the phone
owner = action.updated_known_properties.get("owner_id")
if owner:
return owner in self.owner_ids_on_phone
return True
def update_phone_lists(self, xform, case_list):
# for all the cases update the relevant lists in the sync log
# so that we can build a historical record of what's associated
# with the phone
removed_states = {}
new_indices = set()
for case in case_list:
actions = case.get_actions_for_form(xform.get_id)
for action in actions:
logger.debug('OLD {}: {}'.format(case._id, action.action_type))
if action.action_type == const.CASE_ACTION_CREATE:
self._assert(not self.phone_has_case(case._id),
'phone has case being created: %s' % case._id)
starter_state = CaseState(case_id=case.get_id, indices=[])
if self._phone_owns(action):
self.cases_on_phone.append(starter_state)
self._case_state_map.reset_cache(self)
else:
removed_states[case._id] = starter_state
elif action.action_type == const.CASE_ACTION_UPDATE:
if not self._phone_owns(action):
# only action necessary here is in the case of
# reassignment to an owner the phone doesn't own
state = self.archive_case(case.get_id)
if state:
removed_states[case._id] = state
elif action.action_type == const.CASE_ACTION_INDEX:
# in the case of parallel reassignment and index update
# the phone might not have the case
if self.phone_has_case(case.get_id):
case_state = self.get_case_state(case.get_id)
else:
case_state = self.get_dependent_case_state(case.get_id)
# reconcile indices
if case_state:
for index in action.indices:
new_indices.add(index.referenced_id)
case_state.update_indices(action.indices)
elif action.action_type == const.CASE_ACTION_CLOSE:
if self.phone_has_case(case.get_id):
state = self.archive_case(case.get_id)
if state:
removed_states[case._id] = state
# if we just removed a state and added an index to it
# we have to put it back in our dependent case list
readded_any = False
for index in new_indices:
if index in removed_states:
self.dependent_cases_on_phone.append(removed_states[index])
readded_any = True
if readded_any:
self._dependent_case_state_map.reset_cache(self)
if case_list:
try:
self.save()
self.invalidate_cached_payloads()
except ResourceConflict:
logging.exception('doc update conflict saving sync log {id}'.format(
id=self._id,
))
raise
def get_footprint_of_cases_on_phone(self):
def children(case_state):
return [self._get_case_state_from_anywhere(index.referenced_id)
for index in case_state.indices]
relevant_cases = set()
queue = list(self.cases_on_phone)
while queue:
case_state = queue.pop()
# I don't actually understand why something is coming back None
# here, but we can probably just ignore it.
if case_state is not None and case_state.case_id not in relevant_cases:
relevant_cases.add(case_state.case_id)
queue.extend(children(case_state))
return relevant_cases
def phone_is_holding_case(self, case_id):
"""
Whether the phone is holding (not purging) a case.
"""
# this is inefficient and could be optimized
if self.phone_has_case(case_id):
return True
else:
cs = self.get_dependent_case_state(case_id)
if cs and case_id in self.get_footprint_of_cases_on_phone():
return True
return False
def __unicode__(self):
return "%s synced on %s (%s)" % (self.user_id, self.date.date(), self.get_id)
def tests_only_get_cases_on_phone(self):
return self.cases_on_phone
def test_only_clear_cases_on_phone(self):
self.cases_on_phone = []
def test_only_get_dependent_cases_on_phone(self):
return self.dependent_cases_on_phone
PruneResult = namedtuple('PruneResult', ['seen', 'pruned'])
class IndexTree(DocumentSchema):
"""
Document type representing a case dependency tree (which is flattened to a single dict)
"""
# a flat mapping of cases to dicts of their indices. The keys in each dict are the index identifiers
# and the values are the referenced case IDs
indices = SchemaDictProperty()
def __repr__(self):
return json.dumps(self.indices, indent=2)
def get_cases_that_directly_depend_on_case(self, case_id, cached_map=None):
cached_map = cached_map or _reverse_index_map(self.indices)
return cached_map.get(case_id, [])
def get_all_cases_that_depend_on_case(self, case_id, cached_map=None):
"""
Recursively builds a tree of all cases that depend on this case and returns
a flat set of case ids.
Allows passing in a cached map of reverse index references if you know you are going
to call it more than once in a row to avoid rebuilding that.
"""
def _recursive_call(case_id, all_cases, cached_map):
all_cases.add(case_id)
for dependent_case in self.get_cases_that_directly_depend_on_case(case_id, cached_map=cached_map):
if dependent_case not in all_cases:
all_cases.add(dependent_case)
_recursive_call(dependent_case, all_cases, cached_map)
all_cases = set()
cached_map = cached_map or _reverse_index_map(self.indices)
_recursive_call(case_id, all_cases, cached_map)
return all_cases
def delete_index(self, from_case_id, index_name):
prior_ids = self.indices.pop(from_case_id, {})
prior_ids.pop(index_name, None)
if prior_ids:
self.indices[from_case_id] = prior_ids
def set_index(self, from_case_id, index_name, to_case_id):
prior_ids = self.indices.get(from_case_id, {})
prior_ids[index_name] = to_case_id
self.indices[from_case_id] = prior_ids
def apply_updates(self, other_tree):
"""
Apply updates from another IndexTree and return a copy with those applied.
If an id is found in the new one, use that id's indices, otherwise, use this ones,
(defaulting to nothing).
"""
assert isinstance(other_tree, IndexTree)
new = IndexTree(
indices=copy(self.indices),
)
new.indices.update(other_tree.indices)
return new
def _reverse_index_map(index_map):
reverse_indices = defaultdict(set)
for case_id, indices in index_map.items():
for indexed_case_id in indices.values():
reverse_indices[indexed_case_id].add(case_id)
return dict(reverse_indices)
class SimplifiedSyncLog(AbstractSyncLog):
"""
New, simplified sync log class that is used by ownership cleanliness restore.
Just maintains a flat list of case IDs on the phone rather than the case/dependent state
lists from the SyncLog class.
"""
log_format = StringProperty(default=LOG_FORMAT_SIMPLIFIED)
case_ids_on_phone = SetProperty(unicode)
# this is a subset of case_ids_on_phone used to flag that a case is only around because it has dependencies
# this allows us to prune it if possible from other actions
dependent_case_ids_on_phone = SetProperty(unicode)
owner_ids_on_phone = SetProperty(unicode)
index_tree = SchemaProperty(IndexTree)
def save(self, *args, **kwargs):
# force doc type to SyncLog to avoid changing the couch view.
self.doc_type = "SyncLog"
super(SimplifiedSyncLog, self).save(*args, **kwargs)
def case_count(self):
return len(self.case_ids_on_phone)
def phone_is_holding_case(self, case_id):
"""
Whether the phone currently has a case, according to this sync log
"""
return case_id in self.case_ids_on_phone
def get_footprint_of_cases_on_phone(self):
return list(self.case_ids_on_phone)
@property
def primary_case_ids(self):
return self.case_ids_on_phone - self.dependent_case_ids_on_phone
def prune_case(self, case_id):
"""
Prunes a case from the tree while also pruning any dependencies as a result of this pruning.
"""
logger.debug('pruning: {}'.format(case_id))
self.dependent_case_ids_on_phone.add(case_id)
reverse_index_map = _reverse_index_map(self.index_tree.indices)
dependencies = self.index_tree.get_all_cases_that_depend_on_case(case_id, cached_map=reverse_index_map)
# we can only potentially remove a case if it's already in dependent case ids
# and therefore not directly owned
candidates_to_remove = dependencies & self.dependent_case_ids_on_phone
dependencies_not_to_remove = dependencies - self.dependent_case_ids_on_phone
def _remove_case(to_remove):
# uses closures for assertions
logger.debug('removing: {}'.format(to_remove))
assert to_remove in self.dependent_case_ids_on_phone
indices = self.index_tree.indices.pop(to_remove, {})
if to_remove != case_id:
# if the case had indexes they better also be in our removal list (except for ourselves)
for index in indices.values():
if not _domain_has_legacy_toggle_set():
assert index in candidates_to_remove, \
"expected {} in {} but wasn't".format(index, candidates_to_remove)
try:
self.case_ids_on_phone.remove(to_remove)
except KeyError:
_assert = soft_assert(to=['czue' + '@' + 'dimagi.com'], exponential_backoff=False)
def _should_fail_softly():
def _sync_log_was_old():
# todo: this here to avoid having to manually clean up after
# http://manage.dimagi.com/default.asp?179664
# it should be removed when there are no longer any instances of the assertion
if self.date < datetime(2015, 8, 25):
_assert(False, 'patching sync log {} to remove missing case ID {}!'.format(
self._id, to_remove)
)
return True
return False
return _domain_has_legacy_toggle_set() or _sync_log_was_old()
if _should_fail_softly():
pass
else:
# this is only a soft assert for now because of http://manage.dimagi.com/default.asp?181443
# we should convert back to a real Exception when we stop getting any of these
_assert(False, 'case {} already removed from sync log {}'.format(to_remove, self._id))
self.dependent_case_ids_on_phone.remove(to_remove)
if not dependencies_not_to_remove:
# this case's entire relevancy chain is in dependent cases
# this means they can all now be removed.
this_case_indices = self.index_tree.indices.get(case_id, {})
for to_remove in candidates_to_remove:
_remove_case(to_remove)
for this_case_index in this_case_indices.values():
if (this_case_index in self.dependent_case_ids_on_phone and
this_case_index not in candidates_to_remove):
self.prune_case(this_case_index)
else:
# we have some possible candidates for removal. we should check each of them.
candidates_to_remove.remove(case_id) # except ourself
for candidate in candidates_to_remove:
candidate_dependencies = self.index_tree.get_all_cases_that_depend_on_case(
candidate, cached_map=reverse_index_map
)
if not candidate_dependencies - self.dependent_case_ids_on_phone:
_remove_case(candidate)
def _add_primary_case(self, case_id):
self.case_ids_on_phone.add(case_id)
if case_id in self.dependent_case_ids_on_phone:
self.dependent_case_ids_on_phone.remove(case_id)
def update_phone_lists(self, xform, case_list):
made_changes = False
logger.debug('updating sync log for {}'.format(self.user_id))
logger.debug('case ids before update: {}'.format(', '.join(self.case_ids_on_phone)))
logger.debug('dependent case ids before update: {}'.format(', '.join(self.dependent_case_ids_on_phone)))
logger.debug('index tree before update: {}'.format(self.index_tree))
class CaseUpdate(object):
def __init__(self, case_id):
self.case_id = case_id
self.was_live_previously = True
self.final_owner_id = None
self.is_closed = None
self.indices_to_add = []
self.indices_to_delete = []
ShortIndex = namedtuple('ShortIndex', ['case_id', 'identifier', 'referenced_id'])
# this is a variable used via closures in the function below
owner_id_map = {}
def get_latest_owner_id(case_id, action=None):
# "latest" just means as this forms actions are played through
if action is not None:
owner_id_from_action = action.updated_known_properties.get("owner_id")
if owner_id_from_action is not None:
owner_id_map[case_id] = owner_id_from_action
return owner_id_map.get(case_id, None)
all_updates = {}
for case in case_list:
if case._id not in all_updates:
logger.debug('initializing update for case {}'.format(case._id))
all_updates[case._id] = CaseUpdate(case_id=case._id)
case_update = all_updates[case._id]
case_update.was_live_previously = case._id in self.primary_case_ids
actions = case.get_actions_for_form(xform.get_id)
for action in actions:
logger.debug('{}: {}'.format(case._id, action.action_type))
owner_id = get_latest_owner_id(case._id, action)
if owner_id is not None:
case_update.final_owner_id = owner_id
if action.action_type == const.CASE_ACTION_INDEX:
for index in action.indices:
if index.referenced_id:
case_update.indices_to_add.append(
ShortIndex(case._id, index.identifier, index.referenced_id)
)
else:
case_update.indices_to_delete.append(
ShortIndex(case._id, index.identifier, None)
)
elif action.action_type == const.CASE_ACTION_CLOSE:
case_update.is_closed = True
def _add_index(index):
logger.debug('adding index {} -> {} ({}).'.format(
index.case_id, index.referenced_id, index.identifier))
self.index_tree.set_index(index.case_id, index.identifier, index.referenced_id)
if index.referenced_id not in self.case_ids_on_phone:
self.case_ids_on_phone.add(index.referenced_id)
self.dependent_case_ids_on_phone.add(index.referenced_id)
def _is_live(case_update, owner_ids):
if case_update.is_closed:
return False
elif case_update.final_owner_id is None:
# we likely didn't touch owner_id so just default to whatever it was previously
return case_update.was_live_previously
else:
return case_update.final_owner_id in owner_ids
non_live_updates = []
for case in case_list:
case_update = all_updates[case._id]
if _is_live(case_update, self.owner_ids_on_phone):
logger.debug('case {} is live.'.format(case_update.case_id))
if case._id not in self.case_ids_on_phone:
self._add_primary_case(case._id)
made_changes = True
elif case._id in self.dependent_case_ids_on_phone:
self.dependent_case_ids_on_phone.remove(case._id)
made_changes = True
for index in case_update.indices_to_add:
_add_index(index)
made_changes = True
for index in case_update.indices_to_delete:
self.index_tree.delete_index(index.case_id, index.identifier)
made_changes = True
else:
# process the non-live updates after all live are already processed
non_live_updates.append(case_update)
for update in non_live_updates:
logger.debug('case {} is NOT live.'.format(update.case_id))
if update.case_id in self.case_ids_on_phone:
# try pruning the case
self.prune_case(update.case_id)
if update.case_id in self.case_ids_on_phone:
# if unsuccessful, process the rest of the update
                    for index in update.indices_to_add:
_add_index(index)
                    for index in update.indices_to_delete:
self.index_tree.delete_index(index.case_id, index.identifier)
made_changes = True
logger.debug('case ids after update: {}'.format(', '.join(self.case_ids_on_phone)))
logger.debug('dependent case ids after update: {}'.format(', '.join(self.dependent_case_ids_on_phone)))
logger.debug('index tree after update: {}'.format(self.index_tree))
if made_changes or case_list:
try:
if made_changes:
logger.debug('made changes, saving.')
self.last_submitted = datetime.utcnow()
self.rev_before_last_submitted = self._rev
self.save()
if case_list:
try:
self.invalidate_cached_payloads()
except ResourceConflict:
# this operation is harmless so just blindly retry and don't
# reraise if it goes through the second time
SimplifiedSyncLog.get(self._id).invalidate_cached_payloads()
except ResourceConflict:
logging.exception('doc update conflict saving sync log {id}'.format(
id=self._id,
))
raise
def prune_dependent_cases(self):
"""
Attempt to prune any dependent cases from the sync log.
"""
# this is done when migrating from old formats or during initial sync
# to prune non-relevant dependencies
for dependent_case_id in list(self.dependent_case_ids_on_phone):
            # need this additional check since the case might have already been pruned/removed
# as a result of pruning the child case
if dependent_case_id in self.dependent_case_ids_on_phone:
# this will be a no-op if the case cannot be pruned due to dependencies
self.prune_case(dependent_case_id)
@classmethod
def from_other_format(cls, other_sync_log):
"""
Migrate from the old SyncLog format to this one.
"""
if isinstance(other_sync_log, SyncLog):
previous_log_footprint = set(other_sync_log.get_footprint_of_cases_on_phone())
def _add_state_contributions(new_sync_log, case_state, is_dependent=False):
if case_state.case_id in previous_log_footprint:
new_sync_log.case_ids_on_phone.add(case_state.case_id)
for index in case_state.indices:
new_sync_log.index_tree.set_index(case_state.case_id, index.identifier,
index.referenced_id)
if is_dependent:
new_sync_log.dependent_case_ids_on_phone.add(case_state.case_id)
ret = cls.wrap(other_sync_log.to_json())
for case_state in other_sync_log.cases_on_phone:
_add_state_contributions(ret, case_state)
dependent_case_ids = set()
for case_state in other_sync_log.dependent_cases_on_phone:
if case_state.case_id in previous_log_footprint:
_add_state_contributions(ret, case_state, is_dependent=True)
dependent_case_ids.add(case_state.case_id)
# try to prune any dependent cases - the old format does this on
# access, but the new format does it ahead of time and always assumes
# its current state is accurate.
ret.prune_dependent_cases()
# set and cleanup other properties
ret.log_format = LOG_FORMAT_SIMPLIFIED
del ret['last_seq']
del ret['cases_on_phone']
del ret['dependent_cases_on_phone']
ret.migrated_from = other_sync_log.to_json()
return ret
else:
return super(SimplifiedSyncLog, cls).from_other_format(other_sync_log)
def tests_only_get_cases_on_phone(self):
# hack - just for tests
return [CaseState(case_id=id) for id in self.case_ids_on_phone]
def test_only_clear_cases_on_phone(self):
self.case_ids_on_phone = set()
def test_only_get_dependent_cases_on_phone(self):
# hack - just for tests
return [CaseState(case_id=id) for id in self.dependent_case_ids_on_phone]
def _domain_has_legacy_toggle_set():
# old versions of commcare (< 2.10ish) didn't purge on form completion
# so can still modify cases that should no longer be on the phone.
request = get_request()
domain = request.domain if request else None
return LEGACY_SYNC_SUPPORT.enabled(domain) if domain else False
def get_properly_wrapped_sync_log(doc_id):
"""
Looks up and wraps a sync log, using the class based on the 'log_format' attribute.
Defaults to the existing legacy SyncLog class.
"""
return properly_wrap_sync_log(SyncLog.get_db().get(doc_id))
def properly_wrap_sync_log(doc):
return get_sync_log_class_by_format(doc.get('log_format')).wrap(doc)
def get_sync_log_class_by_format(format):
return {
LOG_FORMAT_LEGACY: SyncLog,
LOG_FORMAT_SIMPLIFIED: SimplifiedSyncLog,
}.get(format, SyncLog)
class OwnershipCleanlinessFlag(models.Model):
"""
Stores whether an owner_id is "clean" aka has a case universe only belonging
to that ID.
We use this field to optimize restores.
"""
domain = models.CharField(max_length=100, db_index=True)
owner_id = models.CharField(max_length=100, db_index=True)
is_clean = models.BooleanField(default=False)
last_checked = models.DateTimeField()
hint = models.CharField(max_length=100, null=True, blank=True)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
self.last_checked = datetime.utcnow()
super(OwnershipCleanlinessFlag, self).save(force_insert, force_update, using, update_fields)
@classmethod
def get_for_owner(cls, domain, owner_id):
return cls.objects.get_or_create(domain=domain, owner_id=owner_id)[0]
class Meta:
app_label = 'phone'
unique_together = [('domain', 'owner_id')]
```
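A minimal illustrative sketch of the `log_format` dispatch defined above. The import path is an assumption based on the `casexml.apps.phone` imports used elsewhere in this document; the classes and constant are the ones shown in the file itself.
```python
# Hypothetical usage sketch -- module path assumed, not taken from a file header.
from casexml.apps.phone.models import (
    LOG_FORMAT_SIMPLIFIED,
    SimplifiedSyncLog,
    SyncLog,
    get_sync_log_class_by_format,
)
# Unknown or missing formats fall back to the legacy SyncLog class.
assert get_sync_log_class_by_format(None) is SyncLog
assert get_sync_log_class_by_format('unknown-format') is SyncLog
# The simplified format maps to SimplifiedSyncLog, which is what
# properly_wrap_sync_log() relies on when wrapping a raw couch doc.
assert get_sync_log_class_by_format(LOG_FORMAT_SIMPLIFIED) is SimplifiedSyncLog
```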
#### File: phone/tests/test_batched_mode.py
```python
from django.test.utils import override_settings
from mock import patch
from casexml.apps.case.tests.util import assert_user_has_cases, assert_user_doesnt_have_cases
from casexml.apps.phone.tests.test_sync_mode import USER_ID, SyncBaseTest
@patch('casexml.apps.phone.data_providers.case.batched.BatchedCaseSyncOperation.chunk_size', new=3)
@override_settings(TESTS_SHOULD_USE_CLEAN_RESTORE=False)
class BatchRestoreTests(SyncBaseTest):
def test_multiple_batches_restore(self):
case_ids = ["case_{}".format(i) for i in range(10)]
self._createCaseStubs(case_ids, owner_id=USER_ID)
restore_config, _ = assert_user_has_cases(self, self.user, case_ids)
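        # (Illustrative note, not in the original source:) chunk_size is patched
        # to 3 above, so the 10 cases are fetched in ceil(10 / 3) = 4 batches.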
self.assertEqual(restore_config.restore_state.provider_log['num_case_batches'], 4)
def test_multiple_batches_sync(self):
case_ids = ["case_{}".format(i) for i in range(10)]
self._createCaseStubs(case_ids, owner_id=USER_ID)
restore_config, _ = assert_user_doesnt_have_cases(self, self.user, case_ids,
restore_id=self.sync_log.get_id)
# 4 batches to fetch cases + 1 batch for cases left on phone
self.assertEqual(restore_config.restore_state.provider_log['num_case_batches'], 5)
```
#### File: management/commands/force_update_schemas.py
```python
from django.core.management.base import LabelCommand, CommandError
from couchexport.models import ExportSchema
import json
from couchexport.tasks import rebuild_schemas
class Command(LabelCommand):
help = "Given a particular export index, update all checkpoints " \
"referencing that index to use the latest schema."
args = "<index>"
label = "Index of the export to use, or 'all' to include all exports"
def handle(self, *args, **options):
if len(args) < 1: raise CommandError('Please specify %s.' % self.label)
index_in = args[0]
if index_in == "all":
to_update = ExportSchema.get_all_indices()
else:
to_update = [json.loads(index_in)]
for index in to_update:
processed = rebuild_schemas(index)
print "processed %s checkpoints matching %s" % (processed, index)
```
#### File: management/commands/migrate_export_types.py
```python
from django.core.management.base import LabelCommand, CommandError
from couchexport.models import SavedExportSchema
from optparse import make_option
class Command(LabelCommand):
help = "Migrates over custom exports by adding a default type property if not present."
args = "default_type"
label = "default type"
option_list = LabelCommand.option_list + \
(make_option('--dryrun', action='store_true', dest='dryrun', default=False,
help="Don't do the actual migration, just print the output"),)
def handle(self, *args, **options):
if len(args) != 1: raise CommandError("Syntax: ./manage.py migrate_export_types [default type]!")
default_type = args[0]
for export in SavedExportSchema.view("couchexport/saved_export_schemas", include_docs=True):
if not export.type:
print "migrating %s" % export
export.type = default_type
if not options['dryrun']:
export.save()
print "Done!"
```
#### File: ex-submodules/couchexport/schema.py
```python
from couchdbkit.client import Database
from django.conf import settings
from couchexport.exceptions import SchemaInferenceError
from couchexport.models import ExportSchema
def build_latest_schema(schema_index):
"""
Build a schema, directly from the index. Also creates a saved checkpoint.
"""
from couchexport.export import ExportConfiguration
db = Database(settings.COUCH_DATABASE)
previous_export = ExportSchema.last(schema_index)
config = ExportConfiguration(db, schema_index,
previous_export=previous_export)
schema = config.get_latest_schema()
if not schema:
return None
updated_checkpoint = config.create_new_checkpoint()
return updated_checkpoint
def get_kind(doc):
if doc == "" or doc is None:
return "null"
elif isinstance(doc, dict):
return "dict"
elif isinstance(doc, list):
return "list"
else:
return "string"
def make_schema(doc):
doc_kind = get_kind(doc)
if doc_kind == "null":
return None
elif doc_kind == "dict":
schema = {}
for key in doc:
schema[key] = make_schema(doc[key])
return schema
elif doc_kind == "list":
schema = None
for doc_ in doc:
schema = extend_schema(schema, doc_)
return [schema]
elif doc_kind == "string":
return "string"
def extend_schema(schema, doc):
schema_kind = get_kind(schema)
doc_kind = get_kind(doc)
# 1. anything + null => anything
if doc_kind == "null":
return schema
if schema_kind == "null":
return make_schema(doc)
# 2. not-list => [not-list] when compared to a list
if schema_kind != "list" and doc_kind == "list":
schema_kind = "list"
schema = [schema]
if doc_kind != "list" and schema_kind == "list":
doc_kind = "list"
doc = [doc]
# 3. not-dict => {'': not-dict} when compared to a dict
if schema_kind != 'dict' and doc_kind == 'dict':
if not schema_kind == 'string':
raise SchemaInferenceError("%r is type %r but should be type 'string'!!" % (schema, schema_kind))
schema_kind = 'dict'
schema = {'': schema_kind}
if doc_kind != 'dict' and schema_kind == 'dict':
if not doc_kind == 'string':
raise SchemaInferenceError("%r is type %r but should be type 'string'!!" % (doc, doc_kind))
doc_kind = 'dict'
doc = {'': doc_kind}
# 4. Now that schema and doc are of the same kind
if schema_kind == doc_kind == "dict":
for key in doc:
schema[key] = extend_schema(schema.get(key, None), doc[key])
return schema
if schema_kind == doc_kind == "list":
for doc_ in doc:
schema[0] = extend_schema(schema[0], doc_)
return schema
if schema_kind == doc_kind == "string":
return "string"
# 5. We should have covered every case above, but if not, fail hard
raise SchemaInferenceError("Mismatched schema (%r) and doc (%r)" % (schema, doc))
```
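A small, hedged usage sketch of the schema helpers above (illustrative only; the expected values in the comments follow from the `make_schema`/`extend_schema` rules as written):
```python
from couchexport.schema import extend_schema, make_schema
doc1 = {'name': 'akash', 'tags': ['a']}
doc2 = {'name': 'brian', 'tags': ['b', 'c'], 'address': {'city': 'delhi'}}
schema = make_schema(doc1)
# schema == {'name': 'string', 'tags': ['string']}
schema = extend_schema(schema, doc2)
# schema == {'name': 'string', 'tags': ['string'], 'address': {'city': 'string'}}
# nulls never narrow an existing schema (rule 1 above)
assert extend_schema(schema, None) == schema
```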
#### File: couchexport/tests/test_saved.py
```python
import datetime
from django.test import TestCase
from couchexport.groupexports import get_saved_export_and_delete_copies
from couchexport.models import SavedBasicExport, ExportConfiguration
class SavedExportTest(TestCase):
def test_file_save_and_load(self):
payload = 'something small and simple'
for name in ['normal', u'हिंदी', None]:
saved = SavedBasicExport(configuration=_mk_config(name))
saved.save()
saved.set_payload(payload)
self.assertEqual(payload, saved.get_payload())
def test_get_by_index(self):
index = ['some', 'index']
saved_export = SavedBasicExport(configuration=_mk_config(index=index))
saved_export.save()
back = SavedBasicExport.by_index(index)
self.assertEqual(1, len(back))
self.assertEqual(saved_export._id, back[0]._id)
def test_get_saved_and_delete_copies_missing(self):
self.assertEqual(None, get_saved_export_and_delete_copies(['missing', 'index']))
def test_get_saved_and_delete_copies_single(self):
index = ['single']
saved_export = SavedBasicExport(configuration=_mk_config(index=index))
saved_export.save()
self.assertEqual(saved_export._id, get_saved_export_and_delete_copies(index)._id)
def test_get_saved_and_delete_copies_multiple(self):
index = ['multiple']
# make three exports with the last one being the most recently updated
timestamp = datetime.datetime.utcnow()
for i in range(3):
saved_export = SavedBasicExport(configuration=_mk_config(index=index),
last_updated=timestamp + datetime.timedelta(days=i))
saved_export.save()
self.assertEqual(3, len(SavedBasicExport.by_index(index)))
chosen_one = get_saved_export_and_delete_copies(index)
# this relies on the variable being set last in the loop which is a bit unintuitive
self.assertEqual(saved_export._id, chosen_one._id)
saved_after_deletion = SavedBasicExport.by_index(index)
self.assertEqual(1, len(saved_after_deletion))
self.assertEqual(chosen_one._id, saved_after_deletion[0]._id)
def _mk_config(name='some export name', index='dummy_index'):
return ExportConfiguration(index=index, name=name, format='xlsx')
```
#### File: couchexport/tests/test_writers.py
```python
from codecs import BOM_UTF8
from couchexport.writers import ZippedExportWriter, CsvFileWriter
from django.test import SimpleTestCase
from mock import patch, Mock
class ZippedExportWriterTests(SimpleTestCase):
def setUp(self):
self.zip_file_patch = patch('zipfile.ZipFile')
self.MockZipFile = self.zip_file_patch.start()
self.path_mock = Mock()
self.path_mock.get_path.return_value = 'tmp'
self.writer = ZippedExportWriter()
self.writer.tables = [self.path_mock]
self.writer.file = Mock()
def tearDown(self):
self.zip_file_patch.stop()
del self.writer
def test_zipped_export_writer_unicode(self):
mock_zip_file = self.MockZipFile.return_value
self.writer.table_names = {0: u'ひらがな'}
self.writer._write_final_result()
mock_zip_file.write.assert_called_with('tmp', 'ひらがな.csv')
def test_zipped_export_writer_utf8(self):
mock_zip_file = self.MockZipFile.return_value
self.writer.table_names = {0: '\xe3\x81\xb2\xe3\x82\x89\xe3\x81\x8c\xe3\x81\xaa'}
self.writer._write_final_result()
mock_zip_file.write.assert_called_with('tmp', 'ひらがな.csv')
class CsvFileWriterTests(SimpleTestCase):
def test_csv_file_writer_bom(self):
"""
CsvFileWriter should prepend a byte-order mark to the start of the CSV file for Excel
"""
writer = CsvFileWriter()
headers = ['ham', 'spam', 'eggs']
writer.open('Spam')
writer.write_row(headers)
writer.finish()
file_start = writer.get_file().read(6)
self.assertEqual(file_start, BOM_UTF8 + 'ham')
```
#### File: couchforms/tests/test_auth.py
```python
from django.test import TestCase
from couchforms.models import DefaultAuthContext
import os
from corehq.form_processor.interfaces import FormProcessorInterface
class AuthTest(TestCase):
def test_auth_context(self):
file_path = os.path.join(os.path.dirname(__file__), "data", "meta.xml")
xml_data = open(file_path, "rb").read()
def process(xform):
xform['auth_context'] = DefaultAuthContext().to_json()
xform = FormProcessorInterface.post_xform(xml_data, process=process)
self.assertEqual(xform.auth_context, {'doc_type': 'DefaultAuthContext'})
```
#### File: couchforms/tests/test_edits.py
```python
from datetime import datetime, timedelta
import os
import uuid
from django.test import TestCase
from mock import MagicMock
from couchdbkit import RequestFailed
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.xml import V2
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.apps.receiverwrapper import submit_form_locally
from couchforms.models import XFormInstance, \
UnfinishedSubmissionStub
from corehq.form_processor.interfaces import FormProcessorInterface
class EditFormTest(TestCase):
ID = '7H46J37FGH3'
domain = 'test-form-edits'
def tearDown(self):
FormProcessorInterface.delete_all_xforms()
def _get_files(self):
first_file = os.path.join(os.path.dirname(__file__), "data", "deprecation", "original.xml")
edit_file = os.path.join(os.path.dirname(__file__), "data", "deprecation", "edit.xml")
with open(first_file, "rb") as f:
xml_data1 = f.read()
with open(edit_file, "rb") as f:
xml_data2 = f.read()
return xml_data1, xml_data2
def test_basic_edit(self):
xml_data1, xml_data2 = self._get_files()
yesterday = datetime.utcnow() - timedelta(days=1)
xform = FormProcessorInterface.post_xform(xml_data1)
self.assertEqual(self.ID, xform.id)
self.assertEqual("XFormInstance", xform.doc_type)
self.assertEqual("", xform.form['vitals']['height'])
self.assertEqual("other", xform.form['assessment']['categories'])
# post form back in time to simulate an edit
FormProcessorInterface.update_properties(
xform,
domain=self.domain,
received_on=yesterday,
)
xform = FormProcessorInterface.post_xform(xml_data2, domain=self.domain)
self.assertEqual(self.ID, xform.id)
self.assertEqual("XFormInstance", xform.doc_type)
self.assertEqual("100", xform.form['vitals']['height'])
self.assertEqual("Edited Baby!", xform.form['assessment']['categories'])
[deprecated_xform] = FormProcessorInterface.get_by_doc_type(self.domain, 'XFormDeprecated')
self.assertEqual(self.ID, deprecated_xform.orig_id)
self.assertNotEqual(self.ID, deprecated_xform.id)
self.assertEqual('XFormDeprecated', deprecated_xform.doc_type)
self.assertEqual("", deprecated_xform.form['vitals']['height'])
self.assertEqual("other", deprecated_xform.form['assessment']['categories'])
self.assertEqual(xform.received_on, deprecated_xform.received_on)
self.assertEqual(xform.deprecated_form_id, deprecated_xform.id)
self.assertTrue(xform.edited_on > deprecated_xform.received_on)
self.assertEqual(
FormProcessorInterface.get_attachment(deprecated_xform.id, 'form.xml'),
xml_data1
)
self.assertEqual(FormProcessorInterface.get_attachment(self.ID, 'form.xml'), xml_data2)
def test_broken_save(self):
"""
Test that if the second form submission terminates unexpectedly
and the main form isn't saved, then there are no side effects
such as the original having been marked as deprecated.
"""
class BorkDB(object):
"""context manager for making a db's bulk_save temporarily fail"""
def __init__(self, db):
self.old = {}
self.db = db
def __enter__(self):
self.old['bulk_save'] = self.db.bulk_save
self.db.bulk_save = MagicMock(name='bulk_save',
side_effect=RequestFailed())
def __exit__(self, exc_type, exc_val, exc_tb):
self.db.bulk_save = self.old['bulk_save']
xforms = FormProcessorInterface.get_by_doc_type(self.domain, 'XFormInstance')
self.assertEqual(len(xforms), 0)
xml_data1, xml_data2 = self._get_files()
submit_form_locally(xml_data1, self.domain)
xform = FormProcessorInterface.get_xform(self.ID)
self.assertEqual(self.ID, xform.id)
self.assertEqual("XFormInstance", xform.doc_type)
self.assertEqual(self.domain, xform.domain)
self.assertEqual(
UnfinishedSubmissionStub.objects.filter(xform_id=self.ID).count(),
0
)
# This seems like a couch specific test util. Will likely need postgres test utils
with BorkDB(XFormInstance.get_db()):
with self.assertRaises(RequestFailed):
submit_form_locally(xml_data2, self.domain)
# it didn't go through, so make sure there are no edits still
xforms = FormProcessorInterface.get_by_doc_type(self.domain, 'XFormDeprecated')
self.assertEqual(len(xforms), 0)
xform = FormProcessorInterface.get_xform(self.ID)
self.assertIsNotNone(xform)
self.assertEqual(
UnfinishedSubmissionStub.objects.filter(xform_id=self.ID,
saved=False).count(),
1
)
self.assertEqual(
UnfinishedSubmissionStub.objects.filter(xform_id=self.ID).count(),
1
)
def test_case_management(self):
form_id = uuid.uuid4().hex
case_id = uuid.uuid4().hex
owner_id = uuid.uuid4().hex
case_block = CaseBlock(
create=True,
case_id=case_id,
case_type='person',
owner_id=owner_id,
update={
'property': 'original value'
}
).as_string()
submit_case_blocks(case_block, domain=self.domain, form_id=form_id)
# validate some assumptions
case = FormProcessorInterface.get_case(case_id)
self.assertEqual(case.type, 'person')
self.assertEqual(case.property, 'original value')
self.assertEqual([form_id], case.xform_ids)
self.assertEqual(2, len(case.actions))
for a in case.actions:
self.assertEqual(form_id, a.xform_id)
# submit a new form with a different case update
case_block = CaseBlock(
create=True,
case_id=case_id,
case_type='newtype',
owner_id=owner_id,
update={
'property': 'edited value'
}
).as_string()
submit_case_blocks(case_block, domain=self.domain, form_id=form_id)
case = FormProcessorInterface.get_case(case_id)
self.assertEqual(case.type, 'newtype')
self.assertEqual(case.property, 'edited value')
self.assertEqual([form_id], case.xform_ids)
self.assertEqual(2, len(case.actions))
for a in case.actions:
self.assertEqual(form_id, a.xform_id)
def test_second_edit_fails(self):
form_id = uuid.uuid4().hex
case_id = uuid.uuid4().hex
case_block = CaseBlock(
create=True,
case_id=case_id,
case_type='person',
).as_string()
submit_case_blocks(case_block, domain=self.domain, form_id=form_id)
# submit an edit form with a bad case update (for example a bad ID)
case_block = CaseBlock(
create=True,
case_id='',
case_type='person',
).as_string()
submit_case_blocks(case_block, domain=self.domain, form_id=form_id)
xform = FormProcessorInterface.get_xform(form_id)
self.assertEqual('XFormError', xform.doc_type)
deprecated_xform = FormProcessorInterface.get_xform(xform.deprecated_form_id)
self.assertEqual('XFormDeprecated', deprecated_xform.doc_type)
def test_case_management_ordering(self):
case_id = uuid.uuid4().hex
owner_id = uuid.uuid4().hex
# create a case
case_block = CaseBlock(
create=True,
case_id=case_id,
case_type='person',
owner_id=owner_id,
).as_string()
create_form_id = submit_case_blocks(case_block, domain=self.domain)
# validate that worked
case = FormProcessorInterface.get_case(case_id)
self.assertEqual([create_form_id], case.xform_ids)
self.assertEqual([create_form_id], [a.xform_id for a in case.actions])
for a in case.actions:
self.assertEqual(create_form_id, a.xform_id)
edit_date = datetime.utcnow()
# set some property value
case_block = CaseBlock(
create=False,
case_id=case_id,
date_modified=edit_date,
update={
'property': 'first value',
}
).as_string()
edit_form_id = submit_case_blocks(case_block, domain=self.domain)
# validate that worked
case = FormProcessorInterface.get_case(case_id)
self.assertEqual(case.property, 'first value')
self.assertEqual([create_form_id, edit_form_id], case.xform_ids)
self.assertEqual([create_form_id, edit_form_id], [a.xform_id for a in case.actions])
# submit a second (new) form updating the value
case_block = CaseBlock(
create=False,
case_id=case_id,
update={
'property': 'final value',
}
).as_string()
second_edit_form_id = submit_case_blocks(case_block, domain=self.domain)
# validate that worked
case = FormProcessorInterface.get_case(case_id)
self.assertEqual(case.property, 'final value')
self.assertEqual([create_form_id, edit_form_id, second_edit_form_id], case.xform_ids)
self.assertEqual([create_form_id, edit_form_id, second_edit_form_id], [a.xform_id for a in
case.actions])
# deprecate the middle edit
case_block = CaseBlock(
create=False,
case_id=case_id,
date_modified=edit_date, # need to use the previous edit date for action sort comparisons
update={
'property': 'edited value',
'added_property': 'added value',
}
).as_string()
submit_case_blocks(case_block, domain=self.domain, form_id=edit_form_id)
# ensure that the middle edit stays in the right place and is applied
# before the final one
case = FormProcessorInterface.get_case(case_id)
self.assertEqual(case.property, 'final value')
self.assertEqual(case.added_property, 'added value')
self.assertEqual([create_form_id, edit_form_id, second_edit_form_id], case.xform_ids)
self.assertEqual([create_form_id, edit_form_id, second_edit_form_id], [a.xform_id for a in
case.actions])
```
#### File: ex-submodules/couchforms/util.py
```python
from __future__ import absolute_import
import hashlib
import datetime
import logging
import pytz
from StringIO import StringIO
from django.test.client import Client
from couchdbkit import ResourceNotFound, BulkSaveError
from django.http import (
HttpRequest,
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
)
import iso8601
from redis import RedisError
from corehq.apps.tzmigration import phone_timezones_should_be_processed, timezone_migration_in_progress
from corehq.util.soft_assert import soft_assert
from dimagi.ext.jsonobject import re_loose_datetime
from dimagi.utils.couch.undo import DELETED_SUFFIX
from dimagi.utils.logging import notify_exception
from dimagi.utils.mixins import UnicodeMixIn
from dimagi.utils.couch import uid, LockManager, ReleaseOnError
from dimagi.utils.parsing import json_format_datetime
import xml2json
import couchforms
from .const import BadRequest
from .exceptions import DuplicateError, UnexpectedDeletedXForm, \
PhoneDateValueError
from .models import (
DefaultAuthContext,
SubmissionErrorLog,
UnfinishedSubmissionStub,
XFormDeprecated,
XFormDuplicate,
XFormError,
XFormInstance,
doc_types,
)
from .signals import (
successful_form_received,
)
from .xml import ResponseNature, OpenRosaResponse
legacy_soft_assert = soft_assert('{}@{}'.format('<EMAIL>', '<EMAIL>'))
class SubmissionError(Exception, UnicodeMixIn):
"""
When something especially bad goes wrong during a submission, this
exception gets raised.
"""
def __init__(self, error_log, *args, **kwargs):
super(SubmissionError, self).__init__(*args, **kwargs)
self.error_log = error_log
def __str__(self):
return str(self.error_log)
def _extract_meta_instance_id(form):
"""Takes form json (as returned by xml2json)"""
if form.get('Meta'):
# bhoma, 0.9 commcare
meta = form['Meta']
elif form.get('meta'):
# commcare 1.0
meta = form['meta']
else:
return None
if meta.get('uid'):
# bhoma
return meta['uid']
elif meta.get('instanceID'):
# commcare 0.9, commcare 1.0
return meta['instanceID']
else:
return None
def convert_xform_to_json(xml_string):
"""
takes xform payload as xml_string and returns the equivalent json
i.e. the json that will show up as xform.form
"""
try:
name, json_form = xml2json.xml2json(xml_string)
except xml2json.XMLSyntaxError as e:
raise couchforms.XMLSyntaxError(u'Invalid XML: %s' % e)
json_form['#type'] = name
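    # Illustrative final shape (not part of the original source):
    #   '<data xmlns="http://example.com/x"><name>foo</name></data>'
    # becomes roughly
    #   {'name': 'foo', '@xmlns': 'http://example.com/x', '#type': 'data'}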
return json_form
def acquire_lock_for_xform(xform_id):
# this is high, but I want to test if MVP conflicts disappear
lock = XFormInstance.get_obj_lock_by_id(xform_id, timeout_seconds=2*60)
try:
lock.acquire()
except RedisError:
lock = None
return lock
class MultiLockManager(list):
def __enter__(self):
return [lock_manager.__enter__() for lock_manager in self]
def __exit__(self, exc_type, exc_val, exc_tb):
for lock_manager in self:
lock_manager.__exit__(exc_type, exc_val, exc_tb)
def adjust_datetimes(data, parent=None, key=None):
"""
find all datetime-like strings within data (deserialized json)
and format them uniformly, in place.
"""
# this strips the timezone like we've always done
# todo: in the future this will convert to UTC
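    # Illustrative example (not part of the original source): with phone
    # timezone processing off, {'when': '2015-01-01T12:00:00.000+03:00'} has
    # its offset dropped and is rewritten in place in json_format_datetime's
    # canonical string form.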
if isinstance(data, basestring) and re_loose_datetime.match(data):
try:
matching_datetime = iso8601.parse_date(data)
except iso8601.ParseError:
pass
else:
if phone_timezones_should_be_processed():
parent[key] = unicode(json_format_datetime(
matching_datetime.astimezone(pytz.utc).replace(tzinfo=None)
))
else:
parent[key] = unicode(json_format_datetime(
matching_datetime.replace(tzinfo=None)))
elif isinstance(data, dict):
for key, value in data.items():
adjust_datetimes(value, parent=data, key=key)
elif isinstance(data, list):
for i, value in enumerate(data):
adjust_datetimes(value, parent=data, key=i)
# return data, just for convenience in testing
# this is the original input, modified, not a new data structure
return data
def create_xform(xml_string, attachments=None, process=None):
"""
create but do not save an XFormInstance from an xform payload (xml_string)
    the doc _id is taken from the form meta's instanceID when present,
    otherwise a new uuid is generated
    returns a LockManager holding the new, unsaved XFormInstance and its lock
    `process` is a transformation to apply to the form right before saving
This is to avoid having to save multiple times
If xml_string is bad xml
- raise couchforms.XMLSyntaxError
"""
from corehq.util.couch_helpers import CouchAttachmentsBuilder
assert attachments is not None
json_form = convert_xform_to_json(xml_string)
adjust_datetimes(json_form)
_id = _extract_meta_instance_id(json_form) or XFormInstance.get_db().server.next_uuid()
assert _id
attachments_builder = CouchAttachmentsBuilder()
attachments_builder.add(
content=xml_string,
name='form.xml',
content_type='text/xml',
)
for key, value in attachments.items():
attachments_builder.add(
content=value,
name=key,
content_type=value.content_type,
)
xform = XFormInstance(
# form has to be wrapped
{'form': json_form},
# other properties can be set post-wrap
_id=_id,
xmlns=json_form.get('@xmlns'),
_attachments=attachments_builder.to_json(),
received_on=datetime.datetime.utcnow(),
)
# this had better not fail, don't think it ever has
# if it does, nothing's saved and we get a 500
if process:
process(xform)
lock = acquire_lock_for_xform(_id)
with ReleaseOnError(lock):
if _id in XFormInstance.get_db():
raise DuplicateError(xform)
return LockManager(xform, lock)
def process_xform(instance, attachments=None, process=None, domain=None):
"""
    Create a new xform, ready to be saved to couchdb, in a thread-safe manner
Returns a LockManager containing the new XFormInstance and its lock,
or raises an exception if anything goes wrong.
attachments is a dictionary of the request.FILES that are not the xform;
key is parameter name, value is django MemoryFile object stream
"""
attachments = attachments or {}
try:
xform_lock = create_xform(instance, process=process, attachments=attachments)
except couchforms.XMLSyntaxError as e:
xform = _log_hard_failure(instance, process, e)
raise SubmissionError(xform)
except DuplicateError as e:
return _handle_id_conflict(instance, e.xform, domain)
return MultiLockManager([xform_lock])
def _has_errors(response, errors):
return errors or "error" in response
def _assign_new_id_and_lock(xform):
new_id = XFormInstance.get_db().server.next_uuid()
xform._id = new_id
lock = acquire_lock_for_xform(new_id)
return MultiLockManager([LockManager(xform, lock)])
def _handle_id_conflict(instance, xform, domain):
"""
    For id conflicts, we check if the files contain exactly the same content.
If they do, we just log this as a dupe. If they don't, we deprecate the
previous form and overwrite it with the new form's contents.
"""
assert domain
conflict_id = xform._id
existing_doc = XFormInstance.get_db().get(conflict_id, attachments=True)
if existing_doc.get('domain') != domain or existing_doc.get('doc_type') not in doc_types():
# the same form was submitted to two domains, or a form was submitted with
# an ID that belonged to a different doc type. these are likely developers
# manually testing or broken API users. just resubmit with a generated ID.
return _assign_new_id_and_lock(xform)
else:
# It looks like a duplicate/edit in the same domain so pursue that workflow.
existing_doc = XFormInstance.wrap(existing_doc)
return _handle_duplicate(existing_doc, xform, instance)
def _handle_duplicate(existing_doc, new_doc, instance):
"""
Handle duplicate xforms and xform editing ('deprecation')
existing doc *must* be validated as an XFormInstance in the right domain
and *must* include inline attachments
"""
conflict_id = existing_doc.get_id
existing_md5 = existing_doc.xml_md5()
new_md5 = hashlib.md5(instance).hexdigest()
if existing_md5 != new_md5:
# if the form contents are not the same:
# - "Deprecate" the old form by making a new document with the same contents
# but a different ID and a doc_type of XFormDeprecated
# - Save the new instance to the previous document to preserve the ID
old_id = existing_doc._id
multi_lock_manager = _assign_new_id_and_lock(new_doc)
# swap the two documents so the original ID now refers to the new one
# and mark original as deprecated
new_doc._id, existing_doc._id = old_id, new_doc._id
new_doc._rev, existing_doc._rev = existing_doc._rev, new_doc._rev
# flag the old doc with metadata pointing to the new one
existing_doc.doc_type = deprecation_type()
existing_doc.orig_id = old_id
# and give the new doc server data of the old one and some metadata
new_doc.received_on = existing_doc.received_on
new_doc.deprecated_form_id = existing_doc._id
new_doc.edited_on = datetime.datetime.utcnow()
multi_lock_manager.append(
LockManager(existing_doc,
acquire_lock_for_xform(old_id))
)
return multi_lock_manager
else:
# follow standard dupe handling, which simply saves a copy of the form
# but a new doc_id, and a doc_type of XFormDuplicate
new_doc.doc_type = XFormDuplicate.__name__
dupe = XFormDuplicate.wrap(new_doc.to_json())
dupe.problem = "Form is a duplicate of another! (%s)" % conflict_id
return _assign_new_id_and_lock(dupe)
def is_deprecation(xform):
return xform.doc_type == deprecation_type()
def deprecation_type():
return XFormDeprecated.__name__
def is_override(xform):
# it's an override if we've explicitly set the "deprecated_form_id" property on it.
return bool(getattr(xform, 'deprecated_form_id', None))
def _log_hard_failure(instance, process, error):
"""
    Handles a hard failure from posting a form to couch.
Currently, it will save the raw payload to couch in a hard-failure doc
and return that doc.
"""
try:
message = unicode(error)
except UnicodeDecodeError:
message = unicode(str(error), encoding='utf-8')
error_log = SubmissionErrorLog.from_instance(instance, message)
if process:
process(error_log)
error_log.save()
return error_log
def scrub_meta(xform):
"""
Cleans up old format metadata to our current standard.
Does NOT save the doc, but returns whether the doc needs to be saved.
"""
property_map = {'TimeStart': 'timeStart',
'TimeEnd': 'timeEnd',
'chw_id': 'userID',
'DeviceID': 'deviceID',
'uid': 'instanceID'}
if not hasattr(xform, 'form'):
return
# hack to make sure uppercase meta still ends up in the right place
found_old = False
if 'Meta' in xform.form:
xform.form['meta'] = xform.form['Meta']
del xform.form['Meta']
found_old = True
if 'meta' in xform.form:
meta_block = xform.form['meta']
# scrub values from 0.9 to 1.0
if isinstance(meta_block, list):
if isinstance(meta_block[0], dict):
# if it's a list of dictionaries, arbitrarily pick the first one
# this is a pretty serious error, but it's also recoverable
xform.form['meta'] = meta_block = meta_block[0]
logging.error((
'form %s contains multiple meta blocks. '
                    'this is not correct but we picked one arbitrarily'
) % xform.get_id)
else:
# if it's a list of something other than dictionaries.
# don't bother scrubbing.
                logging.error('form %s contains a poorly structured meta block. '
                              'this might cause data display problems.' % xform.get_id)
if isinstance(meta_block, dict):
for key in meta_block:
if key in property_map and property_map[key] not in meta_block:
meta_block[property_map[key]] = meta_block[key]
del meta_block[key]
found_old = True
return found_old
class SubmissionPost(object):
failed_auth_response = HttpResponseForbidden('Bad auth')
def __init__(self, instance=None, attachments=None, auth_context=None,
domain=None, app_id=None, build_id=None, path=None,
location=None, submit_ip=None, openrosa_headers=None,
last_sync_token=None, received_on=None, date_header=None):
assert domain, domain
assert instance, instance
assert not isinstance(instance, HttpRequest), instance
self.domain = domain
self.app_id = app_id
self.build_id = build_id
# get_location has good default
self.location = location or couchforms.get_location()
self.received_on = received_on
self.date_header = date_header
self.submit_ip = submit_ip
self.last_sync_token = last_sync_token
self.openrosa_headers = openrosa_headers or {}
self.instance = instance
self.attachments = attachments or {}
self.auth_context = auth_context or DefaultAuthContext()
self.path = path
def _attach_shared_props(self, doc):
# attaches shared properties of the request to the document.
# used on forms and errors
doc.auth_context = self.auth_context.to_json()
doc.submit_ip = self.submit_ip
doc.path = self.path
doc.openrosa_headers = self.openrosa_headers
doc.last_sync_token = self.last_sync_token
if self.received_on:
doc.received_on = self.received_on
if self.date_header:
doc.date_header = self.date_header
doc.domain = self.domain
doc.app_id = self.app_id
doc.build_id = self.build_id
doc.export_tag = ["domain", "xmlns"]
return doc
def run(self):
if timezone_migration_in_progress(self.domain):
# keep submissions on the phone
# until ready to start accepting again
return HttpResponse(status=503), None, []
if not self.auth_context.is_valid():
return self.failed_auth_response, None, []
if isinstance(self.instance, BadRequest):
return HttpResponseBadRequest(self.instance.message), None, []
def process(xform):
self._attach_shared_props(xform)
if xform.doc_type != 'SubmissionErrorLog':
found_old = scrub_meta(xform)
legacy_soft_assert(not found_old, 'Form with old metadata submitted', xform._id)
try:
lock_manager = process_xform(self.instance,
attachments=self.attachments,
process=process,
domain=self.domain)
except SubmissionError as e:
logging.exception(
u"Problem receiving submission to %s. %s" % (
self.path,
unicode(e),
)
)
return self.get_exception_response(e.error_log), None, []
else:
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.xform import (
get_and_check_xform_domain, CaseDbCache, process_cases_with_casedb
)
from casexml.apps.case.signals import case_post_save
from casexml.apps.case.exceptions import IllegalCaseId, UsesReferrals
from corehq.apps.commtrack.processing import process_stock
from corehq.apps.commtrack.exceptions import MissingProductId
cases = []
responses = []
errors = []
known_errors = (IllegalCaseId, UsesReferrals, MissingProductId,
PhoneDateValueError)
with lock_manager as xforms:
instance = xforms[0]
if instance.doc_type == 'XFormInstance':
if len(xforms) > 1:
assert len(xforms) == 2
assert is_deprecation(xforms[1])
domain = get_and_check_xform_domain(instance)
with CaseDbCache(domain=domain, lock=True, deleted_ok=True, xforms=xforms) as case_db:
try:
case_result = process_cases_with_casedb(xforms, case_db)
stock_result = process_stock(xforms, case_db)
except known_errors as e:
# errors we know about related to the content of the form
# log the error and respond with a success code so that the phone doesn't
# keep trying to send the form
instance = _handle_known_error(e, instance)
xforms[0] = instance
# this is usually just one document, but if an edit errored we want
# to save the deprecated form as well
XFormInstance.get_db().bulk_save(xforms)
response = self._get_open_rosa_response(
instance, None)
return response, instance, cases
except Exception as e:
# handle / log the error and reraise so the phone knows to resubmit
# note that in the case of edit submissions this won't flag the previous
# submission as having been edited. this is intentional, since we should treat
# this use case as if the edit "failed"
error_message = u'{}: {}'.format(type(e).__name__, unicode(e))
instance = _handle_unexpected_error(instance, error_message)
instance.save()
raise
now = datetime.datetime.utcnow()
unfinished_submission_stub = UnfinishedSubmissionStub(
xform_id=instance.get_id,
timestamp=now,
saved=False,
domain=domain,
)
unfinished_submission_stub.save()
cases = case_db.get_changed()
# todo: this property is only used by the MVPFormIndicatorPillow
instance.initial_processing_complete = True
# in saving the cases, we have to do all the things
# done in CommCareCase.save()
for case in cases:
legacy_soft_assert(case.version == "2.0", "v1.0 case updated", case.case_id)
case.initial_processing_complete = True
case.server_modified_on = now
try:
rev = CommCareCase.get_db().get_rev(case.case_id)
except ResourceNotFound:
pass
else:
assert rev == case.get_rev, (
"Aborting because there would have been "
"a document update conflict. {} {} {}".format(
case.get_id, case.get_rev, rev
)
)
# verify that these DB's are the same so that we can save them with one call to bulk_save
assert XFormInstance.get_db().uri == CommCareCase.get_db().uri
docs = xforms + cases
try:
XFormInstance.get_db().bulk_save(docs)
except BulkSaveError as e:
logging.error('BulkSaveError saving forms', exc_info=1,
extra={'details': {'errors': e.errors}})
raise
except Exception as e:
docs_being_saved = [doc['_id'] for doc in docs]
error_message = u'Unexpected error bulk saving docs {}: {}, doc_ids: {}'.format(
type(e).__name__,
unicode(e),
', '.join(docs_being_saved)
)
instance = _handle_unexpected_error(instance, error_message)
instance.save()
raise
unfinished_submission_stub.saved = True
unfinished_submission_stub.save()
case_result.commit_dirtiness_flags()
stock_result.commit()
for case in cases:
case_post_save.send(CommCareCase, case=case)
errors = self.process_signals(instance)
if errors:
                        # .problem was added to instance
instance.save()
unfinished_submission_stub.delete()
elif instance.doc_type == 'XFormDuplicate':
assert len(xforms) == 1
instance.save()
response = self._get_open_rosa_response(instance, errors)
return response, instance, cases
def get_response(self):
response, _, _ = self.run()
return response
@staticmethod
def process_signals(instance):
feedback = successful_form_received.send_robust(None, xform=instance)
errors = []
for func, resp in feedback:
if resp and isinstance(resp, Exception):
error_message = unicode(resp)
logging.error((
u"Receiver app: problem sending "
u"post-save signal %s for xform %s: %s: %s"
) % (func, instance._id, type(resp).__name__, error_message))
errors.append(error_message)
if errors:
instance.problem = ", ".join(errors)
return errors
@staticmethod
def get_failed_auth_response():
return HttpResponseForbidden('Bad auth')
def _get_open_rosa_response(self, instance, errors):
if instance.doc_type == "XFormInstance":
response = self.get_success_response(instance, errors)
else:
response = self.get_failure_response(instance)
# this hack is required for ODK
response["Location"] = self.location
# this is a magic thing that we add
response['X-CommCareHQ-FormID'] = instance.get_id
return response
@staticmethod
def get_success_response(doc, errors):
if errors:
response = OpenRosaResponse(
message=doc.problem,
nature=ResponseNature.SUBMIT_ERROR,
status=201,
).response()
else:
response = OpenRosaResponse(
# would have done ✓ but our test Nokias' fonts don't have that character
message=u' √ ',
nature=ResponseNature.SUBMIT_SUCCESS,
status=201,
).response()
return response
@staticmethod
def get_failure_response(doc):
return OpenRosaResponse(
message=doc.problem,
nature=ResponseNature.SUBMIT_ERROR,
status=201,
).response()
@staticmethod
def get_exception_response(error_log):
return OpenRosaResponse(
            message=("The server got itself into big trouble! "
"Details: %s" % error_log.problem),
nature=ResponseNature.SUBMIT_ERROR,
status=500,
).response()
def _handle_known_error(e, instance):
error_message = '{}: {}'.format(
type(e).__name__, unicode(e))
logging.exception((
u"Warning in case or stock processing "
u"for form {}: {}."
).format(instance._id, error_message))
return XFormError.from_xform_instance(instance, error_message)
def _handle_unexpected_error(instance, error_message):
# The following code saves the xform instance
# as an XFormError, with a different ID.
# That's because if you save with the original ID
# and then resubmit, the new submission never has a
# chance to get reprocessed; it'll just get saved as
# a duplicate.
instance = XFormError.from_xform_instance(instance, error_message, with_new_id=True)
notify_exception(None, (
u"Error in case or stock processing "
u"for form {}: {}. "
u"Error saved as {}"
).format(instance.orig_id, error_message, instance._id))
return instance
def fetch_and_wrap_form(doc_id):
# This logic is independent of couchforms; when it moves elsewhere,
# please use the most appropriate alternative to get a DB handle.
db = XFormInstance.get_db()
doc = db.get(doc_id)
if doc['doc_type'] in doc_types():
return doc_types()[doc['doc_type']].wrap(doc)
if doc['doc_type'] == "%s%s" % (XFormInstance.__name__, DELETED_SUFFIX):
raise UnexpectedDeletedXForm(doc_id)
raise ResourceNotFound(doc_id)
def spoof_submission(submit_url, body, name="form.xml", hqsubmission=True,
headers=None):
if headers is None:
headers = {}
client = Client()
f = StringIO(body.encode('utf-8'))
f.name = name
response = client.post(submit_url, {
'xml_submission_file': f,
}, **headers)
if hqsubmission:
xform_id = response['X-CommCareHQ-FormID']
xform = XFormInstance.get(xform_id)
xform['doc_type'] = "HQSubmission"
xform.save()
return response
```
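A condensed, illustrative restatement of the duplicate-versus-edit branch in `_handle_duplicate` above. This is a toy mirror of the decision only, not the real persistence logic; the helper name is made up for the sketch.
```python
import hashlib
def _decide(existing_xml, new_xml):
    # Same payload -> plain duplicate: the copy gets a new id and an
    # XFormDuplicate doc_type, and the original document is left untouched.
    if hashlib.md5(existing_xml).hexdigest() == hashlib.md5(new_xml).hexdigest():
        return 'duplicate'
    # Different payload -> edit: the old contents move to a new id as an
    # XFormDeprecated doc, and the original id receives the new submission.
    return 'edit (deprecate old contents)'
print _decide('<data/>', '<data/>')          # duplicate
print _decide('<data/>', '<data>2</data>')   # edit (deprecate old contents)
```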
#### File: corehq/form_processor/generic.py
```python
import datetime
import re
from dimagi.ext.jsonobject import (
JsonObject,
StringProperty,
DictProperty,
BooleanProperty,
DateTimeProperty,
ListProperty,
IntegerProperty,
)
from jsonobject.base import DefaultProperty
from casexml.apps.case import const
from couchforms.jsonobject_extensions import GeoPointProperty
from dimagi.utils.decorators.memoized import memoized
class GenericXFormOperation(JsonObject):
"""
Simple structure to represent something happening to a form.
Currently used just by the archive workflow.
"""
user = StringProperty()
date = DateTimeProperty(default=datetime.datetime.utcnow)
operation = StringProperty() # e.g. "archived", "unarchived"
class GenericMetadata(JsonObject):
"""
Metadata of an xform, from a meta block structured like:
<Meta>
<timeStart />
<timeEnd />
<instanceID />
<userID />
<deviceID />
<deprecatedID />
<username />
<!-- CommCare extension -->
<appVersion />
<location />
</Meta>
See spec: https://bitbucket.org/javarosa/javarosa/wiki/OpenRosaMetaDataSchema
username is not part of the spec but included for convenience
"""
timeStart = DateTimeProperty()
timeEnd = DateTimeProperty()
instanceID = StringProperty()
userID = StringProperty()
deviceID = StringProperty()
deprecatedID = StringProperty()
username = StringProperty()
appVersion = StringProperty()
location = GeoPointProperty()
class GenericXFormInstance(JsonObject):
"""A generic JSON representation of an XForm"""
id = StringProperty()
domain = StringProperty()
app_id = StringProperty()
orig_id = StringProperty()
deprecated_form_id = StringProperty()
xmlns = StringProperty()
form = DictProperty()
received_on = DateTimeProperty()
# Used to tag forms that were forcefully submitted
# without a touchforms session completing normally
partial_submission = BooleanProperty(default=False)
history = ListProperty(GenericXFormOperation)
auth_context = DictProperty()
submit_ip = StringProperty()
path = StringProperty()
openrosa_headers = DictProperty()
last_sync_token = StringProperty()
# almost always a datetime, but if it's not parseable it'll be a string
date_header = DefaultProperty()
build_id = StringProperty()
export_tag = DefaultProperty(name='#export_tag')
is_error = BooleanProperty(default=False)
is_duplicate = BooleanProperty(default=False)
is_deprecated = BooleanProperty(default=False)
is_archived = BooleanProperty(default=False)
_metadata = None
@property
def metadata(self):
return self._metadata
def get_data(self, xpath):
"""
Get data from a document from an xpath, returning None if the value isn't found.
Copied from safe_index
"""
return GenericXFormInstance._get_data(self, xpath.split('/'))
@staticmethod
def _get_data(xform, keys):
if len(keys) == 1:
# first check dict lookups, in case of conflicting property names
# with methods (e.g. case/update --> a dict's update method when
            # it should be the case block's update block).
try:
if keys[0] in xform:
return xform[keys[0]]
except Exception:
return getattr(xform, keys[0], None)
else:
return GenericXFormInstance._get_data(GenericXFormInstance._get_data(xform, [keys[0]]), keys[1:])
class GenericFormAttachment(JsonObject):
name = StringProperty()
content = StringProperty()
class GenericCommCareCaseIndex(JsonObject):
identifier = StringProperty()
referenced_type = StringProperty()
referenced_id = StringProperty()
# relationship = "child" for index to a parent case (default)
# relationship = "extension" for index to a host case
relationship = StringProperty('child', choices=['child', 'extension'])
class GenericCommCareCaseAttachment(JsonObject):
identifier = StringProperty()
attachment_src = StringProperty()
attachment_from = StringProperty()
attachment_name = StringProperty()
server_mime = StringProperty() # Server detected MIME
server_md5 = StringProperty() # Couch detected hash
attachment_size = IntegerProperty() # file size
attachment_properties = DictProperty() # width, height, other relevant metadata
class GenericCommCareCaseAction(JsonObject):
action_type = StringProperty(choices=list(const.CASE_ACTIONS))
user_id = StringProperty()
date = DateTimeProperty()
server_date = DateTimeProperty()
xform_id = StringProperty()
xform_xmlns = StringProperty()
xform_name = StringProperty()
sync_log_id = StringProperty()
updated_known_properties = DictProperty()
updated_unknown_properties = DictProperty()
indices = ListProperty(GenericCommCareCaseIndex)
attachments = DictProperty(GenericCommCareCaseAttachment)
deprecated = False
class GenericCommCareCase(JsonObject):
"""
A case, taken from casexml. This represents the latest
representation of the case - the result of playing all
the actions in sequence.
"""
id = StringProperty()
domain = StringProperty()
export_tag = ListProperty(unicode)
xform_ids = ListProperty(unicode)
external_id = StringProperty()
opened_on = DateTimeProperty()
modified_on = DateTimeProperty()
type = StringProperty()
closed = BooleanProperty(default=False)
closed_on = DateTimeProperty()
user_id = StringProperty()
owner_id = StringProperty()
opened_by = StringProperty()
closed_by = StringProperty()
actions = ListProperty(GenericCommCareCaseAction)
name = StringProperty()
version = StringProperty()
indices = ListProperty(GenericCommCareCaseIndex)
case_attachments = DictProperty(GenericCommCareCaseAttachment)
server_modified_on = DateTimeProperty()
@property
def case_id(self):
return self.id
@property
@memoized
def reverse_indices(self):
from corehq.form_processor.interfaces import FormProcessorInterface
return FormProcessorInterface.get_reverse_indices(self.domain, self.id)
def has_index(self, id):
return id in (i.identifier for i in self.indices)
def get_index(self, id):
found = filter(lambda i: i.identifier == id, self.indices)
if found:
assert(len(found) == 1)
return found[0]
return None
def dynamic_case_properties(self):
"""(key, value) tuples sorted by key"""
from jsonobject.base import get_dynamic_properties
json = self.to_json()
wrapped_case = self
if type(self) != GenericCommCareCase:
wrapped_case = GenericCommCareCase.wrap(self._doc)
# should these be removed before converting to generic?
exclude = ['computed_modified_on_', 'computed_', 'doc_type', 'initial_processing_complete']
return sorted([
(key, json[key]) for key in get_dynamic_properties(wrapped_case)
if re.search(r'^[a-zA-Z]', key) and key not in exclude
])
```
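A hedged illustration of `GenericXFormInstance.get_data` above; the values are made up, and the module path follows the `#### File:` header.
```python
from corehq.form_processor.generic import GenericXFormInstance
xform = GenericXFormInstance(form={'meta': {'instanceID': 'abc123'}})
value = xform.get_data('form/meta/instanceID')   # 'abc123'
missing = xform.get_data('form/meta/not-there')  # None -- lookups never raise
```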
#### File: smsbackends/megamobile/views.py
```python
from corehq.apps.sms.api import incoming as incoming_sms
from corehq.messaging.smsbackends.megamobile.api import MegamobileBackend
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def sms_in(request):
pid = request.GET.get("pid", None)
msg = request.GET.get("msg", None)
cel = request.GET.get("cel", None)
tcs = request.GET.get("tcs", None)
megamobile_attrs = {
"megamobile_pid" : pid,
"megamobile_tcs" : tcs,
}
phone_number = "%s%s" % ("63", cel)
incoming_sms(
phone_number,
msg,
MegamobileBackend.get_api_id(),
backend_attributes=megamobile_attrs
)
return HttpResponse("")
```
#### File: smsbackends/tropo/views.py
```python
import json
from .api import TropoBackend
from tropo import Tropo
from corehq.apps.ivr.api import incoming as incoming_call
from corehq.apps.sms.api import incoming as incoming_sms
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from corehq.apps.sms.mixin import VerifiedNumber
from corehq.apps.sms.models import CallLog, INCOMING, OUTGOING
from datetime import datetime
from corehq.apps.sms.util import strip_plus
@csrf_exempt
def sms_in(request):
"""
Handles tropo messaging requests
"""
if request.method == "POST":
data = json.loads(request.body)
session = data["session"]
# Handle when Tropo posts to us to send an SMS
if "parameters" in session:
params = session["parameters"]
if ("_send_sms" in params) and ("numberToDial" in params) and ("msg" in params):
numberToDial = params["numberToDial"]
msg = params["msg"]
t = Tropo()
t.call(to = numberToDial, network = "SMS")
t.say(msg)
return HttpResponse(t.RenderJson())
# Handle incoming SMS
phone_number = None
text = None
if "from" in session:
phone_number = session["from"]["id"]
if "initialText" in session:
text = session["initialText"]
if phone_number is not None and len(phone_number) > 1:
if phone_number[0] == "+":
phone_number = phone_number[1:]
incoming_sms(phone_number, text, TropoBackend.get_api_id())
t = Tropo()
t.hangup()
return HttpResponse(t.RenderJson())
else:
return HttpResponseBadRequest("Bad Request")
@csrf_exempt
def ivr_in(request):
"""
Handles tropo call requests
"""
if request.method == "POST":
data = json.loads(request.body)
phone_number = data["session"]["from"]["id"]
# TODO: Implement tropo as an ivr backend. In the meantime, just log the call.
if phone_number:
cleaned_number = strip_plus(phone_number)
v = VerifiedNumber.by_extensive_search(cleaned_number)
else:
v = None
# Save the call entry
msg = CallLog(
phone_number = cleaned_number,
direction = INCOMING,
date = datetime.utcnow(),
backend_api = TropoBackend.get_api_id(),
)
if v is not None:
msg.domain = v.domain
msg.couch_recipient_doc_type = v.owner_doc_type
msg.couch_recipient = v.owner_id
msg.save()
t = Tropo()
t.reject()
return HttpResponse(t.RenderJson())
else:
return HttpResponseBadRequest("Bad Request")
```
#### File: util/global_request/api.py
```python
import threading
_thread_local = threading.local()
def get_request():
try:
return _thread_local.request
except AttributeError:
return None
def set_request(request):
_thread_local.request = request
```
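A tiny, hedged sketch of how the thread-local accessor above is typically populated. The middleware class here is hypothetical and the import path is assumed from the `#### File:` header; `_domain_has_legacy_toggle_set` earlier in this document is one consumer of `get_request()`.
```python
from corehq.util.global_request.api import get_request, set_request
class GlobalRequestMiddlewareSketch(object):
    # Hypothetical Django middleware: stash each request in the thread-local
    # so code deep in the call stack can read it via get_request().
    def process_request(self, request):
        set_request(request)
    def process_response(self, request, response):
        set_request(None)  # avoid leaking the request into later work on this thread
        return response
```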
#### File: util/spreadsheets/excel.py
```python
from tempfile import NamedTemporaryFile
import openpyxl
class JSONReaderError(Exception):
pass
class HeaderValueError(Exception):
pass
class IteratorJSONReader(object):
"""
>>> def normalize(it):
... r = []
... for row in IteratorJSONReader(it):
... r.append(sorted(row.items()))
... return r
>>> normalize([])
[]
>>> normalize([['A', 'B', 'C'], ['1', '2', '3']])
[[('A', '1'), ('B', '2'), ('C', '3')]]
>>> normalize([['A', 'data: key', 'user 1', 'user 2', 'is-ok?'],
... ['1', '2', '3', '4', 'yes']])
[[('A', '1'), ('data', {'key': '2'}), ('is-ok', True), ('user', ['3', '4'])]]
"""
def __init__(self, rows):
# you can only call __iter__ once
self._rows = iter(rows)
try:
self.headers = list(self._rows.next())
except StopIteration:
self.headers = []
self.fieldnames = self.get_fieldnames()
def row_to_json(self, row):
obj = {}
for value, header in zip(row, self.headers):
self.set_field_value(obj, header, value)
return obj
def __iter__(self):
try:
for row in self._rows:
yield self.row_to_json(row)
finally:
del self._rows
def get_fieldnames(self):
obj = {}
for field, value in zip(self.headers, [''] * len(self.headers)):
if not isinstance(field, basestring):
raise HeaderValueError(u'Field %s is not a string.' % field)
self.set_field_value(obj, field, value)
return obj.keys()
@classmethod
def set_field_value(cls, obj, field, value):
if isinstance(value, basestring):
value = value.strip()
# try dict
try:
field, subfield = field.split(':')
except Exception:
pass
else:
field = field.strip()
if field not in obj:
obj[field] = {}
cls.set_field_value(obj[field], subfield, value)
return
# try list
try:
field, _ = field.split()
except Exception:
pass
else:
dud = {}
cls.set_field_value(dud, field, value)
(field, value), = dud.items()
if field not in obj:
obj[field] = []
if value not in (None, ''):
obj[field].append(value)
return
# else flat
# try boolean
try:
field, nothing = field.split('?')
assert(nothing.strip() == '')
except Exception:
pass
else:
try:
value = {
'yes': True,
'true': True,
'no': False,
'false': False,
'': False,
None: False,
}[value.lower() if hasattr(value, 'lower') else value]
except KeyError:
raise JSONReaderError(
'Values for field %s must be "yes" or "no", not "%s"' % (
field, value)
)
# set for any flat type
field = field.strip()
if field in obj:
raise JSONReaderError(
'You have a repeat field: %s' % field
)
obj[field] = value
class WorksheetNotFound(Exception):
def __init__(self, title):
self.title = title
super(WorksheetNotFound, self).__init__()
class WorksheetJSONReader(IteratorJSONReader):
def __init__(self, worksheet, title=None):
width = 0
self.title = title
self.worksheet = worksheet
try:
header_row = self.worksheet.iter_rows().next()
except StopIteration:
header_row = []
for cell in header_row:
if cell.value is None:
break
else:
width += 1
self.worksheet.calculate_dimension(force=True)
def iterator():
def _convert_float(value):
"""
excel doesn't distinguish between 1 and 1.0
if it can be an integer assume it is
"""
if isinstance(value, float) and int(value) == value:
return int(value)
else:
return value or ''
for row in self.worksheet.iter_rows():
cell_values = [
_convert_float(cell.value)
for cell in row[:width]
]
if not any(cell_values):
break
yield cell_values
super(WorksheetJSONReader, self).__init__(iterator())
class WorkbookJSONReader(object):
def __init__(self, f):
if isinstance(f, basestring):
filename = f
elif not isinstance(f, file):
tmp = NamedTemporaryFile(mode='wb', suffix='.xlsx', delete=False)
filename = tmp.name
tmp.write(f.read())
tmp.close()
else:
filename = f
self.wb = openpyxl.load_workbook(filename, use_iterators=True)
self.worksheets_by_title = {}
self.worksheets = []
for worksheet in self.wb.worksheets:
ws = WorksheetJSONReader(worksheet, title=worksheet.title)
self.worksheets_by_title[worksheet.title] = ws
self.worksheets.append(ws)
def get_worksheet(self, title=None, index=None):
if title is not None and index is not None:
raise TypeError("Can only get worksheet by title *or* index")
if title:
try:
return self.worksheets_by_title[title]
except KeyError:
raise WorksheetNotFound(title=title)
elif index:
try:
return self.worksheets[index]
except IndexError:
raise WorksheetNotFound(title=index)
else:
try:
return self.worksheets[0]
except IndexError:
raise WorksheetNotFound(title=0)
def work_book_headers_as_tuples(self):
"""
        Returns raw sheet headers in the following format:
(("employee", ("id", "name", "gender")),
("building", ("id", "name", "address")))
"""
all_sheet_headers = []
for sheet in self.worksheets:
all_sheet_headers.append(
(sheet.title, tuple(sheet.headers))
)
return tuple(all_sheet_headers)
def work_book_data_as_tuples(self):
"""
        Note: This is useful to get an xlsx file's data into an easily readable format.
        Exists only to migrate tests using xlsx files to use the following format.
        Returns raw sheet data in the following format:
(("employee", (("1", "cory", "m"),
("2", "christian", "m"),
("3", "amelia", "f"))),
("building", (("1", "dimagi", "585 mass ave."),
("2", "old dimagi", "529 main st."))))
"""
all_sheet_data = []
for sheet in self.worksheets:
current_sheet_data = []
for row in sheet:
values = [row.get(header) for header in sheet.headers]
current_sheet_data.append(tuple(values))
all_sheet_data.append(
(sheet.title, tuple(current_sheet_data))
)
return tuple(all_sheet_data)
def flatten_json_to_path(obj, path=()):
if isinstance(obj, dict):
for key, value in obj.items():
for item in flatten_json_to_path(value, path + (key,)):
yield item
elif isinstance(obj, list):
for key, value in enumerate(obj):
for item in flatten_json_to_path(value, path + (key,)):
yield item
else:
yield (path, obj)
def format_header(path, value):
# pretty sure making a string-builder would be slower than concatenation
s = path[0]
for p in path[1:]:
if isinstance(p, basestring):
s += ': %s' % p
elif isinstance(p, int):
s += ' %s' % (p + 1)
if isinstance(value, bool):
s += '?'
value = 'yes' if value else 'no'
return s, value
def flatten_json(obj):
for key, value in flatten_json_to_path(obj):
yield format_header(key, value)
def json_to_headers(obj):
return [key for key, value in sorted(flatten_json(obj), key=lambda t: alphanumeric_sort_key(t[0]))]
def alphanumeric_sort_key(key):
"""
Sort the given iterable in the way that humans expect.
Thanks to http://stackoverflow.com/a/2669120/240553
"""
import re
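    # Illustrative (not part of the original source):
    #   sorted(['item10', 'item2', 'item1'], key=alphanumeric_sort_key)
    #   == ['item1', 'item2', 'item10']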
convert = lambda text: int(text) if text.isdigit() else text
return [convert(c) for c in re.split('([0-9]+)', key)]
```
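A hedged usage sketch of the JSON-flattening helpers at the bottom of the file above. The import path is an assumption extrapolated from the `#### File:` header; the expected values in the comments follow from `format_header` and `alphanumeric_sort_key` as written.
```python
# Hypothetical import path -- adjust to wherever excel.py actually lives.
from corehq.util.spreadsheets.excel import flatten_json, json_to_headers
row = {'user': ['amelia', 'brian'], 'is-ok': True, 'data': {'key': '2'}}
flattened = dict(flatten_json(row))
# == {'user 1': 'amelia', 'user 2': 'brian', 'is-ok?': 'yes', 'data: key': '2'}
headers = json_to_headers(row)
# == ['data: key', 'is-ok?', 'user 1', 'user 2']
```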
#### File: management/commands/bihar_create_backlogged_repeaters.py
```python
from datetime import datetime
import logging
from django.core.management.base import BaseCommand
from corehq.apps.hqcase.dbaccessors import get_cases_in_domain
from corehq.apps.receiverwrapper.models import CaseRepeater
class Command(BaseCommand):
"""
Creates the backlog of repeat records that were dropped when bihar repeater
infrastructure went down.
"""
def handle(self, *args, **options):
domain = 'care-bihar'
# forward all cases that were last modified between these dates
def should_forward_case(case):
min_date = datetime(2013, 9, 10)
max_date = datetime(2013, 11, 7)
return (case.server_modified_on
and min_date < case.server_modified_on < max_date)
prod_repeater = CaseRepeater.get('a478a5a3d8964338cb3124de77e3ec58')
success_count = 0
fail_count = 0
for case in get_cases_in_domain(domain):
try:
if should_forward_case(case):
prod_repeater.register(case)
success_count += 1
except Exception:
fail_count += 1
logging.exception('problem creating repeater stub for case %s' % case._id)
print 'successfully forwarded %s cases. %s were not processed' % (success_count, fail_count)
```
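A quick standalone check of the date-window predicate used by `should_forward_case` above; the candidate dates are made up, and the point is that both boundaries are exclusive.
```python
from datetime import datetime

# The window used above: strictly between min_date and max_date (chained comparison).
min_date, max_date = datetime(2013, 9, 10), datetime(2013, 11, 7)
for candidate in (datetime(2013, 9, 10), datetime(2013, 10, 1), datetime(2013, 11, 7)):
    print(candidate, min_date < candidate < max_date)
# only 2013-10-01 falls inside the window; the boundary dates themselves do not
```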
#### File: reports/indicators/fixtures.py
```python
from xml.etree import ElementTree
from django.utils.translation import ugettext as _
from corehq.apps.groups.models import Group
from corehq.util.translation import localize
from custom.bihar import BIHAR_DOMAINS
from custom.bihar.reports.indicators.indicators import IndicatorDataProvider, IndicatorConfig, INDICATOR_SETS
# meh
hard_coded_domains = BIHAR_DOMAINS
hard_coded_indicators = 'homevisit'
hard_coded_group_filter = lambda group: bool((group.metadata or {}).get('awc-code', False))
hard_coded_fixture_id = 'indicators:bihar-supervisor'
class IndicatorFixtureProvider(object):
id = hard_coded_fixture_id
def __call__(self, user, version, last_sync=None):
if user.domain in hard_coded_domains:
groups = filter(hard_coded_group_filter, Group.by_user(user))
if len(groups) == 1:
data_provider = IndicatorDataProvider(
domain=user.domain,
indicator_set=IndicatorConfig(INDICATOR_SETS).get_indicator_set(hard_coded_indicators),
groups=groups,
)
return [self.get_fixture(user, data_provider)]
return []
def get_fixture(self, user, data_provider):
"""
Generate a fixture representation of the indicator set. Something like the following:
<fixture id="indicators:bihar-supervisor" user_id="3ce8b1611c38e956d3b3b84dd3a7ac18">
<group id="1012aef098ab0c0" team="Samda Team 1">
<indicators>
<indicator id="bp">
<name>BP Visits last 30 days</name>
<done>25</done>
<due>22</due>
<clients>
<client id="a1029b09c090s9d173" status="done"></client>
<client id="bad7a1029b09c090s9" status="due"></client>
</clients>
</indicator>
</indicators>
</group>
</fixture>
"""
def _el(tag, text, attrib=None):
attrib = attrib or {}
el = ElementTree.Element(tag, attrib=attrib)
el.text = unicode(text)
return el
def _indicator_to_fixture(indicator):
ind_el = ElementTree.Element('indicator',
attrib={
'id': indicator.slug,
},
)
done, due = data_provider.get_indicator_data(indicator)
ind_el.append(_el('name', indicator.name, attrib={'lang': 'en'}))
ind_el.append(_el('name', _(indicator.name), attrib={'lang': 'hin'}))
ind_el.append(_el('done', done))
ind_el.append(_el('due', due))
clients = ElementTree.Element('clients')
for case_id, data in data_provider.get_case_data(indicator).items():
client = ElementTree.Element('client',
attrib={
'id': case_id,
'status': 'done' if data['num'] else 'due',
}
)
clients.append(client)
ind_el.append(clients)
return ind_el
# switch to hindi so we can use our builtin translations
with localize('hin'):
root = ElementTree.Element('fixture',
attrib={'id': self.id, 'user_id': user._id},
)
group = ElementTree.Element('group',
attrib={
'id': data_provider.groups[0]._id,
'team': data_provider.groups[0].name
},
)
root.append(group)
indicators = ElementTree.Element('indicators')
# hack: we have to have something with 'clients' show up first in the list
# context: http://manage.dimagi.com/default.asp?107569
sorted_indicators = sorted(data_provider.summary_indicators,
key=lambda indicator: -len(data_provider.get_case_data(indicator)))
for indicator in sorted_indicators:
indicators.append(_indicator_to_fixture(indicator))
group.append(indicators)
return root
generator = IndicatorFixtureProvider()
```
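A small standalone sketch of building and serializing a fixture tree shaped like the one `get_fixture` produces; all ids and values here are made-up placeholders, not real fixture data.
```python
from xml.etree import ElementTree

# Hand-built stand-in for the element tree returned by get_fixture().
root = ElementTree.Element('fixture', attrib={'id': 'indicators:bihar-supervisor', 'user_id': 'demo-user-id'})
group = ElementTree.SubElement(root, 'group', attrib={'id': 'demo-group-id', 'team': 'Samda Team 1'})
indicators = ElementTree.SubElement(group, 'indicators')
indicator = ElementTree.SubElement(indicators, 'indicator', attrib={'id': 'bp'})
ElementTree.SubElement(indicator, 'name', attrib={'lang': 'en'}).text = 'BP Visits last 30 days'
ElementTree.SubElement(indicator, 'done').text = '25'
ElementTree.SubElement(indicator, 'due').text = '22'

# Serialize for inspection; this mirrors the XML sketched in the docstring above.
print(ElementTree.tostring(root))
```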
#### File: custom/ewsghana/forms.py
```python
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import get_template
from corehq.apps.reminders.forms import BroadcastForm
from corehq.apps.reminders.models import (RECIPIENT_USER_GROUP,
RECIPIENT_LOCATION)
from crispy_forms import layout as crispy
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _, ugettext_lazy
from custom.ewsghana.models import EWSExtension
ROLE_ALL = '(any role)'
ROLE_IN_CHARGE = 'In Charge'
ROLE_NURSE = 'Nurse'
ROLE_PHARMACIST = 'Pharmacist'
ROLE_LABORATORY_STAFF = 'Laboratory Staff'
ROLE_OTHER = 'Other'
ROLE_FACILITY_MANAGER = 'Facility Manager'
EWS_USER_ROLES = (
ROLE_ALL,
ROLE_IN_CHARGE,
ROLE_NURSE,
ROLE_PHARMACIST,
ROLE_LABORATORY_STAFF,
ROLE_OTHER,
ROLE_FACILITY_MANAGER,
)
class InputStockForm(forms.Form):
product_id = forms.CharField(widget=forms.HiddenInput())
product = forms.CharField(widget=forms.HiddenInput(), required=False)
stock_on_hand = forms.IntegerField(min_value=0, required=False)
receipts = forms.IntegerField(min_value=0, initial=0, required=False)
units = forms.CharField(required=False)
monthly_consumption = forms.IntegerField(required=False, widget=forms.HiddenInput())
class EWSBroadcastForm(BroadcastForm):
role = forms.ChoiceField(
required=False,
label=ugettext_lazy('Send to users with role'),
choices=((role, ugettext_lazy(role)) for role in EWS_USER_ROLES),
)
@property
def crispy_recipient_fields(self):
fields = super(EWSBroadcastForm, self).crispy_recipient_fields
fields.append(
crispy.Div(
crispy.Field(
'role',
data_bind='value: role',
),
data_bind='visible: showUserGroupSelect() || showLocationSelect()',
)
)
return fields
def clean_role(self):
if self.cleaned_data.get('recipient_type') not in (RECIPIENT_USER_GROUP,
RECIPIENT_LOCATION):
return None
value = self.cleaned_data.get('role')
if value not in EWS_USER_ROLES:
raise ValidationError(_('Invalid choice selected.'))
return value
def get_user_data_filter(self):
role = self.cleaned_data.get('role')
if role is None or role == ROLE_ALL:
return {}
else:
return {'role': [role]}
class FacilitiesSelectWidget(forms.Widget):
def __init__(self, attrs=None, domain=None, id='supply-point', multiselect=False):
super(FacilitiesSelectWidget, self).__init__(attrs)
self.domain = domain
self.id = id
self.multiselect = multiselect
def render(self, name, value, attrs=None):
return get_template('locations/manage/partials/autocomplete_select_widget.html').render(Context({
'id': self.id,
'name': name,
'value': value or '',
'query_url': reverse('custom.ewsghana.views.non_administrative_locations_for_select2',
args=[self.domain]),
'multiselect': self.multiselect,
}))
class EWSUserSettings(forms.Form):
facility = forms.CharField(required=False)
sms_notifications = forms.BooleanField(required=False, label='Needs SMS notifications')
def __init__(self, *args, **kwargs):
self.user_id = kwargs.pop('user_id')
domain = None
if 'domain' in kwargs:
domain = kwargs['domain']
del kwargs['domain']
super(EWSUserSettings, self).__init__(*args, **kwargs)
self.fields['facility'].widget = FacilitiesSelectWidget(domain=domain, id='facility')
def save(self, user, domain):
ews_extension = EWSExtension.objects.get_or_create(user_id=user.get_id)[0]
ews_extension.domain = domain
ews_extension.location_id = self.cleaned_data['facility']
ews_extension.sms_notifications = self.cleaned_data['sms_notifications']
ews_extension.save()
```
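A standalone sketch of the role-to-filter mapping implemented by `get_user_data_filter` above, without the Django form machinery; `user_data_filter_sketch` is a stand-in name for illustration, not part of the real module.
```python
ROLE_ALL = '(any role)'

def user_data_filter_sketch(role):
    # "(any role)" or no selection means no user-data filtering at all;
    # any other role narrows recipients to users tagged with that single role.
    if role is None or role == ROLE_ALL:
        return {}
    return {'role': [role]}

print(user_data_filter_sketch(None))          # {}
print(user_data_filter_sketch('Pharmacist'))  # {'role': ['Pharmacist']}
```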
#### File: tanzania/handlers/delivered.py
```python
from datetime import datetime
from corehq.apps.locations.dbaccessors import get_users_by_location_id
from corehq.apps.products.models import SQLProduct
from corehq.apps.sms.api import send_sms_to_verified_number
from custom.ilsgateway.tanzania.handlers.generic_stock_report_handler import GenericStockReportHandler
from custom.ilsgateway.tanzania.handlers.ils_stock_report_parser import Formatter
from custom.ilsgateway.models import SupplyPointStatus, SupplyPointStatusTypes, SupplyPointStatusValues
from custom.ilsgateway.tanzania.handlers.soh import parse_report
from custom.ilsgateway.tanzania.reminders import DELIVERY_CONFIRM_DISTRICT, DELIVERY_PARTIAL_CONFIRM, \
DELIVERY_CONFIRM_CHILDREN, DELIVERED_CONFIRM
class DeliveryFormatter(Formatter):
def format(self, text):
split_text = text.split(' ', 1)
keyword = split_text[0].lower()
content = ' '.join('{} {}'.format(code, amount) for code, amount in parse_report(split_text[1]))
if keyword in ['delivered', 'dlvd', 'nimepokea']:
text = 'delivered ' + content
return text
class DeliveredHandler(GenericStockReportHandler):
formatter = DeliveryFormatter
def _send_delivery_alert_to_facilities(self, location):
locs = [c.get_id for c in location.children]
users = []
for location_id in locs:
users.extend(get_users_by_location_id(self.domain, location_id))
for user in users:
if user.get_verified_number():
send_sms_to_verified_number(user.get_verified_number(), DELIVERY_CONFIRM_CHILDREN %
{"district_name": location.name})
def on_success(self):
SupplyPointStatus.objects.create(location_id=self.location_id,
status_type=SupplyPointStatusTypes.DELIVERY_FACILITY,
status_value=SupplyPointStatusValues.RECEIVED,
status_date=datetime.utcnow())
def get_message(self, data):
products = sorted([
(SQLProduct.objects.get(product_id=tx.product_id).code, tx.quantity)
for tx in data['transactions']
], key=lambda x: x[0])
return DELIVERED_CONFIRM % {'reply_list': ', '.join(
['{} {}'.format(product, quantity) for product, quantity in products]
)}
def help(self):
location = self.user.location
if not location:
return False
status_type = None
if location.location_type == 'FACILITY':
status_type = SupplyPointStatusTypes.DELIVERY_FACILITY
self.respond(DELIVERY_PARTIAL_CONFIRM)
elif location.location_type == 'DISTRICT':
status_type = SupplyPointStatusTypes.DELIVERY_DISTRICT
self._send_delivery_alert_to_facilities(location)
self.respond(DELIVERY_CONFIRM_DISTRICT, contact_name=self.user.first_name + " " + self.user.last_name,
facility_name=location.name)
SupplyPointStatus.objects.create(location_id=location.get_id,
status_type=status_type,
status_value=SupplyPointStatusValues.RECEIVED,
status_date=datetime.utcnow())
return True
```
#### File: tanzania/handlers/__init__.py
```python
from corehq.apps.commtrack.util import get_supply_point
def get_location(domain, user, site_code):
location = None
if user and user.location:
loc = user.location
location = get_supply_point(domain, loc=loc)
elif site_code:
location = get_supply_point(domain, site_code=site_code)
return location
```
#### File: tanzania/handlers/la.py
```python
from datetime import datetime
from casexml.apps.stock.const import TRANSACTION_TYPE_LA, SECTION_TYPE_STOCK
from casexml.apps.stock.models import StockTransaction, StockReport
from corehq.apps.commtrack.models import StockState
from corehq.apps.products.models import SQLProduct
from custom.ilsgateway.tanzania.handlers.keyword import KeywordHandler
from custom.ilsgateway.tanzania.handlers.soh import parse_report
from custom.ilsgateway.tanzania.reminders import LOSS_ADJUST_HELP, LOSS_ADJUST_BAD_FORMAT, LOSS_ADJUST_CONFIRM
class LossAndAdjustment(KeywordHandler):
def _create_stock_transaction(self, report, product_id, quantity):
current_soh = StockState.objects.get(product_id=product_id, case_id=self.case_id).stock_on_hand
StockTransaction.objects.create(
report=report,
section_id=SECTION_TYPE_STOCK,
type=TRANSACTION_TYPE_LA,
case_id=self.case_id,
product_id=product_id,
quantity=quantity,
stock_on_hand=current_soh + quantity
)
def handle(self):
keyword, content = self.msg.text.split(' ', 1)
parsed_report = parse_report(content)
        if not parsed_report:
            self.respond(LOSS_ADJUST_BAD_FORMAT)
            # bail out so an unparseable message does not produce an empty stock report
            return
report = StockReport.objects.create(
form_id='ilsgateway-xform',
date=datetime.utcnow(),
type='balance',
domain=self.domain
)
for product_code, quantity in parsed_report:
product_id = SQLProduct.objects.get(domain=self.domain, code=product_code).product_id
self._create_stock_transaction(report, product_id, quantity)
self.respond(LOSS_ADJUST_CONFIRM)
def help(self):
self.respond(LOSS_ADJUST_HELP)
```
#### File: tanzania/handlers/soh.py
```python
from datetime import datetime, timedelta
from re import findall
from strop import maketrans
from corehq.apps.commtrack.models import StockState
from corehq.apps.products.models import SQLProduct
from custom.ilsgateway.tanzania.handlers.generic_stock_report_handler import GenericStockReportHandler
from custom.ilsgateway.tanzania.handlers.ils_stock_report_parser import Formatter
from custom.ilsgateway.models import SupplyPointStatusTypes, SupplyPointStatusValues, SupplyPointStatus
from custom.ilsgateway.tanzania.reminders import SOH_HELP_MESSAGE, SOH_CONFIRM, SOH_PARTIAL_CONFIRM, SOH_BAD_FORMAT
def parse_report(val):
"""
PORTED FROM LOGISTICS:
Takes a product report string, such as "zi 10 co 20 la 30", and parses it into a list of tuples
of (code, quantity):
>>> parse_report("zi 10 co 20 la 30")
[('zi', 10), ('co', 20), ('la', 30)]
Properly handles arbitrary whitespace:
>>> parse_report("zi10 co20 la30")
[('zi', 10), ('co', 20), ('la', 30)]
Properly deals with Os being used for 0s:
>>> parse_report("zi1O co2O la3O")
[('zi', 10), ('co', 20), ('la', 30)]
Properly handles extra spam in the string:
>>> parse_report("randomextradata zi1O co2O la3O randomextradata")
[('zi', 10), ('co', 20), ('la', 30)]
"""
def _cleanup(s):
return unicode(s).encode('utf-8')
return [
(x[0], int(x[1].translate(maketrans("lLO", "110"))))
for x in findall(
"\s*(?P<code>[A-Za-z]{%(minchars)d,%(maxchars)d})\s*(?P<quantity>[\-?0-9%(numeric_letters)s]+)\s*" %
{
"minchars": 2,
"maxchars": 4,
"numeric_letters": "lLO"
}, _cleanup(val))
]
class SohFormatter(Formatter):
def format(self, text):
split_text = text.split(' ', 1)
keyword = split_text[0].lower()
content = ' '.join('{} {}'.format(code, amount) for code, amount in parse_report(split_text[1]))
if keyword == 'hmk':
text = 'soh ' + content
return text
class SOHHandler(GenericStockReportHandler):
formatter = SohFormatter
def get_message(self, data):
if data['error']:
return SOH_BAD_FORMAT
reported_earlier = StockState.objects.filter(
case_id=self.sql_location.couch_location.linked_supply_point().get_id,
last_modified_date__gte=datetime.utcnow() - timedelta(days=7)
).values_list('product_id', flat=True)
expected_products = set(
self.location_products.exclude(product_id__in=reported_earlier).values_list('product_id', flat=True)
)
reported_now = {
tx.product_id
for tx in data['transactions']
}
diff = expected_products - reported_now
if diff:
return SOH_PARTIAL_CONFIRM % {
'contact_name': self.verified_contact.owner.full_name,
'facility_name': self.sql_location.name,
'product_list': ' '.join(
sorted([SQLProduct.objects.get(product_id=product_id).code for product_id in diff])
)
}
return SOH_CONFIRM
def on_success(self):
SupplyPointStatus.objects.create(location_id=self.location_id,
status_type=SupplyPointStatusTypes.SOH_FACILITY,
status_value=SupplyPointStatusValues.SUBMITTED,
status_date=datetime.utcnow())
SupplyPointStatus.objects.create(location_id=self.location_id,
status_type=SupplyPointStatusTypes.LOSS_ADJUSTMENT_FACILITY,
status_value=SupplyPointStatusValues.REMINDER_SENT,
status_date=datetime.utcnow())
def help(self):
self.respond(SOH_HELP_MESSAGE)
return True
```
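A standalone sketch of the keyword normalization and letter-for-digit cleanup performed by `parse_report` and `SohFormatter` above. `parse_report_sketch` and `format_soh_sketch` are re-implementations written for illustration (using `re` only), not the production parser.
```python
import re

def parse_report_sketch(val):
    # Same idea as parse_report above: 2-4 letter product codes followed by
    # quantities in which l/L/O may stand in for 1/1/0.
    pattern = r"\s*([A-Za-z]{2,4})\s*([\-?0-9lLO]+)\s*"
    fix_digits = {'l': '1', 'L': '1', 'O': '0'}
    return [
        (code, int(''.join(fix_digits.get(ch, ch) for ch in quantity)))
        for code, quantity in re.findall(pattern, val)
    ]

def format_soh_sketch(text):
    # Mirrors SohFormatter.format: an "hmk" keyword is rewritten to "soh"
    # and the quantities are normalized.
    keyword, rest = text.split(' ', 1)
    content = ' '.join('{} {}'.format(code, amount) for code, amount in parse_report_sketch(rest))
    return 'soh ' + content if keyword.lower() == 'hmk' else text

print(parse_report_sketch("zi1O co2O la3O"))          # [('zi', 10), ('co', 20), ('la', 30)]
print(format_soh_sketch("Hmk Id 4OO Dp 569 Ip 678"))  # soh Id 400 Dp 569 Ip 678
```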
#### File: tanzania/warehouse/updater.py
```python
from datetime import datetime, timedelta
import logging
import itertools
from celery.canvas import chain
from celery.task import task
from django.db import transaction
from django.db.models import Q
from corehq.apps.products.models import SQLProduct
from corehq.apps.locations.models import Location, SQLLocation
from custom.ilsgateway.tanzania.warehouse import const
from custom.ilsgateway.tanzania.warehouse.alerts import populate_no_primary_alerts, \
populate_facility_stockout_alerts, create_alert
from dimagi.utils.chunked import chunked
from dimagi.utils.couch.bulk import get_docs
from dimagi.utils.dates import get_business_day_of_month, add_months, months_between
from casexml.apps.stock.models import StockReport, StockTransaction
from custom.ilsgateway.models import SupplyPointStatus, SupplyPointStatusTypes, DeliveryGroups, \
OrganizationSummary, GroupSummary, SupplyPointStatusValues, Alert, ProductAvailabilityData, \
SupplyPointWarehouseRecord, HistoricalLocationGroup, ILSGatewayConfig
"""
These functions and variables are ported from:
https://github.com/dimagi/logistics/blob/tz-master/logistics_project/apps/tanzania/reporting/run_reports.py
"""
def _is_valid_status(facility, date, status_type):
if status_type not in const.NEEDED_STATUS_TYPES:
return False
groups = HistoricalLocationGroup.objects.filter(
date__month=date.month,
date__year=date.year,
location_id=facility.sql_location
)
if (not facility.metadata.get('group', None)) and (groups.count() == 0):
return False
if groups.count() > 0:
codes = [group.group for group in groups]
else:
try:
latest_group = HistoricalLocationGroup.objects.filter(
location_id=facility.sql_location
).latest('date')
if date.date() < latest_group.date:
return False
else:
codes = [facility.metadata['group']]
except HistoricalLocationGroup.DoesNotExist:
codes = [facility.metadata['group']]
dg = DeliveryGroups(date.month)
if status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
return dg.current_submitting_group() in codes
elif status_type == SupplyPointStatusTypes.DELIVERY_FACILITY:
return dg.current_delivering_group() in codes
return True
def _get_window_date(status_type, date):
# we need this method because the soh and super reports actually
# are sometimes treated as reports for _next_ month
if status_type == SupplyPointStatusTypes.SOH_FACILITY or \
status_type == SupplyPointStatusTypes.SUPERVISION_FACILITY:
# if the date is after the last business day of the month
# count it for the next month
if date.date() >= get_business_day_of_month(date.year, date.month, -1):
year, month = add_months(date.year, date.month, 1)
return datetime(year, month, 1)
return datetime(date.year, date.month, 1)
def is_on_time(status_date, warehouse_date, status_type):
"""
    On-time requirement:
    an SOH report should be submitted before the 6th business day of the month,
    and an R & R report before the 13th business day of the month.
    Otherwise the report is marked as a late response.
"""
if status_type == SupplyPointStatusTypes.SOH_FACILITY:
if status_date.date() < get_business_day_of_month(warehouse_date.year, warehouse_date.month, 6):
return True
if status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
if status_date.date() < get_business_day_of_month(warehouse_date.year, warehouse_date.month, 13):
return True
return False
def average_lead_time(facility_id, window_date):
    # first day of the month after window_date; add_months handles the December -> January rollover
    next_year, next_month = add_months(window_date.year, window_date.month, 1)
    end_date = datetime(next_year, next_month, 1)
received = SupplyPointStatus.objects.filter(
location_id=facility_id,
status_date__lt=end_date,
status_value=SupplyPointStatusValues.RECEIVED,
status_type=SupplyPointStatusTypes.DELIVERY_FACILITY).order_by('status_date')
total_time = timedelta(days=0)
count = 0
last_receipt = datetime(1900, 1, 1)
for receipt in received:
if receipt.status_date - last_receipt < timedelta(days=30):
last_receipt = receipt.status_date
continue
last_receipt = receipt.status_date
last_submitted = SupplyPointStatus.objects.filter(
location_id=facility_id,
status_date__lt=receipt.status_date,
status_value=SupplyPointStatusValues.SUBMITTED,
status_type=SupplyPointStatusTypes.R_AND_R_FACILITY).order_by('-status_date')
if last_submitted.count():
ltime = receipt.status_date - last_submitted[0].status_date
if timedelta(days=30) < ltime < timedelta(days=100):
total_time += ltime
count += 1
else:
continue
return total_time / count if count else None
def needed_status_types(org_summary):
facility = Location.get(org_summary.location_id)
return [status_type for status_type in const.NEEDED_STATUS_TYPES if _is_valid_status(facility,
org_summary.date, status_type)]
def not_responding_facility(org_summary):
for status_type in needed_status_types(org_summary):
group_summary, created = GroupSummary.objects.get_or_create(org_summary=org_summary,
title=status_type)
group_summary.total = 1
assert group_summary.responded in (0, 1)
if group_summary.title == SupplyPointStatusTypes.SOH_FACILITY and not group_summary.responded:
# TODO: this might not be right unless we also clear it
create_alert(org_summary.location_id, org_summary.date,
'soh_not_responding', {'number': 1})
elif group_summary.title == SupplyPointStatusTypes.R_AND_R_FACILITY and not group_summary.responded:
# TODO: this might not be right unless we also clear it
create_alert(org_summary.location_id, org_summary.date,
'rr_not_responded', {'number': 1})
elif group_summary.title == SupplyPointStatusTypes.DELIVERY_FACILITY and not group_summary.responded:
# TODO: this might not be right unless we also clear it
create_alert(org_summary.location_id, org_summary.date,
'delivery_not_responding', {'number': 1})
else:
# not an expected / needed group. ignore for now
pass
group_summary.save()
@transaction.atomic
def update_product_availability_facility_data(org_summary):
# product availability
facility = Location.get(org_summary.location_id)
assert facility.location_type == "FACILITY"
prods = SQLProduct.objects.filter(domain=facility.domain, is_archived=False)
for p in prods:
product_data, created = ProductAvailabilityData.objects.get_or_create(
product=p.product_id,
location_id=facility._id,
date=org_summary.date
)
if created:
# set defaults
product_data.total = 1
previous_reports = ProductAvailabilityData.objects.filter(
product=p.product_id,
location_id=facility._id,
date__lt=org_summary.date,
total=1
)
if previous_reports.count():
prev = previous_reports.latest('date')
product_data.with_stock = prev.with_stock
product_data.without_stock = prev.without_stock
product_data.without_data = prev.without_data
else:
# otherwise we use the defaults
product_data.with_stock = 0
product_data.without_stock = 0
product_data.without_data = 1
product_data.save()
assert (product_data.with_stock + product_data.without_stock + product_data.without_data) == 1, \
"bad product data config for %s" % product_data
def default_start_date():
return datetime(2012, 1, 1)
def _get_test_locations(domain):
"""
returns test region and all its children
"""
test_region = SQLLocation.objects.get(domain=domain, external_id=const.TEST_REGION_ID)
sql_locations = SQLLocation.objects.filter(
Q(domain=domain) & (Q(parent=test_region) | Q(parent__parent=test_region))
).exclude(is_archived=True).order_by('id').only('location_id')
return [sql_location.couch_location for sql_location in sql_locations] + \
[test_region.couch_location]
def populate_report_data(start_date, end_date, domain, runner, locations=None, strict=True):
# first populate all the warehouse tables for all facilities
# hard coded to know this is the first date with data
start_date = max(start_date, default_start_date())
# For QA purposes generate reporting data for only some small part of data.
if not ILSGatewayConfig.for_domain(domain).all_stock_data:
if locations is None:
locations = _get_test_locations(domain)
facilities = filter(lambda location: location.location_type == 'FACILITY', locations)
non_facilities_types = ['DISTRICT', 'REGION', 'MSDZONE', 'MOHSW']
non_facilities = []
for location_type in non_facilities_types:
non_facilities.extend(filter(lambda location: location.location_type == location_type, locations))
else:
facilities = Location.filter_by_type(domain, 'FACILITY')
non_facilities = list(Location.filter_by_type(domain, 'DISTRICT'))
non_facilities += list(Location.filter_by_type(domain, 'REGION'))
non_facilities += list(Location.filter_by_type(domain, 'MSDZONE'))
non_facilities += list(Location.filter_by_type(domain, 'MOHSW'))
if runner.location:
if runner.location.location_type.name.upper() != 'FACILITY':
facilities = []
non_facilities = itertools.dropwhile(
lambda location: location._id != runner.location.location_id,
non_facilities
)
else:
facilities = itertools.dropwhile(
lambda location: location._id != runner.location.location_id,
facilities
)
facilities_chunked_list = chunked(facilities, 5)
for chunk in facilities_chunked_list:
res = chain(process_facility_warehouse_data.si(fac, start_date, end_date, runner) for fac in chunk)()
res.get()
non_facilities_chunked_list = chunked(non_facilities, 50)
# then populate everything above a facility off a warehouse table
for chunk in non_facilities_chunked_list:
res = chain(
process_non_facility_warehouse_data.si(org, start_date, end_date, runner, strict)
for org in chunk
)()
res.get()
runner.location = None
runner.save()
# finally go back through the history and initialize empty data for any
# newly created facilities
update_historical_data(domain)
@task(queue='logistics_background_queue')
def process_facility_warehouse_data(facility, start_date, end_date, runner):
"""
process all the facility-level warehouse tables
"""
logging.info("processing facility %s (%s)" % (facility.name, str(facility._id)))
try:
runner.location = facility.sql_location
runner.save()
except SQLLocation.DoesNotExist:
# TODO Temporary fix
facility.delete()
return
for alert_type in [const.SOH_NOT_RESPONDING, const.RR_NOT_RESPONDED, const.DELIVERY_NOT_RESPONDING]:
alert = Alert.objects.filter(location_id=facility._id, date__gte=start_date, date__lt=end_date,
type=alert_type)
alert.delete()
supply_point_id = facility.linked_supply_point()._id
location_id = facility._id
new_statuses = SupplyPointStatus.objects.filter(
location_id=facility._id,
status_date__gte=start_date,
status_date__lt=end_date
).order_by('status_date').iterator()
process_facility_statuses(location_id, new_statuses)
new_reports = StockReport.objects.filter(
stocktransaction__case_id=supply_point_id,
date__gte=start_date,
date__lt=end_date,
stocktransaction__type='stockonhand'
).order_by('date').iterator()
process_facility_product_reports(location_id, new_reports)
new_trans = StockTransaction.objects.filter(
case_id=supply_point_id,
report__date__gte=start_date,
report__date__lt=end_date,
).exclude(type='consumption').order_by('report__date').iterator()
process_facility_transactions(location_id, new_trans)
# go through all the possible values in the date ranges
# and make sure there are warehouse tables there
for year, month in months_between(start_date, end_date):
window_date = datetime(year, month, 1)
# create org_summary for every fac/date combo
org_summary, created = OrganizationSummary.objects.get_or_create(
location_id=facility._id,
date=window_date
)
org_summary.total_orgs = 1
alt = average_lead_time(facility._id, window_date)
if alt:
alt = alt.days
org_summary.average_lead_time_in_days = alt or 0
org_summary.save()
# create group_summary for every org_summary title combo
for title in const.NEEDED_STATUS_TYPES:
GroupSummary.objects.get_or_create(org_summary=org_summary,
title=title)
# update all the non-response data
not_responding_facility(org_summary)
# update product availability data
update_product_availability_facility_data(org_summary)
# alerts
with transaction.atomic():
populate_no_primary_alerts(facility, window_date)
populate_facility_stockout_alerts(facility, window_date)
@transaction.atomic
def process_facility_statuses(facility_id, statuses, alerts=True):
"""
For a given facility and list of statuses, update the appropriate
data warehouse tables. This should only be called on supply points
that are facilities.
"""
facility = Location.get(facility_id)
for status in statuses:
warehouse_date = _get_window_date(status.status_type, status.status_date)
if _is_valid_status(facility, status.status_date, status.status_type):
org_summary = OrganizationSummary.objects.get_or_create(
location_id=facility_id,
date=warehouse_date
)[0]
group_summary = GroupSummary.objects.get_or_create(
org_summary=org_summary,
title=status.status_type
)[0]
group_summary.total = 1
if status.status_value not in (SupplyPointStatusValues.REMINDER_SENT,
SupplyPointStatusValues.ALERT_SENT):
# we've responded to this query
group_summary.responded = 1
if status.status_value in [SupplyPointStatusValues.SUBMITTED,
SupplyPointStatusValues.RECEIVED]:
group_summary.complete = 1
else:
group_summary.complete = group_summary.complete or 0
if group_summary.complete:
if is_on_time(status.status_date, warehouse_date, status.status_type):
group_summary.on_time = 1
else:
group_summary.on_time = group_summary.on_time
else:
group_summary.on_time = 0
group_summary.save()
if alerts:
if status.status_value == SupplyPointStatusValues.NOT_SUBMITTED \
and status.status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
create_alert(facility_id, status.status_date, const.RR_NOT_SUBMITTED,
{'number': 1})
if status.status_value == SupplyPointStatusValues.NOT_RECEIVED \
and status.status_type == SupplyPointStatusTypes.DELIVERY_FACILITY:
create_alert(facility_id, status.status_date, const.DELIVERY_NOT_RECEIVED,
{'number': 1})
def process_facility_product_reports(facility_id, reports):
"""
For a given facility and list of ProductReports, update the appropriate
data warehouse tables. This should only be called on supply points
that are facilities. Currently this only affects stock on hand reporting
data. We need to use this method instead of the statuses because partial
    stock on hand reports don't create a valid status, but should be treated
like valid submissions in most of the rest of the site.
"""
months_updated = {}
for report in reports:
stock_transactions = report.stocktransaction_set.filter(type='stockonhand')
assert stock_transactions.count() > 0
warehouse_date = _get_window_date(SupplyPointStatusTypes.SOH_FACILITY, report.date)
if warehouse_date in months_updated:
# an optimization to avoid repeatedly doing this work for each
# product report for the entire month
continue
org_summary = OrganizationSummary.objects.get_or_create(location_id=facility_id, date=warehouse_date)[0]
group_summary = GroupSummary.objects.get_or_create(org_summary=org_summary,
title=SupplyPointStatusTypes.SOH_FACILITY)[0]
group_summary.total = 1
group_summary.responded = 1
group_summary.complete = 1
if is_on_time(report.date, warehouse_date, SupplyPointStatusTypes.SOH_FACILITY):
group_summary.on_time = 1
group_summary.save()
months_updated[warehouse_date] = None # update the cache of stuff we've dealt with
@transaction.atomic
def process_facility_transactions(facility_id, transactions):
"""
For a given facility and list of transactions, update the appropriate
data warehouse tables. This should only be called on supply points
that are facilities.
"""
for trans in transactions:
date = trans.report.date
product_data = ProductAvailabilityData.objects.get_or_create(
product=trans.product_id,
location_id=facility_id,
date=datetime(date.year, date.month, 1)
)[0]
product_data.total = 1
product_data.without_data = 0
if trans.stock_on_hand <= 0:
product_data.without_stock = 1
product_data.with_stock = 0
else:
product_data.without_stock = 0
product_data.with_stock = 1
product_data.save()
def get_non_archived_facilities_below(location):
child_ids = location.sql_location.get_descendants(include_self=True).filter(
is_archived=False, location_type__name='FACILITY'
).values_list('location_id', flat=True)
return [Location.wrap(doc) for doc in get_docs(Location.get_db(), child_ids)]
@task(queue='logistics_background_queue')
def process_non_facility_warehouse_data(location, start_date, end_date, runner, strict=True):
runner.location = location.sql_location
runner.save()
facs = get_non_archived_facilities_below(location)
fac_ids = [f._id for f in facs]
logging.info("processing non-facility %s (%s), %s children" % (location.name, str(location._id), len(facs)))
for year, month in months_between(start_date, end_date):
window_date = datetime(year, month, 1)
org_summary = OrganizationSummary.objects.get_or_create(location_id=location._id, date=window_date)[0]
org_summary.total_orgs = len(facs)
sub_summaries = OrganizationSummary.objects.filter(date=window_date, location_id__in=fac_ids)
subs_with_lead_time = [s for s in sub_summaries if s.average_lead_time_in_days]
# lead times
if subs_with_lead_time:
days_sum = sum([s.average_lead_time_in_days for s in subs_with_lead_time])
org_summary.average_lead_time_in_days = days_sum / len(subs_with_lead_time)
else:
org_summary.average_lead_time_in_days = 0
org_summary.save()
# product availability
prods = SQLProduct.objects.filter(domain=location.domain, is_archived=False)
for p in prods:
product_data = ProductAvailabilityData.objects.get_or_create(product=p.product_id,
location_id=location._id,
date=window_date)[0]
sub_prods = ProductAvailabilityData.objects.filter(product=p.product_id,
location_id__in=fac_ids,
date=window_date)
product_data.total = sum([p.total for p in sub_prods])
if strict:
assert product_data.total == len(facs), \
"total should match number of sub facilities"
product_data.with_stock = sum([p.with_stock for p in sub_prods])
product_data.without_stock = sum([p.without_stock for p in sub_prods])
product_data.without_data = product_data.total - product_data.with_stock - product_data.without_stock
product_data.save()
dg = DeliveryGroups(month=month, facs=facs)
for status_type in const.NEEDED_STATUS_TYPES:
gsum = GroupSummary.objects.get_or_create(org_summary=org_summary, title=status_type)[0]
sub_sums = GroupSummary.objects.filter(title=status_type, org_summary__in=sub_summaries).all()
# TODO: see if moving the aggregation to the db makes it
# faster, if this is slow
gsum.total = sum([s.total for s in sub_sums])
gsum.responded = sum([s.responded for s in sub_sums])
gsum.on_time = sum([s.on_time for s in sub_sums])
gsum.complete = sum([s.complete for s in sub_sums])
# gsum.missed_response = sum([s.missed_response for s in sub_sums])
gsum.save()
if status_type == SupplyPointStatusTypes.DELIVERY_FACILITY:
expected = len(dg.delivering())
elif status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
expected = len(dg.submitting())
elif status_type == SupplyPointStatusTypes.SOH_FACILITY \
or status_type == SupplyPointStatusTypes.SUPERVISION_FACILITY:
expected = len(facs)
if gsum.total != expected:
logging.info("expected %s but was %s for %s" % (expected, gsum.total, gsum))
for alert_type in [const.RR_NOT_SUBMITTED, const.DELIVERY_NOT_RECEIVED,
const.SOH_NOT_RESPONDING, const.RR_NOT_RESPONDED, const.DELIVERY_NOT_RESPONDING]:
sub_alerts = Alert.objects.filter(location_id__in=fac_ids, date=window_date, type=alert_type)
aggregate_response_alerts(location._id, window_date, sub_alerts, alert_type)
def aggregate_response_alerts(location_id, date, alerts, alert_type):
total = sum([s.number for s in alerts])
if total > 0:
create_alert(location_id, date, alert_type, {'number': total})
def update_historical_data(domain, locations=None):
"""
If we don't have a record of this supply point being updated, run
through all historical data and just fill in with zeros.
"""
org_summaries = OrganizationSummary.objects.order_by('date')
if org_summaries.count() == 0:
return
start_date = org_summaries[0].date
if locations is None:
if not ILSGatewayConfig.for_domain(domain).all_stock_data:
locations = _get_test_locations(domain)
else:
locations = Location.by_domain(domain)
for sp in locations:
try:
SupplyPointWarehouseRecord.objects.get(supply_point=sp._id)
except SupplyPointWarehouseRecord.DoesNotExist:
# we didn't have a record so go through and historically update
# anything we maybe haven't touched
for year, month in months_between(start_date, sp.sql_location.created_at):
window_date = datetime(year, month, 1)
for cls in [OrganizationSummary, ProductAvailabilityData, GroupSummary]:
_init_warehouse_model(cls, sp, window_date)
SupplyPointWarehouseRecord.objects.create(supply_point=sp._id,
create_date=datetime.utcnow())
def _init_warehouse_model(cls, location, date):
if cls == OrganizationSummary:
_init_default(location, date)
elif cls == ProductAvailabilityData:
_init_with_product(location, date)
elif cls == GroupSummary:
_init_group_summary(location, date)
def _init_default(location, date):
OrganizationSummary.objects.get_or_create(location_id=location._id, date=date)
def _init_with_product(location, date):
for p in SQLProduct.objects.filter(domain=location.domain, is_archived=False):
ProductAvailabilityData.objects.get_or_create(location_id=location._id, date=date, product=p.product_id)
def _init_group_summary(location, date):
org_summary = OrganizationSummary.objects.get(location_id=location._id, date=date)
for title in const.NEEDED_STATUS_TYPES:
GroupSummary.objects.get_or_create(org_summary=org_summary,
title=title)
```
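A standalone sketch of the month-rollover rule applied by `_get_window_date` for SOH reports. The business-day helper here is a weekends-only simplification of `dimagi.utils.dates.get_business_day_of_month` (the real helper may differ), so it is illustrative rather than exact.
```python
import calendar
from datetime import date, datetime

def last_business_day_sketch(year, month):
    # Weekends-only notion of "business day": step back from the last calendar
    # day until we hit Monday-Friday.
    d = date(year, month, calendar.monthrange(year, month)[1])
    while d.weekday() >= 5:
        d = d.replace(day=d.day - 1)
    return d

def soh_window_date_sketch(report_dt):
    # A report on or after the last business day of the month is counted
    # against the *next* month, mirroring _get_window_date above.
    if report_dt.date() >= last_business_day_sketch(report_dt.year, report_dt.month):
        year, month = (report_dt.year + 1, 1) if report_dt.month == 12 else (report_dt.year, report_dt.month + 1)
        return datetime(year, month, 1)
    return datetime(report_dt.year, report_dt.month, 1)

print(soh_window_date_sketch(datetime(2014, 1, 31)))  # 2014-02-01: Fri Jan 31 is the last business day
print(soh_window_date_sketch(datetime(2014, 1, 15)))  # 2014-01-01
```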
#### File: custom/ilsgateway/tasks.py
```python
from datetime import datetime, timedelta
from functools import partial
import logging
from celery.schedules import crontab
from celery.task import task, periodic_task
from django.conf import settings
from django.db import transaction
from psycopg2._psycopg import DatabaseError
from casexml.apps.stock.models import StockReport, StockTransaction
from corehq.apps.commtrack.models import StockState
from corehq.apps.locations.models import SQLLocation
from corehq.apps.products.models import Product
from custom.ilsgateway.api import ILSGatewayEndpoint, ILSGatewayAPI
from custom.ilsgateway.tanzania.reminders.delivery import DeliveryReminder
from custom.ilsgateway.tanzania.reminders.randr import RandrReminder
from custom.ilsgateway.tanzania.reminders.stockonhand import SOHReminder
from custom.ilsgateway.tanzania.reminders.supervision import SupervisionReminder
from custom.ilsgateway.temporary import fix_stock_data
from custom.ilsgateway.utils import send_for_day, send_for_all_domains
from custom.logistics.commtrack import bootstrap_domain as ils_bootstrap_domain, save_stock_data_checkpoint
from custom.ilsgateway.models import ILSGatewayConfig, SupplyPointStatus, DeliveryGroupReport, ReportRun, \
GroupSummary, OrganizationSummary, ProductAvailabilityData, Alert, SupplyPointWarehouseRecord
from custom.ilsgateway.tanzania.warehouse.updater import populate_report_data
from custom.logistics.models import StockDataCheckpoint
from custom.logistics.tasks import stock_data_task
from dimagi.utils.dates import get_business_day_of_month
@periodic_task(run_every=crontab(hour="4", minute="00", day_of_week="*"),
queue='logistics_background_queue')
def migration_task():
from custom.ilsgateway.stock_data import ILSStockDataSynchronization
for config in ILSGatewayConfig.get_all_steady_sync_configs():
if config.enabled:
endpoint = ILSGatewayEndpoint.from_config(config)
ils_bootstrap_domain(ILSGatewayAPI(config.domain, endpoint))
stock_data_task(ILSStockDataSynchronization(config.domain, endpoint))
report_run.delay(config.domain)
@task(queue='logistics_background_queue')
def ils_bootstrap_domain_task(domain):
ils_config = ILSGatewayConfig.for_domain(domain)
return ils_bootstrap_domain(ILSGatewayAPI(domain, ILSGatewayEndpoint.from_config(ils_config)))
# Region KILIMANJARO
ILS_FACILITIES = [948, 998, 974, 1116, 971, 1122, 921, 658, 995, 1057,
652, 765, 1010, 657, 1173, 1037, 965, 749, 1171, 980,
1180, 1033, 975, 1056, 970, 742, 985, 2194, 935, 1128,
1172, 773, 916, 1194, 4862, 1003, 994, 1034, 1113, 1167,
949, 987, 986, 960, 1046, 942, 972, 21, 952, 930,
1170, 1067, 1006, 752, 747, 1176, 746, 755, 1102, 924,
744, 1109, 760, 922, 945, 988, 927, 1045, 1060, 938,
1041, 1101, 1107, 939, 910, 934, 929, 1111, 1174, 1044,
1008, 914, 1040, 1035, 1126, 1203, 912, 990, 908, 654,
1051, 1110, 983, 771, 1068, 756, 4807, 973, 1013, 911,
1048, 1196, 917, 1127, 963, 1032, 1164, 951, 918, 999,
923, 1049, 1000, 1165, 915, 1036, 1121, 758, 1054, 1042,
4861, 1007, 1053, 954, 761, 1002, 748, 919, 976, 1177,
1179, 1001, 743, 762, 741, 959, 1119, 772, 941, 956, 964,
1014, 953, 754, 1202, 1166, 977, 757, 961, 759, 997, 947, 1112, 978, 1124,
768, 937, 1195, 913, 906, 1043, 1178, 992, 1038, 957, 1106, 767, 979, 1012,
926, 1120, 933, 1066, 1105, 943, 1047, 1063, 1004, 958, 751, 763, 1011, 936,
1114, 932, 984, 656, 653, 946, 1058, 931, 770, 1108, 909, 1118, 1062, 745, 1065,
955, 1052, 753, 944, 1061, 1069, 1104, 996, 4860, 950, 993, 1064, 1175, 1059, 1050,
968, 928, 989, 967, 966, 750, 981, 1055, 766, 1123, 1039, 1103, 655, 1125, 774, 991,
1117, 920, 769, 1005, 1009, 925, 1115, 907, 4996]
def get_locations(api_object, facilities):
for facility in facilities:
location = api_object.endpoint.get_location(facility, params=dict(with_historical_groups=1))
api_object.location_sync(api_object.endpoint.models_map['location'](location))
def process_supply_point_status(supply_point_status, domain, location_id=None):
location_id = location_id or supply_point_status.location_id
try:
SupplyPointStatus.objects.get(
external_id=int(supply_point_status.external_id),
location_id=location_id
)
except SupplyPointStatus.DoesNotExist:
supply_point_status.save()
def sync_supply_point_status(domain, endpoint, facility, checkpoint, date, limit=100, offset=0):
has_next = True
next_url = ""
while has_next:
meta, supply_point_statuses = endpoint.get_supplypointstatuses(
domain,
limit=limit,
offset=offset,
next_url_params=next_url,
filters=dict(supply_point=facility, status_date__gte=date),
facility=facility
)
# set the checkpoint right before the data we are about to process
if not supply_point_statuses:
return None
location_id = SQLLocation.objects.get(domain=domain, external_id=facility).location_id
save_stock_data_checkpoint(checkpoint,
'supply_point_status',
meta.get('limit') or limit,
meta.get('offset') or offset, date, location_id, True)
for supply_point_status in supply_point_statuses:
process_supply_point_status(supply_point_status, domain, location_id)
if not meta.get('next', False):
has_next = False
else:
next_url = meta['next'].split('?')[1]
def process_delivery_group_report(dgr, domain, location_id=None):
location_id = location_id or dgr.location_id
try:
DeliveryGroupReport.objects.get(external_id=dgr.external_id, location_id=location_id)
except DeliveryGroupReport.DoesNotExist:
dgr.save()
def sync_delivery_group_report(domain, endpoint, facility, checkpoint, date, limit=100, offset=0):
has_next = True
next_url = ""
while has_next:
meta, delivery_group_reports = endpoint.get_deliverygroupreports(
domain,
limit=limit,
offset=offset,
next_url_params=next_url,
filters=dict(supply_point=facility, report_date__gte=date),
facility=facility
)
location_id = SQLLocation.objects.get(domain=domain, external_id=facility).location_id
# set the checkpoint right before the data we are about to process
save_stock_data_checkpoint(checkpoint,
'delivery_group',
meta.get('limit') or limit,
meta.get('offset') or offset,
date, location_id, True)
for dgr in delivery_group_reports:
try:
DeliveryGroupReport.objects.get(external_id=dgr.external_id, location_id=location_id)
except DeliveryGroupReport.DoesNotExist:
dgr.save()
if not meta.get('next', False):
has_next = False
else:
next_url = meta['next'].split('?')[1]
@task(queue='background_queue', ignore_result=True)
def ils_clear_stock_data_task(domain):
assert ILSGatewayConfig.for_domain(domain)
locations = SQLLocation.objects.filter(domain=domain)
SupplyPointStatus.objects.filter(location_id__in=locations.values_list('location_id', flat=True)).delete()
DeliveryGroupReport.objects.filter(location_id__in=locations.values_list('location_id', flat=True)).delete()
products = Product.ids_by_domain(domain)
StockState.objects.filter(product_id__in=products).delete()
StockTransaction.objects.filter(
case_id__in=locations.exclude(supply_point_id__isnull=True).values_list('supply_point_id', flat=True)
).delete()
StockReport.objects.filter(domain=domain).delete()
StockDataCheckpoint.objects.filter(domain=domain).delete()
@task(queue='background_queue', ignore_result=True)
def clear_report_data(domain):
locations_ids = SQLLocation.objects.filter(domain=domain).values_list('location_id', flat=True)
GroupSummary.objects.filter(org_summary__location_id__in=locations_ids).delete()
OrganizationSummary.objects.filter(location_id__in=locations_ids).delete()
ProductAvailabilityData.objects.filter(location_id__in=locations_ids).delete()
Alert.objects.filter(location_id__in=locations_ids).delete()
SupplyPointWarehouseRecord.objects.filter(supply_point__in=locations_ids).delete()
ReportRun.objects.filter(domain=domain).delete()
@task(queue='background_queue', ignore_result=True)
def fix_stock_data_task(domain):
fix_stock_data(domain)
# @periodic_task(run_every=timedelta(days=1), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE', 'celery'))
@task(queue='logistics_background_queue', ignore_result=True)
def report_run(domain, locations=None, strict=True):
last_successful_run = ReportRun.last_success(domain)
last_run = ReportRun.last_run(domain)
start_date = (datetime.min if not last_successful_run else last_successful_run.end)
stock_data_checkpoint = StockDataCheckpoint.objects.get(domain=domain)
# TODO Change this to datetime.utcnow() when project goes live
end_date = stock_data_checkpoint.date
running = ReportRun.objects.filter(complete=False, domain=domain)
if running.count() > 0:
raise Exception("Warehouse already running, will do nothing...")
if last_run and last_run.has_error:
run = last_run
run.complete = False
run.save()
else:
if start_date == end_date:
return
# start new run
run = ReportRun.objects.create(start=start_date, end=end_date,
start_run=datetime.utcnow(), domain=domain)
has_error = True
try:
populate_report_data(run.start, run.end, domain, run, locations, strict=strict)
has_error = False
except Exception, e:
# just in case something funky happened in the DB
if isinstance(e, DatabaseError):
try:
transaction.rollback()
except:
pass
has_error = True
raise
finally:
# complete run
run = ReportRun.objects.get(pk=run.id)
run.has_error = has_error
run.end_run = datetime.utcnow()
run.complete = True
run.save()
logging.info("ILSGateway report runner end time: %s" % datetime.utcnow())
facility_delivery_partial = partial(send_for_day, cutoff=15, reminder_class=DeliveryReminder)
district_delivery_partial = partial(send_for_day, cutoff=13, reminder_class=DeliveryReminder,
location_type='DISTRICT')
@periodic_task(run_every=crontab(day_of_month="13-15", hour=14, minute=0),
queue="logistics_reminder_queue")
def first_facility_delivery_task():
facility_delivery_partial(15)
@periodic_task(run_every=crontab(day_of_month="20-22", hour=14, minute=0),
queue="logistics_reminder_queue")
def second_facility_delivery_task():
facility_delivery_partial(22)
@periodic_task(run_every=crontab(day_of_month="26-30", hour=14, minute=0),
queue="logistics_reminder_queue")
def third_facility_delivery_task():
facility_delivery_partial(30)
@periodic_task(run_every=crontab(day_of_month="11-13", hour=8, minute=0),
queue="logistics_reminder_queue")
def first_district_delivery_task():
district_delivery_partial(13)
@periodic_task(run_every=crontab(day_of_month="18-20", hour=14, minute=0),
queue="logistics_reminder_queue")
def second_district_delivery_task():
district_delivery_partial(20)
@periodic_task(run_every=crontab(day_of_month="26-28", hour=14, minute=0),
queue="logistics_reminder_queue")
def third_district_delivery_task():
district_delivery_partial(28)
facility_randr_partial = partial(send_for_day, cutoff=5, reminder_class=RandrReminder, location_type='FACILITY')
district_randr_partial = partial(send_for_day, cutoff=13, reminder_class=RandrReminder, location_type='DISTRICT')
@periodic_task(run_every=crontab(day_of_month="3-5", hour=8, minute=0),
queue="logistics_reminder_queue")
def first_facility():
"""Last business day before or on 5th day of the Submission month, 8:00am"""
facility_randr_partial(5)
@periodic_task(run_every=crontab(day_of_month="8-10", hour=8, minute=0),
queue="logistics_reminder_queue")
def second_facility():
"""Last business day before or on 10th day of the submission month, 8:00am"""
facility_randr_partial(10)
@periodic_task(run_every=crontab(day_of_month="10-12", hour=8, minute=0),
queue="logistics_reminder_queue")
def third_facility():
"""Last business day before or on 12th day of the submission month, 8:00am"""
facility_randr_partial(12)
@periodic_task(run_every=crontab(day_of_month="11-13", hour=8, minute=0),
queue="logistics_reminder_queue")
def first_district():
district_randr_partial(13)
@periodic_task(run_every=crontab(day_of_month="13-15", hour=8, minute=0),
queue="logistics_reminder_queue")
def second_district():
district_randr_partial(15)
@periodic_task(run_every=crontab(day_of_month="14-16", hour=14, minute=0),
queue="logistics_reminder_queue")
def third_district():
district_randr_partial(16)
@periodic_task(run_every=crontab(day_of_month="26-31", hour=14, minute=15),
queue="logistics_reminder_queue")
def supervision_task():
now = datetime.utcnow()
last_business_day = get_business_day_of_month(month=now.month, year=now.year, count=-1)
if now.day == last_business_day.day:
send_for_all_domains(last_business_day, SupervisionReminder)
def get_last_and_nth_business_day(date, n):
last_month = datetime(date.year, date.month, 1) - timedelta(days=1)
last_month_last_day = get_business_day_of_month(month=last_month.month, year=last_month.year, count=-1)
nth_business_day = get_business_day_of_month(month=date.month, year=date.year, count=n)
return last_month_last_day, nth_business_day
@periodic_task(run_every=crontab(day_of_month="26-31", hour=14, minute=0),
queue="logistics_reminder_queue")
def first_soh_task():
now = datetime.utcnow()
last_business_day = get_business_day_of_month(month=now.month, year=now.year, count=-1)
if now.day == last_business_day.day:
send_for_all_domains(last_business_day, SOHReminder)
@periodic_task(run_every=crontab(day_of_month="1-3", hour=9, minute=0),
queue="logistics_reminder_queue")
def second_soh_task():
now = datetime.utcnow()
last_month_last_day, first_business_day = get_last_and_nth_business_day(now, 1)
if now.day == first_business_day.day:
send_for_all_domains(last_month_last_day, SOHReminder)
@periodic_task(run_every=crontab(day_of_month="5-7", hour=8, minute=15),
queue="logistics_reminder_queue")
def third_soh_task():
now = datetime.utcnow()
last_month_last_day, fifth_business_day = get_last_and_nth_business_day(now, 5)
if now.day == fifth_business_day.day:
send_for_all_domains(last_month_last_day, SOHReminder)
```
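A standalone sketch of the scheduling pattern used by the periodic reminder tasks above: each task is registered over a several-day crontab window and then only acts when "today" matches the computed business day. The weekends-only business-day helper is a simplification of the dimagi date utilities, used here purely for illustration.
```python
import calendar
from datetime import date, datetime

def last_business_day_sketch(year, month):
    # Simplified (weekends-only) stand-in for get_business_day_of_month(..., count=-1).
    d = date(year, month, calendar.monthrange(year, month)[1])
    while d.weekday() >= 5:
        d = d.replace(day=d.day - 1)
    return d

def should_fire_first_soh_sketch(now):
    # first_soh_task runs on days 26-31 but only sends when today *is* the
    # month's last business day.
    return now.day == last_business_day_sketch(now.year, now.month).day

print(should_fire_first_soh_sketch(datetime(2015, 1, 30)))  # True: Fri 2015-01-30 is the last business day
print(should_fire_first_soh_sketch(datetime(2015, 1, 29)))  # False
```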
#### File: tests/handlers/loss_adjust.py
```python
from corehq.apps.commtrack.models import StockState
from custom.ilsgateway.tanzania.reminders import LOSS_ADJUST_CONFIRM, SOH_CONFIRM
from custom.ilsgateway.tests import ILSTestScript
class ILSLossesAdjustmentsTest(ILSTestScript):
def setUp(self):
super(ILSLossesAdjustmentsTest, self).setUp()
def test_losses_adjustments(self):
script = """
5551234 > Hmk Id 400 Dp 569 Ip 678
5551234 < {0}
""".format(unicode(SOH_CONFIRM))
self.run_script(script)
self.run_script(script)
self.assertEqual(StockState.objects.count(), 3)
for ps in StockState.objects.all():
self.assertEqual(self.user_fac1.location.linked_supply_point().get_id, ps.case_id)
self.assertTrue(0 != ps.stock_on_hand)
script = """
5551234 > um id -3 dp -5 ip 13
5551234 < {0}
""".format(unicode(LOSS_ADJUST_CONFIRM))
self.run_script(script)
self.assertEqual(StockState.objects.count(), 3)
self.assertEqual(StockState.objects.get(sql_product__code="id").stock_on_hand, 397)
self.assertEqual(StockState.objects.get(sql_product__code="dp").stock_on_hand, 564)
self.assertEqual(StockState.objects.get(sql_product__code="ip").stock_on_hand, 691)
def test_losses_adjustments_la_word(self):
script = """
5551234 > Hmk Id 400 Dp 569 Ip 678
5551234 < {0}
""".format(unicode(SOH_CONFIRM))
self.run_script(script)
self.run_script(script)
self.assertEqual(StockState.objects.count(), 3)
for ps in StockState.objects.all():
self.assertEqual(self.user_fac1.location.linked_supply_point().get_id, ps.case_id)
self.assertTrue(0 != ps.stock_on_hand)
script = """
5551234 > la id -3 dp -5 ip 13
5551234 < {0}
""".format(unicode(LOSS_ADJUST_CONFIRM))
self.run_script(script)
self.assertEqual(StockState.objects.count(), 3)
self.assertEqual(StockState.objects.get(sql_product__code="id").stock_on_hand, 397)
self.assertEqual(StockState.objects.get(sql_product__code="dp").stock_on_hand, 564)
self.assertEqual(StockState.objects.get(sql_product__code="ip").stock_on_hand, 691)
```
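A quick arithmetic check of the stock-on-hand values asserted in the tests above: the loss/adjustment message applies signed quantities on top of the previously reported stock.
```python
# "Hmk Id 400 Dp 569 Ip 678" sets the starting stock; "la id -3 dp -5 ip 13"
# then applies signed adjustments on top of it.
start = {'id': 400, 'dp': 569, 'ip': 678}
adjustments = {'id': -3, 'dp': -5, 'ip': 13}
print({code: start[code] + adjustments[code] for code in start})
# {'id': 397, 'dp': 564, 'ip': 691} -- the values the assertions above expect
```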
#### File: custom/ilsgateway/utils.py
```python
from datetime import datetime
from decimal import Decimal
from casexml.apps.stock.models import StockReport, StockTransaction
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.locations.models import LocationType, Location
from corehq.apps.products.models import SQLProduct
from corehq.apps.sms.api import send_sms_to_verified_number
from corehq.util.translation import localize
from custom.ilsgateway.models import SupplyPointStatus, ILSGatewayConfig
from dimagi.utils.dates import get_business_day_of_month_before
from django.db.models.aggregates import Max
GROUPS = ('A', 'B', 'C')
def get_next_meta_url(has_next, meta, next_url):
if not meta.get('next', False):
has_next = False
else:
next_url = meta['next'].split('?')[1]
return has_next, next_url
def get_current_group():
month = datetime.utcnow().month
return GROUPS[(month + 2) % 3]
def send_for_all_domains(date, reminder_class, **kwargs):
for domain in ILSGatewayConfig.get_all_enabled_domains():
reminder_class(domain=domain, date=date, **kwargs).send()
def send_for_day(date, cutoff, reminder_class, **kwargs):
now = datetime.utcnow()
date = get_business_day_of_month_before(now.year, now.month, date)
cutoff = get_business_day_of_month_before(now.year, now.month, cutoff)
if now.day == date.day:
send_for_all_domains(cutoff, reminder_class, **kwargs)
def supply_points_with_latest_status_by_datespan(sps, status_type, status_value, datespan):
"""
    A very similar method is used by the reminders.
"""
ids = [sp._id for sp in sps]
inner = SupplyPointStatus.objects.filter(location_id__in=ids,
status_type=status_type,
status_date__gte=datespan.startdate,
status_date__lte=datespan.enddate).annotate(pk=Max('id'))
ids = SupplyPointStatus.objects.filter(
id__in=inner.values('pk').query,
status_type=status_type,
status_value=status_value).distinct().values_list("supply_point", flat=True)
return [SupplyPointCase.get(id) for id in ids]
def ils_bootstrap_domain_test_task(domain, endpoint):
from custom.logistics.commtrack import bootstrap_domain
from custom.ilsgateway.api import ILSGatewayAPI
return bootstrap_domain(ILSGatewayAPI(domain, endpoint))
def send_translated_message(user, message, **kwargs):
verified_number = user.get_verified_number()
if not verified_number:
return False
with localize(user.get_language_code()):
send_sms_to_verified_number(verified_number, message % kwargs)
return True
def make_loc(code, name, domain, type, metadata=None, parent=None):
name = name or code
location_type, _ = LocationType.objects.get_or_create(domain=domain, name=type)
loc = Location(site_code=code, name=name, domain=domain, location_type=type, parent=parent)
loc.metadata = metadata or {}
loc.save()
if not location_type.administrative:
SupplyPointCase.create_from_location(domain, loc)
loc.save()
return loc
def create_stock_report(location, products_quantities, date=None):
    # a datetime.utcnow() default in the signature would be evaluated once at import
    # time, so resolve the timestamp per call instead
    date = date or datetime.utcnow()
    sql_location = location.sql_location
report = StockReport.objects.create(
form_id='test-form-id',
domain=sql_location.domain,
type='balance',
date=date
)
for product_code, quantity in products_quantities.iteritems():
StockTransaction(
stock_on_hand=Decimal(quantity),
report=report,
type='stockonhand',
section_id='stock',
case_id=sql_location.supply_point_id,
product_id=SQLProduct.objects.get(domain=sql_location.domain, code=product_code).product_id
).save()
```
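A quick standalone check of the delivery-group rotation encoded in `get_current_group` above; the printed mapping is just the formula evaluated for every calendar month.
```python
GROUPS = ('A', 'B', 'C')

# month -> delivery group, as computed by get_current_group above.
rotation = {month: GROUPS[(month + 2) % 3] for month in range(1, 13)}
print(rotation)
# {1: 'A', 2: 'B', 3: 'C', 4: 'A', 5: 'B', 6: 'C', ..., 12: 'C'}
```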
#### File: _legacy/a5288/reports.py
```python
from django.utils.translation import ugettext_noop
from django.utils.translation import ugettext as _
import pytz
from corehq.apps.hqcase.dbaccessors import get_cases_in_domain
from corehq.apps.reports.standard import CustomProjectReport
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from casexml.apps.case.models import CommCareCase
from corehq.apps.sms.models import ExpectedCallbackEventLog, CALLBACK_PENDING, CALLBACK_RECEIVED, CALLBACK_MISSED
from datetime import datetime, timedelta
from corehq.util.timezones.conversions import ServerTime
from dimagi.utils.parsing import json_format_date
class MissedCallbackReport(CustomProjectReport, GenericTabularReport):
name = ugettext_noop("Missed Callbacks")
slug = "missed_callbacks"
description = ugettext_noop("Summarizes two weeks of SMS / Callback interactions for all participants.")
flush_layout = True
def get_past_two_weeks(self):
now = datetime.utcnow()
local_datetime = ServerTime(now).user_time(self.timezone).done()
return [(local_datetime + timedelta(days = x)).date() for x in range(-14, 0)]
@property
def headers(self):
args = [
DataTablesColumn(_("Participant ID")),
DataTablesColumn(_("Total No Response")),
DataTablesColumn(_("Total Indicated")),
DataTablesColumn(_("Total Pending")),
]
args += [DataTablesColumn(date.strftime("%b %d")) for date in self.get_past_two_weeks()]
return DataTablesHeader(*args)
@property
def rows(self):
group_id = None
if self.request.couch_user.is_commcare_user():
group_ids = self.request.couch_user.get_group_ids()
if len(group_ids) > 0:
group_id = group_ids[0]
data = {}
for case in get_cases_in_domain(self.domain, type='participant'):
if case.closed:
continue
# If a site coordinator is viewing the report, only show participants from that site (group)
if group_id is None or group_id == case.owner_id:
timezone = pytz.timezone(case.get_case_property("time_zone"))
data[case._id] = {
"name": case.name,
"time_zone": timezone,
"dates": [None] * 14,
}
dates = self.get_past_two_weeks()
date_strings = [json_format_date(date) for date in dates]
start_date = dates[0] - timedelta(days=1)
end_date = dates[-1] + timedelta(days=2)
start_utc_timestamp = json_format_date(start_date)
end_utc_timestamp = json_format_date(end_date)
expected_callback_events = ExpectedCallbackEventLog.view("sms/expected_callback_event",
startkey=[self.domain, start_utc_timestamp],
endkey=[self.domain, end_utc_timestamp],
include_docs=True).all()
for event in expected_callback_events:
if event.couch_recipient in data:
timezone = data[event.couch_recipient]["time_zone"]
event_date = (ServerTime(event.date).user_time(timezone)
.ui_string("%Y-%m-%d"))
if event_date in date_strings:
data[event.couch_recipient]["dates"][date_strings.index(event_date)] = event.status
result = []
for case_id, data_dict in data.items():
row = [
self._fmt(data_dict["name"]),
None,
None,
None,
]
total_no_response = 0
total_indicated = 0
total_pending = 0
for date_status in data_dict["dates"]:
if date_status == CALLBACK_PENDING:
total_indicated += 1
total_pending += 1
row.append(self._fmt(_("pending")))
elif date_status == CALLBACK_RECEIVED:
total_indicated += 1
row.append(self._fmt(_("OK")))
elif date_status == CALLBACK_MISSED:
total_indicated += 1
total_no_response += 1
row.append(self._fmt_highlight(_("No Response")))
else:
row.append(self._fmt(_("not indicated")))
if total_no_response > 0:
row[1] = self._fmt_highlight(total_no_response)
else:
row[1] = self._fmt(total_no_response)
row[2] = self._fmt(total_indicated)
row[3] = self._fmt(total_pending)
result.append(row)
return result
def _fmt(self, value):
return self.table_cell(value, '<div style="text-align:center">%s</div>' % value)
def _fmt_highlight(self, value):
return self.table_cell(value, '<div style="background-color:#f33; font-weight:bold; text-align:center">%s</div>' % value)
```
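The report above walks fourteen days of callback statuses per participant and tallies them into "No Response", "Indicated" and "Pending" columns. A minimal, framework-free sketch of that tally (the status constants here are placeholder strings; the real values come from `corehq.apps.sms.models`):

```python
from datetime import date, timedelta

# placeholder values for the imported constants
CALLBACK_PENDING, CALLBACK_RECEIVED, CALLBACK_MISSED = "PENDING", "RECEIVED", "MISSED"


def past_two_weeks(today=None):
    """The 14 calendar days preceding today, oldest first."""
    today = today or date.today()
    return [today + timedelta(days=x) for x in range(-14, 0)]


def tally(day_statuses):
    """day_statuses: one entry per day, each a status constant or None."""
    totals = {"no_response": 0, "indicated": 0, "pending": 0}
    for status in day_statuses:
        if status == CALLBACK_PENDING:
            totals["indicated"] += 1
            totals["pending"] += 1
        elif status == CALLBACK_RECEIVED:
            totals["indicated"] += 1
        elif status == CALLBACK_MISSED:
            totals["indicated"] += 1
            totals["no_response"] += 1
    return totals

# tally([None, "MISSED", "RECEIVED", "PENDING"] + [None] * 10)
# -> {'no_response': 1, 'indicated': 3, 'pending': 1}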
#### File: management/commands/mvp_force_update.py
```python
from gevent import monkey; monkey.patch_all()
from itertools import islice
from casexml.apps.case.models import CommCareCase
import time
from corehq.apps.hqcase.dbaccessors import get_number_of_cases_in_domain, \
get_case_ids_in_domain
import sys
import gevent
from restkit.session import set_session
set_session("gevent")
from gevent.pool import Pool
from couchdbkit.exceptions import ResourceNotFound
from django.core.management.base import LabelCommand
from corehq.apps.indicators.models import CaseIndicatorDefinition, \
FormIndicatorDefinition, DocumentMismatchError, DocumentNotInDomainError, \
FormLabelIndicatorDefinition
from couchforms.models import XFormInstance
from dimagi.utils.couch.database import get_db, iter_docs
from mvp.models import MVP
POOL_SIZE = 10
class Command(LabelCommand):
help = "Update MVP indicators in existing cases and forms."
args = "<domain> <case or form> <case or form label> <start at record #>"
label = ""
start_at_record = 0
domains = None
def handle(self, *args, **options):
self.domains = [args[0]] if len(args) > 0 and args[0] != "all" else MVP.DOMAINS
cases = ['child', 'pregnancy', 'household']
process_forms = True
process_cases = True
self.start_at_record = int(args[3]) if len(args) > 3 else 0
if len(args) > 1 and args[1] != "all":
document = args[1]
process_cases = document == "case"
process_forms = document == "form"
if len(args) > 2 and args[2] != "all":
document_type = args[2]
if process_cases:
cases = [document_type]
else:
document_type = None
if process_forms:
for domain in self.domains:
self.update_indicators_for_xmlns(domain, form_label_filter=document_type)
if process_cases:
for case_type in cases:
for domain in self.domains:
self.update_indicators_for_case_type(case_type, domain)
def update_indicators_for_xmlns(self, domain, form_label_filter=None):
key = [MVP.NAMESPACE, domain]
all_labels = FormLabelIndicatorDefinition.get_db().view(
'indicators/form_labels',
reduce=False,
startkey=key,
endkey=key + [{}],
).all()
for label in all_labels:
label_name = label['value']
if form_label_filter is not None and form_label_filter != label_name:
continue
xmlns = label['key'][-2]
print "\n\nGetting Forms of Type %s and XMLNS %s for domain %s" % (label_name, xmlns, domain)
relevant_forms = XFormInstance.get_db().view(
"reports_forms/all_forms",
reduce=True,
startkey=['submission xmlns', domain, xmlns],
endkey=['submission xmlns', domain, xmlns, {}],
).first()
num_forms = relevant_forms['value'] if relevant_forms else 0
form_ids = [r['id'] for r in XFormInstance.view(
"reports_forms/all_forms",
reduce=False,
include_docs=False,
startkey=['submission xmlns', domain, xmlns],
endkey=['submission xmlns', domain, xmlns, {}],
).all()]
print "Found %d forms with matching XMLNS %s" % (num_forms, xmlns)
relevant_indicators = FormIndicatorDefinition.get_all(
namespace=MVP.NAMESPACE,
domain=domain,
xmlns=xmlns
)
if relevant_indicators:
self._throttle_updates(
"Forms (TYPE: %s, XMLNS %s, DOMAIN: %s)" % (
label_name, xmlns, domain),
relevant_indicators, num_forms, domain,
form_ids, XFormInstance)
def update_indicators_for_case_type(self, case_type, domain):
print "\n\n\nFetching %s cases in domain %s...." % (case_type, domain)
relevant_indicators = CaseIndicatorDefinition.get_all(
namespace=MVP.NAMESPACE,
domain=domain,
case_type=case_type
)
if relevant_indicators:
num_cases = get_number_of_cases_in_domain(domain, type=case_type)
print ("\nFound the following Case Indicator Definitions "
"for Case Type %s in Domain %s") % (case_type, domain)
print "--%s\n" % "\n--".join([i.slug for i in relevant_indicators])
print "Found %d possible cases for update." % num_cases
case_ids = get_case_ids_in_domain(domain, type=case_type)
self._throttle_updates(
"Cases of type %s in %s" % (case_type, domain),
relevant_indicators, num_cases, domain, case_ids, CommCareCase)
def update_indicators(self, indicators, docs, domain):
def _update_doc(doc):
try:
is_update = doc.update_indicator(indicator)
if is_update:
sys.stdout.write("N")
else:
sys.stdout.write(".")
except ResourceNotFound:
sys.stdout.write("R")
except (DocumentMismatchError, DocumentNotInDomainError):
sys.stdout.write('-')
except Exception as e:
sys.stdout.write('!')
sys.stdout.flush()
for indicator in indicators:
print "Indicator %s v.%d, %s" % (indicator.slug, indicator.version, domain)
pool = Pool(POOL_SIZE)
for doc in docs:
pool.spawn(_update_doc, doc)
pool.join() # blocking
print "\n"
def _throttle_updates(self, document_type, indicators, total_docs, domain,
doc_ids, document_class, limit=300):
doc_ids = iter(doc_ids)
if self.start_at_record:
doc_ids = islice(doc_ids, self.start_at_record, None)
for skip in range(self.start_at_record, total_docs, limit):
print "\n\nUpdating %s %d to %d of %d\n" % (
document_type, skip, min(total_docs, skip + limit), total_docs)
matching_docs = map(
document_class.wrap,
iter_docs(document_class.get_db(), islice(doc_ids, limit))
)
self.update_indicators(indicators, matching_docs, domain)
print "Pausing..."
time.sleep(3)
print "Going..."
```
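`_throttle_updates` above processes document ids in fixed-size slices with a pause between batches so the couch views are not hammered. The same batching pattern, stripped of the couch and gevent specifics:

```python
import time
from itertools import islice


def throttled_batches(ids, limit=300, start_at=0, pause=3):
    """Yield successive lists of at most `limit` ids, sleeping between batches."""
    ids = iter(ids)
    if start_at:
        ids = islice(ids, start_at, None)
    while True:
        batch = list(islice(ids, limit))
        if not batch:
            break
        yield batch
        time.sleep(pause)

# for batch in throttled_batches(range(10), limit=4, pause=0):
#     ...  # [0, 1, 2, 3], then [4, 5, 6, 7], then [8, 9]
```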
#### File: pact/reports/patient_list.py
```python
from django.core.urlresolvers import NoReverseMatch
from django.utils import html
from corehq.apps.api.es import ReportCaseES, ReportXFormES
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.users.models import CommCareUser
from corehq.elastic import SIZE_LIMIT
from pact.enums import PACT_DOMAIN, PACT_HP_CHOICES, PACT_DOT_CHOICES, PACT_CASE_TYPE
from pact.reports import PactElasticTabularReportMixin
from pact.reports.dot import PactDOTReport
from pact.reports.patient import PactPatientInfoReport
from pact.utils import query_per_case_submissions_facet
class PactPrimaryHPField(BaseSingleOptionFilter):
slug = "primary_hp"
label = "PACT HPs"
default_text = "All CHWs"
@property
def options(self):
chws = list(self.get_chws())
return [(c['val'], c['text']) for c in chws]
@classmethod
def get_chws(cls):
users = CommCareUser.by_domain(PACT_DOMAIN)
for x in users:
#yield dict(val=x._id, text=x.raw_username)
yield dict(val=x.raw_username, text=x.raw_username)
# self.options = [dict(val=case['_id'], text="(%s) - %s" % (case['pactid'], case['name'])) for case in patient_cases]
class HPStatusField(BaseSingleOptionFilter):
slug = "hp_status"
label = "HP Status"
default_text = "All Active HP"
ANY_HP = "any_hp"
@property
def options(self):
options = [(self.ANY_HP, "All Active HP")]
options.extend(PACT_HP_CHOICES)
return options
class DOTStatus(BaseSingleOptionFilter):
slug = "dot_status"
label = "DOT Status"
default_text = "All"
ANY_DOT = "any_dot"
@property
def options(self):
options = [(self.ANY_DOT, "Any DOT")]
options.extend(PACT_DOT_CHOICES[:3])
return options
class PatientListDashboardReport(PactElasticTabularReportMixin):
name = "<NAME>"
slug = "patients"
ajax_pagination = True
asynchronous = True
default_sort = {"pactid": "asc"}
report_template_path = "reports/async/tabular.html"
flush_layout = True
fields = [
'pact.reports.patient_list.PactPrimaryHPField',
'pact.reports.patient_list.HPStatusField',
'pact.reports.patient_list.DOTStatus',
]
case_es = ReportCaseES(PACT_DOMAIN)
xform_es = ReportXFormES(PACT_DOMAIN)
def get_pact_cases(self):
query = self.case_es.base_query(start=0, size=None)
query['fields'] = ['_id', 'name', 'pactid.#value']
results = self.case_es.run_query(query)
for res in results['hits']['hits']:
yield res['fields']
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn("PACT ID", prop_name="pactid.#value"),
DataTablesColumn("Name", prop_name="name", sortable=False, span=3),
DataTablesColumn("Primary HP", prop_name="hp.#value"),
DataTablesColumn("Opened On", prop_name="opened_on"),
DataTablesColumn("Last Modified", prop_name="modified_on"),
DataTablesColumn("HP Status", prop_name="hp_status.#value"),
DataTablesColumn("DOT Status", prop_name='dot_status.#value'),
DataTablesColumn("Status", prop_name="closed"),
DataTablesColumn("Submissions", sortable=False),
)
return headers
def case_submits_facet_dict(self, limit):
query = query_per_case_submissions_facet(self.request.domain, limit=limit)
results = self.xform_es.run_query(query)
case_id_count_map = {}
for f in results['facets']['case_submissions']['terms']:
case_id_count_map[f['term']] = f['count']
return case_id_count_map
@property
def rows(self):
"""
Override this method to create a functional tabular report.
Returns 2D list of rows.
        [['row1'], ['row2']]
"""
def _format_row(row_field_dict):
yield row_field_dict.get("pactid.#value", '---').replace('_', ' ').title()
            yield self.pact_case_link(row_field_dict['_id'], row_field_dict.get("name", "---"))
yield row_field_dict.get("hp.#value", "---")
yield self.format_date(row_field_dict.get("opened_on"))
yield self.format_date(row_field_dict.get("modified_on"))
yield self.render_hp_status(row_field_dict.get("hp_status.#value"))
yield self.pact_dot_link(row_field_dict['_id'], row_field_dict.get("dot_status.#value"))
#for closed on, do two checks:
if row_field_dict.get('closed', False):
#it's closed
yield "Closed (%s)" % self.format_date(row_field_dict.get('closed_on'))
else:
yield "Active"
yield facet_dict.get(row_field_dict['_id'], 0)
res = self.es_results
        if 'error' in res:
pass
else:
#hack, do a facet query here
facet_dict = self.case_submits_facet_dict(SIZE_LIMIT)
for result in res['hits']['hits']:
yield list(_format_row(result['fields']))
@property
def es_results(self):
fields = [
"_id",
"name",
"pactid.#value",
"opened_on",
"modified_on",
"hp_status.#value",
"hp.#value",
"dot_status.#value",
"closed_on",
"closed"
]
full_query = self.case_es.base_query(terms={'type': PACT_CASE_TYPE}, fields=fields,
start=self.pagination.start,
size=self.pagination.count)
full_query['sort'] = self.get_sorting_block()
def status_filtering(slug, field, prefix, any_field, default):
if self.request.GET.get(slug, None) is not None:
field_status_filter_query = self.request.GET[slug]
if field_status_filter_query == "":
#silly double default checker here - set default or the any depending on preference
field_status_filter_query = default
if field_status_filter_query is None:
return
else:
if field_status_filter_query.startswith(prefix):
field_status_prefix = field_status_filter_query
elif field_status_filter_query == any_field:
field_status_prefix = prefix
else:
field_status_prefix = None
full_query['filter']['and'].append({"term": {field: field_status_filter_query.lower()}})
if field_status_prefix is not None:
field_filter = {"prefix": {field: field_status_prefix.lower()}}
full_query['filter']['and'].append(field_filter)
status_filtering(DOTStatus.slug, "dot_status.#value", "DOT", DOTStatus.ANY_DOT, None)
status_filtering(HPStatusField.slug, "hp_status.#value", "HP", HPStatusField.ANY_HP, HPStatusField.ANY_HP)
#primary_hp filter from the user filter
if self.request.GET.get(PactPrimaryHPField.slug, "") != "":
primary_hp_term = self.request.GET[PactPrimaryHPField.slug]
primary_hp_filter = {"term": {"hp.#value": primary_hp_term}}
full_query['filter']['and'].append(primary_hp_filter)
return self.case_es.run_query(full_query)
def pact_case_link(self, case_id, name):
try:
return html.mark_safe("<a class='ajax_dialog' href='%s'>%s</a>" % (
html.escape(
PactPatientInfoReport.get_url(*[self.domain]) + "?patient_id=%s" % case_id),
html.escape(name),
))
except NoReverseMatch:
return "%s (bad ID format)" % name
def render_hp_status(self, status):
if status is None or status == '':
return ''
else:
if status.lower() == 'discharged':
css = 'label'
else:
css = 'label label-info'
return '<span class="%s">%s</span>' % (css, status)
def pact_dot_link(self, case_id, status):
if status is None or status == '':
return ''
try:
return html.mark_safe("<span class='label label-info'>%s</span> <a class='ajax_dialog' href='%s'>Report</a>" % (
html.escape(status),
html.escape(
PactDOTReport.get_url(*[self.domain]) + "?dot_patient=%s" % case_id),
))
except NoReverseMatch:
return "%s (bad ID format)" % status
```
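The `status_filtering` closure above chooses between an exact `term` filter and a `prefix` filter (the "any HP" / "any DOT" options) before appending to the ES query. A standalone sketch of that three-way decision, returning the clause it would append:

```python
def status_filter(value, field, prefix, any_value, default=None):
    """Return the ES filter clause for a status GET parameter, or None to skip filtering.

    value     -- raw GET parameter ('' means "fall back to the default")
    prefix    -- e.g. 'DOT' or 'HP'; values starting with it are matched by prefix
    any_value -- sentinel meaning "anything with this prefix"
    """
    if value == "":
        value = default
    if value is None:
        return None
    if value.startswith(prefix):
        return {"prefix": {field: value.lower()}}
    if value == any_value:
        return {"prefix": {field: prefix.lower()}}
    return {"term": {field: value.lower()}}

# status_filter("any_hp", "hp_status.#value", "HP", "any_hp")
# -> {'prefix': {'hp_status.#value': 'hp'}}
```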
#### File: logistics/tests/utils.py
```python
from corehq.apps.commtrack.helpers import make_supply_point
from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.commtrack.tests.util import TEST_USER, TEST_DOMAIN, TEST_NUMBER, TEST_PASSWORD, TEST_BACKEND
from corehq.apps.users.models import CommCareUser
def bootstrap_user(loc, username=TEST_USER, domain=TEST_DOMAIN,
phone_number=TEST_NUMBER, password=<PASSWORD>,
backend=TEST_BACKEND, first_name='', last_name='',
home_loc=None, user_data=None,
):
user_data = user_data or {}
user = CommCareUser.create(
domain,
username,
password,
phone_numbers=[phone_number],
user_data=user_data,
first_name=first_name,
last_name=last_name
)
if home_loc == loc.site_code:
if not SupplyPointCase.get_by_location(loc):
make_supply_point(domain, loc)
user.set_location(loc)
user.save_verified_number(domain, phone_number, verified=True, backend_id=backend)
return CommCareUser.wrap(user.to_json())
```
#### File: custom/m4change/fields.py
```python
import datetime
from corehq.util.dates import iso_string_to_date
from dimagi.utils.dates import DateSpan
import json
from django.utils.translation import ugettext as _, ugettext_noop
from corehq.apps.reports.dont_use.fields import ReportField
class DateRangeField(ReportField):
name = ugettext_noop("Date Range")
slug = "datespan"
template = "m4change/fields/daterange.html"
inclusive = True
default_days = 30
def update_context(self):
self.context["datespan_name"] = self.name
range = self.request.GET.get('range', None)
if range is not None:
dates = str(range).split(_(' to '))
self.request.datespan.startdate = datetime.datetime.combine(
iso_string_to_date(dates[0]), datetime.time())
self.request.datespan.enddate = datetime.datetime.combine(
iso_string_to_date(dates[1]), datetime.time())
self.datespan = DateSpan.since(self.default_days, timezone=self.timezone, inclusive=self.inclusive)
if self.request.datespan.is_valid():
self.datespan.startdate = self.request.datespan.startdate
self.datespan.enddate = self.request.datespan.enddate
self.context['timezone'] = self.timezone.zone
self.context['datespan'] = self.datespan
report_labels = json.dumps({
'year_to_date': _('Year to Date'), 'last_month': _('Last Month'),
'last_quarter': _('Last Quarter'), 'last_two_quarters': _('Last Two Quarters'),
'last_three_quarters': _('Last Three Quarters'), 'last_year': _('Last Year'),
'last_two_years': _('Last Two Years'), 'last_three_years': _('Last Three Years'),
'last_four_years': _('Last Four Years')
})
self.context['report_labels'] = report_labels
self.context['separator'] = _(' to ')
class CaseSearchField(ReportField):
name = ugettext_noop("Case Search")
slug = "case_search"
template = "reports/filters/search.html"
def update_context(self):
self.search_query = self.request.GET.get("case_search", "")
self.context["search_query"] = self.search_query
self.context["label"] = _("Case Search")
```
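`DateRangeField` expects the `range` GET parameter as two ISO dates joined by the (translatable) `' to '` separator. A minimal, framework-free parser for that format (the real code goes through corehq's `iso_string_to_date`; `strptime` here is just a stand-in):

```python
from datetime import datetime, time


def parse_range(range_param, separator=" to "):
    """Parse '2015-01-01 to 2015-01-31' into (start, end) datetimes at midnight."""
    start_s, end_s = range_param.split(separator)

    def to_midnight(s):
        return datetime.combine(datetime.strptime(s, "%Y-%m-%d").date(), time())

    return to_midnight(start_s), to_midnight(end_s)

# parse_range("2015-01-01 to 2015-01-31")
# -> (datetime(2015, 1, 1, 0, 0), datetime(2015, 1, 31, 0, 0))
```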
#### File: custom/openclinica/utils.py
```python
from __future__ import absolute_import
from collections import namedtuple
from datetime import datetime, date, time
import logging
import re
from lxml import etree
import os
from django.conf import settings
import yaml
from corehq.util.quickcache import quickcache
from couchforms.models import XFormDeprecated
logger = logging.Logger(__name__)
class OpenClinicaIntegrationError(Exception):
pass
Item = namedtuple('Item', ('study_event_oid', 'form_oid', 'item_group_oid', 'item_oid'))
AdminDataUser = namedtuple('AdminDataUser', ('user_id', 'first_name', 'last_name'))
OpenClinicaUser = namedtuple('OpenClinicaUser', ('user_id', 'first_name', 'last_name', 'username', 'full_name'))
# CDISC OMD XML namespace map
odm_nsmap = {
'odm': "http://www.cdisc.org/ns/odm/v1.3",
'OpenClinica': "http://www.openclinica.org/ns/odm_ext_v130/v3.1",
'OpenClinicaRules': "http://www.openclinica.org/ns/rules/v3.1",
'xsi': "http://www.w3.org/2001/XMLSchema-instance",
}
def simplify(fancy):
"""
Replace dict-like data types with dicts, and list-like data types with lists
>>> from collections import defaultdict
>>> simplify(defaultdict(list, {'bacon': ['spam']}))
{'bacon': ['spam']}
>>> simplify(('ham',))
['ham']
>>> simplify({'spam'})
['spam']
"""
if hasattr(fancy, 'keys'):
return {simplify(k): simplify(fancy[k]) for k in fancy.keys()}
elif isinstance(fancy, unicode):
return fancy.encode('utf8')
elif isinstance(fancy, str):
return fancy
elif hasattr(fancy, '__iter__'):
return [simplify(i) for i in fancy]
else:
return fancy
@quickcache(['domain'])
def _get_question_items(domain):
"""
Return a dictionary of form_xmlns: {question_name: openclinica_item}
"""
file_path = os.path.join(settings.BASE_DIR, 'custom', 'openclinica', 'commcare_questions.yaml')
with file(file_path) as question_items_file:
question_items = yaml.load(question_items_file)
return question_items
def get_question_item(domain, form_xmlns, question):
"""
Returns an Item namedtuple given a CommCare form and question name
"""
question_items = _get_question_items(domain)
try:
se_oid, form_oid, ig_oid, item_oid = question_items[form_xmlns]['questions'][question]
return Item(se_oid, form_oid, ig_oid, item_oid)
except KeyError:
# Did an old form set the value of a question that no longer exists? Best to check that out.
logger.error('Unknown CommCare question "{}" found in form "{}"'.format(question, form_xmlns))
return None
except TypeError:
# CommCare question does not match an OpenClinica item. This happens with CommCare-only forms
return None
@quickcache(['domain'])
def get_study_metadata_string(domain):
"""
Return the study metadata for the given domain
"""
# For this first OpenClinica integration project, for the sake of simplicity, we are just fetching
# metadata from custom/openclinica/study_metadata.xml. In future, metadata must be stored for each domain.
metadata_filename = os.path.join(settings.BASE_DIR, 'custom', 'openclinica', 'study_metadata.xml')
with open(metadata_filename) as metadata_file:
return metadata_file.read()
def get_study_metadata(domain):
"""
Return the study metadata for the given domain as an ElementTree
"""
# We can't cache an ElementTree instance. Split this function from get_study_metadata_string() to cache the
# return value of get_study_metadata_string() when fetching via web service.
return etree.fromstring(get_study_metadata_string(domain))
def get_study_constant(domain, name):
"""
Return the study metadata of the given name for the given domain
"""
xpath_text = lambda xml, xpath: xml.xpath(xpath, namespaces=odm_nsmap)[0].text
xpath_xml = lambda xml, xpath: etree.tostring(xml.xpath(xpath, namespaces=odm_nsmap)[0])
func = {
'study_oid': lambda xml: xml.xpath('./odm:Study', namespaces=odm_nsmap)[0].get('OID'),
'study_name': lambda xml: xpath_text(xml, './odm:Study/odm:GlobalVariables/odm:StudyName'),
'study_description': lambda xml: xpath_text(xml, './odm:Study/odm:GlobalVariables/odm:StudyDescription'),
'protocol_name': lambda xml: xpath_text(xml, './odm:Study/odm:GlobalVariables/odm:ProtocolName'),
'study_xml': lambda xml: xpath_xml(xml, './odm:Study'),
'admin_data_xml': lambda xml: xpath_xml(xml, './odm:AdminData'),
}[name]
metadata_xml = get_study_metadata(domain)
return func(metadata_xml)
def get_item_measurement_unit(domain, item):
"""
Return the measurement unit OID for the given Item, or None
"""
xml = get_study_metadata(domain)
mu_ref = xml.xpath(
'./odm:Study/odm:MetaDataVersion/odm:ItemDef[@OID="{}"]/odm:MeasurementUnitRef'.format(item.item_oid),
namespaces=odm_nsmap)
return mu_ref[0].get('MeasurementUnitOID') if mu_ref else None
def get_study_event_name(domain, oid):
xml = get_study_metadata(domain)
return xml.xpath('./odm:Study/odm:MetaDataVersion/odm:StudyEventDef[@OID="{}"]'.format(oid),
namespaces=odm_nsmap)[0].get('Name')
def is_study_event_repeating(domain, oid):
xml = get_study_metadata(domain)
return xml.xpath('./odm:Study/odm:MetaDataVersion/odm:StudyEventDef[@OID="{}"]'.format(oid),
namespaces=odm_nsmap)[0].get('Repeating') == 'Yes'
def is_item_group_repeating(domain, oid):
xml = get_study_metadata(domain)
return xml.xpath('./odm:Study/odm:MetaDataVersion/odm:ItemGroupDef[@OID="{}"]'.format(oid),
namespaces=odm_nsmap)[0].get('Repeating') == 'Yes'
def mk_oc_username(cc_username):
"""
Makes a username that meets OpenClinica requirements from a CommCare username.
Strips off "@domain.name", replaces non-alphanumerics, and pads with "_" if less than 5 characters
>>> mk_oc_username('<EMAIL>')
'eric_idle'
>>> mk_oc_username('eric')
'eric_'
>>> mk_oc_username('I3#')
'I3___'
"""
username = cc_username.split('@')[0]
username = re.sub(r'[^\w]', '_', username)
if len(username) < 5:
username += '_' * (5 - len(username))
return username
@quickcache(['domain'])
def get_oc_users_by_name(domain):
# We have to look up OpenClinica users by name because usernames are excluded from study metadata
oc_users_by_name = {}
xml = get_study_metadata(domain)
admin = xml.xpath('./odm:AdminData', namespaces=odm_nsmap)[0]
for user_e in admin:
try:
first_name = user_e.xpath('./odm:FirstName', namespaces=odm_nsmap)[0].text
except IndexError:
first_name = None
try:
last_name = user_e.xpath('./odm:LastName', namespaces=odm_nsmap)[0].text
except IndexError:
last_name = None
user_id = user_e.get('OID')
oc_users_by_name[(first_name, last_name)] = AdminDataUser(user_id, first_name, last_name)
return oc_users_by_name
def get_oc_user(domain, cc_user):
"""
Returns OpenClinica user details for corresponding CommCare user (CouchUser)
"""
oc_users_by_name = get_oc_users_by_name(domain)
oc_user = oc_users_by_name.get((cc_user.first_name, cc_user.last_name))
return OpenClinicaUser(
user_id=oc_user.user_id,
username=mk_oc_username(cc_user.username),
first_name=oc_user.first_name,
last_name=oc_user.last_name,
full_name=' '.join((oc_user.first_name, oc_user.last_name)),
) if oc_user else None
def oc_format_date(answer):
"""
Format CommCare datetime answers for OpenClinica
>>> from datetime import datetime
>>> answer = datetime(2015, 8, 19, 19, 8, 15)
>>> oc_format_date(answer)
'2015-08-19 19:08:15'
"""
if isinstance(answer, datetime):
return answer.isoformat(sep=' ')
if isinstance(answer, date) or isinstance(answer, time):
return answer.isoformat()
return answer
def originals_first(forms):
"""
Return original (deprecated) forms before edited versions
"""
def get_previous_versions(form_id):
form_ = XFormDeprecated.get(form_id)
if getattr(form_, 'deprecated_form_id', None):
return get_previous_versions(form_.deprecated_form_id) + [form_]
else:
return [form_]
for form in forms:
if getattr(form, 'deprecated_form_id', None):
for previous in get_previous_versions(form.deprecated_form_id):
yield previous
yield form
```
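The helpers above all follow the same pattern: parse the ODM study metadata with lxml and query it with namespaced XPath via `odm_nsmap`. A self-contained illustration of that pattern against a tiny inline ODM fragment (the fragment and its OID are invented for the example):

```python
from lxml import etree

odm_nsmap = {'odm': "http://www.cdisc.org/ns/odm/v1.3"}

ODM_SNIPPET = """<ODM xmlns="http://www.cdisc.org/ns/odm/v1.3">
  <Study OID="S_DEMO">
    <GlobalVariables>
      <StudyName>Demo Study</StudyName>
    </GlobalVariables>
  </Study>
</ODM>"""

root = etree.fromstring(ODM_SNIPPET)
study_oid = root.xpath('./odm:Study', namespaces=odm_nsmap)[0].get('OID')
study_name = root.xpath('./odm:Study/odm:GlobalVariables/odm:StudyName',
                        namespaces=odm_nsmap)[0].text
# study_oid == 'S_DEMO', study_name == 'Demo Study'
```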
#### File: custom/openlmis/commtrack.py
```python
import logging
from django.dispatch import Signal
from corehq.apps.commtrack.helpers import make_supply_point
from corehq.apps.hqcase.dbaccessors import \
get_supply_point_case_in_domain_by_id
from corehq.apps.programs.models import Program
from corehq.apps.products.models import Product
from corehq.apps.domain.models import Domain
from corehq.apps.locations.models import Location
from corehq.apps.users.models import CommCareUser
from custom.api.utils import apply_updates
from custom.openlmis.api import OpenLMISEndpoint
from custom.openlmis.exceptions import BadParentException, OpenLMISAPIException
from corehq.apps.commtrack import const
from collections import defaultdict
from custom.requisitions.models import RequisitionCase
requisition_approved = Signal(providing_args=["requisitions"])
requisition_receipt = Signal(providing_args=["requisitions"])
def bootstrap_domain(domain):
project = Domain.get_by_name(domain)
if project.commtrack_settings and project.commtrack_settings.openlmis_config.is_configured:
endpoint = OpenLMISEndpoint.from_config(project.commtrack_settings.openlmis_config)
for f in endpoint.get_all_facilities():
try:
sync_facility_to_supply_point(domain, f)
            except OpenLMISAPIException:
logging.exception('Problem syncing facility %s' % f.code)
for program in endpoint.get_all_programs(include_products=True):
sync_openlmis_program(domain, program)
def get_supply_point(domain, facility_or_code):
facility_code = facility_or_code if isinstance(facility_or_code, basestring) else facility_or_code.code
return get_supply_point_case_in_domain_by_id(domain, facility_code)
def sync_facility_to_supply_point(domain, facility):
supply_point = get_supply_point(domain, facility)
facility_dict = {
'domain': domain,
'location_type': facility.type,
'external_id': facility.code,
'name': facility.name,
'site_code': facility.code, # todo: do they have a human readable code?
'latitude': facility.latitude,
'longitude': facility.longitude,
}
parent_sp = None
if facility.parent_id:
parent_sp = get_supply_point(domain, facility.parent_id)
if not parent_sp:
raise BadParentException('No matching supply point with code %s found' % facility.parent_id)
if supply_point is None:
if parent_sp:
facility_dict['parent'] = parent_sp.location
facility_loc = Location(**facility_dict)
facility_loc.save()
return make_supply_point(domain, facility_loc)
else:
facility_loc = supply_point.location
if parent_sp and facility_loc.parent_id != parent_sp.location._id:
raise BadParentException('You are trying to move a location. This is currently not supported.')
should_save = apply_updates(facility_loc, facility_dict)
if should_save:
facility_loc.save()
return supply_point
def get_product(domain, lmis_product):
return Product.get_by_code(domain, lmis_product.code)
def get_program(domain, lmis_program):
program = Program.get_by_code(domain, lmis_program.code)
return program
def sync_openlmis_program(domain, lmis_program):
program = get_program(domain, lmis_program)
if program is None:
program = Program(domain=domain)
program.name = lmis_program.name
program.code = lmis_program.code.lower()
program._doc_type_attr = "Program"
program.save()
if lmis_program.products:
for lmis_product in lmis_program.products:
sync_openlmis_product(domain, program, lmis_product)
return program
def sync_openlmis_product(domain, program, lmis_product):
product = get_product(domain, lmis_product)
product_dict = {
'domain': domain,
'name': lmis_product.name,
'code': lmis_product.code,
'unit': str(lmis_product.unit),
'description': lmis_product.description,
'category': lmis_product.category,
'program_id': program._id,
}
if product is None:
product = Product(**product_dict)
product.save()
else:
if apply_updates(product, product_dict):
product.save()
return product
def supply_point_to_json(supply_point):
base = {
'agentCode': supply_point.location.site_code,
'agentName': supply_point.name,
'active': not supply_point.closed,
}
if len(supply_point.location.lineage) > 0:
parent_facility_code = Location.get(supply_point.location.lineage[0]).external_id
base['parentFacilityCode'] = parent_facility_code
# todo phone number
return base
def sync_stock_data_to_openlmis(submission, openlmis_endpoint):
return openlmis_endpoint.submit_requisition(submission)
def sync_supply_point_to_openlmis(supply_point, openlmis_endpoint, create=True):
"""
https://github.com/OpenLMIS/documents/blob/master/4.1-CreateVirtualFacility%20API.md
{
"agentCode":"A2",
"agentName":"AgentVinod",
"parentFacilityCode":"F10",
"phoneNumber":"0099887766",
"active":"true"
}
"""
json_sp = supply_point_to_json(supply_point)
if create:
return openlmis_endpoint.create_virtual_facility(json_sp)
else:
return openlmis_endpoint.update_virtual_facility(supply_point.location.site_code, json_sp)
def sync_requisition_from_openlmis(domain, requisition_id, openlmis_endpoint):
cases = []
send_notification = False
lmis_requisition_details = openlmis_endpoint.get_requisition_details(requisition_id)
if lmis_requisition_details:
rec_cases = [c for c in RequisitionCase.get_by_external_id(domain, str(lmis_requisition_details.id)) if c.type == const.REQUISITION_CASE_TYPE]
if len(rec_cases) == 0:
products = [product for product in lmis_requisition_details.products if product.skipped == False]
for product in products:
pdt = Product.get_by_code(domain, product.code.lower())
if pdt:
case = lmis_requisition_details.to_requisition_case(pdt._id)
case.save()
if case.requisition_status == 'AUTHORIZED':
send_notification = True
cases.append(case)
else:
for case in rec_cases:
before_status = case.requisition_status
if apply_updates(case, lmis_requisition_details.to_dict(case.product_id)):
after_status = case.requisition_status
case.save()
if before_status in ['INITIATED', 'SUBMITTED'] and after_status == 'AUTHORIZED':
send_notification = True
cases.append(case)
return cases, send_notification
else:
return None, False
def submit_requisition(requisition, openlmis_endpoint):
return openlmis_endpoint.submit_requisition(requisition)
def approve_requisition(requisition_cases, openlmis_endpoint):
    groups = defaultdict(list)
for case in requisition_cases:
groups[case.external_id].append(case)
for group in groups.keys():
        if group:
cases = groups.get(group)
products = []
approver = CommCareUser.get(cases[0].user_id)
for rec in cases:
product = Product.get(rec.product_id)
products.append({"productCode": product.code, "quantityApproved": rec.amount_approved})
approve_data = {
"approverName": approver.human_friendly_name,
"products": products
}
openlmis_endpoint.approve_requisition(approve_data, group)
def delivery_update(requisition_cases, openlmis_endpoint):
order_id = requisition_cases[0].get_case_property("order_id")
products = []
for rec in requisition_cases:
product = Product.get(rec.product_id)
products.append({'productCode': product.code, 'quantityReceived': rec.amount_received})
delivery_data = {'podLineItems': products}
return openlmis_endpoint.confirm_delivery(order_id, delivery_data)
```
#### File: custom/opm/case_calcs.py
```python
import datetime
import fluff
class VhndAvailabilityCalc(fluff.Calculator):
def dates_from_forms(self, case, condition):
"""
Condition should accept a form and return a boolean
"""
available = False
for form in case.get_forms():
vhnd_date = form.form.get("date_vhnd_held")
if isinstance(vhnd_date, (datetime.datetime, datetime.date)):
if condition(form):
available = True
yield vhnd_date
if not available:
yield [datetime.date.min, 0]
def dates_available(self, case, prop):
return self.dates_from_forms(case, lambda form: form.form.get(prop) == '1')
@fluff.date_emitter
def available(self, case):
return self.dates_from_forms(case, lambda form: True)
@fluff.date_emitter
def asha_present(self, case):
return self.dates_available(case, "attend_ASHA")
@fluff.date_emitter
def anm_present(self, case):
return self.dates_available(case, "attend_ANM")
@fluff.date_emitter
def cmg_present(self, case):
return self.dates_available(case, "attend_cmg")
@fluff.date_emitter
def ifa_available(self, case):
return self.dates_available(case, "stock_ifatab")
@fluff.date_emitter
def adult_scale_available(self, case):
return self.dates_available(case, "stock_bigweighmach")
@fluff.date_emitter
def child_scale_available(self, case):
return self.dates_available(case, "stock_childweighmach")
@fluff.date_emitter
def adult_scale_functional(self, case):
return self.dates_available(case, "func_bigweighmach")
@fluff.date_emitter
def child_scale_functional(self, case):
return self.dates_available(case, "func_childweighmach")
@fluff.date_emitter
def ors_available(self, case):
return self.dates_available(case, "stock_ors")
@fluff.date_emitter
def zn_available(self, case):
return self.dates_available(case, "stock_zntab")
@fluff.date_emitter
def measles_vacc_available(self, case):
return self.dates_available(case, "stock_measlesvacc")
```
#### File: custom/opm/health_status.py
```python
from django.utils.translation import ugettext_lazy as _
from dimagi.utils.decorators.memoized import memoized
class AWCHealthStatus(object):
"""
Takes a set of OPMCaseRow objects, all from the same AWC, and performs
aggregations on it.
"""
method_map = [
# method, header, help_text, count_method
('awc_code',
_("AWC Code"),
"",
'no_denom'),
('awc_name',
_("AWC Name"),
"",
'no_denom'),
('gp',
_("Gram Panchayat"),
"",
'no_denom'),
('beneficiaries',
_("Registered Beneficiaries"),
_("Beneficiaries registered with BCSP"),
'no_denom'),
('pregnancies',
_("Registered pregnant women"),
_("Pregnant women registered with BCSP"),
'beneficiaries'),
('mothers',
_("Registered mothers"),
_("Mothers registered with BCSP"),
'beneficiaries'),
('children',
_("Registered children"),
_("Children below 3 years of age registered with BCSP"),
'beneficiaries'),
('eligible_by_fulfillment',
_("Eligible for payment upon fulfillment of cash conditions"),
_("Registered beneficiaries eligilble for cash payment for the month "
"upon fulfillment of cash conditions"),
'beneficiaries'),
('eligible_by_default',
_("Eligible for payment upon absence of services"),
_("Registered beneficiaries eligilble for cash payment for the month upon absence of services at VHND"),
'beneficiaries'),
('eligible',
_("Eligible for payment"),
_("Registered beneficiaries eligilble for cash payment for the month"),
'beneficiaries'),
('total_payment',
_("Total cash payment"),
_("Total cash payment made to registered beneficiaries for the month"),
'no_denom'),
('preg_vhnd',
_("Pregnant women attended VHND"),
_("Registered pregnant women who attended VHND for the month"),
'pregnancies'),
('child_vhnd',
_("Children attended VHND"),
_("Registered children below 3 years of age who attended VHND for the month"),
'children'),
('beneficiary_vhnd',
_("Beneficiaries attended VHND"),
_("Registered beneficiaries who attended VHND for the month"),
'beneficiaries'),
('ifa_tablets',
_("Received at least 30 IFA tablets"),
_("Registered pregnant women (6 months pregnant) who received at least "
"30 IFA tablets in second trimester"),
'preg_6_months'),
('preg_weighed_6',
_("Weight monitored in second trimester"),
_("Registered pregnant women (6 months pregnant) who got their weight monitored in second trimester"),
'preg_6_months'),
('preg_weighed_9',
_("Weight monitored in third trimester"),
_("Registered pregnant women (9 months pregnant) who got their weight monitored in third trimester"),
'preg_9_months'),
('child_weighed',
_("Weight monitored at birth"),
_("Registered children (3 months old) whose weight was monitored at birth"),
'child_3_months'),
('children_registered',
_("Child birth registered"),
_("Registered children (6 months old) whose birth was registered in the first 6 months after birth"),
'child_6_months'),
('child_growth_monitored_0_3',
_("Growth monitoring when 0-3 months old"),
_("Registered Children (3 months old) who have "
"attended at least one growth monitoring session between the age 0-3 months"),
'child_0_3_months'),
('child_growth_monitored_4_6',
_("Growth Monitoring when 4-6 months old"),
_("Registered Children (6 months old) who have "
"attended at least one growth monitoring session between the age 4-6 months"),
'child_4_6_months'),
('child_growth_monitored_7_9',
_("Growth Monitoring when 7-9 months old"),
_("Registered Children (9 months old) who have "
"attended at least one growth monitoring session between the age 7-9 months"),
'child_7_9_months'),
('child_growth_monitored_10_12',
_("Growth Monitoring when 10-12 months old"),
_("Registered Children (12 months old) who have "
"attended at least one growth monitoring session between the age 10-12 months"),
'child_10_12_months'),
('child_growth_monitored_13_15',
_("Growth Monitoring when 13-15 months old"),
_("Registered Children (15 months old) who have "
"attended at least one growth monitoring session between the age 13-15 months"),
'child_13_15_months'),
('child_growth_monitored_16_18',
_("Growth Monitoring when 16-18 months old"),
_("Registered Children (18 months old) who have "
"attended at least one growth monitoring session between the age 16-18 months"),
'child_16_18_months'),
('child_growth_monitored_19_21',
_("Growth Monitoring when 19-21 months old"),
_("Registered Children (21 months old) who have "
"attended at least one growth monitoring session between the age 19-21 months"),
'child_19_21_months'),
('child_growth_monitored_22_24',
_("Growth Monitoring when 22-24 months old"),
_("Registered Children (24 months old) who have "
"attended at least one growth monitoring session between the age 22-24 months"),
'child_22_24_months'),
('incidence_of_diarrhea',
_("Incidence of diarrhea"),
_("Incidence of diarrhea"),
'beneficiaries'),
('ors_received',
_("Received ORS and Zinc treatment for diarrhoea"),
_("Registered children who received ORS and Zinc treatment if he/she contracts diarrhoea"),
'has_diarhea'),
('child_breastfed',
_("Exclusively breastfed for first 6 months"),
_("Registered children (6 months old) who have been exclusively breastfed for first 6 months"),
'child_6_months'),
('measles_vaccine',
_("Received Measles vaccine"),
_("Registered children (12 months old) who have received Measles vaccine"),
'child_12_months'),
('vhnd_held',
_("VHND organised"),
_("Whether VHND was organised at AWC for the month"),
'one'),
('adult_scale_available',
_("Adult Weighing Machine Available"),
_("Whether adult weighing machine was available for the month"),
'one'),
('adult_scale_functional',
_("Adult Weighing Machine Functional"),
_("Whether adult weighing machine was functional for the month"),
'one'),
('child_scale_available',
_("Child Weighing Machine Available"),
_("Whether child weighing machine was available for the month"),
'one'),
('child_scale_functional',
_("Child Weighing Machine Functional"),
_("Whether child weighing machine was functional for the month"),
'one'),
('anm_present',
_("ANM Present"),
_("Whether ANM present at VHND for the month"),
'one'),
('asha_present',
_("ASHA Present"),
_("Whether ASHA present at VHND for the month"),
'one'),
('cmg_present',
_("CMG Present"),
_("Whether CMG present at VHND for the month"),
'one'),
('ifa_stock_available',
_("Stock of IFA tablets"),
_("Whether AWC has enough stock of IFA tablets for the month"),
'one'),
('ors_stock_available',
_("Stock of ORS packets"),
_("Whether AWC has enough stock of ORS packets for the month"),
'one'),
('zinc_stock_available',
_("Stock of ZINC tablets"),
_("Whether AWC has enough stock of Zinc Tablets for the month"),
'one'),
('measles_stock_available',
_("Stock of Measles Vaccine"),
_("Whether AWC has enough stock of measles vaccine for the month"),
'one'),
('birth_spacing_bonus',
_("Eligilble for Birth Spacing bonus"),
_("Registered beneficiaries eligible for birth spacing bonus for the month"),
'beneficiaries'),
('nutritional_status_sam',
_("Severely underweight"),
_("Registered children severely underweight (very low weight for age) for the month"),
'children'),
('nutritional_status_mam',
_("Underweight"),
_("Registered children underweight (low weight for age) for the month"),
'children'),
('nutritional_status_normal',
_("Normal weight for age"),
_("Registered children with normal weight for age for the month"),
'children'),
('nutritional_bonus',
_("Eligilble for Nutritional status bonus"),
_("Registered beneficiaries eligible for nutritonal status bonus for the month"),
'children'),
('closed_pregnants',
_("Pregnant women cases closed"),
_("Registered pregnant women cases closed for the month"),
'beneficiaries'),
('closed_mothers',
_("Mother cases closed"),
_("Registered mother cases closed for the month"),
'mothers'),
('closed_children',
_("Children cases closed"),
_("Registered children cases closed for the month"),
'children'),
]
# TODO possible general approach in the future:
# subclass OPMCaseRow specifically for this report, and add in indicators to
# our hearts' content. This would allow us to override definitions of
# indicators based on their meanings in THIS report.
def __init__(self, cases, awc, awc_code, gp, block):
# Some of the cases are second or third children of the same mother
# include that distinction here
self.all_cases = cases
self.primary_cases = [c for c in cases if not c.is_secondary]
self.awc_name = awc
self.awc_code = awc_code
self.gp = gp
self.block = block
@property
def no_denom(self):
return None
@property
@memoized
def beneficiaries(self):
return self.pregnancies + self.children
@property
@memoized
def pregnancies(self):
return len([c for c in self.all_cases if c.status == 'pregnant'])
@property
@memoized
def mothers(self):
return len([c for c in self.primary_cases if c.status == 'mother'])
@property
def children(self):
return sum([c.raw_num_children for c in self.primary_cases])
@property
@memoized
def eligible_by_fulfillment(self):
if self.block is not None and self.block == 'Khijarsarai':
return 'NA'
return len([c for c in self.all_cases
if c.vhnd_available and c.all_conditions_met])
@property
@memoized
def eligible_by_default(self):
if self.block is not None and self.block == 'Khijarsarai':
return 'NA'
return len([c for c in self.all_cases
if not c.vhnd_available and c.all_conditions_met])
@property
def eligible(self):
if self.block is not None and self.block == 'Khijarsarai':
return 'NA'
return self.eligible_by_default + self.eligible_by_fulfillment
@property
def total_payment(self):
if self.block is not None and self.block == 'Khijarsarai':
return 'NA'
return sum([c.cash_amt for c in self.all_cases])
@property
def preg_vhnd(self):
return len([c for c in self.all_cases if c.preg_attended_vhnd])
@property
def child_vhnd(self):
return len([c for c in self.all_cases if c.child_attended_vhnd])
@property
def beneficiary_vhnd(self):
return len([c for c in self.all_cases if c.child_attended_vhnd or c.preg_attended_vhnd])
@property
def ifa_tablets(self):
return len([c for c in self.all_cases if c.preg_received_ifa])
@property
def preg_6_months(self):
return len([c for c in self.all_cases if c.preg_month == 6])
@property
def preg_9_months(self):
return len([c for c in self.all_cases if c.preg_month == 9])
@property
def preg_6_or_9_months(self):
return len([c for c in self.all_cases if c.preg_month in (6, 9)])
@property
def preg_weighed_6(self):
return len([c for c in self.all_cases if c.preg_weighed_trimestered(6)])
@property
def preg_weighed_9(self):
return len([c for c in self.all_cases if c.preg_weighed_trimestered(9)])
@property
def child_weighed(self):
return len([c for c in self.all_cases if c.child_weighed_once])
@property
def child_3_months(self):
return len([c for c in self.all_cases if c.child_age == 3])
@property
def ors_received(self):
return len([c for c in self.all_cases if c.child_with_diarhea_received_ors])
@property
def has_diarhea(self):
return len([c for c in self.all_cases if c.child_has_diarhea])
@property
def children_registered(self):
return len([c for c in self.all_cases if c.child_birth_registered])
@property
def child_6_months(self):
return len([c for c in self.all_cases if c.child_age == 6])
@property
def child_growth_monitored_0_3(self):
return len([c for c in self.all_cases if c.child_growth_calculated_in_window(3)])
@property
def child_0_3_months(self):
        # children currently aged 0-3 months (inclusive)
return len([c for c in self.all_cases
if c.child_age and c.child_age in range(0, 4)])
@property
def child_growth_monitored_4_6(self):
return len([c for c in self.all_cases if c.child_growth_calculated_in_window(6)])
@property
def child_4_6_months(self):
return len([c for c in self.all_cases
if c.child_age and c.child_age in range(4, 7)])
@property
def child_growth_monitored_7_9(self):
return len([c for c in self.all_cases if c.child_growth_calculated_in_window(9)])
@property
def child_7_9_months(self):
return len([c for c in self.all_cases
if c.child_age and c.child_age in range(7, 10)])
@property
def child_growth_monitored_10_12(self):
return len([c for c in self.all_cases if c.child_growth_calculated_in_window(12)])
@property
def child_10_12_months(self):
return len([c for c in self.all_cases
if c.child_age and c.child_age in range(10, 13)])
@property
def child_growth_monitored_13_15(self):
return len([c for c in self.all_cases if c.child_growth_calculated_in_window(15)])
@property
def child_13_15_months(self):
return len([c for c in self.all_cases
if c.child_age and c.child_age in range(13, 16)])
@property
def child_growth_monitored_16_18(self):
return len([c for c in self.all_cases if c.child_growth_calculated_in_window(18)])
@property
def child_16_18_months(self):
return len([c for c in self.all_cases
if c.child_age and c.child_age in range(16, 19)])
@property
def child_growth_monitored_19_21(self):
return len([c for c in self.all_cases if c.child_growth_calculated_in_window(21)])
@property
def child_19_21_months(self):
return len([c for c in self.all_cases
if c.child_age and c.child_age in range(19, 22)])
@property
def child_growth_monitored_22_24(self):
return len([c for c in self.all_cases if c.child_growth_calculated_in_window(24)])
@property
def child_22_24_months(self):
return len([c for c in self.all_cases
if c.child_age and c.child_age in range(22, 25)])
@property
def child_breastfed(self):
return len([c for c in self.all_cases if c.child_breastfed])
@property
def measles_vaccine(self):
return len([c for c in self.all_cases if c.child_received_measles_vaccine])
@property
def child_12_months(self):
return len([c for c in self.all_cases if c.child_age == 12])
@property
def vhnd_held(self):
return 1 if self.all_cases and self.all_cases[0].vhnd_available else 0
def service_available(self, service):
return (1 if self.all_cases and
self.all_cases[0].is_service_available(service, 1) else 0)
@property
def anm_present(self):
return self.service_available('attend_ANM')
@property
def asha_present(self):
return self.service_available('attend_ASHA')
@property
def cmg_present(self):
return self.service_available('attend_cmg')
@property
def adult_scale_available(self):
return self.service_available('big_weight_machine_avail')
@property
def adult_scale_functional(self):
return self.service_available('func_bigweighmach')
@property
def child_scale_available(self):
return self.service_available('child_weight_machine_avail')
@property
def child_scale_functional(self):
return self.service_available('func_childweighmach')
@property
def ifa_stock_available(self):
return self.service_available('stock_ifatab')
@property
def ors_stock_available(self):
return self.service_available('stock_ors')
@property
def zinc_stock_available(self):
return self.service_available('stock_zntab')
@property
def measles_stock_available(self):
return self.service_available('stock_measlesvacc')
@property
def birth_spacing_bonus(self):
if self.block is not None and self.block == 'Khijarsarai':
return 'NA'
return len([c for c in self.all_cases if c.birth_spacing_years])
@property
def nutritional_bonus(self):
if self.block is not None and self.block == 'Khijarsarai':
return 'NA'
return len([c for c in self.all_cases if c.weight_grade_normal])
@property
def nutritional_status_sam(self):
return len([c for c in self.all_cases if c.weight_grade_status('SAM')])
@property
def nutritional_status_mam(self):
return len([c for c in self.all_cases if c.weight_grade_status('MAM')])
@property
def nutritional_status_normal(self):
return len([c for c in self.all_cases if c.weight_grade_status('normal')])
@property
def closed_pregnants(self):
return len([c for c in self.all_cases if c.status == 'pregnant' and c.closed_in_reporting_month])
@property
def closed_mothers(self):
return len([c for c in self.primary_cases if c.status == 'mother' and c.closed_in_reporting_month])
@property
def closed_children(self):
return sum([c.num_children for c in self.primary_cases
if c.status == 'mother' and c.closed_in_reporting_month])
@property
def incidence_of_diarrhea(self):
return len([c for c in self.all_cases if c.child_has_diarhea])
```
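Each `method_map` entry names a value property and a denominator property (`count_method`) on `AWCHealthStatus`, so a report row is presumably just a `getattr` walk over that table. A compact sketch of that driving loop, with a stub object standing in for a real status instance (the consuming report code is not shown above, so treat this as an assumption about how the table is used):

```python
class StubStatus(object):
    """Stand-in exposing a couple of the properties named in method_map."""
    beneficiaries = 40
    pregnancies = 12
    no_denom = None


method_map = [
    # (method, header, help_text, count_method)
    ('beneficiaries', "Registered Beneficiaries", "", 'no_denom'),
    ('pregnancies', "Registered pregnant women", "", 'beneficiaries'),
]


def build_row(status):
    row = []
    for method, header, help_text, count_method in method_map:
        value = getattr(status, method)       # numerator for this indicator
        denom = getattr(status, count_method)  # denominator, or None for raw counts
        row.append((header, value, denom))
    return row

# build_row(StubStatus())
# -> [('Registered Beneficiaries', 40, None), ('Registered pregnant women', 12, 40)]
```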
#### File: opm/tests/test_prior_month_references.py
```python
from custom.opm.tests import OPMCaseReportTestBase, MockDataProvider, OPMCase, MockCaseRow
from dimagi.utils.dates import add_months_to_date
class TestPriorMonthReferences(OPMCaseReportTestBase):
@property
def edd(self):
return add_months_to_date(self.report_date, 3)
@property
def owner_id(self):
return 'mock_owner_id'
def _make_row(self, data_provider, forms=None):
forms = forms or []
case = OPMCase(
forms=forms,
edd=self.edd,
owner_id=self.owner_id,
)
return MockCaseRow(case, self.report, data_provider)
def test_available_this_month(self):
data_provider = MockDataProvider(explicit_map={
self.owner_id: {
'vhnd_available': [self.report_date]
}
})
row = self._make_row(data_provider)
self.assertTrue(row.vhnd_available)
self.assertFalse(row.last_month_row.vhnd_available)
def test_available_last_month(self):
data_provider = MockDataProvider(explicit_map={
self.owner_id: {
'vhnd_available': [add_months_to_date(self.report_date, -1)]
}
})
row = self._make_row(data_provider)
self.assertFalse(row.vhnd_available)
self.assertTrue(row.last_month_row.vhnd_available)
```

#### File: custom/opm/user_calcs.py
```python
import fluff
from couchforms.models import XFormInstance
from dimagi.utils.parsing import json_format_date
from .constants import *
def user_date_group(form, value=1):
return {
'date': form.received_on,
'value': value,
'group_by': [
form.domain,
form.metadata.userID,
],
}
class WomenRegistered(fluff.Calculator):
"""
"No. of women registered under BCSP"
Value represents the number of women delivered by that case
"""
@fluff.date_emitter
def total(self, case):
if case.type == "Pregnancy":
yield case.opened_on
class ChildrenRegistered(fluff.Calculator):
"""
"No. of children registered under BCSP"
Value represents the number of children delivered by that case
"""
@fluff.date_emitter
def total(self, case):
if case.type == "Pregnancy":
total = 0
for form in case.get_forms():
if form.xmlns == DELIVERY_XMLNS:
children = form.form.get('live_birth_amount')
if children:
total += int(children)
            yield {'date': case.opened_on, 'value': total}
class ServiceForms(fluff.Calculator):
"""
"Submission of Service Availability form"
Number of Service Availability Forms Filled Out in Time Period
"""
@fluff.date_emitter
def total(self, form):
if form.xmlns == VHND_XMLNS:
yield user_date_group(form)
class GrowthMonitoring(fluff.Calculator):
"""
"No. of Growth monitoring Sections Filled for eligible children"
Sum of form property (in child followup form) where child1_child_growthmon,
child2_child_growthmon, and child3_child_growthmon = '1' in the time period.
Within a form, if multiple = '1', give xtimes the amount. "Union" this so
that if ever '1' within the time period, that this triggers payment
"""
@fluff.date_emitter
def total(self, form):
if form.xmlns in CHILDREN_FORMS:
# child_<n>/child<n>_child_growthmon == 1 if weight was monitored this month
total = 0
for child_num in list('123'):
xpath = ('form/child_{num}/child{num}_child_growthmon'
.format(num=child_num))
if form.xpath(xpath) == '1':
total += 1
if total:
yield {
'date': form.received_on,
'value': total,
'group_by': [
form.domain,
form.metadata.userID,
],
}
def get_result(self, key, date_range=None, reduce=True):
# This block is pretty much a stripped copy-paste from fluff
# except I needed to make sure the results were unique by case
assert isinstance(date_range, tuple)
start, end = date_range
shared_key = [self.fluff._doc_type] + key + [self.slug, 'total']
q = self.fluff.view(
'fluff/generic',
startkey=shared_key + [json_format_date(start)],
endkey=shared_key + [json_format_date(end)],
reduce=False,
).all()
def strip(id_string):
prefix = '%s-' % self.fluff.__name__
assert id_string.startswith(prefix)
return id_string[len(prefix):]
cases = {}
for form in q:
form_id = strip(form['id'])
case_id = XFormInstance.get(form_id).form['case']['@case_id']
cases[case_id] = max(cases.get(case_id, 0), form['value'])
return {'total': sum(cases.values())}
```
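`GrowthMonitoring.get_result` above dedupes the per-form emits so that each case contributes only its maximum value within the period (the "union" semantics described in the class docstring). The core of that reduction, separated from the couch view plumbing:

```python
def max_per_case(form_values):
    """form_values: iterable of (case_id, value) pairs, one per form emit."""
    best = {}
    for case_id, value in form_values:
        # keep only the largest value seen for each case in the date range
        best[case_id] = max(best.get(case_id, 0), value)
    return {'total': sum(best.values())}

# max_per_case([('case-a', 1), ('case-a', 3), ('case-b', 2)])
# -> {'total': 5}
```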
#### File: custom/requisitions/signals.py
```python
from collections import defaultdict
import itertools
from django.dispatch import Signal
from corehq.apps.commtrack.const import REQUISITION_CASE_TYPE, RequisitionStatus
from corehq.apps.commtrack.models import CommtrackConfig
from corehq.apps.sms.api import send_sms_to_verified_number
from custom.openlmis.commtrack import requisition_receipt, requisition_approved
from custom.requisitions.models import RequisitionCase
from custom.requisitions.utils import get_notification_recipients, get_notification_message
from dimagi.utils import create_unique_filter
requisition_modified = Signal(providing_args=['cases'])
def send_notifications(xform, cases):
# todo: this should be removed with requisitions. the only things that depend on it currently
# are custom code
# TODO: fix circular imports
# for now the only notifications are for requisitions that were touched.
# todo: if we wanted to include previously requested items we could do so
# by either polling for other open requisitions here, or by ensuring that
# they get touched by the commtrack case processing.
requisitions = [RequisitionCase.wrap(case._doc) for case in cases if case.type == REQUISITION_CASE_TYPE]
if requisitions:
by_status = defaultdict(list)
for r in requisitions:
by_status[r.requisition_status].append(r)
req_config = CommtrackConfig.for_domain(requisitions[0].domain).requisition_config
# since each state transition might trigger a different person to be notified
for s, reqs in by_status.items():
next_action = req_config.get_next_action(RequisitionStatus.to_action_type(s))
if next_action:
# we could make this even more customizable by specifying it per requisition
# but that would get even messier in terms of constructing the messages
# so we'll just compose one message per status type now, and then send
# it to everyone who should be notified.
to_notify = filter(
create_unique_filter(lambda u: u._id),
itertools.chain(*(get_notification_recipients(next_action, r) for r in reqs))
)
msg = get_notification_message(next_action, reqs)
for u in to_notify:
phone = u.get_verified_number()
if phone:
send_sms_to_verified_number(phone, msg)
def raise_events(xform, cases):
"""
Raise requisition events associated with cases
"""
# todo: nothing calls this and it can be removed today, though openlmis code depends
# on it being called during case processing
requisition_cases = [RequisitionCase.wrap(c._doc) for c in cases if c.type == REQUISITION_CASE_TYPE]
if requisition_cases and requisition_cases[0].requisition_status == RequisitionStatus.APPROVED:
requisition_approved.send(sender=None, requisitions=requisition_cases)
if requisition_cases and requisition_cases[0].requisition_status == RequisitionStatus.RECEIVED:
requisition_receipt.send(sender=None, requisitions=requisition_cases)
if requisition_cases and requisition_cases[0].requisition_status == RequisitionStatus.REQUESTED:
requisition_modified.send(sender=None, cases=requisition_cases)
```
#### File: custom/requisitions/utils.py
```python
from corehq.apps.commtrack.const import RequisitionActions
from corehq.apps.users.cases import get_owning_users, get_owner_id
from custom.requisitions.const import UserRequisitionRoles
def should_notify_user(user, next_action_type):
return user.user_data.get(UserRequisitionRoles.get_user_role(next_action_type), False)
def get_notification_recipients(next_action, requisition):
# given a status and list of requisitions, get the exhaustive list of
# people to notify about the requisition entering that status.
users = get_owning_users(get_owner_id(requisition))
if len(users) == 1:
return users
return [u for u in users if should_notify_user(u, next_action.action_type)]
def get_notification_message(next_action, requisitions):
# NOTE: it'd be weird if this was None but for now we won't fail hard
guessed_location = requisitions[0].get_location()
summary = ', '.join(r.sms_format() for r in requisitions)
requester = requisitions[0].get_requester()
return notification_template(next_action.action).format(
name=requester.full_name if requester else "Unknown",
summary=summary,
loc=guessed_location.site_code if guessed_location else "<loc code>",
keyword=next_action.keyword,
)
def notification_template(action):
# this had to be a method to do translations
from django.utils.translation import ugettext as _
return {
RequisitionActions.APPROVAL: _('{name} has requested the following supplies: {summary}. please respond "{keyword} {loc}" to approve.'),
RequisitionActions.FULFILL: _('{name} should be supplied with the following supplies: {summary}. please respond "{keyword} {loc}" to confirm the order.'),
RequisitionActions.RECEIPTS: _('your order of {summary} is ready to be picked up. please respond with a "{keyword}" message to report receipts.'),
}[action]
```
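To make the template mechanics concrete, a hedged example of the string formatting that `get_notification_message` performs; every value below is invented.
```python
# Illustrative only: the APPROVAL template from notification_template(), filled
# in the same way get_notification_message() does. All values are made up.
template = ('{name} has requested the following supplies: {summary}. '
            'please respond "{keyword} {loc}" to approve.')
print(template.format(name="A. Nurse", summary="10 x ORS, 2 x zinc",
                      keyword="approve", loc="clinic-07"))
```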
#### File: succeed/reports/patient_interactions.py
```python
from collections import OrderedDict
from django.utils import html
from corehq.apps.users.models import CouchUser
from custom.succeed.reports.patient_task_list import PatientTaskListReport
from custom.succeed.reports import *
from custom.succeed.reports.patient_details import PatientDetailsReport
from dimagi.utils.decorators.memoized import memoized
from custom.succeed.utils import is_cm, is_chw
RISK_FACTOR_CONFIG = OrderedDict()
RISK_FACTOR_CONFIG['Status:'] = ['risk-factor_at_status', 'risk-factor_bp_status',
'risk-factor_cholesterol_status', 'risk-factor_psycho-social_status',
'risk-factor_diabetes_status', 'risk-factor_smoking_status']
RISK_FACTOR_CONFIG['CHW Protocol Indicated:'] = ['risk-factor_at_chw', 'risk-factor_bp_chw',
'risk-factor_cholesterol_chw', 'risk-factor_psycho-social_chw',
'risk-factor_diabetes_chw', 'risk-factor_smoking_chw']
RISK_FACTOR_CONFIG['CHW Protocol Count:'] = ['CHW_antithrombotic_count', 'CHW_bp_count', 'CHW_cholesterol_count',
'CHW_psycho-social_count', 'CHW_diabetes_count', 'CHW_smoking_count']
RISK_FACTOR_CONFIG['Notes:'] = ['risk-factor_at_notes', 'risk-factor_bp_notes', 'risk-factor_cholesterol_notes',
'risk-factor_psycho-social_notes', 'risk-factor_diabetes_notes',
'risk-factor_smoking_notes']
class PatientInteractionsReport(PatientDetailsReport):
slug = "patient_interactions"
name = 'Patient Interactions'
@property
def report_context(self):
self.report_template_path = "patient_interactions.html"
ret = super(PatientInteractionsReport, self).report_context
self.update_app_info()
ret['view_mode'] = 'interactions'
ret['problem_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_PD_MODULE, PD1, ret['patient']['_id'])
ret['huddle_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_HUD_MODULE, HUD2, ret['patient']['_id'])
ret['cm_phone_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_CM_MODULE, CM6_PHONE, ret['patient']['_id'])
ret['cm_visits_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_CM_MODULE, CM4, ret['patient']['_id'])
ret['anti_thrombotic_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2AM, ret['patient']['_id'])
ret['blood_pressure_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2BPM, ret['patient']['_id'])
ret['cholesterol_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2CHM, ret['patient']['_id'])
ret['diabetes_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2DIABM, ret['patient']['_id'])
ret['depression_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2DEPM, ret['patient']['_id'])
ret['smoking_cessation_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2SCM, ret['patient']['_id'])
ret['other_meds_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_MEDICATIONS_MODULE, PD2OM, ret['patient']['_id'])
ret['interaction_table'] = []
for visit_key, visit in enumerate(VISIT_SCHEDULE):
if visit['target_date_case_property'] in ret['patient'] and \
ret['patient'][visit['target_date_case_property']]:
try:
target_date = (ret['patient'][visit['target_date_case_property']])
except TypeError:
target_date = _("Bad Date Format!")
else:
target_date = EMPTY_FIELD
received_date = EMPTY_FIELD
for completed in visit['completed_date']:
if completed in ret['patient']:
received_date = ret['patient'][completed]
interaction = {
'url': '',
'name': visit['visit_name'],
'target_date': target_date,
'received_date': received_date,
}
if visit['show_button']:
interaction['url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
visit['module_idx'], visit['xmlns'], ret['patient']['_id'])
ret['interaction_table'].append(interaction)
medication = []
for med_prop in MEDICATION_DETAILS:
if med_prop == 'MEDS_diabetes_prescribed':
oral = getattr(ret['patient'], 'MEDS_diabetes-oral_prescribed', None)
insulin = getattr(ret['patient'], 'MEDS_diabetes-insulin_prescribed', None)
if oral == 'yes':
to_append = oral
elif insulin == 'yes':
to_append = insulin
else:
to_append = EMPTY_FIELD
medication.append(to_append)
else:
medication.append(getattr(ret['patient'], med_prop, EMPTY_FIELD))
ret['medication_table'] = medication
user = self.request.couch_user
ret['patient_task_list_url'] = html.escape(
PatientTaskListReport.get_url(*[ret['patient']["domain"]]) +
"?patient_id=%s&task_status=%s" % (ret['patient']["_id"], "open"))
if is_cm(user):
ret['create_new_task_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_CREATE_TASK_MODULE, CM_NEW_TASK,
ret['patient']['_id'])
elif is_chw(user):
ret['create_new_task_url'] = self.get_form_url(self.chw_app_dict, self.latest_chw_build,
CHW_APP_TASK_MODULE, CM_NEW_TASK, ret['patient']['_id'])
ret['view_appointments_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_APPOINTMENTS_MODULE, AP2,
parent_id=ret['patient']['_id'])
ret['add_appointments_url'] = self.get_form_url(self.cm_app_dict, self.latest_cm_build,
CM_APP_PD_MODULE, AP1,
case_id=ret['patient']['_id'])
# Risk Factor Table
rows = []
for key, val in RISK_FACTOR_CONFIG.iteritems():
data = [key]
for v in val:
case_data = ret['patient'][v] if v in ret['patient'] else ''
if key == 'Status:':
if case_data:
case_data = case_data.replace('-', ' ').title()
else:
case_data = EMPTY_FIELD
data.append(case_data)
rows.append(data)
ret['risk_factor_table'] = rows
return ret
@memoized
def get_user(self, user_id):
return CouchUser.get(user_id)
```
#### File: world_vision/sqldata/mother_sqldata.py
```python
from sqlagg import CountUniqueColumn, AliasColumn
from sqlagg.columns import SimpleColumn, SumColumn
from sqlagg.filters import LTE, AND, GTE, GT, EQ, NOTEQ, OR, IN
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.sqlreport import DatabaseColumn, AggregateColumn
from custom.world_vision.sqldata import BaseSqlData
from custom.world_vision.sqldata.main_sqldata import AnteNatalCareServiceOverview, DeliveryPlaceDetails
class MotherRegistrationDetails(BaseSqlData):
table_name = "fluff_WorldVisionMotherFluff"
slug = 'mother_registration_details'
title = 'Mother Registration Details'
@property
def filters(self):
return super(MotherRegistrationDetails, self).filters[1:]
@property
def rows(self):
from custom.world_vision import MOTHER_INDICATOR_TOOLTIPS
result = []
for column in self.columns:
result.append([{'sort_key': column.header, 'html': column.header,
'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['mother_registration_details'], column.slug)},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug]}])
return result
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Number')])
@property
def columns(self):
columns = [
DatabaseColumn("Total mothers registered ever", CountUniqueColumn('doc_id', alias="total")),
]
if 'startdate' not in self.config and 'enddate' not in self.config or 'startdate' not in self.config and 'enddate' in self.config:
columns.extend([
DatabaseColumn("Total open mother cases",
CountUniqueColumn('doc_id',
alias="no_date_opened",
filters=self.filters + [EQ('closed_on', 'empty')]
)
),
DatabaseColumn("Total closed mother cases",
CountUniqueColumn('doc_id',
alias="no_date_closed",
filters=self.filters + [NOTEQ('closed_on', 'empty')]
)
),
DatabaseColumn("New registrations during last 30 days",
CountUniqueColumn('doc_id',
alias="no_date_new_registrations",
filters=self.filters + [AND([GTE('opened_on', "last_month"), LTE('opened_on', "today")])]
)
)
])
else:
columns.extend([
DatabaseColumn(
"Mother cases open at end of period", CountUniqueColumn(
'doc_id', alias="opened",
filters=self.filters + [AND([LTE('opened_on', "stred"), OR([EQ('closed_on', 'empty'),
GT('closed_on', "stred")])])]
)
),
DatabaseColumn(
"Mother cases closed during period", CountUniqueColumn(
'doc_id', alias="closed",
filters=self.filters + [AND([GTE('closed_on', "strsd"), LTE('closed_on', "stred")])]
)
),
DatabaseColumn(
"Total mothers followed during period", CountUniqueColumn(
'doc_id', alias="followed",
filters=self.filters + [AND([LTE('opened_on', "stred"), OR([EQ('closed_on', 'empty'),
GTE('closed_on', "strsd")])])]
)
),
DatabaseColumn(
"Total pregnant", CountUniqueColumn(
'doc_id', alias="total_pregnant",
filters=self.filters + [AND([LTE('opened_on', "stred"),
OR([EQ('closed_on', 'empty'), GTE('closed_on', "strsd")]),
EQ('mother_state', 'pregnant_mother_type')])]
)
),
DatabaseColumn(
"New registrations during time period", CountUniqueColumn(
'doc_id', alias="new_registrations",
filters=self.filters + [AND([LTE('opened_on', "stred"), GTE('opened_on', "strsd")])]
)
)
])
return columns
class ClosedMotherCasesBreakdown(BaseSqlData):
table_name = "fluff_WorldVisionMotherFluff"
slug = 'closed_mother_cases-breakdown'
title = 'Closed Mother Cases Breakdown'
show_total = True
total_row_name = "Mother cases closed during the time period"
chart_title = 'Closed Maternal Cases'
show_charts = True
chart_x_label = ''
chart_y_label = ''
chart_only = True
@property
def group_by(self):
return ['reason_for_mother_closure']
@property
def rows(self):
from custom.world_vision import REASON_FOR_CLOSURE_MAPPING
return self._get_rows(REASON_FOR_CLOSURE_MAPPING, super(ClosedMotherCasesBreakdown, self).rows)
@property
def filters(self):
filter = super(ClosedMotherCasesBreakdown, self).filters[1:]
if 'strsd' in self.config:
filter.append(GTE('closed_on', 'strsd'))
if 'stred' in self.config:
filter.append(LTE('closed_on', 'stred'))
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Reason for closure'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def columns(self):
return [
DatabaseColumn("Reason for closure", SimpleColumn('reason_for_mother_closure')),
DatabaseColumn("Number", CountUniqueColumn('doc_id'))
]
class PregnantMotherBreakdownByTrimester(BaseSqlData):
table_name = "fluff_WorldVisionMotherFluff"
slug = 'pregnant_mother_by_trimester'
title = 'Pregnant Woman Breakdown by Trimester'
chart_title = 'Pregnant Mother Visits'
show_total = True
total_row_name = "Total pregnant "
show_charts = True
chart_x_label = ''
chart_y_label = ''
chart_only = True
def percent_fn(self, y):
x = self.data['trimester_1'] + self.data['trimester_2'] + self.data['trimester_3']
return "%(p).0f%%" % \
{
"p": (100 * float(y or 0) / float(x or 1))
}
@property
def filters(self):
filter = super(PregnantMotherBreakdownByTrimester, self).filters
filter.append(EQ('mother_state', 'pregnant_mother_type'))
filter.append(NOTEQ('edd', 'empty'))
return filter
@property
def rows(self):
result = []
for column in self.columns:
percent = self.percent_fn(self.data[column.slug])
result.append([{'sort_key': column.header, 'html': column.header},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug]},
{'sort_key': 'percentage', 'html': percent}]
)
return result
@property
def columns(self):
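        # trimester boundaries are counted back from the expected delivery date (edd):
        # more than 196 days (28 weeks) to edd = 1st trimester, 84-196 days = 2nd,
        # 84 days (12 weeks) or less = 3rd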
return [
DatabaseColumn("Trimester 1",
CountUniqueColumn('doc_id',
alias="trimester_1", filters=self.filters + [GT('edd', "today_plus_196")]
)
),
DatabaseColumn("Trimester 2",
CountUniqueColumn('doc_id',
alias="trimester_2",
filters=self.filters + [AND([LTE('edd', "today_plus_196"), GT('edd', "today_plus_84")])]
)
),
DatabaseColumn("Trimester 3",
CountUniqueColumn('doc_id',
alias="trimester_3", filters=self.filters + [LTE('edd', 'today_plus_84')]
)
)
]
class AnteNatalCareServiceOverviewExtended(AnteNatalCareServiceOverview):
slug = 'ante_natal_care_service_overview_extended'
show_charts = True
chart_x_label = ''
chart_y_label = ''
chart_only = True
@property
def rows(self):
from custom.world_vision import MOTHER_INDICATOR_TOOLTIPS
result = [[{'sort_key': self.columns[0].header, 'html': self.columns[0].header},
{'sort_key': self.data[self.columns[0].slug], 'html': self.data[self.columns[0].slug]},
{'sort_key': 'n/a', 'html': 'n/a'},
{'sort_key': 'n/a', 'html': 'n/a'}]]
for i in range(1,15):
result.append([{'sort_key': self.columns[i].header, 'html': self.columns[i].header,
'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['ante_natal_care_service_details'], self.columns[i].slug)},
{'sort_key': self.data[self.columns[i].slug], 'html': self.data[self.columns[i].slug]},
{'sort_key': self.data[self.columns[i + 14].slug], 'html': self.data[self.columns[i + 14].slug],
'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['ante_natal_care_service_details'], self.columns[i+ 14].slug)},
{'sort_key': self.percent_fn(self.data[self.columns[i + 14].slug], self.data[self.columns[i].slug]),
'html': self.percent_fn(self.data[self.columns[i + 14].slug], self.data[self.columns[i].slug])}])
return result
@property
def columns(self):
return [
DatabaseColumn("Total pregnant", CountUniqueColumn('doc_id', alias="total_pregnant")),
DatabaseColumn("No ANC", CountUniqueColumn('doc_id', alias="no_anc",
filters=self.filters + [NOTEQ('anc_1', 'yes')])),
DatabaseColumn("ANC1", CountUniqueColumn('doc_id', alias="anc_1",
filters=self.filters + [EQ('anc_1', 'yes')])),
DatabaseColumn("ANC2", CountUniqueColumn('doc_id', alias="anc_2",
filters=self.filters + [EQ('anc_2', 'yes')])),
DatabaseColumn("ANC3", CountUniqueColumn('doc_id', alias="anc_3",
filters=self.filters + [EQ('anc_3', 'yes')])),
DatabaseColumn("ANC4", CountUniqueColumn('doc_id', alias="anc_4",
filters=self.filters + [EQ('anc_4', 'yes')])),
DatabaseColumn("TT1", CountUniqueColumn('doc_id', alias="tt_1",
filters=self.filters + [EQ('tt_1', 'yes')])),
DatabaseColumn("TT2", CountUniqueColumn('doc_id', alias="tt_2",
filters=self.filters + [EQ('tt_2', 'yes')])),
DatabaseColumn("TT Booster", CountUniqueColumn('doc_id', alias="tt_booster",
filters=self.filters + [EQ('tt_booster', 'yes')])),
DatabaseColumn("TT Complete",
CountUniqueColumn('doc_id', alias="tt_completed",
filters=self.filters + [OR([EQ('tt_2', 'yes'),
EQ('tt_booster', 'yes')])])),
DatabaseColumn("IFA received", CountUniqueColumn('doc_id', alias="ifa_tablets",
filters=self.filters + [EQ('iron_folic', 'yes')])),
DatabaseColumn("100 IFA consumed",
CountUniqueColumn('doc_id', alias="100_tablets",
filters=self.filters[1:-1] + [AND([EQ('completed_100_ifa', 'yes'),
GTE('delivery_date', 'strsd'),
LTE('delivery_date', 'stred')])])),
DatabaseColumn("Clinically anemic mothers",
CountUniqueColumn('doc_id', alias="clinically_anemic",
filters=self.filters + [EQ('anemia_signs', 'yes')])),
DatabaseColumn("Number of pregnant mother referrals due to danger signs",
CountUniqueColumn('doc_id', alias="danger_signs",
filters=self.filters + [EQ('currently_referred', 'yes')])),
DatabaseColumn("Knows closest health facility",
CountUniqueColumn('doc_id', alias="knows_closest_facility",
filters=self.filters + [EQ('knows_closest_facility', 'yes')])),
DatabaseColumn("No ANC Total Eligible",
CountUniqueColumn('doc_id', alias="no_anc_eligible",
filters=self.filters + [LTE('edd', 'today_plus_196')])),
DatabaseColumn("ANC1 Total Eligible",
CountUniqueColumn('doc_id', alias="anc_1_eligible",
filters=self.filters + [LTE('edd', 'today_plus_196')])),
DatabaseColumn("ANC2 Total Eligible",
CountUniqueColumn('doc_id', alias="anc_2_eligible",
filters=self.filters + [AND([EQ('anc_1', 'yes'),
LTE('edd', 'today_plus_112')])])),
DatabaseColumn("ANC3 Total Eligible",
CountUniqueColumn('doc_id', alias="anc_3_eligible",
filters=self.filters + [AND([EQ('anc_2', 'yes'),
LTE('edd', 'today_plus_56')])])),
DatabaseColumn("ANC4 Total Eligible",
CountUniqueColumn('doc_id', alias="anc_4_eligible",
filters=self.filters + [AND([EQ('anc_3', 'yes'),
LTE('edd', 'today_plus_35')])])),
DatabaseColumn("TT1 Total Eligible",
CountUniqueColumn('doc_id', alias="tt_1_eligible",
filters=self.filters + [NOTEQ('previous_tetanus', 'yes')])),
DatabaseColumn("TT2 Total Eligible", CountUniqueColumn('doc_id', alias="tt_2_eligible",
filters=self.filters + [EQ('tt_1', 'yes')])),
DatabaseColumn("TT Booster Total Eligible",
CountUniqueColumn('doc_id', alias="tt_booster_eligible",
filters=self.filters + [EQ('previous_tetanus', 'yes')])),
DatabaseColumn("TT Completed (TT2 or Booster) Total Eligible",
CountUniqueColumn('doc_id', alias="tt_completed_eligible",
filters=self.filters + [OR([EQ('tt_1', 'yes'),
EQ('previous_tetanus', 'yes')])])),
DatabaseColumn("Taking IFA tablets Total Eligible",
CountUniqueColumn('doc_id', alias="ifa_tablets_eligible")),
DatabaseColumn("Completed 100 IFA tablets Total Eligible",
CountUniqueColumn('doc_id', alias="100_tablets_eligible",
filters=self.filters[1:-1] + [AND([GTE('delivery_date', 'strsd'),
LTE('delivery_date', 'stred')])])),
DatabaseColumn("Clinically anemic mothers Total Eligible",
CountUniqueColumn('doc_id', alias="clinically_anemic_eligible")),
DatabaseColumn("Number of mother referrals due to danger signs Total Eligible",
CountUniqueColumn('doc_id', alias="danger_signs_eligible")),
DatabaseColumn("Know closest health facility Total Eligible",
CountUniqueColumn('doc_id', alias="knows_closest_facility_eligible"))
]
class DeliveryMothersIds(BaseSqlData):
table_name = "fluff_WorldVisionMotherFluff"
@property
def filters(self):
filter = super(DeliveryMothersIds, self).filters[1:]
if 'strsd' in self.config:
filter.append(GTE('delivery_date', 'strsd'))
if 'stred' in self.config:
filter.append(LTE('delivery_date', 'stred'))
return filter
@property
def group_by(self):
return ['doc_id']
@property
def columns(self):
return [
DatabaseColumn("Mother ID", SimpleColumn('doc_id'))
]
class DeliveryLiveBirthDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'delivery_live_birth_details'
title = ''
show_charts = True
chart_x_label = ''
chart_y_label = ''
show_total = True
total_row_name = "Total live births"
chart_title = 'Live Births'
accordion_start = False
accordion_end = False
chart_only = True
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def filters(self):
self.config['mother_ids'] = tuple(DeliveryMothersIds(config=self.config).data.keys()) + ('',)
return [IN('mother_id', 'mother_ids')]
@property
def columns(self):
return [
DatabaseColumn("Live birth (Male)",
CountUniqueColumn('doc_id', alias='girls', filters=self.filters + [EQ('gender', 'female')])
),
DatabaseColumn("Live birth (Female)",
CountUniqueColumn('doc_id', alias='boys', filters=self.filters + [EQ('gender', 'male')])
)
]
@property
def rows(self):
total = sum(v if v else 0 for v in self.data.values())
result = []
for column in self.columns:
percent = self.percent_fn(total, self.data[column.slug])
result.append([{'sort_key': column.header, 'html': column.header},
{'sort_key': self.data[column.slug] if self.data[column.slug] else 0,
'html': self.data[column.slug] if self.data[column.slug] else 0},
{'sort_key': 'percentage', 'html': percent}
])
return result
class DeliveryStillBirthDetails(BaseSqlData):
table_name = "fluff_WorldVisionMotherFluff"
slug = 'delivery_still_birth_details'
title = ''
accordion_start = False
accordion_end = True
@property
def filters(self):
filter = super(DeliveryStillBirthDetails, self).filters[1:]
if 'strsd' in self.config:
filter.append(GTE('delivery_date', 'strsd'))
if 'stred' in self.config:
filter.append(LTE('delivery_date', 'stred'))
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn(''), DataTablesColumn('Number')])
@property
def columns(self):
return [
DatabaseColumn("Still births",
SumColumn('number_of_children_born_dead_total')
),
DatabaseColumn("Abortions",
CountUniqueColumn('doc_id', alias="abortions", filters=self.filters + [EQ('reason_for_mother_closure', 'abortion')]),
),
]
@property
def rows(self):
from custom.world_vision import MOTHER_INDICATOR_TOOLTIPS
result = []
for column in self.columns:
result.append([{'sort_key': column.header, 'html': column.header,
'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['delivery_details'], column.slug)},
{'sort_key': self.data[column.slug] if self.data[column.slug] else 0,
'html': self.data[column.slug] if self.data[column.slug] else 0}]
)
return result
class PostnatalCareOverview(BaseSqlData):
table_name = "fluff_WorldVisionMotherFluff"
slug = 'postnatal_care_overview'
title = 'Postnatal Care Overview'
show_charts = True
chart_title = 'PNC Visits'
chart_x_label = ''
chart_y_label = ''
accordion_end = False
chart_only = True
@property
def filters(self):
filter = super(PostnatalCareOverview, self).filters[1:]
if 'strsd' in self.config:
filter.append(GTE('delivery_date', 'strsd'))
if 'stred' in self.config:
filter.append(LTE('delivery_date', 'stred'))
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Number'),
DataTablesColumn('Total Eligible'), DataTablesColumn('Percentage')])
@property
def rows(self):
from custom.world_vision import MOTHER_INDICATOR_TOOLTIPS
result = []
for i in range(0,4):
result.append([{'sort_key': self.columns[i].header, 'html': self.columns[i].header,
'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['postnatal_care_details'], self.columns[i].slug)},
{'sort_key': self.data[self.columns[i].slug], 'html': self.data[self.columns[i].slug]},
{'sort_key': self.data[self.columns[i + 4].slug], 'html': self.data[self.columns[i + 4].slug],
'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['postnatal_care_details'], self.columns[i+4].slug)},
{'sort_key': self.percent_fn(self.data[self.columns[i + 4].slug], self.data[self.columns[i].slug]),
'html': self.percent_fn(self.data[self.columns[i + 4].slug], self.data[self.columns[i].slug])}])
return result
@property
def columns(self):
return [
DatabaseColumn(
"PNC in 48 hours",
CountUniqueColumn('doc_id', alias="pnc_1", filters=self.filters + [EQ('pp_1_done', 'yes')]),
),
DatabaseColumn(
"PNC in 2-4 days",
CountUniqueColumn('doc_id', alias="pnc_2", filters=self.filters + [EQ('pp_2_done', 'yes')]),
),
DatabaseColumn(
"PNC in 5-7",
CountUniqueColumn('doc_id', alias="pnc_3", filters=self.filters + [EQ('pp_3_done', 'yes')]),
),
DatabaseColumn(
"PNC in 21-42 days",
CountUniqueColumn('doc_id', alias="pnc_4", filters=self.filters + [EQ('pp_4_done', 'yes')]),
),
DatabaseColumn(
"PNC 1 visits Total Eligible",
CountUniqueColumn('doc_id', alias="pnc_1_eligible",
filters=self.filters + [AND([NOTEQ('delivery_date', 'empty'),
LTE('delivery_date', 'today')])]),
),
DatabaseColumn("PNC 2 visits Total Eligible",
CountUniqueColumn('doc_id', alias="pnc_2_eligible",
filters=self.filters + [AND([NOTEQ('delivery_date', 'empty'), LTE('delivery_date', 'today_minus_2')])]),
),
DatabaseColumn("PNC 3 visits Total Eligible",
CountUniqueColumn('doc_id', alias="pnc_3_eligible",
filters=self.filters + [AND([NOTEQ('delivery_date', 'empty'), LTE('delivery_date', 'today_minus_25')])]),
),
DatabaseColumn("PNC 4 visits Total Eligible",
CountUniqueColumn('doc_id', alias="pnc_4_eligible",
filters=self.filters + [AND([NOTEQ('delivery_date', 'empty'), LTE('delivery_date', 'today_minus_21')])]),
)
]
class CauseOfMaternalDeaths(BaseSqlData):
table_name = "fluff_WorldVisionMotherFluff"
slug = 'Cause_of_maternal_deaths'
title = 'Cause of Maternal Deaths'
show_total = True
total_row_name = "Total Mother Deaths"
show_charts = True
chart_x_label = ''
chart_y_label = ''
chart_title = 'Mother Deaths'
table_only = True
@property
def group_by(self):
return ['cause_of_death_maternal']
@property
def rows(self):
from custom.world_vision import MOTHER_DEATH_MAPPING
return self._get_rows(MOTHER_DEATH_MAPPING, super(CauseOfMaternalDeaths, self).rows)
@property
def filters(self):
filter = super(CauseOfMaternalDeaths, self).filters[1:]
filter.append(EQ('reason_for_mother_closure', 'death'))
if 'strsd' in self.config:
filter.append(GTE('date_of_mother_death', 'strsd'))
if 'stred' in self.config:
filter.append(LTE('date_of_mother_death', 'stred'))
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Maternal Death'), DataTablesColumn('Number'),
DataTablesColumn('Percentage')])
@property
def columns(self):
return [
DatabaseColumn("Reason", SimpleColumn('cause_of_death_maternal')),
DatabaseColumn("Number", CountUniqueColumn('doc_id'))
]
class FamilyPlanningMethods(BaseSqlData):
table_name = "fluff_WorldVisionMotherFluff"
slug = 'family_planning_methods'
title = 'Family Planning Methods'
show_total = True
total_row_name = "Total Families who reported using Family Planning"
show_charts = True
chart_title = 'Family Planning Methods'
chart_x_label = ''
chart_y_label = ''
@property
def group_by(self):
return ['fp_method']
@property
def rows(self):
from custom.world_vision import FAMILY_PLANNING_METHODS
return self._get_rows(FAMILY_PLANNING_METHODS, super(FamilyPlanningMethods, self).rows)
@property
def filters(self):
filter = super(FamilyPlanningMethods, self).filters
filter.append(NOTEQ('fp_method', 'empty'))
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Method'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def columns(self):
return [
DatabaseColumn("Method", SimpleColumn('fp_method')),
DatabaseColumn("Number", CountUniqueColumn('doc_id'))
]
class DeliveryPlaceDetailsExtended(DeliveryPlaceDetails):
show_charts = True
chart_title = 'Delivery Place'
chart_x_label = ''
chart_y_label = ''
slug = 'delivery_place_details_extended'
@property
def columns(self):
columns = super(DeliveryPlaceDetailsExtended, self).columns
additional_columns = [
DatabaseColumn("Home deliveries",
CountUniqueColumn('doc_id', alias="home_deliveries",
filters=self.filters + [OR([EQ('place_of_birth', 'home'),
EQ('place_of_birth', 'on_route')])])),
DatabaseColumn("Other places",
CountUniqueColumn('doc_id', alias="other_places",
filters=self.filters + [OR([EQ('place_of_birth', 'empty'),
EQ('place_of_birth', 'other')])]))
]
columns.extend(additional_columns)
return columns
class DeliveryPlaceMotherDetails(DeliveryPlaceDetails):
title = ''
show_charts = True
chart_x_label = ''
chart_y_label = ''
chart_title = 'Delivery Place Mother'
slug = 'delivery_place_mother_details'
accordion_start = False
accordion_end = False
@property
def columns(self):
return [
DatabaseColumn("Total Deliveries (with/without outcome)",
CountUniqueColumn('doc_id', alias="total_delivery", filters=self.filters),
),
DatabaseColumn("Normal deliveries",
CountUniqueColumn('doc_id', alias="normal_deliveries",
filters=self.filters + [EQ('type_of_delivery', 'normal_delivery')])),
DatabaseColumn("Caesarean deliveries",
CountUniqueColumn('doc_id', alias="caesarean_deliveries",
filters=self.filters + [EQ('type_of_delivery', 'cesarean_delivery')])),
DatabaseColumn("Delivery type unknown",
CountUniqueColumn('doc_id', alias="unknown",
filters=self.filters + [OR([EQ('type_of_delivery', 'empty'),
EQ('type_of_delivery', 'unknown_delivery')])]))
]
@property
def rows(self):
return super(DeliveryPlaceMotherDetails, self).rows[1:]
class NumberOfPNCVisits(BaseSqlData):
table_name = "fluff_WorldVisionMotherFluff"
slug = 'number_of_pnc_visits'
title = ''
show_total = True
total_row_name = "Total mothers who delivered more than 42 days ago"
show_charts = True
chart_title = 'PNC Visits'
chart_x_label = ''
chart_y_label = ''
accordion_start = False
accordion_end = True
@property
def rows(self):
result = []
rows = super(NumberOfPNCVisits, self).rows
counter = {k: 0 for k in range(0, 5)}
for row in rows:
counter[row[-1]['html']] += 1
for k, v in counter.iteritems():
percent = self.percent_fn(len(rows), v)
result.append([{'sort_key': "Mothers with %d PNC visits within 42 days of delivery" % k,
'html': "Mothers with %d PNC visits within 42 days of delivery" % k},
{'sort_key': v, 'html': v},
{'sort_key': 'percentage', 'html': percent}])
return result
@property
def group_by(self):
return ['doc_id', 'pp_1_done', 'pp_2_done', 'pp_3_done', 'pp_4_done']
@property
def filters(self):
filters = super(NumberOfPNCVisits, self).filters[1:]
filters.append(AND([NOTEQ('delivery_date', 'empty'), LTE('delivery_date', 'today_minus_42')]))
return filters
@property
def columns(self):
def format_pnc_count(*args):
return sum([1 if arg == 'yes' else 0 for arg in args])
return [
DatabaseColumn("PP 1", SimpleColumn('pp_1_done', alias='pp_1_done')),
DatabaseColumn("PP 2", SimpleColumn('pp_2_done', alias='pp_2_done')),
DatabaseColumn("PP 3", SimpleColumn('pp_3_done', alias='pp_3_done')),
DatabaseColumn("PP 4", SimpleColumn('pp_4_done', alias='pp_4_done')),
AggregateColumn('PNC Count', format_pnc_count,
[AliasColumn('pp_1_done'), AliasColumn('pp_2_done'), AliasColumn('pp_3_done'),
AliasColumn('pp_4_done')])]
```
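All of the classes above follow the same sqlagg pattern; as a rough sketch (reusing the imports, base class, and fluff table from this module), a new indicator only needs its own `columns`. The class below is not part of the original module.
```python
# Sketch only -- restates the pattern used above: one fluff table and a
# CountUniqueColumn('doc_id') with sqlagg filters per indicator.
class MothersWithKnownFacility(BaseSqlData):
    table_name = "fluff_WorldVisionMotherFluff"
    slug = 'mothers_with_known_facility'
    title = 'Mothers Who Know Their Closest Health Facility'
    @property
    def columns(self):
        return [
            DatabaseColumn(
                "Knows closest health facility",
                CountUniqueColumn('doc_id', alias="knows_facility",
                                  filters=self.filters + [EQ('knows_closest_facility', 'yes')])),
        ]
```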
#### File: test_pillowtop/tests/test_settings.py
```python
import os
from django.conf import settings
from django.test import TestCase
import json
from corehq.util.test_utils import TestFileMixin
from pillowtop import get_all_pillow_classes
from pillowtop.listener import AliasedElasticPillow
class PillowtopSettingsTest(TestCase, TestFileMixin):
file_path = ('data',)
root = os.path.dirname(__file__)
maxDiff = None
@classmethod
def setUpClass(cls):
cls._PILLOWTOPS = settings.PILLOWTOPS
if not settings.PILLOWTOPS:
# assumes HqTestSuiteRunner, which blanks this out and saves a copy here
settings.PILLOWTOPS = settings._PILLOWTOPS
@classmethod
def tearDownClass(cls):
settings.PILLOWTOPS = cls._PILLOWTOPS
def test_instantiate_all(self):
all_pillow_classes = get_all_pillow_classes()
expected_meta = self.get_json('all-pillow-meta')
self.assertEqual(len(all_pillow_classes), len(expected_meta))
for pillow_class in all_pillow_classes:
self.assertEqual(expected_meta[pillow_class.__name__], _pillow_meta_from_class(pillow_class))
def _rewrite_file(self, pillow_classes):
# utility that should only be called manually
with open(self.get_path('all-pillow-meta', 'json'), 'w') as f:
f.write(
json.dumps({cls.__name__: _pillow_meta_from_class(cls) for cls in pillow_classes},
indent=4)
)
def _pillow_meta_from_class(pillow_class):
is_elastic = issubclass(pillow_class, AliasedElasticPillow)
kwargs = {'create_index': False, 'online': False} if is_elastic else {}
pillow_instance = pillow_class(**kwargs)
props = {
'class_name': pillow_instance.__class__.__name__,
'document_class': pillow_instance.document_class.__name__ if pillow_instance.document_class else None,
'couch_filter': pillow_instance.couch_filter,
'include_docs': pillow_instance.include_docs,
'extra_args': pillow_instance.extra_args,
'checkpoint_id': pillow_instance.checkpoint.checkpoint_id
}
if is_elastic:
props.update({
'es_alias': pillow_instance.es_alias,
'es_type': pillow_instance.es_type,
'es_index': pillow_instance.es_index,
'unique_id': pillow_instance.get_unique_id(),
})
return props
```
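The `all-pillow-meta` JSON fixture read above is keyed by pillow class name; an invented entry using the same keys that `_pillow_meta_from_class` emits would look roughly like this.
```python
# Shape sketch only -- the class name and values are invented; the keys mirror
# the props dict built in _pillow_meta_from_class() above.
EXAMPLE_PILLOW_META = {
    "ExamplePillow": {
        "class_name": "ExamplePillow",
        "document_class": "CommCareCase",
        "couch_filter": None,
        "include_docs": True,
        "extra_args": {},
        "checkpoint_id": "example-pillow-checkpoint",
        # the keys below are added only for AliasedElasticPillow subclasses
        "es_alias": "example_alias",
        "es_type": "example_type",
        "es_index": "example_index",
        "unique_id": "0123abcd",
    },
}
```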
#### File: johan--/commcare-hq/testrunner.py
```python
from collections import defaultdict
from functools import wraps
from unittest.util import strclass
from couchdbkit import Database, ResourceNotFound
from couchdbkit.ext.django import loading
from couchdbkit.ext.django.testrunner import CouchDbKitTestSuiteRunner
import datetime
from django.conf import settings
from django.utils import unittest
import settingshelper
from django.test import TransactionTestCase
from mock import patch, Mock
def set_db_enabled(is_enabled):
def decorator(fn):
@wraps(fn)
def _inner(*args, **kwargs):
original_value = settings.DB_ENABLED
settings.DB_ENABLED = is_enabled
try:
return fn(*args, **kwargs)
finally:
settings.DB_ENABLED = original_value
return _inner
return decorator
class HqTestSuiteRunner(CouchDbKitTestSuiteRunner):
"""
A test suite runner for Hq. On top of the couchdb testrunner, also
apply all our monkeypatches to the settings.
To use this, change the settings.py file to read:
TEST_RUNNER = 'Hq.testrunner.HqTestSuiteRunner'
"""
dbs = []
def setup_test_environment(self, **kwargs):
# monkey patch TEST_APPS into INSTALLED_APPS
# so that tests are run for them
# without having to explicitly have them in INSTALLED_APPS
# weird list/tuple type issues, so force everything to tuples
settings.INSTALLED_APPS = (tuple(settings.INSTALLED_APPS) +
tuple(settings.TEST_APPS))
settings.CELERY_ALWAYS_EAGER = True
# keep a copy of the original PILLOWTOPS setting around in case other tests want it.
settings._PILLOWTOPS = settings.PILLOWTOPS
settings.PILLOWTOPS = {}
return super(HqTestSuiteRunner, self).setup_test_environment(**kwargs)
def setup_databases(self, **kwargs):
self.newdbname = self.get_test_db_name(settings.COUCH_DATABASE_NAME)
print "overridding the couch settings!"
new_db_settings = settingshelper.get_dynamic_db_settings(
settings.COUCH_SERVER_ROOT,
settings.COUCH_USERNAME,
settings.COUCH_PASSWORD,
self.newdbname,
)
settings.COUCH_DATABASE_NAME = self.newdbname
for (setting, value) in new_db_settings.items():
setattr(settings, setting, value)
print "set %s settting to %s" % (setting, value)
settings.EXTRA_COUCHDB_DATABASES = {
db_name: self.get_test_db_name(url)
for db_name, url in settings.EXTRA_COUCHDB_DATABASES.items()
}
return super(HqTestSuiteRunner, self).setup_databases(**kwargs)
def teardown_databases(self, old_config, **kwargs):
for db_uri in settings.EXTRA_COUCHDB_DATABASES.values():
db = Database(db_uri)
self._assert_is_a_test_db(db_uri)
self._delete_db_if_exists(db)
super(HqTestSuiteRunner, self).teardown_databases(old_config, **kwargs)
@staticmethod
def _assert_is_a_test_db(db_uri):
assert db_uri.endswith('_test'), db_uri
@staticmethod
def _delete_db_if_exists(db):
try:
db.server.delete_db(db.dbname)
except ResourceNotFound:
pass
def get_all_test_labels(self):
return [self._strip(app) for app in settings.INSTALLED_APPS
if app not in settings.APPS_TO_EXCLUDE_FROM_TESTS
and not app.startswith('django.')]
def run_tests(self, test_labels, extra_tests=None, **kwargs):
test_labels = test_labels or self.get_all_test_labels()
return super(HqTestSuiteRunner, self).run_tests(
test_labels, extra_tests, **kwargs
)
def _strip(self, app_name):
return app_name.split('.')[-1]
class TimingTestSuite(unittest.TestSuite):
def __init__(self, tests=()):
super(TimingTestSuite, self).__init__(tests)
self.test_times = []
self._patched_test_classes = set()
def patch_test_class(self, klass):
if klass in self._patched_test_classes:
return
suite = self
original_call = klass.__call__
def new_call(self, *args, **kwargs):
start = datetime.datetime.utcnow()
result = original_call(self, *args, **kwargs)
end = datetime.datetime.utcnow()
suite.test_times.append((self, end - start))
return result
klass.__call__ = new_call
original_setUpClass = getattr(klass, 'setUpClass', None)
if original_setUpClass:
@wraps(original_setUpClass)
def new_setUpClass(cls, *args, **kwargs):
start = datetime.datetime.utcnow()
result = original_setUpClass(*args, **kwargs)
end = datetime.datetime.utcnow()
suite.test_times.append((cls.setUpClass, end - start))
return result
klass.setUpClass = classmethod(new_setUpClass)
self._patched_test_classes.add(klass)
def addTest(self, test):
self.patch_test_class(test.__class__)
super(TimingTestSuite, self).addTest(test)
@staticmethod
def get_test_class(method):
"""
return the TestCase class associated with method
method can either be a test_* method, or setUpClass
"""
try:
# setUpClass
return method.im_self
except AttributeError:
# test_* method
return method.__class__
class TwoStageTestRunner(HqTestSuiteRunner):
"""
Test runner which splits testing into two stages:
    - Stage 1 runs all tests that don't require DB access (tests that don't inherit from TransactionTestCase)
    - Stage 2 runs all DB tests (tests that do inherit from TransactionTestCase)
Based off http://www.caktusgroup.com/blog/2013/10/02/skipping-test-db-creation/
"""
def get_test_labels(self):
return self.get_all_test_labels()
def split_suite(self, suite):
"""
        Split the suite into non-DB tests and DB tests (those that subclass TransactionTestCase).
"""
simple_tests = unittest.TestSuite()
db_tests = TimingTestSuite()
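        # note: django.test.TestCase subclasses TransactionTestCase, so ordinary
        # Django TestCases end up in the database stage as well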
for test in suite:
if isinstance(test, TransactionTestCase):
db_tests.addTest(test)
else:
simple_tests.addTest(test)
return simple_tests, db_tests
def setup_mock_database(self):
"""
        Ensure that touching the DB raises an error.
"""
self._db_patch = patch('django.db.backends.util.CursorWrapper')
db_mock = self._db_patch.start()
error = RuntimeError(
"Attempt to access database in a 'no database' test suite run. "
"It could be that you don't have 'BASE_ADDRESS' set in your localsettings.py. "
"If your test really needs database access it must subclass 'TestCase' and not 'SimpleTestCase'.")
db_mock.side_effect = error
mock_couch = Mock(side_effect=error, spec=[])
# register our dbs with the extension document classes
old_handler = loading.couchdbkit_handler
for app, value in old_handler.app_schema.items():
for name, cls in value.items():
cls.set_db(mock_couch)
def teardown_mock_database(self):
"""
Remove cursor patch.
"""
self._db_patch.stop()
@set_db_enabled(False)
def run_non_db_tests(self, suite):
print("Running {0} tests without database".format(suite.countTestCases()))
self.setup_mock_database()
result = self.run_suite(suite)
self.teardown_mock_database()
return self.suite_result(suite, result)
@set_db_enabled(True)
def run_db_tests(self, suite):
print("Running {0} tests with database".format(suite.countTestCases()))
old_config = self.setup_databases()
result = self.run_suite(suite)
from corehq.db import Session, connection_manager
Session.remove()
connection_manager.dispose_all()
self.teardown_databases(old_config)
return self.suite_result(suite, result)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests in two groups, those that don't need db access
first and those that require db access afterwards.
"""
test_labels = test_labels or self.get_test_labels()
self.setup_test_environment()
full_suite = self.build_suite(test_labels, extra_tests)
simple_suite, db_suite = self.split_suite(full_suite)
failures = 0
if simple_suite.countTestCases():
failures += self.run_non_db_tests(simple_suite)
if failures and self.failfast:
return failures
if db_suite.countTestCases():
failures += self.run_db_tests(db_suite)
self.print_test_times(db_suite)
self.teardown_test_environment()
return failures
def print_test_times(self, suite, percent=.5):
self.print_test_times_by_test(suite, percent)
self.print_test_times_by_class(suite, percent)
def _get_total_time(self, time_tuples):
return reduce(
lambda x, y: x + y,
(test_time for _, test_time in time_tuples),
datetime.timedelta(seconds=0)
)
def _print_test_times(self, sorted_times, percent):
total_time = self._get_total_time(sorted_times)
rounded_total_time = total_time - datetime.timedelta(
microseconds=total_time.microseconds
)
cumulative_time = datetime.timedelta(seconds=0)
print (
'{:.0f}% of the test time (total: {}) '
'was spent in the following tests:'.format(
percent * 100,
rounded_total_time,
)
)
for test, test_time in sorted_times:
cumulative_time += test_time
print ' ', test, test_time
if cumulative_time > total_time / 2:
break
def print_test_times_by_test(self, suite, percent=.5):
self._print_test_times(
sorted(suite.test_times, key=lambda x: x[1], reverse=True),
percent,
)
def print_test_times_by_class(self, suite, percent=.5):
times_by_class = defaultdict(datetime.timedelta)
for test, test_time in suite.test_times:
times_by_class[strclass(TimingTestSuite.get_test_class(test))] += test_time
self._print_test_times(
sorted(times_by_class.items(), key=lambda x: x[1], reverse=True),
percent,
)
class NonDbOnlyTestRunner(TwoStageTestRunner):
"""
Override run_db_test to do nothing.
"""
def run_db_tests(self, suite):
print("Skipping {0} database tests".format(suite.countTestCases()))
return 0
class DbOnlyTestRunner(TwoStageTestRunner):
"""
Override run_non_db_tests to do nothing.
"""
def run_non_db_tests(self, suite):
print("Skipping {0} non-database tests".format(suite.countTestCases()))
return 0
class _OnlySpecificApps(HqTestSuiteRunner):
app_labels = set()
# If include is False, then run for all EXCEPT app_labels
include = True
def get_test_labels(self):
test_labels = self.get_all_test_labels()
test_labels = [app_label for app_label in test_labels
if self.include == (app_label in self.app_labels)]
print "Running tests for the following apps:"
for test_label in sorted(test_labels):
print " {}".format(test_label)
return test_labels
class GroupTestRunnerCatchall(_OnlySpecificApps, TwoStageTestRunner):
include = False
@property
def app_labels(self):
return {app_label
for app_labels in settings.TRAVIS_TEST_GROUPS
for app_label in app_labels}
def run_tests(self, test_labels, extra_tests=None, **kwargs):
self.setup_test_environment()
failures = 0
# run all non-db tests from ALL apps first irrespective of which app labels get passed in
all_test_labels = self.get_all_test_labels()
all_suite = self.build_suite(all_test_labels, extra_tests)
simple_suite, _ = self.split_suite(all_suite)
if simple_suite.countTestCases():
failures += self.run_non_db_tests(simple_suite)
if failures and self.failfast:
return failures
# then run db tests from specified apps
db_labels = test_labels or self.get_test_labels()
full_suite = self.build_suite(db_labels, extra_tests)
_, db_suite = self.split_suite(full_suite)
if db_suite.countTestCases():
failures += self.run_db_tests(db_suite)
self.print_test_times(db_suite)
self.teardown_test_environment()
return failures
def _bootstrap_group_test_runners():
"""
Dynamically insert classes named GroupTestRunner[0-N] and GroupTestRunnerCatchall
generated from the TRAVIS_TEST_GROUPS settings variable
into this module, so they can be used like
python manage.py test --testrunner=testrunner.GroupTestRunner0
python manage.py test --testrunner=testrunner.GroupTestRunner1
...
python manage.py test --testrunner=testrunner.GroupTestRunnerCatchall
When you change the number of groups in TRAVIS_TEST_GROUPS, you must also
    manually edit travis.yml to have the following env variables:
env:
[...] TEST_RUNNER=testrunner.GroupTestRunnerCatchall
[...] TEST_RUNNER=testrunner.GroupTestRunner0
[...] TEST_RUNNER=testrunner.GroupTestRunner1
...
"""
for i, app_labels in enumerate(settings.TRAVIS_TEST_GROUPS):
class_name = 'GroupTestRunner{}'.format(i)
globals()[class_name] = type(
class_name,
(_OnlySpecificApps, DbOnlyTestRunner),
{
'app_labels': settings.TRAVIS_TEST_GROUPS[i]
}
)
_bootstrap_group_test_runners()
``` |
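As the docstrings above indicate, one of these runner classes is selected through Django settings or on the command line; a minimal illustration (class paths as given in the docstrings, everything else assumed):
```python
# Illustrative settings.py / localsettings.py snippet -- pick one runner class:
TEST_RUNNER = 'testrunner.TwoStageTestRunner'     # non-DB tests first, then DB tests
# TEST_RUNNER = 'testrunner.NonDbOnlyTestRunner'  # skip the database stage entirely
# TEST_RUNNER = 'testrunner.GroupTestRunner0'     # one travis group (generated dynamically)
# or per invocation, as in the _bootstrap_group_test_runners docstring:
#   python manage.py test --testrunner=testrunner.GroupTestRunnerCatchall
```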
{
"source": "JohanComparat/nbody-npt-functions",
"score": 2
} |
#### File: bin_DF/test_scripts/densityField-comparison-MDPL-meshsize.py
```python
import cPickle
import numpy as n
import astropy.cosmology as co
import astropy.units as uu
aa = co.Planck13
import time
from astropy.io import fits
import os
from os.path import join
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
mockDir = join("..","MD_1Gpc","density_field")
inFiles = n.array(["dmdens_cic_104_DFhist.dat", "dmdens_cic_101_DFhist.dat", "dmdens_cic_097_DFhist.dat", "dmdens_cic_087_DFhist.dat"])
# ZS = 0.7 0.8 1.0 1.48
def getNN0_sim(inSim,NR=10):
f=open(join(mockDir, inSim))
bins, HDF0 = cPickle.load(f)
f.close()
#bins = n.hstack((0,n.logspace(-3, 4, 1000)))
xb = (bins[1:]+bins[:-1])/2.
dx = bins[1:] - bins[:-1]
X, Y = n.meshgrid(xb,xb)
N0 = HDF0 /dx / (1000.-2*1000./2048)**3.
HDF0R = n.array([HDF0[ii::NR] for ii in range(NR)]).sum(axis=0)
binsR = bins[::NR]
N0R = HDF0R / ((binsR[1:] - binsR[:-1]) * 250.**3.)
return N0, bins, N0R, binsR
N0z07s, binsz07s, N0z07, binsz07 = getNN0_sim(inFiles[0])
xb = (binsz07[1:]+binsz07[:-1])/2.
f=open(join(mockDir,'Planck-ng512-L250.0.HDF0.pkl'),'r')
bins, HDF0, N0 = cPickle.load(f)
f.close()
NR = 10
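# coarsen the histogram by a factor NR: summing the NR interleaved slices
# HDF0[ii::NR] is equivalent to adding up each group of NR consecutive fine bins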
HDF0R = n.array([HDF0[ii::NR] for ii in range(NR)]).sum(axis=0)
binsR = bins[::NR]
N0R = HDF0R / ((binsR[1:] - binsR[:-1]) * 250.**3.)
N0R_sig = n.array([N0[ii::NR] for ii in range(NR)]).std(axis=0)
muscleDelta = interp1d( N0R, (binsR[:-1]+binsR[1:])/2.)
mdplDelta = interp1d( N0z07, xb)
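# the two interpolators invert each density PDF; matching bins with equal PDF values
# gives the delta_muscle -> delta_MDPL conversion that is saved to disk below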
ok=(N0R>0)&(N0R<=100)#n.max(N0z07))
trueDelta = mdplDelta(N0R[ok])
index=n.argsort(N0R[ok])
deltaMuscle = (binsR[:-1]+binsR[1:])/2.
n.savetxt(join(mockDir,"delta-conversion-muscle-mdpl.txt"),n.transpose([deltaMuscle[ok][index],trueDelta[index]]),header="deltaMuscle deltaMDPL")
p.figure(0)
p.title('QSO')
p.plot(xb, N0z07,'kx', rasterized=True, label='z=0.7 all')
p.plot(xb, N0,'bx', rasterized=True, label='z=0.7 muscle')
p.plot((binsR[:-1]+binsR[1:])/2., N0R,'rx', rasterized=True, label='z=0.7 muscle resampled')
p.plot(trueDelta[index], N0R[ok][index], 'm--', lw=2, rasterized=True, label='z=0.7 muscle corr')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e2))
p.xlim((0.1, 1e4))
gl = p.legend(loc=3)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","muscle-delta-HDF0.png"))
p.show()
```
#### File: bin_DF/test_scripts/fit_density_field_to-tracers.py
```python
import sys
import numpy as n
import os
from os.path import join
from astropy.io import fits
import time
import cPickle
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from scipy.stats import scoreatpercentile as sc
from scipy.stats import norm
import matplotlib.pyplot as p
from matplotlib.ticker import NullFormatter
nullfmt = NullFormatter() # no labels
from scipy.optimize import curve_fit
DFdir = join("/data2", "users", "gustavo", "BigMD", "1Gpc_3840_Planck1_New", "DENSFIELDS")
# mockDir = "/data1/DATA/eBOSS/Multidark-box-mocks/parts/"
mockDir = join("..","MD_1Gpc","density_field")
#inFiles = n.array(["dmdens_cic_104_DFhist.dat", "dmdens_cic_101_DFhist.dat", "dmdens_cic_097_DFhist.dat", "dmdens_cic_087_DFhist.dat"])
# inFiles = n.array(["dmdens_cic_104_DF0DF1hist.dat", "dmdens_cic_101_DF0DF1hist.dat", "dmdens_cic_097_DF0DF1hist.dat", "dmdens_cic_087_DF0DF1hist.dat"])
inFiles = n.array(["dmdens_cic_104_DFhist.dat", "dmdens_cic_101_DFhist.dat", "dmdens_cic_097_DFhist.dat", "dmdens_cic_087_DFhist.dat"])
# ZS = 0.7 0.8 1.0 1.48
bins = n.hstack((0,n.logspace(-3, 4, 1000)))
dx = bins[1:] - bins[:-1]
xb = (bins[1:]+bins[:-1])/2.
"""
ii=0
f=open(join(mockDir, inFiles[ii]))
bins, HDF0, HDF1, H = cPickle.load(f)
f.close()
X, Y = n.meshgrid(xb,xb)
N0 = HDF0 /dx / (1000.-2*1000./2048)**3.
N1 = HDF1 /dx / (1000.-2*1000./2048)**3.
inGal = n.array([ "Box_HAM_z0.701838_nbar1.000000e-04_LRG.DF.fits.gz", "Box_HAM_z0.701838_nbar1.350000e-05_QSO.DF.fits.gz", "Box_HAM_z0.701838_nbar2.400000e-04_ELG.DF.fits.gz" ])
"""
#################################################
#################################################
# delta - probability to have a galaxy relation
#################################################
#################################################
def getNN(inGalFile, bins = bins):
hd = fits.open(inGalFile)[1].data
Hqso, xedges, yedges = n.histogram2d(hd['DF'], hd['DF_N1'], bins)
HDF0qso = n.histogram(hd['DF'], bins= bins)[0] #n.logspace(-1.5,4,80))
HDF1qso = n.histogram(hd['DF_N1'], bins= bins)[0] #n.logspace(-1.5,4,80))
N0qso = HDF0qso /dx / 1000.**3.
N1qso = HDF1qso /dx / 1000.**3.
return Hqso, N0qso, N1qso
def getNN0_sim(inSim):
f=open(join(mockDir, inSim))
bins, HDF0 = cPickle.load(f)
f.close()
#bins = n.hstack((0,n.logspace(-3, 4, 1000)))
xb = (bins[1:]+bins[:-1])/2.
dx = bins[1:] - bins[:-1]
X, Y = n.meshgrid(xb,xb)
N0 = HDF0 /dx / (1000.-2*1000./2048)**3.
return N0, bins
def getNN0(inGalFile, bins):
hd = fits.open(inGalFile)[1].data
HDF0, bins = n.histogram(hd['DF'], bins= bins) #n.logspace(-1.5,4,80))
dx = bins[1:] - bins[:-1]
N0 = HDF0 /dx / 1000.**3.
return N0, HDF0
"""
def smooth(Hin, ns=4, xb = (bins[1:]+bins[:-1])/2.):
n1, n2 = H.shape
nNew = int(float(n1)/ns)
print nNew
Hout = n.empty(( nNew, nNew ))
xout = n.empty((nNew))
for ii in n.arange(nNew):
idI = n.arange(ii*ns, (ii+1)*ns, 1)
xout[ii] = (xb[idI[-1]] + xb[idI[0]]) /2.
for jj in n.arange(nNew):
idJ = n.arange(jj*ns, (jj+1)*ns, 1)
idX, idY = n.meshgrid(idI,idJ)
Hout[ii, jj] = n.sum(n.array([Hin[n.hstack(idX)[kk], n.hstack(idY)[kk]] for kk in range(ns**2)]))
#print ii, jj
#print n.transpose([n.hstack(idX),n.hstack(idY)])
#print "-------------------------------------------------------------------"
return xout, Hout
"""
#################################################
#################################################
# density field order 0 : distribution
#################################################
#################################################
NR=10
N0z07s, binsz07s = getNN0_sim(inFiles[0])
N0z07 = n.array([N0z07s[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz07 = binsz07s[::NR]
N0z08s, binsz08s = getNN0_sim(inFiles[1])
N0z08 = n.array([N0z08s[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz08 = binsz08s[::NR]
N0z15s, binsz15s = getNN0_sim(inFiles[3])
N0z15 = n.array([N0z15s[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz15 = binsz15s[::NR]
bins = binsz07
xb = (bins[1:]+bins[:-1])/2.
dx = bins[1:] - bins[:-1]
inGal = n.array([ "Box_HAM_z0.701838_nbar1.350000e-05_QSO.DF.fits.gz","Box_HAM_z0.818843_nbar1.680000e-05_QSO.DF.fits.gz", "Box_HAM_z1.480160_nbar1.930000e-05_QSO.DF.fits.gz" ])
N0qsoz07, N0qsoz07T = getNN0(join( mockDir,inGal[0]), bins)
N0qsoz08, N0qsoz08T = getNN0(join( mockDir,inGal[1]), bins)
N0qsoz15, N0qsoz15T = getNN0(join( mockDir,inGal[2]), bins)
p.figure(0)
p.title('QSO')
p.plot(xb, N0z07,'kx', rasterized=True, label='z=0.7 all')
p.plot(xb, N0qsoz07,'ko', rasterized=True, label='z=0.7 qso')
p.plot(xb, N0qsoz08,'bo', rasterized=True, label='z=0.8 qso')
p.plot(xb, N0z08,'bx', rasterized=True, label='z=0.8 all')
p.plot(xb, N0z15,'rx', rasterized=True, label='z=1.5 all')
p.plot(xb, N0qsoz15,'ro', rasterized=True, label='z=1.5 qso')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-QSO-delta-HDF0.png"))
p.clf()
p.figure(0)
p.title('QSO')
p.plot(xb, N0qsoz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, N0qsoz08/N0z08,'bx', rasterized=True, label='z=0.8')
p.plot(xb, N0qsoz15/N0z15,'rx', rasterized=True, label='z=1.5')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10 , 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-QSO-delta-HDF0-ratio.png"))
p.clf()
inGal = n.array([ "Box_HAM_z0.701838_nbar1.000000e-04_LRG.DF.fits.gz", "Box_HAM_z0.818843_nbar1.000000e-04_LRG.DF.fits.gz"])
N0lrgz07, N0lrgz07T = getNN0(join( mockDir,inGal[0]), bins)
N0lrgz08, N0lrgz08T = getNN0(join( mockDir,inGal[1]), bins)
p.figure(0)
p.title('LRG')
p.plot(xb, N0lrgz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, N0lrgz08/N0z08,'bx', rasterized=True, label='z=0.8')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-LRG-delta-HDF0-ratio.png"))
p.clf()
p.figure(0)
p.title('LRG')
p.plot(xb, N0z07,'kx', rasterized=True, label='z=0.7 all')
p.plot(xb, N0z08,'bx', rasterized=True, label='z=0.8 all')
p.plot(xb, N0lrgz07,'ko', rasterized=True, label='z=0.7 lrg')
p.plot(xb, N0lrgz08,'bo', rasterized=True, label='z=0.8 lrg')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-LRG-delta-HDF0.png"))
p.clf()
inGal = n.array([ "Box_HAM_z0.701838_nbar2.400000e-04_ELG.DF.fits.gz" , "Box_HAM_z0.818843_nbar3.200000e-04_ELG.DF.fits.gz" ])
N0elgz07, N0elgz07T = getNN0(join( mockDir,inGal[0]), bins)
N0elgz08, N0elgz08T = getNN0(join( mockDir,inGal[1]), bins)
p.figure(0)
p.title('ELG')
p.plot(xb, N0elgz07/N0z07,'kx', rasterized=True, label='z=0.7')
#p.plot(xb, N0elgz08/N0z08,'bx', rasterized=True, label='z=0.8')
#p.plot(xb[xb>1e2], 10**fun(n.log10(xb[xb>1e2]), prs[0],prs[1],prs[2]), 'r--', lw = 2, label='')
#p.plot(xb[xb<10**1.2], 10**fun(n.log10(xb[xb<10**1.2]), prsL[0],prsL[1],prsL[2]), 'r--', lw = 2)
#p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2)
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-ELG-delta-HDF0-ratio.png"))
p.clf()
p.figure(0)
p.title('ELG')
p.plot(xb, N0elgz07,'ko', rasterized=True, label='z=0.7 elg')
p.plot(xb, N0elgz08,'bo', rasterized=True, label='z=0.8 elg')
p.plot(xb, N0z07,'kx', rasterized=True, label='z=0.7 all')
p.plot(xb, N0z08,'bx', rasterized=True, label='z=0.8 all')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-ELG-delta-HDF0.png"))
p.clf()
########## FIT z=1.5 QSO
NR = 5
N0z15R = n.array([N0z15[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz15R = binsz15[::NR]
N0qsoz15R = n.array([N0qsoz15[ii::NR] for ii in range(NR)]).sum(axis=0)
N0qsoz15R_sig = n.array([N0qsoz15[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz15R[1:]+binsz15R[:-1])/2.
dxR = binsz15R[1:] - binsz15R[:-1]
# relative error on y in percentage
errPoisson = N0qsoz15T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0qsoz15T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0qsoz15R_sig)
ok = (N0qsoz15>0)&(N0z15>0)&(N0qsoz15/N0z15>-6)#&(xb>10**2)
y = n.log10(N0qsoz15[ok]/N0z15[ok])
yplus = n.log10((N0qsoz15[ok] + errorsP(xb[ok])*N0qsoz15[ok] )/N0z15[ok])
yminus = n.log10((N0qsoz15[ok] - errorsP(xb[ok])*N0qsoz15[ok] )/N0z15[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
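# weighted 11th-order polynomial fit of log10(N_QSO / N_total) against log10(delta);
# the weights 1/errPoisson downweight bins with large relative Poisson errors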
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('QSO ')#+str(ps))
p.plot(xb, N0qsoz15/N0z15,'kx', rasterized=True, label='z=1.5')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z15-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('QSO')#+str(n.round(ps,5)))
p.plot(xb, (N0qsoz15/N0z15)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=1.5')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z15-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-QSO-z15.data",ps)
########## FIT z=0.8 LRG
NR = 5
N0z08R = n.array([N0z08[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz08R = binsz08[::NR]
N0lrgz08R = n.array([N0lrgz08[ii::NR] for ii in range(NR)]).sum(axis=0)
N0lrgz08R_sig = n.array([N0lrgz08[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz08R[1:]+binsz08R[:-1])/2.
dxR = binsz08R[1:] - binsz08R[:-1]
# relative error on y in percentage
errPoisson = N0lrgz08T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0lrgz08T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0lrgz08R_sig)
ok = (N0lrgz08>0)&(N0z08>0)&(N0lrgz08/N0z08>-6)#&(xb>10**2)
y = n.log10(N0lrgz08[ok]/N0z08[ok])
yplus = n.log10((N0lrgz08[ok] + errorsP(xb[ok])*N0lrgz08[ok] )/N0z08[ok])
yminus = n.log10((N0lrgz08[ok] - errorsP(xb[ok])*N0lrgz08[ok] )/N0z08[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('LRG ')#+str(ps))
p.plot(xb, N0lrgz08/N0z08,'kx', rasterized=True, label='z=0.8')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2, label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-LRG-z08-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('LRG')#+str(n.round(ps,5)))
p.plot(xb, (N0lrgz08/N0z08)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.8')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-LRG-z08-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-LRG-z08.data",ps)
########## FIT z=0.8 QSO
NR = 5
N0z08R = n.array([N0z08[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz08R = binsz08[::NR]
N0qsoz08R = n.array([N0qsoz08[ii::NR] for ii in range(NR)]).sum(axis=0)
N0qsoz08R_sig = n.array([N0qsoz08[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz08R[1:]+binsz08R[:-1])/2.
dxR = binsz08R[1:] - binsz08R[:-1]
# relative (fractional) Poisson error on y
errPoisson = N0qsoz08T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0qsoz08T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0qsoz08R_sig)
ok = (N0qsoz08>0)&(N0z08>0)&(N0qsoz08/N0z08>-6)#&(xb>10**2)
y = n.log10(N0qsoz08[ok]/N0z08[ok])
yplus = n.log10((N0qsoz08[ok] + errorsP(xb[ok])*N0qsoz08[ok] )/N0z08[ok])
yminus = n.log10((N0qsoz08[ok] - errorsP(xb[ok])*N0qsoz08[ok] )/N0z08[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('QSO ')#+str(ps))
p.plot(xb, N0qsoz08/N0z08,'kx', rasterized=True, label='z=0.8')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z08-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('QSO')#+str(n.round(ps,5)))
p.plot(xb, (N0qsoz08/N0z08)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.8')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z08-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-QSO-z08.data",ps)
########## FIT z=0.8 ELG
NR = 5
N0z08R = n.array([N0z08[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz08R = binsz08[::NR]
N0elgz08R = n.array([N0elgz08[ii::NR] for ii in range(NR)]).sum(axis=0)
N0elgz08R_sig = n.array([N0elgz08[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz08R[1:]+binsz08R[:-1])/2.
dxR = binsz08R[1:] - binsz08R[:-1]
# relative (fractional) Poisson error on y
errPoisson = N0elgz08T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0elgz08T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0elgz08R_sig)
ok = (N0elgz08>0)&(N0z08>0)&(N0elgz08/N0z08>-6)#&(xb>10**2)
y = n.log10(N0elgz08[ok]/N0z08[ok])
yplus = n.log10((N0elgz08[ok] + errorsP(xb[ok])*N0elgz08[ok] )/N0z08[ok])
yminus = n.log10((N0elgz08[ok] - errorsP(xb[ok])*N0elgz08[ok] )/N0z08[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('ELG ')#+str(ps))
p.plot(xb, N0elgz08/N0z08,'kx', rasterized=True, label='z=0.8')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-ELG-z08-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('ELG')#+str(n.round(ps,5)))
p.plot(xb, (N0elgz08/N0z08)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.8')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-ELG-z08-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-ELG-z08.data",ps)
sys.exit()
########## FIT z=0.7 LRG
NR = 5
N0z07R = n.array([N0z07[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz07R = binsz07[::NR]
N0lrgz07R = n.array([N0lrgz07[ii::NR] for ii in range(NR)]).sum(axis=0)
N0lrgz07R_sig = n.array([N0lrgz07[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz07R[1:]+binsz07R[:-1])/2.
dxR = binsz07R[1:] - binsz07R[:-1]
# relative (fractional) Poisson error on y
errPoisson = N0lrgz07T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0lrgz07T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0lrgz07R_sig)
ok = (N0lrgz07>0)&(N0z07>0)&(N0lrgz07/N0z07>-6)#&(xb>10**2)
y = n.log10(N0lrgz07[ok]/N0z07[ok])
yplus = n.log10((N0lrgz07[ok] + errorsP(xb[ok])*N0lrgz07[ok] )/N0z07[ok])
yminus = n.log10((N0lrgz07[ok] - errorsP(xb[ok])*N0lrgz07[ok] )/N0z07[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('LRG ')#+str(ps))
p.plot(xb, N0lrgz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2, label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-LRG-z07-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('LRG')#+str(n.round(ps,5)))
p.plot(xb, (N0lrgz07/N0z07)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.7')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-LRG-z07-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-LRG-z07.data",ps)
########## FIT z=0.7 QSO
NR = 5
N0z07R = n.array([N0z07[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz07R = binsz07[::NR]
N0qsoz07R = n.array([N0qsoz07[ii::NR] for ii in range(NR)]).sum(axis=0)
N0qsoz07R_sig = n.array([N0qsoz07[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz07R[1:]+binsz07R[:-1])/2.
dxR = binsz07R[1:] - binsz07R[:-1]
# relative (fractional) Poisson error on y
errPoisson = N0qsoz07T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0qsoz07T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0qsoz07R_sig)
ok = (N0qsoz07>0)&(N0z07>0)&(N0qsoz07/N0z07>-6)#&(xb>10**2)
y = n.log10(N0qsoz07[ok]/N0z07[ok])
yplus = n.log10((N0qsoz07[ok] + errorsP(xb[ok])*N0qsoz07[ok] )/N0z07[ok])
yminus = n.log10((N0qsoz07[ok] - errorsP(xb[ok])*N0qsoz07[ok] )/N0z07[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('QSO ')#+str(ps))
p.plot(xb, N0qsoz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z07-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('QSO')#+str(n.round(ps,5)))
p.plot(xb, (N0qsoz07/N0z07)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.7')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z07-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-QSO-z07.data",ps)
########## FIT z=0.7 ELG
NR = 5
N0z07R = n.array([N0z07[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz07R = binsz07[::NR]
N0elgz07R = n.array([N0elgz07[ii::NR] for ii in range(NR)]).sum(axis=0)
N0elgz07R_sig = n.array([N0elgz07[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz07R[1:]+binsz07R[:-1])/2.
dxR = binsz07R[1:] - binsz07R[:-1]
# relative (fractional) Poisson error on y
errPoisson = N0elgz07T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0elgz07T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0elgz07R_sig)
ok = (N0elgz07>0)&(N0z07>0)&(N0elgz07/N0z07>-6)#&(xb>10**2)
y = n.log10(N0elgz07[ok]/N0z07[ok])
yplus = n.log10((N0elgz07[ok] + errorsP(xb[ok])*N0elgz07[ok] )/N0z07[ok])
yminus = n.log10((N0elgz07[ok] - errorsP(xb[ok])*N0elgz07[ok] )/N0z07[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('ELG ')#+str(ps))
p.plot(xb, N0elgz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-ELG-z07-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('ELG')#+str(n.round(ps,5)))
p.plot(xb, (N0elgz07/N0z07)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.7')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-ELG-z07-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-ELG-z07.data",ps)
sys.exit()
#########################################
#########################################
#########################################
#Z=0.8
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#Z=0.8
#########################################
#########################################
#########################################
inGal = n.array([ "Box_HAM_z1.480160_nbar1.930000e-05_QSO.DF.fits.gz" ])
inGal = n.array([ "Box_HAM_z0.818843_nbar1.680000e-05_QSO.DF.fits.gz", "Box_HAM_z0.818843_nbar3.200000e-04_ELG.DF.fits.gz" ])
Hqso, N0qso, N1qso = getNN(join( mockDir,inGal[0]))
Helg, N0elg, N1elg = getNN(join( mockDir,inGal[1]))
xs, Hs = smooth(H)
xs, Hselg = smooth(Helg)
xs, Hsqso = smooth(Hqso)
X, Y = n.meshgrid(xb[::4], xb[::4]) #xs,xs)
n.savetxt(join(mockDir,"grid-x-z08.data"), X)
n.savetxt(join(mockDir,"grid-y-z08.data"), Y)
Z = Hsqso.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"qso-z08.data"), Z)
Z = Hselg.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"elg-z08.data"), Z)
Z=n.log10(Hsqso.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(QSO)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z08-qso.png"))
p.clf()
Z=n.log10(Hselg.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(ELG)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z08-elg.png"))
p.clf()
p.figure(0)
p.title('z=0.8')
p.plot(xb, N0qso/N0,'gx', rasterized=True, label='QSO ')
p.plot(xb, N0elg/N0,'bx', rasterized=True, label='ELG ')
p.plot(xb, 5e-6 * xb**(2.1), 'k--' , label=r'$5\times10^{-6}\delta_0^{2.1}$')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-8, 1e1))
p.xlim((0.01, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-z08-ratio.png"))
p.clf()
p.figure(0)
p.title('z=0.8')
p.plot(xb, N1qso/N1,'gx', rasterized=True, label='QSO ')
p.plot(xb, N1elg/N1,'bx', rasterized=True, label='ELG ')
p.plot(xb, 5e-6 * xb**(2.1), 'k--' , label=r'$5\times10^{-6}\delta_0^{2.1}$')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_1$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-8, 1e1))
p.xlim((0.01, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF1-z08-ratio.png"))
p.clf()
p.figure(0)
p.title('z=0.8')
p.plot(xb, N0,'kx', rasterized=True, label=r'MDPL 2048$^3$')
p.plot(xb, N0qso,'gx', rasterized=True, label='QSO ')
p.plot(xb, N0elg,'bx', rasterized=True, label='ELG ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'$N/\mathrm{Mpc}^3/d\delta$')
p.xscale('log')
p.yscale('log')
p.ylim((1e-11, 1e2))
p.xlim((0.01, 1e4))
gl = p.legend()
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-z08.png"))
p.clf()
p.figure(0)
p.title('z=0.8')
p.plot(xb, N1,'kx', rasterized=True, label=r'MDPL 2048$^3$')
p.plot(xb, N1qso,'gx', rasterized=True, label='QSO ')
p.plot(xb, N1elg,'bx', rasterized=True, label='ELG ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$N/\mathrm{Mpc}^3/d\delta$')
p.xscale('log')
p.yscale('log')
p.ylim((1e-11, 1e2))
p.xlim((0.01, 1e4))
gl = p.legend()
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF1-z08.png"))
p.clf()
#########################################
#########################################
#########################################
#Z=0.7
#########################################
#########################################
#########################################
inGal = n.array([ "Box_HAM_z0.818843_nbar1.000000e-04_LRG.DF.fits.gz", "Box_HAM_z0.818843_nbar1.680000e-05_QSO.DF.fits.gz", "Box_HAM_z0.818843_nbar3.200000e-04_ELG.DF.fits.gz" ])
Hlrg, N0lrg, N1lrg = getNN(join( mockDir,inGal[0]))
Hqso, N0qso, N1qso = getNN(join( mockDir,inGal[1]))
Helg, N0elg, N1elg = getNN(join( mockDir,inGal[2]))
xs, Hs = smooth(H)
xs, Hslrg = smooth(Hlrg)
xs, Hselg = smooth(Helg)
xs, Hsqso = smooth(Hqso)
X, Y = n.meshgrid(xb[::4], xb[::4]) #xs,xs)
n.savetxt(join(mockDir,"grid-x-z07.data"), X)
n.savetxt(join(mockDir,"grid-y-z07.data"), Y)
Z = Hsqso.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"qso-z07.data"), Z)
Z = Hslrg.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"lrg-z07.data"), Z)
Z = Hselg.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"elg-z07.data"), Z)
Z=n.log10(Hsqso.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(QSO)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z07-qso.png"))
p.clf()
Z=n.log10(Hselg.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(ELG)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z07-elg.png"))
p.clf()
Z=n.log10(Hslrg.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(LRG)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z07-lrg.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N0qso/N0,'gx', rasterized=True, label='QSO ')
p.plot(xb, N0lrg/N0,'rx', rasterized=True, label='LRG ')
p.plot(xb, N0elg/N0,'bx', rasterized=True, label='ELG ')
p.plot(xb, 5e-6 * xb**(2.1), 'k--' , label=r'$5\times10^{-6}\delta_0^{2.1}$')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-8, 1e1))
p.xlim((0.01, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-z07-ratio.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N1qso/N1,'gx', rasterized=True, label='QSO ')
p.plot(xb, N1lrg/N1,'rx', rasterized=True, label='LRG ')
p.plot(xb, N1elg/N1,'bx', rasterized=True, label='ELG ')
p.plot(xb, 5e-6 * xb**(2.1), 'k--' , label=r'$5\times10^{-6}\delta_0^{2.1}$')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_1$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-8, 1e1))
p.xlim((0.01, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF1-z07-ratio.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N0,'kx', rasterized=True, label=r'MDPL 2048$^3$')
p.plot(xb, N0qso,'gx', rasterized=True, label='QSO ')
p.plot(xb, N0lrg,'rx', rasterized=True, label='LRG ')
p.plot(xb, N0elg,'bx', rasterized=True, label='ELG ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'$N/\mathrm{Mpc}^3/d\delta$')
p.xscale('log')
p.yscale('log')
p.ylim((1e-11, 1e2))
p.xlim((0.01, 1e4))
gl = p.legend()
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-z07.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N1,'kx', rasterized=True, label=r'MDPL 2048$^3$')
p.plot(xb, N1qso,'gx', rasterized=True, label='QSO ')
p.plot(xb, N1lrg,'rx', rasterized=True, label='LRG ')
p.plot(xb, N1elg,'bx', rasterized=True, label='ELG ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$N/\mathrm{Mpc}^3/d\delta$')
p.xscale('log')
p.yscale('log')
p.ylim((1e-11, 1e2))
p.xlim((0.01, 1e4))
gl = p.legend()
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF1-z07.png"))
p.clf()
sys.exit()
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, proba)
p.xlabel('DF N1')
p.ylabel('DF')
p.xscale('log')
p.yscale('log')
p.show()
p.figure(1, figsize=(8, 8))
axScatter = p.axes(rect_scatter)
axScatter.set_yscale('log')
axScatter.set_xscale('log')
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
levels = (0.01, 0.1, 0.5, 1)
cset = p.contour(X, Y, proba, levels, origin='lower',colors=['black','green','blue','red'],linewidths=(1.9, 1.6, 1.5, 1.4),extent=extent)
p.clabel(cset, inline=1, fontsize=10, fmt='%1.0i')
for c in cset.collections:
c.set_linestyle('solid')
p.xlabel('DF N1')
p.ylabel('DF')
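# assumption: `nullfmt` was defined earlier in the original interactive session; a
# NullFormatter is re-created here so that the axis formatters below have something to use
from matplotlib.ticker import NullFormatter
nullfmt = NullFormatter()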
axHistx = p.axes(rect_histx)
axHisty = p.axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axHistx.plot(xb, HDF1, 'k')
axHistx.plot(xb, HDF1qso, 'b')
axHistx.set_yscale('log')
axHistx.set_xscale('log')
# side histogram drawn horizontally: plot() has no 'orientation' keyword, so swap x and y
axHisty.plot(HDF0, xb, 'r')
axHisty.plot(HDF0qso, xb, 'g')
axHisty.set_yscale('log')
p.show()
p.imshow(n.log10(proba))
p.colorbar()
p.show()
dxAll = binsAll[1:] - binsAll[:-1]
xAll = (binsAll[1:]*binsAll[:-1])**0.5
NAll = result /dxAll / 1000**3.
nqso, binQSO = n.histogram(hd['DF'], bins= n.logspace(-1.5,4,80))
dxQso = binQSO[1:] - binQSO[:-1]
xQSO = (binQSO[1:]*binQSO[:-1])**0.5
NQSO = nqso /dxQso / 1000**3.
p.figure(0)
p.title('z=0.7')
p.plot(xAll, NAll,'kx', rasterized=True, label='MD Planck 1Gpc mesh 2048 cube')
p.plot(xQSO, NQSO,'bx', rasterized=True, label='QSO ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta$')
p.ylabel(r'$N/\mathrm{Mpc}^3/d\delta$')
p.xscale('log')
p.yscale('log')
p.legend(loc= 3)
p.grid()
p.savefig(join(mockDir,"plots","delta-numberdensity-z07-fit.png"))
p.clf()
#################################################
#################################################
# delta - vmax relation
#################################################
#################################################
# for each bin in delta compute vmax mean and its std
pcs = [0, 1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 99, 100]
bins = n.hstack((0,n.logspace(-3, 4, 60)))
vmaxBar = n.empty(len(bins)-1)
vmaxStd = n.empty(len(bins)-1)
distrib = n.empty((len(bins)-1, len(pcs)))
Nbins = 10
bbs = n.empty((len(bins)-1, Nbins+1))
N = n.empty((len(bins)-1, Nbins))
for ii in range(len(bins)-1):
sel = (hd['DF']>bins[ii]) & (hd['DF']<bins[ii+1])
y = hd['Vmax'][sel]
vmaxBar[ii], vmaxStd[ii], distrib[ii] = n.mean(y), n.std(y), sc(y,pcs)
N[ii],bbs[ii] = n.histogram(y, bins= Nbins )
ok = (vmaxBar>0)&(vmaxStd>0)&(bins[1:]>0.4)&(bins[:-1]<100)&(N.sum(axis=1)>100)
x = n.log10(1.+(bins[1:]*bins[:-1])**0.5)[ok]
y = n.log10(vmaxBar)[ok]
yerr = vmaxStd[ok] / vmaxBar[ok]
f= lambda x,a,b : a*x+b
out, cov = curve_fit(f, x, y, (1,0), yerr )
p.figure(0)
p.plot(n.log10(1+hd['DF']), n.log10(hd['Vmax']),'r.',alpha=0.1, label='QSO z=0.7',rasterized = True)
p.errorbar(x,y,yerr=yerr/2.,label='mean - std')
p.plot(x, f(x,out[0],out[1]),'k--',lw=2,label='fit y='+str(n.round(out[0],3))+'x+'+str(n.round(out[1],3)))
p.xlabel(r'$log_{10}(1+\delta)$')
p.ylabel(r'$log_{10}(V_{max})$')
p.legend(loc= 2)
p.grid()
p.savefig(join(mockDir,"plots","delta-vmax-qso-z07-fit.png"))
p.clf()
#log10(vmax) = 0.0973259*log10(1+delta) + 2.254723554
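# Hedged sketch (assumption, not part of the original analysis): combining the linear mean
# relation fitted above with the lognormal scatter fitted further down (roughly 0.12 dex
# around the mean), a Vmax value could be drawn for a halo of given local density.
# Hypothetical usage, requires scipy.stats.norm:
#   log_vmax_mean = out[0] * n.log10(1. + delta) + out[1]
#   vmax_draw = 10**norm.rvs(loc=log_vmax_mean, scale=0.12)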
params = n.empty((len(bins[ok])-1,3))
paramsErr = n.empty((len(bins[ok])-1,3))
histBins = n.arange(-0.7, 0.71, 0.05)
p.figure(0, (12,8))
for jj in range(len(bins[ok])-1):
sel = (hd['DF']>bins[ok][jj]) & (hd['DF']<bins[ok][jj+1])
yDat= hd['Vmax'][sel]
#print jj, yDat
x1 = n.log10(yDat) - n.log10(n.mean(yDat))
counts, bs = n.histogram(x1, bins=histBins)
#print counts, bs
xx=(bs[1:]+bs[:-1])/2.
p.errorbar(xx,counts,yerr = counts**0.5 , label=r'$\delta\in$'+str(n.round(bins[ok][jj],2))+', '+str(n.round(bins[ok][jj+1],2)))
p.ylabel(r'counts')
p.xlabel(r'$log_{10}(V_{max})/\bar{V}$')
p.grid()
p.xlim((-1, 1.3))
p.legend(fontsize=8)
p.savefig(join(mockDir,"plots","delta-vmaxHistPerDelta-qso-z07.png"))
p.clf()
xs = n.empty((len(bins[ok])-1, len(histBins)))
ys = n.empty((len(bins[ok])-1, len(histBins)-1))
p.figure(0, (12,8))
for jj in range(len(bins[ok])-1):
sel = (hd['DF']>bins[ok][jj]) & (hd['DF']<bins[ok][jj+1])
yDat= hd['Vmax'][sel]
#print jj, yDat
x1 = n.log10(yDat) - n.log10(n.mean(yDat))
counts, bs = n.histogram(x1, normed = True, bins = histBins)
#print counts, bs
xx=(bs[1:]+bs[:-1])/2.
p.plot(xx,counts, ls='--',lw=0.5, label=r'$\delta\in$'+str(n.round(bins[ok][jj],2))+', '+str(n.round(bins[ok][jj+1],2)))
ys[jj] = counts
xs[jj] = bs
Xbin=bs # n.mean(xs,axis=0)
X=(Xbin[1:]+Xbin[:-1])/2.
Y=n.mean(ys,axis=0)
YERR=n.std(ys,axis=0)
p.errorbar(X,Y, yerr = YERR, lw=2)
p.ylabel(r'counts')
p.xlabel(r'$log_{10}(V_{max})/\bar{V}$')
p.grid()
p.xlim((-1, 1.3))
p.legend(fontsize=8)
p.savefig(join(mockDir,"plots","delta-vmaxHistPerDeltaNormed-qso-z07.png"))
p.clf()
g = lambda var, sig, A, mu : A *n.e**(- (var- mu)**2./ (2*sig**2.))
positive= (Y>0)&(YERR>0)
out2, cov2 = curve_fit(g, X[positive], Y[positive], (0.12, n.max(Y), -0.025), YERR[positive])# , maxfev = 5000)
#g = lambda var, sig, A : A *n.e**(- (var+0.025)**2./ (2*sig**2.))
#out2, cov2 = curve_fit(g, X[:-2], Y[:-2], (0.13, n.max(Y)), YERR[:-2])# , maxfev = 5000)
#print out2
p.figure(0)
p.errorbar(X,Y, yerr = YERR, label='DATA')
xpl = n.arange(X.min(),X.max(),0.001)
#p.plot(xpl, g(xpl, out2[0],out2[1]), label='gaussian fit')
p.plot(xpl, g(xpl, out2[0],out2[1],out2[2]), label='gaussian fit')
p.ylabel(r'counts')
p.xlabel(r'$log_{10}(V_{max})/\bar{V}$')
p.grid()
p.xlim((-1, 1.3))
p.legend()
p.title(r'$\sigma=$'+str(n.round(out2[0],3))+r', $\mu=$'+str(n.round(out2[2],3))+r', $A=$'+str(n.round(out2[1],3)))
p.savefig(join(mockDir,"plots","delta-vmaxHistPerDeltaNormed-FIT-qso-z07.png"))
p.clf()
"""
g = lambda var, sig, A, mu : A *n.e**(- (var- mu)**2./ (2*sig**2.))
out2, cov2 = curve_fit(g, xx, counts, (0.1, n.max(counts), 0.), 2*counts**0.5 , maxfev = 500000000)
chi2 = n.sum((g(xx,out2[0], out2[1],out2[2]) - counts)**2. * counts**(-0.5) / (len(counts) - len(out2)))
params[jj]=out2
paramsErr[jj] = [cov2[0][0], cov2[1][1], cov2[2][2]]
p.errorbar(xx,counts,yerr = counts**0.5 , label=r'$\delta\in$'+str(n.round(bins[ok][jj],2))+', '+str(n.round(bins[ok][jj+1],2)))
xpl = n.arange(xx.min(),xx.max(),0.001)
p.plot(xpl, g(xpl, out2[0],out2[1],out2[2]), label='gaussian')
p.ylabel(r'counts')
p.xlabel(r'$log_{10}(V_{max})/\bar{V}$')
p.grid()
p.title(r'$\sigma=$'+str(n.round(out2[0],3))+r', $\mu=$'+str(n.round(out2[2],3))+r', $A=$'+str(n.round(out2[2],3)))
p.legend()
p.show()
hd = fits.open(join( mockDir,"Box_HAM_z0.701838_nbar1.000000e-04_LRG.DF.fits.gz"))
hd = fits.open(join( mockDir,"Box_HAM_z0.701838_nbar2.400000e-04_ELG.DF.fits.gz"))
"""
```
#### File: bin_MD/v0/relation-mass-radius.py
```python
import pylab as p
import glob
import numpy as n
import astropy.cosmology as co
aa=co.Planck13
import astropy.units as uu
import cPickle
import sys
from scipy.interpolate import interp1d
import glob
snL=glob.glob("/data2/DATA/eBOSS/Multidark-properties/MDPL/*0023*.cat.gz")
import numpy as n
import cPickle
massB=n.arange(8,16,0.01)
vcirB=n.arange(0,4.5,0.01)
concB=n.arange(0,200,0.5)
meanR=[]
ms=n.arange(11.4,14,0.1)
for ii in range(len(ms)-1):
cen=(distinct==0)&(mtot>10**ms[ii])&(mtot<10**ms[ii+1])
meanR.append([n.mean(rvir[cen]), n.std(rvir[cen]),len(rvir[cen])])
meanR=n.array(meanR)
meanR2=meanR.T[:2].T
def pt(arr):
out=n.array([str(n.round(el,0))+" & " for el in arr])
return "".join(out)
for i in range(len(meanR2)):
print ms[i]," & ",ms[i+1]," & ",pt(meanR2[i])," \\\\"
for ii in range(len(snL)):
print snL[ii]
mtot, rvir, vcir, conc,distinct=n.loadtxt(snL[ii],unpack=True)
cen=(distinct==0)
volume=10**9
snList=[]
snL=glob.glob("/Volumes/data/BigMD/1Gpc_3840_Planck1/MFMC/*0023*MVr*")
snList=n.array(snList)
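# note (assumption about the original session): snList is meant to hold the snapshot
# identifiers parsed from the MFMC file names, since the `js` loops below index snList[jj];
# as pasted here it is still an empty array.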
snapNum,snapZ,snapA=n.loadtxt("/Volumes/data/BigMD/1Gpc_3840_Planck1/redshift-snapshot.list",unpack=True)
numtoZ=interp1d(snapNum,snapZ)
snL=glob.glob("/data2/DATA/eBOSS/Multidark-properties/MDPL/*.cat.gz")
js=[0,1,3,10,30,50]
for jj in js:
print jj
print snList[jj]
snL=glob.glob("/Volumes/data/BigMD/1Gpc_3840_Planck1/MFMC/*"+ str(snList[jj])+ "*MF.hist.dat")
nnC=n.empty([6,799])*0.
nnS=n.empty([6,799])*0.
for iii in range(len(snL)):
mmin,mmax,nnC[iii],nnS[iii]=n.loadtxt(snL[iii],unpack=True)
nC=n.sum(nnC,axis=0)
nS=n.sum(nnS,axis=0)
mass=(mmin+mmax)/2.
dLogM=0.01 # n.median(mmax-mmin)
sel=(nC>=10)&(nC<3840**3)
dmf=interp1d(mass[sel],nC[sel]/dLogM/volume)
p.plot(mass[sel],nC[sel]/dLogM/volume,ls='None',marker='+',label='halos z='+str(numtoZ(snList[jj])))
#p.plot(mass[sel],nS[sel]/dLogM/volume,'b+',label='sat')
p.grid()
p.xlabel('log halo mass')
p.ylabel(r'dN/dlogM/V $h^4$ Mpc$^{-3}M_\odot^{-1}$')
p.yscale('log')
p.ylim((1e-7,1))
p.xlim((10,16))
p.legend(fontsize=9)
p.savefig("1Gpc_3840_Planck1/MFplot/massfunction-evolution.pdf")
p.clf()
js=[0,1,3,10,30,50]
for jj in js:
print jj
print snList[jj]
snL=glob.glob("/Volumes/data/BigMD/1Gpc_3840_Planck1/MFMC/*"+ str(snList[jj])+ "*MF.hist.dat")
nnC=n.empty([6,799])*0.
nnS=n.empty([6,799])*0.
for iii in range(len(snL)):
mmin,mmax,nnC[iii],nnS[iii]=n.loadtxt(snL[iii],unpack=True)
nC=n.sum(nnC,axis=0)
nS=n.sum(nnS,axis=0)
mass=(mmin+mmax)/2.
dLogM=0.01 # n.median(mmax-mmin)
sel=(nC>=10)&(nC<3840**3)
p.plot(mass[sel],nC[sel]/dLogM/volume,ls='None',marker='+',label='halos z='+str(numtoZ(snList[jj])))
#p.plot(mass[sel],nS[sel]/dLogM/volume,'b+',label='sat')
p.grid()
p.xlabel('log halo mass')
p.ylabel(r'dN/dlogM/V $h^4$ Mpc$^{-3}M_\odot^{-1}$')
p.yscale('log')
p.ylim((1e-7,1))
p.xlim((10,16))
p.legend(fontsize=9)
p.savefig("1Gpc_3840_Planck1/MFplot/massfunction-evolution.pdf")
p.clf()
rho0i=aa.critical_density0.to(uu.solMass/(uu.megaparsec)**3)
rho0=1.51*10**9 * 3840**3 / 10**9 # Modot/Mpc^3
js=[0,1,3,10,30,50]
for jj in js:
print jj
print snList[jj]
snL=glob.glob("/Volumes/data/BigMD/1Gpc_3840_Planck1/MFMC/*"+ str(snList[jj])+ "*MF.hist.dat")
nnC=n.empty([6,799])
nnS=n.empty([6,799])
for iii in range(len(snL)):
mmin,mmax,nnC[iii],nnS[iii]=n.loadtxt(snL[iii],unpack=True)
nC=n.sum(nnC,axis=0)
nS=n.sum(nnS,axis=0)
mass=(mmin+mmax)/2.
dLogM=n.median(mmax-mmin)
sel=(nC>=10)&(nC<3840**3)
p.plot(mass[sel], 10**(2*mass[sel])/rho0*nC[sel]/ dLogM/ volume,ls='None', marker='+', label= 'halos z='+str(numtoZ(snList[jj])))
#p.plot(mass[sel],nS[sel]/dLogM/volume,'b+',label='sat')
p.grid()
p.xlabel('log halo mass')
p.ylabel(r'M$^2/\rho_0$ dN/dlogM ')
p.yscale('log')
#p.ylim((1e-7,1))
p.xlim((10,16))
p.legend(fontsize=9)
p.savefig("1Gpc_3840_Planck1/MFplot/massfunction-M2-evolution.pdf")
p.clf()
sys.exit()
import numpy as n
import cPickle
massB=n.arange(8,16,0.01)
vcirB=n.arange(0,4.5,0.01)
concB=n.arange(0,200,0.5)
for ii in range(len(snL)):
print snL[ii][:-6]
mtot, rvir, vcir, conc,distinct=n.loadtxt(snL[ii],unpack=True)
cen=(distinct==0)
sat=(cen==False)
nnS,bb=n.histogram(n.log10(mtot[sat]),bins=massB)
nnC,bb=n.histogram(n.log10(mtot[cen]),bins=massB)
n.savetxt(snL[ii][:-6]+"MF.hist.dat",n.transpose([bb[:-1], bb[1:],nnC,nnS]))
print "M"
nnS,bb=n.histogram(n.log10(vcir[sat]),bins= vcirB)
nnC,bb=n.histogram(n.log10(vcir[cen]),bins= vcirB)
n.savetxt(snL[ii][:-6]+"VCIR.hist.dat",n.transpose([bb[:-1], bb[1:],nnC,nnS]))
print"V"
dataC=n.histogram2d(n.log10(mtot[cen]),conc[cen],bins=[massB,concB])
dataS=n.histogram2d(n.log10(mtot[sat]),conc[sat],bins=[massB,concB])
f=open(snL[ii][:-6]+"MCr.2dhist.pkl",'w')
cPickle.dump([dataC,dataS],f)
f.close()
print "MC"
dataC=n.histogram2d(n.log10(mtot[cen]),n.log10(vcir[cen]),bins=[massB, vcirB])
dataS=n.histogram2d(n.log10(mtot[sat]),n.log10(vcir[sat]),bins=[massB, vcirB])
f=open(snL[ii][:-6]+"MVr.2dhisti.pkl",'w')
cPickle.dump([dataC,dataS],f)
f.close()
print "MV"
```
#### File: bin_onePT/extra/mivr-2-fit-z0.py
```python
import glob
import sys
import cPickle
from os.path import join
import numpy as n
import astropy.io.fits as fits
import os
import matplotlib
#matplotlib.use('pdf')
matplotlib.rcParams['font.size']=12
import matplotlib.pyplot as p
from scipy.optimize import minimize
from scipy.optimize import curve_fit
fun = lambda lg_X, lg_A, lg_X0, lg_alpha, lg_beta : n.log10( 10**lg_A * (10**lg_X/10**lg_X0)**(-10**lg_beta) * n.e**(- (10**lg_X/10**lg_X0)**(10**lg_alpha) ) )
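# Descriptive note: `fun` is a Schechter-like cumulative mass function model written entirely
# in log10 space: lg_A sets the normalisation, lg_X0 the characteristic mass, 10**lg_beta the
# low-mass power-law slope and 10**lg_alpha the sharpness of the exponential cut-off.
# Minimal evaluation sketch (parameter values are illustrative only, matching the p0 used below):
#   lg_n_gt_M = fun(13.0, -4., 13.5, -0.2, -0.1)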
dir='..'
dir_04 = join(dir,"MD_0.4Gpc")
dir_10 = join(dir,"MD_1Gpc")
dir_25 = join(dir,"MD_2.5Gpc")
dir_40 = join(dir,"MD_4Gpc")
dir_25N = join(dir,"MD_2.5GpcNW")
dir_40N = join(dir,"MD_4GpcNW")
data = fits.open( join("..", "M200c", "MD_M200c_summary.fits") )[1].data
errorLog = 0.03
NminCount = 10
Npmin = 300
limits_04 = [Npmin*9.63 * 10**7, 5e12]
limits_10 = [Npmin*1.51 * 10**9., 5e13]
limits_25 = [Npmin*2.359 * 10**10., 5e14]
limits_40 = [Npmin* 9.6 * 10**10. , 5e15]
MPART = n.array([9.63 * 10**7, 1.51 * 10**9, 2.359 * 10**10, 9.6 * 10**10])
names = n.array(["SMD", "MDPL", "BigMD", "HMD", "BigMDNW", "HMDNW"])
zmin = -0.01
zmax = 0.01
def fitData(qty = 'M200c', cos = "cen", zmin = -0.01, zmax = 0.1, p0 = [-4., 13.0, -0.3, -0.04]):
"""
Plots the data to be used in the fits later in the analysis.
"""
# redshift selection
zSel = (data["redshift"]>zmin)&(data["redshift"]<zmax)
# mass selection
if cos == "cen":
mSel = ((data["boxLength"]==400.)&(data["log_"+qty+"_min"]>n.log10(limits_04[0]))) | ((data["boxLength"]==1000.)&(data["log_"+qty+"_min"]>n.log10(limits_10[0]))) | ((data["boxLength"]==2500.)&(data["log_"+qty+"_min"]>n.log10(limits_25[0]))) | ((data["boxLength"]==4000.)&(data["log_"+qty+"_min"]>n.log10(limits_40[0])))
if cos == "sat":
mSel = ((data["boxLength"]==400.)&(data["log_"+qty+"_min"]>n.log10(limits_04[0]))) | ((data["boxLength"]==1000.)&(data["log_"+qty+"_min"]>n.log10(limits_10[0]))) #| ((data["boxLength"]==2500.)&(data["log_"+qty+"_min"]>n.log10(limits_25[0]))) | ((data["boxLength"]==4000.)&(data["log_"+qty+"_min"]>n.log10(limits_40[0])))
# minimum number counts selection
nSel = (data['dN_counts_'+cos]>NminCount)
# altogether
ok = (zSel) & (mSel) & (nSel)
# now the plot
lg_M200c = (data["log_"+qty+"_min"][ok]+data["log_"+qty+"_max"][ok])/2.
#print len(lg_M200c), lg_M200c
lg_MF_c = n.log10(data["dNdVdlnM_"+cos+"_c"][ok])
#print lg_MF_c
lg_1pz = n.log10(1+ data["redshift"][ok])
#print lg_1pz
funG = lambda lg_X, lg_z, ps : fun( lg_X, ps[0], ps[1], ps[2], ps[3] ) #
chi2fun = lambda ps : n.sum( (funG(lg_M200c, lg_1pz, ps) - lg_MF_c)**2. / (errorLog)**2. )/(len(lg_MF_c) - len(ps))
res = minimize(chi2fun, p0, method='Powell',options={'xtol': 1e-8, 'disp': True, 'maxiter' : 5000000000000})
pOpt = res.x
cov = res.direc
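# note: with method='Powell', res.direc is the final direction set of the minimiser, not a
# parameter covariance matrix, so it should not be read as formal parameter uncertainties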
chi2perpoint = lambda ps : (funG(lg_M200c, lg_1pz, ps) - lg_MF_c)**2. / (errorLog)**2.
chi2pp = chi2perpoint(pOpt)
print pOpt, cov
lg_M200c_model = n.arange(n.min(lg_M200c),n.max(lg_M200c),0.1)
X,Y = n.meshgrid(lg_M200c_model, n.arange(zmin, zmax+0.025,0.025))
Z = funG(X,n.log10(1+Y),pOpt)
n.savetxt(join(dir,qty,"M200c-"+cos+"-cumulative-function-z0-model-pts.txt"),n.transpose([n.hstack((X)), n.hstack((Y)), n.hstack((Z))]) )
f=open(join(dir,qty,"M200c-"+cos+"-cumulative-function-z0-params.pkl"), 'w')
cPickle.dump(res, f)
f.close()
X,Y,Z = n.loadtxt(join(dir,qty,"M200c-"+cos+"-cumulative-function-z0-model-pts.txt"), unpack=True)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(X, Z, c=Y, s=5, marker='o',label="model", rasterized=True)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'log$_{10}[M_{200c}/(h^{-1}M_\odot)]$')
p.ylabel(r'log n(>M)')
gl = p.legend(loc=3,fontsize=10)
gl.set_frame_on(False)
p.ylim((-8, 1))
p.xlim((9.5,16))
p.grid()
p.savefig(join(dir,qty,"M200c-"+cos+"-cumulative-function-model.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(lg_M200c, lg_MF_c, c=chi2pp, s=5, marker='o',label="chi2", rasterized=True)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("chi2 per point")
p.xlabel(r'log$_{10}[M_{200c}/(h^{-1}M_\odot)]$')
p.ylabel(r'log n(>M)')
gl = p.legend(loc=3,fontsize=10)
gl.set_frame_on(False)
p.ylim((-8, 1))
p.xlim((9.5,16))
p.grid()
p.savefig(join(dir,qty,"M200c-"+cos+"-cumulative-function-chi2PP.png"))
p.clf()
fitData(qty = 'M200c', cos = "cen", zmin = -0.01, zmax = 0.1, p0 = [-4, 13.5, -0.2, -0.1])
fitData(qty = 'M200c', cos = "sat", zmin = -0.01, zmax = 0.1, p0 = [-4., 12.8, -0.3, -0.03])
```
#### File: bin/bin_onePT/mvir-6-substructure-relative-mf.py
```python
import astropy.io.fits as fits
import matplotlib.pyplot as p
import numpy as n
from os.path import join
import os
import sys
from os.path import join
import numpy as n
import astropy.io.fits as fits
import os
import sys
import lib_functions_1pt as lib
from hmf import MassFunction
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmo = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
sigma_val=0.8229
delta_c = 1.686
from scipy.interpolate import interp1d
from scipy.integrate import quad
import numpy as n
from scipy.interpolate import interp1d
from scipy.misc import derivative
from scipy.optimize import minimize
from scipy.optimize import curve_fit
import matplotlib
matplotlib.use('pdf')
matplotlib.rcParams['font.size']=12
import matplotlib.pyplot as p
boxRedshift = 0.
version='v3'
omega = lambda zz: cosmo.Om0*(1+zz)**3. / cosmo.efunc(zz)**2
DeltaVir_bn98 = lambda zz : (18.*n.pi**2. + 82.*(omega(zz)-1)- 39.*(omega(zz)-1)**2.)/omega(zz)
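# Descriptive note: omega(zz) is Omega_m(z) and DeltaVir_bn98 is the Bryan & Norman (1998)
# fitting formula for the virial overdensity, here expressed with respect to the mean density
# (hence delta_wrt='mean' in the MassFunction call below).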
hf = MassFunction(cosmo_model=cosmo, sigma_8=sigma_val, z=boxRedshift, delta_h=DeltaVir_bn98(boxRedshift), delta_wrt='mean', Mmin=7, Mmax=16.5)
f_BH = lambda sigma, A, a, p, q: A* (2./n.pi)**(0.5) * ( 1 + (sigma**2./(a**delta_c*2.))**(p) )*(delta_c*a**0.5/sigma)**(q)*n.e**(-a*delta_c**2./(2.*sigma**2.))
X = n.arange(-0.6, 0.5, 0.01) #n.log10(1./sigma)
sigma = 10**-X
hz = cosmo.H( boxRedshift ).value / 100.
# m sigma relation using the sigma8 corrected power spectrum
m2sigma = interp1d(hf.M, hf.sigma )
# m nu relation: nu = (delta_c / sigma_m)**2
m2nu = interp1d(hf.M, hf.nu )
# jacobian
toderive = interp1d(n.log(hf.M), n.log(hf.sigma))
mass=hf.M[100:-100]
dlnsigmadlnm = derivative(toderive, n.log(mass) )
rhom_units = cosmo.Om(boxRedshift)*cosmo.critical_density(boxRedshift).to(u.solMass/(u.Mpc)**3.)#/(cosmo.h)**2.
# in units (Msun/h) / (Mpc/h)**3
rhom = rhom_units.value # hf.mean_density#/(hz)**2.
ftC16 = f_BH(hf.sigma[100:-100], 0.279, 0.908, 0.671, 1.737)
MF_MD = interp1d(mass, ftC16*rhom*abs(dlnsigmadlnm)/mass)
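# Hedged sketch (not in the original script): MF_MD(M) approximates dn/dlnM at z=0 for the
# Bhattacharya-like multiplicity function with the parameters used above, so the comoving
# number density above some mass could be estimated by integrating in ln M, e.g.
# (illustrative only, valid within the interpolation range of `mass`):
#   n_gt_1e13 = quad(lambda lnM: MF_MD(n.e**lnM), n.log(1e13), n.log(mass.max()))[0]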
NpartMin = 50.
p_init = (-1.85, 7., -2.3, 4.)
hd04_1 = fits.open(join(os.environ['MD04_DIR'],version, "subhalos", "out_88_subhalos_inDistinct.fits"))[1].data
hd04_2 = fits.open(join(os.environ['MD04_DIR'],version, "subhalos", "out_88_subhalos_inDistinct2.fits"))[1].data
hd04_3 = fits.open(join(os.environ['MD04_DIR'],version, "subhalos", "out_88_subhalos_inDistinct3.fits"))[1].data
mp04 = n.log10(NpartMin*9.63 * 10**7)
hd10_1 = fits.open(join(os.environ['MD10_DIR'],version, "subhalos", "out_128_subhalos_inDistinct.fits"))[1].data
hd10_2 = fits.open(join(os.environ['MD10_DIR'],version, "subhalos", "out_128_subhalos_inDistinct2.fits"))[1].data
hd10_3 = fits.open(join(os.environ['MD10_DIR'],version, "subhalos", "out_128_subhalos_inDistinct3.fits"))[1].data
mp10 = n.log10(NpartMin*1.51 * 10**9)
hd25_1 = fits.open(join(os.environ['MD25_DIR'],version, "subhalos", "out_80_subhalos_inDistinct.fits"))[1].data
hd25_2 = fits.open(join(os.environ['MD25_DIR'],version, "subhalos", "out_80_subhalos_inDistinct2.fits"))[1].data
hd25_3 = fits.open(join(os.environ['MD25_DIR'],version, "subhalos", "out_80_subhalos_inDistinct3.fits"))[1].data
mp25 = n.log10(NpartMin*2.359 * 10**10)
hd25nw_1 = fits.open(join(os.environ['MD25NW_DIR'],version, "subhalos", "out_80_subhalos_inDistinct.fits"))[1].data
hd25nw_2 = fits.open(join(os.environ['MD25NW_DIR'],version, "subhalos", "out_80_subhalos_inDistinct2.fits"))[1].data
hd25nw_3 = fits.open(join(os.environ['MD25NW_DIR'],version, "subhalos", "out_80_subhalos_inDistinct3.fits"))[1].data
mp25nw = mp25
hd40_1 = fits.open(join(os.environ['MD40_DIR'],version, "subhalos", "out_128_subhalos_inDistinct.fits"))[1].data
hd40_2 = fits.open(join(os.environ['MD40_DIR'],version, "subhalos", "out_128_subhalos_inDistinct2.fits"))[1].data
hd40_3 = fits.open(join(os.environ['MD40_DIR'],version, "subhalos", "out_128_subhalos_inDistinct3.fits"))[1].data
mp40 = n.log10(NpartMin*9.6 * 10**10. )
hd40nw_1 = fits.open(join(os.environ['MD40NW_DIR'],version, "subhalos", "out_16_subhalos_inDistinct.fits"))[1].data
hd40nw_2 = fits.open(join(os.environ['MD40NW_DIR'],version, "subhalos", "out_16_subhalos_inDistinct2.fits"))[1].data
hd40nw_3 = fits.open(join(os.environ['MD40NW_DIR'],version, "subhalos", "out_16_subhalos_inDistinct3.fits"))[1].data
mp40nw = mp40
def get_ids(hd04_1, mmin=14.5, mmax=15.5):
msel = (hd04_1['mvir_cen']>mmin) & (hd04_1['mvir_cen']<mmax)
return set(hd04_1['id_cen'][msel])
#id_1=get_ids(hd04_1)
#id_2=get_ids(hd04_2)
#id_3=get_ids(hd04_3)
#hd04_1['GroupSize'][msel]
#hd04_1['GroupID'][msel]
allidsat = set(hd04_1['id_sat'])
exponent = 4.
fsat_unev = lambda xi, a, b, N0 : N0 * xi**a * n.e**(-b*xi**3.)
fsat = lambda xi, a, b, N0, exponent : N0 * xi**a * n.e**(-b*xi**exponent)
logfsat= lambda logxi, a, b, logN0, exponent : n.log10( 10**logN0 * (10**logxi)**a * n.e**(-b*(10**logxi)**exponent))
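# Descriptive note: fsat/logfsat parameterise the sub-halo mass-ratio function
# N(xi) = N0 * xi**a * exp(-b * xi**exponent) with xi = M_sub/M_distinct; logfsat is the same
# model evaluated in log10 space, which is the form passed to curve_fit further down.
# Illustrative evaluation with the initial-guess parameters p_init defined above:
#   # logfsat(-2., -1.85, 7., -2.3, 4.) -> log10 of the model at M_sub/M_distinct = 0.01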
def get_hist_MR(hd04_1, Msat = 'mvir_sat', mmin=14.5, mmax=15.5, Lbox=400.,dlogBins = 0.05, MP = 9, stat=False):
"""return dNsat / volume / dln(Msub/Mdistinct)
"""
msel = (hd04_1['mvir_cen']>mmin) & (hd04_1['mvir_cen']<mmax) & (hd04_1[Msat]>MP)
massR = - hd04_1['mvir_cen'][msel] + hd04_1[Msat][msel]
bins = n.arange(-6, 0.06, dlogBins)
xb = (bins[1:]+bins[:-1])/2.
NcenWS04 = n.histogram(massR, bins, weights=n.ones_like(massR)/Lbox**3./(dlogBins*n.log(10))*(10**(mmin/2.+mmax/2.)/rhom))[0]
NNN,bins0 = n.histogram(massR, bins)
#bins0 = n.histogram(massR, bins)[1]
ok = (xb>0.3+MP-mmin)
if stat :
print "MD",Lbox,",Nhalo in distinct with", mmin, "<m<",mmax, "=", len(hd04_1['mvir_cen'][msel])
print "bins",bins0[NNN>10]+(mmin+mmax)/2.
print "Nsub",NNN[NNN>10]
return xb, NcenWS04, NNN, ok
def get_total(hd04_1, hd04_2, hd04_3, Lbox, mmin=14.5, mmax=15.5, MP=9):
"""return dNsat / volume / d(Msub/Mdistinct)
print '------------------------------------------------------------------'
print '------------------------------------------------------------------'
"""
print '----------------- mvir_sat'
xb, ratio_1, NN_1,ok_1 = get_hist_MR(hd04_1, 'mvir_sat', Lbox=Lbox, mmin=mmin, mmax=mmax, MP=MP, stat=True)
print '----------------- mvir_sat_sat'
xb, ratio_2, NN_2,ok_1 = get_hist_MR(hd04_2, 'mvir_sat_n_sat_n_1', Lbox= Lbox, mmin=mmin, mmax=mmax,MP=MP, stat=True)
print '----------------- mvir_sat_sat_sat'
xb, ratio_3, NN_3,ok_1 = get_hist_MR(hd04_3, 'mvir_sat_n_sat_n_1_sat_n_2', Lbox= Lbox, mmin=mmin, mmax=mmax,MP=MP, stat=True)
err = (NN_1+NN_2+NN_3)**(-0.5)
return xb, (ratio_1+ratio_2+ratio_3)*10**-xb, err, ok_1
def plot_SHMFR(mmin, mmax):
p.figure(0, (5,5))
p.axes([0.17, 0.17, 0.75, 0.75])
print '------------------------------------------------------------------'
print 'MD04'
print '------------------------------------------------------------------'
xb, y, err, ok = get_total(hd04_1, hd04_2, hd04_3, 400., mmin, mmax, mp04)
print ok
x_data = xb[ok]
y_data = y[ok]
y_data_err = err[ok]
if len(xb[ok])>2:
#print len(xb[ok])
p.errorbar(xb[ok], n.log10(y[ok])+xb[ok], yerr= err[ok], label='M04')
print '------------------------------------------------------------------'
print 'MD10'
print '------------------------------------------------------------------'
xb, y, err, ok = get_total(hd10_1, hd10_2, hd10_3, 1000., mmin, mmax, mp10)
print ok
if len(xb[ok])>2:
p.errorbar(xb[ok], n.log10(y[ok])+xb[ok], yerr= err[ok], label='M10')
x_data = n.hstack((x_data, xb[ok]))
y_data = n.hstack((y_data, y[ok]))
y_data_err = n.hstack((y_data_err, err[ok]))
print '------------------------------------------------------------------'
print 'MD25'
print '------------------------------------------------------------------'
xb, y, err, ok = get_total(hd25_1, hd25_2, hd25_3, 2500., mmin, mmax, mp25)
print ok
if len(xb[ok])>2:
p.errorbar(xb[ok], n.log10(y[ok])+xb[ok], yerr= err[ok], label='M25')
x_data = n.hstack((x_data, xb[ok]))
y_data = n.hstack((y_data, y[ok]))
y_data_err = n.hstack((y_data_err, err[ok]))
print '------------------------------------------------------------------'
print 'MD25n'
print '------------------------------------------------------------------'
xb, y, err, ok = get_total(hd25nw_1, hd25nw_2, hd25nw_3, 2500., mmin, mmax, mp25nw)
print ok
if len(xb[ok])>2:
p.errorbar(xb[ok], n.log10(y[ok])+xb[ok], yerr= err[ok], label='M25n')
x_data = n.hstack((x_data, xb[ok]))
y_data = n.hstack((y_data, y[ok]))
y_data_err = n.hstack((y_data_err, err[ok]))
print '------------------------------------------------------------------'
print 'MD40'
print '------------------------------------------------------------------'
xb, y, err, ok = get_total(hd40_1, hd40_2, hd40_3, 4000., mmin, mmax, mp40)
print ok
if len(xb[ok])>2:
p.errorbar(xb[ok], n.log10(y[ok])+xb[ok], yerr= err[ok], label='M40')
x_data = n.hstack((x_data, xb[ok]))
y_data = n.hstack((y_data, y[ok]))
y_data_err = n.hstack((y_data_err, err[ok]))
print '------------------------------------------------------------------'
print 'MD40n'
print '------------------------------------------------------------------'
xb, y, err, ok = get_total(hd40nw_1, hd40nw_2, hd40nw_3, 4000., mmin, mmax, mp40nw)
print ok
if len(xb[ok])>2:
p.errorbar(xb[ok], n.log10(y[ok])+xb[ok], yerr= err[ok], label='M40n')
x_data = n.hstack((x_data, xb[ok]))
y_data = n.hstack((y_data, y[ok]))
y_data_err = n.hstack((y_data_err, err[ok]))
pouet = (y_data>0)
print "fitting", len(x_data[pouet]), "points"
if len(x_data[pouet])>10:
out = curve_fit(logfsat, x_data[pouet], n.log10(y_data[pouet]), sigma = 0.05+y_data_err[pouet], p0 = p_init, maxfev = 500000000)
print "fit:", out[0], out[1].diagonal()**0.5
xx = n.arange(-6,0, 0.01)
#p.plot(xx, n.log10(fsat_unev(10**xx, -1.8, 6.283, 0.21)/(10**(mmin/2.+mmax/2.)/rhom))+xx, label='unevolved', ls='solid', color='k')
p.plot(xx, logfsat(xx, out[0][0], out[0][1], out[0][2], out[0][3])+xx, label='fit', ls='solid', color='k')
p.ylabel(r'$\log_{10}\left[ \frac{M_d M_s}{\rho_m} \frac{dn}{dM_s} \right] $')
p.xlabel(r'$\log_{10}(M_{s}/M_{d})$')
p.title(r"$"+str(mmin)+"<M_{d}<"+str(mmax)+"$")
p.legend(loc=0, frameon=False)
#p.yscale('log')
p.ylim((-5, 1))
p.xlim(( -4, 0 ))
p.grid()
p.savefig(join(os.environ['MVIR_DIR'], 'shmfr_'+str(mmin)+"_M_"+str(mmax)+".png"))
n.savetxt(join(os.environ['MVIR_DIR'], 'shmfr_'+str(mmin)+"_M_"+str(mmax)+".txt"), n.transpose([x_data[pouet], n.log10(y_data[pouet]), 0.05+y_data_err[pouet]]))
p.clf()
return out
else:
return -99.99*n.ones_like(p_init)
outs = []
mms = n.hstack(( n.arange(12.5, 14.6, 0.5), 15.5 ))
for mmin, mmax in zip(mms[:-1], mms[1:]):
print mmin, mmax
outs.append( plot_SHMFR(mmin, mmax) )
for out in outs:
print n.round(out[0][0],4), n.round(out[1].diagonal()[0]**0.5,4)
for out in outs:
print n.round(out[0][1],4), n.round(out[1].diagonal()[1]**0.5,4)
for out in outs:
print n.round(out[0][2],4), n.round(out[1].diagonal()[2]**0.5,4)
for out in outs:
print n.round(out[0][3],4), n.round(out[1].diagonal()[3]**0.5,4)
import glob
datalist=n.array(glob.glob(join(os.environ['MVIR_DIR'], "shmfr_*_M_*.txt")))
x_fit=[]
y_fit=[]
yerr_fit=[]
for file in datalist:
xx, yy, ye = n.loadtxt(file, unpack = True)
x_fit.append(xx)
y_fit.append(yy)
yerr_fit.append(ye)
out = curve_fit(logfsat, n.hstack((x_fit)), n.hstack((y_fit)), sigma = n.hstack((yerr_fit)), p0 = p_init, maxfev = 500000000)
print out[0], out[1].diagonal()**0.5
```
#### File: bin/bin_SMHMr/MD10_add_LSAR_3.py
```python
import glob
import astropy.io.fits as fits
import os
import time
import numpy as n
import sys
# specific functions
from scipy.stats import norm
from scipy.integrate import quad
from scipy.interpolate import interp1d
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
# read the Xray AGN luminosity function and add a condition to reproduce it
def create_catalogs_out(fileList, z, snap_name):
"""
Adds Xray emission mass using the Bongiorno et al. 2016 model to the rockstar outputs.
"""
def f_lambda_sar( DATA ):
logM, log_lambda_SAR = DATA
log_lambda_SAR_var = 10**( log_lambda_SAR - 33.8 + 0.48 * (logM - 11.) )
return 1. / ( log_lambda_SAR_var**(1.01 - 0.58 * (z - 1.1)) + log_lambda_SAR_var**(3.72) )
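# Descriptive note: f_lambda_sar is the (unnormalised) Bongiorno et al. (2016)-style double
# power law in specific accretion rate, with a break that shifts with stellar mass and a
# bright-end slope that evolves with redshift. Below it is normalised column by column over
# the lambda_SAR grid and inverted by comparing a uniform random number with the cumulative
# distribution, which assigns one lambda_SAR value to every galaxy.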
dl=0.01
log_lambda_SAR_values = n.arange(32-dl,36+2*dl,dl)
# loops over files
for fileName in fileList:
t0=time.time()
outFile = fileName[:-5]+"_LSAR.fits"
# opens all relevant files
msFile = fileName[:-5]+"_Ms.fits"
hd = fits.open(fileName)
hm = fits.open(msFile)
logM = hm[1].data['stellar_mass_Mo13_mvir']
agn_random_number = n.random.random(len(logM))
log_lSAR = n.zeros(len(logM))
t0 = time.time()
ii0=0
ii_step=12000
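# Descriptive note: the catalogue is processed in chunks of ii_step objects so that the
# (chunk size x lambda_SAR grid) meshgrid built below stays small enough to hold in memory.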
for ii0 in n.arange(0, len(logM), ii_step):
ii1=ii0+ii_step
X,Y = n.meshgrid(logM[ii0:ii1], log_lambda_SAR_values)
#Z = n.ones_like(X)*z
probas_un = f_lambda_sar([ X, Y])#, Z ])
norm = n.sum(probas_un, axis=0)
probas = probas_un / norm
cmat = n.array([ agn_random_number[ii0:ii1] > n.sum(probas.T[:,jj:], axis=1) for jj in n.arange(len(log_lambda_SAR_values)) ])
#print(cmat.shape)#, cmat[0])
#print(cmat.T[1])
print(ii0, len(logM), cmat.shape, time.time()-t0)
values = log_lambda_SAR_values[n.array([n.min(n.where(cmat.T[jj]==True)) for jj in n.arange(len(cmat.T)) ])]
#print(values.shape, values[:10])
log_lSAR[ii0:ii1] = values
# columns related to Xray AGN
col1 = fits.Column(name='lambda_sar_Bo16',format='D', array = log_lSAR )
col1b = fits.Column(name='agn_random_number',format='D', array = agn_random_number )
#define the table hdu
colArray = [col1]
colArray.append(col1b)
#for col in hd[1].columns :
#colArray.append(col)
hdu_cols = fits.ColDefs(colArray)
tb_hdu = fits.BinTableHDU.from_columns( hdu_cols )
#define the header
prihdr = fits.Header()
prihdr['author'] = 'JC'
prihdu = fits.PrimaryHDU(header=prihdr)
#writes the file
thdulist = fits.HDUList([prihdu, tb_hdu])
if os.path.isfile(outFile):
os.system("rm "+outFile)
thdulist.writeto(outFile)
print(time.time()-t0)
# open the output file_type
summ = fits.open(os.path.join(os.environ["MD10"], 'output_MD_1.0Gpc.fits'))[1].data
for el in summ[27:36]:#[27:36]:
print(el)
fileList_snap = n.array(glob.glob(os.path.join(os.environ["MD10"], 'work_agn', 'out_'+el['snap_name']+'_SAM_Nb_?.fits')))
fileList_snap.sort()
print(fileList_snap)
create_catalogs_out(fileList_snap, el['redshift'], el['snap_name'])
```
#### File: bin/bin_SMHMr/MD10_add_Ms_2.py
```python
import glob
import astropy.io.fits as fits
import os
import time
import numpy as n
import sys
# specific functions
from scipy.stats import norm
# dedicated packages
#import StellarMass
meanSM= lambda Mh, z : n.log10(Mh * 2. * ( 0.0351 - 0.0247 * z/(1.+z)) / ((Mh/ (10**(11.79 + 1.5 * z/(1.+z))) )**(- 0.9 + 0.5 * z/(1.+z)) + ( Mh /(10**(11.79 + 1.5 * z/(1.+z))) )**(0.67 + 0.2 * z/(1.+z)) ) )
fun = lambda mmm : norm.rvs( loc = mmm, scale = 0.15 )
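# Hedged sketch (assumption, not used by the pipeline below): a one-off draw of a stellar
# mass for a single halo, combining the Moster et al. (2013) mean relation above with the
# 0.15 dex lognormal scatter implemented by `fun`. The helper name is illustrative only.
def example_stellar_mass_draw(mvir_h, z, h=0.6777):
    # mvir_h is the linear halo mass in Msun/h (the catalogues store log10, so pass 10**mvir)
    return fun(meanSM(mvir_h / h, z))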
def create_catalogs_out(fileList, z):
"""
Adds stellar mass using the Moster et al. 2013 model to the rockstar outputs.
"""
for fileName in fileList:
t0=time.time()
outFile = fileName[:-5]+"_Ms.fits"
hd = fits.open(fileName)
mean_SM = meanSM(10**hd[1].data['mvir']/0.6777, z)
#print "mean mgal", mean_SM
Mgal_mvir_Mo13 = n.array([fun(el) for el in mean_SM]) # n.array(pool.starmap( fun, mean_SM ))
#print "res mgal", Mgal_mvir_Mo13
#print "diff mgal - mvir", n.mean(mean_SM-Mgal_mvir_Mo13)
#print "mean, std magl - mh",n.mean(mean_SM-Mgal_mvir_Mo13), n.std(mean_SM-Mgal_mvir_Mo13)
sel = (hd[1].data['mvir']>0)
Mgal_mvir_Mo13[sel==False] = n.zeros_like(Mgal_mvir_Mo13[sel==False])
col00 = fits.Column(name='stellar_mass_Mo13_mvir',format='D', unit='logMsun', array = Mgal_mvir_Mo13 )
col01 = fits.Column(name='stellar_mass_reliable', format='L', array = sel )
#define the table hdu
colArray = []
colArray.append(hd[1].columns[0])
# Mvir stellar mass
colArray.append(col00)
colArray.append(col01)
hdu_cols = fits.ColDefs(colArray)
tb_hdu = fits.BinTableHDU.from_columns( hdu_cols )
#define the header
prihdr = fits.Header()
prihdr['author'] = 'JC'
prihdr['SAMfile'] = os.path.basename(fileName)
prihdu = fits.PrimaryHDU(header=prihdr)
#writes the file
thdulist = fits.HDUList([prihdu, tb_hdu])
if os.path.isfile(outFile):
os.system("rm "+outFile)
thdulist.writeto(outFile)
print( time.time()-t0)
# open the output file_type
summ = fits.open(os.path.join(os.environ["MD10"], 'output_MD_1.0Gpc.fits'))[1].data
for ii in range(len(summ))[18:27]:
print( summ[ii])
fileList = n.array(glob.glob(os.path.join(os.environ["MD10"], 'work_agn', 'out_'+summ['snap_name'][ii]+'_SAM_Nb_?.fits')))
#outFile = fileName[:-5]+"_Ms.fits"
z = summ['redshift'][ii]
print( fileList)
create_catalogs_out(fileList, z)
```
#### File: bin/bin_SMHMr/MD10-pie-plot.py
```python
import time
t0 = time.time()
import os
import numpy as n
import sys
import glob
import cPickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
L_box = 1000./0.6777
tracer_names = n.array(['S8_ELG', 'S8_BG1', 'S8_BG2', 'S5_GAL', 'S8_QSO', 'S6_AGN', 'S5_BCG'])
marker_dict={'S5_BCG':'1', 'S5_GAL':'2', 'S6_AGN':'3', 'S8_BG1':',', 'S8_BG2':',', 'S8_ELG':',', 'S8_QSO':'x'}
color_dict ={'S5_BCG':'r', 'S5_GAL':'r', 'S6_AGN':'m', 'S8_BG1':'k', 'S8_BG2':'g', 'S8_ELG':'b', 'S8_QSO':'g'}
p0 = n.array([[-1., -1.]])
points = {'S5_BCG':p0, 'S5_GAL':p0, 'S6_AGN':p0, 'S8_BG1':p0, 'S8_BG2':p0, 'S8_ELG':p0, 'S8_QSO':p0}
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
zs = n.arange(0.,4,0.001)
dc_2_z = interp1d(cosmoMD.comoving_distance(zs),zs)
import astropy.io.fits as fits
sf = fits.open(os.path.join(os.environ['MD10'],'output_MD_1.0Gpc.fits'))[1].data
plot_dir = '/afs/mpe/www/people/comparat/eRoMok/pie_plots/'
work_dir = os.path.join(os.environ['MD10'],'work_agn')
# redshift loop
#ii = 0
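# Descriptive note on get_slice (added documentation): the observer sits at
# (x_observer, y_observer, z_observer) in Mpc, the box can be replicated along the line of
# sight via x_shift/y_shift/z_shift, only haloes inside the slab slice_z_min < z < slice_z_max
# and with comoving distance between distance_min and distance_max are kept, and the selected
# (x, y) positions of each tracer are accumulated in `points` and dumped to cpickle_dump_file.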
def get_slice(cpickle_dump_file, x_observer=0., y_observer=0., z_observer = 0., x_shift=0., y_shift=0., z_shift=0., slice_z_min=0., slice_z_max = 10., distance_min=0., distance_max = L_box):
snap_selection = (sf['comoving_distance']<distance_max)&(sf['comoving_distance']>distance_min)
snaps = sf[snap_selection]
z_all = sf['redshift'][snap_selection]
z_boundaries = n.hstack((dc_2_z(distance_min), (z_all[1:]+z_all[:-1])/2., dc_2_z(distance_max)))
for ii, el in enumerate(snaps): # in range(len(z_all)):
z_min, z_max = z_boundaries[ii], z_boundaries[ii+1]
r_min, r_max = cosmoMD.comoving_distance(z_min).value, cosmoMD.comoving_distance(z_max).value
position_files = n.array(glob.glob(os.path.join(work_dir, 'out_'+el['snap_name']+'_SAM_Nb_?.fits')))
position_files.sort()
# position file loop
print r_min, r_max
for index in range(len(position_files)):
print time.time()-t0
print position_files[index]
positions = fits.open(position_files[index])[1].data
tracer_files = n.array(glob.glob(os.path.join(work_dir, 'out_'+el['snap_name']+'_SAM_Nb_'+str(index)+'_4MOST_*.fits')))
tracer_files.sort()
# tracer loop
#path_2_tracer_file = tracer_files[0]
for path_2_tracer_file in tracer_files:
print path_2_tracer_file
spl_bn = os.path.basename(path_2_tracer_file)[:-5].split('_')
tracer_name = spl_bn[-2]+'_'+spl_bn[-1]
ids = fits.open(path_2_tracer_file)[1].data['line_number']
x_i = positions['x'][ids]/0.6777 - x_observer + x_shift
y_i = positions['y'][ids]/0.6777 - y_observer + y_shift
z_i = positions['z'][ids]/0.6777 - z_observer + z_shift
shell = (x_i*x_i + y_i*y_i + z_i*z_i < r_max**2.) & (x_i*x_i + y_i*y_i + z_i*z_i > r_min**2.)
slice = (shell) & (z_i>slice_z_min) &(z_i<slice_z_max)
points[tracer_name] = n.vstack(( points[tracer_name], n.transpose([x_i[slice], y_i[slice]]) ))
f=open(cpickle_dump_file, 'w')
cPickle.dump(points,f)
f.close()
return points
points_1 = get_slice(os.path.join(work_dir, 'slice_1_Lbox.pkl'))
points_2 = get_slice(os.path.join(work_dir, 'slice_2_Lbox.pkl'), x_shift = L_box, distance_min=L_box, distance_max = 2*L_box)
points_3 = get_slice(os.path.join(work_dir, 'slice_3_Lbox.pkl'), x_shift = 2*L_box, distance_min=2*L_box, distance_max = 3*L_box)
points_4 = get_slice(os.path.join(work_dir, 'slice_4_Lbox.pkl'), x_shift = 3*L_box, distance_min=3*L_box, distance_max = 4*L_box)
points_1 = cPickle.load(open(os.path.join(work_dir, 'slice_1_Lbox.pkl'),'r'))
points_2 = cPickle.load(open(os.path.join(work_dir, 'slice_2_Lbox.pkl'),'r'))
points_3 = cPickle.load(open(os.path.join(work_dir, 'slice_3_Lbox.pkl'),'r'))
points_4 = cPickle.load(open(os.path.join(work_dir, 'slice_4_Lbox.pkl'),'r'))
def plot_slice(points, name='slice_1_Lbox.png', lims=(0,L_box)) :
p.figure(0, ((6,6)))
p.axes([0.17,0.17,0.78,0.78])
for tracer in tracer_names:
x_pos, y_pos = points[tracer].T
p.plot(x_pos, y_pos,marker=marker_dict[tracer],color=color_dict[tracer],rasterized=True,ls='None',label=tracer)
p.legend(loc=0, frameon=False, fontsize=9)
p.xlabel('Mpc')
p.ylabel('Mpc')
p.xlim(lims)
p.ylim((0,L_box))
p.title(str(n.round(dc_2_z(lims[0]),2))+'<z<'+str(n.round(dc_2_z(lims[1]),2)) )
p.savefig(os.path.join(plot_dir, name))
p.clf()
plot_slice(points_1, name='slice_1_Lbox.png', lims=(0*L_box,1*L_box))
plot_slice(points_2, name='slice_2_Lbox.png', lims=(1*L_box,2*L_box))
plot_slice(points_3, name='slice_3_Lbox.png', lims=(2*L_box,3*L_box))
plot_slice(points_4, name='slice_4_Lbox.png', lims=(3*L_box,4*L_box))
sys.exit()
p.figure(0, ((6,6)))
p.axes([0.17,0.17,0.78,0.78])
for tracer in tracer_names:
x_pos, y_pos = points_2[tracer].T
p.plot(x_pos, y_pos,marker=marker_dict[tracer],color=color_dict[tracer],rasterized=True,ls='None',label=tracer)
p.legend(loc=0, frameon=False, fontsize=9)
p.xlabel('Mpc')
p.ylabel('Mpc')
p.xlim(lims)
p.ylim((0.,L_box))
p.savefig(os.path.join(plot_dir, 'slice_2_Lbox.png'))
p.clf()
```
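The core of the construction above is the cut inside `get_slice`: each snapshot only contributes the haloes whose comoving distance to the observer falls between that snapshot's redshift boundaries, further restricted to a thin slab in z so the result can be drawn as a 2D pie. A minimal, self-contained sketch of that shell-plus-slab cut, assuming plain numpy position arrays in Mpc (names and values here are illustrative, not taken from the script):
```python
import numpy as np

def shell_slab_cut(x, y, z, r_min, r_max, slab_z_min=0.0, slab_z_max=10.0):
    """Keep points whose distance to the observer lies in (r_min, r_max)
    and whose z coordinate lies in a thin slab (all lengths in Mpc)."""
    d2 = x * x + y * y + z * z
    keep = (d2 > r_min ** 2) & (d2 < r_max ** 2) & (z > slab_z_min) & (z < slab_z_max)
    return x[keep], y[keep]

# toy usage: uniform random points in a 1000 Mpc cube
rng = np.random.default_rng(0)
x, y, z = rng.uniform(0.0, 1000.0, size=(3, 100000))
x_cut, y_cut = shell_slab_cut(x, y, z, r_min=400.0, r_max=500.0)
print(x_cut.size, "points kept in the shell")
```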
#### File: bin/bin_SMHMr/plot_HMF.py
```python
import numpy as n
from scipy.stats import norm
from scipy.integrate import quad
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import glob
import astropy.io.fits as fits
import os
import time
import numpy as n
import sys
mbins = n.arange(8,14.5,0.25)
import matplotlib.pyplot as p
out_dir = os.path.join(os.path.join(os.environ['MD10'],"results","mvir_mass_function", "images"))
# compare the stellar mass function measured to the Ilbert function
# take the AGN HGMF model
def plot_HMF_DC(snap_name, redshift):
"""
Plots the halo (Mvir) mass function of the full box together with the mass functions of the 4MOST tracer samples for one snapshot.
"""
# path for the output file
# path for stellar mass function
out_HMF = os.path.join(os.environ['MD10'],"results", "mvir_mass_function", "data", "out_" + snap_name + "_HMF.txt")
# path to tracer SMFs
out_file = lambda tracer_name : os.path.join(os.environ['MD10'],"results", "mvir_mass_function", "data", "out_"+snap_name+"_"+tracer_name+"_HMF.txt")
p.figure(1, (6,6))
logMs_low, logMs_up, counts, dN_dVdlogM_g = n.loadtxt(out_HMF, unpack=True)
ok = (dN_dVdlogM_g>0)
p.plot((logMs_low[ok] + logMs_up[ok])/2., n.log10(dN_dVdlogM_g[ok]), label='MD10', lw=2, ls='dotted')
def plot_tracer(tracer_name='4MOST_S5_BCG'):
file_name = out_file(tracer_name )
print file_name
if os.path.isfile(file_name) :
#print tracer_name
logMs_low, logMs_up, counts, dN_dVdlogM_g = n.loadtxt(file_name , unpack=True )
ok = (dN_dVdlogM_g>0)
p.plot((logMs_low[ok] + logMs_up[ok])/2., n.log10(dN_dVdlogM_g[ok]), label=tracer_name, ls='dashed', lw=0.75)
plot_tracer("4MOST_S5_BCG" )
plot_tracer("4MOST_S5_GAL" )
plot_tracer("4MOST_S6_AGN" )
plot_tracer("4MOST_S8_BG1" )
plot_tracer("4MOST_S8_BG2" )
plot_tracer("4MOST_S8_ELG" )
plot_tracer("4MOST_S8_QSO" )
p.ylabel(r'$\log_{10}(dN/dV/dlogM_{vir})$')
p.xlabel(r'$\log_{10}(M_{vir})$')
p.xlim((11., 15.))
p.ylim((-8.5,-2))
p.title('z='+str(n.round(redshift,3)))
p.grid()
p.legend(loc=0, frameon=False)
p.savefig(os.path.join(out_dir, "MD10_"+snap_name.zfill(5)+"_HMF_tracers.png"))
p.clf()
# open the output file_type
summ = fits.open(os.path.join(os.environ["MD10"], 'output_MD_1.0Gpc.fits'))[1].data
for el in summ:
print el
plot_HMF_DC(el['snap_name'], el['redshift'])
os.system('cp $MD10/results/mvir_mass_function/images/*.png ~/wwwDir/eRoMok/mvir_mass_function/')
#p.figure(1, (6,6))
#p.plot(logMS_DC_04, duty_cycle_04, label='MD 04')
#p.plot(logMS_DC_10, duty_cycle_10, label='MD 10')
#p.plot(logMS_DC_25, duty_cycle_25, label='MD 25')
#p.plot(logMS_DC_04_h, duty_cycle_04_h, label='MD h 04')
#p.plot(logMS_DC_10_h, duty_cycle_10_h, label='MD h 10')
#p.plot(logMS_DC_25_h, duty_cycle_25_h, label='MD h 25')
#p.axvline(7.2, c='k' , ls='dashed')
#p.axvline(9.7, c='k' , ls='dashed')
#p.axvline(11.3, c='k', ls='dashed')
#p.xlabel('active fraction')
#p.ylabel('log stellar mass')
#p.xlim((6.5,12.2))
#p.yscale('log')
#p.ylim((0.005, .9))
#p.grid()
#p.legend(loc=0, frameon=False)
#p.savefig('/home/comparat/data/eRoMok/BO12_duty_cycle.png')
#p.clf()
```
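The four columns read from the `out_*_HMF.txt` files above are the lower and upper edges of the log10(Mvir) bins, the raw halo counts, and the number density per unit comoving volume and per dex. A hedged sketch of how such a file could be written upstream from an array of halo masses; the bin edges, volume and output name below are illustrative assumptions, not taken from the pipeline:
```python
import numpy as np

def write_hmf(log10_mvir, volume_mpc3, out_path, bins=np.arange(8.0, 16.01, 0.25)):
    """Histogram halo masses and write the mass function per unit volume and per dex."""
    counts, edges = np.histogram(log10_mvir, bins=bins)
    dn_dv_dlogm = counts / (volume_mpc3 * np.diff(edges))
    np.savetxt(out_path,
               np.transpose([edges[:-1], edges[1:], counts, dn_dv_dlogm]),
               header="logM_low logM_up counts dN_dVdlogM")

# toy usage with a fake mass sample and a (1 Gpc/h)^3 volume expressed in Mpc^3
fake_masses = np.random.uniform(11.0, 15.0, 10000)
write_hmf(fake_masses, volume_mpc3=(1000.0 / 0.6777) ** 3, out_path="example_HMF.txt")
```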
#### File: bin/bin_SMHMr/plot-Ms-xray.py
```python
import StellarMass
import XrayLuminosity
import numpy as n
from scipy.stats import norm
from scipy.integrate import quad
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import glob
import astropy.io.fits as fits
import os
import time
import numpy as n
import sys
print " set up box, and redshift "
#MD 1 hlist_0.74980_SAM_Nb_0.fits
#MD 25 hlist_0.75440_SAM_Nb_10.fits
def create_plots(env='MD04', file_type="out"):
fileList = n.array(glob.glob(os.path.join(os.environ[env], "catalogs", file_type + "*.Ms.fits")))
print fileList
for fileN in fileList:
print fileN
hd = fits.open(fileN)[1].data
p.figure(1, (6,6))
p.plot(hd['Mgal_mvir_Mo13'], hd['lambda_sar_Bo16']+hd['Mgal_mvir_Mo13'], 'b,', rasterized=True)
p.xlabel(r'$\log_{10} M_\odot$')
p.ylabel(r'$\log_{10}\lambda_{SAR} + \log_{10}M_\odot$')
p.grid()
p.savefig(os.path.join(os.environ[env], "results", os.path.basename(fileN)[:-5]+'.pdf'))
p.clf()
create_plots(env='MD04', file_type="out")
create_plots(env='MD10', file_type="out")
create_plots(env='MD25', file_type="out")
os.system("cp $MD04/results/*.pdf ~/wwwDir/eRoMok/plots/MD_0.4Gpc")
os.system("cp $MD10/results/*.pdf ~/wwwDir/eRoMok/plots/MD_1.0Gpc")
os.system("cp $MD25/results/*.pdf ~/wwwDir/eRoMok/plots/MD_2.5Gpc")
```
#### File: nbody-npt-functions/python/DarkSkies.py
```python
import cPickle
import fileinput
import astropy.io.fits as fits
import astropy.cosmology as co
import astropy.units as u
c2 = co.Planck13
from scipy.interpolate import interp1d
from os.path import join
import os
import astropy.units as uu
import numpy as n
import glob
import scipy.spatial.ckdtree as t
import time
class DarkSkiesSimulation :
"""
Loads the environment proper to the DarkSkies simulations. This is the fixed framework of the simulation.
:param Lbox: length of the box in Mpc/h
:param wdir: Path to the multidark lightcone directory
:param boxDir: box directory name
:param snl: list of snapshots available
:param zsl: list of redshift corresponding to the snapshots
:param zArray: redshift array to be considered to interpolate the redshift -- distance conversion
:param Hbox: Hubble constant at redshift 0 of the box
:param Melement: Mass of the resolution element in solar masses.
:param columnDict: dictionary to convert column name into the index to find it in the snapshots
"""
def __init__(self,Lbox=8000.0 * uu.Mpc, wdir = join(os.environ['DATA_DIR'],"DarkSkies"), boxDir="snapshots", snl=n.array(glob.glob(join(os.environ['DATA_DIR'],"DarkSkies", "snapshots","ds14_catalog_300particles.dat"))), zsl=None, zArray=n.arange(0.2,2.4,1e-1), Hbox = 67.77 * uu.km / (uu.s * uu.Mpc), Melement = 3.9*10**(10.0) ):
self.Lbox = Lbox # box length
self.Hbox = Hbox # Hubble constant at redshift 0 in the box
self.wdir = wdir # working directory
self.boxDir = boxDir # directory of the box where the snapshots are stored
self.snl = snl # snapshot list
self.zsl = zsl # corresponding redshift list
self.zArray = zArray # redshift for the dC - z conversion
self.Melement = Melement # mass of one particle in the box
self.h = 0.6881
self.ns = 0.9676
self.G = 6.67428 * 10**(-9) # cm3 g-1 s-2
self.Msun = 1.98892 * 10**(33.) # g
self.Npart = 10240
self.force_resolution = 36.8 # kpc /h
self.columnDict = {'mvir': 0, 'vmax': 1, 'x': 2, 'y': 3, 'z': 4, 'id': 5, 'pid': 6}
self.Melement = 3.9*10**(10.0)
def writePositionCatalogPM(self, ii, vmin=30., mmin=300*3.9*10**(10.0) , NperBatch = 10000000):
"""
Extracts the positions and velocities out of a snapshot of the DarkSkies simulation.
:param ii: index of the snapshot in the list self.snl
:param vmin: minimum vmax (km/s) for a halo to be written out.
:param mmin: minimum mvir for a halo to be written out.
:param NperBatch: number of lines per fits file, default: 10000000
"""
fl = fileinput.input(self.snl[ii])
nameSnapshot = os.path.basename(self.snl[ii])[:-4]
Nb = 0
count = 0
output = n.zeros((NperBatch,7))
for line in fl:
if line[0] == "#" :
continue
line = line.split()
newline =n.array([int(line[self.columnDict['id']]), float(line[self.columnDict['pid']]), float(line[self.columnDict['x']]), float(line[self.columnDict['y']]), float(line[self.columnDict['z']]), float(line[self.columnDict['vmax']]), n.log10(float(line[self.columnDict['mvir']])) ])
if float(line[self.columnDict['vmax']])>vmin and float(line[self.columnDict['mvir']])>mmin :
output[count] = newline
count+=1
if count == NperBatch :
#print "count",count
#print output
#print output.shape
#print output.T[0].shape
#define the columns
col0 = fits.Column(name='id',format='D', array= output.T[0] )
col1 = fits.Column(name='pid',format='D', array= output.T[1] )
col2 = fits.Column(name='x',format='D', array=output.T[2] )
col3 = fits.Column(name='y',format='D', array= output.T[3] )
col4 = fits.Column(name='z',format='D', array= output.T[4] )
col5 = fits.Column(name='vmax',format='D', array= output.T[5] )
col6 = fits.Column(name='mvir',format='D', array=output.T[6] )
#define the table hdu
hdu_cols = fits.ColDefs([col0, col1, col2, col3, col4, col5, col6])
tb_hdu = fits.BinTableHDU.from_columns( hdu_cols )
#define the header
prihdr = fits.Header()
prihdr['HIERARCH nameSnapshot'] = nameSnapshot
prihdr['count'] = count
prihdr['batchN'] = Nb
prihdr['author'] = 'JC'
prihdu = fits.PrimaryHDU(header=prihdr)
#writes the file
thdulist = fits.HDUList([prihdu, tb_hdu])
os.system("rm "+self.snl[ii][:-4]+"_PM_Nb_"+str(Nb)+".fits")
thdulist.writeto(self.snl[ii][:-4]+"_PM_Nb_"+str(Nb)+".fits")
Nb+=1
count=0
#reset the output matrix
output = n.zeros((NperBatch,7))
# and for the last batch :
col0 = fits.Column(name='id',format='D', array= output.T[0][:count] )
col1 = fits.Column(name='pid',format='D', array= output.T[1][:count] )
col2 = fits.Column(name='x',format='D', array=output.T[2][:count] )
col3 = fits.Column(name='y',format='D', array= output.T[3][:count] )
col4 = fits.Column(name='z',format='D', array= output.T[4][:count] )
col5 = fits.Column(name='vmax',format='D', array= output.T[5][:count] )
col6 = fits.Column(name='mvir',format='D', array=output.T[6][:count] )
#define the table hdu
hdu_cols = fits.ColDefs([col0, col1, col2, col3, col4, col5, col6])
tb_hdu = fits.BinTableHDU.from_columns( hdu_cols )
#define the header
prihdr = fits.Header()
prihdr['HIERARCH nameSnapshot'] = nameSnapshot
prihdr['count'] = count
prihdr['batchN'] = Nb
prihdr['author'] = 'JC'
prihdu = fits.PrimaryHDU(header=prihdr)
#writes the file
thdulist = fits.HDUList([prihdu, tb_hdu])
os.system("rm "+self.snl[ii][:-4]+"_PM_Nb_"+str(Nb)+".fits")
thdulist.writeto(self.snl[ii][:-4]+"_PM_Nb_"+str(Nb)+".fits")
def computeSingleDistributionFunctionJKresampling(self, fileList, rootname, name, bins, Ljk = 100., overlap = 1. ) :
"""
Extracts the distribution of quantity 'name' out of the fits files listed in fileList.
Resamples the box in smaller sub-boxes of length Ljk in Mpc/h.
:param fileList: list of fits files (batches of one snapshot) to process.
:param rootname: prefix used for the output pickle files.
:param name: name of the quantity of interest, mass, velocity.
:param bins: binning scheme to compute the histogram.
:param Ljk: length of the resampled box
:param overlap: allowed overlap between resampled realizations : 1 = no overlap, 2 = 50% overlap ...
"""
output_dir = join(self.wdir,"properties",name)
os.system('mkdir '+ output_dir)
# define boundaries
NBoundariesPerSide = int(overlap*self.Lbox.value/Ljk)
bounds = n.arange(NBoundariesPerSide+1)* Ljk / overlap
#print "boundaries on each side: ", bounds
Xi, Yi, Zi = n.meshgrid(bounds[:-1],bounds[:-1],bounds[:-1])
X = n.ravel(Xi)
Y = n.ravel(Yi)
Z = n.ravel(Zi)
#print X.min(), X.max(), len(X),len(bounds)
# loops over the fileList : fits files with the data
nnC = n.zeros((len(fileList),len(X),len(bins)-1))
nnS = n.zeros((len(fileList),len(X),len(bins)-1))
for jj, file in enumerate(fileList):
#print file
dd = fits.open(file)[1].data
cen = (dd['pid']==-1)
sat = (cen==False) # (dd['pid']>=1)
#computes the histogram for each resampling of the file
for ii, xel in enumerate(X):
#print ii
xmin, ymin, zmin, xmax, ymax, zmax = X[ii], Y[ii], Z[ii], X[ii]+Ljk, Y[ii]+Ljk, Z[ii]+Ljk
sel = (dd['x']>=xmin)&(dd['x']<xmax)&(dd['y']>=ymin)&(dd['y']<ymax)&(dd['z']>=zmin)&(dd['z']<zmax)&(dd[name]>bins[0])&(dd[name]<bins[-1])
#print len(dd[name][(sel)&(cen)]), len(dd[name][(sel)&(sat)])
if len(dd[name][(sel)&(cen)])>=1:
nnC[jj][ii] = n.histogram(dd[name][(sel)&(cen)], bins = bins)[0]
if len(dd[name][(sel)&(sat)])>=1:
nnS[jj][ii] = n.histogram(dd[name][(sel)&(sat)], bins = bins)[0]
f = open(join(output_dir, rootname +"_Central_JKresampling.pkl"),'w')
cPickle.dump(n.sum(nnC,axis=0),f)
f.close()
f = open(join(output_dir,rootname +"_Satellite_JKresampling.pkl"),'w')
cPickle.dump(n.sum(nnS,axis=0),f)
f.close()
n.savetxt(join(output_dir,rootname+"_"+name+"_JKresampling.bins"),n.transpose([bins]))
```
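A hedged usage sketch of `DarkSkiesSimulation`, assuming the `DATA_DIR` environment variable and the default ascii snapshot referenced in `__init__` exist; the file names and bins below simply reuse those defaults and are illustrative:
```python
import glob
import numpy as n
from DarkSkies import DarkSkiesSimulation

ds = DarkSkiesSimulation()            # default Lbox, snapshot list, column layout
print(ds.columnDict)                  # columns expected in the ascii snapshot

# write FITS batches of haloes with vmax > 30 km/s from the first snapshot
ds.writePositionCatalogPM(0, vmin=30.)

# jackknife-resampled vmax histograms over the batches written above
batches = glob.glob(ds.snl[0][:-4] + "_PM_Nb_*.fits")
bins = n.logspace(n.log10(30.), n.log10(2000.), 40)
ds.computeSingleDistributionFunctionJKresampling(batches, "ds14_snap0", "vmax", bins)
```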
#### File: nbody-npt-functions/python/HaloSelection.py
```python
import random
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import astropy.io.fits as fits
import numpy as n
from scipy.interpolate import interp1d
import scipy.stats as st
import os
from os.path import join
class MultiDarkMock:
"""
:param hdu: hdu of the lightcone
:param area: area in deg2
:param mockOutput_dir: directory where to output mocks
:param mockName: name of the file where to save the mock
:param zmin: minimum redshift array defining the bins
:param zmax: maximum redshift array defining the bins
:param nGal_Deg2: number of galaxies per square degrees
"""
def __init__(self,hdu, area, mockOutput_dir, mockName, zmin, zmax, nGal_Deg2 ):
self.hdu = hdu
self.area = area
self.mockOutput_dir = mockOutput_dir
self.mockName = mockName
self.zmin = zmin
self.zmax = zmax
self.nGal_Deg2 = nGal_Deg2
def initialize(self):
"""
Initializes the procedure by putting into memory the arrays of centrals and satellites.
"""
# derived numbers common to all SHAMs.
self.cen = (self.hdu[1].data['pid'] == -1)
self.sat = (self.cen == False)
self.Nhalos = self.hdu[1].header['NAXIS2']
self.IDh = n.arange(self.Nhalos)
self.nGal = n.array([ int(el * self.area) for el in self.nGal_Deg2 ])
#self.nGal_to_z = interp1d(nGal_Deg2,(self.zmax+self.zmin)/2.)
#function to slice by redshift
self.slice_Z = lambda z1, z2 : (self.hdu[1].data['z_redshift_space'] >= z1) & ( self.hdu[1].data['z_redshift_space'] < z2)
def write_catalog_ascii(self):
"""Writes the obtained mock catalog for clustering estimation: just ra, dec and redshift. """
print "writes ascii catalog :", self.mockName
outPutFileName = join( self.mockOutput_dir, self.mockName + "_radecz.cat" )
self.raMock = self.hdu[1].data['ra'][self.idSel]
self.decMock = self.hdu[1].data['dec'][self.idSel]
self.zMock = self.hdu[1].data['z_redshift_space'][self.idSel]
n.savetxt(outPutFileName, n.transpose([ self.raMock, self.decMock, self.zMock]),fmt = '%.8f %.8f %.5f')
def write_full_catalog_fits(self):
"""Writes the obtained with all the columns from the parent lightcone catalog."""
print "writes fits catalog :", self.mockName
tbhdu = fits.BinTableHDU.from_columns( self.hdu[1].columns )
tbhdu.data = tbhdu.data[self.idSel]
prihdu = fits.PrimaryHDU(header = self.hdu[0].header)
thdulist = fits.HDUList([prihdu, tbhdu])
outPutFileName = join(self.mockOutput_dir,self.mockName+"_allCols.fits")
os.system('rm -rf '+ outPutFileName)
thdulist.writeto(outPutFileName)
def get_distrib_QTY(self, colN, z1, z2):
"""Computes the cumulative histogram of a column for halos in the range z1, z2.
:param colN: name of the column you want to take the histogram.
:param z1: minimum redshift
:param z2: maximum redshift
"""
zsel = self.slice_Z( z1, z2)
IDhz = self.IDh[zsel] # all ids in this redshift bin
QTY = self.hdu[1].data[colN][zsel] # all QTY in this redshift bin
nn,bb,pp = p.hist(QTY,cumulative = True,bins = len(QTY)/100)
p.clf()
print len(IDhz), "halos with ",z1, "<z<", z2
return IDhz,QTY,nn,bb
def select_sham(self, nGal_perbin, IDhz, QTY, nn, bb):
"""
Returns the ids corresponding to a given density.
:param nGal_perbin: number of galaxies to be selected
:param IDhz: parent ID distribution
:param QTY: quantity to select halos on
:param nn: cumulative distribution of QTY
:param bb: bins of the cumulative distribution
"""
mfc = interp1d(nn, (bb[:-1]+bb[1:])/2.)
QTYmax = mfc(len(QTY))
QTYmin = mfc(len(QTY)-nGal_perbin)
qsel = (QTY>QTYmin)&(QTY<= QTYmax)
IDhzq = IDhz[qsel]
print "N to be selected:",nGal_perbin,", Nselected:",len(IDhzq)
return IDhzq
def make_sham_catalog(self, colN='mvir'):
"""
Creates lists of ids of halos corresponding to the density given in the n(z).
For every bin of redshift, it gets the distribution of the column of interest and matches it to the density of galaxies in the given n(z).
Then provides a column of ids extracted from the lightcone.
:param colN: name of the column you wish to work on for the sham.
"""
ids = []
for ii in range(len(self.zmin)):
print "gets all halos for ", self.zmin[ii], "<z<", self.zmax[ii], "with col5 to mock ", self.nGal[ii], " galaxies."
IDhz, QTY, nn, bb = self.get_distrib_QTY( colN, self.zmin[ii], self.zmax[ii])
ids.append( self.select_sham(self.nGal[ii],IDhz, QTY, nn,bb))
self.idSel = n.hstack(( ids ))
self.NhaloMock = len((self.idSel).nonzero()[0])
def select_shamIncomplete(self, incompFactor, nGal_perbin, IDhz, QTY, nn, bb):
"""
Returns the ids corresponding to a given density and an incompleteness factor.
:param nGal_perbin: number of galaxies to be selected
:param IDhz: parent ID distribution
:param QTY: quantity to select halos on
:param nn: cumulative distribution of QTY
:param bb: bins of the cumulative distribution
:param incompFactor: incompleteness factor compared to the max of QTY : max(QTY)/incompFactor will be set as the max of the extracted distribution.
"""
mfc = interp1d(nn,(bb[1:]+bb[:-1])/2.)
mfcInv = interp1d((bb[1:]+bb[:-1])/2.,nn)
QTYmaxAll = mfc(len(QTY))/incompFactor
Nmax = mfcInv(QTYmaxAll)
QTYmax = mfc(Nmax)
QTYmin = mfc(Nmax-nGal_perbin)
qsel = (QTY>QTYmin)&(QTY<= QTYmax)
IDhzq = IDhz[qsel]
return IDhzq
def make_shamIncomplete_catalog(self, colN, incompletenessFactor ):
"""
Creates lists of ids of halos corresponding to the density given in the n(z).
For every bin of redshift, it gets the distribution of the column of interest and matches it to the density of galaxies in the given n(z).
Then provides a column of ids extracted from the lightcone.
:param colN: name of the column you wish to work on for the sham.
"""
ids = []
for ii in range(len(self.zmin)):
print "gets all halos for ", self.zmin[ii], "<z<", self.zmax[ii], "with col5 to mock ", self.nGal[ii], " galaxies."
IDhz, QTY, nn, bb = self.get_distrib_QTY( colN, self.zmin[ii], self.zmax[ii] )
ids.append( self.select_shamIncomplete( incompletenessFactor[ii], self.nGal[ii], IDhz, QTY, nn, bb ) )
self.idSel = n.hstack(( ids ))
self.NhaloMock = len((self.idSel).nonzero()[0])
def select_shamMAX(self,QTY_max, nGal_perbin,IDhz, QTY, nn,bb):
"""
Returns the ids corresponding to a given density with a maximum in the QTY.
:param nGal_perbin: number of galaxies to be selected
:param IDhz: parent ID distribution
:param QTY: quantity to select halos on
:param nn: cumulative distribution of QTY
:param bb: bins of the cumulative distribution
:param QTY_max: the max of QTY is set to QTY_max.
"""
mfc = interp1d(nn,(bb[1:]+bb[:-1])/2.)
mfcInv = interp1d((bb[1:]+bb[:-1])/2.,nn)
Nmax = mfcInv(QTY_max)
QTYmax = mfc(Nmax)
QTYmin = mfc(Nmax-nGal_perbin)
qsel = (QTY>QTYmin)&(QTY<= QTYmax)
IDhzq = IDhz[qsel]
return IDhzq
def make_shamMAX_catalog(self, colN, maxQTY ):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z).
For every bin of redshift, it gets the distribution of the column of interest and matches it to the density of galaxies in the given n(z).
Then provides a column of ids extracted from the lightcone.
:param colN: name of the column you wish to work on for the sham.
"""
ids = []
for ii in range(len(self.zmin)):
print "gets all halos for ", self.zmin[ii], "<z<", self.zmax[ii], "with col5 to mock ", self.nGal[ii], " galaxies."
IDhz, QTY, nn, bb = self.get_distrib_QTY( colN, self.zmin[ii], self.zmax[ii] )
ids.append( self.select_shamMAX( maxQTY[ii], self.nGal[ii], IDhz, QTY, nn, bb ) )
self.idSel = n.hstack(( ids ))
self.NhaloMock = len((self.idSel).nonzero()[0])
def select_Gaussian(self, meanQTY, scatterQTY, nGal_perbin, IDhz, QTY):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z) and to a gaussian distribution .
For every bin of redshift, it gets the distribution of the column of interest and matches it to the density of galaxies in the given n(z).
Then provides a column of ids extracted from the lightcone.
:param colN: name of the column you wish to work on for the sham.
:param meanQTY: mean of the distribution
:param scatterQTY: scatter of the distribution
:param nGal_perbin: total number of galaxies in this bins to mock
:param IDhz: IDs of the halos in this bin
:param QTY: array of the column to do the match on, mass, velocity, ...
"""
# constructs the QTY intervals around the distribution
expected_cdf = lambda x : st.norm.cdf(x, loc = meanQTY, scale = scatterQTY)
interval = [ meanQTY - 9 * scatterQTY , meanQTY + 9 * scatterQTY]
xs = n.arange(interval[0],interval[1],(interval[1]-interval[0])/1000.)
out = expected_cdf(xs)
expected_cdf_inv = interp1d(out,xs)
boundaries = n.hstack((expected_cdf_inv(0.01),expected_cdf_inv(n.arange(0.1,0.91,0.1)), interval[1]))
# gets the number of halos to select
expected_cdf_tot = lambda x : nGal_perbin * st.norm.cdf(x, loc = meanQTY, scale = scatterQTY)
Up = expected_cdf_tot(boundaries[1:])
Low = n.hstack(( 0., expected_cdf_tot(boundaries[1:])[:-1] ))
N2select = Up-Low
print N2select,Up,Low
# select in mass in the box
qsels = n.array([ (QTY>boundaries[ii])&(QTY<= boundaries[ii+1]) for ii in range(len(boundaries)-1) ])
IDhzqAll = n.array([ IDhz[qs] for qs in qsels ])
# random downsample to the N2select in each bin
i = 0
ids_selected = []
for arr in IDhzqAll:
random.shuffle(arr)
ids_selected.append(arr[:int(N2select[i])])
i+= 1
ids_selected = n.hstack(( n.array(ids_selected) ))
return ids_selected
def make_Gaussian_catalog(self, colN, means, scatters):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z).
:param colN: name of the column you construct the catalog with
:param means: means of the Gaussians, array the same length of the redshift bin
:param scatters: scatters of the Gaussians, array the same length of the redshift bin
"""
ids = []
for ii in range(len(self.zmin)):
print "gets all halos for ", self.zmin[ii], "<z<", self.zmax[ii], "with col5 to mock ", self.nGal[ii], " galaxies."
IDhz, QTY, nn, bb = self.get_distrib_QTY( colN, self.zmin[ii], self.zmax[ii] )
ids.append( self.select_Gaussian( means[ii], scatters[ii], self.nGal[ii], IDhz, QTY ) )
self.idSel = n.hstack(( ids ))
self.NhaloMock = len((self.idSel).nonzero()[0])
def get_distrib_QTY_cen(self, colN, z1, z2):
"""Computes the cumulative histogram of a column for central halos in the range z1, z2.
:param colN: name of the column you want to take the histogram.
:param z1: minimum redshift
:param z2: maximum redshift
"""
zsel = self.slice_Z(z1, z2) & (self.cen)
IDhz = self.IDh[zsel] # all ids in this redshift bin
QTY = self.hdu[1].data[colN][zsel] # all QTY in this redshift bin
nn,bb,pp = p.hist(QTY,cumulative = True,bins = len(QTY)/100)
p.clf()
return IDhz,QTY,nn,bb
def get_distrib_QTY_sat(self, colN, z1, z2):
"""Computes the cumulative histogram of a column for satellite halos in the range z1, z2.
:param colN: name of the column you want to take the histogram.
:param z1: minimum redshift
:param z2: maximum redshift
"""
zsel = self.slice_Z(z1, z2) & (self.sat)
IDhz = self.IDh[zsel] # all ids in this redshift bin
QTY = self.hdu[1].data[colN][zsel] # all QTY in this redshift bin
nn,bb,pp = p.hist(QTY,cumulative = True,bins = len(QTY)/100)
p.clf()
return IDhz,QTY,nn,bb
def select_GaussianFsat(self,meanQTY,scatterQTY,fsat, nGal_perbin, IDhz_c, QTY_c, IDhz_s, QTY_s ):
"""
Extracts the ids of halos to create a mock with a Gaussian distribution.
:param colN: name of the column you wish to work on for the sham.
:param meanQTY: mean of the distribution
:param scatterQTY: scatter of the distribution
:param fsat: fraction of satellite in this bin
:param nGal_perbin: total number of galaxies in this bins to mock
:param IDhz_c: IDs of the central halos in this bin
:param QTY_c: column to do the match on, mass, velocity, ... for the central halos
:param IDhz_s: IDs of the satellite halos in this bin
:param QTY_s: column to do the match on, mass, velocity, ... for the satellite halos
"""
nSat = int(nGal_perbin*fsat)
print "satellites",nGal_perbin,nSat,fsat,meanQTY,scatterQTY
# constructs the QTY intervals around the distribution
expected_cdf = lambda x : st.norm.cdf(x, loc = meanQTY, scale = scatterQTY)
interval = [ meanQTY - 9 * scatterQTY , meanQTY + 9 * scatterQTY]
xs = n.arange(interval[0],interval[1],(interval[1]-interval[0])/1000.)
out = expected_cdf(xs)
expected_cdf_inv = interp1d(out,xs)
boundaries = n.hstack((expected_cdf_inv(0.01),expected_cdf_inv(n.arange(0.1,0.91,0.1)), interval[1]))
# gets the number of halos to select the SAT
expected_cdf_s = lambda x : nSat * st.norm.cdf(x, loc = meanQTY, scale = scatterQTY)
Up_s = expected_cdf_s(boundaries[1:])
Low_s = n.hstack(( 0., expected_cdf_s(boundaries[1:])[:-1] ))
N2select_s = Up_s-Low_s
# select in mass in the box
qsels_s = n.array([ (QTY_s>boundaries[ii])&(QTY_s<= boundaries[ii+1]) for ii in range(len(boundaries)-1) ])
IDhzqAll_s = n.array([ IDhz_s[qs] for qs in qsels_s ])
# random downsample to the N2select in each bin
i = 0
ids_selected_s = []
for arr2 in IDhzqAll_s:
random.shuffle(arr2)
#print len(arr2),int(N2select_s[i])
ids_selected_s.append(arr2[:int(N2select_s[i])])
i+= 1
id_s = n.hstack((n.array(ids_selected_s)))
nSatReal = len(id_s)
nCen = nGal_perbin-nSatReal
print "centrals", nGal_perbin,nSat,nCen,fsat,meanQTY,scatterQTY
# gets the number of halos to select the CEN, compatible with the sat fraction to get the right density.
print "centrals"
expected_cdf_c = lambda x : nCen * st.norm.cdf(x, loc = meanQTY, scale = scatterQTY)
Up_c = expected_cdf_c(boundaries[1:])
Low_c = n.hstack(( 0., expected_cdf_c(boundaries[1:])[:-1] ))
N2select_c = Up_c-Low_c
# select in mass in the box
qsels_c = n.array([ (QTY_c>boundaries[ii])&(QTY_c<= boundaries[ii+1]) for ii in range(len(boundaries)-1) ])
IDhzqAll_c = n.array([ IDhz_c[qs] for qs in qsels_c ])
# random downsample to the N2select in each bin
i = 0
ids_selected_c = []
for arr in IDhzqAll_c:
random.shuffle(arr)
#print len(arr),int(N2select_c[i])
ids_selected_c.append(arr[:int(N2select_c[i])])
i+= 1
id_c = n.hstack((n.array(ids_selected_c)))
ids_selected = n.hstack((id_c,id_s ))
print len(id_c),len(id_s),len(ids_selected)
return ids_selected
def make_GaussianFsat_catalog(self, colN, means, scatters, fsats):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z).
:param colN: name of the column you construct the catalog with
:param means: means of the Gaussians, array the same length of the redshift bin
:param scatters: scatters of the Gaussians, array the same length of the redshift bin
:param fsats: fractions of satellite, array the same length of the redshift bin
"""
ids = []
for ii in range(len(self.zmin)):
print "gets all halos for ",self.zmin[ii],"<z<",self.zmax[ii], "with col5 to mock ", self.nGal[ii], " galaxies."
IDhz_c,QTY_c,nn_c,bb_c = self.get_distrib_QTY_cen( colN, z1=self.zmin[ii], z2=self.zmax[ii])
IDhz_s,QTY_s,nn_s,bb_s = self.get_distrib_QTY_sat( colN, z1=self.zmin[ii], z2=self.zmax[ii])
ids.append( self.select_GaussianFsat( means[ii], scatters[ii], fsats[ii], self.nGal[ii], IDhz_c, QTY_c, IDhz_s, QTY_s ) )
self.idSel = n.hstack(( ids ))
self.NhaloMock = len((self.idSel).nonzero()[0])
def select_LogNorm(self, meanQTY, scatterQTY, nGal_perbin,IDhz, QTY, nn,bb):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z) and to a lognormal distribution.
For every bin of redshift, it gets the distribution of the column of interest and matches it to the density of galaxies in the given n(z).
Then provides a column of ids extracted from the lightcone.
:param colN: name of the column you wish to work on for the sham.
:param meanQTY: mean of the distribution
:param scatterQTY: scatter of the distribution
:param nGal_perbin: total number of galaxies in this bins to mock
:param IDhz: IDs of the halos in this bin
:param QTY: array of the column to do the match on, mass, velocity, ...
"""
# constructs the QTY intervals around the distribution
expected_cdf = lambda x : st.lognorm.cdf(x, meanQTY, scatterQTY)
interval = [ meanQTY - 9 * scatterQTY , meanQTY + 9 * scatterQTY]
xs = n.arange(interval[0],interval[1],(interval[1]-interval[0])/1000.)
out = expected_cdf(xs)
expected_cdf_inv = interp1d(out,xs)
boundaries = n.hstack((expected_cdf_inv(0.01),expected_cdf_inv(n.arange(0.1,0.91,0.1)), interval[1]))
# gets the number of halos to select
expected_cdf_tot = lambda x : nGal_perbin * st.lognorm.cdf(x, meanQTY, scatterQTY)
Up = expected_cdf_tot(boundaries[1:])
Low = n.hstack(( 0., expected_cdf_tot(boundaries[1:])[:-1] ))
N2select = Up-Low
#print N2select,Up,Low
# select in mass in the box
qsels = n.array([ (QTY>boundaries[ii])&(QTY<= boundaries[ii+1]) for ii in range(len(boundaries)-1) ])
IDhzqAll = n.array([ IDhz[qs] for qs in qsels ])
# random downsample to the N2select in each bin
i = 0
ids_selected = []
for arr in IDhzqAll:
random.shuffle(arr)
ids_selected.append(arr[:int(N2select[i])])
i+= 1
ids_selected = n.hstack(( n.array(ids_selected) ))
return ids_selected
def make_LogNorm_catalog(self, colN, means, scatters):
"""
Creates lists of ids of centrals and satellite galaxies corresponding to the density given in the n(z).
:param colN: name of the column you construct the catalog with
:param means: means of the Gaussians, array the same length of the redshift bin
:param scatters: scatters of the Gaussians, array the same length of the redshift bin
"""
ids = []
for ii in range(len(self.zmin)):
print "gets all halos for ", self.zmin[ii], "<z<", self.zmax[ii], "with col5 to mock ", self.nGal[ii], " galaxies."
IDhz, QTY, nn, bb = self.get_distrib_QTY( colN, self.zmin[ii], self.zmax[ii] )
ids.append( self.select_LogNorm( means[ii], scatters[ii], self.nGal[ii], IDhz, QTY, nn, bb ) )
self.idSel = n.hstack(( ids ))
self.NhaloMock = len((self.idSel).nonzero()[0])
def create_random_catalog(self, factor = 5., dz=0.025 ):
"""Writes a random catalog"""
self.nRandom = int(self.NhaloMock * factor )
raR = n.random.uniform(n.min(self.raMock), n.max(self.raMock), self.nRandom )
decR = n.random.uniform(n.min(self.decMock), n.max(self.decMock), self.nRandom )
z1=n.arange(n.min(self.zMock)-0.1, n.max(self.zMock)+0.1, dz)
nn,bb,pp=p.hist(self.zMock, bins=z1)
nz=interp1d((z1[1:]+z1[:-1])/2.,factor*nn)
zs=n.arange(n.min(self.zMock), n.max(self.zMock), dz)
rdsz=[]
for i in range(len(zs)-1):
inter=n.random.uniform(low=zs[i], high=zs[i+1], size=int(2* nz( zs[i]+dz/2. )))
rdsz.append(inter)
rds=n.hstack((rdsz))
n.random.shuffle(rds)
selRDS=(n.random.rand(len(raR))<float(self.nRandom)/len(raR))
RR=rds[:len(raR[selRDS])]
print "N final",len(raR[selRDS])
outPutFileName = join( self.mockOutput_dir, self.mockName + "_random.cat" )
n.savetxt(outPutFileName,n.transpose([raR[selRDS],decR[selRDS],RR]),fmt='%.8f %.8f %.5f')
raR,decR,RR=0,0,0
def writeClusteringParamFile(self,type,decade=""):
""" Writes the clustering commands that command the CUTE code, see Alonso et al. 2012 https://arxiv.org/abs/1210.1833
:param type: monopole or angular or ...
:param decade: string suffix that is appended if you study different scales (decades) _d1, _d2, _d3 are used for the angular clustering."""
f=open(join( self.mockOutput_dir, self.mockName +".param2PCF_"+type+decade),'a')
f.write("data_filename= "+join( self.mockOutput_dir, self.mockName + "_radecz.cat" )+" \n")
f.write("random_filename= "+join( self.mockOutput_dir, self.mockName + "_random.cat" )+" \n")
f.write("input_format= 2 \n")
f.write("mask_filename= 'none' \n")
f.write("z_dist_filename= 'none' \n")
f.write("output_filename= "+join( self.mockOutput_dir, self.mockName )+"_2PCF_"+type+decade+".dat \n")
f.write("num_lines= all \n")
f.write("corr_type= "+type+" \n")
f.write("corr_estimator= LS \n")
f.write("np_rand_fact= 5 \n")
f.write("omega_M= 0.307115 \n")
f.write("omega_L= 0.692885 \n")
f.write("w= -1 \n")
f.write("radial_aperture= 1 \n")
f.write("use_pm= 0 \n")
f.write("n_pix_sph= 2048 \n")
f.close()
def compute_clustering(self):
""" Runs the CUTE code to estimate clustering using the LS estimator. """
os.system("/home2/jcomparat/code/CUTE-1.1A1/CUTE/CUTE "+join( self.mockOutput_dir, self.mockName +".param2PCF_angular_d1"))
os.system("/home2/jcomparat/code/CUTE-1.1A2/CUTE/CUTE "+join( self.mockOutput_dir, self.mockName +".param2PCF_angular_d2"))
os.system("/home2/jcomparat/code/CUTE-1.1A3/CUTE/CUTE "+join( self.mockOutput_dir, self.mockName +".param2PCF_angular_d3"))
os.system("/home2/jcomparat/code/CUTE-1.1M/CUTE/CUTE "+join( self.mockOutput_dir, self.mockName +".param2PCF_monopole"))
def compare_clustering_data_mock(self, w_data, xi_data, theta_min_chi2 = -2.3, theta_max_chi2= -1.5, w_bins=15, s_min_chi2=0.5, s_max_chi2=1.2, s_bins=10):
"""Compares the clustering of the mock catalog and the clustering of the data.
:param w_data: angular clustering from the data [x, y, yErr].
:param xi_data: monopole clustering from the data [x, y, yErr]. """
ths = n.logspace(theta_min_chi2,theta_max_chi2,w_bins)
ss = n.logspace(s_min_chi2,s_max_chi2,s_bins)
# loads the angular clustering from the mock
xx0, yy0, y2E = n.loadtxt( join( self.mockOutput_dir, self.mockName )+"_2PCF_"+"angular"+"_d3"+".dat",unpack=True,usecols = (0,1,2))
xx1, yy1, y1E= n.loadtxt( join( self.mockOutput_dir, self.mockName )+"_2PCF_"+"angular"+"_d2"+".dat",unpack=True,usecols = (0,1,2))
w_M = interp1d( n.hstack((xx0,xx1[1:])), n.hstack((yy0,yy1[1:])) ) #,yy2[1:]))
# loads the monopole from the mock
s_M_a, xi_M_a = n.loadtxt( join( self.mockOutput_dir, self.mockName )+"_2PCF_"+"monopole"+".dat",unpack=True,usecols = (0,1))
xi_M = interp1d( s_M_a, xi_M_a )
# loads the monopole from the mock
#s_selection_data=( xi_data[0] > s_min_chi2 ) & ( xi_data[0] < s_max_chi2 ) & (xi_data[1] > 2 * xi_data[2])
#theta_selection_data = ( w_data[0] > theta_min_chi2 ) & ( w_data[0] < theta_max_chi2 ) & (w_data[1] > 2 * w_data[2])
xi_D = interp1d( xi_data[0], xi_data[1]) #[s_selection_data], xi_data[1][s_selection_data] )
xi_D_err = interp1d( xi_data[0], xi_data[2]) #[s_selection_data], xi_data[2][s_selection_data] )
w_D = interp1d( w_data[0], w_data[1]) #[theta_selection_data], w_data[1][theta_selection_data] )
w_D_err = interp1d( w_data[0], w_data[2]) #[theta_selection_data], w_data[2][theta_selection_data] )
chi2Wr = n.sum((w_D(ths) - w_M(ths))**2. / w_D_err(ths)**2) /len(ths)
chi2Xr = n.sum((xi_D(ss) - xi_M(ss))**2. / xi_D_err(ss)**2) /len(ss)
return chi2Wr, chi2Xr
```
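A hedged sketch of how the class above is meant to be driven, from SHAM catalogue construction to the clustering inputs; the lightcone path, survey area and target n(z) values are illustrative assumptions, not values from the repository:
```python
import numpy as n
import astropy.io.fits as fits
from HaloSelection import MultiDarkMock

# illustrative inputs: lightcone file, area in deg2 and target densities per bin
hdu = fits.open("MD_lightcone.fits")
zmin = n.array([0.6, 0.7, 0.8])
zmax = n.array([0.7, 0.8, 0.9])
nGal_Deg2 = n.array([80., 60., 40.])

mock = MultiDarkMock(hdu, area=120., mockOutput_dir="mocks",
                     mockName="elg_sham", zmin=zmin, zmax=zmax,
                     nGal_Deg2=nGal_Deg2)
mock.initialize()
mock.make_sham_catalog(colN='mvir')   # abundance matching on the mvir column
mock.write_catalog_ascii()            # ra, dec, z for the clustering code
mock.write_full_catalog_fits()        # all lightcone columns for the selection
mock.create_random_catalog(factor=5.)
mock.writeClusteringParamFile("monopole")
```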
#### File: nbody-npt-functions/python/LineLuminosityFunctionFromSimulations.py
```python
from os.path import join
import os
import astropy.cosmology as co
cosmo=co.FlatLambdaCDM(H0=70,Om0=0.3)
import astropy.io.fits as fits
import numpy as n
from scipy.optimize import curve_fit
class LineLuminosityFunctionFromSimulations:
"""
The line luminosity function class
:param lineWavelength: restframe wavelength in the air
:param lineName: name of the line used in the catalogs.
:param cosmology: cosmology used (astropy class) Default H0=70,Omega matter=0.3
:param surveyName: Name of the survey used (needs to be the one given in the database)
:param redshift_catalog: name of the redshift catalog
:param luminosityBins: bins in luminosity equally spaced in log space.
:param outputFolder: folder where the results will be written
:param zmin: minimum redshift included
:param zmax: maximum redshift included
"""
def __init__(self, lineWavelength=3727.4228417998916, lineName="OII3727", cosmology = cosmo, surveyName ="GALFORM", surveyDir = join("Simulations","galform-lightcone"), redshift_catalog = "galform.ELG.fits", luminosityBins = n.logspace(38,45,50), outputFolder="emissionLineLuminosityFunctions" , zmin=0.6, zmax=0.8):
self.lineWavelength = lineWavelength
self.lineName = lineName
self.cosmology = cosmology
self.surveyName = surveyName
self.redshift_catalog = redshift_catalog
self.database_dir = os.environ['DATA_DIR']
self.survey_dir = join(self.database_dir , surveyDir)
self.catalog_dir = join(self.survey_dir,"catalogs")
self.output_dir = join(self.survey_dir,"products",outputFolder,lineName)
os.system('mkdir '+self.output_dir)
hd = fits.open(join(self.catalog_dir,self.redshift_catalog))
self.catalog = hd[1].data
hd.close()
self.Ngalaxies = len(self.catalog)
#self.nbins = 15#n.arange(38.5,45,0.25)#15
self.luminosityBins = luminosityBins #15
#self.nbinsUD = 4
self.zmin = zmin
self.zmax = zmax
self.luminosity = self.catalog[lineName+'_luminosity']
self.volume_per_sq_degree=lambda z1,z2 : (cosmo.comoving_volume( z2 ) - cosmo.comoving_volume( z1 )) *n.pi/129600.
def setRedshiftArray(self,redshiftColumn='zObs'):
""" sets the redshift array
:param redshiftColumn: column of the catalog corresponding to the redshift.
Stores it in self.redshift.
"""
self.redshift = self.catalog[redshiftColumn]
def setRedshiftSelection(self):
""" sets the redshift selection
:param redshiftQualityColumn: column of the catalog corresponding to the quality of the redshifts.
:param lowerBound : lower bound to redshift quality : zquality > lowerBound
:param upperBound : upper bound to the redshift quality : zquality < upperBound
Stores it in self.redshiftSelection.
"""
self.redshiftSelection = ( self.redshift>self.zmin ) & ( self.redshift<self.zmax )
def setWeightArray(self,weightColumn):
""" sets the weight column
:param weightColumn: statistical weight per galaxy 1 / (area * TSR * SSR)
Divides the weight by the volume of the bin stores it in self.weight.
"""
self.weight = n.ones_like(self.luminosity) * weightColumn / self.volume_per_sq_degree(self.zmin,self.zmax)
def computeMeanWeightedRedshift(self,sel):
""" Computes the weighted mean redshift of the sample.
"""
selection = (sel) & (self.redshiftSelection)
self.meanRedshift = n.average(self.redshift[selection], weights = self.weight[selection])
def computeHistogramLF(self,sel):
""" Computes the weighted and unweighted histogram to get the number density and Poisson errors.
:param sel: array selecting the galaxies of interest in the catalog (Boolean).
Returns Weighted density, Error on the weighted density, Number of galaxies used in each bin, the luminosity bins.
It stores the values in self.LF, self.LFerr_poisson, self.ngals. It also evaluates the mean luminosity in each luminosity bin self.xL and dlogL to obtain the LF
"""
selection = (sel) & (self.redshiftSelection)
N10p,bin1p=n.histogram(self.luminosity[selection],bins=self.luminosityBins)
N10,bin1=n.histogram(self.luminosity[selection], bins= self.luminosityBins, weights= self.weight[selection] )
self.LF, self.LFerr_poisson, self.ngals = N10, N10*N10p**0.5/N10p, N10p
xSelections=n.array([ (self.luminosity > self.luminosityBins[ii]) &(self.luminosity< self.luminosityBins[ii+1] ) & (selection) for ii in range( len( self.luminosityBins ) -1 ) ])
xLi= []
for jj in range(len(xSelections)) :
if len(self.luminosity[xSelections[jj]])>0:
xLi.append( n.average( self.luminosity[xSelections[jj]], weights= self.weight[xSelections[jj]] ) )
else:
xLi.append( (self.luminosityBins[jj]+self.luminosityBins[jj+1])/2. )
self.xL=n.array(xLi)
dLogL_all = (self.luminosityBins[1:] - self.luminosityBins[:-1]) / ((self.luminosityBins[1:] + self.luminosityBins[:-1])/2.)
self.dLogL = dLogL_all[0]
def computeHistogramVariance(self,sel,jk=0.1):
""" Computes the variance of the histogram using N subsamples.
:param sel: array selecting the galaxies of interest in the catalog (Boolean).
:param jk: percentage of the data set removed in each realization.
Stores the values in self.LFerr_jackknife
"""
selection = (sel) & (self.redshiftSelection)
#N10p,bin1p=n.histogram(self.luminosity[selection],bins=self.luminosityBins)
L_jk = self.luminosity[selection]
w_jk = self.weight[selection]
rdArr=n.random.rand(len(L_jk))
values=n.arange(0,1+0.9*jk,jk)
randSelNot=n.array([(rdArr>values[jj])&(rdArr<values[jj+1]) for jj in range(len(values)-1)])
randSel=n.array([(el==False) for el in randSelNot])
lumJK=[]
for selR in randSel :
N10,bin1=n.histogram(L_jk[selR], bins= self.luminosityBins, weights= w_jk[selR] )
lumJK.append(N10)
self.LFerr_jackknife = n.std(lumJK,axis=0)
def get_completness_limit(self,sel):
selection = (sel) & (self.redshiftSelection)
bins=n.logspace(1,3,20)
aa,bb = n.histogram(self.catalog[self.lineName+'_EW'][selection], bins=bins)
self.completness_limit_EW = bb[n.argmax(aa)+3]
EWselection = (self.catalog[self.lineName+'_EW'][selection] >0.9* self.completness_limit_EW )&( self.catalog[self.lineName+'_EW'][selection]<1.1* self.completness_limit_EW)
self.completness_limit_luminosity = n.median( self.catalog[ self.lineName+'_luminosity'][ selection ][ EWselection ])
# bins=n.logspace(39.5,43,20)
# aa,bb = n.histogram(self.catalog[self.lineName+'_luminosity'][selection], bins=bins)
#self.completness_limit_luminosity = bb[n.argmax(aa)+1]
def writeLF(self,sel,surveyNameSuffix=""):
""" writes the measured LF and the data used to derive it to an ascii and a fits file.
"""
filename = self.lineName + "-" + self.surveyName+surveyNameSuffix + "-z" + str( n.round( self.meanRedshift ,3 ))
selection = (sel) & (self.redshiftSelection)
new_columns = self.catalog.columns
hdu2 = fits.BinTableHDU.from_columns(new_columns)
hdu2.data = hdu2.data[selection]
hdu2.header.add_comment(str(self.completness_limit_luminosity))
os.system('rm -rf '+ join(self.output_dir , filename + ".fits"))
hdu2.writeto(join(self.output_dir , filename + ".fits"))
head= " Lmin Lmax Lmean phi phiErr_jk phiErr_poisson Ngalaxy"
f=open(join(self.output_dir , filename + ".txt"),'w')
n.savetxt(f, n.transpose([self.luminosityBins[:-1], self.luminosityBins[1:], self.xL, self.LF/self.dLogL, self.LFerr_poisson/self.dLogL, self.LFerr_jackknife /self.dLogL, self.ngals]) ,header= head)
f.close()
```
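A hedged sketch of the intended calling sequence for the class above, assuming `DATA_DIR` is set and the GALFORM catalogue contains the `zObs`, `OII3727_luminosity` and `OII3727_EW` columns used by the methods; the weight value is an illustrative placeholder:
```python
import numpy as n
from LineLuminosityFunctionFromSimulations import LineLuminosityFunctionFromSimulations

lf = LineLuminosityFunctionFromSimulations(
    lineName="OII3727", surveyName="GALFORM",
    redshift_catalog="galform.ELG.fits",
    luminosityBins=n.logspace(38, 45, 50), zmin=0.6, zmax=0.8)

lf.setRedshiftArray(redshiftColumn='zObs')
lf.setRedshiftSelection()
lf.setWeightArray(1. / 100.)              # e.g. 1 / (area in deg2) per galaxy
sel = n.ones(lf.Ngalaxies, dtype=bool)    # no extra cut beyond the z selection
lf.computeMeanWeightedRedshift(sel)
lf.computeHistogramLF(sel)
lf.computeHistogramVariance(sel, jk=0.1)
lf.get_completness_limit(sel)
lf.writeLF(sel)
```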
#### File: nbody-npt-functions/python/StellarMass.py
```python
from scipy.stats import lognorm
from scipy.stats import norm
#import cPickle
import fileinput
import astropy.io.fits as fits
import astropy.cosmology as co
import astropy.units as u
c2 = co.Planck13
from scipy.interpolate import interp1d
from os.path import join
import os
import astropy.units as uu
import numpy as n
import glob
import scipy.spatial.ckdtree as t
import time
class StellarMass() :
"""
Loads the environment to assign stellar masses to halos from dark matter only simulations, here MultiDark simulations.
:param Lbox: length of the box in Mpc/h
:param wdir: Path to the multidark lightcone directory
:param boxDir: box directory name
:param snl: list of snapshots available
:param zsl: list of redshift corresponding to the snapshots
:param zArray: redshift array to be considered to interpolate the redshift -- distance conversion
:param Hbox: Hubble constant at redshift 0 of the box
:param Melement: Mass of the resolution element in solar masses.
:param columnDict: dictionnary to convert column name into the index to find it in the snapshots
"""
def __init__(self,Lbox=2500.0 * uu.Mpc, boxDir=os.environ['MD04'], snl=[], Hbox = 67.77 * uu.km / (uu.s * uu.Mpc), Melement = 23593750000.0 ):
self.Lbox = Lbox # box length
self.Hbox = Hbox # Hubble constant at redshift 0 in the box
self.boxDir = boxDir # directory of the box where the snapshots are stored
self.snl = snl # snapshot list, path to files
self.Melement = Melement # mass of one particle in the box
self.h = 0.6777
# parameters used to run the simulation
self.omega_lambda = 0.692885
self.omega_matter = 0.307115
self.omega_baryon = 0.048206
self.ns = 0.96
self.sigma8 = 0.8228
self.G = 6.67428 * 10**(-9) # cm3 g-1 s-2
self.Msun = 1.98892 * 10**(33.) # g
self.Npart = 3840
self.force_resolution = 5. # kpc /h
def SMHMr(self, Mh, z):
"""
Computes the mu star parameter for a halo mass according to the Moster et al. 2013 equations
Returns :
$\mu_* = 2\left(0.0351 - 0.0247 \frac{z}{1+z}\right)\left(\left[\left(\frac{10^{11.59 + 1.195 \frac{z}{1+z}}}{M_h}\right)^{1.376 - 0.826 \frac{z}{1+z}} + \left(\frac{M_h}{10^{11.59 + 1.195 \frac{z}{1+z}}} \right)^{0.608 + 0.329 \frac{z}{1+z}} \right]^{-1}\right)- 0.0225$
:param Mh: halo mass array
:param z: redshift array
"""
aexp = z/(1.+z)
return 2. * ( 0.0351 - 0.0247 * aexp) / ((Mh/ (10**(11.59 + 1.195 * aexp)) )**(- 1.376 + 0.826 * aexp) + ( Mh /(10**(11.59 + 1.195 * aexp)) )**(0.608 + 0.329 *aexp) ) #- 0.0225
def meanSM(self, Mh, z):
"""
Computes the mu star parameter for a halo mass according to the Moster et al. 2013 equations
Returns :
$\mu_* = 2\left(0.0351 - 0.0247 \frac{z}{1+z}\right)\left(\left[\left(\frac{10^{11.59 + 1.195 \frac{z}{1+z}}}{M_h}\right)^{1.376 - 0.826 \frac{z}{1+z}} + \left(\frac{M_h}{10^{11.59 + 1.195 \frac{z}{1+z}}} \right)^{0.608 + 0.329 \frac{z}{1+z}} \right]^{-1}\right)- 0.0225$
:param Mh: halo mass array
:param z: redshift array
"""
aexp = z/(1.+z)
return n.log10(Mh * 2. * ( 0.0351 - 0.0247 * aexp) / ((Mh/ (10**(11.59 + 1.195 * aexp)) )**(- 1.376 + 0.826 * aexp) + ( Mh /(10**(11.59 + 1.195 * aexp)) )**(0.608 + 0.329 *aexp) )) #- 0.0225
def sample_Ms( self, Mh, z, scatter = 0.15 ):
"""
Draws a stellar mass from a lognormal distribution centered on mu_star with width sigma_star
:param Mh: halo mass
:param z: redshift
:param scatter: scatter in the stellar mass to halo mass relation
"""
return norm.rvs( loc = self.meanSM(Mh, z), scale = scatter )
class StellarMass_EMERGE_Moster2017() :
"""
Loads the environment to assign stellar masses to halos from dark matter only simulations, here MultiDark simulations.
:param Lbox: length of the box in Mpc/h
:param wdir: Path to the multidark lightcone directory
:param boxDir: box directory name
:param snl: list of snapshots available
:param zsl: list of redshift corresponding to the snapshots
:param zArray: redshift array to be considered to interpolate the redshift -- distance conversion
:param Hbox: Hubble constant at redshift 0 of the box
:param Melement: Mass of the resolution element in solar masses.
:param columnDict: dictionnary to convert column name into the index to find it in the snapshots
"""
def __init__(self,Lbox=1000.0 * uu.Mpc, boxDir=os.environ['MD10'], snl=[], Hbox = 67.77 * uu.km / (uu.s * uu.Mpc), Melement = 23593750000.0 ):
# parameters related to the simulations
self.Lbox = Lbox # box length
self.Hbox = Hbox # Hubble constant at redshift 0 in the box
self.boxDir = boxDir # directory of the box where the snapshots are stored
self.snl = snl # snapshot list, path to files
self.Melement = Melement # mass of one particle in the box
self.h = 0.6777
# parameters used to run the simulation
self.omega_lambda = 0.692885
self.omega_matter = 0.307115
self.omega_baryon = 0.048206
self.ns = 0.96
self.sigma8 = 0.8228
self.G = 6.67428 * 10**(-9) # cm3 g-1 s-2
self.Msun = 1.98892 * 10**(33.) # g
self.Npart = 3840
self.force_resolution = 5. # kpc /h
# parameters and equations related to EMERGE
# equation (7)
self.log_M0 = 11.339 # +0.005 -0.080
self.log_Mz = 0.692 # +0.010 -0.009
self.log10_M1 = lambda z : self.log_M0 + self.log_Mz * (z/(1.+z))
# equation (8)
self.epsilon_0 = 0.005
self.epsilon_z = 0.689
self.epsilon_N = lambda z : self.epsilon_0 + self.epsilon_z * (z/(1.+z))
# equation (9)
self.beta_0 = 3.334
self.beta_z = -2.079
self.beta = lambda z : self.beta_0 + self.beta_z * (z/(1.+z))
# equation (10)
self.gamma_0 = 0.966
self.gamma = lambda z : self.gamma_0
# equation (5) <= (7, 8, 9, 10)
# integrated efficiency function of mass and redshift
self.epsilon = lambda stellar_mass, z : 2. * self.epsilon_N(z) /((stellar_mass / 10**self.log10_M1(z))**(-self.beta(z)) + (stellar_mass / 10**self.log10_M1(z))**(self.gamma(z)))
# equation (6)
# mass at which baryon conversion is most efficient
self.M_max = lambda z : 10**self.log10_M1(z) * (self.beta(z)/self.gamma(z))**(1/(self.beta(z) + self.gamma(z)))
# equation (13)
self.tau_0 = 4.282
self.tau_s = 0.363
self.tau = lambda t_dyn, stellar_mass : t_dyn * self.tau_0 * (stellar_mass * 10**(-10.))**(-self.tau_s)
# equation (14), stripping
self.f_s = 0.122
# equation (15), merging
self.f_esc = 0.338
def reconsitute_history(self):
"""
reads a fits file at a given redshift:
#. split central - sat
#. read and match to its predecessors at the previous redshift for centrals.
#. read and match at all previous redshifts for sat
#. 2 catalogs of matched properties
#. write history catalogs with properties of interest
#. retrieve the properties of interest
#.
columns available in short files
'id': 0, 'desc_id': 1, 'mvir': 2, 'vmax': 3, 'vrms': 4, 'rvir': 5, 'rs': 6, 'Np': 7, 'x': 8, 'y': 9, 'z': 10, 'vx': 11, 'vy': 12, 'vz': 13, 'Jx': 14, 'Jy': 15, 'Jz': 16, 'Spin':17, 'Rs_Klypin': 18, 'Mmvir_all': 19, 'M200b': 20, 'M200c': 21, 'M500c': 22, 'M2500c': 23, 'Xoff': 24, 'Voff': 25, 'Spin_Bullock': 26, 'b_to_a': 27, 'c_to_a': 28, 'Ax': 29, 'Ay': 30, 'Az': 31, 'b_to_a_500c': 32, 'pid': 33
"""
return 0.
def sample_stellar_mass(self):
"""
Given a file written by reconstitute history,
#. computes the galaxy properties
#. writes them to a new file "_galaxy.fits"
"""
return 0.
``` |
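A short worked example of the Moster et al. (2013) relation coded in `SMHMr`, `meanSM` and `sample_Ms` above, assuming halo masses in Msun and that the `MD04` environment variable required at import time is set; the mass grid is illustrative:
```python
import numpy as n
from StellarMass import StellarMass

sm = StellarMass()
Mh = 10 ** n.arange(11.0, 15.1, 0.5)             # halo masses in Msun
z = 0.0

mu_star = sm.SMHMr(Mh, z)                        # stellar-to-halo mass ratio
log_Ms_mean = sm.meanSM(Mh, z)                   # log10 of the mean stellar mass
log_Ms_draw = sm.sample_Ms(Mh, z, scatter=0.15)  # one lognormal realisation

for logMh, mu, logMs in zip(n.log10(Mh), mu_star, log_Ms_mean):
    print(logMh, mu, logMs)
```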
{
"source": "JohanComparat/pyEmerge",
"score": 2
} |
#### File: pyEmerge/bin_bao_tracers/lc_convert_2_fits.py
```python
import h5py # HDF5 support
import os
import glob
import numpy as n
from scipy.interpolate import interp1d
import astropy.io.fits as fits
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
def write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max):
f = h5py.File(path_to_lc, 'r')
is_gal = (f['/sky_position/selection'].value)&(f['/sky_position/redshift_R'].value<z_max)&(f['/cosmo_4most/is_ELG_eBOSS'].value)#&(f['/agn_properties/agn_activity'].value==1)
hdu_cols = fits.ColDefs([
fits.Column(name='Vmax',format='D', array= f['/halo_properties/Vmax'].value[is_gal], unit='km/s' )
,fits.Column(name='mvir',format='D', array= f['/halo_properties/mvir'].value[is_gal], unit='Msun' )
,fits.Column(name='log_stellar_mass',format='D', array= n.log10(f['/moster_2013_data/stellar_mass'].value[is_gal]) , unit='log10(stellar_mass/[Msun])' )
,fits.Column(name='RA',format='D', array= f['/sky_position/RA'].value[is_gal] , unit='RA/[deg]' )
,fits.Column(name='DEC',format='D', array= f['/sky_position/DEC'].value[is_gal], unit='DEC/[deg]' )
,fits.Column(name='redshift_R',format='D', array= f['/sky_position/redshift_R'].value[is_gal], unit='real space redshift' )
,fits.Column(name='redshift_S',format='D', array= f['/sky_position/redshift_S'].value[is_gal], unit='redshift space redshift' )
])
f.close()
tb_hdu = fits.BinTableHDU.from_columns( hdu_cols )
#define the header
prihdr = fits.Header()
prihdr['author'] = 'JC'
prihdr['DEC_max'] = dec_max
prihdr['DEC_min'] = - dec_max
prihdr['RA_max'] = ra_max
prihdr['RA_min'] = - ra_max
prihdr['z_min'] = z_min
prihdr['z_max'] = z_max
prihdu = fits.PrimaryHDU(header=prihdr)
#writes the file
thdulist = fits.HDUList([prihdu, tb_hdu])
print( out_filename )
os.system("rm "+out_filename)
thdulist.writeto(out_filename)
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L3.hdf5'
out_filename = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_eBOSS_L3.fits'
z_min = 0.
z_max = 1.08
dec_max = 8.269819492449505
ra_max = 6.7529257176359
write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max)
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L6.hdf5'
out_filename = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_eBOSS_L6.fits'
z_min = 0.
z_max = 3.0
dec_max = 2.0047373031569915
ra_max = 1.9766516114702513
write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max)
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L15.hdf5'
out_filename = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_eBOSS_L15.fits'
z_min = 0.
z_max = 0.54
dec_max = 20.257311381848154
ra_max = 14.323944878104827
write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max)
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L3_z1.hdf5'
out_filename = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_eBOSS_L3_z1.fits'
z_min = 1.08
z_max = 3.0
dec_max = 4.134909746242654
ra_max = 3.3764628588325674
write_fits_lc(path_to_lc, out_filename, z_min, z_max, dec_max, ra_max)
# z< 0.5423857379098544 |ra [deg]|< 14.323944878104827 |dec [deg]|< 20.257311381848154
# L3 characteristics :
# z< 1.0889947373832305 |ra [deg]|< 6.7529257176359 |dec [deg]|< 8.269819492449505
# N points: 8037075
#
# L3_z1 characteristics
# z< 3.8309961826584344 |ra [deg]|< 3.3764628588325674 |dec [deg]|< 4.134909746242654
# N points: 8511571
#
# L6 characteristics
# z< 6.697087333514605 |ra [deg]|< 1.9766516114702513 |dec [deg]|< 2.0047373031569915
# N points: 3287299
```
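A quick hedged check of one of the catalogues written above, reading back the columns and comparing them with the footprint stored in the primary header; the path simply repeats one of the outputs of the script:
```python
import astropy.io.fits as fits

hdul = fits.open('/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_eBOSS_L3.fits')
hdr = hdul[0].header
data = hdul[1].data

print(hdr['z_min'], hdr['z_max'], hdr['RA_max'], hdr['DEC_max'])
print('N galaxies:', len(data))
print('z range   :', data['redshift_S'].min(), data['redshift_S'].max())
print('RA range  :', data['RA'].min(), data['RA'].max())
print('DEC range :', data['DEC'].min(), data['DEC'].max())
hdul.close()
```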
#### File: pyEmerge/bin_cluster/lc_create_shells.py
```python
import sys
ii = int(sys.argv[1])
env = sys.argv[2] # 'MD10'
L_box = float(sys.argv[3]) / 0.6777
positions_group_name = sys.argv[4] # 'remaped_position_L3'
if positions_group_name == 'remaped_position_L3' :
positions_group = 'remaped_position_L3'
x_obs, y_obs, z_obs = 0., 0.7071/2.*L_box, 0.5774/2.*L_box
if positions_group_name == 'remaped_position_L3_z1' :
positions_group = 'remaped_position_L3'
x_obs, y_obs, z_obs = -2.4495*L_box, 0.7071/2.*L_box, 0.5774/2.*L_box
if positions_group_name == 'remaped_position_L6' :
positions_group = 'remaped_position_L6'
x_obs, y_obs, z_obs = 0., 0.4140/2.*L_box, 0.4082/2.*L_box
if positions_group_name == 'remaped_position_L15' :
positions_group = 'remaped_position_L15'
#1.4142', '1.0000', '0.7071
x_obs, y_obs, z_obs = 0., 1.0000/2.*L_box, 0.7071/2.*L_box
import h5py # HDF5 support
import os
import glob
import numpy as n
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
h5_lc_dir = os.path.join(os.environ[env], 'h5_lc', 'cluster_shells_'+positions_group_name )
if os.path.isdir(h5_lc_dir)==False:
os.mkdir(h5_lc_dir)
h5_dir = os.path.join(os.environ[env], 'cluster_h5' )
input_list_i = n.array(glob.glob(os.path.join(h5_dir, "hlist_?.?????.hdf5")))
input_list_i.sort()
# removing snapshots that cannote be remapped ...
input_list = n.delete(input_list_i,n.array([
#n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.16620_emerge.hdf5")), # LSAR issue
#n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.17770_emerge.hdf5")), # LSAR issue
#n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.18990_emerge.hdf5")), # LSAR issue
#n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.19410_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.21210_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.24230_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.28920_emerge.hdf5")), # LSAR issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.27060_emerge.hdf5")), # remap issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.43090_emerge.hdf5")), # remap issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.71730_emerge.hdf5")), # remap issue
n.argwhere(input_list_i== os.path.join(h5_dir, "hlist_0.93570_emerge.hdf5")) # remap issue
]) )
# creates the redshift list
redshifts = []
for file_1 in input_list :
f1 = h5py.File(file_1, "r")
redshifts.append(f1.attrs['redshift'])
f1.close()
redshifts = n.array(redshifts)
# creates the shell list
Dcom = cosmoMD.comoving_distance(redshifts).value
Dmax = n.hstack((Dcom[0],(Dcom[1:]+Dcom[:-1])/2.))
Dmin = n.hstack(((Dcom[1:]+Dcom[:-1])/2., Dcom[-1]))
def copylc_data(ii, option=False):
"""
Creates the selection array to obtain the shell in a snapshot to be added in the light cone
Writes a lightcone shell for each snapshot
"""
file_1 = input_list[ii]
file_out = os.path.join(h5_lc_dir, 'shell_'+os.path.basename( input_list[ii] ) )
print(file_1, "==>>", file_out)
if os.path.isfile(file_out):
os.system('rm '+file_out)
f1 = h5py.File(file_1, "r")
print( "n halos=",f1['/halo_properties/'].attrs['N_halos'])
x,y,z=f1[positions_group + '/xyz_Lbox'].value.T*L_box
distance = ((x-x_obs)**2.+(y-y_obs)**2.+(z-z_obs)**2.)**0.5
selection = (distance>=Dmin[ii])&(distance<Dmax[ii])
print( len(distance[selection])," halos in shell ", Dmin[ii], "<d comoving<",Dmax[ii])
if len(distance[selection])>1:
f = h5py.File(file_out, "a")
f.attrs['file_name'] = os.path.basename(file_out)
f.attrs['HDF5_Version'] = h5py.version.hdf5_version
f.attrs['h5py_version'] = h5py.version.version
halo_data = f.create_group('halo_position')
ds = halo_data.create_dataset('x', data = x[selection] )
ds = halo_data.create_dataset('y', data = y[selection] )
ds = halo_data.create_dataset('z', data = z[selection] )
ds = halo_data.create_dataset('vx', data = f1['/halo_position/vx'].value[selection] )
ds = halo_data.create_dataset('vy', data = f1['/halo_position/vy'].value[selection] )
ds = halo_data.create_dataset('vz', data = f1['/halo_position/vz'].value[selection] )
halo_data = f.create_group('moster_2013_data')
ds = halo_data.create_dataset('stellar_mass', data = f1['/moster_2013_data/stellar_mass'].value[selection] )
halo_data = f.create_group('remaped_position_L15')
ds = halo_data.create_dataset('xyz_Lbox', data = f1['/remaped_position_L15/xyz_Lbox'].value[selection] )
halo_data = f.create_group('remaped_position_L3')
ds = halo_data.create_dataset('xyz_Lbox', data = f1['/remaped_position_L3/xyz_Lbox'].value[selection] )
halo_data = f.create_group('remaped_position_L6')
ds = halo_data.create_dataset('xyz_Lbox', data = f1['/remaped_position_L6/xyz_Lbox'].value[selection] )
halo_data = f.create_group('cluster_data')
ds = halo_data.create_dataset('cool_class', data = f1['/cluster_data/cool_class'].value[selection] )
ds = halo_data.create_dataset('kT', data = f1['/cluster_data/kT'].value[selection] )
ds = halo_data.create_dataset('log_LX_05_24', data = f1['/cluster_data/log_LX_05_24'].value[selection] )
ds = halo_data.create_dataset('log_LceX_05_24', data = f1['/cluster_data/log_LceX_05_24'].value[selection] )
ds = halo_data.create_dataset('log_Mgas', data = f1['/cluster_data/log_Mgas'].value[selection] )
halo_data = f.create_group('halo_properties')
halo_data.attrs['N_halos'] = len(distance[selection])
ds = halo_data.create_dataset('scale' , data=f1['halo_properties/scale'] [selection])
ds = halo_data.create_dataset('id' , data=f1['halo_properties/id'] [selection])
ds = halo_data.create_dataset('desc_scale' , data=f1['halo_properties/desc_scale'] [selection])
ds = halo_data.create_dataset('desc_id' , data=f1['halo_properties/desc_id'] [selection])
ds = halo_data.create_dataset('num_prog' , data=f1['halo_properties/num_prog'] [selection])
ds = halo_data.create_dataset('pid' , data=f1['halo_properties/pid'] [selection])
ds = halo_data.create_dataset('upid' , data=f1['halo_properties/upid'] [selection])
ds = halo_data.create_dataset('desc_pid' , data=f1['halo_properties/desc_pid'] [selection])
ds = halo_data.create_dataset('mvir' , data=f1['halo_properties/mvir'] [selection])
ds = halo_data.create_dataset('rvir' , data=f1['halo_properties/rvir'] [selection])
ds = halo_data.create_dataset('rs' , data=f1['halo_properties/rs'] [selection])
ds = halo_data.create_dataset('vrms' , data=f1['halo_properties/vrms'] [selection])
ds = halo_data.create_dataset('mmp' , data=f1['halo_properties/mmp'] [selection])
ds = halo_data.create_dataset('scale_of_last_MM' , data=f1['halo_properties/scale_of_last_MM'] [selection])
ds = halo_data.create_dataset('vmax' , data=f1['halo_properties/vmax'] [selection])
ds = halo_data.create_dataset('Jx' , data=f1['halo_properties/Jx'] [selection])
ds = halo_data.create_dataset('Jy' , data=f1['halo_properties/Jy'] [selection])
ds = halo_data.create_dataset('Jz' , data=f1['halo_properties/Jz'] [selection])
ds = halo_data.create_dataset('Spin' , data=f1['halo_properties/Spin'] [selection])
ds = halo_data.create_dataset('Breadth_first_ID' , data=f1['halo_properties/Breadth_first_ID'] [selection])
ds = halo_data.create_dataset('Depth_first_ID' , data=f1['halo_properties/Depth_first_ID'] [selection])
ds = halo_data.create_dataset('Tree_root_ID' , data=f1['halo_properties/Tree_root_ID'] [selection])
ds = halo_data.create_dataset('Orig_halo_ID' , data=f1['halo_properties/Orig_halo_ID'] [selection])
ds = halo_data.create_dataset('Next_coprogenitor_depthfirst_ID' , data=f1['halo_properties/Next_coprogenitor_depthfirst_ID'] [selection])
ds = halo_data.create_dataset('Last_progenitor_depthfirst_ID' , data=f1['halo_properties/Last_progenitor_depthfirst_ID'] [selection])
ds = halo_data.create_dataset('Last_mainleaf_depthfirst_ID' , data=f1['halo_properties/Last_mainleaf_depthfirst_ID'] [selection])
ds = halo_data.create_dataset('Tidal_Force' , data=f1['halo_properties/Tidal_Force'] [selection])
ds = halo_data.create_dataset('Tidal_ID' , data=f1['halo_properties/Tidal_ID'] [selection])
ds = halo_data.create_dataset('Rs_Klypin' , data=f1['halo_properties/Rs_Klypin'] [selection])
ds = halo_data.create_dataset('Mmvir_all' , data=f1['halo_properties/Mmvir_all'] [selection])
ds = halo_data.create_dataset('M200b' , data=f1['halo_properties/M200b'] [selection])
ds = halo_data.create_dataset('M200c' , data=f1['halo_properties/M200c'] [selection])
ds = halo_data.create_dataset('M500c' , data=f1['halo_properties/M500c'] [selection])
ds = halo_data.create_dataset('M2500c' , data=f1['halo_properties/M2500c'] [selection])
ds = halo_data.create_dataset('Xoff' , data=f1['halo_properties/Xoff'] [selection])
ds = halo_data.create_dataset('Voff' , data=f1['halo_properties/Voff'] [selection])
ds = halo_data.create_dataset('Spin_Bullock' , data=f1['halo_properties/Spin_Bullock'] [selection])
ds = halo_data.create_dataset('b_to_a' , data=f1['halo_properties/b_to_a'] [selection])
ds = halo_data.create_dataset('c_to_a' , data=f1['halo_properties/c_to_a'] [selection])
ds = halo_data.create_dataset('Ax' , data=f1['halo_properties/Ax'] [selection])
ds = halo_data.create_dataset('Ay' , data=f1['halo_properties/Ay'] [selection])
ds = halo_data.create_dataset('Az' , data=f1['halo_properties/Az'] [selection])
ds = halo_data.create_dataset('b_to_a_500c' , data=f1['halo_properties/b_to_a_500c'] [selection])
ds = halo_data.create_dataset('c_to_a_500c' , data=f1['halo_properties/c_to_a_500c'] [selection])
ds = halo_data.create_dataset('Ax_500c' , data=f1['halo_properties/Ax_500c'] [selection])
ds = halo_data.create_dataset('Ay_500c' , data=f1['halo_properties/Ay_500c'] [selection])
ds = halo_data.create_dataset('Az_500c' , data=f1['halo_properties/Az_500c'] [selection])
ds = halo_data.create_dataset('TU' , data=f1['halo_properties/TU'] [selection])
ds = halo_data.create_dataset('M_pe_Behroozi' , data=f1['halo_properties/M_pe_Behroozi'] [selection])
ds = halo_data.create_dataset('M_pe_Diemer' , data=f1['halo_properties/M_pe_Diemer'] [selection])
ds = halo_data.create_dataset('Macc' , data=f1['halo_properties/Macc'] [selection])
ds = halo_data.create_dataset('Mpeak' , data=f1['halo_properties/Mpeak'] [selection])
ds = halo_data.create_dataset('Vacc' , data=f1['halo_properties/Vacc'] [selection])
ds = halo_data.create_dataset('Vpeak' , data=f1['halo_properties/Vpeak'] [selection])
ds = halo_data.create_dataset('Halfmass_Scale' , data=f1['halo_properties/Halfmass_Scale'] [selection])
ds = halo_data.create_dataset('Acc_Rate_Inst' , data=f1['halo_properties/Acc_Rate_Inst'] [selection])
ds = halo_data.create_dataset('Acc_Rate_100Myr' , data=f1['halo_properties/Acc_Rate_100Myr'] [selection])
ds = halo_data.create_dataset('Acc_Rate_1Tdyn' , data=f1['halo_properties/Acc_Rate_1Tdyn'] [selection])
ds = halo_data.create_dataset('Acc_Rate_2Tdyn' , data=f1['halo_properties/Acc_Rate_2Tdyn'] [selection])
ds = halo_data.create_dataset('Acc_Rate_Mpeak' , data=f1['halo_properties/Acc_Rate_Mpeak'] [selection])
ds = halo_data.create_dataset('Mpeak_Scale' , data=f1['halo_properties/Mpeak_Scale'] [selection])
ds = halo_data.create_dataset('Acc_Scale' , data=f1['halo_properties/Acc_Scale'] [selection])
ds = halo_data.create_dataset('First_Acc_Scale' , data=f1['halo_properties/First_Acc_Scale'] [selection])
ds = halo_data.create_dataset('First_Acc_Mvir' , data=f1['halo_properties/First_Acc_Mvir'] [selection])
ds = halo_data.create_dataset('First_Acc_Vmax' , data=f1['halo_properties/First_Acc_Vmax'] [selection])
ds = halo_data.create_dataset('VmaxAtMpeak' , data=f1['halo_properties/VmaxAtMpeak'] [selection])
ds = halo_data.create_dataset('Tidal_Force_Tdyn' , data=f1['halo_properties/Tidal_Force_Tdyn'] [selection])
ds = halo_data.create_dataset('logVmaxVmaxmaxTdynTmpeak' , data=f1['halo_properties/logVmaxVmaxmaxTdynTmpeak'] [selection])
ds = halo_data.create_dataset('Time_to_future_merger' , data=f1['halo_properties/Time_to_future_merger'] [selection])
ds = halo_data.create_dataset('Future_merger_MMP_ID' , data=f1['halo_properties/Future_merger_MMP_ID'] [selection])
f.close()
f1.close()
copylc_data(ii)
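# Editor's note (illustrative sketch of what the shell selection above does): for
# snapshot ii, the shell keeps every halo whose comoving distance (computed in the
# remapped box coordinates) to the observer at (x_obs, y_obs, z_obs) falls in
# [Dmin[ii], Dmax[ii]). Successive shells share the mid-point distances between
# consecutive snapshots, so they tile the light cone between Dcom[-1] and Dcom[0]
# without gaps or overlaps.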
```
#### File: pyEmerge/bin/lc_lognlogs_agns.py
```python
import h5py # HDF5 support
import os
import glob
import numpy as n
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
plotDir = os.path.join(os.environ['HOME'], 'wwwDir', "eRoMok", "logNlogS")
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
def get_lognlogs(path_to_lc, area, z_max=3., ra_max=10., dec_max=10.):
f = h5py.File(path_to_lc, 'r+')
is_gal = (f['/sky_position/selection'].value)&(f['/sky_position/redshift_R'].value<z_max)&(abs(f['/sky_position/DEC'].value)<dec_max)&(abs(f['/sky_position/RA'].value)<ra_max)
is_agn = (f['/sky_position/selection'].value)&(f['/agn_properties/agn_activity'].value==1)&(f['/agn_properties/rxay_flux_05_20'].value>0)
n_gal = len(f['/sky_position/redshift_S'].value[is_gal])
n_agn = len(f['/sky_position/redshift_S'].value[is_agn])
z = f['/sky_position/redshift_S'].value[is_agn]
#logm = n.log10(f['/moster_2013_data/stellar_mass'].value[is_agn])
#lsar = f['/agn_properties/log_lambda_sar'].value[is_agn]
#lx = logm + lsar
log_f_05_20 = n.log10(f['/agn_properties/rxay_flux_05_20'].value[is_agn]) #- 0.6
f.close()
out = n.histogram(log_f_05_20, bins = n.arange(-18, -8., 0.2))
# cumulative number density per square degrees
x_out = 0.5*(out[1][1:] + out[1][:-1])
N_out = n.array([n.sum(out[0][ii:]) for ii in range(len(out[0])) ])
c_out = n.array([n.sum(out[0][ii:]) for ii in range(len(out[0])) ]) / area
c_out_up = (1 + N_out**(-0.5)) * c_out
c_out_low = (1 - N_out**(-0.5)) * c_out
c_err = (n.log10(c_out_up) - n.log10(c_out_low))/2.
return x_out, c_out, c_err
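# Editor's note: c_err above is a symmetric error in log10 space built from the
# +/- sqrt(N) Poisson bounds on the cumulative counts. For example, a bin with
# N = 100 sources gives bounds of (1 +/- 0.1) * c and
#   c_err = (log10(1.1*c) - log10(0.9*c)) / 2 ~ 0.044 dex.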
p.figure(1, (6,6))
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L3.hdf5'
area = 6.7529257176359*2. * 2* 8.269819492449505
x_out, c_out, c_err = get_lognlogs(path_to_lc, area, 1.1, 6.7529257176359, 8.269819492449505)
#p.plot(x_out, n.log10(c_out), lw=2, rasterized = True, label = 'z<1.08' )
p.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L3 z<1.08, 223deg2' )
x_out_a, c_out_a, c_err_a = x_out, c_out, c_err
p.axhline(n.log10(300), ls='dashed')
#path_to_lc=='/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_remaped_position_L3_z1.hdf5'
#area = 3.3764628588325674*2. * 2* 4.134909746242654
#x_out, c_out, c_err = get_lognlogs(path_to_lc, area, z_max=3.)
#p.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L3 1.08<z<3.' )
#p.plot(x_out, n.log10(c_out+c_out_a), ls='dashed', label='total')
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L6.hdf5'
area = 1.9766516114702513*2. * 2*2.0047373031569915
x_out, c_out, c_err = get_lognlogs(path_to_lc, area, 3., 1.9766516114702513, 2.0047373031569915)
p.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L6 z<3., 15deg2' )
#p.plot(x_out-0.1, n.log10(c_out), 'k', lw=2, rasterized = True, label = 'L3 lc-0.1' )
#p.plot(x_out, n.log10(c_out*(1-frac_err_13deg2)), 'k--', lw=1, rasterized = True, label = 'v0.6, 13.3deg2 scatter' )
#p.plot(x_out, n.log10(c_out*(1+frac_err_13deg2)), 'k--', lw=1, rasterized = True)
#p.plot(x_out, n.log10(c_out*(1-frac_err_3deg2)), 'r--', lw=1, rasterized = True, label = 'v0.6, 3.5deg2 scatter' )
#p.plot(x_out, n.log10(c_out*(1+frac_err_3deg2)), 'r--', lw=1, rasterized = True)
#p.plot(x_out_0, n.log10(c_out_0), 'm--', rasterized = True, label = 'Planck mock v0.0' )
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L15.hdf5'
area = 14.323944878104827*2. * 2*20.257311381848154
x_out, c_out, c_err = get_lognlogs(path_to_lc, area, 3., 14.323944878104827, 20.257311381848154)
p.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L15 z<0.54 1160deg2' )
path_2_logNlogS_data = os.path.join(os.environ["DARKSIM_DIR"], 'observations', 'logNlogS', 'logNlogS_Georgakakis_08_AGN.data')
x_data, y_data, yerr = n.loadtxt(path_2_logNlogS_data, unpack=True)
p.fill_between(x_data, y1 = n.log10(y_data-yerr), y2=n.log10(y_data+yerr), color='b' , rasterized = True, alpha=0.5, label = 'Georgakakis 08' )
#p.plot(x_data, n.log10(y_data))
path_2_logNlogS_data = os.path.join(os.environ["DARKSIM_DIR"], 'observations', 'logNlogS', 'logNlogS_Merloni_12_AGN.data')
x_data, y_data = n.loadtxt(path_2_logNlogS_data, unpack=True)
p.plot(x_data, n.log10(y_data), label = 'Merloni 12' )
p.axhline(7, ls='dashed')
p.xlabel('log(F[0.5-2 keV])')
p.ylabel('log(>F) [/deg2]')
p.legend(frameon=False, loc=0)
#p.yscale('log')
p.xlim((-17, -12))
p.ylim((-2, 4.))
#p.title('Mocks')
p.grid()
p.savefig(os.path.join(plotDir, "logN_logS_AGN.jpg"))
p.clf()
```
#### File: pyEmerge/bin/lc_write_clustering_sample.py
```python
import h5py # HDF5 support
import os
import glob
import numpy as n
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115)
Lname='L3'
def write_samples(Lname):
path_2_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_remaped_position_'+Lname+'.hdf5'
topdir = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/clustering_catalogs_remaped_position_'+Lname+'/'
plotDir = os.path.join(os.environ['HOME'], 'wwwDir', "eRoMok", "h5", "clustering_AGN", Lname)
if os.path.isdir(plotDir)==False:
os.mkdir(plotDir)
f = h5py.File(path_2_lc, 'r')
is_gal = (f['/sky_position/selection'].value)
is_agn = (f['/sky_position/selection'].value)&(f['/agn_properties/agn_activity'].value==1)
n_gal = len(f['/sky_position/redshift_S'].value[is_gal])
n_agn = len(f['/sky_position/redshift_S'].value[is_agn])
z = f['/sky_position/redshift_S'].value[is_agn]
logm = n.log10(f['/moster_2013_data/stellar_mass'].value[is_agn])
lsar = f['/agn_properties/log_lambda_sar'].value[is_agn]
lx = logm + lsar
log_f_05_20 = n.log10(f['/agn_properties/rxay_flux_05_20'].value)
raR, decR = n.loadtxt(topdir + 'random-ra-dec.txt', unpack=True)
def write_samp(zmax,lxmin, out_name = 'lc_remaped_position_'+Lname+'_z_lt_03_lx_gt_438.ascii'):
zmin=0.001
sel = (is_agn)&(f['/sky_position/redshift_S'].value>zmin)&(f['/sky_position/redshift_S'].value<zmax)&(n.log10(f['/moster_2013_data/stellar_mass'].value)+f['/agn_properties/log_lambda_sar'].value>lxmin)
n.savetxt(out_name, n.transpose([f['/sky_position/RA'].value[sel], f['/sky_position/DEC'].value[sel], f['/sky_position/redshift_S'].value[sel], n.ones_like(f['/sky_position/redshift_S'].value[sel])]) )
print(zmax, lxmin, len(f['/sky_position/RA'].value[sel]))
N_data = len(f['/sky_position/RA'].value[sel])
N_rds = 20*N_data # len(raR)
print("D,R=",N_data, N_rds)
dz=0.05
zs=n.arange(zmin, zmax + dz, dz)
nn,bb = n.histogram(f['/sky_position/redshift_S'].value[sel], bins=zs)#, weights=1./w_col.array)
nz=interp1d((zs[1:]+zs[:-1])/2.,nn)
rdsz=[]
for i in range(1,len(zs)-1,1):
inter=n.random.uniform(low=zs[i]-dz/2., high=zs[i]+dz/2., size=int( 1000* nz( zs[i] )))
rdsz.append(inter)
rds=n.hstack((rdsz))
n.random.shuffle(rds)
RR=rds[:N_rds]#-dz/2.
print("RR=",len(rds), len(RR))
n.random.shuffle(raR)
n.random.shuffle(decR)
n.savetxt(out_name[:-5]+'random', n.transpose([raR[:N_rds], decR[:N_rds], RR, n.ones_like(RR) ]))
p.figure(1, (6,6))
p.plot(f['/sky_position/redshift_S'].value[sel], n.log10(f['/halo_properties/mvir'].value[sel]), 'k,', rasterized = True )
p.axvline(0.08, ls='dashed')
p.ylabel('mvir')
p.xlabel('redshift')
p.legend(frameon=False, loc=0)
#p.yscale('log')
p.xlim((0,1.2))
#p.ylim((40, 46))
p.title('200deg2 mock')
p.grid()
p.savefig(os.path.join(plotDir, "HOD_z_"+str(zmax)+"_lx_"+str(lxmin)+".jpg"))
p.clf()
return sel
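# Editor's note (sketch of the random-catalogue construction above): the randoms
# re-use the angular mask from random-ra-dec.txt and draw redshifts matched to the
# data n(z): the data redshifts are histogrammed in dz = 0.05 bins, ~1000 random
# redshifts per data count are drawn uniformly inside each bin, the draws are
# shuffled, and the first 20 x N_data of them are kept, so the randoms trace the
# same radial selection function as the data sample.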
#p.figure(1, (6,6))
#p.plot(f['/sky_position/redshift_S'].value[sel], n.log10(f['/halo_properties/mvir'].value[sel]), 'k,', rasterized = True )
#p.axvline(0.08, ls='dashed')
#p.ylabel('mvir')
#p.xlabel('redshift')
#p.legend(frameon=False, loc=0)
##p.yscale('log')
#p.xlim((0,1.2))
#p.ylim((40, 46))
#p.title('200deg2 mock')
#p.grid()
#p.savefig(os.path.join(plotDir, "HOD_z_"+str(zmax)+"_lx_"+str(lxmin)+".jpg"))
#p.clf()
sel = write_samp(0.3, 44.0, out_name=topdir+'lc_'+Lname+'_z_lt_03_lx_gt_440.ascii')
sel = write_samp(0.3, 43.5, out_name=topdir+'lc_'+Lname+'_z_lt_03_lx_gt_435.ascii')
sel = write_samp(0.3, 43., out_name=topdir+'lc_'+Lname+'_z_lt_03_lx_gt_430.ascii')
sel = write_samp(0.3, 42.5, out_name=topdir+'lc_'+Lname+'_z_lt_03_lx_gt_425.ascii')
sel = write_samp(0.3, 42., out_name=topdir+'lc_'+Lname+'_z_lt_03_lx_gt_420.ascii')
sel = write_samp(0.3, 41.5, out_name=topdir+'lc_'+Lname+'_z_lt_03_lx_gt_415.ascii')
sel = write_samp(0.4, 44., out_name=topdir+'lc_'+Lname+'_z_lt_04_lx_gt_440.ascii')
sel = write_samp(0.4, 43.5, out_name=topdir+'lc_'+Lname+'_z_lt_04_lx_gt_435.ascii')
sel = write_samp(0.4, 43., out_name=topdir+'lc_'+Lname+'_z_lt_04_lx_gt_430.ascii')
sel = write_samp(0.4, 42.5, out_name=topdir+'lc_'+Lname+'_z_lt_04_lx_gt_425.ascii')
sel = write_samp(0.4, 42., out_name=topdir+'lc_'+Lname+'_z_lt_04_lx_gt_420.ascii')
sel = write_samp(0.4, 41.5, out_name=topdir+'lc_'+Lname+'_z_lt_04_lx_gt_415.ascii')
#p.figure(1, (6,6))
#p.plot(z, lx, 'k,', rasterized = True )
#p.plot(z[log_f_05_20>-12.7], lx[log_f_05_20>-12.7], 'r+', rasterized = True )
##p.axvline(0.08, ls='dashed')
#p.ylabel('log(LX)')
#p.xlabel('redshift')
#p.legend(frameon=False, loc=0)
##p.yscale('log')
#p.xlim((0,1.2))
#p.ylim((40, 46))
#p.title('200deg2 mock')
#p.grid()
#p.savefig(os.path.join(plotDir, Lname+"_z_lx_AGN.jpg"))
#p.clf()
#write_samples("L3")
write_samples("L6")
write_samples("L15")
```
#### File: pyEmerge/bin/measure_SFRD.py
```python
import numpy as n
import glob
import h5py
import os
import time
import sys
h5_files = n.array(glob.glob(os.path.join(os.environ['MD10'], "h5", "hlist_?.?????_emerge.hdf5")))
h5_files.sort()
def measureSMF(h5_file, volume=1000.**3., update=True):
f1 = h5py.File(h5_file, "r+")
sfr = f1['/emerge_data/star_formation_rate'].value
print( h5_file, len(sfr) )
if len(sfr)>0:
ok = (sfr>0)&(sfr<1000)
if len(sfr[ok])>0:
sfrd = n.sum(sfr[ok])/volume
print(sfrd)
if update:
print('updates', f1['/star_formation_rate_density/sfrd'].value)
data = f1['/star_formation_rate_density/sfrd']
data[...] = sfrd
print(f1['/star_formation_rate_density/sfrd'].value)
else:
print('creates')
stellar_mass_function_data = f1.create_group('star_formation_rate_density')
ds = stellar_mass_function_data.create_dataset('sfrd', data = sfrd )
ds.attrs['units'] = r'$M_\odot yr^{-1} h^{3} Mpc^{-3}$'
ds.attrs['long_name'] = 'SFRD'
f1.close()
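# Editor's note (illustrative arithmetic): sfrd is simply the sum of the star
# formation rates of the selected haloes divided by the box volume, e.g. a total
# of 1e9 Msun/yr in a (1000 Mpc/h)^3 box gives sfrd = 1e9 / 1000**3 = 1 in units
# of Msun yr^-1 h^3 Mpc^-3.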
for h5_file in h5_files:
try:
measureSMF(h5_file, update=True)
except( ValueError, KeyError, UnboundLocalError, RuntimeError ):
pass
```
#### File: pyEmerge/bin/print_data_structure.py
```python
import sys
ii = int(sys.argv[1])
env = sys.argv[2]
# python3 print_data_structure.py 22 MD10
import glob
import os
import numpy as n
import EmergeIterate
iterate = EmergeIterate.EmergeIterate(ii, env)
iterate.open_snapshots()
def print_attr(h5item):
for attr in h5item:
print(attr, h5item[attr])
def print_all_key(h5item):
for key in h5item.keys():
print('========================================')
print(key, h5item[key])
print('- - - - - - - - - - - - - - - - - - - - ')
print_attr(h5item[key])
def print_data_structure(h5item):
print('+ + + + + + + HEADER + + + + + + + + +')
print_attr(h5item.attrs)
print('\n')
print('+ + + + + + + DATA + + + + + + + + + +')
print_all_key(h5item)
print_data_structure(iterate.f0)
```
#### File: pyEmerge/bin/print_h5_structure.py
```python
import sys
file_name = sys.argv[1]
# python3 print_data_structure.py filename
import glob
import os
import numpy as n
import h5py # HDF5 support
f0 = h5py.File(file_name, "r")
def print_attr(h5item):
for attr in h5item:
print(attr, h5item[attr])
def print_all_key(h5item):
for key in h5item.keys():
print('========================================')
print(key, h5item[key])
print('- - - - - - - - - - - - - - - - - - - - ')
print_attr(h5item[key])
def print_data_structure(h5item):
print('+ + + + + + + HEADER + + + + + + + + +')
print_attr(h5item.attrs)
print('\n')
print('+ + + + + + + DATA + + + + + + + + + +')
print_all_key(h5item)
print_data_structure(f0)
```
#### File: pyEmerge/python/EmergeIterate.py
```python
import sys
# only input parameter is the numbering of the snapshot. These have to be
#processed in sequence, cannot be done in parallel ... First 1, 2, 3, ...
#ii = int(sys.argv[1])
#print('snapshot', ii)
import time
t0 = time.time()
from multiprocessing import Pool
#p=Pool(12)
import h5py
import os
import glob
import numpy as n
import EmergeStellarMass as sm
model = sm.StellarMass()
import pandas as pd
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115)#, Ob0=0.048206)
import astropy.constants as constants
# generic functions
# =================
f_loss = lambda t : 0.05*n.log( 1 + t / (1.4*10**6))
t_dyn = lambda rvir, mvir : (rvir**3./(9.797465327217671e-24*mvir))**0.5
def tau_quenching( m_star, tdyn, tau_0=4.282, tau_s=0.363):
out = n.zeros_like(m_star)
case_1 = (m_star < 1e10 )
out[case_1] = tdyn[case_1] * tau_0
case_2 = (case_1==False)
out[case_2] = tdyn[case_2] * tau_0 * (m_star[case_2] * 10.**(-10.))**(tau_s)
return out
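# Editor's note (illustrative sketch, approximate values): for a halo with
# rvir ~ 250 kpc and mvir ~ 1e12 Msun, t_dyn(250., 1e12) ~ 1.3e9 yr, and the
# quenching delay evaluates to roughly
#   tau_quenching(n.array([1e9]),  n.array([1.3e9])) ~ 5.6e9 yr   (m_star < 1e10 branch)
#   tau_quenching(n.array([1e11]), n.array([1.3e9])) ~ 1.3e10 yr  (mass-dependent branch)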
def compute_qtys_new_halos_pk(mvir, rvir, redshift, age_yr):
"""
Creates a new galaxy along with the new halo.
Integrates since the start of the Universe.
Updates the initiated quantities with the values of interest.
:param mvir: list of mvir [Msun], length = n.
:param rvir: list of rvir [kpc] , length = n.
:param redshift: redshift of the snapshot replicated n times.
:param age_yr: age of the Universe for the snapshot replicated n times.
Typically inputs should be :
* mvir=self.f1['/halo_properties/mvir'].value[self.mask_f1_new_halos],
* rvir=self.f1['/halo_properties/rvir'].value[self.mask_f1_new_halos],
* age_yr=self.f1.attrs['age_yr']
returns
mvir_dot, rvir_dot, dMdt, dmdt_star, star_formation_rate, stellar_mass
"""
f_b=model.f_b
epsilon = model.epsilon(mvir, redshift )
f_lost = f_loss(age_yr)
# evaluate equation (4)
mvir_dot = mvir / age_yr
# no pseudo evolution correction
dMdt = mvir_dot
# evaluate equation (1)
dmdt_star = f_b * dMdt * epsilon
# evaluate accretion: 0 in this first step
# self.dmdt_star_accretion = n.zeros_like(self.dmdt_star)
# evaluate equation (11)
# equation (12)
# evaluate stellar mass
star_formation_rate = dmdt_star * (1. - f_lost)
return mvir_dot, rvir / age_yr, dMdt, dmdt_star, star_formation_rate, star_formation_rate * age_yr
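# Editor's note (illustrative sketch; the array values below are made up):
#   _mvir = n.array([1e12, 5e12])   # Msun
#   _rvir = n.array([200., 350.])   # kpc
#   _z    = n.ones(2) * 2.0
#   _age  = n.ones(2) * 3.3e9       # yr, roughly the age of the Universe at z ~ 2
#   mvir_dot, rvir_dot, dMdt, dmdt_star, sfr, mstar = compute_qtys_new_halos_pk(_mvir, _rvir, _z, _age)
# For a brand-new halo the whole mass is assumed to have been accreted since t = 0,
# so dM/dt = M_vir / t_age and the stellar mass is simply SFR * t_age.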
def compute_qtys_evolving_halos_pk(mvir_f0, mvir_f1, age_f0, age_f1, rvir_f0, rvir_f1, redshift, t_dynamical, rs_f1, mpeak_f1, mpeak_scale_f1, f1_scale, m_icm_f0, stellar_mass_f0, star_formation_rate_f0 ):
"""
update the quantities for evolving halos, present in f0 and f1.
inputs
mvir_f0 [Msun] : self.f0['/halo_properties/mvir'].value[self.mask_f0_evolving_11_halos]
mvir_f1 [Msun] : self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos]
age_f0 [yr] : self.f0.attrs['age_yr'] * n.ones_like(self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos])
age_f1 [yr] : self.f1.attrs['age_yr'] * n.ones_like(self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos])
rvir_f0 [kpc] : self.f0['/halo_properties/rvir'].value[self.mask_f0_evolving_11_halos]
rvir_f1 [kpc] : self.f1['/halo_properties/rvir'].value[self.mask_f1_evolving_11_halos]
redshift : self.f1.attrs['redshift'] * n.ones_like(self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos])
t_dynamical : self.t_dynamical[self.mask_f1_evolving_11_halos]
rs_f1 [kpc] : self.f1['/halo_properties/rs'].value[self.mask_f1_evolving_11_halos]
mpeak_f1 [Msun] : self.f1['/halo_properties/Mpeak'].value[self.mask_f1_evolving_11_halos]
mpeak_scale_f1 : self.f1['/halo_properties/Mpeak_scale'].value[self.mask_f1_evolving_11_halos]
f1_scale : float(self.f1_scale)
m_icm_f0 [Msun] : self.f0['/emerge_data/m_icm'].value[self.mask_f0_evolving_11_halos]
stellar_mass_f0 [Msun] : self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos]
star_formation_rate_f0 [Msun/yr] : self.f0['/emerge_data/star_formation_rate'].value[self.mask_f0_evolving_11_halos]
masks :
* mask_f1_evolving_11_halos
* mask_f0_evolving_11_halos
subcases :
* quenching : (mvir < Mpeak) & (Mpeak_scale < f1_scale)
* case 1. ( age >= t_mpeak ) & ( age_yr < t_mpeak + t_quench)
* case 2. (age_yr >= t_mpeak + t_quench)
* stripping, case 1 : (dMdt < 0), then all mass goes to ICM, m=0, mdot=0
* stripping, case 2 : after reaching its peak mass, if M < 0.122 * Mpeak, then all mass goes to ICM, m=0, mdot=0
"""
# computing dMdt for the halo
dt = age_f1 - age_f0
mvir_dot = (mvir_f1-mvir_f0) / (dt)
rvir_dot = (rvir_f1-rvir_f0) / (dt)
c = rvir_f1 / rs_f1
rho_nfw = mvir_f1 / (rs_f1**3. * 4. * n.pi * c * (1+c)**2. * (n.log(1.+c)-c/(1.+c)))
pseudo_evolution_correction = 4. * n.pi * rvir_f1 * rvir_f1 * rvir_dot * rho_nfw
dMdt = mvir_dot - pseudo_evolution_correction
# initialize the ICM mass to the previous value
m_icm = m_icm_f0
# Direct estimates of stellar mass and SFR
dmdt_star = model.f_b * dMdt * model.epsilon(mvir_f1, redshift)
# evaluate accretion: 0 in this first step
# dmdt_star_accretion = n.zeros_like(dmdt_star)
# evaluate equation (11)
f_lost = f_loss(dt)
# evaluate stellar mass
star_formation_rate = dmdt_star * (1. - f_lost)
stellar_mass = star_formation_rate * dt + stellar_mass_f0
# Variations due to stripping, merging and quenching
# quenching
quenching = (mvir_f1 < mpeak_f1) & (mpeak_scale_f1 < f1_scale)
#t_quench = tau_quenching( stellar_mass_f0, t_dynamical )
if stellar_mass_f0 < 1e10 :
t_quench = t_dynamical * 4.282
else :
t_quench = t_dynamical * 4.282 * (stellar_mass_f0 * 10.**(-10.))**(0.363)
t_mpeak = cosmoMD.age( 1. / mpeak_scale_f1 - 1. ).to(u.yr).value
# case 1. mdot = mdot at tpeak
quench_1 = (quenching) & (age_f1 >= t_mpeak ) & ( age_f1 < t_mpeak + t_quench)
if quench_1 :
star_formation_rate = n.ones_like(star_formation_rate)*star_formation_rate_f0
stellar_mass = star_formation_rate * dt + stellar_mass_f0
# case 2. m dot =0
quench_2 = (quenching) &(age_f1 >= t_mpeak + t_quench )
if quench_2:
star_formation_rate = n.zeros_like(star_formation_rate)
stellar_mass = stellar_mass_f0
# stripping, case 1
# negative growth value self.dMdt => 0
stripping_1 = (dMdt < 0)
# stripping, case 2
# after reaching its peak mass,
# if M < 0.122 * Mpeak, all mass goes to ICM, m=0, mdot=0
stripping_2 = (mvir_f1 < 0.122*mpeak_f1) & (mpeak_scale_f1 < f1_scale)
# both cases together
stripping = (stripping_1) | (stripping_2)
if stripping :
m_icm += stellar_mass_f0
stellar_mass = n.zeros_like(stellar_mass)
star_formation_rate = n.zeros_like(star_formation_rate)
return mvir_dot, rvir_dot, dMdt, dmdt_star, star_formation_rate, stellar_mass, m_icm
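# Editor's note: the pseudo-evolution correction evaluated above can be written as
#   rho_NFW(r_vir) = M_vir / (4 pi r_s^3 c (1+c)^2 [ln(1+c) - c/(1+c)]),  with c = r_vir / r_s
#   dM/dt = dM_vir/dt - 4 pi r_vir^2 (dr_vir/dt) rho_NFW(r_vir)
# i.e. the raw virial-mass growth rate is reduced by the mass that is only "gained"
# because the virial radius moves outwards through a static NFW profile.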
def merge_system(mvir_f0, mvir_f1, age_f0, age_f1, rvir_f0, rvir_f1, redshift, t_dynamical, rs_f1, mpeak_f1, mpeak_scale_f1, f1_scale, m_icm_f0, stellar_mass_f0, star_formation_rate_f0, sum_stellar_mass_guests):
"""
given f1_host, f0_host and f0 guests,
creates the right quantities, stellar mass and so on...
# m_star_sat x f_esc => m_host_ICM
# m_star_sat x (1-f_esc) => m_star_host
# f_esc = 0.388
#Time_to_future_merger: Time (in Gyr) until the given halo merges into a larger halo. (-1 if no future merger happens)
#Future_merger_MMP_ID: most-massive progenitor of the halo into which the given halo merges. (-1 if the main progenitor of the future merger halo does not exist at the given scale factor.)
"""
# evolution of the host
dt = age_f1 - age_f0
mvir_dot = (mvir_f1-mvir_f0) / (dt)
rvir_dot = (rvir_f1-rvir_f0) / (dt)
c = rvir_f1 / rs_f1
rho_nfw = mvir_f1 / (rs_f1**3. * 4. * n.pi * c * (1+c)**2. * (n.log(1.+c)-c/(1.+c)))
pseudo_evolution_correction = 4. * n.pi * rvir_f1 * rvir_f1 * rvir_dot * rho_nfw
dMdt = mvir_dot - pseudo_evolution_correction
m_icm = m_icm_f0
dmdt_star = model.f_b * dMdt * model.epsilon(mvir_f1, redshift)
f_lost = f_loss(dt)
star_formation_rate = dmdt_star * (1. - f_lost)
stellar_mass = star_formation_rate * dt + stellar_mass_f0
# merging the sub systems, i.e. adding stellar mass
stellar_mass += (1.-0.388)*sum_stellar_mass_guests
m_icm += 0.388*sum_stellar_mass_guests
return mvir_dot, rvir_dot, dMdt, dmdt_star, stellar_mass, star_formation_rate, m_icm
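# Editor's note (illustrative arithmetic, not part of the original pipeline): with
# the escape fraction f_esc = 0.388 used above, a host that accretes satellites
# totalling 1e9 Msun of stars gains
#   (1 - 0.388) * 1e9 ~ 6.1e8 Msun  in stellar mass, and
#   0.388 * 1e9       ~ 3.9e8 Msun  in its intra-cluster (ICM) component,
# on top of its own in-situ star formation over the time step.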
class EmergeIterate():
"""
Loads two consecutive snapshots and iterates one step of the Emerge model.
:param ii: index of the snapshot of interest
:param env: environment variable pointing to the box directory; it must contain a
sub-directory 'h5' with the 'hlist_?.?????_emerge.hdf5' data files
:param L_box: length of the box in Mpc/h
Running the iteration
---------------------
ipython3
import EmergeIterate
iterate = EmergeIterate.EmergeIterate(12, 'MD10')
iterate.open_snapshots()
iterate.map_halos_between_snapshots()
iterate.init_new_quantities()
if len((iterate.mask_f1_new_halos).nonzero()[0]) > 0 :
iterate.compute_qtys_new_halos()
if len((iterate.mask_f0_evolving_11_halos).nonzero()[0]) > 0 :
iterate.compute_qtys_evolving_halos()
if len(iterate.mask_f1_in_a_merging.nonzero()[0]) > 0 :
iterate.compute_qtys_merging_halos()
iterate.write_results()
"""
def __init__(self, ii, env, L_box=1000.0 ):
self.ii = ii
self.env = env
self.L_box = L_box # box length
def open_snapshots(self):
"""
Opens the files into the class as f0 and f1
"""
h5_dir = os.path.join(os.environ[self.env], 'h5' )
input_list = n.array(glob.glob(os.path.join(h5_dir, "hlist_?.?????_emerge.hdf5")))
input_list.sort()
file_0 = input_list[self.ii-1]
file_1 = input_list[self.ii]
self.f0 = h5py.File(file_0, "r")
self.f0_scale = os.path.basename(file_0).split('_')[1]
self.positions_f0 = n.arange(len(self.f0['/halo_properties/id'].value))
self.f1 = h5py.File(file_1, "r+")
self.f1_scale = os.path.basename(file_1).split('_')[1]
self.positions_f1 = n.arange(len(self.f1['/halo_properties/id'].value))
def map_halos_between_snapshots(self):
"""
id mapping for halos present in the previous snapshot
Creates 6 arrays to do the mapping in the different cases
* mask_f1_new_halos
* mask_f0_evolving_11_halos
* mask_f1_evolving_11_halos
* f1_id_with_multiple_progenitors
* mask_f1_in_a_merging
* mask_f0_in_a_merging
"""
#f0_desc_id_unique_list_all_descendents = n.unique(self.f0['/halo_properties/desc_id'].value)
f1_id_unique_list_descendents_detected_at_next_scale = n.intersect1d(n.unique(self.f0['/halo_properties/desc_id'].value), self.f1['/halo_properties/id'].value)
mask_f0_to_propagate = n.in1d(self.f0['/halo_properties/desc_id'].value, f1_id_unique_list_descendents_detected_at_next_scale)
# mask_f0_lost = (mask_f0_to_propagate == False )
# evolving halos are given after applying this boolean mask to a f1 quantity :
mask_f1_evolved_from_previous = n.in1d( self.f1['/halo_properties/id'].value, f1_id_unique_list_descendents_detected_at_next_scale )
# new halos are given after applying this boolean mask to a f1 quantity
# new halos in f1, not present in f0
self.mask_f1_new_halos = (mask_f1_evolved_from_previous==False)
print('new halos', len(self.mask_f1_new_halos.nonzero()[0]))
# halos descending :
# mask_f0_to_propagate
# mask_f1_evolved_from_previous
s = pd.Series(self.f0['/halo_properties/desc_id'].value[mask_f0_to_propagate])
self.f1_id_with_multiple_progenitors = s[s.duplicated()].get_values()
# also = f0_desc_id merging into 1 halo in f1
# merging systems [many halos in f0 into a single f1 halo]
self.mask_f1_in_a_merging = n.in1d( self.f1['/halo_properties/id'].value, self.f1_id_with_multiple_progenitors )
self.mask_f0_in_a_merging = n.in1d( self.f0['/halo_properties/desc_id'].value, self.f1_id_with_multiple_progenitors )
# halos mapped one-to-one (1:1) between snapshots
self.mask_f0_evolving_11_halos = ( mask_f0_to_propagate ) & ( self.mask_f0_in_a_merging == False )
self.mask_f1_evolving_11_halos = ( mask_f1_evolved_from_previous ) & ( self.mask_f1_in_a_merging == False )
print('11 mapping', len(self.mask_f0_evolving_11_halos.nonzero()[0]), len(self.mask_f1_evolving_11_halos.nonzero()[0]))
print('merging systems', len(self.f1_id_with_multiple_progenitors))
#for dede in self.f1_id_with_multiple_progenitors :
#sel = self.f0['/halo_properties/desc_id'].value == dede
#print( dede )
#print('desc id', self.f0['/halo_properties/desc_id'].value[sel])
#print('id', self.f0['/halo_properties/id'].value[sel])
#print('pid', self.f0['/halo_properties/pid'].value[sel])
#print('mvir', self.f0['/halo_properties/mvir'].value[sel])
#print('futur merger mmpid', self.f0['/halo_properties/Future_merger_MMP_ID'].value[sel])
#print('time to future merger', self.f0['/halo_properties/Time_to_future_merger'].value[sel])
#print('Ms', self.f0['/emerge_data/stellar_mass'].value[sel])
#print('SFR',self.f0['/emerge_data/star_formation_rate'].value[sel])
#print('mCIM',self.f0['/emerge_data/m_icm'].value[sel])
#print('=================================')
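# Editor's note (illustrative sketch of the mask logic above, not original code):
# with f0.desc_id = [10, 11, 11, 12] and f1.id = [10, 11, 13], the descendants
# detected at the next scale are {10, 11}; f1 halo 13 is therefore "new",
# f1 halo 11 has two progenitors and is flagged as a merging system, f1 halo 10
# is a one-to-one (1:1) evolving halo, and the f0 halo with desc_id = 12 is lost
# because its descendant is not detected in f1.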
def init_new_quantities(self):
"""
Quantities computed for every halos are initialized to 0
* mvir_dot
* rvir_dot
* dMdt
* dmdt_star
* dmdt_star_accretion
* stellar_mass
* star_formation_rate
* m_icm
* t_dynamical [in years]
Along with the iteration, these quantities will be updated accordingly
"""
self.mvir_dot = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.rvir_dot = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.dMdt = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.dmdt_star = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.dmdt_star_accretion=n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.stellar_mass = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.star_formation_rate = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.m_icm = n.zeros_like(self.f1['/halo_properties/mvir'].value)
self.t_dynamical = t_dyn( self.f1['/halo_properties/rvir'].value, self.f1['/halo_properties/mvir'].value )
def write_results(self):
"""
After computing all quantities, you need to write the results in the h5 file.
"""
emerge_data = self.f1.create_group('emerge_data')
#emerge_data.attrs['f_lost'] = f_lost
ds = emerge_data.create_dataset('mvir_dot', data = self.mvir_dot )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$d M_{vir} / dt$'
ds = emerge_data.create_dataset('rvir_dot', data = self.rvir_dot )
ds.attrs['units'] = r'$h^{-1} kpc / yr$'
ds.attrs['long_name'] = r'$d r_{vir} / dt$'
ds = emerge_data.create_dataset('dMdt', data = self.dMdt )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$\langle d M / dt \rangle$ (4)'
ds = emerge_data.create_dataset('dmdt_star', data = self.dmdt_star )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$ d m_* / dt $ (1)'
ds = emerge_data.create_dataset('dmdt_star_accretion', data =
self.dmdt_star_accretion )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$ d m_{acc} / dt $ '
ds = emerge_data.create_dataset('star_formation_rate', data =
self.star_formation_rate )
ds.attrs['units'] = r'$h^{-1} M_\odot / yr$'
ds.attrs['long_name'] = r'$ d m / dt $ '
ds = emerge_data.create_dataset('stellar_mass', data = self.stellar_mass )
ds.attrs['units'] = r'$h^{-1} M_\odot $'
ds.attrs['long_name'] = r'$ m_* $ (11)'
ds = emerge_data.create_dataset('m_icm', data = self.m_icm )
ds.attrs['units'] = r'$h^{-1} M_\odot $'
ds.attrs['long_name'] = r'$ m_{ICM}$ '
self.f0.close()
self.f1.close()
print("Results written")
def compute_qtys_new_halos(self):
"""
Creates a new galaxy along with the new halo.
Integrates since the start of the Universe.
Updates the initiated quantities with the values of interest.
"""
# evaluate equation (4)
self.mvir_dot[self.mask_f1_new_halos] = self.f1['/halo_properties/mvir'].value[self.mask_f1_new_halos] / self.f1.attrs['age_yr']
self.rvir_dot[self.mask_f1_new_halos] = self.f1['/halo_properties/rvir'].value[self.mask_f1_new_halos] / self.f1.attrs['age_yr']
# no pseudo evolution correction
self.dMdt[self.mask_f1_new_halos] = self.mvir_dot[self.mask_f1_new_halos]
# evaluate equation (1)
self.dmdt_star[self.mask_f1_new_halos] = model.f_b * self.dMdt[self.mask_f1_new_halos] * model.epsilon(self.f1['/halo_properties/mvir'].value[self.mask_f1_new_halos], self.f1.attrs['redshift'] * n.ones_like(self.f1['/halo_properties/mvir'].value[self.mask_f1_new_halos]))
# evaluate accretion: 0 in this first step
# self.dmdt_star_accretion[self.mask_f1_new_halos] = n.zeros_like(self.dmdt_star[self.mask_f1_new_halos])
# evaluate equation (11)
f_lost = f_loss(self.f1.attrs['age_yr']) # equation (12)
# evaluate stellar mass
self.star_formation_rate[self.mask_f1_new_halos] = self.dmdt_star[self.mask_f1_new_halos] * (1. - f_lost) + self.dmdt_star_accretion[self.mask_f1_new_halos]
self.stellar_mass[self.mask_f1_new_halos] = self.star_formation_rate[self.mask_f1_new_halos] * self.f1.attrs['age_yr']
# intra-cluster mass is currently 0
# self.m_icm[self.mask_f1_new_halos] = n.zeros_like(self.stellar_mass[self.mask_f1_new_halos])
def compute_qtys_evolving_halos(self):
"""
update the quantities for evolving halos, present in f0 and f1.
masks :
* mask_f1_evolving_11_halos
* mask_f0_evolving_11_halos
subcases :
* quenching : (mvir < Mpeak) & (Mpeak_scale < f1_scale)
* case 1. ( age >= t_mpeak ) & ( age_yr < t_mpeak + t_quench)
* case 2. (age_yr >= t_mpeak + t_quench)
* stripping, case 1 : (dMdt < 0), then all mass goes to ICM, m=0, mdot=0
* stripping, case 2 : after reaching its peak mass, if M < 0.122 * Mpeak, then all mass goes to ICM, m=0, mdot=0
"""
# computing dMdt for the halo
self.mvir_dot[self.mask_f1_evolving_11_halos] = (self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos]-self.f0['/halo_properties/mvir'].value[self.mask_f0_evolving_11_halos]) / (self.f1.attrs['age_yr'] - self.f0.attrs['age_yr'])
self.rvir_dot[self.mask_f1_evolving_11_halos] = (self.f1['/halo_properties/rvir'].value[self.mask_f1_evolving_11_halos]-self.f0['/halo_properties/rvir'].value[self.mask_f0_evolving_11_halos]) / (self.f1.attrs['age_yr'] - self.f0.attrs['age_yr'])
c = self.f1['/halo_properties/rvir'].value[self.mask_f1_evolving_11_halos] / self.f1['/halo_properties/rs'].value[self.mask_f1_evolving_11_halos]
rho_nfw = self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos] / (self.f1['/halo_properties/rs'].value[self.mask_f1_evolving_11_halos]**3. * 4. * n.pi * c * (1+c)**2. * (n.log(1.+c)-c/(1.+c)))
pseudo_evolution_correction = 4.*n.pi*self.f1['/halo_properties/rvir'].value[self.mask_f1_evolving_11_halos] *self.f1['/halo_properties/rvir'].value[self.mask_f1_evolving_11_halos] * self.rvir_dot[self.mask_f1_evolving_11_halos] * rho_nfw
self.dMdt[self.mask_f1_evolving_11_halos] = self.mvir_dot[self.mask_f1_evolving_11_halos] - pseudo_evolution_correction
# initialize the ICM mass to the previous value
self.m_icm[self.mask_f1_evolving_11_halos] = self.f0['/emerge_data/m_icm'].value[self.mask_f0_evolving_11_halos]
# Direct estimates of stellar mass and SFR
self.dmdt_star[self.mask_f1_evolving_11_halos] = model.f_b * self.dMdt[self.mask_f1_evolving_11_halos] * model.epsilon(self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos], self.f1.attrs['redshift'] * n.ones_like(self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos]))
# evaluate accretion: 0 in this first step
# dmdt_star_accretion = n.zeros_like(dmdt_star[self.mask_f1_evolving_11_halos])
# evaluate equation (11)
f_lost = f_loss(self.f1.attrs['age_yr']-self.f0.attrs['age_yr'])
# evaluate stellar mass
self.star_formation_rate[self.mask_f1_evolving_11_halos] = self.dmdt_star[self.mask_f1_evolving_11_halos] * (1. - f_lost) + self.dmdt_star_accretion[self.mask_f1_evolving_11_halos]
self.stellar_mass[self.mask_f1_evolving_11_halos] = self.star_formation_rate[self.mask_f1_evolving_11_halos] * (self.f1.attrs['age_yr']-self.f0.attrs['age_yr']) + self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos]
# Variations due to stripping, merging and quenching
# quenching
quenching = (self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos] < self.f1['/halo_properties/Mpeak'].value[self.mask_f1_evolving_11_halos]) & (self.f1['/halo_properties/Mpeak_scale'].value[self.mask_f1_evolving_11_halos] < float(self.f1_scale))
t_quench = tau_quenching( self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos], self.t_dynamical[self.mask_f1_evolving_11_halos] )
t_mpeak = cosmoMD.age( 1. / self.f1['/halo_properties/Mpeak_scale'].value[self.mask_f1_evolving_11_halos] - 1. ).to(u.yr).value
# case 1. mdot = mdot at tpeak
quench_1 = (quenching) & (self.f1.attrs['age_yr'] >= t_mpeak ) & ( self.f1.attrs['age_yr'] < t_mpeak + t_quench)
if len(quench_1.nonzero()[0])>0:
print("quenching1")
self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_1] = n.ones_like(self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_1])*self.f0['/emerge_data/star_formation_rate'].value[self.mask_f0_evolving_11_halos][quench_1]
self.stellar_mass[self.mask_f1_evolving_11_halos][quench_1] = self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_1] * (self.f1.attrs['age_yr']-self.f0.attrs['age_yr']) + self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos][quench_1]
# case 2. m dot =0
quench_2 = (quenching) &(self.f1.attrs['age_yr'] >= t_mpeak + t_quench )
if len(quench_2.nonzero()[0])>0:
print("quenching2")
self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_2] = n.zeros_like(self.star_formation_rate[self.mask_f1_evolving_11_halos][quench_2])
self.stellar_mass[self.mask_f1_evolving_11_halos][quench_2] = self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos][quench_2]
# stripping, case 1
# negative growth value self.dMdt[self.mask_f1_evolving_11_halos] => 0
stripping_1 = (self.dMdt[self.mask_f1_evolving_11_halos] < 0)
# stripping, case 2
# after reaching its peak mass,
# if M < 0.122 * Mpeak, all mass goes to ICM, m=0, mdot=0
stripping_2 = (self.f1['/halo_properties/mvir'].value[self.mask_f1_evolving_11_halos] < 0.122*self.f1['/halo_properties/Mpeak'].value[self.mask_f1_evolving_11_halos]) & (self.f1['/halo_properties/Mpeak_scale'].value[self.mask_f1_evolving_11_halos] < float(self.f1_scale))
# both cases together
stripping = (stripping_1) | (stripping_2)
if len(stripping.nonzero()[0])>0:
print("stripping")
self.m_icm[self.mask_f1_evolving_11_halos][stripping] += self.f0['/emerge_data/stellar_mass'].value[self.mask_f0_evolving_11_halos][stripping]
self.stellar_mass[self.mask_f1_evolving_11_halos][stripping] = n.zeros_like(self.stellar_mass[self.mask_f1_evolving_11_halos][stripping])
self.star_formation_rate[self.mask_f1_evolving_11_halos][stripping] = n.zeros_like(self.star_formation_rate[self.mask_f1_evolving_11_halos][stripping])
def get_position_merger_players(self, merger_id):
"""
Given the identifier of the merger
:param merger_id: id of the parent halo of the merger at the later time. One integer.
Outputs the position on the f0 and f1 arrays of the hosts and of the merging systems
returns :
position_f1_host [int], position_f0_host [int], position_f0_merging [list]
"""
# about the host at t1
#print(merger_id)
mask_f1_host = (self.f1['/halo_properties/id'].value == merger_id)
#print(mask_f1_host)
position_f1_host = self.positions_f1[mask_f1_host]
#print(position_f1_host)
# about the host and merging subhalos at t0
mask_f0_all = (self.f0['/halo_properties/desc_id'].value == merger_id)
#print(mask_f0_all)
id_f0_all = self.f0['/halo_properties/id'].value[mask_f0_all]
#print(id_f0_all)
# the host at t1 is flagged at t0 as the most massive progenitor
#print(n.unique(self.f0['/halo_properties/Future_merger_MMP_ID'].value[mask_f0_all]))
#print(n.in1d(id_f0_all, n.unique(self.f0['/halo_properties/Future_merger_MMP_ID'].value[mask_f0_all])))
#print(id_f0_all[n.in1d(id_f0_all, n.unique(self.f0['/halo_properties/Future_merger_MMP_ID'].value[mask_f0_all]))])
f0_host_id = id_f0_all[n.in1d(id_f0_all, n.unique(self.f0['/halo_properties/Future_merger_MMP_ID'].value[mask_f0_all]))][0]
mask_f0_host = (mask_f0_all) & (self.f0['/halo_properties/id'].value == f0_host_id)
mask_f0_merging = (mask_f0_all) & (self.f0['/halo_properties/id'].value != f0_host_id)
position_f0_host = self.positions_f0[mask_f0_host]
position_f0_merging = self.positions_f0[mask_f0_merging]
return position_f1_host, position_f0_host, position_f0_merging
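# Editor's note (sketch): for merger_id = 42 with three f0 progenitors of ids
# [7, 8, 9] whose Future_merger_MMP_ID entries are all 8, halo 8 is treated as
# the host (most-massive progenitor) and halos 7 and 9 as the merging satellites;
# the function returns the position of the descendant in the f1 table and the
# positions of the host and of the satellites in the f0 table.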
def merging_single_system(self, merger_id):
"""
:param merger_id: id of the parent halo of the merger at the later time. One integer.
Merging goes as follows. Assume escape fraction: f_esc = 0.388, then
* m_star_satellite x f_esc goes to m_host_ICM
* m_star_satellite x (1-f_esc) goes to m_star_host
returns :
parameters of the emerge model of the galaxies undergoing merger at this point.
[ mvir_dot, rvir_dot, dMdt, dmdt_star, dmdt_star_accretion, stellar_mass, star_formation_rate, m_icm ]
"""
position_f1_host, position_f0_host, position_f0_merging = self.get_position_merger_players(merger_id)
mvir_dot = (self.f1['/halo_properties/mvir'].value[position_f1_host]-self.f0['/halo_properties/mvir'].value[position_f0_host]) / (self.f1.attrs['age_yr'] - self.f0.attrs['age_yr'])
rvir_dot = (self.f1['/halo_properties/rvir'].value[position_f1_host]-self.f0['/halo_properties/rvir'].value[position_f0_host]) / (self.f1.attrs['age_yr'] - self.f0.attrs['age_yr'])
c = self.f1['/halo_properties/rvir'].value[position_f1_host] / self.f1['/halo_properties/rs'].value[position_f1_host]
rho_nfw = self.f1['/halo_properties/mvir'].value[position_f1_host] / (self.f1['/halo_properties/rs'].value[position_f1_host]**3. * 4. * n.pi * c * (1+c)**2. * (n.log(1.+c)-c/(1.+c)))
pseudo_evolution_correction = 4.*n.pi*self.f1['/halo_properties/rvir'].value[position_f1_host] *self.f1['/halo_properties/rvir'].value[position_f1_host] * rvir_dot * rho_nfw
dMdt = mvir_dot - pseudo_evolution_correction
# initialize the ICM mass to the previous value
m_icm = self.f0['/emerge_data/m_icm'].value[position_f0_host]
# Direct estimates of stellar mass and SFR
dmdt_star = model.f_b * dMdt * model.epsilon(self.f1['/halo_properties/mvir'].value[position_f1_host], self.f1.attrs['redshift'] * n.ones_like(self.f1['/halo_properties/mvir'].value[position_f1_host]))
# evaluate accretion: 0 in this first step
dmdt_star_accretion = n.zeros_like(dmdt_star)
# evaluate equation (11)
f_lost = f_loss(self.f1.attrs['age_yr']-self.f0.attrs['age_yr'])
# evaluate stellar mass
star_formation_rate = dmdt_star * (1. - f_lost) + dmdt_star_accretion
stellar_mass = star_formation_rate * (self.f1.attrs['age_yr']-self.f0.attrs['age_yr']) + self.f0['/emerge_data/stellar_mass'].value[position_f0_host]
# merging
# m_star_sat x f_esc => m_host_ICM
# m_star_sat x (1-f_esc) => m_star_host
# f_esc = 0.388
#Time_to_future_merger: Time (in Gyr) until the given halo merges into a larger halo. (-1 if no future merger happens)
#Future_merger_MMP_ID: most-massive progenitor of the halo into which the given halo merges. (-1 if the main progenitor of the future merger halo does not exist at the given scale factor.)
stellar_mass += (1.-0.388)*n.sum(self.f0['/emerge_data/stellar_mass'].value[position_f0_merging])
m_icm += 0.388*n.sum(self.f0['/emerge_data/stellar_mass'].value[position_f0_merging])
return mvir_dot, rvir_dot, dMdt, dmdt_star, dmdt_star_accretion, stellar_mass, star_formation_rate, m_icm
def merging_set_of_system(self, merger_ids):
"""
Loops over self.merging_single_system over a list of ids and returns a merged output array
"""
return n.hstack(( n.array([self.merging_single_system(merger_id) for merger_id in merger_ids]) ))
def compute_qtys_merging_halos(self):
"""
computes all quantities for merging halos
"""
pool = Pool(processes=12)
self.out3 = pool.map(self.merging_set_of_system, self.f1['/halo_properties/id'].value[ self.mask_f1_in_a_merging ])
#self.out3 = p.starmap(self.merging_set_of_system, self.f1['/halo_properties/id'].value[ self.mask_f1_in_a_merging ])
"""
if __name__ == '__main__':
import EmergeIterate
iterate = EmergeIterate.EmergeIterate(22, 'MD10')
iterate.open_snapshots()
iterate.map_halos_between_snapshots()
iterate.init_new_quantities()
if len((iterate.mask_f1_new_halos).nonzero()[0]) > 0 :
# computes the new quantities
pool = Pool(processes=12)
DATA = n.transpose([iterate.f1['/halo_properties/mvir'].value[iterate.mask_f1_new_halos], iterate.f1['/halo_properties/rvir'].value[iterate.mask_f1_new_halos], iterate.f1.attrs['redshift']*n.ones_like(iterate.f1['/halo_properties/mvir'].value[iterate.mask_f1_new_halos]), iterate.f1.attrs['age_yr']*n.ones_like(iterate.f1['/halo_properties/mvir'].value[iterate.mask_f1_new_halos]) ])
out = pool.starmap(EmergeIterate.compute_qtys_new_halos_pk, DATA)
mvir_dot, rvir_dot, dMdt, dmdt_star, star_formation_rate, stellar_mass = out
#, f_b=model.f_b, epsilon = model.epsilon(mvir, redshift * n.ones_like(mvir)), f_lost = f_loss(iterate.f1.attrs['age_yr']))
# updates the initiated array with the results
iterate.mvir_dot[iterate.mask_f1_new_halos] = mvir_dot
iterate.rvir_dot[iterate.mask_f1_new_halos] = rvir_dot
iterate.dMdt[iterate.mask_f1_new_halos] = dMdt
iterate.dmdt_star[iterate.mask_f1_new_halos] = dmdt_star
iterate.star_formation_rate[iterate.mask_f1_new_halos] = star_formation_rate
iterate.stellar_mass[iterate.mask_f1_new_halos] = stellar_mass
#iterate.compute_qtys_new_halos()
if len((iterate.mask_f0_evolving_11_halos).nonzero()[0]) > 0 :
iterate.compute_qtys_evolving_halos()
if len(iterate.mask_f1_in_a_merging.nonzero()[0]) > 0 :
iterate.compute_qtys_merging_halos()
# iterate.write_results()
"""
``` |
{
"source": "JohanComparat/pySU",
"score": 3
} |
#### File: pySU/absorber/select_pairs.py
```python
from scipy.interpolate import interp1d
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
from matplotlib.patches import Polygon
from astropy import constants
import astropy.io.fits as fits
from matplotlib import gridspec
import subprocess
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from sklearn.neighbors import BallTree
from astropy.table import Table, Column
import pymangle
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams.update({'font.size': 14})
#
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05)
match_radius_arcmin = interp1d( np.arange(0.001,1.5,0.001), cosmo.arcsec_per_kpc_comoving(np.arange(0.001,1.5,0.001))*1200*u.kpc/(60*u.arcsec) )
deg_to_rad = np.pi / 180.
nl = lambda selection : len(selection.nonzero()[0])
# define paths
env = 'HOME'
p2_spall = os.path.join(os.environ[env], 'data2/firefly/v1_1_0/v5_13_0/catalogs/spAll-v5_13_0.fits')
p_2_out = os.path.join(os.environ[env], 'wwwDir/stuff/catalogue_qso_v5_13_0.fits')
p2_spall = os.path.join(os.environ[env], 'data2/firefly/v1_1_0/26/catalogs/specObj-SDSS-dr12.fits')
p_2_out = os.path.join(os.environ[env], 'wwwDir/stuff/catalogue_qso_26.fits')
p2_codex_bcg = os.path.join(os.environ[env], 'hegcl/SPIDERS/mastercatalogue_FINAL_CODEXID.fits')
spall = fits.open(p2_spall)#[1].data
codex = fits.open(p2_codex_bcg)[1].data
#ok = (spall[1].data['Z']>0.3) & (spall[1].data['Z_ERR'] > 0 ) & (spall[1].data['Z_ERR']<spall[1].data['Z']) & (spall[1].data['ZWARNING']==0) # & (spall[1].data['CLASS']=="QSO")
ok = (spall[1].data['Z']>0.3) & (spall[1].data['Z_ERR'] > 0 ) & (spall[1].data['Z_ERR']<spall[1].data['Z']) & (spall[1].data['ZWARNING']==0) & (spall[1].data['CLASS']=="QSO")
dr16_rough = ( spall[1].data['PLUG_DEC'] < 15 ) & (spall[1].data['PLUG_RA']>100 ) & (spall[1].data['PLUG_RA']<280 )
select = (dr16_rough==False) & (ok)
plug_ra = spall[1].data['PLUG_RA'][select]
plug_dec = spall[1].data['PLUG_DEC'][select]
Z_dr16 = spall[1].data['Z'][select]
print(len(Z_dr16))
plate = spall[1].data['PLATE'][select]
mjd = spall[1].data['MJD'][select]
fiberid = spall[1].data['FIBERID'][select]
#clu_coord = deg_to_rad * np.array([codex['DEC_OPT'], codex['RA_OPT']]).T
#Tree_Cluster_Cat = BallTree(clu_coord, metric='haversine')
CAT_coord = deg_to_rad * np.transpose([plug_dec, plug_ra])
CAT_Tree = BallTree(CAT_coord, metric='haversine')
# around each cluster, query
def get_data(id_c = 0, N_RADIUS = 3):
clu_coord = deg_to_rad * np.transpose([np.array([codex['DEC_OPT'][id_c]]), np.array([codex['RA_OPT'][id_c]])])
radius = deg_to_rad * codex['R200C_DEG'][id_c]
indexes, distances = CAT_Tree.query_radius(clu_coord, r = radius * N_RADIUS, return_distance = True, sort_results = True)
relevant = (Z_dr16[indexes[0]] > codex['SCREEN_CLUZSPEC'][id_c] + 0.01)
Z_out = Z_dr16[indexes[0][relevant]]
distance_out = distances[0][relevant] / deg_to_rad
plate_out = plate [indexes[0][relevant]]
mjd_out = mjd [indexes[0][relevant]]
fiberid_out = fiberid [indexes[0][relevant]]
plug_ra_out = plug_ra [indexes[0][relevant]]
plug_dec_out = plug_dec[indexes[0][relevant]]
cluster_ID = np.ones_like(Z_out).astype('str')
cluster_ID[:] = codex['CLUS_ID'][id_c]
cluster_ra = codex['RA_OPT'][id_c] * np.ones_like(Z_out)
cluster_dec = codex['DEC_OPT'][id_c] * np.ones_like(Z_out)
cluster_z = codex['SCREEN_CLUZSPEC'][id_c] * np.ones_like(Z_out)
cluster_r200c_deg = codex['R200C_DEG'][id_c] * np.ones_like(Z_out)
cluster_KT = codex['KT'][id_c] * np.ones_like(Z_out)
DATA_i = np.transpose([
cluster_ID ,
cluster_ra ,
cluster_dec ,
cluster_z ,
cluster_r200c_deg,
cluster_KT ,
Z_out ,
distance_out ,
plate_out ,
mjd_out ,
fiberid_out ,
plug_ra_out ,
plug_dec_out
])
print(id_c, DATA_i.shape)
return DATA_i
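# Editor's note: BallTree with metric='haversine' expects coordinates as
# [latitude, longitude] in radians (hence the [DEC, RA] ordering and the
# deg_to_rad factor above) and returns great-circle distances in radians, so both
# the query radius (N_RADIUS x R200c, given in degrees) and the returned distances
# are converted with the same deg_to_rad factor.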
d0 = get_data(id_c = 0)
for id_c in np.arange(1, len(codex), 1) :
d1 = get_data(id_c)
d0 = np.vstack((d0,d1))
t = Table()
t.add_column( Column(name="cluster_ID" , data = d0.T[0] ) )
t.add_column( Column(name="cluster_ra" , unit='deg', data = d0.T[1].astype('float') ) )
t.add_column( Column(name="cluster_dec" , unit='deg', data = d0.T[2].astype('float') ) )
t.add_column( Column(name="cluster_z" , data = d0.T[3].astype('float') ) )
t.add_column( Column(name="cluster_r200c_deg" , data = d0.T[4].astype('float') ) )
t.add_column( Column(name="cluster_KT" , data = d0.T[5].astype('float') ) )
t.add_column( Column(name="galaxy_z" , data = d0.T[6].astype('float') ) )
t.add_column( Column(name="angular_separation", unit='deg' , data = d0.T[7].astype('float') ) )
t.add_column( Column(name="plate" , data = d0.T[8].astype('int') ) )
t.add_column( Column(name="mjd" , data = d0.T[9].astype('int') ) )
t.add_column( Column(name="fiberid" , data = d0.T[10].astype('int') ) )
t.add_column( Column(name="RA" , data = d0.T[11].astype('float') ) )
t.add_column( Column(name="DEC" , data = d0.T[12].astype('float') ) )
t.write(p_2_out, overwrite=True)
```
#### File: galaxy/bin_eBOSS_ELG/convert_stack_2_qmost_template.py
```python
import os
import numpy as np
import astropy
from astropy.table import Table
from astropy.io import fits
import astropy.units as u
from os.path import join
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as n
# speclite
import speclite.filters
from speclite.filters import FilterConvolution
from speclite.filters import ab_reference_flux
### define the required columns for a spectral template
minimum_required_keywords = ["LAMBDA", "FLUX_DENSITY"]
allowed_units = [
u.Unit("Angstrom", format="fits"),
u.Unit("erg / (Angstrom cm2 s)", format="fits"),
]
output1_directory = join( os.environ['HOME'], "SDSS/stacks" )
input1 = join(output1_directory,"X_AGN", "DR16_ELG-stitched-stack.fits")
input1 = join(output1_directory,"X_AGN", "ROSAT_AGNT2-stitched-stack.fits")
input1 = join(output1_directory,"X_AGN", "ROSAT_AGNT1-stitched-stack.fits")
input1 = join(output1_directory,"X_AGN", "ROSAT_AGNT1-DR16QSO-stitched-stack.fits")
input1 = join(output1_directory,"X_AGN", "DR16LRG-stitched-stack.fits")
def write_qmost_template(input1):
output1_png = input1[:-5] + "-qmost-template.png"
output1_pdf = input1[:-5] + "-qmost-template.pdf"
output1_tpl = input1[:-5] + "-qmost-template.fits"
print(f"\n*** the following files will be accessed with this tutorial:")
print(f"\t- input: {input1}")
print(f"\t- output: {output1_png}")
print(f"\t- output: {output1_pdf}")
hdu1 = fits.open(input1)
print(f"\n*** loading FITS file: {input1}")
print(fits.info(input1))
if hdu1[0].header.get("EXTEND", False):
# FITS tables should have the bintable as an extension
found_axes1 = hdu1[1].columns.info(output=False)
data1 = hdu1[1].data
else:
# otherwise read the table directly from the primary HDU
found_axes1 = hdu1[0].columns.info(output=False)
data1 = hdu1[0].data
print("\n*** found the following axes:")
for name,unit in zip(found_axes1['name'], found_axes1['unit']):
print(f"\t{name} ({unit})")
if not unit in allowed_units:
print(f"\t\t***** WARNING: {unit} was not in the allowed units: {allowed_units}!")
### define bounds
wavelength_min = 3000
wavelength_max = 11000
### arrays below and above the data bounds
wavelength_step = np.diff(data1['wavelength']).mean()
print("min: ", data1['wavelength'].min())
print("max: ", data1['wavelength'].max())
print("step: ", wavelength_step)
### define a new table, based on the old one but with appended data
if data1['wavelength'].max() < wavelength_max:
start = data1['wavelength'].max() + wavelength_step
stop = wavelength_max + wavelength_step
wave_red = np.arange(start=start, stop=stop, step=wavelength_step)
print("will append the following to the 'red' wavelength spectrum: ", wave_red)
nrows1 = len(data1['wavelength'])
nrows2 = len(wave_red)
nrows = nrows1 + nrows2
wavelength = n.hstack((data1['wavelength'], wave_red))
flux_density = n.hstack((data1['medianStack'], n.ones_like(wave_red)*data1['medianStack'][-1]))
else:
wavelength = data1['wavelength']
flux_density = data1['medianStack']
# where the filters are
path = os.path.join(
os.environ['GIT_AGN_MOCK'],
'data',
'photometry',
'filters')
Tab_HSC_r = Table.read(os.path.join(path, 'hsc', 'r_HSC.txt'), format='ascii')
ok = (
Tab_HSC_r['col1'] > 5190) & (
Tab_HSC_r['col1'] < 7300) & (
Tab_HSC_r['col2'] > 0)
Tab_HSC_r['col2'][(ok == False)] = 0.
Pass_HSC_r = speclite.filters.FilterResponse(
wavelength=Tab_HSC_r['col1'] * u.AA,
response=Tab_HSC_r['col2'],
meta=dict(
group_name='HSC',
band_name='r'))
hsc_r_filter = speclite.filters.load_filters('HSC-r')
# redshift=0.0
ll = wavelength * u.AA
flambda = flux_density * 1e-17 * u.erg * u.cm**(-2) * u.s**(-1) * u.AA**(-1)
# ll_rf = ll / (1 + redshift)
ABmag_sdss = hsc_r_filter.get_ab_magnitudes(flambda, ll )['HSC-r'][0]
##
def rescale_by(r_mag_out, redshift): return 10 ** ((r_mag_out + 48.6) / -2.5) / 10 ** ( (ABmag_sdss + 48.6) / -2.5)
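    # rescale_by is the multiplicative flux factor that moves the spectrum from its
    # synthesised HSC-r AB magnitude (ABmag_sdss) to the requested magnitude r_mag_out,
    # i.e. 10**(-0.4 * (r_mag_out - ABmag_sdss)) via the AB convention m = -2.5*log10(f) - 48.6.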
# rescaling values
rsbs = rescale_by(14, 0.0)
### save to new file
hdu_cols = fits.ColDefs([
fits.Column(name="LAMBDA", format='D', unit='Angstrom', array = wavelength ),
fits.Column(name="FLUX_DENSITY", format='D', unit='erg / (Angstrom * cm * cm * s)', array = rsbs * flux_density * 1e-17 )
])
hdu = fits.BinTableHDU.from_columns(hdu_cols)
hdu.name = 'SPECTRUM'
hdu.header['MAG'] = 14
outf = fits.HDUList([fits.PrimaryHDU(), hdu]) # , ])
outf.writeto(output1_tpl, overwrite=True)
print(output1_tpl, 'written')#
fig1 = plt.figure(10)
for subplot in [211, 212]:
plt.subplot(subplot)
label1 = os.path.basename(input1)[:-5]
plt.plot(hdu.data['LAMBDA'], hdu.data['FLUX_DENSITY'], label=label1)
if subplot == 211:
plt.xlim(None, 9500)
# add HRS overlay
ys = plt.gca().get_ylim()
plt.fill_betweenx(ys, x1=[3700, 3700], x2=[5000, 5000], alpha=0.1, color='b')
plt.fill_betweenx(ys, x1=[5000, 5000], x2=[7000, 7000], alpha=0.1, color='g')
plt.fill_betweenx(ys, x1=[7000, 7000], x2=[9500, 9500], alpha=0.1, color='r')
plt.xscale('log')
plt.yscale('log')
# plt.fill_betweenx(ys, x1=[3926,3926],x2=[4355, 4355], alpha=0.1, color='b')
# plt.fill_betweenx(ys, x1=[5160,5160],x2=[5730, 5730], alpha=0.1, color='g')
# plt.fill_betweenx(ys, x1=[6100,6100],x2=[6790, 6790], alpha=0.1, color='r')
else:
plt.xlabel(f"{found_axes1['name'][0]} ({found_axes1['unit'][0]})")
#plt.ylabel(f"{found_axes1['name'][1]} ({found_axes1['unit'][1]})")
plt.ylabel(f"{found_axes1['name'][1]} (erg/s/cm**2/Angstrom)") # manually modified, for shorter length
plt.yscale('log')
plt.xlim(3700, 9500)
# create legend and show plot
#plt.legend()
plt.show()
fig1.savefig(output1_png, bbox_inches='tight')
# fig1.savefig(output1_pdf, bbox_inches='tight')
plt.clf()
input1 = join(output1_directory,"X_AGN", "ROSAT_AGNT2-highZ-stitched-stack.fits")
write_qmost_template(input1)
input1 = join(output1_directory,"X_AGN", "DR16_ELG-stitched-stack.fits")
write_qmost_template(input1)
input1 = join(output1_directory,"X_AGN", "ROSAT_AGNT2-stitched-stack.fits")
write_qmost_template(input1)
input1 = join(output1_directory,"X_AGN", "ROSAT_AGNT1-stitched-stack.fits")
write_qmost_template(input1)
input1 = join(output1_directory,"X_AGN", "ROSAT_AGNT1-DR16QSO-stitched-stack.fits")
write_qmost_template(input1)
input1 = join(output1_directory,"X_AGN", "DR16LRG-stitched-stack.fits")
write_qmost_template(input1)
``` |
{
"source": "johanDDC/ttax",
"score": 3
} |
#### File: ttax/ttax/base_class.py
```python
from typing import List, Union
import numpy as np
import jax.numpy as jnp
import flax
class TTBase:
"""Represents the base for both `TT-Tensor` and `TT-Matrix` (`TT-object`).
Includes some basic routines and properties.
"""
def __mul__(self, other):
# We can't import ops in the beginning since it creates cyclic dependencies.
from ttax import ops
return ops.multiply(self, other)
def __matmul__(self, other):
# We can't import ops in the beginning since it creates cyclic dependencies.
from ttax import ops
return ops.matmul(self, other)
def __add__(self, other):
# We can't import ops in the beginning since it creates cyclic dependencies.
from ttax import ops
return ops.add(self, other)
def __rmul__(self, other):
# We can't import ops in the beginning since it creates cyclic dependencies.
from ttax import ops
return ops.multiply(self, other)
@property
def axis_dim(self):
"""Get the position of mode axis in `TT-core`.
It could differ according to the batch shape.
:return: index
:rtype: int
"""
return self.num_batch_dims + 1
@property
def batch_shape(self):
"""Get the list representing the shape of the batch of `TT-object`.
:return: batch shape
:rtype: list
"""
return self.tt_cores[0].shape[:self.num_batch_dims]
@property
def tt_ranks(self):
"""Get `TT-ranks` of the `TT-object` in amount of ``ndim + 1``.
The first `TT-rank` and the last one equals to `1`.
:return: `TT-ranks`
:rtype: list
"""
ranks = [c.shape[self.num_batch_dims] for c in self.tt_cores]
ranks.append(self.tt_cores[-1].shape[-1])
return ranks
@property
def ndim(self):
"""Get the number of dimensions of the `TT-object`.
:return: dimensions number
:rtype: int
"""
return len(self.tt_cores)
@property
def dtype(self):
"""Represents the `dtype` of elements in `TT-object`.
:return: `dtype` of elements
:rtype: dtype
"""
return self.tt_cores[0].dtype
@property
def batch_loc(self):
"""Represents the batch indexing for `TT-object`.
Wraps `TT-object` by special `BatchIndexing` class
with overloaded ``__getitem__`` method.
Example:
``tt.batch_loc[1, :, :]``
"""
return BatchIndexing(self)
@flax.struct.dataclass
class TT(TTBase):
"""Represents a `TT-Tensor` object as a list of `TT-cores`.
`TT-Tensor` cores take form (r_l, n, r_r), where
- r_l, r_r are `TT-ranks`
- n makes `TT-Tensor` shape
"""
tt_cores: List[jnp.array]
@property
def shape(self):
"""Get the tuple representing the shape of `TT-Tensor`.
In batch case includes the shape of the batch.
:return: `TT-Tensor` shape with batch shape
:rtype: tuple
"""
no_batch_shape = [c.shape[self.axis_dim] for c in self.tt_cores]
return tuple(list(self.batch_shape) + no_batch_shape)
@property
def num_batch_dims(self):
"""Get the number of batch dimensions for batch of `TT-Tensors`.
:return: number of batch dimensions
:rtype: int
"""
return len(self.tt_cores[0].shape) - 3
@property
def is_tt_matrix(self):
"""Determine whether the object is a `TT-Matrix`.
:return: `True` if `TT-Matrix`, `False` if `TT-Tensor`
:rtype: bool
"""
return False
@property
def raw_tensor_shape(self):
"""Get the tuple representing the shape of `TT-Tensor`.
In batch case does not include the shape of the batch.
:return: `TT-Tensor` shape
:rtype: tuple
"""
return [c.shape[self.axis_dim] for c in self.tt_cores]
def __str__(self):
"""Creates a string describing TT-Tensor.
:return: TT-Tensor description
:rtype: string
"""
if self.num_batch_dims == 0:
s = "TT-Tensor of shape {0} and TT-ranks {1}"
s = s.format(self.shape, self.tt_ranks)
else:
s = "Batch of {0} TT-Tensors of shape {1} and TT-ranks {2}"
s = s.format(self.batch_shape, self.raw_tensor_shape, self.tt_ranks)
s += " with {0} elements.".format(self.dtype)
return s
def __getitem__(self, slice_spec):
"""Basic indexing, returns a TT containing the specified element / slice.
Examples:
>>> a = ttax.random.tensor(rng, [2, 3, 4])
>>> a[1, :, :]
is a 2D TensorTrain 3 x 4.
>>> a[1:2, :, :]
is a 3D TensorTrain 1 x 3 x 4
"""
if len(slice_spec) != self.ndim:
raise ValueError('Expected %d indices, got %d' % (self.ndim,
len(slice_spec)))
new_tt_cores = []
remainder = None
for i in range(self.ndim):
curr_core = self.tt_cores[i]
sliced_core = curr_core[..., :, slice_spec[i], :]
if len(curr_core.shape) != len(sliced_core.shape):
# This index is specified exactly and we want to collapse this axis.
if remainder is None:
remainder = sliced_core
else:
remainder = jnp.matmul(remainder, sliced_core)
else:
if remainder is not None:
                    # Add remainder from the previously collapsed cores to the current core.
sliced_core = jnp.einsum('...ab,...bid->...aid',
remainder, sliced_core)
remainder = None
new_tt_cores.append(sliced_core)
if remainder is not None:
            # The remainder obtained from collapsing the last cores.
new_tt_cores[-1] = jnp.einsum('...aib,...bd->...aid',
new_tt_cores[-1], remainder)
remainder = None
return TT(new_tt_cores)
@flax.struct.dataclass
class TTMatrix(TTBase):
"""Represents a `TT-Matrix` object as a list of `TT-cores`.
`TT-Matrix` cores take form (r_l, n_l, n_r, r_r), where
- r_l, r_r are `TT-ranks` just as for `TT-Tensor`
- n_l, n_r make left and right shapes of `TT-Matrix` as rows and cols
"""
tt_cores: List[jnp.array]
@property
def raw_tensor_shape(self):
"""Get the lists representing left and right shapes of `TT-Matrix`.
In batch case does not include the shape of the batch.
For example if `TT-Matrix` cores are (1, 2, 3, 5) (5, 6, 7, 1)
returns (2, 6), (3, 7).
:return: `TT-Matrix` shapes
:rtype: list, list
"""
left_shape = [c.shape[self.axis_dim] for c in self.tt_cores]
right_shape = [c.shape[self.axis_dim + 1] for c in self.tt_cores]
return left_shape, right_shape
@property
def shape(self):
"""Get the tuple representing the shape of underlying dense tensor as matrix.
In batch case includes the shape of the batch.
For example if `TT-Matrix` cores are (1, 2, 3, 5) (5, 6, 7, 1)
it's shape is (12, 21).
:return: `TT-Matrix` shape in dense form with batch shape
:rtype: tuple
"""
left_shape, right_shape = self.raw_tensor_shape
no_batch_shape = [np.prod(left_shape), np.prod(right_shape)]
return tuple(list(self.batch_shape) + no_batch_shape)
@property
def num_batch_dims(self):
"""Get the number of batch dimensions for batch of `TT-Matrices.`
:return: number of batch dimensions
:rtype: int
"""
return len(self.tt_cores[0].shape) - 4
@property
def is_tt_matrix(self):
"""Determine whether the object is a `TT-Matrix`.
:return: `True` if `TT-Matrix`, `False` if `TT-Tensor`
:rtype: bool
"""
return True
def __str__(self):
"""Creates a string describing TT-Matrix.
:return: TT-Matrix description
:rtype: string
"""
if self.num_batch_dims == 0:
s = "TT-Matrix of shape {0} and TT-ranks {1}"
s = s.format(self.raw_tensor_shape, self.tt_ranks)
else:
s = "Batch of {0} TT-Matrices of shape {1} and TT-ranks {2}"
s = s.format(self.batch_shape, self.raw_tensor_shape, self.tt_ranks)
s += " with {0} elements.".format(self.dtype)
return s
def __getitem__(self, slice_spec):
"""Basic indexing, returns a TTMatrix containing the specified element / slice."""
d = self.ndim
if len(slice_spec) != 2 * d:
raise ValueError('Expected %d indices, got %d' % (2 * d, len(slice_spec)))
for i in range(d):
if isinstance(slice_spec[i], slice) != isinstance(slice_spec[d+i], slice):
raise ValueError('Elements i_%d and j_%d should be the same type, '
'instead: %s and %s.' % (i, i, slice_spec[i],
slice_spec[d+i]))
new_tt_cores = []
remainder = None
for i in range(self.ndim):
curr_core = self.tt_cores[i]
sliced_core = curr_core[..., :, slice_spec[i], slice_spec[d+i], :]
if len(curr_core.shape) != len(sliced_core.shape):
# These indices are specified exactly and we want to collapse this axis.
if remainder is None:
remainder = sliced_core
else:
remainder = jnp.matmul(remainder, sliced_core)
else:
if remainder is not None:
                    # Add remainder from the previously collapsed cores to the current core.
sliced_core = jnp.einsum('...ab,...bijd->...aijd',
remainder, sliced_core)
remainder = None
new_tt_cores.append(sliced_core)
if remainder is not None:
            # The remainder obtained from collapsing the last cores.
new_tt_cores[-1] = jnp.einsum('...aijb,...bd->...aijd',
new_tt_cores[-1], remainder)
remainder = None
return TTMatrix(new_tt_cores)
class BatchIndexing:
def __init__(self, tt):
self._tt = tt
def __getitem__(self, indices: list):
non_none_indices = [idx for idx in indices if idx is not None]
if len(non_none_indices) > self._tt.num_batch_dims:
raise ValueError('Expected %d indices, got %d' % (self._tt.num_batch_dims,
len(non_none_indices)))
new_cores = []
for core_idx in range(self._tt.ndim):
curr_core = self._tt.tt_cores[core_idx]
new_cores.append(curr_core.__getitem__(indices))
if self._tt.is_tt_matrix:
return TTMatrix(new_cores)
else:
return TT(new_cores)
TTTensOrMat = Union[TT, TTMatrix]
``` |
{
"source": "JohanDevv/Word-Scrambler",
"score": 4
} |
#### File: Word-Scrambler/src/main.py
```python
import profiles as encoding
#-- Functions --#
#-- Encode Function --#
def encode(word):
encoded = ''
word = str(word)
for char in word:
encoded += str(encoding.encode[str(char)])
return encoded
#-- Decode Function --#
def decode(word):
decoded = ''
word = str(word)
for char in word:
decoded += str(encoding.decode[str(char)])
return decoded
#-- Blank Function --#
def blank():
print(" ")
#-- Main Function --#
def main():
blank()
ans = str(input("Do you want to encode or decode a word? (e/d) "))
blank()
if (ans.lower() == 'e'):
word = str(input("Enter a word to encode: ")).lower()
blank()
print(encode(word))
elif (ans.lower() == 'd'):
word = str(input("Enter a word to decode: ")).lower()
blank()
print(decode(word))
else:
blank()
print("An unexpected error has occurred.")
#-- While True Loop --#
while True:
#-- Call Main Function --#
main()
``` |
{
"source": "johandicap/optimal-mouse-grouping",
"score": 3
} |
#### File: optimal_mouse_grouping/test/test_mouse_grouping_utils.py
```python
import os
import tempfile
import numpy as np
import pandas as pd
from optimal_mouse_grouping.mouse_grouping_utils import compute_group_sizes, save_mouse_grouping_as_xlsx
########################################################################################################################
def test_compute_group_sizes_ex1() -> None:
"""
Example 1: 44 mice and a minimum group size of 5 result in four groups of 6 and four groups of 5.
"""
#
# Given
#
num_mice = 44
min_group_size = 5
expected_group_sizes = np.array([6, 6, 6, 6, 5, 5, 5, 5], dtype=np.int64)
#
# When
#
group_sizes = compute_group_sizes(num_mice, min_group_size)
#
# Then
#
np.testing.assert_array_equal(group_sizes, expected_group_sizes)
########################################################################################################################
def test_compute_group_sizes_ex2() -> None:
"""
Example 2: 14 mice and a minimum group size of 5 result in two groups of 7.
"""
#
# Given
#
num_mice = 14
min_group_size = 5
expected_group_sizes = np.array([7, 7], dtype=np.int64)
#
# When
#
group_sizes = compute_group_sizes(num_mice, min_group_size)
#
# Then
#
np.testing.assert_array_equal(group_sizes, expected_group_sizes)
########################################################################################################################
def test_compute_group_sizes_ex3() -> None:
"""
Example 3: 199 mice and a minimum group size of 50 result in three groups of size [67, 66, 66].
"""
#
# Given
#
num_mice = 199
min_group_size = 50
expected_group_sizes = np.array([67, 66, 66], dtype=np.int64)
#
# When
#
group_sizes = compute_group_sizes(num_mice, min_group_size)
#
# Then
#
np.testing.assert_array_equal(group_sizes, expected_group_sizes)
########################################################################################################################
def test_save_mouse_grouping_as_xlsx() -> None:
"""
Test the writing mouse grouping results to an Excel file, including formatting of cells.
"""
#
# Given
#
df_sorted = pd.DataFrame(data={
"group": [1, 1, 2, 2],
"mouse_id": [9, 11, 39, 41],
"tumor_size": [20.000615, 36.217478, 20.087039, 130.426312],
})
df_groups = pd.DataFrame(data={
"group": [1, 2, 3],
"num_mice_in_group": [6, 6, 6],
"mouse_ids_in_group": ["9, 11, 22, 39, 41, 49", "7, 13, 32, 35, 46, 48", "6, 15, 19, 34, 50, 51"],
"tumor_size_mean": [40.52, 42.80, 42.89],
"overall_mean_diff": [-1.7255, 0.5577, 0.6445],
})
# Generate temporary file path
temp_xlsx_file_path = tempfile.NamedTemporaryFile(suffix=".xlsx").name
#
# When
#
save_mouse_grouping_as_xlsx(df_sorted, df_groups, temp_xlsx_file_path)
#
# Then
#
assert os.path.isfile(temp_xlsx_file_path)
os.remove(temp_xlsx_file_path)
########################################################################################################################
``` |
{
"source": "johandry/sales-scraper",
"score": 3
} |
#### File: johandry/sales-scraper/scraper.py
```python
from selenium import webdriver
from bs4 import BeautifulSoup
import re
import os
from time import sleep
import json
import pandas as pd
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
DRIVER_BIN = os.path.join(PROJECT_ROOT, "chromedriver")
SOURCE_DIR = './source'
class SalesScraper:
def __init__(self, username, password, companyNames, fields):
self.driver = None
self.username = username
self.password = password
self.companyNames = companyNames
self.fields = fields
self.data = {}
self.sourceDir = SOURCE_DIR
def login(self):
if self.driver:
return
self.driver = webdriver.Chrome(executable_path=DRIVER_BIN)
self.driver.get('https://www.linkedin.com/login')
username = self.driver.find_element_by_id('username')
username.send_keys(self.username)
sleep(0.5)
password = self.driver.find_element_by_id('password')
password.send_keys(<PASSWORD>)
sleep(0.5)
log_in_button = self.driver.find_element_by_xpath('//*[@type="submit"]')
log_in_button.click()
sleep(1)
def logout(self):
if self.driver:
self.driver.quit()
def savePages(self, overwrite=True):
existsSourceDir = os.path.exists(self.sourceDir)
if existsSourceDir and not overwrite:
return
self.login()
if not existsSourceDir:
os.mkdir(self.sourceDir)
for companyName in self.companyNames:
page_source = self._getPageSource(companyName)
soup = BeautifulSoup(page_source, 'html.parser')
with open(os.path.join(self.sourceDir, f'{companyName}.html'), 'w') as file:
file.write(soup.prettify())
self.logout()
def _getPageSource(self, companyName):
if self.driver == None:
return open(os.path.join(self.sourceDir, f'{companyName}.html'), 'r').read()
self.driver.get(f'https://www.linkedin.com/company/{companyName}/about/')
return self.driver.page_source
def _scrapData(self, companyName):
data = { }
page_source = self._getPageSource(companyName)
soup = BeautifulSoup(page_source, 'html.parser')
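        # Each field on the "About" page is a <dt>label</dt>/<dd>value</dd> pair: find the
        # <dt> whose text matches the field name and read the text of its sibling <dd>.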
for f in self.fields:
pattern = re.compile(rf'{f}')
data[f] = soup.find('dt', text=pattern).find_next_sibling("dd").text.strip()
return data
def _scrapCompanies(self):
for companyName in self.companyNames:
companyData = self._scrapData(companyName)
self.data[companyName] = companyData
def scrap(self):
self.login()
self._scrapCompanies()
self.logout()
def scrapFromLocal(self):
self._scrapCompanies()
def to_json(self, filename=None):
if filename:
with open(filename, 'w') as f:
json.dump(self.data, f)
else:
return json.dumps(self.data)
def to_csv(self, filename=None):
df = pd.DataFrame.from_dict(self.data, orient="index")
if filename:
df.to_csv(filename, index_label="Company", encoding='utf-8')
else:
return df.to_csv()
def to_markdown(self, filename=None):
df = pd.DataFrame.from_dict(self.data, orient="index")
if filename:
with open(filename, 'w') as f:
df.to_markdown(f)
else:
return df.to_markdown()
ss = SalesScraper('<EMAIL>', 'NataL1nda',
companyNames=['salesforce', 'vmware'],
fields=['Company size', 'Website', 'Headquarters', 'Industry'])
localSource = True
if localSource:
ss.savePages(overwrite=False)
ss.scrapFromLocal()
else:
ss.scrap()
ss.to_csv('output.csv')
ss.to_markdown('output.md')
print(ss.to_json())
# Notes:
# Download driver from: https://chromedriver.chromium.org/downloads
``` |
{
"source": "johan-ejstrud/aminder",
"score": 3
} |
#### File: johan-ejstrud/aminder/relight.py
```python
from __future__ import print_function
import os
import sys
import click
import random
import sqlite3
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
NOTES_DIR = os.path.join(SCRIPT_DIR, 'notes')
QUERY = (
"SELECT "
"Bookmark.BookmarkID, "
"content.Title, "
"content.Attribution, "
"Bookmark.Text "
"FROM Bookmark INNER JOIN content "
"ON Bookmark.VolumeID = content.ContentID "
"WHERE Bookmark.Text != 'None';"
)
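# The query joins each highlight (Bookmark.Text) with the Title and Attribution
# (author) of the book it belongs to, skipping rows whose Text is the literal 'None'.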
def print_note():
note_file = os.path.join(NOTES_DIR, random.choice(os.listdir(NOTES_DIR)))
with open(note_file, 'r') as f:
note = f.readlines()
print(
'Title : %sAuthor: %s%s' %
(note[0], note[1], ("".join([str(x) for x in note[2:]]))))
def store_notes(path):
if not os.path.exists(path):
print("ERROR: Path does not exist.", file=sys.stderr)
sys.exit(1)
sql_connection = sqlite3.connect(path)
sql_cursor = sql_connection.cursor()
sql_cursor.execute(QUERY)
data = sql_cursor.fetchall()
sql_cursor.close()
sql_connection.close()
for note in data:
note_file = os.path.join(NOTES_DIR, note[0])
f = open(note_file, 'w')
f.write("\n".join([str(x) for x in note[1:]]))
f.close()
@click.command()
@click.option("--store", "-s", default=None, nargs=1,
help="Path to KoboReader.sqlite file")
def relight(store):
"""
    Store highlights from Kobo SQLite, and print a random one.
"""
if(store):
store_notes(store)
else:
print_note()
if __name__ == "__main__":
relight()
``` |
{
"source": "johanek/salt-arista",
"score": 2
} |
#### File: salt-arista/_modules/arista_vlan.py
```python
from __future__ import absolute_import
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils
import salt.utils.decorators as decorators
log = logging.getLogger(__name__)
try:
import pyeapi
HAS_PYEAPI = True
except ImportError:
HAS_PYEAPI = False
def __virtual__():
'''
Only work on POSIX-like systems
'''
if salt.utils.is_windows() or HAS_PYEAPI == False:
return False
return True
def getall():
vlans = _vlans()
return vlans.getall()
def get(vlanid):
vlans = _vlans()
result = vlans.get(vlanid)
if result == None:
return False
return result
def create(vlanid):
vlans = _vlans()
if vlans.get(vlanid):
return "Error: vlan %s already exists" % vlanid
result = vlans.create(vlanid)
if result == None:
return "Error: could not create vlan %s" % vlanid
return result
def delete(vlanid):
vlans = _vlans()
if not vlans.get(vlanid):
return "Error: vlan %s doesn't exist" % vlanid
result = vlans.delete(vlanid)
if result == None:
return "Error: could not delete vlan %s" % vlanid
return result
def set_name(vlanid,name):
vlans = _vlans()
vlan = vlans.get(vlanid)
if vlan == None:
vlan = "Error: vlan %s does not exist" % vlanid
result = vlans.set_name(vlanid,name)
if result == None:
result = "Error: Could not set name for vlan %s" % vlanid
return result
def set_state(vlanid,state):
vlans = _vlans()
if state not in ['active','suspend']:
return "Error: %s is not a valid state for vlan" % state
vlan = vlans.get(vlanid)
if vlan == None:
return "Error: vlan %s does not exist" % vlanid
result = vlans.set_state(vlanid, value=state)
if result == False:
result = "Error: could not set state %s for vlan %s" % (vlanid, state)
return result
def _conn():
node = pyeapi.connect_to('veos1')
return node
def _vlans():
node = _conn()
vlans = node.api('vlans')
    vlans.autorefresh = True
return vlans
``` |
{
"source": "johanek/salt",
"score": 2
} |
#### File: unit/modules/pw_group_test.py
```python
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
from salt.modules import pw_group
# Globals
pw_group.__grains__ = {}
pw_group.__salt__ = {}
pw_group.__context__ = {}
pw_group.grinfo = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PwGroupTestCase(TestCase):
'''
Test for salt.module.pw_group
'''
def test_add(self):
'''
Tests to add the specified group
'''
mock = MagicMock(return_value={'retcode': 0})
with patch.dict(pw_group.__salt__, {'cmd.run_all': mock}):
self.assertTrue(pw_group.add('a'))
def test_delete(self):
'''
Tests to remove the named group
'''
mock = MagicMock(return_value={'retcode': 0})
with patch.dict(pw_group.__salt__, {'cmd.run_all': mock}):
self.assertTrue(pw_group.delete('a'))
def test_info(self):
'''
Tests to return information about a group
'''
self.assertDictEqual(pw_group.info('name'), {})
mock = MagicMock(return_value={'gr_name': 'A',
'gr_passwd': 'B',
'gr_gid': 1,
'gr_mem': ['C', 'D']})
with patch.dict(pw_group.grinfo, mock):
self.assertDictEqual(pw_group.info('name'), {})
def test_getent(self):
'''
        Tests to return info on all groups
'''
mock = MagicMock(return_value={'group.getent': 1})
with patch.dict(pw_group.__context__, mock):
self.assertEqual(pw_group.getent()[0], {'passwd': 'x',
'gid': 0,
'name': 'root',
'members': []})
mock = MagicMock(return_value='A')
with patch.object(pw_group, 'info', mock):
self.assertEqual(pw_group.getent(True)[0], 'A')
def test_chgid(self):
'''
        Tests to change the gid for a named group
'''
mock = MagicMock(return_value=1)
with patch.dict(pw_group.__salt__, {'file.group_to_gid': mock}):
self.assertTrue(pw_group.chgid('name', 1))
mock = MagicMock(side_effect=[1, 0])
with patch.dict(pw_group.__salt__, {'file.group_to_gid': mock}):
mock = MagicMock(return_value=None)
with patch.dict(pw_group.__salt__, {'cmd.run': mock}):
self.assertTrue(pw_group.chgid('name', 0))
mock = MagicMock(side_effect=[1, 1])
with patch.dict(pw_group.__salt__, {'file.group_to_gid': mock}):
mock = MagicMock(return_value=None)
with patch.dict(pw_group.__salt__, {'cmd.run': mock}):
self.assertFalse(pw_group.chgid('name', 0))
if __name__ == '__main__':
from integration import run_tests
run_tests(PwGroupTestCase, needs_daemon=False)
``` |
{
"source": "johaneo/lompar",
"score": 3
} |
#### File: johaneo/lompar/lombok.py
```python
from combpar import *
def number(): return predP(lambda x : x.isdigit() and x, desc="num")
def null(): return litP("null")
def name(): return predP(lambda x : x.isalnum() and x, desc="name")
def fqn(): return rexP("([a-zA-Z][a-zA-Z0-9_]*)([.][a-zA-Z][a-zA-Z0-9_]*)*", desc="fqn")
def word(): return rexP("[^=[]*")
#def word(): return name() # more restrictive -> faster, but fewer input accepted
def unquotedstring(): return manyP(refP(word), desc="manyWord") >> (lambda x : "_".join(x)) << "unquoted"
def array(): return (litP("[") & sepListP(refP(value), litP(","), desc="arraySepList") & litP("]")) >> (lambda x: x[0].val[1].val) << "array"
def kv(): return (refP(name) & litP("=") & refP(value)) << "kv" >> (lambda x : (dbg(x[0].val[0], doc="kv0", lvl=0), dbg(x[1], doc="kv1", lvl=0)))
def kvs(): return (sepListP(refP(kv), litP(","), "kvSepList")) << "kvs" # >> (lambda x : dbg(x, doc="kvs", lvl=0))
def value(): return (refP(null) | refP(number) | refP(array) | refP(obj) | refP(unquotedstring)) >> (lambda x : dbg(x, doc="value", lvl=0))
def obj(): return (refP(fqn) & litP("(") & refP(kvs) & litP(")")) >> (lambda x : (x[0].val[0].val[0], x[0].val[1])) << "obj"
# Lombok's toString is not intended to be parsable, but sometimes... you gotta.
test_1 = """SomeDTO(orderId=1)"""
test_nest = """SomeDTO(orderId=1, b=x.y.OtherDTO(c=d))"""
test_long = """SomeDTO(orderId=1, b=very long string, c=hello)"""
test_hard = """SomeDTO(orderId=1, b=very, (long) string, c=hello)"""
test_easy = """SomeDTO(orderId=1, logonId=<EMAIL>, currency=USD, items=[SomeDTO.SomeItem(orderItemId=1001, quantity=1, somebool=true, enumReason=NO), SomeDTO.SomeItem(orderItemId=1002, quantity=100.03, somebool=false, enumReason=OTHER)], extras=[], name=Heartgard, description=null)"""
test_harder = """SomeDTO(orderId=1, logonId=<EMAIL>, currency=USD, items=[SomeDTO.SomeItem(orderItemId=1001, quantity=1, somebool=true, enumReason=NO)], extras=[], name=Heartgard Plus Soft Chew for Dogs, up to 25 lbs, (Blue Box), 6 Soft Chews (6-mos. supply), description=null)"""
## ---------------------------
def val(x):
if hasattr(x, 'val'): return x.val
return x
class java:
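    # Small visitor that re-emits the parsed Lombok toString() tree as a Java
    # "new ..." constructor call, keeping the original field names as /*name=*/ comments.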
def __init__(self, f):
self.f = f
def write(self, str):
print str,
def visit(self, tv):
if tv.typ == "obj": return self.obj(tv)
if tv.typ == "rex(fqn)": return self.rexfqn(tv)
if tv.typ == "kvs": return self.kvs(tv)
if tv.typ == "name": return self.name(tv)
if tv.typ == "num": return self.num(tv)
if tv.typ == "unquoted": return self.unquoted(tv)
if tv.typ == "array": return self.array(tv)
if tv.typ.startswith("lit"): return self.lit(tv)
if tv.typ.startswith("many"): return self.many(tv)
print "!!!", tv
def obj(self, tv):
self.write("new ")
fqn, kvs = tv.val
self.visit(fqn)
self.write("(")
self.visit(kvs)
self.write(")")
def kvs(self, tvs):
comma = False
for tv in tvs.val:
if comma:
self.write(", \n")
comma = True
k, v = tv.val
self.write("/*")
self.visit(k)
self.write("=*/ ")
self.visit(v)
def many(self, tvs):
comma = False
for tv in tvs.val:
if comma:
self.write(", ")
comma = True
self.visit(tv.val)
def array(self, tvs):
comma = False
t = "Object"
self.write("new "+t+"[]{ ")
for tv in tvs.val:
if comma:
self.write(", ")
comma = True
self.visit(tv)
self.write("} ")
def rexfqn(self, tv):
self.write(tv.val)
def name(self, tv):
self.write('"'+tv.val+'"')
def num(self, tv):
self.write(tv.val)
def lit(self, tv):
self.write(tv.val)
def unquoted(self, tv):
self.write('"'+tv.val+'"')
if __name__ == "__main__":
import sys
print sys.argv
``` |
{
"source": "johanere/qflow",
"score": 2
} |
#### File: qflow/qflow/mpi.py
```python
from mpi4py import MPI
from pprint import pprint
def master_rank():
return MPI.COMM_WORLD.rank == 0
def mpiprint(*args, pretty=False, **kwargs):
if master_rank():
if pretty:
            pprint(*args, **kwargs)
else:
print(*args, **kwargs)
```
#### File: qflow/tests/test_activation.py
```python
import numpy as np
from hypothesis import assume, given, settings
from hypothesis import strategies as st
from qflow.wavefunctions.nn.activations import (
exponential,
identity,
relu,
sigmoid,
tanh,
)
from .testutils import array_strat, assert_close
@given(array_strat(max_size=100))
def test_identity(x):
np.testing.assert_array_equal(x, identity.evaluate(x))
np.testing.assert_array_equal(np.ones_like(x), identity.derivative(x))
np.testing.assert_array_equal(np.zeros_like(x), identity.dbl_derivative(x))
@given(array_strat(max_size=100))
def test_relu(x):
np.testing.assert_array_equal(np.where(x > 0, x, 0), relu.evaluate(x))
np.testing.assert_array_equal(
np.where(x > 0, 1, 0), relu.derivative(relu.evaluate(x))
)
np.testing.assert_array_equal(
np.zeros_like(x), relu.dbl_derivative(relu.evaluate(x))
)
@given(array_strat(max_size=100))
def test_sigmoid(x):
sig = 1 / (1 + np.exp(-x))
np.testing.assert_array_equal(sig, sigmoid.evaluate(x))
np.testing.assert_array_equal(sig * (1 - sig), sigmoid.derivative(sig))
np.testing.assert_array_equal(
sig * (1 - sig) * (1 - 2 * sig), sigmoid.dbl_derivative(sig)
)
@given(array_strat(max_size=50))
def test_tanh(x):
assume(np.all(np.abs(x) < 15))
ta = np.tanh(x)
assert_close(ta, tanh.evaluate(x))
assert_close(1 - ta ** 2, tanh.derivative(tanh.evaluate(x)))
assert_close(
-2 * np.sinh(x) / np.cosh(x) ** 3, tanh.dbl_derivative(tanh.evaluate(x))
)
@given(array_strat(max_size=100))
def test_exponential(x):
exp = np.exp(x)
np.testing.assert_array_equal(exp, exponential.evaluate(x))
np.testing.assert_array_equal(exp, exponential.derivative(exponential.evaluate(x)))
np.testing.assert_array_equal(
exp, exponential.dbl_derivative(exponential.evaluate(x))
)
```
#### File: qflow/tests/test_dnn_benchmark.py
```python
import numpy as np
import pytest
from qflow.hamiltonians import HarmonicOscillator
from qflow.samplers import ImportanceSampler
from qflow.wavefunctions import SimpleGaussian, Dnn
from qflow.wavefunctions.nn.layers import DenseLayer
from qflow.wavefunctions.nn.activations import (
sigmoid,
tanh,
relu,
identity,
exponential,
)
from qflow import DistanceCache
small_system = np.zeros((2, 2))
large_system = np.zeros((50, 3))
samples = 10000
H0 = HarmonicOscillator()
psi0 = SimpleGaussian(0.5)
layers = [
DenseLayer(50 * 3, 32, activation=tanh, scale_factor=0.001),
DenseLayer(32, 16, activation=tanh),
DenseLayer(16, 1, activation=exponential),
]
dnn = Dnn()
for l in layers:
dnn.add_layer(l)
def local_energy_gradient(H, psi, sampler, samples):
return H.local_energy_gradient(sampler, psi, samples)
@pytest.mark.benchmark(group="evaluation", warmup=True)
def test_dnn_eval(benchmark):
benchmark(dnn, large_system)
@pytest.mark.benchmark(group="gradient", warmup=True)
def test_dnn_gradient(benchmark):
benchmark(dnn.gradient, large_system)
@pytest.mark.benchmark(group="laplacian", warmup=True)
def test_dnn_laplacian(benchmark):
benchmark(dnn.laplacian, large_system)
```
#### File: qflow/tests/test_simplegaussian.py
```python
from autograd import grad, hessian
from autograd import numpy as np
from hypothesis import assume, given, settings
from hypothesis import strategies as st
from hypothesis.extra.numpy import array_shapes, arrays
from qflow.wavefunctions import SimpleGaussian
from .testutils import assert_close, float_strat, array_strat
def psi_np(X, alpha, beta):
coefs = (1, 1, beta)[: X.shape[1]]
return np.exp(-alpha * np.sum(np.dot(X ** 2, coefs)))
@given(X=array_strat(), alpha=float_strat(), beta=float_strat())
def test_eval(X, alpha, beta):
psi = SimpleGaussian(alpha, beta)
assert_close(psi_np(X, alpha, beta), psi(X))
@given(X=array_strat(), alpha=float_strat(), beta=float_strat())
def test_gradient(X, alpha, beta):
psi = SimpleGaussian(alpha, beta)
gradient = psi.gradient(X)
assert len(gradient) == 2
assert gradient[1] == 0 # Beta fixed.
assert_close(grad(psi_np, 1)(X, alpha, beta), gradient[0] * psi(X))
@given(X=array_strat(), alpha=float_strat(), beta=float_strat())
def test_drift_force(X, alpha, beta):
psi = SimpleGaussian(alpha, beta)
drift = 0.5 * psi.drift_force(X) * psi(X)
assert len(drift) == X.size
assert_close(grad(psi_np, 0)(X, alpha, beta).ravel(), drift)
@given(X=array_strat(), alpha=float_strat(), beta=float_strat())
def test_laplacian(X, alpha, beta):
psi = SimpleGaussian(alpha, beta)
laplacian = psi.laplacian(X) * psi(X)
assert_close(
np.trace(hessian(psi_np)(X, alpha, beta).reshape(X.size, X.size)), laplacian
)
```
#### File: qflow/tests/test_wavefunctionproduct.py
```python
import numpy as np
from qflow.wavefunctions import SimpleGaussian, WavefunctionProduct
def test_works_on_simple_gaussians():
"""
If
Psi = SimpleGaussian(a) * SimpleGaussian(b)
then we have by the form of SimpleGaussian the follwing
Psi = SimpleGaussian(a + b)
for any a and b.
"""
for _ in range(1000):
alpha1, alpha2 = np.random.rand(2)
psi1 = SimpleGaussian(alpha1)
psi2 = SimpleGaussian(alpha2)
psi_expected = SimpleGaussian(alpha1 + alpha2)
psi_prod = WavefunctionProduct(psi1, psi2)
n, d = np.random.randint(100), np.random.randint(1, 3 + 1)
s = np.random.randn(n, d)
assert np.isclose(psi_expected(s), psi_prod(s))
assert np.isclose(psi_expected.laplacian(s), psi_prod.laplacian(s))
np.testing.assert_allclose(psi_expected.drift_force(s), psi_prod.drift_force(s))
# The gradient will be slightly different. Both psi1 and psi2 should give the same gradient, as it is
# independent of alpha. However, the product state will give the gradients from both psi1 and psi2,
# which means it will have two sets of each number.
# __This is the expected behaviour.__
expected_grad = psi_expected.gradient(s)
np.testing.assert_allclose(
np.concatenate([expected_grad] * 2), psi_prod.gradient(s)
)
```
#### File: writing/scripts/QD-pade-dnn.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib2tikz
from qflow.wavefunctions import (
JastrowPade,
SimpleGaussian,
WavefunctionProduct,
Dnn,
InputSorter,
)
from qflow.wavefunctions.nn.layers import DenseLayer
from qflow.wavefunctions.nn.activations import tanh, exponential
from qflow.hamiltonians import CoulombHarmonicOscillator
from qflow.samplers import ImportanceSampler
from qflow.optimizers import AdamOptimizer
from qflow.training import train, EnergyCallback, SymmetryCallback, ParameterCallback
from qflow.statistics import compute_statistics_for_series, statistics_to_tex
from qflow.mpi import mpiprint, master_rank
def plot_training(energies, parameters, symmetries):
_, (eax, pax) = plt.subplots(ncols=2)
eax.semilogy(np.abs(3 - np.asarray(energies[2])), label=r"$\psi_{PJ}$")
eax.semilogy(np.abs(3 - np.asarray(energies[0])), label=r"$\psi_{DNN}$")
eax.semilogy(np.abs(3 - np.asarray(energies[1])), label=r"$\psi_{SDNN}$")
eax.set_xlabel(r"% of training")
eax.set_ylabel(r"Absolute error in $\langle E_L\rangle$ [a.u]")
eax.legend()
pax.plot(np.asarray(parameters[0])[:, 4:50])
pax.set_xlabel(r"% of training")
matplotlib2tikz.save(__file__ + ".tex")
_, sax = plt.subplots()
sax.semilogx(symmetries, label=r"$S(\psi_{DNN})$")
sax.set_ylabel("Symmetry")
sax.set_xlabel(r"% of training")
sax.legend(loc="lower right")
matplotlib2tikz.save(__file__ + ".symmetry.tex")
P, D = 2, 2 # Particles, dimensions
system = np.empty((P, D))
H = CoulombHarmonicOscillator()
# Wave functions:
simple_gaussian = SimpleGaussian(alpha=0.5)
jastrow = JastrowPade(alpha=1, beta=1)
simple_and_jastrow = WavefunctionProduct(simple_gaussian, jastrow)
layers = [
DenseLayer(P * D, 32, activation=tanh, scale_factor=0.001),
DenseLayer(32, 16, activation=tanh),
DenseLayer(16, 1, activation=exponential),
]
dnn = Dnn()
for l in layers:
dnn.add_layer(l)
psi = WavefunctionProduct(simple_and_jastrow, dnn)
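# The trial wave function multiplies the Gaussian * Pade-Jastrow baseline by a DNN
# correction factor (P*D inputs -> 32 -> 16 -> 1, exponential output so the factor stays positive).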
psi_sampler = ImportanceSampler(system, psi, step_size=0.1)
# Sorted
simple_gaussian2 = SimpleGaussian(alpha=0.5)
jastrow2 = JastrowPade(alpha=1, beta=1)
simple_and_jastrow2 = WavefunctionProduct(simple_gaussian2, jastrow2)
layers2 = [
DenseLayer(P * D, 32, activation=tanh, scale_factor=0.001),
DenseLayer(32, 16, activation=tanh),
DenseLayer(16, 1, activation=exponential),
]
dnn2 = Dnn()
for l in layers2:
dnn2.add_layer(l)
psi_sorted_base = WavefunctionProduct(simple_and_jastrow2, dnn2)
psi_sorted = InputSorter(psi_sorted_base)
psi.parameters = psi_sorted.parameters
psi_sorted_sampler = ImportanceSampler(system, psi_sorted, step_size=0.1)
# Benchmark:
simple_gaussian_bench = SimpleGaussian(alpha=0.5)
jastrow_bench = JastrowPade(alpha=1, beta=1)
psi_bench = WavefunctionProduct(simple_gaussian_bench, jastrow_bench)
psi_bench_sampler = ImportanceSampler(system, psi_bench, step_size=0.1)
plot_samples = 1_000_000
iters = 30000
samples = 1000
gamma = 0.0
evaluation_points = 2 ** 23
psi_energies = EnergyCallback(samples=plot_samples, verbose=True)
psi_symmetries = SymmetryCallback(samples=plot_samples)
psi_parameters = ParameterCallback()
train(
psi,
H,
psi_sampler,
iters=iters,
samples=samples,
gamma=gamma,
optimizer=AdamOptimizer(len(psi.parameters)),
call_backs=(psi_energies, psi_symmetries, psi_parameters),
)
mpiprint("Training regular dnn complete")
np.savetxt("QD-parameters-dnn-regular.txt", psi.parameters)
psi_sorted_energies = EnergyCallback(samples=plot_samples, verbose=True)
psi_sorted_parameters = ParameterCallback()
train(
psi_sorted,
H,
psi_sorted_sampler,
iters=iters,
samples=samples,
gamma=gamma,
optimizer=AdamOptimizer(len(psi_sorted.parameters)),
call_backs=(psi_sorted_energies, psi_sorted_parameters),
)
mpiprint("Training sorted dnn complete")
np.savetxt("QD-parameters-dnn-sorted.txt", psi_sorted.parameters)
psi_bench_energies = EnergyCallback(samples=plot_samples)
train(
psi_bench,
H,
psi_bench_sampler,
iters=iters,
samples=samples,
gamma=0,
optimizer=AdamOptimizer(len(psi.parameters)),
call_backs=(psi_bench_energies,),
)
mpiprint("Bench Training complete")
stats = [
compute_statistics_for_series(
H.local_energy_array(psi_bench_sampler, psi_bench, evaluation_points),
method="blocking",
),
compute_statistics_for_series(
H.local_energy_array(psi_sampler, psi, evaluation_points), method="blocking"
),
compute_statistics_for_series(
H.local_energy_array(psi_sorted_sampler, psi_sorted, evaluation_points),
method="blocking",
),
]
old = psi_sorted.parameters
psi_sorted.parameters = psi.parameters
psi_sorted_sampler.thermalize(10000)
stats.append(
compute_statistics_for_series(
H.local_energy_array(psi_sorted_sampler, psi_sorted, evaluation_points),
method="blocking",
)
)
labels = [r"$\psi_{PJ}$", r"$\psi_{DNN}$", r"$\psi_{SDNN}$", r"$\hat{\psi}_{SDNN}$"]
mpiprint(stats, pretty=True)
mpiprint(statistics_to_tex(stats, labels, filename=__file__ + ".table.tex"))
# mpiprint(psi.parameters)
if master_rank():
plot_training(
[psi_energies, psi_sorted_energies, psi_bench_energies],
        [psi_parameters, psi_sorted_parameters],
psi_symmetries,
)
plt.show()
``` |
{
"source": "johanesmikhael/ContinuityAnalysis",
"score": 2
} |
#### File: johanesmikhael/ContinuityAnalysis/analysis_graphic_view_widget.py
```python
from PyQt5 import QtGui, QtWidgets, QtCore
class AnalysisViewWidget(QtWidgets.QGraphicsView):
def __init__(self, *kargs):
super(AnalysisViewWidget, self).__init__(*kargs)
self.scale_factor = 1
def wheelEvent(self, event):
delta = event.angleDelta().y()
if delta > 0:
zoom_factor = 1.125
else:
zoom_factor = 0.8
self.scale_factor = self.scale_factor * zoom_factor
self.scale(zoom_factor, zoom_factor)
```
#### File: johanesmikhael/ContinuityAnalysis/analysis_visualization_ui.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_analysis_visualization_gui(object):
def setupUi(self, analysis_visualization_gui):
analysis_visualization_gui.setObjectName("analysis_visualization_gui")
analysis_visualization_gui.resize(755, 553)
self.centralwidget = QtWidgets.QWidget(analysis_visualization_gui)
self.centralwidget.setEnabled(True)
self.centralwidget.setAutoFillBackground(False)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_distance = QtWidgets.QLabel(self.centralwidget)
self.label_distance.setObjectName("label_distance")
self.gridLayout_2.addWidget(self.label_distance, 0, 0, 1, 1)
self.verticalSlider_distance = QtWidgets.QSlider(self.centralwidget)
self.verticalSlider_distance.setMaximum(100)
self.verticalSlider_distance.setOrientation(QtCore.Qt.Vertical)
self.verticalSlider_distance.setObjectName("verticalSlider_distance")
self.gridLayout_2.addWidget(self.verticalSlider_distance, 1, 0, 1, 1)
self.gridLayout.addLayout(self.gridLayout_2, 1, 1, 1, 1)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout.addLayout(self.verticalLayout, 1, 0, 1, 1)
self.listWidget_elements = QtWidgets.QListWidget(self.centralwidget)
self.listWidget_elements.setMaximumSize(QtCore.QSize(750, 16777215))
self.listWidget_elements.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.listWidget_elements.setObjectName("listWidget_elements")
self.gridLayout.addWidget(self.listWidget_elements, 1, 2, 1, 1)
analysis_visualization_gui.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(analysis_visualization_gui)
self.menubar.setGeometry(QtCore.QRect(0, 0, 755, 21))
self.menubar.setObjectName("menubar")
analysis_visualization_gui.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(analysis_visualization_gui)
self.statusbar.setObjectName("statusbar")
analysis_visualization_gui.setStatusBar(self.statusbar)
self.retranslateUi(analysis_visualization_gui)
QtCore.QMetaObject.connectSlotsByName(analysis_visualization_gui)
def retranslateUi(self, analysis_visualization_gui):
_translate = QtCore.QCoreApplication.translate
analysis_visualization_gui.setWindowTitle(_translate("analysis_visualization_gui", "MainWindow"))
self.label_distance.setText(_translate("analysis_visualization_gui", "TextLabel"))
```
#### File: johanesmikhael/ContinuityAnalysis/geom.py
```python
from OCC.TColgp import TColgp_Array1OfPnt
from OCC.TColStd import TColStd_Array1OfReal
from OCC.TColStd import TColStd_Array1OfInteger
from OCC.Geom import Geom_BezierCurve
from OCC.Geom import Geom_BSplineCurve
from OCC.BRepBuilderAPI import BRepBuilderAPI_MakeEdge
from OCC.BRepBuilderAPI import BRepBuilderAPI_MakePolygon
from OCC.BRepBuilderAPI import BRepBuilderAPI_MakeFace
from OCC.BRepPrimAPI import BRepPrimAPI_MakeBox
from OCC.GeomAdaptor import GeomAdaptor_Curve
from OCC.GCPnts import GCPnts_AbscissaPoint
from OCC.gp import gp_Pnt
from util import *
import collections
import math
def divide_range(start, stop, divide_num):
float_list = []
r = start
step = (stop - start) / divide_num
while r < stop:
float_list.append(r)
r += step
float_list.append(stop)
return float_list
def points_to_bezier_curve(points):
pts = TColgp_Array1OfPnt(0, len(points) - 1)
for n, ptn in enumerate(points):
pts.SetValue(n, ptn[0])
crv = Geom_BezierCurve(pts)
return crv
def points_to_bspline_curve(points, degree):
pts = TColgp_Array1OfPnt(0, len(points) - 1)
for n, ptn in enumerate(points):
if isinstance(ptn, collections.Sequence):
pts.SetValue(n, ptn[0])
else:
pts.SetValue(n, ptn)
if len(points) == 3:
if degree > 2:
degree = 2
if len(points) == 2:
if degree > 1:
degree = 1
knots_size = len(points) + degree + 1
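    # Build a clamped (open uniform) knot vector: multiplicity degree+1 at both ends,
    # single interior knots, with knot values spaced uniformly over [0, 1].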
knot_sum = degree + 1
mult_array = [degree + 1]
while knot_sum < knots_size - (degree + 1):
knot_sum += 1
mult_array.append(1)
mult_array.append(knots_size - knot_sum)
knot_array = divide_range(0.0, 1.0, len(mult_array) - 1)
print(knots_size)
print(mult_array)
print(knot_array)
knots = TColStd_Array1OfReal(0, len(mult_array) - 1)
mult = TColStd_Array1OfInteger(0, len(mult_array) - 1)
i = 0.0
for n in range(0, len(mult_array)):
print(n)
mult.SetValue(n, mult_array[n])
knots.SetValue(n, knot_array[n])
i += 1.0
crv = Geom_BSplineCurve(pts, knots, mult, degree, False)
return crv
def create_edge_to_points(origin_point, points):
edges = []
for point in points:
edge = create_edge_from_two_point(origin_point, point)
edges.append(edge)
return edges
def create_edge_from_two_point(origin_point, point):
edge = BRepBuilderAPI_MakeEdge(origin_point, point).Edge()
return edge
def create_box_from_center(origin_point, dx, dy, dz):
point_x = origin_point.X() - dx / 2
point_y = origin_point.Y() - dy / 2
point_z = origin_point.Z() - dz / 2
point = gp_Pnt(point_x, point_y, point_z)
box_shape = BRepPrimAPI_MakeBox(point, dx, dy, dz).Shape()
return box_shape
def create_box_from_two_points(p1, p2):
box_shape = BRepPrimAPI_MakeBox(p1, p2).Shape()
return box_shape
def create_rectangle_from_center(origin_point, du, dv, orientation):
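    # Build a du x dv rectangular face centred on origin_point; the Orientation value
    # picks the plane (bottom/up -> horizontal XY plane, left/right -> vertical YZ plane)
    # and the winding order of its four corners.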
x = origin_point.X()
y = origin_point.Y()
z = origin_point.Z()
p1, p2, p3, p4 = None, None, None, None
if orientation == Orientation.bottom:
p1 = gp_Pnt(x - du / 2, y - dv / 2, z)
p2 = gp_Pnt(x - du / 2, y + dv / 2, z)
p3 = gp_Pnt(x + du / 2, y + dv / 2, z)
p4 = gp_Pnt(x + du / 2, y - dv / 2, z)
elif orientation == Orientation.up:
p1 = gp_Pnt(x - du / 2, y + dv / 2, z)
p2 = gp_Pnt(x - du / 2, y - dv / 2, z)
p3 = gp_Pnt(x + du / 2, y - dv / 2, z)
p4 = gp_Pnt(x + du / 2, y + dv / 2, z)
elif orientation == Orientation.right:
p1 = gp_Pnt(x, y - dv / 2, z + du / 2)
p2 = gp_Pnt(x, y - dv / 2, z - du / 2)
p3 = gp_Pnt(x, y + dv / 2, z - du / 2)
p4 = gp_Pnt(x, y + dv / 2, z + du / 2)
elif orientation == Orientation.left:
p1 = gp_Pnt(x, y - dv / 2, z - du / 2)
p2 = gp_Pnt(x, y - dv / 2, z + du / 2)
p3 = gp_Pnt(x, y + dv / 2, z + du / 2)
p4 = gp_Pnt(x, y + dv / 2, z - du / 2)
rectangle_face = create_rectangular_face(p1, p2, p3, p4)
return rectangle_face
def divide_curve(crv, distance):
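    # Step along the curve in equal arc-length increments of `distance`, recording the
    # curve parameter reached after each step; the parameter range is assumed to be [0, 1].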
geom_adaptor_curve = GeomAdaptor_Curve(crv.GetHandle())
curve_param = [0.0]
param = 0
while param < 1:
gcpnts_abscissa_point = GCPnts_AbscissaPoint(geom_adaptor_curve, distance, param)
param = gcpnts_abscissa_point.Parameter()
if param <= 1:
curve_param.append(param)
return curve_param
def curve_length(crv, param1, param2):
geom_adaptor_curve = GeomAdaptor_Curve(crv.GetHandle())
length = GCPnts_AbscissaPoint.Length(geom_adaptor_curve, param1, param2)
return length
def create_rectangular_face(p1, p2, p3, p4):
wire = BRepBuilderAPI_MakePolygon(p1, p2, p3, p4, True).Wire()
rectangle_face = BRepBuilderAPI_MakeFace(wire).Face()
return rectangle_face
def create_yz_square_center(center, radius):
p1 = gp_Pnt(center.X(), center.Y() - radius / 2, center.Z() - radius / 2)
p2 = gp_Pnt(center.X(), center.Y() + radius / 2, center.Z() - radius / 2)
p3 = gp_Pnt(center.X(), center.Y() + radius / 2, center.Z() + radius / 2)
p4 = gp_Pnt(center.X(), center.Y() - radius / 2, center.Z() + radius / 2)
wire = BRepBuilderAPI_MakePolygon(p1, p2, p3, p4, True).Wire()
return wire
def create_yz_diagonal_square_center(center, radius):
diagonal = radius * pow(2.0, 0.5)
p1 = gp_Pnt(center.X(), center.Y(), center.Z() - diagonal)
p2 = gp_Pnt(center.X(), center.Y() + diagonal, center.Z())
p3 = gp_Pnt(center.X(), center.Y(), center.Z() + diagonal)
p4 = gp_Pnt(center.X(), center.Y() - diagonal, center.Z())
wire = BRepBuilderAPI_MakePolygon(p1, p2, p3, p4, True).Wire()
return wire
def create_yz_upside_triangle_center(center, radius):
sine = math.sin(math.pi / 6) * radius
cosine = math.cos(math.pi / 6) * radius
p1 = gp_Pnt(center.X(), center.Y() - cosine, center.Z() - sine)
p2 = gp_Pnt(center.X(), center.Y() + cosine, center.Z() - sine)
p3 = gp_Pnt(center.X(), center.Y(), center.Z() + radius)
wire = BRepBuilderAPI_MakePolygon(p1, p2, p3, True).Wire()
return wire
def create_yz_downside_triangle_center(center, radius):
sine = math.sin(math.pi / 6) * radius
cosine = math.cos(math.pi / 6) * radius
p1 = gp_Pnt(center.X(), center.Y() + cosine, center.Z() + sine)
p2 = gp_Pnt(center.X(), center.Y() - cosine, center.Z() + sine)
p3 = gp_Pnt(center.X(), center.Y(), center.Z() - radius)
wire = BRepBuilderAPI_MakePolygon(p1, p2, p3, True).Wire()
return wire
def create_yz_hexagon_center(center, radius):
sine = math.sin(math.pi / 6) * radius
cosine = math.cos(math.pi / 6) * radius
p1 = gp_Pnt(center.X(), center.Y() - cosine, center.Z() - sine)
p2 = gp_Pnt(center.X(), center.Y(), center.Z() - radius)
p3 = gp_Pnt(center.X(), center.Y() + cosine, center.Z() - sine)
p4 = gp_Pnt(center.X(), center.Y() + cosine, center.Z() + sine)
p5 = gp_Pnt(center.X(), center.Y(), center.Z() + radius)
p6 = gp_Pnt(center.X(), center.Y() - cosine, center.Z() + sine)
wire_a = BRepBuilderAPI_MakePolygon(p1, p2, p3, p4, False)
wire_a.Add(p5)
wire_a.Add(p6)
wire_a.Add(p1)
wire = wire_a.Wire()
return wire
def create_vertical_yz_rectangular_from_center(center, width, height):
p1 = gp_Pnt(center.X(), center.Y() - width / 2, center.Z() - height / 2)
p2 = gp_Pnt(center.X(), center.Y() + width / 2, center.Z() - height / 2)
p3 = gp_Pnt(center.X(), center.Y() + width / 2, center.Z() + height / 2)
p4 = gp_Pnt(center.X(), center.Y() - width / 2, center.Z() + height / 2)
return create_rectangular_face(p1, p2, p3, p4)
def create_horizontal_xy_rectangular_from_center(center, width, height):
p1 = gp_Pnt(center.X() - height / 2, center.Y() - width / 2, center.Z())
p2 = gp_Pnt(center.X() + height / 2, center.Y() - width / 2, center.Z())
p3 = gp_Pnt(center.X() + height / 2, center.Y() + width / 2, center.Z())
p4 = gp_Pnt(center.X() - height / 2, center.Y() + width / 2, center.Z())
return create_rectangular_face(p1, p2, p3, p4)
def middle_point(p1, p2):
x = 0.5 * (p1.X() + p2.X())
y = 0.5 * (p1.Y() + p2.Y())
z = 0.5 * (p1.Z() + p2.Z())
point = gp_Pnt(x, y, z)
return point
def is_adjacent_vertical(bounding_box_1, bounding_box_2):
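    # Boxes count as vertically adjacent when their bounding boxes touch and their
    # z-extents overlap by more than the tolerance (i.e. they are not merely stacked).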
is_out = bounding_box_1.IsOut(bounding_box_2)
tol = 1e-2
if not is_out: # the box is touching each other
c_min_1 = bounding_box_1.CornerMin()
c_max_1 = bounding_box_1.CornerMax()
c_min_2 = bounding_box_2.CornerMin()
c_max_2 = bounding_box_2.CornerMax()
z_min_1 = c_min_1.Z()
z_max_1 = c_max_1.Z()
z_min_2 = c_min_2.Z()
z_max_2 = c_max_2.Z()
print(z_min_1, z_max_1, z_min_2, z_max_2)
# return True
if z_min_1 >= z_max_2 - tol or z_max_1 <= z_min_2 + tol:
return False
else:
return True
else:
return False
def is_adjacent_horizontal(bounding_box_1, bounding_box_2):
is_out = bounding_box_1.IsOut(bounding_box_2)
tol = 1e-2
if not is_out: # the box is touching each other
c_min_1 = bounding_box_1.CornerMin()
c_max_1 = bounding_box_1.CornerMax()
c_min_2 = bounding_box_2.CornerMin()
c_max_2 = bounding_box_2.CornerMax()
x_min_1 = c_min_1.X()
x_max_1 = c_max_1.X()
x_min_2 = c_min_2.X()
x_max_2 = c_max_2.X()
print(x_min_1, x_max_1, x_min_2, x_max_2)
# return True
if x_min_1 >= x_max_2 - tol or x_max_1 <= x_min_2 + tol:
return False
else:
return True
else:
return False
```
#### File: johanesmikhael/ContinuityAnalysis/material_browser_ui.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MaterialBrowser(object):
def setupUi(self, MaterialBrowser):
MaterialBrowser.setObjectName("MaterialBrowser")
MaterialBrowser.setWindowModality(QtCore.Qt.NonModal)
MaterialBrowser.resize(631, 496)
MaterialBrowser.setMinimumSize(QtCore.QSize(600, 0))
self.verticalLayout = QtWidgets.QVBoxLayout(MaterialBrowser)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.listWidget_material = QtWidgets.QListWidget(MaterialBrowser)
self.listWidget_material.setMinimumSize(QtCore.QSize(0, 0))
self.listWidget_material.setMaximumSize(QtCore.QSize(300, 16777215))
self.listWidget_material.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked|QtWidgets.QAbstractItemView.EditKeyPressed)
self.listWidget_material.setObjectName("listWidget_material")
self.horizontalLayout.addWidget(self.listWidget_material)
self.groupBox = QtWidgets.QGroupBox(MaterialBrowser)
self.groupBox.setObjectName("groupBox")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalSlider_slipCoefficient = QtWidgets.QSlider(self.groupBox)
self.horizontalSlider_slipCoefficient.setMaximum(100)
self.horizontalSlider_slipCoefficient.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_slipCoefficient.setObjectName("horizontalSlider_slipCoefficient")
self.gridLayout_2.addWidget(self.horizontalSlider_slipCoefficient, 7, 2, 1, 1, QtCore.Qt.AlignTop)
self.comboBox_reflectiveMethod = QtWidgets.QComboBox(self.groupBox)
self.comboBox_reflectiveMethod.setObjectName("comboBox_reflectiveMethod")
self.gridLayout_2.addWidget(self.comboBox_reflectiveMethod, 5, 2, 1, 1, QtCore.Qt.AlignTop)
self.horizontalSlider_transparency = QtWidgets.QSlider(self.groupBox)
self.horizontalSlider_transparency.setMaximum(100)
self.horizontalSlider_transparency.setSingleStep(1)
self.horizontalSlider_transparency.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_transparency.setObjectName("horizontalSlider_transparency")
self.gridLayout_2.addWidget(self.horizontalSlider_transparency, 2, 2, 1, 1, QtCore.Qt.AlignTop)
self.horizontalSlider_reflectance = QtWidgets.QSlider(self.groupBox)
self.horizontalSlider_reflectance.setMaximum(100)
self.horizontalSlider_reflectance.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_reflectance.setObjectName("horizontalSlider_reflectance")
self.gridLayout_2.addWidget(self.horizontalSlider_reflectance, 4, 2, 1, 1, QtCore.Qt.AlignTop)
self.pushButton_colorPicker = QtWidgets.QPushButton(self.groupBox)
self.pushButton_colorPicker.setObjectName("pushButton_colorPicker")
self.gridLayout_2.addWidget(self.pushButton_colorPicker, 0, 2, 1, 1, QtCore.Qt.AlignTop)
self.label_4 = QtWidgets.QLabel(self.groupBox)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, 5, 0, 1, 1, QtCore.Qt.AlignTop)
self.label = QtWidgets.QLabel(self.groupBox)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1, QtCore.Qt.AlignTop)
self.horizontalSlider_imperviousness = QtWidgets.QSlider(self.groupBox)
self.horizontalSlider_imperviousness.setMaximum(100)
self.horizontalSlider_imperviousness.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_imperviousness.setObjectName("horizontalSlider_imperviousness")
self.gridLayout_2.addWidget(self.horizontalSlider_imperviousness, 9, 2, 1, 1)
self.pushButton_saveChanges = QtWidgets.QPushButton(self.groupBox)
self.pushButton_saveChanges.setObjectName("pushButton_saveChanges")
self.gridLayout_2.addWidget(self.pushButton_saveChanges, 10, 2, 1, 1)
self.pushButton_reset = QtWidgets.QPushButton(self.groupBox)
self.pushButton_reset.setObjectName("pushButton_reset")
self.gridLayout_2.addWidget(self.pushButton_reset, 10, 0, 1, 1)
self.label_tranparency_value = QtWidgets.QLabel(self.groupBox)
self.label_tranparency_value.setObjectName("label_tranparency_value")
self.gridLayout_2.addWidget(self.label_tranparency_value, 1, 2, 1, 1)
self.label_3 = QtWidgets.QLabel(self.groupBox)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, 3, 0, 1, 1)
self.label_reflectance_value = QtWidgets.QLabel(self.groupBox)
self.label_reflectance_value.setObjectName("label_reflectance_value")
self.gridLayout_2.addWidget(self.label_reflectance_value, 3, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setObjectName("label_2")
self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
self.label_slip_coefficient = QtWidgets.QLabel(self.groupBox)
self.label_slip_coefficient.setObjectName("label_slip_coefficient")
self.gridLayout_2.addWidget(self.label_slip_coefficient, 6, 2, 1, 1)
self.label_5 = QtWidgets.QLabel(self.groupBox)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 6, 0, 1, 1)
self.label_imperviousness = QtWidgets.QLabel(self.groupBox)
self.label_imperviousness.setObjectName("label_imperviousness")
self.gridLayout_2.addWidget(self.label_imperviousness, 8, 2, 1, 1)
self.label_6 = QtWidgets.QLabel(self.groupBox)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, 8, 0, 1, 1)
self.horizontalLayout.addWidget(self.groupBox)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.pushButton_loadXML = QtWidgets.QPushButton(MaterialBrowser)
self.pushButton_loadXML.setObjectName("pushButton_loadXML")
self.horizontalLayout_2.addWidget(self.pushButton_loadXML)
self.pushButton_saveXML = QtWidgets.QPushButton(MaterialBrowser)
self.pushButton_saveXML.setObjectName("pushButton_saveXML")
self.horizontalLayout_2.addWidget(self.pushButton_saveXML)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.retranslateUi(MaterialBrowser)
QtCore.QMetaObject.connectSlotsByName(MaterialBrowser)
def retranslateUi(self, MaterialBrowser):
_translate = QtCore.QCoreApplication.translate
MaterialBrowser.setWindowTitle(_translate("MaterialBrowser", "Form"))
self.groupBox.setTitle(_translate("MaterialBrowser", "Properties"))
self.pushButton_colorPicker.setText(_translate("MaterialBrowser", "Color Picker"))
self.label_4.setText(_translate("MaterialBrowser", "Reflective Method"))
self.label.setText(_translate("MaterialBrowser", "Color"))
self.pushButton_saveChanges.setText(_translate("MaterialBrowser", "Save Changes"))
self.pushButton_reset.setText(_translate("MaterialBrowser", "Reset"))
self.label_tranparency_value.setText(_translate("MaterialBrowser", "TextLabel"))
self.label_3.setText(_translate("MaterialBrowser", "Reflectance"))
self.label_reflectance_value.setText(_translate("MaterialBrowser", "TextLabel"))
self.label_2.setText(_translate("MaterialBrowser", "Transparency"))
self.label_slip_coefficient.setText(_translate("MaterialBrowser", "TextLabel"))
self.label_5.setText(_translate("MaterialBrowser", "Slip Coefficient"))
self.label_imperviousness.setText(_translate("MaterialBrowser", "TextLabel"))
self.label_6.setText(_translate("MaterialBrowser", "Imperviousness"))
self.pushButton_loadXML.setText(_translate("MaterialBrowser", "Load XML"))
self.pushButton_saveXML.setText(_translate("MaterialBrowser", "Save XML"))
```
#### File: johanesmikhael/ContinuityAnalysis/multiprocess.py
```python
import sys
from section_elements import Section, ElementSection
if sys.version_info[:3] >= (2, 6, 0):
import multiprocessing as processing
else:
import processing
def create_section(section_planes, elements):
section_list = []
for section_plane in section_planes:
section = Section()
for element in elements:
element_section = ElementSection.create_element_section(section_plane, element)
if element_section:
section.add_element_section(element_section)
section_list.append(section)
    return section_list
def _create_section_args(args):
    # Unpack the [section_planes, elements] pair built by arguments() so that
    # Pool.map, which passes a single argument per task, can drive create_section.
    section_planes, elements = args
    return create_section(section_planes, elements)
def run_multiprocess_cut(s_planes, elements, n_procs):
def get_section_planes_for_n_procs(section_planes, n_procs):
divided_section_planes = []
n = len(section_planes) // n_procs
for i in range(1, n_procs+1):
if i == 1:
divided_section_planes.append(section_planes[:i*n])
elif i == n_procs:
divided_section_planes.append(section_planes[(i-1)*n:])
else:
divided_section_planes.append(section_planes[(i-1)*n:i*n])
return divided_section_planes
def arguments(section_planes, elem, n_procs):
_tmp = []
divided_section_planes = get_section_planes_for_n_procs(section_planes, n_procs)
for i in divided_section_planes:
print(i)
_tmp.append([i, elem])
return _tmp
P = processing.Pool(n_procs)
    _results = P.map(_create_section_args, arguments(s_planes, elements, n_procs))
print(_results)
return _results
def show_section(section_list, display, is_show_section):
    for section in section_list:
        section.set_visible(display, is_show_section)
def _show_section_args(args):
    # Unpack the [sections, display, is_show_section] triple built by arguments()
    # inside multiprocess_show so Pool.map can call show_section.
    section_list, display, is_show_section = args
    return show_section(section_list, display, is_show_section)
def multiprocess_show(section_list, display, is_show_section):
def get_section_list_for_n_procs(sections, n_procs):
div_section_list = []
n = len(sections) // n_procs
for i in range(1, n_procs+1):
if i == 1:
div_section_list.append(sections[:i*n])
elif i == n_procs:
div_section_list.append(sections[(i-1)*n:])
else:
div_section_list.append(sections[(i-1)*n:i*n])
return div_section_list
def arguments(sections, display, is_show_section, n_procs):
_tmp = []
div_section_list = get_section_list_for_n_procs(sections, n_procs)
for i in div_section_list:
_tmp.append([i, display, is_show_section])
return _tmp
if __name__ == '__main__':
n_procs = processing.cpu_count()
P = processing.Pool(n_procs)
        P.map(_show_section_args, arguments(section_list, display, is_show_section, n_procs))
```
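The nested `get_section_planes_for_n_procs` helper above splits the list of section planes into one chunk per worker process, with the last worker absorbing the remainder. A minimal standalone sketch of the same partitioning scheme, using plain integers instead of section planes (the function name here is illustrative):
```python
def chunk_for_procs(items, n_procs):
    # floor-divide the work; the last chunk takes whatever is left over,
    # mirroring get_section_planes_for_n_procs above
    n = len(items) // n_procs
    chunks = []
    for i in range(1, n_procs + 1):
        if i == n_procs:
            chunks.append(items[(i - 1) * n:])
        else:
            chunks.append(items[(i - 1) * n:i * n])
    return chunks
print(chunk_for_procs(list(range(10)), 3))  # [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
```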
#### File: johanesmikhael/ContinuityAnalysis/slice_elements.py
```python
import ifcproducts
from ifcproducts import BuildingElement
from OCC.BRepAlgoAPI import BRepAlgoAPI_Section
from OCC.BRepAlgoAPI import BRepAlgoAPI_Common
from OCC.BRepBuilderAPI import BRepBuilderAPI_Copy
from OCC.ShapeAnalysis import ShapeAnalysis_FreeBounds
from OCC.TopTools import TopTools_ListIteratorOfListOfShape
from OCC.TopTools import TopTools_HSequenceOfShape
from OCC.TopTools import Handle_TopTools_HSequenceOfShape
from OCC.BRepAdaptor import BRepAdaptor_Curve
from OCC.TopExp import TopExp_Explorer
from OCC.TopAbs import *
from OCC.IntTools import IntTools_EdgeEdge
from OCC.AIS import AIS_Shape
import OCC.Quantity
import OCC.Aspect  # Aspect_TOFM_BOTH_SIDE is used in ElementSlice.display_shape
import OCC.TopAbs
from OCC.Bnd import Bnd_Box
from OCC.BRepBndLib import brepbndlib_Add
from OCC.TopoDS import topods_Wire
from OCC.TopoDS import topods
class Slice(object):
def __init__(self):
self._element_slice_list = []
#self.bottom_pt = None
#self.upper_pt = None
#self.left_pt = None
#self.right_pt = None
def add_element_slice(self, element_section):
self._element_slice_list.append(element_section)
def get_element_slice_list(self):
return self._element_slice_list
def clear_display(self, display):
for element_slice in self._element_slice_list:
element_slice.clear_display(display)
def display_shape(self, display):
for element_slice in self._element_slice_list:
element_slice.display_shape(display)
    def display_coloured_shape(self, display, quantity_colour):
        for element_slice in self._element_slice_list:
            # delegate to the per-element colouring method exposed by ElementSlice
            element_slice.display_coloured_wire(display, quantity_colour)
def copy_slice(self, slice):
for element_slice in slice.get_element_slice_list():
element_slice_copy = element_slice.create_copy()
self._element_slice_list.append(element_slice_copy)
def set_visible(self, display, is_visibile):
for element_slice in self._element_slice_list:
if is_visibile:
element_slice.show_shape(display)
else:
element_slice.hide_shape(display)
'''def get_nearest_intersection_to_edges(self, edges):
points = []
for edge in edges:
points.append(self.get_nearest_intersection(edge))
return points
def get_nearest_intersection(self, edge):
nearest_param = None
edge_curve = BRepAdaptor_Curve(edge)
for element_section in self.get_element_section_list():
param = element_section.nearest_intersection_element(edge)
if param:
if not nearest_param or nearest_param > param[0]:
nearest_param = param[0]
if nearest_param:
point = edge_curve.Value(nearest_param)
else:
last_param = edge_curve.LastParameter()
point = edge_curve.Value(last_param)
return point
def get_nearest_intersection_element(self, edge):
nearest_param = None
edge_curve = BRepAdaptor_Curve(edge)
for element_section in self._element_section_list:
param = element_section.nearest_intersection_element(edge)
if param:
if not nearest_param or nearest_param[0] > param[0]:
nearest_param = param
if nearest_param:
point = edge_curve.Value(nearest_param[0])
return point, nearest_param[1], nearest_param[2]
else:
return None'''
class ElementSlice(object):
def __init__(self, *args):
parent, element = args
self.parent = parent
self.element = element
self.name = element.name
self.is_decomposed = element.is_decomposed
self.children = []
self.shapes_slice = []
#self.ais = []
self.bounding_box = Bnd_Box()
@staticmethod
def create_element_slice(section_box, element, element_slice_parent=None):
intersection = ElementSlice.check_intersection(section_box, element)
if not intersection:
return None
element_slice = None
if not element.is_decomposed:
if len(element.topods_shapes) > 0:
for shape in element.topods_shapes:
material = shape["material"]
topods_shape = ElementSlice.create_slice_from_shape(section_box, shape, element.bounding_box)
if topods_shape: # indicating there is section between element and section plane
shape_slice = [topods_shape, material]
if not element_slice:
element_slice = ElementSlice(element_slice_parent, element)
element_slice.shapes_slice.append(shape_slice)
else: # element are composed by children elements
children_element = element.children
for child in children_element:
child_slice = ElementSlice.create_element_slice(section_box, child, element_slice)
if child_slice:
if not element_slice:
element_slice = ElementSlice(element_slice_parent, element)
child_slice.parent = element_slice
element_slice.children.append(child_slice)
return element_slice
@staticmethod
def check_intersection(section_box, element):
section_plane_b_box = section_box[4]
element_b_box = element.bounding_box
is_out = element_b_box.IsOut(section_plane_b_box)
return not is_out
@staticmethod
def create_slice_from_shape(section_box, shape, bounding_box):
slice_box_topods = section_box[1]
topods_shape = []
cut = BRepAlgoAPI_Common(slice_box_topods, shape["topods_shape"])
shape = cut.Shape()
test_bounding_box = Bnd_Box()
brepbndlib_Add(shape, test_bounding_box)
test_size = test_bounding_box.SquareExtent()
if shape and test_size > 0:
shape_dict = dict()
shape_dict["topods_shape"] = shape
topods_shape.append(shape_dict)
'''# create explorer
exp = OCC.TopExp.TopExp_Explorer(shape, OCC.TopAbs.TopAbs_FACE)
while exp.More():
face, surface_normal, orientation = BuildingElement.break_faces(exp.Current())
exp.Next()
face_dict = dict()
face_dict["topods_shape"] = face
topods_shape.append(face_dict)'''
return topods_shape
else:
return None
#print(shape)
'''if not edge_list.IsEmpty():
edges = TopTools_HSequenceOfShape()
edges_handle = Handle_TopTools_HSequenceOfShape(edges)
wires = TopTools_HSequenceOfShape()
wires_handle = Handle_TopTools_HSequenceOfShape(wires)
edge_list_iterator = TopTools_ListIteratorOfListOfShape(edge_list)
while edge_list_iterator.More():
edge = edge_list_iterator.Value()
edge_list_iterator.Next()
edges.Append(edge)
ShapeAnalysis_FreeBounds.ConnectEdgesToWires(edges_handle, 1e-5, True, wires_handle)
wires = wires_handle.GetObject() #get TopTools_HSequenceOfShape from its handle
for i in range(wires.Length()):
wire_shape = wires.Value(i + 1) # get TopoDS_Shape
topods_wires.append(wire_shape)
return topods_wires
else:
return None'''
def display_shape(self, display):
if not self.is_decomposed:
ais_list = []
for shape_slice in self.shapes_slice:
material = shape_slice[1]
ais_color = OCC.Quantity.Quantity_Color(0.1, 0.1, 0.1, OCC.Quantity.Quantity_TOC_RGB)
transparency = 0
if material is not None:
color = material.get_surface_colour()
ais_color = OCC.Quantity.Quantity_Color(color[0], color[1], color[2], OCC.Quantity.Quantity_TOC_RGB)
transparency = material.get_transparency()
for shape in shape_slice[0]:
# ais = display.DisplayShape(shape, transparency=transparency)
ais_object = AIS_Shape(shape["topods_shape"])
ais_object.SetCurrentFacingModel(OCC.Aspect.Aspect_TOFM_BOTH_SIDE)
ais_object.SetColor(ais_color)
ais = ais_object.GetHandle()
display.Context.Display(ais)
display.Context.SetTransparency(ais, transparency)
# ais_list.append(ais)
shape["ais"] = ais
#self.ais = ais_list
else:
for child in self.children:
child.display_shape(display)
    def display_coloured_wire(self, display, quantity_colour):
        if not self.is_decomposed:
            ais_list = []
            for shape_slice in self.shapes_slice:
                transparency = 0
                for shape in shape_slice[0]:
                    # each entry is a dict holding the TopoDS shape, as built in create_slice_from_shape
                    # ais = display.DisplayShape(shape, transparency=transparency)
                    ais_object = AIS_Shape(shape["topods_shape"])
                    ais = ais_object.GetHandle()
                    ais.GetObject().SetColor(quantity_colour)
                    display.Context.Display(ais)
                    display.Context.SetTransparency(ais, transparency)
                    ais_list.append(ais)
            self.ais = ais_list
        else:
            for child in self.children:
                child.display_coloured_wire(display, quantity_colour)
def hide_shape(self, display):
if not self.is_decomposed:
for shape_slice in self.shapes_slice:
for shape in shape_slice[0]:
display.Context.Erase(shape["ais"])
else:
for child in self.children:
child.hide_shape(display)
def show_shape(self, display):
if not self.is_decomposed:
for shape_slice in self.shapes_slice:
for shape in shape_slice[0]:
display.Context.Display(shape["ais"])
else:
for child in self.children:
child.show_shape(display)
    def clear_display(self, display):
        if not self.is_decomposed:
            # display_shape stores each AIS handle on its shape dict, so remove those...
            for shape_slice in self.shapes_slice:
                for shape in shape_slice[0]:
                    if "ais" in shape:
                        display.Context.Remove(shape["ais"])
            # ...and any handles collected by display_coloured_wire
            for ais in getattr(self, "ais", []):
                display.Context.Remove(ais)
        else:
            for child in self.children:
                child.clear_display(display)
def create_copy(self):
print("begin copy of %s" % self.name)
element_slice = None
if not self.is_decomposed:
element_slice = ElementSlice(self.parent, self.element)
for shape_slice in self.shapes_slice:
topods_shapes, material = shape_slice
topods_shapes_copy = []
for topods_shape in topods_shapes:
shape_copy = BRepBuilderAPI_Copy(topods_shape["topods_shape"])
topods_shape_copy = shape_copy.Shape()
topods_shape_copy_dict = dict()
topods_shape_copy_dict["topods_shape"] = topods_shape_copy
topods_shapes_copy.append(topods_shape_copy_dict)
brepbndlib_Add(topods_shape_copy, element_slice.bounding_box)
shape_slice_copy = [topods_shapes_copy, material]
element_slice.shapes_slice.append(shape_slice_copy)
else: #decomposed element
element_slice = ElementSlice(self.parent, self.element)
for child in self.children:
child_copy = child.create_copy()
element_slice.children.append(child_copy)
return element_slice
def update_bounding_box(self):
self.bounding_box = Bnd_Box()
if not self.is_decomposed:
for shape_slice in self.shapes_slice:
for topods_shape in shape_slice[0]:
brepbndlib_Add(topods_shape["topods_shape"], self.bounding_box)
x_min, y_min, z_min, x_max, y_max, z_max = self.bounding_box.Get()
x_minr = round(x_min, 4)
y_minr = round(y_min, 4)
z_minr = round(z_min, 4)
x_maxr = round(x_max, 4)
y_maxr = round(y_max, 4)
z_maxr = round(z_max, 4)
self.bounding_box.Update(x_minr, y_minr, z_minr, x_maxr, y_maxr, z_maxr )
else:
for child in self.children:
child.update_bounding_box()
'''
def nearest_intersection_element(self, edge):
nearest_param = None
if not self.is_decomposed:
for shape_section in self.shapes_section:
for shape in shape_section[0]:
exp = TopExp_Explorer(shape, TopAbs_EDGE)
while exp.More():
shape_edge = topods.Edge(exp.Current())
intersection = IntTools_EdgeEdge(edge, shape_edge)
intersection.Perform()
if intersection.IsDone():
commonparts = intersection.CommonParts()
for i in range(commonparts.Length()):
commonpart = commonparts.Value(i+1)
parameter = commonpart.VertexParameter1()
if parameter > 0.0:
if not nearest_param or nearest_param[0] > parameter:
nearest_param = parameter, self, shape_section
exp.Next()
else:
for child in self.children:
param = child.nearest_intersection_element(edge)
if param:
if not nearest_param or nearest_param[0] > param[0]:
nearest_param = param
return nearest_param'''
```
#### File: johanesmikhael/ContinuityAnalysis/slice_visualization_ui.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_slice_visualization_gui(object):
def setupUi(self, slice_visualization_gui):
slice_visualization_gui.setObjectName("slice_visualization_gui")
slice_visualization_gui.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(slice_visualization_gui)
self.centralwidget.setObjectName("centralwidget")
slice_visualization_gui.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(slice_visualization_gui)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName("menubar")
slice_visualization_gui.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(slice_visualization_gui)
self.statusbar.setObjectName("statusbar")
slice_visualization_gui.setStatusBar(self.statusbar)
self.retranslateUi(slice_visualization_gui)
QtCore.QMetaObject.connectSlotsByName(slice_visualization_gui)
def retranslateUi(self, slice_visualization_gui):
_translate = QtCore.QCoreApplication.translate
slice_visualization_gui.setWindowTitle(_translate("slice_visualization_gui", "MainWindow"))
```
#### File: johanesmikhael/ContinuityAnalysis/util.py
```python
from OCC.Quantity import Quantity_TOC_RGB, Quantity_Color
from math import fabs
from xml.etree import ElementTree
from xml.dom import minidom
from PyQt5.QtGui import QColor
from colorsys import *
import os
from OCC.STEPControl import STEPControl_Writer, STEPControl_AsIs
from OCC.Interface import Interface_Static_SetCVal
from OCC.IFSelect import IFSelect_RetDone
import networkx as nx
from matplotlib import pyplot, patches
import numpy as np
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def get_text_from_xml(node):
return node.childNodes[0].data
def get_float_from_xml(node):
text = get_text_from_xml(node)
return float(text)
def get_int_from_xml(node):
float_value = get_float_from_xml(node)
return int(float_value)
def write_step_file(a_shape, filename, application_protocol="AP203"):
""" exports a shape to a STEP file
a_shape: the topods_shape to export (a compound, a solid etc.)
filename: the filename
application protocol: "AP203" or "AP214"
"""
# a few checks
if a_shape.IsNull():
raise AssertionError("Shape %s is null." % a_shape)
if application_protocol not in ["AP203", "AP214IS"]:
raise AssertionError("application_protocol must be either AP203 or AP214IS. You passed %s." % application_protocol)
if os.path.isfile(filename):
print("Warning: %s file already exists and will be replaced" % filename)
# creates and initialise the step exporter
step_writer = STEPControl_Writer()
Interface_Static_SetCVal("write.step.schema", application_protocol)
# transfer shapes and write file
step_writer.Transfer(a_shape, STEPControl_AsIs)
status = step_writer.Write(filename)
if not status == IFSelect_RetDone:
raise AssertionError("Error while writing shape to STEP file.")
if not os.path.isfile(filename):
raise AssertionError("File %s was not saved to filesystem." % filename)
def create_folder(folder):
try:
os.mkdir(folder)
except OSError:
print("Creation of the directory %s failed" % folder)
return False
else:
print("Successfully created the directory %s " % folder)
return True
class Color:
def __init__(self):
pass
white = 255, 255, 255
gray = 127, 127, 127
green = 0, 240, 0
red = 240, 0, 0
dark_gray = 32, 32, 32
dark_green = 0, 128, 0
dark_red = 128, 0, 0
ais_yellow = Quantity_Color(1, 1, 0, Quantity_TOC_RGB)
ais_blue = Quantity_Color(0, 0, 1, Quantity_TOC_RGB)
ais_green = Quantity_Color(0, 1, 0, Quantity_TOC_RGB)
ais_red = Quantity_Color(1, 0, 0, Quantity_TOC_RGB)
@staticmethod
def from_factor_to_rgb(r_factor, g_factor, b_factor):
r = int(r_factor * 255)
g = int(g_factor * 255)
b = int(b_factor * 255)
return r, g, b
@staticmethod
def from_rgb_to_factor(r, g, b):
r_factor = float(r) / 255
g_factor = float(g) / 255
b_factor = float(b) / 255
return r_factor, g_factor, b_factor
@staticmethod
def create_qcolor_from_rgb_tuple(rgb_tuple):
color = QColor(rgb_tuple[0], rgb_tuple[1], rgb_tuple[2])
return color
@staticmethod
def create_qcolor_from_rgb_tuple_f(rgb):
rgb_tuple = Color.from_factor_to_rgb(rgb[0], rgb[1], rgb[2])
color = Color.create_qcolor_from_rgb_tuple(rgb_tuple)
return color
@staticmethod
def colour_distance(rgb_tuple1, rgb_tuple2):
squared_distance = Color.squared_colour_distance(rgb_tuple1, rgb_tuple2)
return pow(squared_distance, 0.5)
@staticmethod
def squared_colour_distance(rgb_tuple1, rgb_tuple2):
print("start calc squared distance")
print(rgb_tuple1)
print(rgb_tuple2)
squared_distance = (rgb_tuple2[0] - rgb_tuple1[0]) ** 2 + \
(rgb_tuple2[1] - rgb_tuple1[1]) ** 2 + \
(rgb_tuple2[2] - rgb_tuple1[2]) ** 2
print(squared_distance)
return squared_distance
class ColorInterpolation(object):
def __init__(self, *args):
self.start_color = args[0] # qcolor
self.end_color = args[1] # qcolor
self.min_value = args[2]
self.max_value = args[3]
self.hsv_start = self.get_hsv_from_qcolor(self.start_color)
self.hsv_end = self.get_hsv_from_qcolor(self.end_color)
@staticmethod
def get_hsv_from_qcolor(qcolor):
r = qcolor.redF()
g = qcolor.greenF()
b = qcolor.blueF()
hsv_value = rgb_to_hsv(r, g, b)
return hsv_value
def get_interpolation_from_value(self, value):
if value <= self.min_value:
return self.start_color
elif value >= self.max_value:
return self.end_color
        else:  # the value lies between min_value and max_value of the value domain
min_hue = self.hsv_start[0]
max_hue = self.hsv_end[0]
min_saturation = self.hsv_start[1]
max_saturation = self.hsv_end[1]
min_value = self.hsv_start[2]
max_value = self.hsv_end[2]
fraction = (value - self.min_value) / (self.max_value - self.min_value)
hue = fraction * (max_hue - min_hue) + min_hue
saturation = fraction * (max_saturation - min_saturation) + min_saturation
value = fraction * (max_value - min_value) + min_value
if hue < 0:
hue = 0
elif hue > 1:
hue = 1
if saturation < 0:
saturation = 0
elif saturation > 1:
saturation = 1
if value < 0:
value = 0
elif value > 1:
value = 1
rgb = hsv_to_rgb(hue, saturation, value)
color_interpolation = Color.create_qcolor_from_rgb_tuple_f(rgb)
return color_interpolation
class Orientation:
def __init__(self):
pass
bottom = 0
up = 1
left = 2
right = 3
class Math:
def __init__(self):
pass
@staticmethod
def replace_minimum(compared_value, value):
if value < compared_value:
compared_value = value
return compared_value
@staticmethod
def replace_maximum(compared_value, value):
if value > compared_value:
compared_value = value
return compared_value
@staticmethod
def integer_division(value, divider):
if value >= 0:
results = value // divider
return results
else:
results = value / fabs(value) * (fabs(value) // divider)
return results
@staticmethod
def drange(start, stop, step):
float_list = []
r = start
print(start)
print(stop)
print(step)
while r < stop - step:
float_list.append(r)
r += step
return float_list
``` |
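`ColorInterpolation` maps a scalar within `[min_value, max_value]` to a colour interpolated in HSV space between two `QColor` endpoints. A small usage sketch, assuming PyQt5 is installed and the file above is importable as `util` (the module name is an assumption):
```python
from PyQt5.QtGui import QColor
from util import ColorInterpolation  # module path is an assumption

# interpolate from blue to red over the value range 0..10
interp = ColorInterpolation(QColor(0, 0, 255), QColor(255, 0, 0), 0.0, 10.0)
mid = interp.get_interpolation_from_value(5.0)   # hue roughly halfway between blue and red
print(mid.red(), mid.green(), mid.blue())
```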
{
"source": "johanesmikhael/pyinn",
"score": 2
} |
#### File: pyinn/test/benchmark.py
```python
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.init import kaiming_normal
from pyinn import conv2d_depthwise
from torchnet.meter import TimeMeter
from torch.backends import cudnn
cudnn.benchmark = True
def mobilenet(depth, width, depthwise_function):
cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024]
cast = lambda x: x.cuda()
ni = 32
params = {'conv0': cast(kaiming_normal(torch.Tensor(ni, 3, 3, 3)))}
for i, x in enumerate(cfg):
no = x if isinstance(x, int) else x[0]
params['block%d.conv0' % i] = cast(kaiming_normal(torch.Tensor(ni, 1, 3, 3)))
params['block%d.conv1' % i] = cast(kaiming_normal(torch.Tensor(no, ni, 1, 1)))
ni = no
params = {k: Variable(v, requires_grad=True) for k, v in params.items()}
def f(input, params):
o = F.conv2d(input, params['conv0'], padding=1, stride=2)
o = F.relu(o, inplace=True)
for i, x in enumerate(cfg):
stride = 1 if isinstance(x, int) else x[1]
o = depthwise_function(o, params['block%d.conv0' % i], stride=stride, padding=1)
o = F.conv2d(o, params['block%d.conv1' % i])
o = F.relu(o, inplace=True)
return o
return f, params
def fconv2d(x, w, stride, padding):
return F.conv2d(x, w, stride=stride, padding=padding, groups=x.size(1))
x = torch.autograd.Variable(torch.randn(256,3,224,224).cuda())
f_pyinn, params = mobilenet(18, 1, conv2d_depthwise)
f_torch, params = mobilenet(18, 1, fconv2d)
# warmup
f_pyinn(x, params).sum().backward()
f_torch(x, params).sum().backward()
torch.cuda.synchronize()
meter = TimeMeter('s')
for i in range(10):
f_torch(x, params).sum().backward()
torch.cuda.synchronize()
print(meter.value())
meter.reset()
for i in range(10):
f_pyinn(x, params).sum().backward()
torch.cuda.synchronize()
print(meter.value())
``` |
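The `fconv2d` baseline expresses depthwise convolution with stock `F.conv2d` by setting `groups` equal to the number of input channels, so each channel is filtered by its own 3x3 kernel. A small CPU-only shape check of that construction (tensor sizes are illustrative):
```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 16, 16)   # N, C, H, W
w = torch.randn(8, 1, 3, 3)     # one 3x3 filter per input channel
out = F.conv2d(x, w, padding=1, groups=x.size(1))
print(out.shape)                # torch.Size([2, 8, 16, 16])
```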
{
"source": "johanesmikhael/Self-Attention-GAN-Refuge_2.0",
"score": 2
} |
#### File: johanesmikhael/Self-Attention-GAN-Refuge_2.0/utils.py
```python
import scipy.misc
import numpy as np
import os
from glob import glob
import imageio
import tensorflow as tf
import tensorflow.contrib.slim as slim
from keras.datasets import cifar10, mnist
import matplotlib.pyplot as plt
import pickle
import sys  # used by check_param to abort when the user chooses [A]
class ImageData:
def __init__(self, load_size, channels, crop_pos='center', zoom_range=0.0):
self.load_size = load_size
self.channels = channels
self.crop_pos = crop_pos
self.zoom_range = zoom_range
def image_processing(self, filename):
x = tf.io.read_file(filename)
x_decode = tf.image.decode_jpeg(x, channels=self.channels)
s = tf.shape(x_decode)
w, h = s[0], s[1]
# height, width, channel = x_decode.eval(session=self.sess).shape
c = tf.minimum(w, h)
zoom_factor = 0.15
c_ = tf.cast(tf.cast(c, dtype=tf.float32) * (1 - tf.random.uniform(shape=[])*zoom_factor), dtype=tf.int32)
if self.crop_pos == 'random':
print('crop random')
k = tf.random.uniform(shape=[])
l = tf.random.uniform(shape=[])
w_start = tf.cast(tf.cast((w - c_), dtype=tf.float32) * k, dtype=tf.int32)
h_start = tf.cast(tf.cast((h - c_), dtype=tf.float32) * l, dtype=tf.int32)
else:
w_start = (w - c_) // 2
h_start = (h - c_) // 2
img = x_decode[w_start:w_start + c_, h_start:h_start + c_]
img = tf.image.resize_images(img, [self.load_size, self.load_size])
img = tf.cast(img, tf.float32) / 127.5 - 1
return img
def load_mnist(size=64):
(train_data, train_labels), (test_data, test_labels) = mnist.load_data()
train_data = normalize(train_data)
test_data = normalize(test_data)
x = np.concatenate((train_data, test_data), axis=0)
# y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)
seed = 777
np.random.seed(seed)
np.random.shuffle(x)
# np.random.seed(seed)
# np.random.shuffle(y)
# x = np.expand_dims(x, axis=-1)
x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
x = np.expand_dims(x, axis=-1)
return x
def load_cifar10(size=64) :
(train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
train_data = normalize(train_data)
test_data = normalize(test_data)
x = np.concatenate((train_data, test_data), axis=0)
# y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)
seed = 777
np.random.seed(seed)
np.random.shuffle(x)
# np.random.seed(seed)
# np.random.shuffle(y)
x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
return x
def load_data(dataset_name, size=64) :
x = glob(f'{dataset_name}/*/*.jpg')
x.extend(glob(f'{dataset_name}/*.jpg'))
x.extend(glob(f'{dataset_name}/*/*.png'))
x.extend(glob(f'{dataset_name}/*.png'))
print(x)
return x
def preprocessing(x, size):
x = scipy.misc.imread(x, mode='RGB')
x = scipy.misc.imresize(x, [size, size])
x = normalize(x)
return x
def normalize(x) :
return x/127.5 - 1
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def save_image(image, image_path):
image = inverse_transform(image)
image = to_uint8(image)
imageio.imwrite(image_path, image)
def save_images_plt(images, size, image_path, mode=None):
images = inverse_transform(images)
images = to_uint8(images)
if mode == 'sample':
h = 10
else:
h = 21.6
img_dir = '/'.join(image_path.split('/')[:-1])+'/'+image_path.split('/')[-1][:-4]
print(img_dir)
if not os.path.isdir(img_dir):
os.makedirs(img_dir)
w = size[0]/size[1] * h
plt.figure(figsize=(w,h), dpi=100)
n_rows = size[1]
n_cols = size[0]
for i in range(images.shape[0]):
plt.subplot(n_rows, n_cols, i+1)
image = images[i]
if mode != 'sample':
img_path = f'{img_dir}/{i:03d}.png'
imageio.imwrite(img_path, image)
if image.shape[2] == 1:
plt.imshow(image.reshape((image.shape[0], image.shape[1])), cmap='gray')
else:
plt.imshow(image)
plt.axis('off')
plt.tight_layout()
is_exist = os.path.isfile(image_path)
i = 1
image_path_temp = image_path
while is_exist == True:
image_path = image_path_temp[:-4] + f' ({i:02d})'+image_path_temp[-4:]
is_exist = os.path.isfile(image_path)
i+=1
plt.savefig(image_path)
plt.close()
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
# image = np.squeeze(merge(images, size)) # 채널이 1인거 제거 ?
return imageio.imwrite(path, merge(images, size))
def inverse_transform(images):
return (images+1.)/2.
def to_uint8(images):
return (images * 255).astype(np.uint8)
def check_folder(log_dir):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
def show_all_variables():
model_vars = tf.compat.v1.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def str2bool(x):
return x.lower() in ('true')
def write_param(param_txt_path, param_pickle_path, kwargs):
with open(param_pickle_path, 'wb') as f:
pickle.dump(kwargs, f)
with open(param_txt_path, 'w') as f:
for key, value in kwargs.items():
f.write(f'{key} : {value}\n')
def print_param(kwargs):
for key, val in kwargs.items():
print(f'{key} \t: {val}')
def check_param(param, param_pickle, kwargs):
if os.path.isfile(param_pickle):
with open(param_pickle, 'rb') as f:
saved_kwargs = pickle.load(f)
diff = {}
for key, value in kwargs.items():
if saved_kwargs.get(key) != value:
diff[key] = [saved_kwargs.get(key, None), value]
if diff:
print('The previous parameter is different with the new ones:')
print('------------')
for key, value in diff.items():
print(f'{key} \t: old value = {value[0]}, new value = {value[1]}')
print('------------')
option = ''
while not option in ['p', 'P', 'n', 'N', 'a', 'A']:
try:
print('Select an option:')
print('[P] to continue with the previous param')
print('[N] to continue with the new param')
print('[A] to abort operation')
option = str(input('type [P/N/A] and hit enter : '))
except:
pass
if option in ['p', 'P']:
print('continue with the previous param')
kwargs = saved_kwargs
if option in ['n', 'N']:
print('continue with new param')
write_param(param, param_pickle, kwargs)
if option in ['a', 'A']:
print('aborting operation restart the runtime and run from the beginning')
sys.exit()
else:
write_param(param, param_pickle, kwargs)
print('model parameters is saved')
print_param(kwargs)
return kwargs
``` |
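`check_param` persists the run configuration to a pickle on the first run and, on later runs, prompts interactively whenever the passed values differ from the saved ones. A brief usage sketch (file names and keys are illustrative):
```python
from utils import check_param  # module name is an assumption

kwargs = {"img_size": 64, "batch_size": 16, "lr": 1e-4}
# First run: writes param.txt / param.pkl and returns kwargs unchanged.
# Later runs: compares against param.pkl and asks whether to keep the
# previous values [P], use the new ones [N], or abort [A].
kwargs = check_param("param.txt", "param.pkl", kwargs)
```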
{
"source": "johanfforsberg/fogl",
"score": 2
} |
#### File: fogl/examples/example.py
```python
import logging
import math
from time import time
from pathlib import Path
import pyglet
from pyglet import gl
from euclid3 import Matrix4, Point3
from fogl.debug import DebugWindow
from fogl.framebuffer import FrameBuffer
from fogl.glutil import gl_matrix
from fogl.mesh import ObjMesh, Mesh
from fogl.shader import Program, VertexShader, FragmentShader
from fogl.texture import ImageTexture, Texture, NormalTexture
from fogl.util import try_except_log, load_png
from fogl.vao import VertexArrayObject
from fogl.util import enabled, disabled, debounce
class FoglWindow(pyglet.window.Window):
"""
Pyglet window subclass that draws an scene every frame.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
local = Path(__file__).parent
# Shader setup
self.view_program = Program(
VertexShader(local / "glsl/view_vertex.glsl"),
FragmentShader(local / "glsl/view_fragment.glsl")
)
self.lighting_program = Program(
VertexShader(local / "glsl/copy_vertex.glsl"),
FragmentShader(local / "glsl/copy_fragment.glsl")
)
self.copy_program = Program(
VertexShader(local / "glsl/copy_vertex.glsl"),
FragmentShader(local / "glsl/simple_copy_frag.glsl")
)
# Load a texture
texture = ImageTexture(*load_png(local / "textures/plasma.png"), unit=3)
# Load vertex data from an OBJ file as a "mesh"
# OBJ file belongs to the Blender project.
self.suzanne = ObjMesh(local / "obj/suzanne.obj", texture=texture)
# A simple plane
plane_size = 3
self.plane = Mesh([
# position color normal texture coord
((plane_size, plane_size, 0.), (1., 1., 1.), (0., 0., -1.), (1., 1., 1.)),
((-plane_size, plane_size, 0.), (1., 1., 1.), (0., 0., -1.), (0., 1., 1.)),
((plane_size, -plane_size, 0.), (1., 1., 1.), (0., 0., -1.), (1., 0., 1.)),
((-plane_size, -plane_size, 0.), (1., 1., 1.), (0., 0., -1.), (0., 0., 1.)),
], texture=texture)
# A framebuffer for rendering the shadow light. It needs only a depth texture.
self.shadow_size = 256, 256
self.shadow_buffer = FrameBuffer(self.shadow_size, autoclear=True, depth_unit=3, set_viewport=True)
self.vao = VertexArrayObject()
@debounce(0.1) # Prevent too many events from accumulating
def on_resize(self, width, height):
self.size = width, height
# We need to recreate the offscreen buffer if the window size changes
# This includes when the window is first created.
render_textures = dict(
# These will represent the different channels of the framebuffer,
# that the shader can render to.
color=Texture(self.size, unit=0),
normal=NormalTexture(self.size, unit=1),
position=NormalTexture(self.size, unit=2),
)
self.offscreen_buffer = FrameBuffer(self.size, render_textures, autoclear=True, set_viewport=True)
render_textures2 = dict(
color=Texture(self.size, unit=0),
)
self.offscreen_buffer2 = FrameBuffer(self.size, render_textures2, autoclear=True, set_viewport=True)
return pyglet.event.EVENT_HANDLED # Work around pyglet internals
@try_except_log
def on_draw(self):
# Prevent trying to draw before things have been set up
if not hasattr(self, "offscreen_buffer"):
return
# Model matrix we'll use to position the main model
suzanne_model_matrix = (Matrix4
.new_identity()
.rotatex(-math.pi/2)
.rotatez(time())) # Rotate over time
plane_model_matrix = Matrix4.new_rotatey(math.pi).translate(0, 0, 2)
# Render to an offscreen buffer
with self.offscreen_buffer, self.view_program, \
enabled(gl.GL_DEPTH_TEST), disabled(gl.GL_CULL_FACE):
gl.glDepthMask(gl.GL_TRUE)
w, h = self.size
aspect = h / w
# Calculate a view frustum; this is basically our camera.
near = 5
far = 15
width = 2
height = 2 * aspect
frustum = (Matrix4.new(
near / width, 0, 0, 0,
0, near / height, 0, 0,
0, 0, -(far + near)/(far - near), -1,
0, 0, -2 * far * near/(far - near), 0
))
# The view matrix positions the camera in the scene
view_matrix = (Matrix4
.new_identity()
.translate(0, 0, -8))
# Send the matrices to GL
gl.glUniformMatrix4fv(0, 1, gl.GL_FALSE,
gl_matrix(frustum * view_matrix))
gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE,
gl_matrix(suzanne_model_matrix))
gl.glUniform4f(2, 0.3, 0.3, 1, 1) # Set the "color" uniform to blue
self.suzanne.draw()
# We'll also draw a simple plane behind the main model
gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE,
gl_matrix(plane_model_matrix))
gl.glUniform4f(2, 0.3, 1, 0.3, 1) # Set the "color" uniform to green
self.plane.draw(mode=gl.GL_TRIANGLE_STRIP)
# Render shadow buffer
# Basically the same scene as above, but to a different buffer and from a different view
with self.shadow_buffer, self.view_program, enabled(gl.GL_DEPTH_TEST), disabled(gl.GL_CULL_FACE):
gl.glDepthMask(gl.GL_TRUE)
frustum = Matrix4.new_perspective(1, 1, 1, 12)
view_matrix = (Matrix4
.new_identity()
.translate(0, 0, -4)
.rotatey(0.5)
.rotatex(0.3))
light_pos = (view_matrix.inverse() * Point3(0, 0, 0))
light_view_matrix = frustum * view_matrix
gl.glUniformMatrix4fv(0, 1, gl.GL_FALSE,
gl_matrix(light_view_matrix))
gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE,
gl_matrix(suzanne_model_matrix))
gl.glUniform4f(2, 0.9, 0.3, 0.4, 1)
self.suzanne.draw()
gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE,
gl_matrix(plane_model_matrix))
self.plane.draw(mode=gl.GL_TRIANGLE_STRIP)
# Now draw the offscreen buffer to another buffer, combining it with the
# lighting information to get a nice image.
# Note: This step is pretty pointless here, as we might just draw directly to screen.
# Just demonstrates how to do it.
with self.vao, self.offscreen_buffer2, self.lighting_program, disabled(gl.GL_CULL_FACE, gl.GL_DEPTH_TEST):
gl.glUniform3f(0, *light_pos)
gl.glUniformMatrix4fv(1, 1, gl.GL_FALSE, gl_matrix(light_view_matrix))
# Bind some of the offscreen buffer's textures so the shader can read them.
with self.offscreen_buffer["color"], self.offscreen_buffer["normal"], \
self.offscreen_buffer["position"], self.shadow_buffer["depth"]:
gl.glDrawArrays(gl.GL_TRIANGLES, 0, 6)
# Now render the finished image to the screen
with self.vao, self.copy_program, disabled(gl.GL_CULL_FACE, gl.GL_DEPTH_TEST):
with self.offscreen_buffer2["color"]:
gl.glDrawArrays(gl.GL_TRIANGLES, 0, 6)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
config = pyglet.gl.Config(major_version=4,
minor_version=5,
double_buffer=True)
# This enables the GL error log, really useful for tracking down obscure problems.
# Requires a recent GL version, though. https://www.khronos.org/opengl/wiki/Debug_Output
config.debug = True
w = FoglWindow(config=config, resizable=True)
# DebugWindow() # Simple helper that displays all the offscreen textures
pyglet.clock.schedule_interval(lambda dt: None, 0.01)
pyglet.app.run()
``` |
{
"source": "johanfforsberg/pyimgui",
"score": 2
} |
#### File: imgui/integrations/base.py
```python
import imgui
class BaseOpenGLRenderer(object):
def __init__(self):
if not imgui.get_current_context():
raise RuntimeError(
"No valid ImGui context. Use imgui.create_context() first and/or "
"imgui.set_current_context()."
)
self.io = imgui.get_io()
self._font_texture = None
self.io.delta_time = 1.0 / 60.0
self._create_device_objects()
self.refresh_font_texture()
def render(self, draw_data):
raise NotImplementedError
def refresh_font_texture(self):
raise NotImplementedError
def _create_device_objects(self):
raise NotImplementedError
def _invalidate_device_objects(self):
raise NotImplementedError
def shutdown(self):
self._invalidate_device_objects()
```
#### File: imgui/integrations/cocos2d.py
```python
from __future__ import absolute_import
import cocos
import imgui
from . import compute_fb_scale
from .pyglet import PygletMixin
from .opengl import FixedPipelineRenderer
class ImguiLayer(PygletMixin, cocos.layer.Layer):
is_event_handler = True
def __init__(self):
super(ImguiLayer, self).__init__()
window_size = cocos.director.director.window.get_size()
viewport_size = cocos.director.director.window.get_viewport_size()
self.io = imgui.get_io()
self.io.display_size = window_size
self.io.display_fb_scale = compute_fb_scale(window_size, viewport_size)
self.renderer = FixedPipelineRenderer()
self._map_keys()
``` |
{
"source": "johanfrisk/Python_at_web",
"score": 3
} |
#### File: features/steps/step_fib.py
```python
import fibonacci
from behave import *
@given(u'a count of "{count}"')
def step_impl(context, count):
context.count = int(count)
@when(u'I call the fib function')
def step_impl(context):
    context.result = fibonacci.fib(context.count)
@then(u'it should return the sequence "{sequence}"')
def step_impl(context, sequence):
    expected_seq = [int(s) for s in sequence.split(',')]
assert expected_seq == context.result
``` |
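These steps assume a `fibonacci` module exposing `fib(count)` that returns the first `count` Fibonacci numbers as a list, so the result can be compared against the parsed sequence. A minimal sketch of such a module; the exact sequence convention (starting from 0, 1) is an assumption:
```python
# fibonacci.py -- hypothetical module imported by the step definitions
def fib(count):
    """Return the first `count` Fibonacci numbers, starting 0, 1, 1, 2, ..."""
    sequence = []
    a, b = 0, 1
    for _ in range(count):
        sequence.append(a)
        a, b = b, a + b
    return sequence
```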
{
"source": "JohanFrom/SyllabusCrawler",
"score": 3
} |
#### File: SyllabusCrawler/syllabuscrawler/Crawler.py
```python
from googlesearch import search
from syllabuscrawler.LoggerUtility import LoggerUtility
from syllabuscrawler.Scraper import Scraper
from syllabuscrawler.DataFinder import DataFinder
from syllabuscrawler.ListUtility import ListUtility
class Crawler:
def print_search_word(search_word, amount_links, keyword_list):
search_word_string = f"Search word: {search_word}"
amount_string = f"Amount of pages: {amount_links}"
keywords_string = f"Keywords: {keyword_list}"
LoggerUtility.print_debug(search_word_string)
LoggerUtility.print_debug(amount_string)
LoggerUtility.print_debug(keywords_string)
def scrape_google(keyword, pages, keywords):
empty_list = []
control_list = []
for i, link in enumerate(search(keyword, tld="co.in", num=pages, stop=pages, pause=2)):
LoggerUtility.print_debug(f"{i+1}. {link}")
if ".pdf" in link:
pdf_scrape_result = Scraper.pdf_scraper(link)
splitted_pdf_data = ListUtility.list_formating(pdf_scrape_result)
found_pdf_data = DataFinder.search_for_keyword(splitted_pdf_data, keywords)
if found_pdf_data != control_list:
empty_list += found_pdf_data
else:
html_scrape_result = Scraper.html_scraper(link)
splitted_html_data = ListUtility.list_formating(html_scrape_result)
found_html_data = DataFinder.search_for_keyword(splitted_html_data, keywords)
if found_html_data != control_list:
empty_list += found_html_data
return empty_list
```
#### File: SyllabusCrawler/syllabuscrawler/LoggerUtility.py
```python
import logging
from termcolor import colored
class LoggerUtility:
def print_debug(string):
level = logging.DEBUG
fmt = '[%(levelname)s] %(asctime)s - %(message)s'
datefmt='%Y-%m-%d %H:%M:%S'
logging.basicConfig(level=level, format=fmt, datefmt=datefmt)
if "https://" in string:
return logging.debug(colored(string, "cyan"))
else:
return logging.debug(colored(string, "green"))
def print_error(string):
level = logging.ERROR
fmt = '[%(levelname)s] %(asctime)s - %(message)s'
datefmt='%Y-%m-%d %H:%M:%S'
logging.basicConfig(level=level, format=fmt, datefmt=datefmt)
if level == 40:
return logging.error(colored(string, "red"))
else:
return logging.exception(colored(string, "red"))
``` |
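A short usage sketch of the logger helpers (requires `termcolor`); debug messages containing `https://` are coloured cyan, other debug messages green, and errors red:
```python
from syllabuscrawler.LoggerUtility import LoggerUtility  # same package path used by Crawler.py

LoggerUtility.print_debug("Starting crawl")            # green debug line
LoggerUtility.print_debug("https://example.com/page")  # cyan, because it contains https://
LoggerUtility.print_error("Request failed")            # red error line
```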
{
"source": "johangenis/beer_app_rest_api",
"score": 3
} |
#### File: app/core/models.py
```python
from django.db import models
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
PermissionsMixin,
)
from django.conf import settings
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new User"""
if not email:
raise ValueError("Users must have an email address")
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = "email"
class Tag(models.Model):
"""Tag to be used for a beer"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE
)
def __str__(self):
return self.name
class Beer(models.Model):
"""Beers rated in the app."""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE
)
# name = models.CharField(max_length=255)
ibu = models.IntegerField(default=55)
calories = models.FloatField(max_length=5, default=0)
abv = models.FloatField(max_length=3, default=0)
style = models.CharField(max_length=50, default="Bitter")
brewery_location = models.CharField(max_length=50, default="Some Brewery")
tags = models.ManyToManyField("Tag")
def __str__(self):
return self.name
class Review(models.Model):
"""Review object"""
name = models.CharField(max_length=50, default=None)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE
)
aroma = models.IntegerField(default=2)
appearance = models.IntegerField(default=2)
taste = models.IntegerField(default=2)
@property
def overall(self):
return self.aroma + self.appearance + self.taste
def __str__(self):
return self.name
``` |
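`Review.overall` is a derived property rather than a stored field, so the total always stays consistent with the three component scores. A brief sketch (the `some_user` instance is illustrative):
```python
# some_user is assumed to be an existing User instance
review = Review(name="Pale Ale", user=some_user, aroma=3, appearance=4, taste=5)
print(review.overall)  # 12 -- aroma + appearance + taste, computed on access
```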
{
"source": "johangenis/CarND-Semantic-Segmentation",
"score": 3
} |
#### File: johangenis/CarND-Semantic-Segmentation/main.py
```python
import os.path
import tensorflow as tf
import aug_helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
from moviepy.editor import VideoFileClip
import scipy.misc
import numpy as np
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name(vgg_input_tensor_name)
keep = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3 = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4 = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7 = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return image_input, keep, layer3, layer4, layer7
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
# 1x1 convolution of vgg layer 7
l7_conv_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1,
padding='same',
kernel_initializer = tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# Upsample
l4_ups_inp = tf.layers.conv2d_transpose(l7_conv_1x1, num_classes, 4,
strides=(2, 2),
padding='same',
kernel_initializer = tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# make sure the shapes are the same
# 1x1 convolution of vgg layer 4
l4_conv_1x1 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1,
padding='same',
kernel_initializer = tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connection (element-wise addition)
l4_output = tf.add(l4_ups_inp, l4_conv_1x1)
# Upsample
l3_ups_inp = tf.layers.conv2d_transpose(l4_output, num_classes, 4,
strides=(2, 2),
padding='same',
kernel_initializer = tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# 1x1 convolution of vgg layer 3
l3_conv_1x1 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1,
padding='same',
kernel_initializer = tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# skip connection (element-wise addition)
l3_output = tf.add(l3_ups_inp, l3_conv_1x1)
# Upsample
nn_last_layer = tf.layers.conv2d_transpose(l3_output, num_classes, 16,
strides=(8, 8),
padding='same',
kernel_initializer = tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
return nn_last_layer
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
correct_label = tf.reshape(correct_label, (-1, num_classes))
# define a loss function
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logits, labels= correct_label))
# define a training operation
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
sess.run(tf.global_variables_initializer())
print("Training...")
print()
for i in range(epochs):
print("EPOCH {}".format(i))
for image, label in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss],
feed_dict = {input_image: image,
correct_label: label,
keep_prob: 0.75,
learning_rate: 0.0003})
print("Loss: {:.3f}\n".format(loss))
if loss < .005:
print("Stopping, loss is: {:.3f}\n, less than set threshold of 0.005.".format(loss))
return
tests.test_train_nn(train_nn)
def predict_video(sess, image_shape, logits, keep_prob, input_image):
video_dir = r"./test_video//"
video_library = [["pretVehDetTest_720by480_210-475.mp4", [210, 475]],
["harder_challenge_video-720by576.mp4", [210, 475]],
["testVideo1.mp4", [210, 470]]
]
for video_data in video_library:
rect = video_data[1]
video_output = video_data[0][:-4] +"_out.mp4"
clip1 = VideoFileClip(video_dir + video_data[0])
video_clip = clip1.fl_image(lambda frame: predict_frame(frame, rect, sess, image_shape, logits, keep_prob, input_image))
video_clip.write_videofile(video_output, audio=False)
def predict_frame(im, rect, sess, image_shape, logits, keep_prob, image_pl):
original = im
roi = im[rect[0]:rect[1],0:720]
image = scipy.misc.imresize(roi, image_shape)
im_softmax = sess.run(
[tf.nn.softmax(logits)],
{keep_prob: 1.0, image_pl: [image]})
im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])
segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)
mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))
mask = scipy.misc.toimage(mask, mode="RGBA")
street_im = scipy.misc.toimage(image)
street_im.paste(mask, box=None, mask=mask)
upscale_pred = scipy.misc.imresize(street_im, (rect[1]-rect[0],720))
original[rect[0]:rect[1], 0:720] = upscale_pred
return original
def run():
num_classes = 2
image_shape = (160, 576)
data_dir = './data'
runs_dir = './runs'
model_dir = './trained_model/'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
aug_helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = aug_helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
# TODO: Build NN using load_vgg, layers, and optimize function
epochs = 160
batch_size = 8
#TF place holders:
correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name= 'correct_label')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
input_image, keep_prob, vgg_layer3_out, vgg_layer4_out, vgg_layer7_out = load_vgg(sess, vgg_path)
nn_last_layer = layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes)
logits, train_op, cross_entropy_loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)
# TODO: Train NN using the train_nn function
train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate)
# TODO: Save inference data using aug_helper.save_inference_samples
# aug_helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
predict_video(sess, image_shape, logits, keep_prob, input_image)
aug_helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
save_path = tf.train.Saver().save(sess, model_dir+ "Semantic_seg_trained.ckpt")
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
``` |
{
"source": "johangenis/day-18-start",
"score": 4
} |
#### File: johangenis/day-18-start/main.py
```python
import _tkinter
from turtle import Turtle, Screen
import random
tim = Turtle()
tim.shape("turtle")
tim.color("red")
tim.speed("fastest")
# r = random.random()
# b = random.random()
# g = random.random()
# rgb = (random.random(), random.random(), random.random())
def draw_shapes(num_sides):
angle = 360/num_sides
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(num_sides):
tim.forward(30)
tim.right(angle)
def draw_spirograph(size_of_gap):
for _ in range(int(360 / size_of_gap)):
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
tim.circle(100)
tim.setheading(tim.heading() + size_of_gap)
draw_spirograph(5)
# for shape_sides_n in range(3, 41):
# draw_shapes(shape_sides_n)
def random_walk(num_steps):
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
tim.pensize(width=10)
tim.speed("fastest")
direction = [0, 90, 180, 270, 360]
for _ in range(num_steps):
tim.forward(30)
tim.setheading(random.choice(direction))
# for walk_steps_n in range(3, 201):
# random_walk(walk_steps_n)
def square():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(4):
tim.forward(100)
tim.right(90)
def pentagon():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(5):
tim.forward(100)
tim.right(360/5)
def hexagon():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(6):
tim.forward(100)
tim.right(360/6)
def heptagon():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(7):
tim.forward(100)
tim.right(360/7)
def octagon():
rgb = (random.random(), random.random(), random.random())
tim.pencolor(rgb)
for _ in range(8):
tim.forward(100)
tim.right(360/8)
def dashed_line():
for _ in range(15):
tim.color("red")
tim.forward(10)
tim.penup()
tim.forward(10)
tim.pendown()
# dashed_line()
# square()
# pentagon()
# hexagon()
# heptagon()
# octagon()
screen = Screen()
screen.exitonclick()
``` |
{
"source": "johangenis/flutter-chatbot",
"score": 2
} |
#### File: johangenis/flutter-chatbot/app.py
```python
from flask import Flask, request, jsonify
# from flask import jsonify
import time
app = Flask(__name__)
@app.route("/bot", method=["POST"])
# response
def response():
    query = request.form["query"]  # form field from the POST body
result = query + " " + time.ctime()
return jsonify({"response" : result})
if __name__ == "__main__":
app.run(host="0.0.0.0,")
``` |
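The `/bot` endpoint accepts a form-encoded POST with a `query` field and echoes it back with the server time. A small client sketch using the `requests` library, assuming the Flask dev server is running locally on its default port:
```python
import requests  # third-party client library, used here only for illustration

resp = requests.post("http://localhost:5000/bot", data={"query": "hello"})
print(resp.json())  # e.g. {"response": "hello Mon Jan  1 12:00:00 2024"}
```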
{
"source": "johangenis/intermediate-django-concepts",
"score": 2
} |
#### File: src/core/views.py
```python
from django.shortcuts import render
from .forms import UserRegisterForm
def register_view(request):
form = UserRegisterForm()
if request.method == "POST":
form = UserRegisterForm(request.POST)
if form.is_valid():
form.save()
context = {"form": form}
return render(request, "register.html", context)
```
#### File: src/core/forms.py
```python
from django.conf import settings
from django.contrib.auth import get_user_model, authenticate
from django import forms
# settings.AUTH_USER_MODEL
User = get_user_model()
class UserRegisterForm(forms.ModelForm):
class Meta:
model = User
fields = ["username", "email", "password"]
class UserLoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField()
def clean(self, *args, **kwargs):
username = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
if username and password:
user = authenticate(username=username, password=password)
if not user:
raise forms.ValidationError("This user does not exixt")
if not user.check_password(password):
raise forms.ValidationError(
"The username or password was incorrect"
)
if not user.is_active:
raise forms.ValidationError("This user is not active")
return super(UserLoginForm, self).clean(*args, **kwargs)
```
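As written, `UserRegisterForm` saves the `password` field verbatim through `ModelForm.save()`, so `register_view` would store a plain-text password. A common remedy, sketched below under the assumption that the default user model is used, is to override `save()` and hash the value with `set_password()`; the `PasswordInput` widget is likewise an addition, not part of the original form.
```python
# Hedged sketch: hash the password before saving (not part of the original code).
class UserRegisterForm(forms.ModelForm):
    class Meta:
        model = User
        fields = ["username", "email", "password"]
        widgets = {"password": forms.PasswordInput()}

    def save(self, commit=True):
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password"])  # store a hash, not the raw value
        if commit:
            user.save()
        return user
```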
#### File: src/core/models.py
```python
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
class PostQueryset(models.QuerySet):
def valid(self):
return self.filter(valid=True)
def in_valid(self):
return self.filter(valid=False)
class PostManager(models.Manager):
# def get_queryset(self):
# return super().get_queryset()
def get_queryset(self):
return PostQueryset(self.model, using=self._db)
def valid(self):
return self.get_queryset().valid()
def in_valid(self):
return self.get_queryset().in_valid()
class Post(models.Model):
title = models.CharField(max_length=20)
valid = models.BooleanField(default=False)
objects = PostManager()
def __str__(self):
return self.title
def post_model_post_save_receiver(sender, *args, **kwargs):
print("The save method was called")
post_save.connect(post_model_post_save_receiver, sender=Post)
@receiver(post_delete)
def post_model_post_delete_receiver(sender, *args, **kwargs):
print("The delete method was called")
``` |
{
"source": "johangenis/problems_vs_algorithms",
"score": 4
} |
#### File: johangenis/problems_vs_algorithms/problem_4.py
```python
def sort_012(input_list):
"""
    Given an input array consisting of only 0, 1, and 2, sort the array in a single traversal.
Args:
input_list(list): List to be sorted
"""
# Indexes to keep track of the last index of low (0), middle (1), and bottom of high (2) values
top_of_zero_idx = 0 # top index of 0
one_idx = 0 # index of 1
bottom_of_2_idx = len(input_list) - 1 # bottom index of 2
# Continue to loop while index of 1 is less than bottom index of 2
while one_idx <= bottom_of_2_idx:
# if value at 1 index is 0, then swap the value with the value at top of 0 index
# also increment top of 0 index and 1 index
if input_list[one_idx] == 0:
input_list[one_idx], input_list[top_of_zero_idx] = input_list[top_of_zero_idx], input_list[one_idx]
top_of_zero_idx += 1
one_idx += 1
# if value at 1 index is 1, nothing to do. Increment 1 index
elif input_list[one_idx] == 1:
one_idx += 1
# if value at 1 index is 2, swap the value with the value at bottom of 2 index
# also decrement bottom of 2 index
elif input_list[one_idx] == 2:
input_list[one_idx], input_list[bottom_of_2_idx] = input_list[bottom_of_2_idx], input_list[one_idx]
bottom_of_2_idx -= 1
return input_list
def test_function(test_case):
sorted_array = sort_012(test_case)
print(sorted_array)
if sorted_array == sorted(test_case):
print("Pass")
else:
print("Fail")
# Test case 1 - unsorted array
print("Calling function with un-sorted array: [2, 1, 2, 0, 0, 2, 1, 0, 1, 0, 0, 2, 2, 2, 1, 2, 0, 0, 0, 2, 1, 0, 2, 0, 0, 1]")
# Should print [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2]
# Should print Pass as the result array should be a correctly sorted array
test_function([2, 1, 2, 0, 0, 2, 1, 0, 1, 0, 0, 2, 2, 2, 1, 2, 0, 0, 0, 2, 1, 0, 2, 0, 0, 1])
# Test case 2 - sorted array
# Should print [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2]
print("Calling function with sorted array: [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2]")
# Should print Pass as the result array should be the same sorted array
test_function([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2])
# Test case 3 - array with only a single element
# Should print [0]
print("Calling function with sorted array: [0]")
# Should print Pass as the result array should be the same array
test_function([0])
# Test case 4 - array with empty array
# Should print []
print("Calling function with empty array: []")
# Should print Pass as the result array should also be an empty array
test_function([])
```
#### File: johangenis/problems_vs_algorithms/problem_6.py
```python
def get_min_max(ints):
"""
Return a tuple(min, max) out of list of unsorted integers.
Args:
ints(list): list of integers containing one or more integers
"""
# Handle non-list input
if not isinstance(ints, list):
return None, None
# Define variables for min and max value and initialize to None
min_value = None
max_value = None
for index, value in enumerate(ints):
if index == 0:
min_value = value
max_value = value
if value < min_value:
min_value = value
elif value > max_value:
max_value = value
return min_value, max_value
# Example Test Case of Ten Integers
import random
# Test case 1: random int array
l = [i for i in range(0, 10)] # a list containing 0 - 9
print(f"Test case 1 - random list of int: {l}")
random.shuffle(l)
# Should print "Pass" as the result should be (0, 9)
print ("Pass" if ((0, 9) == get_min_max(l)) else "Fail")
# Test case 2: empty array
print(f"Test case 2 - empty array")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max([])) else "Fail")
# Test case 3: array with single item
print(f"Test case 3 - array with single item")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((1, 1) == get_min_max([1])) else "Fail")
# Test case 4: non array input
print(f"Test case 4 - non array input")
# Should print "Pass" as the result should be (None, None)
print ("Pass" if ((None, None) == get_min_max(10)) else "Fail")
``` |
{
"source": "JohanGovers/home-mon-server",
"score": 2
} |
#### File: src/temperature/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from temperature.models import TempReading
def index(request):
return HttpResponse("Hello, world. You're at the home-mon-server index page.")
def save_temp_reading(request):
if request.method == 'POST':
value = request.POST.get('value')
date = request.POST.get('date')
entry = TempReading(value=value, date=date)
entry.save()
response_msg = 'Added new entry with value set to ' + str(value)
return HttpResponse(
response_msg,
content_type="application/json"
)
else:
return HttpResponse()
``` |
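`save_temp_reading` expects a POST with `value` and `date` form fields. A hedged client sketch follows; the URL path and the date format are assumptions, since the project's `urls.py` and the `TempReading` model are not shown here.
```python
# Hedged sketch: post a reading to save_temp_reading. URL and date format are assumptions.
import requests

payload = {"value": "21.5", "date": "2021-06-01T12:00:00"}
resp = requests.post("http://127.0.0.1:8000/temperature/save/", data=payload)
print(resp.status_code, resp.text)
```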
{
"source": "johan-gras/rl-camb-kaggle-connect-x",
"score": 3
} |
#### File: johan-gras/rl-camb-kaggle-connect-x/check_run.py
```python
import torch as tc
import numpy as np
import scipy
# import gym # allowed but not included in env, yet...
from kaggle_environments import evaluate, make, utils
# Make and view the env
env = make("connectx", debug=True)
env.render()
def my_agent(observation, configuration):
from random import choice
return choice([c for c in range(configuration.columns) if observation.board[c] == 0])
env.reset()
# Play as the first agent against default "random" agent.
env.run([my_agent, "random"])
env.render(mode="ansi", width=500, height=450)
# Play as first position against random agent.
trainer = env.train([None, "random"])
observation = trainer.reset()
while not env.done:
my_action = my_agent(observation, env.configuration)
print("Action taken:", my_action)
observation, reward, done, info = trainer.step(my_action)
# env.render(mode="ansi", width=100, height=90, header=False, controls=False)
env.render()
# EVALUATE - read this section
def mean_reward(rewards):
return sum(r[0] for r in rewards) / float(len(rewards))
# Run multiple episodes to estimate its performance.
print("My Agent vs Random Agent:", mean_reward(evaluate("connectx", [my_agent, "random"], num_episodes=10)))
# Takes longer:
print("My Agent vs Negamax Agent:", mean_reward(evaluate("connectx", [my_agent, "negamax"], num_episodes=3)))
# If in ipython, can do an interactive game with:
# env.play([None, "negamax"], width=500, height=450)
``` |
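The random `my_agent` above only looks at which columns are still open (`observation.board[c] == 0`). A slightly less naive variant, sketched below against the same interface, prefers the centre column when it is playable and otherwise falls back to a random legal move; it can be passed to `evaluate` or `env.run` exactly like `my_agent`.
```python
# Hedged sketch: same agent signature as above, centre-column-first heuristic.
from random import choice

def center_first_agent(observation, configuration):
    playable = [c for c in range(configuration.columns) if observation.board[c] == 0]
    center = configuration.columns // 2
    return center if center in playable else choice(playable)
```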
{
"source": "johangroe/easyserver",
"score": 3
} |
#### File: johangroe/easyserver/DataProcessorServer.py
```python
import TinyUtils as tut
stdcmds = ["push", "pull", "login", "logout", "new_user", "del_user", "test"]
customcmds = []
USERDB = "users.json"
CONTENTDB = "data.json"
## smash every shit into here lol
def go(content):
"""
Central function. Pass the received message, and it'll do all the work for you.
"""
cmd = content.pop(list(content)[0])
not_cmd = content
print(cmd, not_cmd)
resp = {}
## standard commands
if cmd in stdcmds:
## content-commands
if cmd == "push":
resp = commands.push(not_cmd)
elif cmd == "pull":
resp = commands.pull(not_cmd)
## user-commands
elif cmd == "login":
resp = commands.login(not_cmd)
elif cmd == "logout":
resp = commands.logout(not_cmd)
elif cmd == "new_user":
resp = commands.new_user(not_cmd)
elif cmd == "del_user":
resp = commands.del_user(not_cmd)
elif cmd == "test":
resp = commands.test(not_cmd)
tut.db.close()
else:
resp = {"resp":"Err: no such command"}
resp["cmd"] = cmd
return resp
def check_username_password(username, password):
"""
Check if a username is existing and the password is valid.
"""
name_found = False
valid = False
try:
tut.db.close()
except:
pass
tut.db.set(USERDB)
content = tut.documents.get.by_field("username", username)
if content != []:
doc = content[0]
if doc["password"] == password:
valid = True
else:
valid = False
return valid
def check_username_existing(username) -> bool:
"""Check if a username is existing."""
username_found = False
try:
tut.db.close()
except:
pass
tut.db.set(USERDB)
content = tut.documents.get.by_field("username", username)
if content != []:
username_found = True
return username_found
def check_client_loggedin(username, client) -> bool:
client_loggedin = False
clients = ["ios", "android", "win", "linux", "macos"]
if client not in clients:
return {"resp": "Err: no such client"}
if check_username_existing(username) != True:
return {"resp": "Err: name not found"}
tut.db.set(USERDB)
doc = tut.documents.get.by_field("username", username)[0]
clients_loggedin = dict(doc["clients-loggedin-stayloggedin"])
if client not in list(clients_loggedin):
client_loggedin = False
else:
state = clients_loggedin[client]
state = state.split("&")[0]
if state == "True":
client_loggedin = True
elif state == "False":
client_loggedin = False
return client_loggedin
def check_session(session) -> tuple:
"""Gives back ```is-loggedin```, ```username```, ```stay-loggedin```, ```redirect-mode```"""
try:
waste = session["user_name"]
## session is existing
except:
## create new session
session["user_loggedin"] = False
session["user_stayloggedin"] = False
session["user_name"] = None
session["user_redirect_mode"] = "login"
is_loggedin = session["user_loggedin"]
username = session["user_name"]
stayloggedin = session["user_stayloggedin"]
redirect_mode = session["user_redirect_mode"]
return is_loggedin, username, stayloggedin, redirect_mode
def clear_session(session):
session["user_loggedin"] = False
session["user_stayloggedin"] = False
session["user_name"] = None
session["user_redirect_mode"] = "login"
class commands:
"""
All standard commands.
"""
def push(content):
if check_username_existing(content["username"]) != True:
return {"resp": "Err: name not found"}
if check_client_loggedin(content["username"], content["client"]) != True:
return {"resp": "Err: client not logged in"}
tut.db.set(CONTENTDB)
new_doc = []
for item in content["data"]:
try:
del item["id"]
del item["table"]
except:
pass
new_doc.append(item)
try:
doc = tut.documents.get.by_field("username", content["username"])[0]
id = doc["id"]
tut.documents.field.update(id, "data", "data", new_doc)
except:
tut.documents.new(new_doc, )
return {"resp": "check"}
def pull(content):
if check_username_existing(content["username"]) != True:
return {"resp": "Err: name not found"}
if check_client_loggedin(content["username"], content["client"]) !=True:
return {"resp": "Err: client not logged in"}
tut.db.set(CONTENTDB)
return_data = tut.documents.get.by_field("username", content["username"])
for item in return_data:
del item["id"]
del item["table"]
return {"resp": "check", "data": return_data}
def login(content):
if check_username_existing(content["username"]) != True:
return {"resp": "Err: name not found"}
if check_username_password(content["username"], content["password"]) != True:
return {"resp": "Err: invalid pw or name"}
clients = ["ios", "android", "win", "linux", "macos"]
if content["client"] not in clients:
return {"resp": "Err: no such client"}
tut.db.set(USERDB)
doc = tut.documents.get.by_field("username", content["username"])[0]
id = doc["id"]
client_loggedin_stay = doc["clients-loggedin-stayloggedin"]
client_loggedin_stay[content["client"]] = str(True) + "&" + str(content["stayloggedin"])
tut.documents.field.update(id, "clients-loggedin-stayloggedin", "clients-loggedin-stayloggedin", client_loggedin_stay)
return {"resp": "check"}
def logout(content):
if check_username_existing(content["username"]) != True:
return {"resp": "Err: name not found"}
clients = ["ios", "android", "win", "linux", "macos"]
if content["client"] not in clients:
return {"resp": "Err: no such client"}
tut.db.set(USERDB)
doc = tut.documents.get.by_field("username", content["username"])[0]
id = doc["id"]
client_loggedin_stay = doc["clients-loggedin-stayloggedin"]
client_loggedin_stay[content["client"]] = str(False) + "&" + str(False)
tut.documents.field.update(id, "clients-loggedin-stayloggedin", "clients-loggedin-stayloggedin", client_loggedin_stay)
return {"resp": "check"}
def new_user(content):
tut.db.set(USERDB)
if check_username_existing(content["username"]) == True:
return {"resp": "Err: name already assigned"}
else:
            loggedin_staylogged = str(True) + "&" + str(bool(content["stayloggedin"]))
tut.documents.new({"username": content["username"], "password": content["password"], "clients-loggedin-stayloggedin": {content["client"]: loggedin_staylogged}}, "user")
tut.db.set(CONTENTDB)
tut.documents.new({"username": content["username"], "data": ""}, "user")
return {"resp": "check"}
def del_user(content):
tut.db.set(USERDB)
if check_username_existing(content["username"]) != True:
return {"resp": "Err: name not found"}
doc = tut.documents.get.by_field("username", content["username"])[0]
if content["password"] != doc["password"]:
return {"resp": "Err: password incorrect"}
else:
id = doc["id"]
tut.documents.delete(id)
tut.db.set(CONTENTDB)
doc = tut.documents.get.by_field("username", content["username"])[0]
id = doc["id"]
tut.documents.delete(id)
return {"resp": "check"}
def test(content):
return {"resp": "check"}
``` |
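`go()` pops the first key of the incoming dict as the command and hands the remaining keys to the matching handler, so a login request would look roughly like the sketch below. The field names follow `commands.login`; the username/password values are placeholders, the example relies on dict insertion order (Python 3.7+), and it assumes `users.json` has already been created.
```python
# Hedged sketch of calling go() directly; values are placeholders.
request = {
    "cmd": "login",          # the first key's value is taken as the command
    "username": "alice",
    "password": "hunter2",
    "client": "ios",
    "stayloggedin": False,
}
response = go(request)
print(response)  # e.g. {"resp": "check", "cmd": "login"} or an error dict
```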
{
"source": "johangroe/tinyutils",
"score": 3
} |
#### File: johangroe/tinyutils/TinyUtils.py
```python
from typing import Any
import tinydb
from tinydb import where
from tinydb.operations import set, delete
import os.path
import os
import ast
## only needs 3 globals, nice!
database = None
database_path_set = bool(False)
database_path = str("")
## Classes were created to give a better overview than just smashing all funcions raw into the module.
## Class to do all the managing with filepaths, instances, etc.
class db:
"""
Class to handle filepaths & instances.
"""
## Set the path to the db. Why? 'Cause it's used very often later, and it would be annoying to specify it every time.
def set(db_path) -> None:
"""
Set the path to the database-file. Mandatory for all other functions.
"""
global database, database_path_set, database_path
_debug.error_checker("set_db", filepath = db_path)
database = tinydb.TinyDB(db_path)
database_path = str(db_path)
database_path_set = True
## Return the db currently selected, may be useful sometimes.
def get() -> str:
"""
Get the path of the database currently selected.
"""
global database_path
_debug.error_checker("normal")
return str(database_path)
## empty the selected database COMPLETELY, use at own risk!
def empty() -> None:
"""
Delete the whole content of the database.
"""
global database
_debug.error_checker("normal")
database.drop_tables()
database.insert({"id": -1, "temporary_tables": ""})
## delete the entire database FILE COMPLETELY, use at own risk!
def delete() -> None:
"""
Delete the file containing the database.
"""
global database_path, database
_debug.error_checker("normal")
database.close()
os.remove(database_path)
def close() -> None:
"""
Close the database.
"""
global database, database_path, database_path_set
_debug.error_checker("normal")
database.close()
database_path = ""
database_path_set = False
def create(name) -> None:
"""
Create a new database.
"""
file = open(name, "w")
file.close()
db.set(name)
database.insert({"id": -1, "temporary_tables": ""})
database.insert({"id": 1, "table": "table1", "key1": "value1"})
db.close()
class tables:
"""
Class to handle virtual tables.
"""
## Read all virtual tables from the db, and return them
def get() -> list:
"""
Get all virtual tables from the database.
"""
global database
tabless = []
_debug.error_checker("normal")
cont = database.all()
for item in cont:
if item["id"] > 0:
if item["table"] not in tabless:
tabless.append(str(item["table"]))
temps = _debug.temp.get()
for item in temps:
if item not in tabless:
tabless.append(item)
return tabless
def new(name) -> None:
"""
Add a new table to the database.
"""
global database
_debug.error_checker("normal")
if name in tables.get() or name == "":
return
else:
_debug.temp.add(name)
def rename(name_old,name_new) -> None:
"""
Change the name of a table.
"""
global database
_debug.error_checker("normal")
if name_old not in tables.get():
error = f"Given table '{name_old}' was not found"
raise RuntimeError(error)
if name_old == name_new:
return
elif name_new == "":
return
else:
database.update({"table": name_new}, where("table") == name_old)
def merge(table1, table2) -> None:
"""
Merge two tables, where `table1` gets kept and `table2` gets overridden.
"""
global database
_debug.error_checker("normal")
if table1 == table2:
return
if table1 not in tables.get():
error = f"Given table '{table1}' was not found"
raise RuntimeError(error)
if table2 not in tables.get():
error = f"Given table '{table2}' was not found"
raise RuntimeError(error)
cont = database.all()
for item in cont:
if item["id"] > 0:
if item["table"] == table2:
doc_id = item["id"]
database.update({"table":table1}, where("id") == doc_id)
def delete(name) -> None:
"""
Delete a table from the database, all documents in that table get deleted too!
"""
global database
_debug.error_checker("normal")
if name not in tables.get():
error = f"Given table '{name}' was not found"
raise RuntimeError(error)
if _debug.temp.check(name) == True:
_debug.temp.delete(name)
tables_new = tables.get()
cont = database.all()
to_delete = []
for item in cont:
if item["id"] > 0:
if item["table"] == name:
to_delete.append(item.doc_id)
database.remove(doc_ids = to_delete)
## easier to delete all docs and put a new temporary tables doc
def delete_all() -> None:
"""
Delete all tables, which implies deleting all documents.
"""
global database
_debug.error_checker("normal")
database.drop_tables()
database.insert({"id": -1, "temporary_tables": ""})
class documents:
"""
Class to handle documents.
"""
def new(doc, table, desired_id = 0):
"""
Add a new document to the database. Specify an ID if needed, otherwise the next one that is free will be assigned.\n
"""
global database
_debug.error_checker("normal")
## id-checker (check id if free, else take next free one)
if desired_id > 0:
if desired_id in documents.get.ids():
error = f"ID {desired_id} is already in use"
raise RuntimeError(error)
elif desired_id not in documents.get.ids():
free_id = desired_id
elif desired_id == 0:
ids = documents.get.ids()
free_id = 1
while free_id in ids:
free_id += 1
## table-checker:
if table not in tables.get():
error = f"Table '{table}' was not found"
raise RuntimeError(error)
else:
table_to_use = table
## insert the new document
if type(doc) != dict:
error = f"Expected 'dict' but got {type(doc)} instead"
raise RuntimeError(error)
keys = list(doc)
dict_to_insert = {}
dict_to_insert["id"] = int(free_id)
dict_to_insert["table"] = str(table_to_use)
for key in keys:
dict_to_insert[key] = doc[key]
database.insert(dict_to_insert)
class field:
def update(document_id, key_old, key_new, value_new) -> None:
"""
Update a field of a document. You can update key and value at once, or seperately.
"""
global database
_debug.error_checker("normal")
if document_id not in documents.get.ids():
error = f"ID {document_id} was not found"
raise RuntimeError(error)
doc = database.search(where("id") == document_id)
if key_old == key_new and value_new == doc[key_old]:
return
elif key_old == key_new and value_new != doc[key_old]:
database.update(set(key_old, value_new), where("id") == document_id)
return
## pseudo-changing: delete old, insert slightly different new, done
elif key_old != key_new:
database.update(delete(key_old), where("id") == document_id)
database.update({key_new: value_new}, where("id") == document_id)
return
def add(document_id, key, value) -> None:
"""
Add a field to a document.
"""
global database
_debug.error_checker("normal")
if document_id not in documents.get.ids():
error = f"ID {document_id} was not found"
raise RuntimeError(error)
else:
database.update({key: value}, where("id") == document_id)
def delete(document_id, key) -> None:
"""
Delete a field of a document.
"""
global database
_debug.error_checker("normal")
if document_id not in documents.get.ids():
error = f"ID {document_id} was not found"
raise RuntimeError(error)
else:
database.update(delete(key), where("id") == document_id)
def delete(document_id) -> None:
"""
Delete a document COMPLETELY.
"""
global database
_debug.error_checker("normal")
if document_id not in documents.get.ids():
error = error = f"ID {document_id} was not found"
raise RuntimeError(error)
else:
doc = database.get(where("id") == document_id)
id_to_remove = doc.doc_id
ids = []
ids.append(int(id_to_remove))
database.remove(doc_ids = ids)
def change_id(id_old, id_new) -> str:
"""
Change the ID of a document.
"""
global database
_debug.error_checker("normal")
if id_old not in documents.get.ids():
error = f"Given old ID '{str(id_old)}' was not found"
raise RuntimeError(error)
elif id_new in documents.get.ids():
error = f"Given new ID '{str(id_new)}' is already assigned"
raise RuntimeError(error)
else:
if id_old == id_new:
doc = database.search(where("id") == id_old)
return str(doc)
else:
database.update({"id":int(id_new)}, where("id") == id_old)
                doc_changed = database.search(where("id") == id_new)
return str(doc_changed)
def change_table(document_id, table) -> str:
"""
Change the table of a document.
"""
_debug.error_checker("normal")
global database
if str(table) not in tables.get():
error = f"Given table '{str(table)}' was not found"
raise RuntimeError(error)
if document_id not in documents.get.ids():
error = f"Given ID '{str(document_id)}' was not found"
raise RuntimeError(error)
database.update({"table": str(table)}, where("id") == document_id)
doc_changed = database.search(where("id") == document_id)
return str(doc_changed)
## created, cause there would be too much functions flying "raw" around
class get:
"""
Get various stuff/documents.
"""
def ids() -> "list[int, int]":
"""
Get all ID's used in the database.
"""
global database
ids = []
_debug.error_checker("normal")
cont = database.all()
for item in cont:
if item["id"] not in ids and item["id"] > 0:
ids.append(item["id"])
return ids
def by_id(document_id) -> Any:
"""
Get a document by its specified ID. (accepts list or int)\n
Returns a string or list, based on input.
"""
global database
_debug.error_checker("normal")
if isinstance(document_id, int):
if document_id not in documents.get.ids():
error = f"Given ID '{str(document_id)}' was not found"
raise RuntimeError(error)
else:
doc = database.search(where("id") == document_id)
return doc
elif isinstance(document_id, list):
docs = []
for item in document_id:
number = int(item)
if number not in documents.get.ids():
error = f"Given ID '{str(number)}' was not found"
raise RuntimeError(error)
else:
doc = database.search(where("id") == number)
docs.append(doc)
return docs
def by_table(table) -> list:
"""
Get all documents attached to a special table (accepts str or list)
"""
global database
docs = []
_debug.error_checker("normal")
if isinstance(table, str):
if table in tables.get():
cont = database.all()
for item in cont:
if item["id"] > 0:
if str(item["table"]) == table:
docs.append(item)
elif isinstance(table, list):
cont = database.all()
for t in table:
if t in tables.get():
for item in cont:
if item["id"] > 0:
if str(item["table"]) == str(t):
docs.append(item)
return docs
## this method is a horrible solution, wayy more elaborate than it could be, but the method i was planning to use
## is coded that shitty, that you can't use it dynamically, so here we are
def by_field(field, value = "") -> list:
"""
Get all documents by providing a field, and additionally a value. (accepts str or list)
"""
_debug.error_checker("normal")
global database
if value == "":
if isinstance(field, str):
cont = database.all()
docs = []
for item in cont:
document = ast.literal_eval(str(item))
try:
waste = document[field]
if str(document) not in docs:
docs.append(str(document))
except:
pass
return docs
elif isinstance(field, list):
cont = database.all()
docs = []
for item_cont in cont:
document = ast.literal_eval(str(item_cont))
for item_field in field:
try:
waste = document[item_field]
if str(document) not in docs:
docs.append(str(document))
except:
pass
return docs
else:
if isinstance(field, str):
doc = database.search(where(field) == value)
return doc
elif isinstance(field, list):
if len(field) == len(value):
docs = []
ind = 0
for item in field:
docs.append(str(database.search(where(item) == value[ind])))
ind += 1
return docs
else:
error = f"Lists 'field' ({len(field )} items) and 'value' ({len(value)} items need to have the same length"
raise ValueError(error)
## buggy shit, be aware!
class _debug:
"""
Various debugging tools/internals, use with care. Some functions can have the database specified directly,
might be useful for fast usage.
"""
## class to handle temporary tables
class temp:
"""
Handle temporary virtual tables (= virtual tables, that are unused).
"""
def get() -> list:
"""
Get all temporary tables.
"""
global database
_debug.error_checker("normal")
cont = database.all()
for item in cont:
if item["id"] == -1:
temp = str(item["temporary_tables"])
if temp != "":
temps = temp.split(", ")
temps_list = []
for item in temps:
temps_list.append(item)
elif temp == "":
temps_list = []
return temps_list
def check(name) -> bool:
"""
Check if a table is just temporary.
"""
global database
_debug.error_checker("normal")
temps = _debug.temp.get()
if name in temps:
return True
else:
return False
## no idea, why i had to code it that dumb, but its fast and works so nvm
def add(name) -> None:
"""
Create a new temporary table.
"""
global database
_debug.error_checker("normal")
tables_old = _debug.temp.get()
tables_old.append(name)
tables_new = ""
for item in tables_old:
if tables_old[-1] != item:
tables_new = tables_new + str(item) + ", "
else:
tables_new = tables_new + str(item)
database.update({"temporary_tables": tables_new}, where("id") == -1)
def delete(name) -> None:
"""
Delete a temporary table.
"""
global database
_debug.error_checker("normal")
if _debug.temp.check(name) == False:
error = f"Given table '{name}' was not found"
raise RuntimeError(error)
else:
tables_old = _debug.temp.get()
waste = tables_old.remove(name)
tables_new = ""
for item in tables_old:
if tables_old[-1] != item:
tables_new = tables_new + str(item) + ", "
else:
tables_new = tables_new + str(item)
database.update({"temporary_tables": tables_new}, where("id") == -1)
pass
## check if a database is readable for the module, this fuctions raises errors, if the database is not readable
def database_readable(db_path = "") -> "tuple[bool,bool,bool]":
"""
Check if database contains virtual tables / or id's.
Returns if the database is readable, contains tables, contains id's.\n
(to be readable, a database either needs to contain nothing, or id's and tables)
"""
global database, database_path, database_path_set
is_readable = contains_tables = contains_ids = True
if db_path == "":
pass
else:
if os.path.isfile(db_path) == False:
filename = os.path.split(db_path)[1]
error = f"Given file '{filename}' was not found"
raise FileNotFoundError(error)
elif os.path.isfile(db_path) == True:
name, ext = os.path.splitext(db_path)
if ext == ".json":
database = tinydb.TinyDB(db_path)
database_path = str(db_path)
database_path_set = True
else:
error = f"Expected .json file, got {ext} file instead"
raise RuntimeError(error)
cont = database.all()
if cont == []:
contains_tables = contains_ids = False
is_readable = True
return is_readable, contains_tables, contains_ids
for item in cont:
try:
waste = item["table"]
except KeyError:
contains_tables = False
try:
waste = item["id"]
except KeyError:
contains_ids = False
if contains_tables == False and contains_ids == False:
is_readable = False
return is_readable, contains_tables, contains_ids
## little masterpiece, a function that should catch / raise ALL errors that can occur. if this function doesnt raise an error, everything should work correctly.
def error_checker(type_arg = "", **kwargs) -> None:
"""
An error checker, that can be used in a variety of ways. Pass type (and eventually required arguments) to the function, to use it.\n
`set_db` requires: `filepath`\n
`normal` requires: nothing\n
`check_type` requires: `var_to_check` `type_to_check`
"""
global database, database_path, database_path_set
types_args = ["set_db", "normal", "check_type"]
if type_arg == "":
error = "Expected 1 argument, got 0 instead"
raise RuntimeError(error)
if type_arg not in types_args:
error = f"Unknown argument type: {type_arg}"
raise ValueError(error)
## type: set-db (pretty complex, see comments below)
elif type_arg == "set_db":
## step 1: check if all args were given correctly
if "filepath" not in list(kwargs):
keys = "'"
for item in list(kwargs):
keys = keys + str(item)
if list(kwargs).index(item) != len(kwargs)-1:
keys = keys + ", "
keys = keys + "'"
if len(kwargs) == 0:
keys = "no argument"
error = f"Expected 'filepath' argument, got {keys} instead"
raise ValueError(error)
filepath = kwargs["filepath"]
if filepath == "":
error = "Given 'filepath' argument is empty"
raise ValueError(error)
## step 2: check file metadata (right file type, existing etc)
elif os.path.isfile(filepath) == False:
filename = os.path.split(filepath)[1]
error = f"Given file '{filename}' was not found"
raise FileNotFoundError(error)
elif os.path.isfile(filepath) == True and filepath != "":
name, ext = os.path.splitext(filepath)
if ext != ".json":
error = f"Expected .json file, got {ext} file instead"
raise RuntimeError(error)
## step 3: check if the database is usable for this module
else:
## little critical part, cause file-content might not be suitable for tinydb, but fuck it
database = tinydb.TinyDB(filepath)
contains_tables = contains_ids = is_empty = True
cont = database.all()
if cont == []:
contains_tables = contains_ids = False
is_empty = True
elif cont != []:
is_empty = False
for item in cont:
if item["id"] > 0:
try:
waste = item["table"]
except KeyError:
contains_tables = False
try:
waste = item["id"]
except KeyError:
contains_ids = False
if is_empty == False:
if contains_tables == False:
error = "The given database is not readable, because it doesn't contain any virtual tables"
raise RuntimeError(error)
if contains_ids == False:
error = "The given database is not readable, because it doesn't contain any virtual id's"
raise RuntimeError(error)
database.close()
## type: check_type (check if a given var is a specific type)
elif type_arg == "check_type":
## step 1: check if all args were given correctly
if "var_to_check" and "type_to_check" not in list(kwargs):
keys = "'"
for item in list(kwargs):
keys = keys + str(item)
if list(kwargs).index(item) != len(kwargs)-1:
keys = keys + ", "
keys = keys + "'"
if len(kwargs) == 0:
keys = "no argument"
error = f"Expected 'var_to_check' and 'type_to_check' argument, got {keys} instead"
raise ValueError(error)
var_to_check = kwargs["var_to_check"]
type_to_check = kwargs["type_to_check"]
if var_to_check == "":
error = "Given 'var_to_check' argument is empty"
raise ValueError(error)
if type_to_check == "":
error = "Given 'type_to_check' argument is empty"
raise ValueError(error)
## step 2: convert "<type 'list'>" to "list"
type_unreadable = str(type(var_to_check))
type_list = type_unreadable.split("'")
type_readable = type_list[1]
            ## step 3: compare
            if str(type_readable) != str(type_to_check):
error = f"Expected {str(type_to_check)}, got {str(type_readable)} instead"
raise ValueError(error)
elif str(type_readable) == str(type_to_check):
return
## ADD FUTURE KEYWORDS HERE ##
## type: normal (just check if a database is set, or not)
if type_arg == "normal":
if len(kwargs) > 0:
error = f"Expected 1 argument, got {int(len(kwargs)) + 1} arguments instead"
raise RuntimeError(error)
if database_path_set == False:
error = "No db is set, use db.set() to set the path"
raise RuntimeError(error)
elif database_path_set == True:
return
``` |
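A short happy-path sketch of the module follows, using only functions defined above (`db.create`, `db.set`, `tables.new`, `documents.new`, `documents.get.by_field`, `db.close`); the file name and field values are placeholders.
```python
# Hedged usage sketch for TinyUtils; "example.json" and the values are placeholders.
import TinyUtils as tut

tut.db.create("example.json")                     # seeds the file with id -1 and a sample doc
tut.db.set("example.json")                        # select it for the calls below
tut.tables.new("people")                          # register a (temporary) virtual table
tut.documents.new({"name": "Ada", "role": "admin"}, "people")
print(tut.documents.get.by_field("name", "Ada"))  # -> list of matching documents
tut.db.close()
```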
{
"source": "johangwbasson/system-manager",
"score": 3
} |
#### File: johangwbasson/system-manager/sm.py
```python
import argparse
import json
import subprocess
from os import path
def newConfig():
return {'packages': { 'install': [], 'uninstall': []}}
def loadConfig():
try:
if path.exists('system.json'):
with open('system.json') as f:
return json.load(f)
return newConfig()
except IOError:
return newConfig()
def writeConfig(cfg):
try:
f = open('system.json', 'w')
f.write(json.dumps(cfg, indent=4, sort_keys=True))
f.close()
except IOError:
print("Error: Unable to write configuration to file")
def installPackage(pkg):
proc = subprocess.Popen(['sudo', 'pacman', '-Qi', pkg], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if 'was not found' in stderr.decode('utf-8'):
print("Installing package %s" % pkg)
proc = subprocess.Popen(['sudo', 'pacman', '-S', pkg, '--noconfirm'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if 'error: target not found:' in stderr.decode('utf-8'):
proc = subprocess.Popen(['yay', '-S', pkg, '--noconfirm'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if 'Could not find all required packages' in stderr.decode('utf-8'):
print("Error: Package %s not found" % pkg)
return False
print("Package %s installed" % pkg)
return True
print("Package %s is already installed" % pkg)
return False
def removePackage(pkg):
print("Removing package %s" % pkg)
result = subprocess.run(['sudo', 'pacman', '-R', pkg, '--noconfirm'], stdout=subprocess.PIPE)
if 'error: target not found:' in result.stdout.decode('utf-8'):
result = subprocess.run(['yay', '-R', pkg, '--noconfirm'], stdout=subprocess.PIPE)
if 'error: target not found:' in result.stdout.decode('utf-8'):
print("Error: Package %s is not installed" % pkg)
return False
print("Package %s removed" % pkg)
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", help="Add package")
parser.add_argument("-r", help="Remove package")
parser.add_argument("-s", help="Sync system packages to config file")
args = parser.parse_args()
data = loadConfig()
if args.a:
if args.a not in data['packages']['install']:
if (installPackage(args.a)):
if args.a in data['packages']['uninstall']:
data['packages']['uninstall'].remove(args.a)
data['packages']['install'].append(args.a)
writeConfig(data)
if args.r:
if args.r not in data['packages']['uninstall']:
if (removePackage(args.r)):
if args.r in data['packages']['install']:
data['packages']['install'].remove(args.r)
data['packages']['uninstall'].append(args.r)
writeConfig(data)
if args.s:
print("Installing packages")
for pkg in data['packages']['install']:
installPackage(pkg)
print("Removing packages")
for pkg in data['packages']['uninstall']:
removePackage(pkg)
main()
``` |
{
"source": "johanhammar/core",
"score": 2
} |
#### File: components/onewire/test_entity_sysbus.py
```python
from datetime import datetime, timedelta
from unittest.mock import PropertyMock, patch
from pi1wire import (
InvalidCRCException,
NotFoundSensorException,
UnsupportResponseException,
)
from homeassistant.components.onewire.const import (
DEFAULT_OWSERVER_PORT,
DEFAULT_SYSBUS_MOUNT_DIR,
DOMAIN,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import TEMP_CELSIUS
from homeassistant.setup import async_setup_component
from tests.common import async_fire_time_changed, mock_registry
MOCK_DEVICE_ID = "28-111111111111"
MOCK_DEVICE_NAME = "My DS18B20"
MOCK_ENTITY_ID = "sensor.my_ds18b20_temperature"
async def test_onewiredirect_setup_valid_device(hass):
"""Test that sysbus config entry works correctly."""
entity_registry = mock_registry(hass)
config = {
"sensor": {
"platform": DOMAIN,
"mount_dir": DEFAULT_SYSBUS_MOUNT_DIR,
"port": DEFAULT_OWSERVER_PORT,
"names": {
MOCK_DEVICE_ID: MOCK_DEVICE_NAME,
},
}
}
with patch(
"homeassistant.components.onewire.sensor.Pi1Wire"
) as mock_pi1wire, patch("pi1wire.OneWire") as mock_owsensor:
type(mock_owsensor).mac_address = PropertyMock(
return_value=MOCK_DEVICE_ID.replace("-", "")
)
mock_owsensor.get_temperature.side_effect = [
25.123,
FileNotFoundError,
25.223,
InvalidCRCException,
25.323,
NotFoundSensorException,
25.423,
UnsupportResponseException,
25.523,
]
mock_pi1wire.return_value.find_all_sensors.return_value = [mock_owsensor]
assert await async_setup_component(hass, SENSOR_DOMAIN, config)
await hass.async_block_till_done()
assert len(entity_registry.entities) == 1
registry_entry = entity_registry.entities.get(MOCK_ENTITY_ID)
assert registry_entry is not None
assert (
registry_entry.unique_id == f"/sys/bus/w1/devices/{MOCK_DEVICE_ID}/w1_slave"
)
assert registry_entry.unit_of_measurement == TEMP_CELSIUS
# 25.123
current_time = datetime.now()
state = hass.states.get(MOCK_ENTITY_ID)
assert state.state == "25.1"
# FileNotFoundError
current_time = current_time + timedelta(minutes=2)
async_fire_time_changed(hass, current_time)
await hass.async_block_till_done()
state = hass.states.get(MOCK_ENTITY_ID)
assert state.state == "unknown"
# 25.223
current_time = current_time + timedelta(minutes=2)
async_fire_time_changed(hass, current_time)
await hass.async_block_till_done()
state = hass.states.get(MOCK_ENTITY_ID)
assert state.state == "25.2"
# InvalidCRCException
current_time = current_time + timedelta(minutes=2)
async_fire_time_changed(hass, current_time)
await hass.async_block_till_done()
state = hass.states.get(MOCK_ENTITY_ID)
assert state.state == "unknown"
# 25.323
current_time = current_time + timedelta(minutes=2)
async_fire_time_changed(hass, current_time)
await hass.async_block_till_done()
state = hass.states.get(MOCK_ENTITY_ID)
assert state.state == "25.3"
# NotFoundSensorException
current_time = current_time + timedelta(minutes=2)
async_fire_time_changed(hass, current_time)
await hass.async_block_till_done()
state = hass.states.get(MOCK_ENTITY_ID)
assert state.state == "unknown"
# 25.423
current_time = current_time + timedelta(minutes=2)
async_fire_time_changed(hass, current_time)
await hass.async_block_till_done()
state = hass.states.get(MOCK_ENTITY_ID)
assert state.state == "25.4"
# UnsupportResponseException
current_time = current_time + timedelta(minutes=2)
async_fire_time_changed(hass, current_time)
await hass.async_block_till_done()
state = hass.states.get(MOCK_ENTITY_ID)
assert state.state == "unknown"
# 25.523
current_time = current_time + timedelta(minutes=2)
async_fire_time_changed(hass, current_time)
await hass.async_block_till_done()
state = hass.states.get(MOCK_ENTITY_ID)
assert state.state == "25.5"
``` |
{
"source": "johanhoiness/SlothBot",
"score": 3
} |
#### File: johanhoiness/SlothBot/alison.py
```python
__author__ = 'JohnHiness'
import sys
import os
import random
import time
import string
import connection
from time import strftime
import ceq
import json, urllib2
import thread
args = sys.argv
req_files = ['filegen.py', 'connection.py', 'commands.py', 'general.py', 'automatics.py']
for filename in req_files:
if os.path.exists(filename) == False:
print "Required file \"{}\" not found. Make sure you have acquired all files.".format(filename)
sys.exit(1)
import filegen
if os.path.exists('config.py') == False:
print 'No configuration-file found. Generating config.py'
filegen.gen_config()
python = sys.executable
    print str(python) + '||' + str(python) + '||' + ' '.join(sys.argv)
os.execl(python, python, * sys.argv)
if os.path.exists('revar.py') == False:
print 'No reconfigurable file found. Generating revar.py'
filegen.gen_revar()
python = sys.executable
    print str(python) + '||' + str(python) + '||' + ' '.join(sys.argv)
os.execl(python, python, * sys.argv)
import config
import revar
import filegen
import commands
import general
import automatics
if not revar.channels:
revar.channels = config.channel.replace(', ', ',').replace(' ', ',').split(',')
if len(args) > 1:
if args[1].lower() == 'reconfig' or args[1].lower() == 'config':
answr = raw_input("This will have you regenerate the configuration file and all old configurations will be lost.\nAre you sure you want to do this?(y/n) ")
        while answr.lower() != 'y' and answr.lower() != 'n':
answr = raw_input("You must use the letters Y or N to answer: ")
if answr.lower() == 'y':
filegen.gen_config()
sys.exit(0)
if answr.lower() == 'n':
sys.exit(0)
elif args[1].lower() == 'help':
print "Usage: python alison.py <help | reconfig | >"
sys.exit(0)
else:
print "Flag not recognized."
sys.exit(1)
def connect(server, port):
print "Connecting to {} with port {}.".format(server, port)
s = connection.s
readbuffer = ''
try:
s.connect((server, port))
except BaseException as exc:
print 'Failed to connect: ' + str(exc)
sys.exit(1)
s.send("PASS %<PASSWORD>" % config.password)
s.send("USER %s %s %s :%s\n" % (config.bot_username, config.bot_hostname, config.bot_servername, config.bot_realname))
s.send("NICK %s\n" % revar.bot_nick)
mode_found = False
while not mode_found:
readbuffer = readbuffer + s.recv(2048)
temp = string.split(readbuffer, "\n")
readbuffer = temp.pop()
for rline in temp:
rline = string.rstrip(rline)
rline = string.split(rline)
g = general
if rline[0] == "PING":
g.ssend("PONG %s\r" % rline[1])
if rline[1] == '433':
if revar.bot_nick.lower() != config.bot_nick2.lower():
revar.bot_nick = config.bot_nick2
else:
revar.bot_nick += '_'
g.ssend('NICK %s' % revar.bot_nick)
if len(rline) > 2 and rline[1] == '391':
revar.bot_nick = rline[2]
if len(rline) > 2 and rline[1].lower() == 'join':
if not rline[2].lower() in revar.channels:
revar.channels.append(rline[2].lower())
if len(rline) > 2 and rline[1].lower() == 'part':
if rline[2].lower() in revar.channels:
try:
revar.channels.append(rline[2].lower())
except:
pass
if rline[1] == 'MODE':
mode_found = True
g.ssend('JOIN %s' % ','.join(revar.channels))
general.update_user_info()
def server_responses(rline):
g = general
if rline[0] == "PING":
g.ssend("PONG %s\r" % rline[1])
return True
if len(rline) > 4 and rline[3] == '152':
general.append_user_info(rline)
return True
if rline[1] == '433':
if revar.bot_nick.lower() != config.bot_nick2.lower():
revar.bot_nick = config.bot_nick2
else:
revar.bot_nick += '_'
g.ssend('NICK %s' % revar.bot_nick)
return True
if len(rline) > 2 and rline[1] == '391':
revar.bot_nick = rline[2]
return True
if len(rline) > 1 and rline[1].lower() == 'pong':
general.last_pong = time.time()
return True
if len(rline) > 2 and rline[1].lower() == 'join':
if not rline[2].lower() in revar.channels:
revar.channels.append(rline[2].lower())
return True
if len(rline) > 2 and rline[1].lower() == 'nick':
general.update_user_info()
return True
if len(rline) > 2 and rline[1].lower() == 'part':
if rline[2].lower() in revar.channels:
try:
revar.channels.append(rline[2].lower())
except:
pass
return True
if len(rline) > 3 and rline[1] == '319' and rline[2].lower() == revar.bot_nick.lower():
revar.channels = ' '.join(rline[4:])[1:].replace('+', '').replace('@', '').lower().split()
return True
if len(rline) > 2 and rline[1] == '391':
revar.bot_nick = rline[2]
return True
if not rline[0].find('!') != -1:
return True
if len(rline) > 3 and rline[1] == '315':
return True
return False
def find_imdb_link(chanq, msg):
if msg.lower().find('imdb.com/title/') != -1:
imdb_id = msg.lower()[msg.lower().find('imdb.com/title/')+15:][:9]
g.csend(chanq, commands.imdb_info('id', imdb_id))
def botendtriggerd(chant, usert, msgt):
if not general.check_operator(usert):
outp = 'You do not have permission to use any of these commands.'
else:
msgt = general.check_bottriggers(msgt).split()
outp = commands.operator_commands(chant, msgt)
if outp is not None:
for line in outp.split('\n'):
g.csend(chant, line)
time.sleep(1)
def work_command(chanw, userw, msgw):
msgw = general.check_midsentencecomment(msgw)
msgw, rec, notice, pm = general.checkrec(chanw, userw, msgw)
outp = commands.check_called(chanw, userw, msgw)
if outp is not None:
for line in outp.split('\n'):
g.csend(chanw, line, notice, pm, rec)
time.sleep(1)
def work_line(chanl, userl, msgl):
if chanl in general.countdown and msgl.lower().find('stop') != -1:
general.countdown.remove(chanl)
if chanl.find('#') != -1 and (msgl.lower().find('johan') != -1 or msgl.lower().find('slut') != -1):
for item in general.user_info:
if item['nickserv'].lower() == 'sloth':
general.csend(item['nick'], '{} <{}> {}'.format(chanl, userl, msgl))
general.update_seen(chanl, userl, msgl)
if (" "+msgl).lower().find('deer god') != -1 and time.time() - general.deer_god > 30 and revar.deer_god:
general.deer_god = time.time()
general.csend(chanl, "Deer God http://th07.deviantart.net/fs71/PRE/f/2011/223/3/c/deer_god_by_aubrace-d469jox.jpg")
if __name__ == '__main__':
thread.start_new_thread(automatics.get_ftime, ())
connect(config.server, config.port)
thread.start_new_thread(automatics.autoping, ())
thread.start_new_thread(automatics.autoweather, ())
thread.start_new_thread(automatics.checkpongs, ())
thread.start_new_thread(automatics.who_channel, ())
s = connection.s
readbuffer = ''
while True:
readbuffer = readbuffer + s.recv(2048)
temp = string.split(readbuffer, "\n")
readbuffer = temp.pop()
for rline in temp:
rline = string.rstrip(rline)
rline = string.split(rline)
g = general
if not server_responses(rline) and len(rline) > 3:
msg = ' '.join(rline[3:])[1:]
user = rline[0][1:][:rline[0].find('!')][:-1]
chan = rline[2]
if chan.lower() == revar.bot_nick.lower():
chan = user
if config.verbose:
print g.ftime + ' << ' + ' '.join(rline)
else:
print g.ftime + ' << ' + chan + ' <{}> '.format(user) + msg
if general.check_bottriggers(msg):
thread.start_new_thread(botendtriggerd, (chan, user, msg),)
break
thread.start_new_thread(find_imdb_link, (chan, msg), )
thread.start_new_thread(work_line, (chan, user, msg), )
msg = general.check_midsentencetrigger(msg)
msg = general.check_triggers(msg)
if msg:
thread.start_new_thread(work_command, (chan, user, msg), )
```
#### File: johanhoiness/SlothBot/commands.py
```python
__author__ = 'JohnHiness'
import revar
import general
import json
import urllib2
import ceq
import config
import os
import socket
import time
import connection
import sys
import thread
import urllib
import random
from datetime import datetime
b = ceq.cbold
r = ceq.creset
cyan = ceq.ccyan
violet = ceq.cviolet
orange = ceq.corange
def shorten_url(url):
try:
post_url = 'https://www.googleapis.com/urlshortener/v1/url?&key=' + general.google_api
postdata = {'longUrl': url,
'key': general.google_api}
headers = {'Content-Type': 'application/json'}
req = urllib2.Request(
post_url,
json.dumps(postdata),
headers
)
ret = urllib2.urlopen(req).read()
return json.loads(ret)['id']
except BaseException as exc:
return general.get_exc(exc, 'commands.shorten_url()')
def get_hash(imdb_id):
return False
"""
if not general.google_api:
return 'Missing Google API-key.'
try:
torrent_hash = ''
if not revar.get_hash:
return
url3 = "https://yts.to/api/v2/list_movies.json?query_term=" + imdb_id
data3 = json.load(urllib2.urlopen(url3, timeout=8))
quality1080 = ''
quality720 = ''
if data3['data']['movie_count'] == 0:
return ''
for torrent in data3['data']['movies'][0]['torrents']:
if torrent['quality'] == '1080p':
quality1080 = torrent['hash']
elif torrent['quality'] == '720p':
quality720 = torrent['hash']
else:
noquality = [torrent['hash'], torrent['quality']]
if quality1080 != '':
quality = '1080p'
xhash = quality1080
elif quality720 != '':
quality = '720p'
xhash = quality720
else:
quality = noquality[1]
xhash = noquality[0]
return xhash
except BaseException as exc:
return general.get_exc(exc, 'commands.get_hash()') """
def imdb_info(kind, simdb):
if kind == 'id':
url = "http://www.omdbapi.com/?i=" + simdb + "&plot=short&r=json"
elif kind == 'search':
params = {
"q": ' '.join(simdb).replace('\'', '').replace('"', ''),
}
url2 = "http://www.imdb.com/xml/find?json=1&" + urllib.urlencode(params)
print url2
try:
data2 = json.load(urllib2.urlopen(url2, timeout=8))
try:
if len(data2["title_popular"]) < 1:
return "Title not found11."
imdbID = data2["title_popular"][0]["id"]
except:
try:
if len(data2["title_exact"]) < 1:
return "Title not found."
imdbID = data2["title_exact"][0]["id"]
except:
return "Title not found."
url = "http://www.omdbapi.com/?i=" + imdbID
except:
url = "http://www.omdbapi.com/?t=" + '+'.join(simdb)
else:
print 'Wrong function parameters: %s %s' % (kind, simdb)
print 'Getting IMDB-info with url: ' + url
try:
data = json.load(urllib2.urlopen(url, timeout=12))
except urllib2.URLError, e:
if revar.dev:
return "API returned with error: %r" % e
else:
return "Something went wrong requesting information."
except BaseException as exc:
return general.get_exc(exc, 'imdb_info()')
if data['Response'].lower() == 'false':
if data['Error'] == 'Movie not found!':
return 'Title not found.'
else:
return data['Error']
try:
print data
i_id = data['imdbID']
i_link = shorten_url('http://imdb.com/title/' + i_id)[7:]
torrent_hash = get_hash(data['imdbID'])
i_title = data['Title']
i_imdbrating = data['imdbRating']
i_metarating = data['Metascore']
i_type = data['Type']
i_genre = data['Genre']
i_plot = data['Plot']
i_runtime = data['Runtime']
i_released = data['Released'][data['Released'].find(' '):][1:]
i_year = data['Year']
except BaseException as exc:
return general.get_exc(exc, 'commands.imdb_info()-get-info')
if torrent_hash and torrent_hash.find(',') != -1:
return torrent_hash
if i_title == 'N/A':
si_title = ''
else:
si_title = ' %s' % i_title
if i_imdbrating == 'N/A':
si_imdbrating = ''
else:
si_imdbrating = ' ' + b + '|' + b + ' Rating: %s' % i_imdbrating
if i_metarating == 'N/A' or i_metarating == 'N/A':
si_metarating = ''
else:
si_metarating = ' (Meta:%s)' % i_metarating
if i_type == 'N/A':
si_type = '%s[%sIMDB%s]%s' % (b, cyan, r + b, b)
else:
si_type = '%s[%s%s%s]%s' % (b, cyan, i_type.upper(), r + b, b)
if i_genre == 'N/A':
si_genre = ''
else:
si_genre = ' ' + b + '|' + b + ' Genre: %s' % i_genre
if i_runtime == 'N/A':
si_runtime = ''
else:
si_runtime = ' ' + b + '|' + b + ' Runtime: %s' % i_runtime
if i_plot == 'N/A':
si_plot = ''
else:
si_plot = ' ' + violet + b + '|' + b + ' Plot: ' + r + '%s' % i_plot
if i_link == 'N/A':
si_link = ''
else:
si_link = ' ' + b + '|' + b + ' Link: %s' % i_link
if i_year == 'N/A':
si_year = ''
else:
si_year = ' (%s)' % i_year
if torrent_hash:
si_magnet = ' ' + b + '|' + b + ' YIFY-Torrent: %s' % shorten_url(
'https://yts.to/torrent/download/' + torrent_hash + '.torrent').replace('http://', '')
else:
si_magnet = ''
send_text = si_type + ceq.corange + b + si_title + r + ceq.cblue + si_year + r + violet + si_runtime + si_imdbrating + si_metarating + si_genre + ceq.cred + si_link + si_magnet + r + si_plot
if len(send_text) > 424:
send_text = send_text[0:421] + '...'
return send_text.encode('utf-8')
def save_revar(chan):
try:
dict_of_var = {
'midsentence_comment': revar.midsentence_comment, 'midsentence_trigger': revar.midsentence_trigger,
'outputredir_all': revar.outputredir_all, 'outputredir': revar.outputredir, 'ignorelist': revar.ignorelist,
'whitelist': revar.whitelist, 'ignorelist_set': revar.ignorelist_set, 'whitelist_set': revar.whitelist_set,
'end_triggers': revar.end_triggers, 'triggers': revar.triggers, 'get_hash': revar.get_hash,
'bot_nick': "\"" + revar.bot_nick + "\"", 'operators': revar.operators, "channels": revar.channels,
"dev": revar.dev, "location": "\"" + revar.location + "\"", "autoweather": str(int(revar.autoweather)),
"autoweather_time": revar.autoweather_time, "weather_custom": revar.weather_custom,
"chatbotid": revar.chatbotid, "deer_god": revar.deer_god
}
# os.rename( "revar.py", "revar.bak" )
with open("revar.py", "w") as target:
for variable_name in dict_of_var:
target.write("{0} = {1}\n".format(variable_name, dict_of_var[variable_name]))
target.close()
return True
except BaseException as exc:
if revar.dev:
pass
# print 'Failed to save to file, line ' + str(sys.exc_info()[2].tb_lineno) + ': ' + str(exc)
# csend(chan, "Error in when trying to rewrite revar.py, line " + str(sys.exc_info()[2].tb_lineno) + ': ' + str(exc))
else:
pass
# csend(chan, "Something went wrong trying to save.")
return False
def refresh_version(chan):
try:
url7 = "https://api.github.com/repos/johanhoiness/alison/commits"
data7 = json.load(urllib2.urlopen(url7, timeout=8))
if data7[0]['commit']['url'][data7[0]['commit']['url'].find('commits/') + 8:][:7] != '':
connection.commit = data7[0]['commit']['url'][data7[0]['commit']['url'].find('commits/') + 8:][:7]
else:
return False
with open("connection.py", "w") as target:
target.write("import socket\ns = socket.socket( )\ncommit = '{}'\n".format(connection.commit))
target.write("google_api = \"{}\"\n".format(general.google_api))
target.write("personalityforge_api = \"{}\"\n".format(general.personalityforge_api))
target.close()
return True
except BaseException, exc:
if revar.dev:
print 'Failed to save to file, line ' + str(sys.exc_info()[2].tb_lineno) + ': ' + str(exc)
general.csend(chan, "Error in when trying to rewrite connection.py, line " + str(
sys.exc_info()[2].tb_lineno) + ': ' + str(exc))
else:
general.csend(chan, "Something went wrong trying to save.")
return False
operator_cmds = dict(
op=ceq.corange + "Syntax: " + ceq.cblue + "op <user>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will make the user an operator.",
deop=ceq.corange + "Syntax: " + ceq.cblue + "deop <user>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will remove operator-rights from user.",
config=ceq.corange + "Syntax: " + ceq.cblue + "config <set|save>" + ceq.ccyan + " Description: " + ceq.cviolet + "This is to edit many variables in the bot. Use \"config set\" to view them. All the variables can also be permanetly saved by using \"save\" instead of \"set\".",
nick=ceq.corange + "Syntax: " + ceq.cblue + "nick <new nickname>" + ceq.ccyan + " Description: " + ceq.cviolet + "Change the nickname of the bot.",
whitelist=ceq.corange + "Syntax: " + ceq.cblue + "whitelist <user>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will add the user to the whitelist, making them unignoreable when whitelisting is set to True.",
ignore=ceq.corange + "Syntax: " + ceq.cblue + "ignore <user>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will add the user to the ignorelist, making them unnoticeable by the bot.",
mute=ceq.corange + "Syntax: " + ceq.cblue + "mute" + ceq.ccyan + " Description: " + ceq.cviolet + "Will mute the output no matter what.",
unmute=ceq.corange + "Syntax: " + ceq.cblue + "<umute|unmute>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will unmute the output.",
unwhitelist=ceq.corange + "Syntax: " + ceq.cblue + "<unwhitelist|unwhite|niggerfy> <user>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will remove user from whitelist.",
unignore=ceq.corange + "Syntax: " + ceq.cblue + "unignore <user>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will remove user from ignorelist.",
restart=ceq.corange + "Syntax: " + ceq.cblue + "restart" + ceq.ccyan + " Description: " + ceq.cviolet + "Will simply restart the bot.",
compile=ceq.corange + "Syntax: " + ceq.cblue + "compile" + ceq.ccyan + " Description: " + ceq.cviolet + "Will compile all the files the bot needs to run. This will make the bot run remarkably faster.",
join=ceq.corange + "Syntax: " + ceq.cblue + "join <channel>" + ceq.ccyan + " Description: " + ceq.cviolet + "Bot will join the given channel(s).",
part=ceq.corange + "Syntax: " + ceq.cblue + "part <|channel>" + ceq.ccyan + " Description: " + ceq.cviolet + "Bot will part from the given channel(s). If no channel is spessified, it will part with the channel the command was triggered from.",
quit=ceq.corange + "Syntax: " + ceq.cblue + "quit" + ceq.ccyan + " Description: " + ceq.cviolet + "Bot will simply kill its process.",
update=ceq.corange + "Syntax: " + ceq.cblue + "quit" + ceq.ccyan + " Description: " + ceq.cviolet + "Bot will update certain modules.",
git_update=ceq.corange + "Syntax: " + ceq.cblue + "git-update" + ceq.ccyan + " Description: " + ceq.cviolet + "Bot will pull the lastst commit from git, and reboot.",
say=ceq.corange + "Syntax: " + ceq.cblue + "say <channel/user> <text to say>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will have the bot say a specific message to the spessified channel.",
cmd=ceq.corange + "Syntax: " + ceq.cblue + "cmd <raw text>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will make the bot send a specific string to the IRC server. To get more information on how to use this, make sure you read on how the IRC protocoll works.")
def operator_commands(chan, msgs):
try:
if not msgs:
return ''
print 'Configuration call - detected: ' + str(msgs)
variable_list = [
"{0:s}triggers({1:s}string={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet, revar.triggers),
"{0:s}ignorelist({1:s}bool={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet,
str(revar.ignorelist_set)),
"{0:s}whitelist({1:s}bool={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet,
str(revar.whitelist_set)),
"{0:s}commentchar({1:s}bool={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet,
str(revar.midsentence_comment)),
"{0:s}midsentence_trigger({1:s}bool={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet,
str(revar.midsentence_trigger)),
"{0:s}point-output({1:s}on(true)/off(false)={2:s}{3:s}{1:s}, all(true)/op(false)={2:s}{4:s}{0:s})".format(
ceq.ccyan, ceq.cblue, ceq.cviolet, str(revar.outputredir), str(revar.outputredir_all)),
"{0:s}get_hash({1:s}bool={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet, str(revar.get_hash)),
"{0:s}dev({1:s}bool={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet, str(revar.dev)),
"{0:s}location({1:s}string={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet, str(revar.location)),
"{0:s}autoweather({1:s}bool={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet,
str(revar.autoweather)),
"{0:s}autoweather_time({1:s}int={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet,
str(revar.autoweather_time)),
"{0:s}weather_custom({1:s}bool={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet,
str(revar.weather_custom)),
"{0:s}chatbotid({1:s}int={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet, str(revar.chatbotid)),
"{0:s}deer_god({1:s}bool={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet, str(revar.deer_god)),
# "{0:s}variable({1:s}string={2:s}{3:s}{0:s})".format(ceq.ccyan, ceq.cblue, ceq.cviolet, ),
]
if (len(msgs) > 1) and msgs[0].lower() == 'ignore':
revar.ignorelist.append(msgs[1])
return 'Ignoring user %s.' % msgs[1]
if msgs[0].lower() == 'mute':
general.mute.append(chan.lower())
if msgs[0].lower() == 'umute' or msgs[0].lower() == 'unmute':
general.mute.remove(chan.lower())
if (len(msgs) == 2) and msgs[0].lower() == 'unignore':
try:
revar.ignorelist.remove(msgs[1])
return "No longer ignoring user '%s'" % msgs[1]
except:
return "Ignored user was not found. Make sure you typed it in correctly."
if (len(msgs) > 1) and (msgs[0].lower() == 'whitelist' or msgs[0].lower() == 'white'):
revar.whitelist.append(msgs[1])
return 'User %s is now whitelisted' % msgs[1]
        if (len(msgs) == 2) and (msgs[0].lower() == 'unwhitelist' or msgs[0].lower() == 'unwhite'):
try:
revar.whitelist.remove(msgs[1])
return "User '%s' no longer whitelisted" % msgs[1]
except:
return "Whitelisted user was not found. Make sure you typed it in correctly."
if len(msgs) > 0 and msgs[0].lower() == 'config':
if len(msgs) > 1:
if msgs[1].lower() == 'set':
if len(msgs) == 2:
return ceq.cred + "Variables: " + ', '.join(variable_list)
if msgs[2].lower() == 'triggers':
if len(msgs) > 3:
revar.triggers = (' '.join(msgs[3:]).replace(', ', '||')).lower().replace('"', '').replace(
'$botnick', revar.bot_nick.lower()).split('||')
print (' '.join(msgs[3:]).replace(', ', '||'))
print revar.triggers
return 'New triggers: ' + ceq.ccyan + '"' + ceq.cred + (
'%s", "%s' % (ceq.ccyan, ceq.cred)).join(revar.triggers) + ceq.ccyan + '"'
else:
return 'Current triggers(use same format when setting): ' + ceq.ccyan + '"' + ceq.cred + (
'%s", "%s' % (ceq.ccyan, ceq.cred)).join(revar.triggers) + ceq.ccyan + '"'
if msgs[2].lower() == 'location':
if len(msgs) > 3:
revar.location = ' '.join(msgs[3:])
return 'New location: ' + ceq.cred + revar.location
else:
return 'Current location: ' + ceq.cred + revar.location
if msgs[2].lower() == 'ignorelist' or msgs[2].lower() == 'ignore':
if len(msgs) > 3:
if msgs[3].lower() == 'true':
revar.ignorelist_set = True
return 'Ignorelist set to True. I will now ignore any users on that list.'
elif msgs[3].lower() == 'false':
revar.ignorelist_set = False
return 'Ignorelist set to False. I will no longer ignore any users that are on the ignorelist.'
else:
return 'Use "true" or "false".'
else:
                            return 'Enable or disable the ignore-feature. Default is Off. Use "config set ignorelist <true|false>" to set.'
if msgs[2].lower() == 'whitelist' or msgs[2].lower() == 'white':
if len(msgs) > 3:
if msgs[3].lower() == 'true':
revar.whitelist_set = True
return 'Whitelist set to True. I will now ignore any users NOT on that list.'
elif msgs[3].lower() == 'false':
revar.whitelist_set = False
return 'Whitelist set to False. I will no longer ignore any users that aren\'t on the whitelist.'
else:
return 'Use "true" or "false".'
else:
return 'Enable or disable the whitelist-feature. Default is Off. Use "config set whitelist <true|false>" to set.'
if msgs[2].lower() == 'commentchar' or msgs[2].lower() == 'comment':
if len(msgs) > 3:
if msgs[3].lower() == 'true':
revar.midsentence_comment = True
return 'Midsentence_comment set to True.'
elif msgs[3].lower() == 'false':
revar.midsentence_comment = False
return 'Midsentence_comment set to False.'
else:
return 'Use "true" or "false".'
else:
                            return 'Enable or disable the midsentence-commentout-feature. Default is On. Use "config set commentchar <true|false>" to set.'
if msgs[2].lower() == 'deer_god':
if len(msgs) > 3:
                            if msgs[3].lower() == 'true':
                                revar.deer_god = True
                                return 'Deer_god set to True.'
                            elif msgs[3].lower() == 'false':
                                revar.deer_god = False
                                return 'Deer_god set to False.'
else:
return 'Use "true" or "false".'
else:
return 'Enable or disable the Deer God.'
if msgs[2].lower() == 'weather_custom':
if len(msgs) > 3:
if msgs[3].lower() == 'true':
revar.weather_custom = True
return 'Weather_custom set to True.'
elif msgs[3].lower() == 'false':
revar.weather_custom = False
return 'Weather_custom set to False.'
else:
return 'Use "true" or "false".'
else:
return 'Enable or disable custom weather descriptions.'
if msgs[2].lower() == 'chatbotid':
if len(msgs) > 3:
if not msgs[3].isdigit():
                                return 'Variable is an integer. Use only numbers.'
revar.chatbotid = int(msgs[3])
return 'Chatbotid is set to ' + str(revar.chatbotid)
else:
return "Change the ChatBotID from PersonalityForge. To see a list of available ID's, go to http://personalityforge.com Make sure the new ChatBot is made 'Run Free' by the creator."
if msgs[2].lower() == 'dev':
if len(msgs) > 3:
if msgs[3].lower() == 'true':
revar.dev = True
return 'Dev set to True.'
elif msgs[3].lower() == 'false':
revar.dev = False
return 'Dev set to False.'
else:
return 'Use "true" or "false".'
else:
                            return 'Enable or disable certain failsafes, making the bot less stable but outputting better error-messages. Default is False. Use "config set dev <true|false>" to set.'
if msgs[2].lower() == 'midsentence_trigger' or msgs[2].lower() == 'midtrigger':
if len(msgs) > 3:
if msgs[3].lower() == 'true':
revar.midsentence_trigger = True
return 'Midsentence_trigger set to True.'
elif msgs[3].lower() == 'false':
revar.midsentence_trigger = False
return 'Midsentence_trigger set to False.'
else:
return 'Use "true" or "false".'
else:
                            return 'Enable or disable the midsentence-trigger-feature. Type ":(<command>)" in any part of the message to trigger commands. Default is Off. Use "config set midsentence_trigger <true|false>" to set.'
                    if msgs[2].lower() == 'autoweather':
if len(msgs) > 3:
if msgs[3].lower() == 'true':
revar.autoweather = True
return 'Autoweather set to True.'
elif msgs[3].lower() == 'false':
revar.autoweather = False
return 'Autoweather set to False.'
else:
return 'Use "true" or "false".'
else:
return 'Enable or disable the automatic forecast.'
if msgs[2].lower() == 'get_hash':
if len(msgs) > 3:
if msgs[3].lower() == 'true':
revar.get_hash = True
return 'Get_hash set to True.'
elif msgs[3].lower() == 'false':
revar.get_hash = False
return 'Get_hash set to False.'
else:
return 'Use "true" or "false".'
else:
                            return 'Enable or disable IMDB trying to get hash/torrents (quickens response time). Default is True. Use "config set get_hash <true|false>" to set.'
if msgs[2].lower() == 'autoweather_time':
if len(msgs) > 3:
if not msgs[3].isdigit():
return 'Variable must be numbers only'
if not len(msgs[3]) == 4:
return 'You must use 4 digits for this variable.'
revar.autoweather_time = int(msgs[3])
return 'New autoweather_time: ' + str(revar.autoweather_time)
else:
                            return 'Change what time the autoweather should trigger. Format is digits only, as HHMM.'
if msgs[2].lower() == 'point-output' or msgs[2].lower() == 'outputredir':
if len(msgs) > 3:
if msgs[3].lower() == 'true':
revar.outputredir = True
return 'Point-output set to On.'
elif msgs[3].lower() == 'false':
revar.outputredir = False
return 'Point-output set to Off.'
elif msgs[3].lower() == 'all':
revar.outputredir_all = True
return 'Point-output set to available for all.'
elif msgs[3].lower() == 'ops' or msgs[3].lower() == 'op':
revar.outputredir_all = False
return 'Point-output set to available only for operators.'
else:
return 'Use "true", "all", "ops" or "false".'
else:
return 'Enable or disable the point-output feature. See "help point-output". Default is True, for only ops. Use "config set point-output <all|ops|true|false>" to set.'
if msgs[1].lower() == 'save':
if save_revar(chan):
return "Configurations successfully saved to file."
else:
return 'Here you can edit configurations and other variables of the bot. From here you can either "set" or "save". By setting you are changing the current bot, and by saving you are changing files of the bot - making the configuration permanent.'
if len(msgs) > 0 and msgs[0].lower() == 'op':
general.update_user_info()
if len(msgs) > 1:
if not msgs[1].lower() in general.user_info.keys():
return "User not found. Remember, you op someone by their nickname."
if general.user_info[msgs[1].lower()]["nickserv"] == '0':
return "User must be logged in with Nickserv."
if general.user_info[msgs[1].lower()]["nickserv"] in revar.operators:
return "User is already an operator."
revar.operators.append(general.user_info[msgs[1].lower()]["nickserv"])
return "{} is now an operator.".format(msgs[1])
"""
for item in general.user_info:
if msgs[1].lower() == item['nick'].lower():
if item['nickserv'] == '0':
return 'User must be logged in with Nickserv.'
if item['nickserv'].lower() in revar.operators:
return 'User allready an operator.'
revar.operators.append(item['nickserv'].lower())
return '{} is now an operator.'.format(msgs[1])
"""
else:
return 'Usage: "op <nick>".'
if len(msgs) > 0 and msgs[0].lower() == 'nick':
general.update_user_info()
general.ssend("NICK " + msgs[1])
general.ssend("TIME")
revar.bot_nick = msgs[1]
if msgs[0].lower() == 'quit':
general.csend(','.join(revar.channels), "I'm off!")
general.ssend('QUIT ' + config.leave_message)
thread.interrupt_main()
if msgs[0].lower() == 'join':
if len(msgs) < 2:
return 'No channel specified.'
revar.channels.append(msgs[1].lower())
general.ssend('JOIN {}'.format(msgs[1].lower()))
general.ssend('TIME')
general.ssend('WHOIS {}'.format(revar.bot_nick.lower()))
general.update_user_info()
return 'Joined {}.'.format(msgs[1])
if msgs[0].lower() == 'part':
if len(msgs) > 1:
chan_to_leave = msgs[1].lower()
else:
chan_to_leave = chan
if chan_to_leave not in revar.channels:
return "I'm not in that channel."
general.ssend('PART {}'.format(chan_to_leave))
revar.channels.remove(chan_to_leave)
return 'Parted with {}.'.format(chan_to_leave)
if msgs[0].lower() == 'say':
if not len(msgs) > 2:
return "Bad input. To see more information on this, see help."
general.csend(msgs[1].lower(), ' '.join(msgs[2:]))
if msgs[0].lower() == 'cmd':
if not len(msgs) > 1:
return "Missing string to send."
general.ssend(' '.join(msgs[1:]))
if msgs[0].lower() == 'restart':
general.csend(chan, 'Restarting..')
general.ssend('QUIT ' + config.leave_message)
python = sys.executable
            print str(python) + '||' + str(python) + '||' + ' '.join(sys.argv)
os.execl(python, python, *sys.argv)
if msgs[0].lower() == 'git-update':
print 'Pulling from Git and updating...'
try:
url4 = "https://api.github.com/repos/johanhoiness/alison/commits"
data4 = json.load(urllib2.urlopen(url4, timeout=4))
general.csend(chan,
ceq.ccyan + 'Last commit: ' + ceq.cviolet + data4[0]['commit']['message'].encode('utf-8'))
except:
print 'Failed to get commit-message from git.'
try:
outp = os.system("git pull http://github.com/johanhoiness/alison")
if outp != 0:
return "Update failed."
outp2 = os.system(
"python -O -m py_compile alison.py connection.py ceq.py config.py revar.py commands.py general.py automatics.py")
if outp2 != 0:
return "Download was successful but the compilation failed."
if not refresh_version(chan):
                    return 'Something went wrong updating local commit-id.'
general.ssend('QUIT ' + config.leave_message)
python = sys.executable
                print str(python) + '||' + str(python) + '||' + ' '.join(sys.argv)
os.execl(python, python, *sys.argv)
return 'Done'
except:
return 'Download or installation failed.'
if len(msgs) > 0 and msgs[0].lower() == 'deop':
general.update_user_info()
if len(msgs) > 1:
if msgs[1].lower() not in revar.operators:
return "User was not found to be an operator. Make sure you typed it correctly and remember that the user is the Nickserv user."
revar.operators.remove(msgs[1].lower())
return "{} is no longer an operator.".format(msgs[1])
else:
return 'Usage: "deop <nickserv user>"'
if len(msgs) > 0 and msgs[0].lower() == 'help':
if len(msgs) > 1:
try:
return operator_cmds[msgs[1].lower()]
except:
return "Command or function not found. Make sure you typed it in correctly."
else:
retrn = "This help is for operator- commands and functions. There are currently %d of them. To use any of them, they must start by saying \"%s\" first, and can only be accessed by operators. To get more information on the command/function, use \"help <command>\"." % (
len(operator_cmds), revar.bot_nick) + '\n'
return retrn + "These are the ones available: " + ', '.join(operator_cmds.keys())
if len(msgs) > 0 and msgs[0] == 'save':
return "All configurations can be saved by using \"config save\"."
if len(msgs) == 0:
return "All commands launched this way is for operators only. It is only to edit settings and variables. See \"%s: help\" for more information." % revar.bot_nick
except BaseException as exc:
if revar.dev:
print 'Error in definitions.operator_commands(), line ' + str(sys.exc_info()[2].tb_lineno) + ': ' + str(exc)
return "Error in definitions.operator_commands(), line " + str(sys.exc_info()[2].tb_lineno) + ': ' + str(
exc)
else:
return "Something went wrong processing operator command."
weather_codes = {
200: "There is a bloody light thundarrstorm on the way",
201: "There is a bloody thundarrstorm on the way",
202: "There is a bloody heavy thundarrstorm on the way",
210: "Light thunderstorm on the loose",
211: "THUNDARR",
212: "Beware, beware. There's heavy thunderstorm about",
221: "Raggidy ragged thunderstorm",
230: "Thundar with a little drizzily rain",
231: "Thundar with drizzle",
232: "Thundarstorm with a heavy drizzle",
300: "Light dense drizzzle",
301: "Drizzely drizzle",
302: "The drizzle is heavy with this one",
310: "Light intensity drizzely rain",
311: "Drizzely rain",
312: "Heavy intensity drizzely rain",
313: "It's showerin raiiin",
314: "It's showerin heavy",
321: "Drizzely shower",
500: "Lighty light rain",
501: "Moderetly rainy rain",
502: "The rain is intense",
503: "The rain is VERY intense",
504: "EXTREME RAIN",
511: "Freezin rain",
520: "Lighty dense rain",
521: "Showerin rain",
522: "Dense shower rain",
531: "Raggedy showerin rain",
600: "Lightly crystalized dihydrogenmonoxide",
601: "Raining crystalized dihydrogenmonoxide",
602: "Heavy raining crystalized dihydrogenmonoxide",
611: "Sleetly raining crystalized dihydrogenmonoxide",
612: "Showring dihydrogenmonoxide crystals",
615: "Lightly raining dihydrogenmonoxide- crystalized and not",
616: "Raining dihydrogenmonoxide- crystalized and not",
620: "Lightly showring crystalized dihydrogenmonoxide",
621: "Showering crystalized dihydrogenmonoxide",
622: "Heavely showering crystalized dihydrogenmonoxide",
701: "The clouds are attacking",
711: "Throwing Smoke!",
721: "It's hazy",
731: "Quite dusty",
741: "The clouds are attacking",
751: "The sandy sands are invading",
761: "de_dust",
762: "Filled with carbon from a volcano",
771: "It's squallin",
781: "A tornado is being a tornado",
800: "If you look up you'll see nothing but the upper atmosphere",
801: "Some collections of dihydromonoxide can be seen floating in the sky",
802: "Scattered clouds can be seen in the sky",
803: "Broken clouds can be seen in the sky",
804: "Overcast clouds in the sky",
900: "idk something about a tornado",
901: "A tropical storm",
902: "Hurricane, yo",
903: "It'z freeezzzin",
904: "It's so haht",
905: "Quite windy",
906: "It's hailin'",
951: "The air is calm",
952: "It's a litey breeze",
953: "It's a gentlebreeze",
954: "It's a moderatly tense breeze",
955: "It's a freshy breeze",
956: "The breeze is strong",
957: "The wind is tall with a near gale",
958: "It's gale",
959: "It's severe gale",
960: "A Storm of Destiny",
961: "It's a bloody violent storm",
962: "A bloody huricane",
## For information on the weathercodes: http://openweathermap.org/weather-conditions
}
def weather(location=revar.location.split()):
try:
url5 = "http://api.openweathermap.org/data/2.5/weather?q={0}&mode=json".format(str('+'.join(location)))
data5 = json.load(urllib2.urlopen(url5, timeout=8))
if config.verbose:
print data5
if data5['cod'] == '404':
return "Location not found."
if data5['cod'] != 200:
return 'Error in request.'
if revar.weather_custom and data5['weather'][0]['id'] in weather_codes.keys():
w_desc = weather_codes[data5['weather'][0]['id']]
else:
w_desc = data5['weather'][0]['description']
w_temp = data5['main']['temp'] - 273.15
w_country = data5['sys']['country']
w_wind = data5['wind']['speed']
if w_country == '':
return "Location not found."
w_city = data5['name']
text_to_send = "{0}Current weather of {3}{4}{0}, {1}{2}{0}: {11}{6}{0}, {10}with a temperature of {7}{8}&DEGREE;{10} celsius and a windspeed of {7}{9}{10} m/s.".format(
ceq.cblue, ceq.cred, w_country.encode('utf-8'), ceq.cviolet, w_city.encode('utf-8'), ceq.ccyan, w_desc,
ceq.corange, w_temp, w_wind, ceq.clcyan, ceq.cgreen, ceq.degree)
return text_to_send.decode('utf-8').encode('utf-8')
except BaseException as exc:
return general.get_exc(exc, 'commands.weather()')
def forecast(location=revar.location.split()):
try:
url5 = "http://api.openweathermap.org/data/2.5/forecast?q={}&mode=json&cnt=9".format(str('+'.join(location)))
data6 = json.load(urllib2.urlopen(url5, timeout=8))
if config.verbose:
print data6
if data6['cod'] == '404':
return "Location not found."
if data6['cod'] != '200':
return 'Error in request.'
nowt = datetime.now()
seconds_since_midnight = int((nowt - nowt.replace(hour=0, minute=0, second=0, microsecond=0)).total_seconds())
seconds_to = (24 * 3600) - seconds_since_midnight
epoch_to_mid = int(time.time()) + seconds_to
next_0900NO = epoch_to_mid + 2 * 3600 + 12 * 3600
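        # Reading of the arithmetic above (comment added for clarity): epoch_to_mid is the
        # next midnight, +12 h lands on the following midday, and the extra +2 h appears to
        # be a fixed local-time offset -- an assumption, since the original code does not say.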
new_data6 = ''
for listdt in data6['list']:
if listdt['dt'] == next_0900NO:
print listdt
new_data6 = listdt
if not new_data6:
return 'Next-days time not found in API-response.'
data7 = new_data6
if revar.weather_custom and data7['weather'][0]['id'] in weather_codes.keys():
w_desc = weather_codes[data7['weather'][0]['id']]
else:
w_desc = data7['weather'][0]['description']
w_temp = data7['main']['temp'] - 273.15
w_country = data6['city']['country']
w_wind = data7['wind']['speed']
if w_country == '':
return "Location not found."
w_city = data6['city']['name']
text_to_send = "{0}Forecast of {3}{4}{0}, {1}{2}{0}, for tomorrow midday: {11}{6}{0}, {10}with a temperature of {7}{8}&DEGREE;{10} celsius and a windspeed of {7}{9}{10} m/s.".format(
ceq.cblue, ceq.cred, w_country.encode('utf-8'), ceq.cviolet, w_city.encode('utf-8'), ceq.ccyan, w_desc,
ceq.corange, w_temp, w_wind, ceq.clcyan, ceq.cgreen, ceq.degree)
return text_to_send.decode('utf-8').encode('utf-8')
except BaseException as exc:
return general.get_exc(exc, 'commands.forecast()')
def porty(flags):
if len(flags) == 2:
address = flags[0]
if not flags[1].isdigit():
return 'Port must be numbers only.'
port = flags[1]
elif len(flags) == 1:
address = flags[0]
port = ''
else:
return 'Usage: port <address> <portnumber> If no port is specified, I will only check if I can get a response from the network.'
if port == '':
response = os.system("ping -c 1 -W 8 " + address)
if response == 0:
return address + ' is up!'
else:
return address + ' is down!'
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
result = sock.connect_ex((address, int(port)))
if result == 0:
return "Port %d on %s is open." % (int(port), address)
else:
return "Port %d on %s is closed or not responding." % (int(port), address)
def c_bing(msgs):
if len(msgs) == 0:
return
return 'BING! ' + shorten_url('http://www.bing.com/search?q=' + '+'.join(msgs))
def c_imdb(msgs):
return imdb_info('search', msgs)
def c_list(msgs):
if len(msgs) > 0:
if msgs[0].lower() == 'operators' or msgs[0].lower() == 'op' or msgs[0].lower() == 'admin':
if revar.operators == '':
return 'There are no operators listed.'
else:
return 'Operator(s): ' + ceq.cred + ', '.join(revar.operators)
elif msgs[0].lower() == 'ignore' or msgs[0].lower() == 'ignored' or msgs[0].lower() == 'ignorelist':
if revar.ignorelist == []:
return 'There are no ignored users.'
else:
return 'Ignored users: ' + ', '.join(revar.ignorelist)
elif msgs[0].lower() == 'whitelist' or msgs[0].lower() == 'white' or msgs[0].lower() == 'whites':
if revar.whitelist == []:
return 'There are no users being whitelisted.'
else:
return 'Whitelisted users: ' + ', '.join(revar.whitelist)
elif msgs[0].lower() == 'channels' or msgs[0].lower() == 'chan':
return '{}Channels I am currently in: {}{}'.format(ceq.cgreen, ceq.cred,
'{}, {}'.format(ceq.ccyan, ceq.cred).join(
revar.channels))
else:
return "I can't find anything on that. Make sure you typed it right."
else:
return "You can use this ':list'-feature to get me to list the users that are operators(list op), channels(list <chan | channels>), ignored(list ignore), or whitelisted(list whitelist)."
def c_triggers():
return 'Triggers: ' + ceq.ccyan + '"' + ceq.cred + ('%s", "%s' % (ceq.ccyan, ceq.cred)).join(
revar.triggers) + ceq.ccyan + '"'
def c_say(text):
return ' '.join(text)
def c_time():
return 'The current date and time is: ' + ceq.ccyan + time.strftime("%c")
def c_version():
return 'Running Alison v%s' % general.version
def c_rollTheDice(usr, flgs):
#try:
if not flgs:
return '%s rolls a dice! It shows.. %s!' % (usr, random.randint(0, 100))
elif len(flgs) == 2 and checkIfInt(flgs[0]) and checkIfInt(flgs[1]):
flgs = sortInt(flgs)
return '%s rolls a dice from %s to %s! It shows.. %s!' % (
usr, int(flgs[0]), flgs[1], random.randint(int(flgs[0]), int(flgs[1])))
else:
return '%s rolls their own special dice! It shows.. %s!' % (usr, random.choice(flgs))
#except BaseException as exc:
# return general.get_exc(exc, 'rollTheDice')
def checkIfInt(incomingstr):
try:
int(incomingstr)
return True
except:
return False
def sortInt(i):
if int(i[0]) < int(i[1]):
return [str(i[0]), str(i[1])]
elif int(i[0]) > int(i[1]):
return [str(i[1]), str(i[0])]
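# Note (added): sortInt(['10', '3']) returns ['3', '10']; if both values are equal the
# function falls through and returns None, which callers should keep in mind.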
def personalityforge(usr, msg):
if not general.personalityforge_api:
return 'Missing PersonalityForge API-Key.'
try:
params = {
"apiKey": general.personalityforge_api,
"chatBotID": revar.chatbotid,
"message": msg,
"externalID": "AlisonID" + usr,
"firstName": usr
}
url = "http://www.personalityforge.com/api/chat/?" + urllib.urlencode(params)
data = urllib2.urlopen(url, timeout=8).read()
data = data[data.rfind('<br>') + 4:]
data = json.loads(data)
print data
if data['success'] == 0:
return data['errorMessage']
message = data['message']['message']
for name in data['message']['chatBotName'].split():
message = message.replace(name, revar.bot_nick)
message = message.replace("{0} {0}".format(revar.bot_nick), revar.bot_nick)
return "{}: {}".format(usr, message)
except BaseException as exc:
        return general.get_exc(exc, 'personalityforge')
def c_countdown(chan, flags):
if chan in general.countdown:
        return 'Only one countdown allowed per channel. Stop the current countdown first.'
if not flags:
return 'You need to specify a number between 1 and 20.'
if not flags[0].isdigit():
return 'Must be numbers only, between 1 and 20.'
if int(flags[0]) > 20:
return 'Number cannot be higher than 20.'
if int(flags[0]) < 1:
return 'Number cannot be lower than 1.'
general.countdown.append(chan)
number = int(flags[0])
while number != 0:
if chan not in general.countdown:
general.csend(chan, 'Countdown stopped.')
return
general.csend(chan, '{}...'.format(number))
number -= 1
time.sleep(1)
general.csend(chan, 'GO!')
general.countdown.remove(chan)
return
def c_last_seen(flags):
if not flags:
return 'You must specify a user you want to know of said users last occurrence.'
user = flags[0]
if user.lower() not in general.last_seen.keys():
return "User hasn't been registered. Meaning the user hasn't said anything since {}.".format(general.start_time)
user = general.last_seen[user.lower()]
sec = int(time.time() - user['time'])
if sec < 60:
if sec == 1:
last_time = str(sec) + ' second ago'
else:
last_time = str(sec) + ' seconds ago'
if sec >= 60:
minutes = sec // 60
if minutes == 1:
last_time = str(minutes) + ' minute ago'
else:
last_time = str(minutes) + ' minutes ago'
if sec >= 3600:
hours = sec // 3600
if hours == 1:
last_time = str(hours) + ' hour ago'
else:
last_time = str(hours) + ' hours ago'
if sec >= 86400:
days = sec // 86400
if days == 1:
last_time = str(days) + ' day ago'
else:
last_time = str(days) + ' days ago'
msg_to_retrn = "{4}{0} {7}was last seen {5}{1} {7}in channel {6}{2}{7}, with the message \"{8}{3}{7}\".".format(
user['name'], last_time, user['channel'], user['message'], ceq.cviolet, ceq.cgreen, ceq.corange, ceq.cblue,
ceq.ccyan)
if len(msg_to_retrn) > 400:
msg_to_retrn = msg_to_retrn[:400] + "... {}\".".format(ceq.cblue)
return msg_to_retrn
cmds = {
"imdb": ceq.corange + "Syntax: " + ceq.cblue + "imdb <searchwords> " + ceq.ccyan + "Description: " + ceq.cviolet + "I will search for movies or other titles from IMDB and will give you information on it. All links in the chat will automatecly be given information on too.",
# "joke" : ceq.corange + "Syntax: " + ceq.cblue + "joke " + ceq.ccyan + "Description: " + ceq.cviolet + "I will tell you a random joke!" ,
# "test" : ceq.corange + "Syntax: " + ceq.cblue + "time " + ceq.ccyan + "Description: " + ceq.cviolet + "I will tell you the time and the state of myself.",
"point-output": ceq.corange + "Syntax: " + ceq.cblue + "<any command> (< | << | > <user> | >> <user>) " + ceq.ccyan + "Description: " + ceq.cviolet + "I will direct the output of the command where the arrows are pointing. If they are pointing left, it will be directed to the one who called the command. Right, and it will go to the user written. Two arrows mean to send as Notice, one is to send as PM.",
"help": ceq.corange + "Syntax: " + ceq.cblue + "help <any command> " + ceq.ccyan + "Description: " + ceq.cviolet + "I will tell you information on the things I can do with the command! If no command is spessified, I will list the available ones.",
"say": ceq.corange + "Syntax: " + ceq.cblue + "say <any text> " + ceq.ccyan + "Description: " + ceq.cviolet + "I will say whatever you want me to say!",
"list": ceq.corange + "Syntax: " + ceq.cblue + "list <whitelist | ignore | op | operators> " + ceq.ccyan + "Description: " + ceq.cviolet + "I will list the users that are being ignored, whitelisted, or the operators.",
"hey": ceq.corange + "Syntax: " + ceq.cblue + "hey <text> " + ceq.ccyan + "Description: " + ceq.cviolet + "Send me a text and I will respond with a presonality! Remember that the only words I register, are the ones AFTER the 'hey'. The word 'hey' is not in the text I register.",
"port": ceq.corange + "Syntax: " + ceq.cblue + "port <address> <port> " + ceq.ccyan + "Description: " + ceq.cviolet + "I'll check if the port is open on that network or not. If no port is given, I'll just see if the network is responding at all.",
"bing": ceq.corange + "Syntax: " + ceq.cblue + "bing <searchwords> " + ceq.ccyan + "Description: " + ceq.cviolet + "I'll give you a link to the searchresults from the greatest search-engine of all time using your searchwords!",
"time": ceq.corange + "Syntax: " + ceq.cblue + "time " + ceq.ccyan + "Description: " + ceq.cviolet + "I'll give you the full time! Oh and I won't allow you to give any parameters. Standardization, yo!",
"weather": ceq.corange + "Syntax: " + ceq.cblue + "weather <location| > " + ceq.ccyan + "Description: " + ceq.cviolet + "I'll tell you the weather and temperature of the given location. If no location is spesified, it will choose the default location which currently is set to %s." % revar.location,
"operator-commands": ceq.corange + "Syntax: " + ceq.cblue + "{0}<:|,| > <any operator-command>".format(
revar.bot_nick) + ceq.ccyan + " Description: " + ceq.cviolet + "This is only accessable for operators. See \"$BOTNICK<:|,| > help\" for more information on this feature. All non-operators will be ignored calling a command this way.",
"countdown": ceq.corange + "Syntax: " + ceq.cblue + "countdown <number of secconds>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will start a countdown with the specified number of seconds. The countown can be stopped by any user by typing 'stop' anywhere in chat. Only one countdown per channel is allowed.",
"seen": ceq.corange + "Syntax: " + ceq.cblue + "seen <user>" + '' + ceq.ccyan + " Description: " + ceq.cviolet + "Will tell you the last ocurence the user talked, with time, channel, and message. Note that this 'log' will be reset on startup.",
"forecast": ceq.corange + "Syntax: " + ceq.cblue + "forecast <location>" + ceq.ccyan + " Description: " + ceq.cviolet + "Will tell you the weather of the given location at the next midday. That is the next time the clock is 12:00. If no location is given, the default one will be used.",
"rtd": ceq.corange + "Syntax: " + ceq.cblue + "rtd <num1> <num2> || rtd <element1> <element2> <element3> ..." + ceq.ccyan + " Description: " + ceq.cviolet + "I'll roll a dice for you! I'll use a 1-100 dice, or you can tell me what range you want. You can even create one yourself!",
# "" : ceq.corange + "Syntax: " + ceq.cblue + "" + ceq.ccyan + " Description: " + ceq.cviolet + "",
}
# rtd=ceq.corange + "Syntax: " + ceq.cblue + "rtd <num1> <num2> || rtd <element1> <element2> <element3> ..." + ceq.ccyan + " Description: " + ceq.cviolet + "I'll roll a dice for you! I'll use a 1-100 dice, or you can tell me what range you want. You can even create one yourself!")
def help_tree(msgs):
if len(msgs) == 0:
retrn = ceq.cblue + "These are the things you can tell me to do! You can say ':help <command>' and I'll tell you about the command you want information on." + '\n'
return retrn + ceq.cblue + "There are {} of them, at the moment: ".format(
len(cmds.keys())) + ceq.cviolet + '{1}, {0}'.format(ceq.cviolet, ceq.cred).join(cmds.keys())
if len(msgs) > 0:
try:
return cmds[msgs[0].lower()]
except:
return "I can't find that one, sorry. Make sure you typed it in correctly."
def check_called(chan, user, msg):
if not msg:
return ''
msgs = msg.split()
if len(msgs) < 2:
flags = []
else:
flags = msgs[1:]
command = msgs[0].lower()
if command == 'bing':
return c_bing(flags)
if command == 'imdb':
return c_imdb(flags)
if command == 'list':
return c_list(flags)
if command == 'weather':
if not flags:
return weather(revar.location.split())
return weather(flags)
if command == 'trigger' or command == 'triggers':
return c_triggers()
if command == 'port':
return porty(flags)
if command == 'say':
return c_say(flags)
if command == 'time':
return c_time()
if command == 'help':
return help_tree(flags)
if command == 'version':
return c_version()
if command == 'hey':
return personalityforge(user, msg)
if command == 'countdown' or command == 'count':
return c_countdown(chan, flags)
if command == 'last' or command == 'seen':
return c_last_seen(flags)
if command == 'rtd' or command == 'rollthedice':
return c_rollTheDice(user, flags)
if command == 'forecast':
if not flags:
return forecast(revar.location.split())
return forecast(flags)
``` |
{
"source": "JohanHorsmans/cds-language-exam-2021",
"score": 3
} |
#### File: cds-language-exam-2021/assignment_6/GoT_dl.py
```python
import os
import sys
sys.path.append(os.path.join(".."))
# Import pandas for working with dataframes:
import pandas as pd
# Import numpy for working with arrays:
import numpy as np
# Import the classifier utility-function as 'clf':
import utils.classifier_utils as clf
# Machine learning stuff
from sklearn.metrics import balanced_accuracy_score, classification_report, plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ShuffleSplit
from sklearn import metrics
from sklearn.preprocessing import LabelBinarizer
# Import tensorflow ANN tools from tensorflow:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, Embedding,
Flatten, GlobalMaxPool1D, Conv1D)
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras import backend as K
from tensorflow.keras.utils import plot_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.regularizers import L2
# Import argparse to specify arguments in the script from the commandline:
import argparse
# Define function argument defaults and how to specify them from the terminal:
ap = argparse.ArgumentParser(description = "[DESCRIPTION]: A script designed to classify Game of Thrones seasons based on dialogue with a neural network. The following arguments can be specified but you can also run the code with default parameters:")
ap.add_argument("-e", "--epochs", default = 10, type = int, help = "int, number of training epochs for the neural network [DEFAULT]: 10")
ap.add_argument("-es", "--embedding_size", default = 50, type = int, help = "int, the size of the word embeddings loaded from the the GloVe-model. Options: 50, 100, 200, 300 [DEFAULT]: 50")
# Parse the arguments:
args = vars(ap.parse_args())
# Define the main function of the script and what parameters it takes:
def main(epochs, embedding_size):
# Define helper function to load the saved GloVe embeddings and build an embedding matrix:
def create_embedding_matrix(filepath, word_index, embedding_dim):
        vocab_size = len(word_index) + 1  # Adding 1 again because of the reserved 0 index.
embedding_matrix = np.zeros((vocab_size, embedding_dim))
with open(filepath) as f:
for line in f:
word, *vector = line.split()
if word in word_index:
idx = word_index[word]
embedding_matrix[idx] = np.array(
vector, dtype=np.float32)[:embedding_dim]
return embedding_matrix
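    # A rough sketch of how the helper above behaves (hypothetical word_index, added for
    # illustration): create_embedding_matrix("glove.6B.50d.txt", {"winter": 1, "king": 2}, 50)
    # returns a (3, 50) array whose row 0 stays all zeros for the reserved padding index.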
# Define path to the dataset:
filename = os.path.join("data", "Game_of_Thrones_Script.csv")
# Read the data and save it as a variable called "GoT":
GoT = pd.read_csv(filename)
# Save the "Sentence"-column from the GoT-data and save it as a variable called "sentences":
sentences = GoT['Sentence'].values
# Save the "Season"-column from the GoT-data and save it as a variable called "labels"
labels = GoT['Season'].values
    # Create a training- and testing-data split using sklearn:
X_train, X_test, y_train, y_test = train_test_split(sentences, # X-input.
labels, # y-input.
test_size=0.25, # Specify that 25% of the data should be added to the test-set.
random_state=24) # Set random state for reproducibility.
# Use LabelBinarizer to transform the labels to a binary one-vs-all fashion (to make it compatible with the neural network):
lb = LabelBinarizer()
y_train = lb.fit_transform(y_train)
y_test = lb.fit_transform(y_test)
# Initialize tokenizer to convert text to numbers:
tokenizer = Tokenizer(num_words=None)
# Fit tokenizer to training data:
tokenizer.fit_on_texts(X_train)
    # Tokenize training- and testing data:
X_train_toks = tokenizer.texts_to_sequences(X_train)
X_test_toks = tokenizer.texts_to_sequences(X_test)
# Define overall vocabulary size:
vocab_size = len(tokenizer.word_index) + 1 # Adding 1 due to the reserved 0 index in Python.
    # To make the tokenizer-output work, we need to pad the documents to have identical length:
maxlen = len(max(X_train, key=len)) # Set the max-length to be the longest sentence in the dataset. This is done to ensure that no data is lost.
# Pad the training data to the maximum length defined above:
X_train_pad = pad_sequences(X_train_toks,
padding='post', # sequences can be padded "pre" or "post" (post means adding 0s to the end of the sequence)
maxlen=maxlen)
# Pad the training data to the maximum length defined above:
X_test_pad = pad_sequences(X_test_toks,
padding='post',
maxlen=maxlen)
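    # For example (hypothetical token ids): [[4, 12, 7]] padded with padding='post' and
    # maxlen=5 becomes [[4, 12, 7, 0, 0]] -- zeros are appended so all rows share one length.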
# If the "glove.6B.zip" file does not exist in the directory then download it and unzip it. If it does exist, do nothing:
if not os.path.exists("glove.6B.zip"):
os.system('wget http://nlp.stanford.edu/data/glove.6B.zip')
os.system('unzip glove.6B.zip')
# Set the embedding dimensions to be the same as the specified embedding_size argument:
embedding_dim = embedding_size
# Use the previously defined helper function to build an embedding matrix using the GloVe-model (with the specified embedding size):
embedding_matrix = create_embedding_matrix(os.path.join(f"glove.6B.{embedding_size}d.txt"),
tokenizer.word_index,
embedding_dim)
# Initialize Sequential model to build neural network:
model = Sequential()
# add Embedding layer:
model.add(Embedding(input_dim=vocab_size, # There should be as many input nodes as there are words generated by the Tokenizer()
output_dim=embedding_dim, # Add defined embedding size
input_length=maxlen, # Add max length of padded sentences
weights=[embedding_matrix], # Add pretrained GloVe weights
                        trainable=False))  # Keep the pretrained GloVe embeddings frozen rather than updating them during training.
# CONV+ReLU -> MaxPool -> FC+ReLU -> Out
# Add convolutional layer with ReLU-activation:
model.add(Conv1D(128, 5,
activation='relu'))
# Add max-pooling layer:
model.add(GlobalMaxPool1D())
    # Add Dense layer with 128 neurons and ReLU-activation:
model.add(Dense(128,
activation='relu'))
# Add output layer with 8 nodes; 1 for each class (i.e. season):
model.add(Dense(8,
activation='softmax'))
# Compile model:
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# Train the model for specified number of epochs:
history = model.fit(X_train_pad, y_train,
epochs=epochs,
verbose=False,
validation_data=(X_test_pad, y_test),
batch_size=10)
# Predict the testing data:
dl_predictions = model.predict(X_test_pad)
# Make labels integers rather than float-probabilities:
dl_predictions=np.argmax(dl_predictions, axis=1)
y_test=np.argmax(y_test, axis=1)
# Print classification matrix to the terminal:
print(classification_report(y_test, dl_predictions))
#If the script is called from the command-line make epochs the first argument and embedding_size the second argument:
if __name__ =="__main__":
main(
args["epochs"],
args["embedding_size"])
``` |
{
"source": "johanjeppsson/alive-progress",
"score": 2
} |
#### File: alive_progress/animations/spinner_compiler.py
```python
import operator
import random
import time
from inspect import signature
from itertools import chain, count, islice, repeat
from types import SimpleNamespace
from about_time import about_time
from .utils import fix_signature
from ..utils.cells import fix_cells, is_wide, join_cells, strip_marks, to_cells
from ..utils.colors import BLUE, BLUE_BOLD, CYAN, DIM, GREEN, ORANGE, ORANGE_BOLD, RED, YELLOW_BOLD
from ..utils.terminal import clear_end, cursor_up_1, hide_cursor, show_cursor
def spinner_controller(*, natural, skip_compiler=False):
def inner_controller(spinner_inner_factory, op_params=None, extra_commands=None):
def spinner_compiler_dispatcher_factory(actual_length=None):
"""Compile this spinner factory into an actual spinner runner.
The previous parameters were the styling parameters, which defined a style.
These are called operational parameters, which `alive_progress` binds dynamically
as needed. Do not call this manually.
Args:
actual_length (int): the actual length to compile the frames renditions
Returns:
a spinner runner
"""
if skip_compiler:
return spinner_inner_factory(actual_length, **op_params)
with about_time() as t_compile:
gen = spinner_inner_factory(actual_length, **op_params)
spec = spinner_compiler(gen, natural, extra_commands.get(True, ()))
return spinner_runner_factory(spec, t_compile, extra_commands.get(False, ()))
def compile_and_check(*args, **kwargs): # pragma: no cover
"""Compile this spinner factory at its natural length, and..."""
spinner_compiler_dispatcher_factory().check(*args, **kwargs)
def set_operational(**params):
signature(spinner_inner_factory).bind(1, **params) # test arguments (one is provided).
return inner_controller(spinner_inner_factory, params, extra_commands)
def schedule_command(command):
def inner_schedule(*args, **kwargs):
signature(command).bind(1, *args, **kwargs) # test arguments (one is provided).
extra, cmd_type = dict(extra_commands), EXTRA_COMMANDS[command]
extra[cmd_type] = extra.get(cmd_type, ()) + ((command, args, kwargs),)
return inner_controller(spinner_inner_factory, op_params, extra)
return fix_signature(inner_schedule, command, 1)
spinner_compiler_dispatcher_factory.__dict__.update(
check=fix_signature(compile_and_check, check, 1), op=set_operational,
**{c.__name__: schedule_command(c) for c in EXTRA_COMMANDS},
)
op_params, extra_commands = op_params or {}, extra_commands or {}
spinner_compiler_dispatcher_factory.natural = natural # share with the spinner code.
return spinner_compiler_dispatcher_factory
return inner_controller
"""
The commands here are made available in the compiler controller, thus in all spinners.
They work lazily: when called they only schedule themselves to be run when the spinner
gets compiled, i.e., when it receives the operational parameters like `actual_length`.
They can take place inside the compiler or inside the runner.
Compiler commands can change the data at will, before the animation specs are computed.
Runner commands can only change presentation order.
"""
def extra_command(is_compiler):
def inner_command(command):
EXTRA_COMMANDS[command] = is_compiler
return command
return inner_command
EXTRA_COMMANDS = {}
compiler_command, runner_command = extra_command(True), extra_command(False)
@compiler_command
def replace(spec, old, new): # noqa
"""Replace a portion of the frames by another with the same length.
Args:
old (str): the old string to be replaced
new (str): the new string
"""
# different lengths could lead to broken frames, but they will be verified afterwards.
spec.data = tuple(tuple(
to_cells(join_cells(frame).replace(old, new)) for frame in cycle
) for cycle in spec.data)
@compiler_command
def pause(spec, edges=None, center=None, other=None): # noqa
"""Make the animation appear to pause at the edges or at the middle, or make it slower as
a whole, or both.
Use without arguments to get their defaults, which gives a small pause at the edges,
very nice for bouncing text with `hide=False`. Please note that the defaults only apply
if none of the params are set.
In the future, I'd like to make this a `pace` command, which would receive a sequence
of ints of any length, and apply it bouncing across the cycle. For example to smoothly
decelerate it could be (6, 3, 2, 1), which would become (6, 3, 2, 1, 1, ..., 1, 2, 3, 6).
Args:
edges (Optional[int]): how many times the first and last frames of a cycle repeats
default is 8.
center (Optional[int]): how many times the middle frame of a cycle repeats
default is 1.
other (Optional[int]): how many times all the other frames of a cycle repeats
default is 1.
"""
edges, center, other = (max(1, x or 1) for x in (edges, center, other))
if all(x == 1 for x in (edges, center, other)):
edges, center, other = 8, 1, 1
repeats_func = lambda length: {
0: edges,
length - 1: edges,
round(length / 2): center,
}
spec.data = tuple(tuple(chain.from_iterable(
repeat(frame, repeats.get(i) or other) for i, frame in enumerate(cycle)
)) for cycle, repeats in ((cycle, repeats_func(len(cycle))) for cycle in spec.data))
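# Illustrative example (added, not part of the library code): for a 5-frame cycle with no
# arguments (which promotes edges, center, other to 8, 1, 1), repeats_func(5) yields
# {0: 8, 4: 8, 2: 1}, so the first and last frames are held for 8 ticks each while the
# remaining frames play once.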
@compiler_command
def reshape(spec, num_frames): # noqa
"""Reshape frame data into another grouping. It can be used to simplify content
description, or for artistic effects.
Args:
num_frames (int): the number of consecutive frames to group
"""
flatten = chain.from_iterable(cycle for cycle in spec.data)
spec.data = tuple(iter(lambda: tuple(islice(flatten, num_frames)), ()))
@compiler_command
def bounce(spec):
"""Make the animation bounce its cycles."""
spec.data = tuple(chain(spec.data, spec.data[-2:0:-1]))
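# For instance (illustrative): cycles (c1, c2, c3) become (c1, c2, c3, c2), so the
# animation plays forward and then back without repeating the endpoint cycles.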
@compiler_command
def transpose(spec):
"""Transpose the frame content matrix, exchanging columns for rows. It can be used
to simplify content description, or for artistic effects."""
spec.data = tuple(tuple(cycle) for cycle in zip(*spec.data))
@runner_command
def sequential(spec):
"""Configure the runner to play the compiled cycles in sequential order."""
def cycle_data(data):
while True:
yield from data
cycle_data.name = 'sequential'
spec.__dict__.update(strategy=cycle_data, cycles=len(spec.data))
@runner_command
def randomize(spec, cycles=None): # noqa
"""Configure the runner to play the compiled cycles in random order.
Args:
cycles (Optional[int]): number of cycles to play randomized
"""
def cycle_data(data):
while True:
yield random.choice(data)
cycle_data.name = 'randomized'
spec.__dict__.update(strategy=cycle_data, cycles=max(0, cycles or 0) or spec.cycles)
def apply_extra_commands(spec, extra_commands): # pragma: no cover
for command, args, kwargs in extra_commands:
command(spec, *args, **kwargs)
def spinner_compiler(gen, natural, extra_commands):
"""Optimized spinner compiler, which compiles ahead of time all frames of all cycles
of a spinner.
Args:
gen (Generator): the generator expressions that defines the cycles and their frames
natural (int): the natural length of the spinner
extra_commands (tuple[tuple[cmd, list[Any], dict[Any]]]): requested extra commands
Returns:
the spec of a compiled animation
"""
spec = SimpleNamespace(
data=tuple(tuple(fix_cells(frame) for frame in cycle) for cycle in gen), natural=natural)
apply_extra_commands(spec, extra_commands)
# generate spec info.
frames = tuple(len(cycle) for cycle in spec.data)
spec.__dict__.update(cycles=len(spec.data), length=len(spec.data[0][0]),
frames=frames, total_frames=sum(frames))
assert (max(len(frame) for cycle in spec.data for frame in cycle) ==
min(len(frame) for cycle in spec.data for frame in cycle)), \
render_data(spec, True) or 'Different cell lengths detected in frame data.'
return spec
def spinner_runner_factory(spec, t_compile, extra_commands):
"""Optimized spinner runner, which receives the spec of an animation, and controls
the flow of cycles and frames already compiled to a certain screen length and with
wide chars fixed, thus avoiding any overhead in runtime within complex spinners,
while allowing their factories to be garbage collected.
Args:
spec (SimpleNamespace): the spec of an animation
t_compile (about_time.Handler): the compile time information
extra_commands (tuple[tuple[cmd, list[Any], dict[Any]]]): requested extra commands
Returns:
a spinner runner
"""
def spinner_runner():
"""Wow, you are really deep! This is the runner of a compiled spinner.
Every time you call this function, a different generator will kick in,
which yields the frames of the current animation cycle. Enjoy!"""
yield from next(cycle_gen) # I love generators!
def runner_check(*args, **kwargs): # pragma: no cover
return check(spec, *args, **kwargs)
spinner_runner.__dict__.update(spec.__dict__, check=fix_signature(runner_check, check, 1))
spec.__dict__.update(t_compile=t_compile, runner=spinner_runner) # set after the update above.
sequential(spec)
apply_extra_commands(spec, extra_commands)
cycle_gen = spec.strategy(spec.data)
return spinner_runner
def check(spec, verbosity=0): # noqa # pragma: no cover
"""Check the specs, contents, codepoints, and even the animation of this compiled spinner.
Args:
verbosity (int): change the verbosity level
0 for specs only (default)
/ \\
/ 3 to include animation
/ \\
1 to unfold frame data -------- 4 to unfold frame data
| |
2 to reveal codepoints -------- 5 to reveal codepoints
"""
verbosity = max(0, min(5, verbosity or 0))
if verbosity in (1, 2, 4, 5):
render_data(spec, verbosity in (2, 5))
spec_data(spec) # spec_data here displays calculated frame data, always shown.
duration = spec.t_compile.duration_human.replace('us', 'µs')
print(f'\nSpinner frames compiled in: {GREEN(duration)}')
print(f'(call {HELP_MSG[verbosity]})')
if verbosity in (3, 4, 5):
animate(spec)
SECTION = ORANGE_BOLD
CHECK = lambda p: f'{BLUE(f".{check.__name__}(")}{BLUE_BOLD(p)}{BLUE(")")}'
HELP_MSG = {
0: f'{CHECK(1)} to unfold frame data, or {CHECK(3)} to include animation',
1: f'{CHECK(2)} to reveal codepoints, or {CHECK(4)} to include animation,'
f' or {CHECK(0)} to fold up frame data',
2: f'{CHECK(5)} to include animation, or {CHECK(1)} to hide codepoints',
3: f'{CHECK(4)} to unfold frame data, or {CHECK(0)} to omit animation',
4: f'{CHECK(5)} to reveal codepoints, or {CHECK(1)} to omit animation,'
f' or {CHECK(3)} to fold up frame data',
5: f'{CHECK(2)} to omit animation, or {CHECK(4)} to hide codepoints',
}
def spec_data(spec): # pragma: no cover
print(f'\n{SECTION("Specs")}')
info = lambda field: f'{YELLOW_BOLD(field.split(".")[0])}: {operator.attrgetter(field)(spec)}'
print(info('length'), f'({info("natural")})')
print(info('cycles'), f'({info("strategy.name")})')
print('\n'.join(info(field) for field in ('frames', 'total_frames')))
def format_codepoints(frame): # pragma: no cover
codes = '|'.join((ORANGE if is_wide(g) else BLUE)(
' '.join(hex(ord(c)).replace('0x', '') for c in g)) for g in frame)
return f" -> {RED(sum(len(fragment) for fragment in frame))}:[{codes}]"
def render_data(spec, show_codepoints): # pragma: no cover
print(f'\n{SECTION("Frame data")}', end='')
whole_index = count(1)
lf, wf = f'>{1 + len(str(max(spec.frames)))}', f'<{len(str(spec.total_frames))}'
codepoints = format_codepoints if show_codepoints else lambda _: ''
for i, cycle in enumerate(spec.data, 1):
frames = map(lambda fragment: tuple(strip_marks(fragment)), cycle)
print(f'\ncycle {i}\n' + '\n'.join(
DIM(li, lf) + f' |{"".join(frame)}| {DIM(wi, wf)}' + codepoints(frame)
for li, frame, wi in zip(count(1), frames, whole_index)
))
def animate(spec): # pragma: no cover
print(f'\n{SECTION("Animation")}')
cf, lf, tf = (f'>{len(str(x))}' for x in (spec.cycles, max(spec.frames), spec.total_frames))
from itertools import cycle
cycles, frames = cycle(range(1, spec.cycles + 1)), cycle(range(1, spec.total_frames + 1))
hide_cursor()
try:
while True:
c = next(cycles)
for i, f in enumerate(spec.runner(), 1):
n = next(frames)
print(f'\r{CYAN(c, cf)}:{CYAN(i, lf)} -->{join_cells(f)}<-- {CYAN(n, tf)} ')
print(DIM('(press CTRL+C to stop)'), end='')
clear_end()
time.sleep(1 / 15)
cursor_up_1()
except KeyboardInterrupt:
pass
finally:
show_cursor()
```
#### File: alive_progress/core/hook_manager.py
```python
import logging
import sys
from collections import defaultdict
from functools import partial
from itertools import chain, islice, repeat
from logging import StreamHandler
from types import SimpleNamespace
from ..utils.terminal import clear_line
def buffered_hook_manager(header_template, get_pos, cond_refresh):
"""Create and maintain a buffered hook manager, used for instrumenting print
statements and logging.
Args:
header_template (): the template for enriching output
get_pos (Callable[..., Any]): the container to retrieve the current position
cond_refresh: Condition object to force a refresh when printing
Returns:
a closure with several functions
"""
def flush_buffers():
for stream, buffer in buffers.items():
flush(stream)
def flush(stream):
if buffers[stream]:
write(stream, '\n')
stream.flush()
def write(stream, part):
buffer = buffers[stream]
if part != '\n':
# this will generate a sequence of lines interspersed with None, which will later
# be rendered as the indent filler to align additional lines under the same header.
gen = chain.from_iterable(zip(repeat(None), part.splitlines(True)))
buffer.extend(islice(gen, 1, None))
else:
header = get_header()
with cond_refresh:
nested = ''.join(line or ' ' * len(header) for line in buffer)
if stream in base:
# this avoids potential flickering, since now the stream can also be
# files from logging, and thus not needing to clear the screen...
clear_line()
stream.write(f'{header}{nested.strip()}\n')
stream.flush()
cond_refresh.notify()
buffer[:] = []
def get_hook_for(handler):
if handler.stream: # supports FileHandlers with delay=true.
handler.stream.flush()
return SimpleNamespace(write=partial(write, handler.stream),
flush=partial(flush, handler.stream),
isatty=sys.__stdout__.isatty)
def install():
root = logging.root
# modify all stream handlers, including their subclasses.
before_handlers.update({h: _set_stream(h, get_hook_for(h)) # noqa
for h in root.handlers if isinstance(h, StreamHandler)})
sys.stdout, sys.stderr = (get_hook_for(SimpleNamespace(stream=x)) for x in base)
def uninstall():
flush_buffers()
buffers.clear()
sys.stdout, sys.stderr = base
[_set_stream(handler, original_stream)
for handler, original_stream in before_handlers.items()]
before_handlers.clear()
# internal data.
buffers = defaultdict(list)
get_header = (lambda: header_template.format(get_pos())) if header_template else lambda: ''
base = sys.stdout, sys.stderr # needed for tests.
before_handlers = {}
# external interface.
hook_manager = SimpleNamespace(
flush_buffers=flush_buffers,
install=install,
uninstall=uninstall,
)
return hook_manager
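# A minimal usage sketch (assumptions: a threading.Condition stands in for cond_refresh
# and a plain callable for get_pos; neither is supplied by this module):
#
#   import threading
#   hooks = buffered_hook_manager('on {}: ', lambda: 42, threading.Condition())
#   hooks.install()    # print()/logging output is now buffered and prefixed
#   print('hello')     # emitted as "on 42: hello" once the newline flushes the buffer
#   hooks.uninstall()  # restores sys.stdout/sys.stderr and the logging handlers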
if sys.version_info >= (3, 7): # pragma: no cover
def _set_stream(handler, stream):
return handler.setStream(stream)
else: # pragma: no cover
def _set_stream(handler, stream):
# from python 3.7 implementation.
result = handler.stream
handler.acquire()
try:
handler.flush()
handler.stream = stream
finally:
handler.release()
return result
``` |
{
"source": "johanjeppsson/msteams",
"score": 3
} |
#### File: msteams/msteams/formatting.py
```python
def _tag(s, tag):
"""Return string wrapped in a tag."""
return "<{0}>{1}</{0}>".format(tag, s)
def bold(s):
"""Return bold string."""
return _tag(s, "strong")
def italic(s):
"""Return italicized string."""
return _tag(s, "em")
def header(s, level=1):
"""Return header. Valid levels are 1-3."""
if level < 1 or level > 3:
raise ValueError("Level must be in range 1-3")
return _tag(s, "h{}".format(level))
def strikethrough(s):
"""Return strikethrough stirng."""
return _tag(s, "strike")
def unordered_list(l):
"""Return string representing an unordered list."""
return _tag("".join([_tag(s, "li") for s in l]), "ul")
def ordered_list(l):
"""Return string representing an ordered list."""
return _tag("".join([_tag(s, "li") for s in l]), "ol")
def preformatted(s):
"""Return preformatted text."""
return _tag(s, "pre")
def blockquote(s):
"""Return blockquote text."""
return _tag(s, "blockquote")
def link(text, url):
"""Return formatted hyperlink."""
return '<a href="{}">{}</a>'.format(url, text)
def img(url, alt_text=None):
"""Return formatted embedded image."""
alt = ' alt="{}"'.format(alt_text) if alt_text is not None else ""
return '<img src="{}"{}></img>'.format(url, alt)
def paragraph(s):
"""Return formatted paragraph."""
return _tag(s, "p")
```
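A minimal usage sketch for the formatting helpers above. The import path (`msteams.formatting`) is assumed from the file location, and the build URL is made up for illustration.
```python
# Hedged usage sketch: the import path and URL are assumptions, not part of the original code.
from msteams.formatting import bold, italic, header, link, paragraph, unordered_list

text = header("Build status", level=2)
text += paragraph(
    bold("Pipeline") + " finished " + italic("successfully") + ": "
    + link("details", "https://example.com/build/42")
)
text += unordered_list(["unit tests: OK", "lint: OK"])
# -> "<h2>Build status</h2><p><strong>Pipeline</strong> finished <em>successfully</em>: ..."
print(text)
```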
#### File: msteams/tests/test_card_section.py
```python
import json
from collections import OrderedDict
import pytest
from msteams import CardSection, Fact, HttpPostAction, ImageObject
EXPECTED_ACTIVITY = OrderedDict(
(
("activityTitle", "<NAME>"),
("activitySubtitle", "10/2/2019, 21:54"),
("activityImage", "https://tinyurl.com/y4nxy7fj"),
)
)
EXPECTED_FACTS = {
"facts": [
OrderedDict((("name", "Board:"), ("value", "Name of board"))),
OrderedDict((("name", "List:"), ("value", "Name of list"))),
OrderedDict((("name", "Assigned to:"), ("value", "(none)"))),
OrderedDict((("name", "Due date:"), ("value", "(none)"))),
]
}
EXPECTED_TEXT = OrderedDict({"text": "Lorem ipsum dolor sit amet"})
EXPECTED_HERO = OrderedDict(
{
"heroImage": OrderedDict(
(("image", "https://tinyurl.com/yypszv2s"), ("title", "Everyday Hero"))
)
}
)
EXPECTED_TITLE = OrderedDict({"title": "Section title"})
EXPECTED_GROUP = OrderedDict({"startGroup": True})
EXPECTED_ACTION = OrderedDict(
{
"potentialAction": [
OrderedDict(
(
("@type", "HttpPOST"),
("name", "Run tests"),
("target", "http://jenkins.com?action=trigger"),
)
)
]
}
)
def test_activity():
e = EXPECTED_ACTIVITY
section = CardSection()
section.set_activity(
title=e["activityTitle"],
subtitle=e["activitySubtitle"],
image_url=e["activityImage"],
)
assert section.json_payload == json.dumps(e)
section = CardSection()
section.set_activity_title(e["activityTitle"])
section.set_activity_subtitle(e["activitySubtitle"])
section.set_activity_image(e["activityImage"])
assert section.json_payload == json.dumps(e)
section = CardSection(
activity_title=e["activityTitle"],
activity_subtitle=e["activitySubtitle"],
activity_image=e["activityImage"],
)
assert section.json_payload == json.dumps(e)
def test_facts():
e = EXPECTED_FACTS
fact_dict = OrderedDict()
for f in e["facts"]:
fact_dict[f["name"]] = f["value"]
section = CardSection()
section.set_facts(fact_dict)
assert section.json_payload == json.dumps(e)
section = CardSection()
section.add_facts(fact_dict)
assert section.json_payload == json.dumps(e)
section = CardSection()
for fact in e["facts"]:
section.add_fact(fact["name"], fact["value"])
assert section.json_payload == json.dumps(e)
section = CardSection(facts=fact_dict)
assert section.json_payload == json.dumps(e)
def test_texts():
e = EXPECTED_TEXT
section = CardSection()
section.set_text(e["text"])
assert section.json_payload == json.dumps(e)
section = CardSection(text=e["text"])
assert section.json_payload == json.dumps(e)
def test_hero_image():
e = EXPECTED_HERO
image = ImageObject(image=e["heroImage"]["image"], title=e["heroImage"]["title"])
section = CardSection()
section.set_hero_image(image)
assert section.json_payload == json.dumps(e)
section = CardSection()
section.set_hero_image({e["heroImage"]["title"]: e["heroImage"]["image"]})
assert section.json_payload == json.dumps(e)
section = CardSection()
section.set_hero_image(e["heroImage"]["image"])
section["hero_image"].set_title(e["heroImage"]["title"])
assert section.json_payload == json.dumps(e)
section = CardSection(hero_image={e["heroImage"]["title"]: e["heroImage"]["image"]})
assert section.json_payload == json.dumps(e)
def test_title():
e = EXPECTED_TITLE
section = CardSection()
section.set_title(e["title"])
assert section.json_payload == json.dumps(e)
section = CardSection(title=e["title"])
assert section.json_payload == json.dumps(e)
def test_start_group():
e = EXPECTED_TITLE
e.update(EXPECTED_GROUP)
section = CardSection()
section.set_title(e["title"])
section.start_group()
assert section.json_payload == json.dumps(e)
def test_potential_actions():
e = EXPECTED_ACTION
section = CardSection()
with pytest.raises(TypeError):
section.add_potential_action(Fact("a", "b"))
action = HttpPostAction(
name=e["potentialAction"][0]["name"], target=e["potentialAction"][0]["target"]
)
section.add_potential_action(action)
assert section.json_payload == json.dumps(e)
def test_total():
e = EXPECTED_ACTIVITY
e.update(EXPECTED_TITLE)
e.update(EXPECTED_FACTS)
e.update(EXPECTED_TEXT)
e.update(EXPECTED_HERO)
section = CardSection()
section = CardSection(title=e["title"])
section.set_activity(
title=e["activityTitle"],
subtitle=e["activitySubtitle"],
image_url=e["activityImage"],
)
for fact in e["facts"]:
section.add_fact(fact["name"], fact["value"])
section.set_text(e["text"])
section.set_hero_image({e["heroImage"]["title"]: e["heroImage"]["image"]})
    assert section.json_payload == json.dumps(e)
```
#### File: msteams/tests/test_inputs.py
```python
import json
from collections import OrderedDict
import pytest
from msteams import DateInput, MultipleChoiceInput, TextInput
EXPECTED_INPUT = OrderedDict(
(
("@type", "type"),
("id", "comment"),
("isRequired", False),
("title", "Input's title property"),
("value", "Input's value property"),
)
)
EXPECTED_TEXT = OrderedDict((("isMultiline", True), ("maxLength", 80)))
EXPECTED_DATE = {"includeTime": False}
EXPECTED_MULTI = OrderedDict(
(
("choices", [OrderedDict((("display", "Choice 1"), ("value", "1")))]),
("isMultiSelect", False),
("style", "normal"),
)
)
def test_text_input():
e = EXPECTED_INPUT.copy()
e.update(EXPECTED_TEXT)
e["@type"] = "TextInput"
ti = TextInput(
id=e["id"],
is_multiline=e["isMultiline"],
title=e["title"],
is_required=False,
value=e["value"],
max_length=e["maxLength"],
)
assert ti.json_payload == json.dumps(e)
ti = TextInput()
ti.set_id(e["id"])
ti.set_is_required(e["isRequired"])
ti.set_is_multiline(e["isMultiline"])
ti.set_title(e["title"])
ti.set_value(e["value"])
ti.set_max_length(e["maxLength"])
assert ti.json_payload == json.dumps(e)
def test_date_input():
e = EXPECTED_INPUT.copy()
e.update(EXPECTED_DATE)
e["@type"] = "DateInput"
di = DateInput(
id=e["id"],
title=e["title"],
is_required=False,
value=e["value"],
include_time=e["includeTime"],
)
assert di.json_payload == json.dumps(e)
di = DateInput(id=e["id"], title=e["title"], is_required=False, value=e["value"])
di.set_include_time(e["includeTime"])
assert di.json_payload == json.dumps(e)
def test_multiple_choice_input():
e = EXPECTED_INPUT.copy()
e.update(EXPECTED_MULTI)
e["@type"] = "MultipleChoiceInput"
c = {e["choices"][0]["display"]: e["choices"][0]["value"]}
mi = MultipleChoiceInput(
id=e["id"],
title=e["title"],
is_required=False,
value=e["value"],
choices=c,
is_multi_select=False,
style="normal",
)
assert mi.json_payload == json.dumps(e)
mi = MultipleChoiceInput(
id=e["id"], title=e["title"], is_required=False, value=e["value"]
)
mi.set_choices(c)
mi.set_is_multi_select(False)
mi.set_style("normal")
assert mi.json_payload == json.dumps(e)
mi = MultipleChoiceInput(
id=e["id"], title=e["title"], is_required=False, value=e["value"]
)
mi.add_choices(c)
mi.set_is_multi_select(False)
mi.set_style("normal")
assert mi.json_payload == json.dumps(e)
with pytest.raises(ValueError):
mi = MultipleChoiceInput(style="invalid")
``` |
{
"source": "johanjoensson/impurityModel",
"score": 3
} |
#### File: impurityModel/ed/op_parser.py
```python
def skip_whitespaces(str):
    """Return str with any leading spaces, tabs and newlines removed."""
    for i, c in enumerate(str):
        if c not in [" ", "\t", "\n"]:
            return str[i:]
    return ""
def read_state_tuple(str):
res = []
num = ""
for i,c in enumerate(str):
if c == ')':
try:
res.append(int(num))
except:
print ("Error casting string " + num + ", to integer.")
raise
break
elif c == ',':
try:
res.append(int(num))
except:
print ("Error casting string " + num + ", to integer.")
raise
num = ""
elif c in "0123456789-":
num += c
return (str[i + 1 :], tuple(res))
# def read_complex(str):
# num = ""
# for i, c in enumerate(str):
# if c == ',':
# break
# elif c in "0123456789+-ij.eE":
# num += c
# try:
# val = complex(num.replace('i', 'j'))
# except:
# print ("Error casting string " + num + ", to complex number.")
# raise
# return (str[i+1:], val)
def read_real(str):
str = skip_whitespaces(str)
num = ""
val = 0
for i, c in enumerate(str):
if c in "0123456789+-.eE":
num += c
else:
break
try:
val = float(num)
except:
print ("Error casting string " + num + ", to floating point number.")
raise
return (str[i:], val)
def read_real_imag(str):
num = ""
real = 0
imag = 0
str = skip_whitespaces(str)
str, real = read_real(str)
str = skip_whitespaces(str)
str, imag = read_real(str)
try:
val = complex(real, imag)
except:
print ("Error casting numbers (" + repr(real) + ", " + repr(imag) + ") to floating point number.")
raise
return val
def extract_operator(line):
res = {}
states = []
line, state1 = read_state_tuple(line)
line = skip_whitespaces(line)
line, state2 = read_state_tuple(line)
amp = read_real_imag(line)
return (((state1, 'c'), (state2, 'a')), amp)
# def extract_operators(string):
# res = []
# remainder = string
# while remainder:
# remainder = skip_whitespaces(remainder)
# key = []
# while remainder and remainder[0] != ':':
# if remainder[0] == ',':
# remainder = skip_whitespaces(remainder[1:])
# op = remainder[0]
# if op not in "ac":
# raise RuntimeError("Operator, " + op +" must be either a or c.")
# remainder = skip_whitespaces(remainder[1:])
# assert(remainder[0] == '(')
# if remainder[0] not in "(":
# raise RuntimeError("Tuple designating state must follow operator."
# " Tuples start with (, not " + remainder[0])
# remainder, state = read_state_tuple(remainder)
# remainder = skip_whitespaces(remainder)
# if remainder[0] not in ':,':
# raise RuntimeError("Operators are separated by , or ended by :."
# " Not " + remainder[0])
# key.append((state, op))
# remainder, val = read_complex(remainder)
# res.append((tuple(key), val))
# return res
def read_name(line):
if line[0] != '(':
return line
return ""
def parse_file(filename):
operators = {}
with open(filename, 'r') as f:
name = ""
op = {}
        for line_number, line in enumerate(f):
line = line.strip()
if line and line[0] != '#':
if not name:
name = read_name(line)
if name:
continue
else:
name = "Op_" + repr(len(operators) + 1)
# extracted_ops = extract_operators(line)
# for key, val in extracted_ops:
key, val = extract_operator(line)
if key in op:
op[key] += val
else:
op[key] = val
elif not line and op:
operators[name] = op
op = {}
name = ""
if op:
operators[name] = op
return operators
def operator_to_string(operator):
state, op = operator
return op+repr(state)
def key_to_string(key):
res = []
for operator in key:
res.append(operator_to_string(operator))
return ", ".join(res)
def value_to_string(value):
return repr(value)
def key_value_to_string(key, value):
return key_to_string(key) + " : " + value_to_string(value) + "\n"
def write_operators_to_file(operators, filename):
with open(filename, 'w+'):
pass
strings = []
for operator in operators:
s = ""
for key, value in operator.items():
s += key_value_to_string(key, value)
strings.append(s)
with open(filename, 'a') as f:
f.write("\n".join(strings))
```
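A hedged sketch of driving `parse_file` above. The file name, operator name, and quantum-number tuples are invented; the line format ("(state1) (state2) real imag", '#' comments, an optional leading name line, blank lines between operators) is inferred from `extract_operator`, `read_name`, and `parse_file`.
```python
# Everything written to ops.txt below is an illustrative assumption inferred from the parser.
from impurityModel.ed.op_parser import parse_file

example = (
    "# hypothetical hopping term\n"
    "Hop_example\n"
    "(2, 1, 0, -2, 0) (2, 1, 0, -2, 1) 0.5 0.0\n"
    "(2, 1, 0, -2, 1) (2, 1, 0, -2, 0) 0.5 0.0\n"
)
with open("ops.txt", "w") as f:
    f.write(example)

operators = parse_file("ops.txt")
# {'Hop_example': {(((2, 1, 0, -2, 0), 'c'), ((2, 1, 0, -2, 1), 'a')): (0.5+0j), ...}}
print(operators)
```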
#### File: impurityModel/ed/remove.py
```python
import sys
from bisect import bisect_left
# Local imports
from impurityModel.ed import product_state_representation as psr
def binary_search(a, x):
'''
Return index to the leftmost value exactly equal to x.
If x is not in the list, return -1.
'''
i = bisect_left(a, x)
return i if i != len(a) and a[i] == x else -1
def utuple(i, state):
'''
Remove electron at orbital i in state.
Parameters
----------
i : int
Spin-orbital index
state : tuple
Product state.
Elements are indices of occupied orbitals.
Returns
-------
stateNew : tuple
Product state
amp : int
Amplitude. 0, -1 or 1.
'''
j = binary_search(state,i)
if j != -1:
stateNew = state[:j] + state[j+1:]
amp = 1 if j%2 == 0 else -1
return stateNew, amp
else:
return (), 0
def uint(n_spin_orbitals, i, state):
"""
Remove electron at orbital i in state.
Parameters
----------
n_spin_orbitals : int
Total number of spin-orbitals in the system.
i : int
Spin-orbital index
state : int
Product state.
Returns
-------
state_new : int
Product state
amp : int
Amplitude. 0, -1 or 1.
"""
# String representation of product state.
s = psr.int2str(state, n_spin_orbitals)
if s[i] == "0":
return -1, 0
elif s[i] == "1":
state_new = state - 2**(n_spin_orbitals-i-1)
amp = 1 if s[:i].count("1") % 2 == 0 else -1
return state_new, amp
else:
raise Exception("Integer representation of state is wrong.")
def ustr(i, state):
"""
Remove electron at orbital i in state.
Parameters
----------
i : int
Spin-orbital index
state : str
Product state.
Returns
-------
state_new : str
Product state
amp : int
Amplitude. 0, -1 or 1.
"""
if state[i] == "0":
return "", 0
elif state[i] == "1":
state_new = state[:i] + "0" + state[i+1:]
amp = 1 if state[:i].count("1") % 2 == 0 else -1
return state_new, amp
else:
raise Exception("String representation of state is wrong.")
def ubitarray(i, state):
"""
Remove electron at orbital i in state.
Parameters
----------
i : int
Spin-orbital index
state : bitarray(N)
Product state.
Returns
-------
amp : int
Amplitude. 0, -1 or 1.
"""
if state[i]:
# Modify the product state by removing an electron
state[i] = False
# Amplitude
return 1 if state[:i].count() % 2 == 0 else -1
else:
return 0
def ubytes(n_spin_orbitals, i, state):
"""
Remove electron at orbital i in state.
Parameters
----------
n_spin_orbitals : int
Total number of spin-orbitals in the system.
i : int
Spin-orbital index
state : bytes
Product state.
Returns
-------
state_new : bytes
Product state.
amp : int
Amplitude. 0, -1 or 1.
"""
# bitarray representation of product state.
bits = psr.bytes2bitarray(state, n_spin_orbitals)
# remove an electron at spin-orbital index i.
amp = ubitarray(i, bits)
# Convert back the updated product state to bytes representation.
state_new = psr.bitarray2bytes(bits)
return state_new, amp
```
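A quick illustration of the equivalent representations handled above, following the docstrings: spin-orbital index 2 is removed from a 7-orbital state with electrons at indices 2 and 5. It assumes the `impurityModel` package and its `bitarray` dependency are importable.
```python
# The same product state in three representations; expected outputs shown in comments.
from impurityModel.ed import remove

print(remove.utuple(2, (2, 5)))      # -> ((5,), 1)
print(remove.ustr(2, "0010010"))     # -> ('0000010', 1)
print(remove.uint(7, 2, 18))         # -> (2, 1), since "0010010" becomes "0000010"
```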
#### File: impurityModel/test/test_product_state_representation.py
```python
from bitarray import bitarray
# Local
from impurityModel.ed import product_state_representation as psr
def test_tuple2str():
# Number of spin-orbitals in the system
n = 7
# Indices of occupied spin-orbitals
t = (2, 5)
assert "0010010" == psr.tuple2str(t,n)
def test_str2tuple():
# String representation of one product state with particles at indices 2 and 5.
s = "0010010"
assert (2, 5) == psr.str2tuple(s)
def test_tuple2int():
# Number of spin-orbitals in the system
n = 7
# Indices of occupied spin-orbitals
t = (2, 5)
assert 18 == psr.tuple2int(t, n)
def test_int2tuple():
# Number of spin-orbitals in the system
n = 7
# Integer representation of one product state.
i = 18
assert (2, 5) == psr.int2tuple(i, n)
def test_tuple2bitarray():
# Number of spin-orbitals in the system
n = 7
# Indices of occupied spin-orbitals
t = (2, 5)
assert bitarray('0010010') == psr.tuple2bitarray(t, n)
def test_bitarray2tuple():
# Bitarray representation of one product state.
bits = bitarray('0010010')
assert (2, 5) == psr.bitarray2tuple(bits)
def test_tuple2bytes():
# Number of spin-orbitals in the system
n = 7
# Indices of occupied spin-orbitals
t = (2, 5)
assert b'$' == psr.tuple2bytes(t, n)
def test_bytes2tuple():
# Number of spin-orbitals in the system
n = 7
# Bytes representation of one product state.
bytestr = b'$'
assert (2, 5) == psr.bytes2tuple(bytestr, n)
def test_str2int():
# String representation of a product state.
s = "0010010"
assert 18 == psr.str2int(s)
def test_int2str():
# Number of spin-orbitals in the system
n = 7
# Integer representation of a product state.
i = 18
assert "0010010" == psr.int2str(i, n)
def test_str2bitarray():
# String representation of a product state.
s = "0010010"
assert bitarray('0010010') == psr.str2bitarray(s)
def test_bitarray2str():
# Bitarray representation of a product state.
bits = bitarray('0010010')
assert "0010010" == psr.bitarray2str(bits)
def test_str2bytes():
# String representation of a product state.
s = "0010010"
assert b'$' == psr.str2bytes(s)
def test_bytes2str():
# Number of spin-orbitals in the system
n = 7
# Bytes representation of a product state.
bytestr = b'$'
assert "0010010" == psr.bytes2str(bytestr, n)
def test_int2bitarray():
# Number of spin-orbitals in the system
n = 7
# Integer representation of a product state.
i = 18
assert bitarray('0010010') == psr.int2bitarray(i, n)
def test_bitarray2int():
# Bitarray representation of a product state.
bits = bitarray('0010010')
assert 18 == psr.bitarray2int(bits)
def test_int2bytes():
# Number of spin-orbitals in the system
n = 7
# Integer representation of a product state.
i = 18
assert b'$' == psr.int2bytes(i, n)
def test_bytes2int():
# Number of spin-orbitals in the system
n = 7
# Bytes representation of a product state.
bytestr = b'$'
assert 18 == psr.bytes2int(bytestr, n)
def test_bitarray2bytes():
# Bitarray representation of a product state.
bits = bitarray('0010010')
assert b'$' == psr.bitarray2bytes(bits)
def test_bytes2bitarray():
# Number of spin-orbitals in the system
n = 7
# Bytes representation of a product state.
bytestr = b'$'
assert bitarray('0010010') == psr.bytes2bitarray(bytestr, n)
``` |
{
"source": "johanjun/Building_ML_Powered_Applications",
"score": 3
} |
#### File: Building_ML_Powered_Applications/ml_editor/data_visualization.py
```python
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
def plot_embeddings(embeddings, sent_labels):
"""
    Plot the embeddings, colored according to the sentence labels.
    :param embeddings: two-dimensional embeddings
    :param sent_labels: labels to display
"""
fig = plt.figure(figsize=(16, 10))
color_map = {True: "#1f77b4", False: "#ff7f0e"}
plt.scatter(
embeddings[:, 0],
embeddings[:, 1],
c=[color_map[x] for x in sent_labels],
s=40,
alpha=0.4,
)
handles = [
Rectangle((0, 0), 1, 1, color=c, ec="k") for c in ["#1f77b4", "#ff7f0e"]
]
labels = ["answered", "unanswered"]
plt.legend(handles, labels)
plt.gca().set_aspect("equal", "box")
plt.gca().set_xlabel("x")
plt.gca().set_ylabel("y")
```
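A minimal sketch of calling `plot_embeddings`, with random 2-D points standing in for real embeddings and boolean labels marking answered questions; the shapes and label convention are taken from the function body above.
```python
# Random data is only a stand-in; an (N, 2) array and a list of bools are all that is required.
import numpy as np
import matplotlib.pyplot as plt

from ml_editor.data_visualization import plot_embeddings

embeddings = np.random.rand(200, 2)                         # hypothetical 2-D embeddings
sent_labels = [bool(x) for x in np.random.rand(200) > 0.5]  # True = answered
plot_embeddings(embeddings, sent_labels)
plt.show()
```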
#### File: Building_ML_Powered_Applications/ml_editor/explanation_generation.py
```python
import os
from pathlib import Path
import pandas as pd
from lime.lime_tabular import LimeTabularExplainer
from ml_editor.data_processing import get_split_by_author
FEATURE_DISPLAY_NAMES = {
"num_questions": "물음표 빈도",
"num_periods": "마침표 빈도",
"num_commas": "쉼표 빈도",
"num_exclam": "느낌표 빈도",
"num_quotes": "따옴표 빈도",
"num_colon": "콜론 빈도",
"num_semicolon": "세미콜론 빈도",
"num_stops": "불용어 빈도",
"num_words": "단어 개수",
"num_chars": "문자 개수",
"num_diff_words": "어휘 다양성",
"avg_word_len": "평균 단어 길이",
"polarity": "긍정적인 감성",
"ADJ": "형용사 빈도",
"ADP": "전치사 빈도",
"ADV": "부사 빈도",
"AUX": "조동사 빈도",
"CONJ": "접속사 빈도",
"DET": "한정사 빈도",
"INTJ": "감탄사 빈도",
"NOUN": "명사 빈도",
"NUM": "숫자 빈도",
"PART": "불변화사 빈도",
"PRON": "대명사 빈도",
"PROPN": "고유 명사 빈도",
"PUNCT": "구두점 빈도",
"SCONJ": "종속 접속사 빈도",
"SYM": "기호 빈도",
"VERB": "동사 빈도",
"X": "다른 단어의 빈도",
}
POS_NAMES = {
"ADJ": "adjective",
"ADP": "adposition",
"ADV": "adverb",
"AUX": "auxiliary verb",
"CONJ": "coordinating conjunction",
"DET": "determiner",
"INTJ": "interjection",
"NOUN": "noun",
"NUM": "numeral",
"PART": "particle",
"PRON": "pronoun",
"PROPN": "proper noun",
"PUNCT": "punctuation",
"SCONJ": "subordinating conjunction",
"SYM": "symbol",
"VERB": "verb",
"X": "other",
}
FEATURE_ARR = [
"num_questions",
"num_periods",
"num_commas",
"num_exclam",
"num_quotes",
"num_colon",
"num_stops",
"num_semicolon",
"num_words",
"num_chars",
"num_diff_words",
"avg_word_len",
"polarity",
]
FEATURE_ARR.extend(POS_NAMES.keys())
def get_explainer():
"""
    Prepare a LIME explainer using the training data.
    Fast enough that it does not need to be serialized.
    :return: LIME explainer object
"""
curr_path = Path(os.path.dirname(__file__))
data_path = Path("../data/writers_with_features.csv")
df = pd.read_csv(curr_path / data_path)
train_df, test_df = get_split_by_author(df, test_size=0.2, random_state=40)
explainer = LimeTabularExplainer(
train_df[FEATURE_ARR].values,
feature_names=FEATURE_ARR,
class_names=["low", "high"],
)
return explainer
EXPLAINER = get_explainer()
def simplify_order_sign(order_sign):
"""
    Simplify the comparison sign so the output is clearer for the user.
    :param order_sign: input comparison operator
    :return: simplified operator
"""
if order_sign in ["<=", "<"]:
return "<"
if order_sign in [">=", ">"]:
return ">"
return order_sign
def get_recommended_modification(simple_order, impact):
"""
    Generate a recommendation string from the operator and the type of impact.
    :param simple_order: simplified operator
    :param impact: whether the change has a positive or negative impact
    :return: recommendation string
"""
bigger_than_threshold = simple_order == ">"
has_positive_impact = impact > 0
if bigger_than_threshold and has_positive_impact:
return "높일 필요가 없습니다"
if not bigger_than_threshold and not has_positive_impact:
return "높이세요"
if bigger_than_threshold and not has_positive_impact:
return "낮추세요"
if not bigger_than_threshold and has_positive_impact:
return "낮출 필요가 없습니다"
def parse_explanations(exp_list):
"""
LIME이 반환한 설명을 사용자가 읽을 수 있도록 파싱합니다.
:param exp_list: LIME 설명 도구가 반환한 설명
:return: 사용자에게 전달한 문자열을 담은 딕셔너리 배열
"""
parsed_exps = []
for feat_bound, impact in exp_list:
conditions = feat_bound.split(" ")
        # Ignore double-bounded conditions such as 1 <= a < 3,
        # because they are hard to express as a recommendation
if len(conditions) == 3:
feat_name, order, threshold = conditions
simple_order = simplify_order_sign(order)
recommended_mod = get_recommended_modification(simple_order, impact)
parsed_exps.append(
{
"feature": feat_name,
"feature_display_name": FEATURE_DISPLAY_NAMES[feat_name],
"order": simple_order,
"threshold": threshold,
"impact": impact,
"recommendation": recommended_mod,
}
)
return parsed_exps
def get_recommendation_string_from_parsed_exps(exp_list):
"""
    Generate the recommendation text to display in the Flask app.
    :param exp_list: array of dictionaries holding the explanations
    :return: HTML recommendation text
"""
recommendations = []
for i, feature_exp in enumerate(exp_list):
recommendation = "%s %s" % (
feature_exp["recommendation"],
feature_exp["feature_display_name"],
)
font_color = "green"
if feature_exp["recommendation"] in ["Increase", "Decrease"]:
font_color = "red"
rec_str = """<font color="%s">%s) %s</font>""" % (
font_color,
i + 1,
recommendation,
)
recommendations.append(rec_str)
rec_string = "<br/>".join(recommendations)
return rec_string
```
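A small sketch of the parsing helpers above, fed with a made-up explanation list in the `(feature_bound, impact)` format that LIME's `exp.as_list()` returns. Note that importing the module runs `get_explainer()`, so the training CSV it loads must be present.
```python
# The explanation pairs below are invented; the feature names come from FEATURE_DISPLAY_NAMES.
from ml_editor.explanation_generation import (
    parse_explanations,
    get_recommendation_string_from_parsed_exps,
)

fake_exp_list = [
    ("num_words > 1024.00", 0.12),   # hypothetical bound and impact
    ("num_stops <= 12.00", -0.04),
]
parsed = parse_explanations(fake_exp_list)
print(get_recommendation_string_from_parsed_exps(parsed))  # HTML lines, one per feature
```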
#### File: Building_ML_Powered_Applications/ml_editor/model_v3.py
```python
import os
from pathlib import Path
import spacy
import joblib
from tqdm import tqdm
import pandas as pd
import nltk
from ml_editor.explanation_generation import (
parse_explanations,
get_recommendation_string_from_parsed_exps,
EXPLAINER,
FEATURE_ARR,
)
from ml_editor.model_v2 import add_v2_text_features
nltk.download("vader_lexicon")
SPACY_MODEL = spacy.load("en_core_web_sm")
tqdm.pandas()
curr_path = Path(os.path.dirname(__file__))
model_path = Path("../models/model_3.pkl")
MODEL = joblib.load(curr_path / model_path)
def get_features_from_input_text(text_input):
"""
    Generate features for a single text input.
    :param text_input: question string
    :return: one-row Series holding the model v3 features
"""
arr_features = get_features_from_text_array([text_input])
return arr_features.iloc[0]
def get_features_from_text_array(input_array):
"""
    Generate features for an array of input texts.
    :param input_array: array of input questions
    :return: feature DataFrame
"""
text_ser = pd.DataFrame(input_array, columns=["full_text"])
text_ser = add_v2_text_features(text_ser.copy())
features = text_ser[FEATURE_ARR].astype(float)
return features
def get_model_probabilities_for_input_texts(text_array):
"""
    Return model v3's estimated probabilities for an array of input texts.
    :param text_array: array of input questions
    :return: array of predictions
"""
global MODEL
features = get_features_from_text_array(text_array)
return MODEL.predict_proba(features)
def get_question_score_from_input(text):
"""
    Return model v3's probability for a single text input.
    :param text: input string
    :return: predicted probability that the question receives a high score
"""
preds = get_model_probabilities_for_input_texts([text])
positive_proba = preds[0][1]
return positive_proba
def get_recommendation_and_prediction_from_text(input_text, num_feats=10):
"""
    Get the score and recommendations to display in the Flask app.
    :param input_text: input string
    :param num_feats: number of features to present as recommendations
    :return: recommendations and current score
"""
global MODEL
feats = get_features_from_input_text(input_text)
pos_score = MODEL.predict_proba([feats])[0][1]
print("설명")
exp = EXPLAINER.explain_instance(
feats, MODEL.predict_proba, num_features=num_feats, labels=(1,)
)
print("설명 끝")
parsed_exps = parse_explanations(exp.as_list())
recs = get_recommendation_string_from_parsed_exps(parsed_exps)
output_str = """
    Current score (0 is worst, 1 is best):
<br/>
%s
<br/>
<br/>
    Recommendations (in order of importance):
<br/>
<br/>
%s
""" % (
pos_score,
recs,
)
return output_str
```
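A hedged driver for the module above. Importing it downloads the NLTK lexicon, loads the spaCy model, and unpickles `models/model_3.pkl`, so all three must be available; the question text is made up.
```python
# Assumes the trained model, spaCy model and data dependencies required by model_v3 exist.
from ml_editor.model_v3 import (
    get_question_score_from_input,
    get_recommendation_and_prediction_from_text,
)

question = "How do I write a clear title for a programming question?"
print(get_question_score_from_input(question))                              # probability of a high score
print(get_recommendation_and_prediction_from_text(question, num_feats=5))   # HTML recommendations
```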
#### File: Building_ML_Powered_Applications/tests/test_ingestion.py
```python
import sys
import os
from pathlib import Path
import pandas as pd
# Needed so that pytest can import the package properly.
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + "/../")
from ml_editor.data_ingestion import parse_xml_to_csv
TEXT_LENGTH_FIELD = "text_len"
# Define the features required for the tests.
REQUIRED_COLUMNS = [
"Id",
"AnswerCount",
"PostTypeId",
"AcceptedAnswerId",
"Body",
"body_text",
"Title",
"Score",
]
# Interval based on the data exploration results
ACCEPTABLE_TEXT_LENGTH_MEANS = pd.Interval(left=20, right=2000)
def get_fixture_df():
"""
    Return a DataFrame produced by the parser.
:return:
"""
curr_path = Path(os.path.dirname(__file__))
return parse_xml_to_csv(curr_path / Path("fixtures/MiniPosts.xml"))
def test_parser_returns_dataframe():
"""
    Test that the parser returns a DataFrame.
"""
df = get_fixture_df()
assert isinstance(df, pd.DataFrame)
def test_feature_columns_exist():
"""
    Check that all required columns are present.
"""
df = get_fixture_df()
for col in REQUIRED_COLUMNS:
assert col in df.columns
def test_features_not_all_null():
"""
    Check that no feature consists entirely of missing values.
"""
df = get_fixture_df()
for col in REQUIRED_COLUMNS:
assert not df[col].isnull().all()
def test_text_mean():
"""
    Check that the mean text length matches the data exploration results.
"""
df = get_fixture_df()
df["text_len"] = df["body_text"].str.len()
text_col_mean = df["text_len"].mean()
assert text_col_mean in ACCEPTABLE_TEXT_LENGTH_MEANS
``` |
{
"source": "JohanK91/MethodDice",
"score": 4
} |
#### File: MethodDice/methoddice/Game.py
```python
class game():
"""class for game"""
def gamerun(self):
"""normal game checks if player 1 or 2 have enough point to win the game"""
import Player
global player1score
self.player1score = 0
global player2score
self.player2score = 0
global dicerolls_listp1
dicerolls_listp1 = []
global dicerolls_listp2
dicerolls_listp2 = []
while (self.player1score < 100 and self.player2score < 100):
self.player1score = self.player1score + game.playermove(self)
print(Player.player.Player1nameR(self) + ", your current score is: {} \n" .format(self.player1score))
if(self.player1score < 100):
self.player2score = self.player2score + game.computermove(self)
print("The AI have a current score of: {}".format(self.player2score))
print()
print(game.winprint1(self))
def winprint1(self):
"""prints if it is player 1 or 2 who wins"""
import Player
if (self.player1score > self.player2score):
x = ("Congrats!! " + Player.player.Player1nameR(self) + ", you won! :D")
return x
else:
x = "Sorry! The AI was better this time! :( \nBetter luck next time ;)"
return x
def multiplayergame(self):
"""game between two player and checks if player 1 or 2 have enough point to win the game"""
import Player
import Intelligence
global player1score
self.player1score = 0
global player2score
self.player2score = 0
global dicerolls_listp1
dicerolls_listp1 = []
global dicerolls_listp2
dicerolls_listp2 = []
while (Intelligence.Intelligence.takescores1(self) < 100 and Intelligence.Intelligence.takescores2(self) < 100):
self.player1score = self.player1score + game.playermove(self)
print(Player.player.Player1nameR(self) + ", your current score is: {} \n" .format(self.player1score))
if(self.player1score < 100):
self.player2score = self.player2score + game.player2move(self)
print(Player.player.Player2nameR(self) + ", your current score is: {} \n" .format(self.player2score))
print(game.winprint2(self))
def winprint2(self):
"""prints if it is player 1 or 2 who wins"""
import Player
if (self.player1score > self.player2score):
x = ("Congrats!! " + Player.player.Player1nameR(self) + ", you won! :D")
#print
return x
else:
x = ("Congrats!! " + Player.player.Player2nameR(self) + ", you won! :D")
return x
def playermove(self):
""""handles players moves"""
import Cheat
import Histogram
import Dice
global dicerolls_listp1
self.matchscore = 0
self.newround = game.newroundTrue(self)
game.playerTurn1(self)
while self.newround == True:
self.rolling = Dice.dice.Dicerolling(self)
dicerolls_listp1.append(self.rolling)
if (self.rolling == 1):
print(game.player1round(self))
self.matchscore = 0
self.newround = game.newroundFalse(self)
else:
print(game.player1round(self))
self.matchscore = self.matchscore + self.rolling
print("Your score for this round is {}".format(self.matchscore))
self.newround = game.playerYN(self)
print("Turn over!")
self.matchscore = self.matchscore + Cheat.cheatclass.cheatingR(self)
return self.matchscore
def playerYN(self):
"""handles choice of player"""
import Cheat
import Histogram
print("Do you want to roll again? (y = yes) & (n = no) & (q = options)")
self.newroundchoice = input("Enter your choice here: ")
if (self.newroundchoice == "y" or self.newroundchoice == "Y"):
Cheat.cheatclass.cheatF(self)
self.newround = game.newroundTrue(self)
return newround
elif (self.newroundchoice == "n" or self.newroundchoice == "N"):
Cheat.cheatclass.cheatF(self)
self.newround = game.newroundFalse(self)
return newround
elif (self.newroundchoice == "q" or self.newroundchoice == "Q"):
Cheat.cheatclass.cheatF(self)
Histogram.Histogram.options(self)
self.newround = game.newroundR(self)
return newround
else:
print("Sorry, I could not understand that! :*( \nCan you please only enter a y or an n or a q!")
print("Let´s make a new try! :)\n")
game.playerYN(self)
def player1round(self):
"""handles the print of the dice number"""
import Dice
import Player
print = (Player.player.Player1nameR(self) + ", your dice showed: " + str(Dice.dice.rollGet(self)) )
return print
def player2move(self):
"""handles player 2 game"""
import Histogram
import Dice
import Cheat
global dicerolls_listp2
self.matchscore = 0
self.newround = game.newroundTrue(self)
game.playerTurn2(self)
while self.newround == True:
self.rolling = Dice.dice.Dicerolling(self)
dicerolls_listp2.append(self.rolling)
if (self.rolling == 1):
print(game.player2round(self))
self.matchscore = 0
self.newround = game.newroundFalse(self)
else:
print(game.player2round(self))
self.matchscore = self.matchscore + self.rolling
print("Your score for this round is {}".format(self.matchscore))
self.newround = game.playerYN(self)
print("Turn over!")
self.matchscore = self.matchscore + Cheat.cheatclass.cheatingR(self)
return self.matchscore
def player2round(self):
"""handles the print of dice number for player 2"""
import Dice
import Player
print = (Player.player.Player2nameR(self) + ", your dice showed: " + str(Dice.dice.rollGet(self)) )
return print
def computerround1(self):
"""handles the print for the AI if the dice shows 1"""
print = ("The AI rolled a 1")
return print
def computerround2(self):
"""handles the print of dice number for the AI"""
import Dice
print = ("The AI rolled: " + str(Dice.dice.rollGet(self)))
return print
def computermove(self):
"""handles the moves of computer"""
import Dice
import Player
import Intelligence
global dicerolls_listp2
self.matchscore = 0
self.newround = game.newroundTrue(self)
while self.newround == True:
self.rolling = Dice.dice.Dicerolling(self)
dicerolls_listp2.append(self.rolling)
if self.rolling == 1:
print(game.computerround1(self))
self.matchscore = 0
self.newround = game.newroundFalse(self)
else:
print(game.computerround2(self))
self.matchscore = self.matchscore + self.rolling
if self.matchscore < Intelligence.Intelligence.AImode(self):
print("The AI has chosen to roll again!")
else:
self.newround = game.newroundFalse(self)
print("The AI's turn have ended. It is now " + Player.player.Player1nameR(self) + "'s turn to roll. Prepare yourself.")
return self.matchscore
def newroundFalse(self):
"""makes boolean newround == False"""
global newround
# newround = self.newround
newround = False
return newround
def newroundTrue(self):
"""makes boolean newround == True"""
global newround
# newround = self.newround
newround = True
return newround
def newroundR(self):
"""Declare newround to global"""
global newround
return newround
def playerTurnR(self):
"""declare playerturn to he"""
global playerturn
playerturn = "he"
return playerturn
def playerTurn1(self):
"""declare playerturn to p1"""
global playerturn
playerturn = "p1"
return playerturn
def playerTurn2(self):
"""declare playerturn to p2"""
global playerturn
playerturn = "p2"
return playerturn
def list1R(self):
"""declare the dicreroll list of player 1 to global"""
global dicerolls_listp1
return dicerolls_listp1
def list2R(self):
"""declare the dicreroll list of player 1 to global"""
global dicerolls_listp2
return dicerolls_listp2
```
#### File: MethodDice/methoddice/testhistogram.py
```python
import unittest
class Histogramtest(unittest.TestCase):
"""This is the unittest for the class Histogram"""
def testoptionmenu(self):
"""Tests if the option menu print is correct"""
import Histogram
res = Histogram.Histogram.optionsprint(self)
exp = ("This is the options menu. Here you can do several things." +
"\n1. Change your name." +
"\n2. Upload your score to the highscore list." +
"\n3. History of rolls." +
"\n4. AI intelligence." +
"\n5. Cheat" +
"\n6. Return to the match.")
self.assertEqual(res,exp)
def testoptionChoice1(self):
"""Tests if the option 1 choice works"""
#test option 1
import Histogram
res = Histogram.Histogram.optionChoice1(self)
exp = Histogram.Histogram.optionChoiceR(self)
self.assertEqual(res,exp)
def testoptionChoice2(self):
"""Tests if the option 2 choice works"""
#test option 2
import Histogram
res = Histogram.Histogram.optionChoice2(self)
exp = Histogram.Histogram.optionChoiceR(self)
self.assertEqual(res,exp)
def testoptionChoice3(self):
"""Tests if the option 3 choice works"""
#test option 3
import Histogram
res = Histogram.Histogram.optionChoice3(self)
exp = Histogram.Histogram.optionChoiceR(self)
self.assertEqual(res,exp)
def testoptionChoice4(self):
"""Tests if the option 4 choice works"""
#test option 4
import Histogram
res = Histogram.Histogram.optionChoice4(self)
exp = Histogram.Histogram.optionChoiceR(self)
self.assertEqual(res,exp)
def testoptionChoice5(self):
"""Tests if the option 5 choice works"""
#test option 5
import Histogram
res = Histogram.Histogram.optionChoice5(self)
exp = Histogram.Histogram.optionChoiceR(self)
self.assertEqual(res,exp)
def testoptionChoice6(self):
"""Tests if the option 6 choice works"""
#test option 6
import Histogram
res = Histogram.Histogram.optionChoice6(self)
exp = Histogram.Histogram.optionChoiceR(self)
self.assertEqual(res,exp)
if __name__ == '__main__':
unittest.main()
```
#### File: site-packages/cohesion/flake8_extension.py
```python
import cohesion
class CohesionChecker(object):
name = cohesion.__name__
version = cohesion.__version__
off_by_default = False
_code = 'H601'
_error_tmpl = 'H601 class has low ({0:.2f}%) cohesion'
def __init__(self, tree, filename):
self.tree = tree
self.filename = filename
@classmethod
def add_options(cls, parser):
flag = '--cohesion-below'
kwargs = {
'action': 'store',
'type': 'float',
'default': 50.0,
'help': 'only show cohesion results with this percentage or lower',
'parse_from_config': 'True',
}
config_opts = getattr(parser, 'config_options', None)
if isinstance(config_opts, list):
# flake8 2.x
kwargs.pop('parse_from_config')
parser.add_option(flag, **kwargs)
parser.config_options.append('cohesion-below')
else:
# flake8 3.x
parser.add_option(flag, **kwargs)
@classmethod
def parse_options(cls, options):
cls.cohesion_below = options.cohesion_below
def run(self):
file_module = cohesion.module.Module(self.tree)
file_module.filter_below(float(self.cohesion_below))
for class_name in file_module.classes():
cohesion_percentage = file_module.class_cohesion_percentage(class_name)
yield (
file_module.structure[class_name]['lineno'],
file_module.structure[class_name]['col_offset'],
self._error_tmpl.format(cohesion_percentage),
type(self)
)
```
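The plugin above delegates the actual measurement to the `cohesion` package; a hedged sketch of calling that same API directly (file name and threshold are arbitrary) looks like this.
```python
# Mirrors the calls made in CohesionChecker.run(); 50.0 matches the plugin's default threshold.
import ast
import cohesion

with open("my_module.py", "r") as fh:           # any Python file to analyse (made-up name)
    tree = ast.parse(fh.read())

file_module = cohesion.module.Module(tree)
file_module.filter_below(50.0)                  # keep classes at or below 50% cohesion

for class_name in file_module.classes():
    pct = file_module.class_cohesion_percentage(class_name)
    line = file_module.structure[class_name]["lineno"]
    print("{} (line {}): {:.2f}% cohesion".format(class_name, line, pct))
```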
#### File: site-packages/pdoc/cli.py
```python
import argparse
import ast
import importlib
import inspect
import os
import os.path as path
import json
import re
import sys
import warnings
from contextlib import contextmanager
from functools import lru_cache
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Dict, List, Sequence
from warnings import warn
import pdoc
parser = argparse.ArgumentParser(
description="Automatically generate API docs for Python modules.",
epilog="Further documentation is available at <https://pdoc3.github.io/pdoc/doc>.",
)
aa = parser.add_argument
mode_aa = parser.add_mutually_exclusive_group().add_argument
aa(
'--version', action='version', version='%(prog)s ' + pdoc.__version__)
aa(
"modules",
type=str,
metavar='MODULE',
nargs="+",
help="The Python module name. This may be an import path resolvable in "
"the current environment, or a file path to a Python module or "
"package.",
)
aa(
"-c", "--config",
type=str,
metavar='OPTION=VALUE',
action='append',
default=[],
help="Override template options. This is an alternative to using "
"a custom config.mako file in --template-dir. This option "
"can be specified multiple times.",
)
aa(
"--filter",
type=str,
metavar='STRING',
default=None,
help="Comma-separated list of filters. When specified, "
"only identifiers containing the specified string "
"will be shown in the output. Search is case sensitive. "
"Has no effect when --http is set.",
)
aa(
"-f", "--force",
action="store_true",
help="Overwrite any existing generated (--output-dir) files.",
)
mode_aa(
"--html",
action="store_true",
help="When set, the output will be HTML formatted.",
)
mode_aa(
"--pdf",
action="store_true",
help="When set, the specified modules will be printed to standard output, "
"formatted in Markdown-Extra, compatible with most "
"Markdown-(to-HTML-)to-PDF converters.",
)
aa(
"--html-dir",
type=str,
help=argparse.SUPPRESS,
)
aa(
"-o", "--output-dir",
type=str,
metavar='DIR',
help="The directory to output generated HTML/markdown files to "
"(default: ./html for --html).",
)
aa(
"--html-no-source",
action="store_true",
help=argparse.SUPPRESS,
)
aa(
"--overwrite",
action="store_true",
help=argparse.SUPPRESS,
)
aa(
"--external-links",
action="store_true",
help=argparse.SUPPRESS,
)
aa(
"--template-dir",
type=str,
metavar='DIR',
default=None,
help="Specify a directory containing Mako templates "
"(html.mako, text.mako, config.mako and/or any templates they include). "
"Alternatively, put your templates in $XDG_CONFIG_HOME/pdoc and "
"pdoc will automatically find them.",
)
aa(
"--link-prefix",
type=str,
help=argparse.SUPPRESS,
)
aa(
"--close-stdin",
action="store_true",
help="When set, stdin will be closed before importing, to account for "
"ill-behaved modules that block on stdin."
)
DEFAULT_HOST, DEFAULT_PORT = 'localhost', 8080
def _check_host_port(s):
if s and ':' not in s:
raise argparse.ArgumentTypeError(
"'{}' doesn't match '[HOST]:[PORT]'. "
"Specify `--http :` to use default hostname and port.".format(s))
return s
aa(
"--http",
default='',
type=_check_host_port,
metavar='HOST:PORT',
help="When set, pdoc will run as an HTTP server providing documentation "
"for specified modules. If you just want to use the default hostname "
"and port ({}:{}), set the parameter to :.".format(DEFAULT_HOST, DEFAULT_PORT),
)
aa(
"--skip-errors",
action="store_true",
help="Upon unimportable modules, warn instead of raising."
)
args = argparse.Namespace()
class _WebDoc(BaseHTTPRequestHandler):
args = None # Set before server instantiated
template_config = None
def do_HEAD(self):
status = 200
if self.path != "/":
status = self.check_modified()
self.send_response(status)
self.send_header("Content-type", "text/html; charset=utf-8")
self.end_headers()
def check_modified(self):
try:
module = pdoc.import_module(self.import_path_from_req_url)
new_etag = str(os.stat(module.__file__).st_mtime)
except ImportError:
return 404
old_etag = self.headers.get('If-None-Match', new_etag)
if old_etag == new_etag:
# Don't log repeating checks
self.log_request = lambda *args, **kwargs: None
return 304
return 205
def do_GET(self):
# Deny favicon shortcut early.
if self.path == "/favicon.ico":
return None
importlib.invalidate_caches()
code = 200
if self.path == "/":
modules = [pdoc.import_module(module, reload=True)
for module in self.args.modules]
modules = sorted((module.__name__, inspect.getdoc(module))
for module in modules)
out = pdoc._render_template('/html.mako',
modules=modules,
**self.template_config)
elif self.path.endswith(".ext"):
# External links are a bit weird. You should view them as a giant
# hack. Basically, the idea is to "guess" where something lives
# when documenting another module and hope that guess can actually
# track something down in a more global context.
#
# The idea here is to start specific by looking for HTML that
# exists that matches the full external path given. Then trim off
# one component at the end and try again.
#
# If no HTML is found, then we ask `pdoc` to do its thang on the
# parent module in the external path. If all goes well, that
# module will then be able to find the external identifier.
import_path = self.path[:-4].lstrip("/")
resolved = self.resolve_ext(import_path)
if resolved is None: # Try to generate the HTML...
print("Generating HTML for %s on the fly..." % import_path, file=sys.stderr)
try:
out = pdoc.html(import_path.split(".")[0], **self.template_config)
except Exception as e:
print('Error generating docs: {}'.format(e), file=sys.stderr)
# All hope is lost.
code = 404
out = "External identifier <code>%s</code> not found." % import_path
else:
return self.redirect(resolved)
# Redirect '/pdoc' to '/pdoc/' so that relative links work
# (results in '/pdoc/cli.html' instead of 'cli.html')
elif not self.path.endswith(('/', '.html')):
return self.redirect(self.path + '/')
# Redirect '/pdoc/index.html' to '/pdoc/' so it's more pretty
elif self.path.endswith(pdoc._URL_PACKAGE_SUFFIX):
return self.redirect(self.path[:-len(pdoc._URL_PACKAGE_SUFFIX)] + '/')
else:
try:
out = self.html()
except Exception:
import traceback
from html import escape
code = 404
out = "Error importing module <code>{}</code>:\n\n<pre>{}</pre>".format(
self.import_path_from_req_url, escape(traceback.format_exc()))
out = out.replace('\n', '<br>')
self.send_response(code)
self.send_header("Content-type", "text/html; charset=utf-8")
self.end_headers()
self.echo(out)
def redirect(self, location):
self.send_response(302)
self.send_header("Location", location)
self.end_headers()
def echo(self, s):
self.wfile.write(s.encode("utf-8"))
def html(self):
"""
Retrieves and sends the HTML belonging to the path given in
URL. This method is smart and will look for HTML files already
generated and account for whether they are stale compared to
the source code.
"""
return pdoc.html(self.import_path_from_req_url,
reload=True, http_server=True, external_links=True,
skip_errors=args.skip_errors,
**self.template_config)
def resolve_ext(self, import_path):
def exists(p):
p = path.join(args.output_dir, p)
pkg = path.join(p, pdoc._URL_PACKAGE_SUFFIX.lstrip('/'))
mod = p + pdoc._URL_MODULE_SUFFIX
if path.isfile(pkg):
return pkg[len(args.output_dir):]
elif path.isfile(mod):
return mod[len(args.output_dir):]
return None
parts = import_path.split(".")
for i in range(len(parts), 0, -1):
p = path.join(*parts[0:i])
realp = exists(p)
if realp is not None:
return "/%s#%s" % (realp.lstrip("/"), import_path)
return None
@property
def import_path_from_req_url(self):
pth = self.path.split('#')[0].lstrip('/')
for suffix in ('/',
pdoc._URL_PACKAGE_SUFFIX,
pdoc._URL_INDEX_MODULE_SUFFIX,
pdoc._URL_MODULE_SUFFIX):
if pth.endswith(suffix):
pth = pth[:-len(suffix)]
break
return pth.replace('/', '.')
def module_path(m: pdoc.Module, ext: str):
return path.join(args.output_dir, *re.sub(r'\.html$', ext, m.url()).split('/'))
def _quit_if_exists(m: pdoc.Module, ext: str):
if args.force:
return
paths = [module_path(m, ext)]
if m.is_package: # If package, make sure the dir doesn't exist either
paths.append(path.dirname(paths[0]))
for pth in paths:
if path.lexists(pth):
print("File '%s' already exists. Delete it, or run with --force" % pth,
file=sys.stderr)
sys.exit(1)
@contextmanager
def _open_write_file(filename):
try:
with open(filename, 'w', encoding='utf-8') as f:
yield f
print(filename) # print created file path to stdout
except Exception:
try:
os.unlink(filename)
except Exception:
pass
raise
def recursive_write_files(m: pdoc.Module, ext: str, **kwargs):
assert ext in ('.html', '.md')
filepath = module_path(m, ext=ext)
dirpath = path.dirname(filepath)
if not os.access(dirpath, os.R_OK):
os.makedirs(dirpath)
with _open_write_file(filepath) as f:
if ext == '.html':
f.write(m.html(**kwargs))
elif ext == '.md':
f.write(m.text(**kwargs))
for submodule in m.submodules():
recursive_write_files(submodule, ext=ext, **kwargs)
def _flatten_submodules(modules: Sequence[pdoc.Module]):
for module in modules:
yield module
for submodule in module.submodules():
yield from _flatten_submodules((submodule,))
def _print_pdf(modules, **kwargs):
modules = list(_flatten_submodules(modules))
print(pdoc._render_template('/pdf.mako', modules=modules, **kwargs))
def _warn_deprecated(option, alternative='', use_config_mako=False):
msg = 'Program option `{}` is deprecated.'.format(option)
if alternative:
msg += ' Use `' + alternative + '`'
if use_config_mako:
msg += ' or override config.mako template'
msg += '.'
warn(msg, DeprecationWarning, stacklevel=2)
def _generate_lunr_search(modules: List[pdoc.Module],
index_docstrings: bool,
template_config: dict):
"""Generate index.js for search"""
def trim_docstring(docstring):
return re.sub(r'''
\s+| # whitespace sequences
\s+[-=~]{3,}\s+| # title underlines
^[ \t]*[`~]{3,}\w*$| # code blocks
\s*[`#*]+\s*| # common markdown chars
\s*([^\w\d_>])\1\s*| # sequences of punct of the same kind
\s*</?\w*[^>]*>\s* # simple HTML tags
''', ' ', docstring, flags=re.VERBOSE | re.MULTILINE)
def recursive_add_to_index(dobj):
info = {
'ref': dobj.refname,
'url': to_url_id(dobj.module),
}
if index_docstrings:
info['doc'] = trim_docstring(dobj.docstring)
if isinstance(dobj, pdoc.Function):
info['func'] = 1
index.append(info)
for member_dobj in getattr(dobj, 'doc', {}).values():
recursive_add_to_index(member_dobj)
@lru_cache()
def to_url_id(module):
url = module.url()
if url not in url_cache:
url_cache[url] = len(url_cache)
return url_cache[url]
index = [] # type: List[Dict]
url_cache = {} # type: Dict[str, int]
for top_module in modules:
recursive_add_to_index(top_module)
urls = sorted(url_cache.keys(), key=url_cache.__getitem__)
main_path = args.output_dir
with _open_write_file(path.join(main_path, 'index.js')) as f:
f.write("URLS=")
json.dump(urls, f, indent=0, separators=(',', ':'))
f.write(";\nINDEX=")
json.dump(index, f, indent=0, separators=(',', ':'))
# Generate search.html
with _open_write_file(path.join(main_path, 'doc-search.html')) as f:
rendered_template = pdoc._render_template('/search.mako', **template_config)
f.write(rendered_template)
def main(_args=None):
""" Command-line entry point """
global args
args = _args or parser.parse_args()
warnings.simplefilter("once", DeprecationWarning)
if args.close_stdin:
sys.stdin.close()
if (args.html or args.http) and not args.output_dir:
args.output_dir = 'html'
if args.html_dir:
_warn_deprecated('--html-dir', '--output-dir')
args.output_dir = args.html_dir
if args.overwrite:
_warn_deprecated('--overwrite', '--force')
args.force = args.overwrite
template_config = {}
for config_str in args.config:
try:
key, value = config_str.split('=', 1)
value = ast.literal_eval(value)
template_config[key] = value
except Exception:
raise ValueError(
'Error evaluating --config statement "{}". '
'Make sure string values are quoted?'
.format(config_str)
)
if args.html_no_source:
_warn_deprecated('--html-no-source', '-c show_source_code=False', True)
template_config['show_source_code'] = False
if args.link_prefix:
_warn_deprecated('--link-prefix', '-c link_prefix="foo"', True)
template_config['link_prefix'] = args.link_prefix
if args.external_links:
_warn_deprecated('--external-links')
template_config['external_links'] = True
if args.template_dir is not None:
if not path.isdir(args.template_dir):
print('Error: Template dir {!r} is not a directory'.format(args.template_dir),
file=sys.stderr)
sys.exit(1)
pdoc.tpl_lookup.directories.insert(0, args.template_dir)
# Support loading modules specified as python paths relative to cwd
sys.path.append(os.getcwd())
# Virtual environment handling for pdoc script run from system site
try:
venv_dir = os.environ['VIRTUAL_ENV']
except KeyError:
pass # pdoc was not invoked while in a virtual environment
else:
from glob import glob
from distutils.sysconfig import get_python_lib
libdir = get_python_lib(prefix=venv_dir)
sys.path.append(libdir)
# Resolve egg-links from `setup.py develop` or `pip install -e`
# XXX: Welcome a more canonical approach
for pth in glob(path.join(libdir, '*.egg-link')):
try:
with open(pth) as f:
sys.path.append(path.join(libdir, f.readline().rstrip()))
except IOError:
warn('Invalid egg-link in venv: {!r}'.format(pth))
if args.http:
template_config['link_prefix'] = "/"
# Run the HTTP server.
_WebDoc.args = args # Pass params to HTTPServer xP
_WebDoc.template_config = template_config
host, _, port = args.http.partition(':')
host = host or DEFAULT_HOST
port = int(port or DEFAULT_PORT)
print('Starting pdoc server on {}:{}'.format(host, port), file=sys.stderr)
httpd = HTTPServer((host, port), _WebDoc)
print("pdoc server ready at http://%s:%d" % (host, port), file=sys.stderr)
# Allow tests to perform `pdoc.cli._httpd.shutdown()`
global _httpd
_httpd = httpd
try:
httpd.serve_forever()
finally:
httpd.server_close()
sys.exit(0)
docfilter = None
if args.filter and args.filter.strip():
def docfilter(obj, _filters=args.filter.strip().split(',')):
return any(f in obj.refname or
isinstance(obj, pdoc.Class) and f in obj.doc
for f in _filters)
modules = [pdoc.Module(module, docfilter=docfilter,
skip_errors=args.skip_errors)
for module in args.modules]
pdoc.link_inheritance()
if args.pdf:
_print_pdf(modules, **template_config)
import textwrap
print("""
PDF-ready markdown written to standard output.
^^^^^^^^^^^^^^^
Convert this file to PDF using e.g. Pandoc:
{PANDOC_CMD}
or using Python-Markdown and Chrome/Chromium/WkHtmlToPDF:
markdown_py --extension=meta \\
--extension=abbr \\
--extension=attr_list \\
--extension=def_list \\
--extension=fenced_code \\
--extension=footnotes \\
--extension=tables \\
--extension=admonition \\
--extension=smarty \\
--extension=toc \\
pdf.md > pdf.html
chromium --headless --disable-gpu --print-to-pdf=pdf.pdf pdf.html
wkhtmltopdf --encoding utf8 -s A4 --print-media-type pdf.html pdf.pdf
or similar, at your own discretion.""".format(PANDOC_CMD=textwrap.indent(_PANDOC_COMMAND, ' ')),
file=sys.stderr)
sys.exit(0)
for module in modules:
if args.html:
_quit_if_exists(module, ext='.html')
recursive_write_files(module, ext='.html', **template_config)
elif args.output_dir: # Generate text files
_quit_if_exists(module, ext='.md')
recursive_write_files(module, ext='.md', **template_config)
else:
sys.stdout.write(module.text(**template_config))
# Two blank lines between two modules' texts
sys.stdout.write(os.linesep * (1 + 2 * int(module != modules[-1])))
lunr_config = pdoc._get_config(**template_config).get('lunr_search')
if lunr_config is not None:
_generate_lunr_search(
modules, lunr_config.get("index_docstrings", True), template_config)
_PANDOC_COMMAND = '''\
pandoc --metadata=title:"MyProject Documentation" \\
--from=markdown+abbreviations+tex_math_single_backslash \\
--pdf-engine=xelatex --variable=mainfont:"DejaVu Sans" \\
--toc --toc-depth=4 --output=pdf.pdf pdf.md\
'''
if __name__ == "__main__":
main(parser.parse_args())
```
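A minimal sketch of the programmatic path `main()` takes above, assuming pdoc3 is installed; `json` is only a stand-in target module.
```python
# Uses the same pdoc calls as main()/recursive_write_files(); output handling is simplified.
import pdoc

modules = [pdoc.Module(name) for name in ["json"]]
pdoc.link_inheritance()        # resolve inherited members before rendering, as main() does

for module in modules:
    html = module.html()       # same call used for --html output
    print(module.url(), len(html))
```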
#### File: test/example_pkg/module.py
```python
from collections import namedtuple
import subprocess
import os
CONST = 'const'
"""CONST docstring"""
var = 2
"""var docstring"""
# https://github.com/mitmproxy/pdoc/pull/44
foreign_var = subprocess.CalledProcessError(0, '')
"""foreign var docstring"""
__pdoc__ = {}
def foo(env=os.environ):
"""Doesn't leak environ"""
def object_as_arg_default(*args, a=object(), **kwargs):
"""Html-encodes angle brackets in params"""
def _private_function():
"""Private function, should only appear if whitelisted"""
class A:
"""`A` is base class for `example_pkg.B`.""" # Test refname link
def overridden(self):
"""A.overridden docstring"""
def overridden_same_docstring(self):
"""A.overridden_same_docstring docstring"""
def inherited(self): # Inherited in B
"""A.inherited docstring"""
def __call__(self):
"""A.__call__ docstring. Only shown when whitelisted"""
non_callable_routine = staticmethod(lambda x: 2) # Not interpreted as Function; skipped
class ReadOnlyValueDescriptor:
"""Read-only value descriptor"""
def __get__(self, instance, instance_type=None):
if instance is not None:
return instance.var ** 2
return self
class B(A, int):
"""
B docstring
External refs: `sys.version`, `sys`
"""
CONST = 2
"""B.CONST docstring"""
var = 3
"""B.var docstring"""
ro_value_descriptor = ReadOnlyValueDescriptor()
"""ro_value_descriptor docstring"""
ro_value_descriptor_no_doc = ReadOnlyValueDescriptor() # no doc-string
def __init__(self, x, y, z, w):
"""`__init__` docstring"""
self.instance_var = None
"""instance var docstring"""
self._private_instance_var = None
"""This should be private (hidden) despite PEP 224 docstring"""
def f(self, a: int, b: int = 1, *args, c: str = 'c', **kwargs):
"""B.f docstring"""
@staticmethod
def static(x):
"""B.static docstring"""
@classmethod
def cls(cls):
"""B.cls docstring"""
def _private(self):
"""B._private docstring"""
@staticmethod
def _private_static():
"""B._private_static docstring"""
@classmethod
def _private_cls(cls):
"""B._private_cls docstring"""
@property
def p(self):
"""B.p docstring"""
return 1
class C:
"""B.C docstring"""
def f(self):
"""B.C.f docstring"""
class _Private:
"""B._Private docstring"""
def f(self):
"""B._Private.f docstring"""
def overridden(self):
pass
assert overridden.__doc__ is None
__pdoc__['B.overridden'] = 'B.overridden docstring'
def overridden_same_docstring(self):
pass
class C(B): pass # noqa: E701, E302
class D(C): pass # noqa: E701, E302
class Hidden:
__pdoc__['Hidden'] = False
class Docformats:
def numpy(self):
"""
Summary line.
**Documentation**: https://pdoc3.github.io/pdoc/doc/pdoc/
**Source Code**: https://github.com/pdoc3/
Parameters
----------
x1, x2 : array_like
Input arrays,
description of `x1`, `x2`.
.. versionadded:: 1.5.0
x : { NoneType, 'B', 'C' }, optional
n : int or list of int
Description of num
*args, **kwargs
Passed on.
complex : Union[Set[pdoc.Doc, Function], pdoc]
The `List[Doc]`s of the new signal.
Returns
-------
output : pdoc.Doc
The output array
List[pdoc.Doc]
The output array
foo
Raises
------
TypeError
When something.
Raises
------
TypeError
Returns
-------
None.
Invalid
-------
no match
See Also
--------
fromstring, loadtxt
See Also
--------
pdoc.text : Function a with its description.
scipy.random.norm : Random variates, PDFs, etc.
pdoc.Doc : A class description that
spans several lines.
Examples
--------
>>> doctest
...
Notes
-----
Foo bar.
### H3 Title
Foo bar.
"""
def google(self):
"""
Summary line.
Nomatch:
Args:
arg1 (str, optional): Text1
arg2 (List[str], optional, default=10): Text2
data (array-like object): foo
Args:
arg1 (int): Description of arg1
arg2 (str or int): Description of arg2
test_sequence: 2-dim numpy array of real numbers, size: N * D
- the test observation sequence.
test_sequence =
code
Continue.
*args: passed around
Returns:
issue_10: description didn't work across multiple lines
if only a single item was listed. `inspect.cleandoc()`
somehow stripped the required extra indentation.
Returns:
A very special number
which is the answer of everything.
Returns:
Dict[int, pdoc.Doc]: Description.
Raises:
AttributeError: The ``Raises`` section is a list of all exceptions
that are relevant to the interface.
and a third line.
ValueError: If `arg2` is equal to `arg1`.
Test a title without a blank line before it.
Args:
A: a
Examples:
Examples in doctest format.
>>> a = [1,2,3]
Todos:
* For module TODOs
"""
def doctests(self):
"""
        Need an intro paragraph.
>>> Then code is indented one level
line1
line2
Alternatively
```
>>> doctest
fenced code works
always
```
Examples:
>>> nbytes(100)
'100.0 bytes'
line2
some text
some text
>>> another doctest
line1
line2
Example:
>>> f()
Traceback (most recent call last):
...
Exception: something went wrong
"""
def reST_directives(self):
"""
.. todo::
Create something.
.. admonition:: Example
Image shows something.
.. image:: https://www.debian.org/logos/openlogo-nd-100.png
.. note::
Can only nest admonitions two levels.
.. image:: https://www.debian.org/logos/openlogo-nd-100.png
Now you know.
.. warning::
Some warning
lines.
* Describe some func in a list
across multiple lines:
.. deprecated:: 3.1
Use `spam` instead.
.. versionadded:: 2.5
The *spam* parameter.
.. caution::
Don't touch this!
"""
numpy = Docformats.numpy
google = Docformats.google
doctests = Docformats.doctests
reST_directives = Docformats.reST_directives
def latex_math():
"""
Inline equation: \\( v_t *\\frac{1}{2}* j_i + [a] < 3 \\).
Block equation: \\[ v_t *\\frac{1}{2}* j_i + [a] < 3 \\]
Block equation: $$ v_t *\\frac{1}{2}* j_i + [a] < 3 $$
.. math::
v_t *\\frac{1}{2}* j_i + [a] < 3
"""
class Location(namedtuple('Location', 'lat lon')):
"""Geo-location, GPS position."""
``` |
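The test module above exercises pdoc's `__pdoc__` override dict (see the `B.overridden` and `Hidden` entries). A minimal, hypothetical sketch of the same mechanism, assuming pdoc3's documented behaviour (a string value replaces a member's docstring, `False` hides the member); the module name `demo` and its members are illustrative only:
```python
# demo.py -- hypothetical module illustrating the __pdoc__ overrides used above.
"""Demo module."""

__pdoc__ = {}


def documented():
    """Shown with its own docstring."""


def undocumented():
    pass


# A string value supplies a docstring for a member that has none in the source.
__pdoc__['undocumented'] = 'Docstring injected via __pdoc__.'


class Hidden:
    """Excluded from the generated documentation."""


# A value of False removes the member from the generated output entirely.
__pdoc__['Hidden'] = False

# Rendering (assumed pdoc3 CLI): pdoc --html demo
```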
{
"source": "johankaito/fufuka",
"score": 3
} |
#### File: fufuka/flask-d3-hello-world-master/app.py
```python
import json
import flask
import numpy as np
app = flask.Flask(__name__)
@app.route("/")
def index():
"""
When you request the root path, you'll get the index.html template.
"""
return flask.render_template("index.html")
@app.route("/data")
@app.route("/data/<int:ndata>")
def data(ndata=100):
"""
On request, this returns a list of ``ndata`` randomly made data points.
:param ndata: (optional)
The number of data points to return.
:returns data:
A JSON string of ``ndata`` data points.
"""
x = 10 * np.random.rand(ndata) - 5
y = 0.5 * x + 0.5 * np.random.randn(ndata)
A = 10. ** np.random.rand(ndata)
c = np.random.rand(ndata)
return json.dumps([{"_id": i, "x": x[i], "y": y[i], "area": A[i],
"color": c[i]}
for i in range(ndata)])
if __name__ == "__main__":
import os
port = 8000
# Open a web browser pointing at the app.
os.system("open http://localhost:{0}".format(port))
# Set up the development server on port 8000.
app.debug = True
app.run(port=port)
```
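A small client sketch for the `/data/<ndata>` endpoint defined above; it assumes the development server is already running on `localhost:8000` and uses only the standard library. The file name `client.py` and the helper `fetch_points` are hypothetical.
```python
# client.py -- hypothetical helper that queries the running Flask app above.
import json
from urllib.request import urlopen


def fetch_points(ndata=10, base_url="http://localhost:8000"):
    """Fetch `ndata` random points from /data/<ndata> and decode the JSON list."""
    with urlopen("{0}/data/{1}".format(base_url, ndata)) as resp:
        return json.loads(resp.read().decode("utf-8"))


if __name__ == "__main__":
    for p in fetch_points(5):
        print(p["_id"], round(p["x"], 3), round(p["y"], 3), p["area"], p["color"])
```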
#### File: graph_tool/community/nested_blockmodel.py
```python
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info < (3,):
range = xrange
from .. import _degree, _prop, Graph, GraphView, libcore, _get_rng, PropertyMap, \
infect_vertex_property
from .. stats import label_self_loops
from .. generation import graph_union
from .. topology import shortest_path
import random
from numpy import *
import numpy
from scipy.optimize import fsolve, fminbound
import scipy.special
from collections import defaultdict
import copy
from . blockmodel import *
from . blockmodel import _bm_test
class NestedBlockState(object):
r"""This class encapsulates the nested block state of a given graph.
This must be instantiated and used by functions such as :func:`nested_mcmc_sweep`.
The instances of this class contain a data member called ``levels``, which
is a list of :class:`~graph_tool.community.BlockState` (or
:class:`~graph_tool.community.OverlapBlockState`) instances, containing the
entire nested hierarchy.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
eweight : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Edge weights (i.e. multiplicity).
vweight : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Vertex weights (i.e. multiplicity).
bs : list of :class:`~graph_tool.PropertyMap` or :class:`~numpy.ndarray` instances (optional, default: ``None``)
Initial block labels on the vertices, for each hierarchy level.
Bs : list of ``int`` (optional, default: ``None``)
Number of blocks for each hierarchy level.
deg_corr : ``bool`` (optional, default: ``True``)
If ``True``, the degree-corrected version of the blockmodel ensemble will
be used in the bottom level, otherwise the traditional variant will be used.
overlap : ``bool`` (optional, default: ``False``)
If ``True``, the mixed-membership version of the blockmodel will be used
at the lowest level.
clabel : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Constraint labels on the vertices. If supplied, vertices with different
label values will not be clustered in the same group.
max_BE : ``int`` (optional, default: ``1000``)
If the number of blocks exceeds this number, a sparse representation of
the block graph is used, which is slightly less efficient, but uses less
memory.
"""
def __init__(self, g, eweight=None, vweight=None, ec=None, bs=None, Bs=None,
deg_corr=True, overlap=False, layers=False, clabel=None,
max_BE=1000):
L = len(Bs) if Bs is not None else len(bs)
self.g = cg = g
self.vweight = vcount = vweight
self.eweight = ecount = eweight
self.ec = ec
self.layers = layers
self.levels = []
self.overlap = overlap
self.deg_corr = deg_corr
self.clabel = clabel if clabel is not None else g.new_vertex_property("int")
for l in range(L):
Bl = Bs[l] if Bs is not None else None
bl = None
if bs is not None:
if isinstance(bs[l], PropertyMap):
bl = cg.own_property(bs[l])
else:
bl = bs[l]
if l == 0:
if ec is None:
if overlap:
state = OverlapBlockState(g, B=Bl, b=bl,
eweight=ecount,
vweight=vcount,
deg_corr=deg_corr != False,
clabel=self.clabel,
max_BE=max_BE)
self.clabel = state.clabel.copy()
state.clabel.a = 0
else:
state = BlockState(g, B=Bl, b=bl,
eweight=ecount,
vweight=vcount,
deg_corr=deg_corr != False,
#clabel=self.clabel,
max_BE=max_BE)
else:
state = CovariateBlockState(g, B=Bl, b=bl,
ec=ec,
layers=layers,
eweight=ecount,
vweight=vcount,
deg_corr=deg_corr != False,
clabel=self.clabel,
overlap=overlap,
max_BE=max_BE)
if overlap:
self.clabel = state.clabel.copy()
state.clabel.a = 0
else:
state = self.levels[-1].get_block_state(b=bl,
overlap=self.overlap == "full",
deg_corr=self.deg_corr == "full")[0]
if _bm_test():
assert not state.deg_corr, "upper levels must be non-deg-corr"
self.levels.append(state)
# for l in range(len(self.levels) - 1):
# clabel = self.__project_partition(l, l+1)
# self.levels[l].clabel = clabel
if ec is not None:
self.ec = self.levels[0].ec
def __repr__(self):
return "<NestedBlockState object with %d %sblocks,%s%s for graph %s, with %d levels of sizes %s at 0x%x>" % \
(self.levels[0].B, "overlapping " if self.overlap else "",
" with %d %s," % (self.levels[0].C, "layers" if self.layers else "covariates") if self.ec is not None else "",
" degree corrected," if self.deg_corr else "",
str(self.g), len(self.levels), str([(s.N, s.B) for s in self.levels]), id(self))
def copy(self, ec=None, layers=None, deg_corr=None, overlap=None, clabel=None):
r"""Copies the block state. The parameters override the state properties, and
have the same meaning as in the constructor."""
bs = [s.b.a for s in self.levels]
if overlap is None:
overlap = self.overlap
elif self.overlap and not overlap:
raise ValueError("Cannot automatically convert overlapping nested state to nonoverlapping")
elif not self.overlap and overlap:
s = self.levels[0].copy(overlap=True)
bs[0] = s.b.a
if deg_corr is None:
deg_corr = self.deg_corr
if layers is None:
layers = self.layers
return NestedBlockState(self.g, self.eweight, self.vweight,
self.ec if ec is None else ec, bs,
layers=layers, deg_corr=deg_corr,
overlap=overlap, clabel=clabel,
max_BE=self.levels[0].max_BE)
def __getstate__(self):
state = dict(g=self.g,
ec=self.ec,
eweight=self.eweight,
vweight=self.vweight,
overlap=self.overlap,
bs=[array(s.b.a) for s in self.levels],
clabel=self.clabel,
deg_corr=self.deg_corr,
max_BE=self.levels[0].max_BE)
return state
def __setstate__(self, state):
self.__init__(**state)
return state
def __project_partition(self, l, j):
"""Project partition of level 'j' onto level 'l'"""
if self.overlap != "full":
b = self.levels[l].b.copy()
for i in range(l + 1, j + 1):
clabel = self.levels[i].b.copy()
pmap(b, clabel)
else:
b = self.levels[j].b.copy()
return b
def __propagate_clabel(self, l):
clabel = self.clabel.copy()
for j in range(l):
bg = self.levels[j].bg
bclabel = bg.new_vertex_property("int")
reverse_map(self.levels[j].b, bclabel)
pmap(bclabel, clabel)
clabel = bclabel
return clabel
def __consistency_check(self, op, l):
print("consistency check after", op, "at level", l)
for j in range(len(self.levels)):
c_state = self.levels[j].copy()
S1 = self.levels[j].entropy()
S2 = c_state.entropy()
assert abs(S1 - S2) < 1e-8 and not isnan(S2) and not isnan(S1), "inconsistency at level %d after %s of level %d, different entropies of copies! (%g, %g)" % (j, op, l, S1, S2)
if self.levels[j].wr.a.min() == 0:
print("WARNING: empty blocks at level", j)
if self.levels[j].b.a.max() + 1 != self.levels[j].B:
print("WARNING: b.max() + 1 != B at level", j, self.levels[j].b.max() + 1, self.levels[j].B)
for j in range(len(self.levels) - 1):
B = self.levels[j].b.a.max() + 1
bg_state = self.levels[j].get_block_state(b=self.levels[j+1].b.copy(),
overlap=self.overlap == "full",
deg_corr=self.deg_corr == "full")[0]
S1 = bg_state.entropy(dense=True and self.deg_corr != "full", multigraph=True)
S2 = self.levels[j+1].entropy(dense=True and self.deg_corr != "full", multigraph=True)
if self.levels[j].B != self.levels[j+1].N or S1 != S2:
self.print_summary()
from graph_tool.topology import similarity
print(bg_state)
print(self.levels[j+1])
print("N, B:", bg_state.N, bg_state.B)
print("N, B:", self.levels[j + 1].N, self.levels[j + 1].B)
print("similarity:", similarity(bg_state.g, self.levels[j+1].g))
print("b:", bg_state.b.a)
print("b:", self.levels[j+1].b.a)
print("wr:", bg_state.wr.a)
print("wr:", self.levels[j+1].wr.a)
print("mrs:", bg_state.mrs.a)
print("mrs:", self.levels[j+1].mrs.a)
print("eweight:", bg_state.eweight.a)
print("eweight:", self.levels[j+1].eweight.a)
print("vweight:", bg_state.vweight.a)
print("vweight:", self.levels[j+1].vweight.a)
assert abs(S1 - S2) < 1e-6 and not isnan(S2) and not isnan(S1), "inconsistency at level %d after %s of level %d, different entropies (%g, %g)" % (j, op, l, S1, S2)
assert self.levels[j].B == self.levels[j+1].N, "inconsistency at level %d after %s of level %d, different sizes" % (j + 1, op, l)
# verify hierarchy / clabel consistency
clabel = self.__project_partition(0, j)
self.levels[0].clabel.a = self.clabel.a
assert self.levels[0]._BlockState__check_clabel(), "inconsistency at level %d after %s of level %d, clabel invalidated" % (j + 1, op, l)
self.levels[0].clabel.a = 0
# verify hierarchy consistency
clabel = self.__project_partition(j, j + 1)
self.levels[0].clabel.a = self.clabel.a
assert self.levels[0]._BlockState__check_clabel(), "inconsistency at level %d after %s of level %d, partition not compatible with upper level" % (j + 1, op, l)
self.levels[0].clabel.a = 0
def __rebuild_level(self, l, b, clabel=None):
r"""Replace level ``l`` given the new partition ``b``, and the
projected upper level partition clabel."""
if _bm_test():
assert clabel is not None or l == len(self.levels) - 1, "clabel not given for intermediary level"
if clabel is None:
clabel = self.levels[l].g.new_vertex_property("int")
old_b = b.copy()
state = self.levels[l].copy(b=b, clabel=clabel.a)
self.levels[l] = state
if l == 0:
self.clabel = state.g.own_property(self.clabel) # for CovariateBlockState
# upper level
bclabel = state.get_bclabel()
bstate = self.levels[l].get_block_state(b=bclabel,
overlap=self.overlap == "full",
deg_corr=self.deg_corr == "full")[0]
if l == len(self.levels) - 1:
self.levels.append(None)
self.levels[l + 1] = bstate
self.levels[l].clabel.a = 0
self.levels[l + 1].clabel.a = 0
# if l + 1 < len(self.levels) - 1:
# self.levels[l + 1].clabel = self.__project_partition(l + 1, l + 2)
if l + 1 < len(self.levels) - 1:
bstate = self.levels[l + 1].get_block_state(b=self.levels[l + 2].b,
overlap=self.overlap == "full",
deg_corr=self.deg_corr == "full")[0]
if _bm_test():
from graph_tool.topology import similarity
print("- similarity:", similarity(bstate.g, self.levels[l + 2].g))
if abs(bstate.entropy() - self.levels[l + 2].entropy()) > 1e-6:
print("********************** inconsistent rebuild! **************************")
print(bstate.b.a)
print(self.levels[l + 2].b.a)
print(bstate.wr.a)
print(self.levels[l + 2].wr.a)
print(bstate.eweight.a)
print(self.levels[l + 2].eweight.a)
nclabel = self.__project_partition(l, l + 1)
print(nclabel.a)
print(clabel.a)
print(self.levels[l].b.a)
print(self.levels[l+1].b.a)
print(self.levels[l+2].b.a)
print(bstate.b.a)
print ("DS", l, l + 1, bstate.entropy(), self.levels[l + 2].entropy())
B = self.levels[l].B
if _bm_test():
self.__consistency_check("rebuild", l)
def __delete_level(self, l):
if l == 0:
raise ValueError("cannot delete level l=0")
b = self.__project_partition(l - 1, l)
if l < len(self.levels) - 1:
clabel = self.__project_partition(l - 1, l + 1)
else:
clabel = None
del self.levels[l]
self.__rebuild_level(l - 1, b=b, clabel=clabel)
if _bm_test():
self.__consistency_check("delete", l)
def __duplicate_level(self, l):
assert l > 0, "attempted to duplicate level 0"
if not self.levels[l].overlap:
bstate = self.levels[l].copy(b=self.levels[l].g.vertex_index.copy("int"))
else:
bstate = self.levels[l].copy(b=arange(self.levels[l].g.num_vertices()))
self.levels.insert(l, bstate)
if _bm_test():
self.__consistency_check("duplicate", l)
def level_entropy(self, l, complete=True, dense=False, multigraph=True,
norm=True, dl_ent=False):
r"""Compute the description length of hierarchy level l.
Parameters
----------
l : ``int``
Hierarchy level.
complete : ``bool`` (optional, default: ``True``)
If ``True``, the complete entropy will be returned, including constant
terms not relevant to the block partition.
dense : ``bool`` (optional, default: ``False``)
If ``True``, the "dense" variant of the entropy will be computed.
multigraph : ``bool`` (optional, default: ``True``)
If ``True``, the multigraph entropy will be used.
norm : ``bool`` (optional, default: ``True``)
If ``True``, the entropy will be "normalized" by dividing by the
number of edges.
dl_ent : ``bool`` (optional, default: ``False``)
If ``True``, the description length of the degree sequence will be
approximated by its entropy.
"""
bstate = self.levels[l]
S = bstate.entropy(dl=True, edges_dl=False,
dense=dense or (l > 0 and self.deg_corr != "full"),
multigraph=multigraph or l > 0,
complete=complete or (l > 0 and self.deg_corr == "full"),
norm=norm, dl_ent=dl_ent)
return S
def entropy(self, complete=True, dense=False, multigraph=True, norm=False,
dl_ent=False):
r"""Compute the description length of the entire hierarchy.
Parameters
----------
complete : ``bool`` (optional, default: ``True``)
If ``True``, the complete entropy will be returned, including constant
terms not relevant to the block partition.
dense : ``bool`` (optional, default: ``False``)
If ``True``, the "dense" variant of the entropy will be computed.
multigraph : ``bool`` (optional, default: ``True``)
If ``True``, the multigraph entropy will be used.
norm : ``bool`` (optional, default: ``False``)
If ``True``, the entropy will be "normalized" by dividing by the
number of edges.
dl_ent : ``bool`` (optional, default: ``False``)
If ``True``, the description length of the degree sequence will be
approximated by its entropy.
"""
S = 0
for l in range(len(self.levels)):
S += self.level_entropy(l, complete=complete, dense=dense,
multigraph=multigraph, norm=norm,
dl_ent=dl_ent)
return S
def get_bstack(self):
r"""Return the nested levels as individual graphs.
This returns a list of :class:`~graph_tool.Graph` instances
representing the inferred hierarchy at each level. Each graph has two
internal vertex and edge property maps named "count" which correspond to
the vertex and edge counts at the lower level, respectively. Additionally,
an internal vertex property map named "b" specifies the block partition.
"""
bstack = []
for l, bstate in enumerate(self.levels):
cg = bstate.g
if l == 0:
cg = GraphView(cg, skip_properties=True)
cg.vp["b"] = bstate.b.copy()
cg.ep["count"] = bstate.eweight
if bstate.overlap:
if self.ec is None:
cg.vp["node_index"] = bstate.node_index.copy()
else:
cg.vp["node_index"] = bstate.total_state.node_index.copy()
bstack.append(cg)
if bstate.N == 1:
break
if bstack[-1].num_vertices() > 1:
cg = Graph(directed=bstack[-1].is_directed())
cg.add_vertex()
cg.vp["b"] = cg.new_vertex_property("int")
e = cg.add_edge(0, 0)
ew = cg.new_edge_property("int")
ew[e] = self.levels[-1].E
cg.ep["count"] = ew
bstack.append(cg)
return bstack
def project_level(self, l):
r"""Project the partition at level ``l`` onto the lowest level, and return the
corresponding :class:`~graph_tool.community.BlockState` (or
:class:`~graph_tool.community.OverlapBlockState`). """
if self.overlap != "full":
clabel = b = self.levels[l].b.copy()
while l - 1 >= 0:
clabel = b
b = self.levels[l - 1].b.copy()
pmap(b, clabel)
l -= 1
else:
b = self.levels[l].b.copy()
state = self.levels[0].copy(b=b.a)
return state
def merge_layers(self, l_src, l_tgt, revert=False):
ctxs = []
for state in self.levels:
ctxs.append(state.merge_layers(l_src, l_tgt, revert))
if revert:
if hasattr(contextlib, "nested"):
return contextlib.nested(*ctxs)
else:
with contextlib.ExitStack() as stack:
for ctx in ctxs:
stack.enter_context(ctx)
return stack.pop_all()
def print_summary(self):
for l, state in enumerate(self.levels):
print("l: %d, N: %d, B: %d" % (l, state.N, state.B))
def nested_mcmc_sweep(state, beta=1., c=1., dl=False, sequential=True,
parallel=False, verbose=False):
r"""Performs a Markov chain Monte Carlo sweep on all levels of the hierarchy.
Parameters
----------
state : :class:`~graph_tool.community.NestedBlockState`
The nested block state.
beta : `float` (optional, default: `1.0`)
The inverse temperature parameter :math:`\beta`.
c : ``float`` (optional, default: ``1.0``)
This parameter specifies how often fully random moves are attempted,
instead of more likely moves based on the inferred block partition.
For ``c == 0``, no fully random moves are attempted, and for ``c == inf``
they are always attempted.
dl : ``bool`` (optional, default: ``False``)
If ``True``, the change in the whole description length will be
considered after each vertex move, not only the entropy.
sequential : ``bool`` (optional, default: ``True``)
If ``True``, the move attempts on the vertices are done in sequential
random order. Otherwise a total of `N` move attempts are made, where
`N` is the number of vertices, where each vertex can be selected with
equal probability.
verbose : ``bool`` (optional, default: ``False``)
If ``True``, verbose information is displayed.
Returns
-------
dS_moves : list of (``float``, ``int``) tuples
The entropy difference (per edge) and number of accepted block membership
moves after a full sweep for each level.
Notes
-----
This algorithm performs a Markov chain Monte Carlo sweep on each level of the
network, via the function :func:`~graph_tool.community.mcmc_sweep`.
This algorithm has a worst-case complexity of :math:`O(E \times L)`, where
:math:`E` is the number of edges in the network, and :math:`L` is the depth
of the hierarchy.
Examples
--------
.. testsetup:: nested_mcmc
gt.seed_rng(42)
np.random.seed(42)
.. doctest:: nested_mcmc
>>> g = gt.collection.data["polbooks"]
>>> state = gt.NestedBlockState(g, Bs=[10, 5, 3, 2, 1], deg_corr=True)
>>> ret = gt.nested_mcmc_sweep(state)
>>> print(ret)
[(0.0, 0), (0.0, 0), (0.0, 0), (0.0, 0), (0.0, 0)]
References
----------
.. [peixoto-efficient-2014] <NAME>, "Efficient Monte Carlo and greedy
heuristic for the inference of stochastic block models", Phys. Rev. E 89, 012804 (2014),
:doi:`10.1103/PhysRevE.89.012804`, :arxiv:`1310.4378`.
.. [peixoto-hierarchical-2014] <NAME>, "Hierarchical block structures and high-resolution
model selection in large networks ", Phys. Rev. X 4, 011047 (2014), :doi:`10.1103/PhysRevX.4.011047`,
:arxiv:`1310.4377`.
.. [peixoto-model-2015] <NAME>, "Model selection and hypothesis
testing for large-scale network models with overlapping groups",
Phys. Rev. X 5, 011033 (2015), :doi:`10.1103/PhysRevX.5.011033`,
:arxiv:`1409.3059`.
"""
rets = []
for l, bstate in enumerate(state.levels):
if verbose:
print("Level:", l, "N:", bstate.N, "B:", bstate.B)
# constraint partitions not to invalidate upper layers
if l < len(state.levels) - 1:
clabel = state._NestedBlockState__project_partition(l, l + 1)
else:
clabel = bstate.g.new_vertex_property("int")
# propagate externally imposed clabel at the bottom
cclabel = state._NestedBlockState__propagate_clabel(l)
cclabel.a += clabel.a * (cclabel.a.max() + 1)
continuous_map(cclabel)
bstate.clabel = cclabel
ret = mcmc_sweep(bstate, beta=beta, c=c, dl=dl,
dense = l > 0 and state.deg_corr != "full",
multigraph = l > 0,
sequential=sequential, parallel=parallel,
verbose=verbose)
bstate.clabel.a = 0
rets.append(ret)
return rets
def replace_level(l, state, min_B=None, max_B=None, max_b=None, nsweeps=10,
nmerge_sweeps=10, adaptive_sweeps=True, r=2, c=0, epsilon=0.,
sequential=True, parallel=False, dl=False, dense=False,
multigraph=True, sparse_thresh=100, verbose=False,
checkpoint=None, minimize_state=None, dl_ent=False,
confine_layers=False):
r"""Replaces level l with another state with a possibly different number of
groups. This may change not only the state at level l, but also the one at
level l + 1, which needs to be 'rebuilt' because of the label changes at
level l."""
if _bm_test():
state._NestedBlockState__consistency_check("(before) replace level", l)
bstate = state.levels[l]
g = bstate.g
base_g = g if not bstate.overlap else bstate.base_g
eweight = bstate.eweight if not bstate.overlap else None
ec = None
if state.ec is not None:
if bstate.overlap:
ec = bstate.base_ec
else:
ec = bstate.ec
if l > 0 or min_B is None:
if l + 1 < len(state.levels):
min_B = state.levels[l + 1].B
else:
min_B = 1
if l > 0 or max_B is None:
max_B = bstate.N
min_B = max(min_B, state.clabel.a.max() + 1)
# constraint partitions not to invalidate upper layers
if l < len(state.levels) - 1:
clabel = state._NestedBlockState__project_partition(l, l + 1)
else:
clabel = bstate.g.new_vertex_property("int")
assert min_B <= max_B, (min_B, max_B, bstate.B, bstate.N, g.num_vertices(),
state.clabel.a.max() + 1, clabel.a.max() + 1, l)
# propagate externally imposed clabel at the bottom
cclabel = state._NestedBlockState__propagate_clabel(l)
assert cclabel.a.max() <= state.clabel.a.max(), (cclabel.a.max(), state.clabel.a.max())
cclabel.a += clabel.a * (cclabel.a.max() + 1)
continuous_map(cclabel)
min_B = max(min_B, cclabel.a.max() + 1)
assert min_B <= max_B, (min_B, max_B, bstate.B, bstate.N, g.num_vertices(),
cclabel.a.max() + 1, state.clabel.a.max() + 1, clabel.a.max() + 1, l)
if _bm_test():
assert bstate._BlockState__check_clabel(), "invalid clabel before minimize!"
nested_dl = l < len(state.levels) - 1
state.levels[l].clabel.a = cclabel.a
Sb = get_b_dl(state.levels[l],
dense=(l > 0 and state.deg_corr != "full") or dense,
multigraph=l > 0 or multigraph,
nested_dl=nested_dl,
nested_overlap=state.overlap == "full",
dl_ent=dl_ent)
state.levels[l].clabel.a = 0
if _bm_test():
assert clabel.a.max() + 1 <= min_B
if l == 0 and max_b is not None:
istate = state.levels[0].copy(b=max_b.copy(),
clabel=cclabel.a if state.overlap else cclabel)
if _bm_test():
assert istate._BlockState__check_clabel(), "invalid clabel!"
if istate.B > max_B:
istate = multilevel_minimize(istate, B=max_B, nsweeps=nsweeps,
nmerge_sweeps=nmerge_sweeps,
adaptive_sweeps=adaptive_sweeps, c=c,
r=r, dl=dl, sequential=sequential,
parallel=parallel,
greedy_cooling=True, epsilon=epsilon,
confine_layers=confine_layers,
verbose=verbose=="full")
if _bm_test():
assert istate._BlockState__check_clabel(), "invalid clabel!"
init_states = [istate]
else:
init_states = None
res = minimize_blockmodel_dl(base_g,
ec=ec,
layers=state.layers,
confine_layers=confine_layers,
eweight=eweight,
deg_corr=bstate.deg_corr,
nsweeps=nsweeps,
nmerge_sweeps=nmerge_sweeps,
adaptive_sweeps=adaptive_sweeps,
c=c, r=r, sequential=sequential,
parallel=parallel,
greedy_cooling=True,
epsilon=epsilon,
max_B=max_B,
min_B=min_B,
clabel=cclabel if not bstate.overlap else cclabel.a,
max_BE=bstate.max_BE,
dl=dl,
dense=(l > 0 and state.deg_corr != "full") or dense,
multigraph=l > 0 or multigraph,
sparse_heuristic=base_g.num_vertices() > sparse_thresh,
nested_dl=nested_dl,
overlap=bstate.overlap,
nested_overlap=state.overlap == "full",
nonoverlap_compare=False,
nonoverlap_init=False,
init_states=init_states,
verbose=verbose=="full",
##exaustive=g.num_vertices() <= 100,
#minimize_state=minimize_state.minimize_state, >>>>>> HERE <<<<<
checkpoint=checkpoint,
dl_ent=dl_ent)
if _bm_test():
assert (res.clabel.a == cclabel.a).all(), (res.clabel.a, cclabel.a)
assert res._BlockState__check_clabel(), "invalid clabel after minimize!"
Sf = get_b_dl(res,
dense=(l > 0 and state.deg_corr != "full") or dense,
multigraph=l > 0 or multigraph,
nested_dl=nested_dl,
nested_overlap=state.overlap == "full",
dl_ent=dl_ent)
res.clabel.a = 0
if state.ec is not None:
for s in res.states:
s.clabel.a = 0
b = res.b
kept = False
if Sf - Sb >= -1e-10:
kept = True
if Sf - Sb == 0 and bstate.B != state.levels[l].B:
kept = False
if res.B == res.N:
kept = True
if kept:
Sf_rej = Sf
Sf = Sb
else:
# rebuild current level
state._NestedBlockState__rebuild_level(l, b=b, clabel=clabel)
if verbose:
print("level", l, ": resizing", bstate.B, "->", state.levels[l].B, ", dS:", Sf - Sb, end="")
if kept:
print(" [kept, rejected (%d, %g) vs (%d, %g)]" % (res.B, Sf_rej, bstate.B, Sb))
else:
print()
if _bm_test():
state._NestedBlockState__consistency_check("replace level", l)
dS = Sf - Sb
return dS, kept
class NestedMinimizeState(object):
r"""This object stores information regarding the current entropy minimization
state, so that the algorithms can resume previously started runs.
This object can be saved to disk via the :mod:`pickle` interface."""
def __init__(self):
self.minimize_state = MinimizeState()
self.l = 0
self.bs = []
self.done = []
self.init = True
def clear(self):
self.minimize_state.clear()
self.l = 0
del self.bs[:]
del self.done[:]
def sync(self, state):
if len(self.bs) == 0:
for s in state.levels:
self.bs.append(array(s.b.fa))
while len(self.done) < len(state.levels):
self.done.append(False)
def delete(self, l):
del self.done[l]
del self.bs[l]
def insert(self, l, state):
self.done.insert(l + 1, False)
ba = array(state.levels[l].b.fa)
self.bs.insert(l + 1, ba)
def mark_level(self, l, done, state):
while len(state.levels) > len(self.bs):
ba = array(state.levels[len(self.bs)].b.fa)
self.bs.append(ba)
self.done.append(False)
self.done[l] = done
if done:
self.bs[l] = array(state.levels[l].b.fa)
def clear_mstate(self):
self.minimize_state.clear()
def get_checkpoint_wrap(checkpoint, state, minimize_state, dl_ent):
S_total = state.entropy(complete=True, dl_ent=dl_ent)
if checkpoint is not None:
def check_wrap(bstate, Sb, delta, nmoves, ms):
l = minimize_state.l
bstate = state.levels[l]
S_l = bstate.entropy()
S = S_total - S_l + Sb
if bstate is None:
checkpoint(None, S, delta, nmoves, minimize_state)
else:
checkpoint(state, S, delta, nmoves, minimize_state)
chkp = check_wrap
else:
chkp = None
return chkp
def nested_tree_sweep(state, min_B=None, max_B=None, max_b=None, nsweeps=10,
epsilon=0., r=2., nmerge_sweeps=10, adaptive_sweeps=True,
c=0, dl=False, dense=False, multigraph=True,
sequential=True, parallel=False, sparse_thresh=100,
checkpoint=None, minimize_state=None, frozen_levels=None,
confine_layers=False, verbose=False, **kwargs):
r"""Performs one greedy sweep in the entire hierarchy tree, attempting to
decrease its description length.
Parameters
----------
state : :class:`~graph_tool.community.NestedBlockState`
The nested block state.
min_B : ``int`` (optional, default: ``None``)
Minimum number of blocks at the lowest level.
max_B : ``int`` (optional, default: ``None``)
Maximum number of blocks at the lowest level.
max_b : ``int`` (optional, default: ``None``)
Block partition used for the maximum number of blocks at the lowest
level.
nsweeps : ``int`` (optional, default: ``10``)
The number of sweeps done after each merge step to reach the local
minimum.
epsilon : ``float`` (optional, default: ``0``)
Convergence criterion for ``adaptive_sweeps``.
r : ``float`` (optional, default: ``2.``)
Agglomeration ratio for the merging steps. Each merge step will attempt
to find the best partition into :math:`B_{i-1} / r` blocks, where
:math:`B_{i-1}` is the number of blocks in the last step.
nmerge_sweeps : `int` (optional, default: `10`)
The number of merge sweeps done, where in each sweep a better merge
candidate is searched for every block.
c : ``float`` (optional, default: ``1.0``)
This parameter specifies how often fully random moves are attempted,
instead of more likely moves based on the inferred block partition.
For ``c == 0``, no fully random moves are attempted, and for ``c == inf``
they are always attempted.
dense : ``bool`` (optional, default: ``False``)
If ``True``, the "dense" variant of the entropy will be computed.
sequential : ``bool`` (optional, default: ``True``)
If ``True``, the move attempts on the vertices are done in sequential
random order. Otherwise a total of `N` move attempts are made, where
`N` is the number of vertices, where each vertex can be selected with
equal probability.
sparse_thresh : ``int`` (optional, default: ``100``)
If the number of nodes in the higher level multigraphs exceeds this
number, the sparse entropy will be used to find the best partition,
but the dense entropy will be used to compare different partitions.
checkpoint : function (optional, default: ``None``)
If provided, this function will be called after each call to
:func:`mcmc_sweep`. This can be used to store the current state, so it
can be continued later. The function must have the following signature:
.. code-block:: python
def checkpoint(state, S, delta, nmoves, minimize_state):
...
where `state` is either a :class:`~graph_tool.community.NestedBlockState`
instance or ``None``, `S` is the current entropy value, `delta` is
the entropy difference in the last MCMC sweep, and `nmoves` is the
number of accepted block membership moves. The ``minimize_state``
argument is a :class:`NestedMinimizeState` instance which specifies
the current state of the algorithm, which can be stored via :mod:`pickle`,
and supplied via the ``minimize_state`` option below to continue from an
interrupted run.
This function will also be called when the MCMC has finished for the
current value of :math:`B`, in which case ``state == None``, and the
remaining parameters will be zero, except the last.
minimize_state : :class:`NestedMinimizeState` (optional, default: ``None``)
If provided, this will specify an exact point of execution from which
the algorithm will continue. The expected object is a
:class:`NestedMinimizeState` instance which will be passed to the
callback of the ``checkpoint`` option above, and can be stored by
:mod:`pickle`.
frozen_levels : :class:`list` (optional, default: ``None``)
List of levels (``int``s) which will remain unmodified during the
algorithm.
verbose : ``bool`` (optional, default: ``False``)
If ``True``, verbose information is displayed.
Returns
-------
dS : ``float``
The description length difference (per edge) after the move.
Notes
-----
This algorithm performs a constrained agglomerative heuristic on each level
of the network, via the function :func:`~graph_tool.community.multilevel_minimize`.
This algorithm has worst-case complexity of :math:`O(N\ln^2 N \times L)`,
where :math:`N` is the number of nodes in the network, and :math:`L` is
the depth of the hierarchy.
References
----------
.. [peixoto-efficient-2014] <NAME>, "Efficient Monte Carlo and greedy
heuristic for the inference of stochastic block models", Phys. Rev. E 89, 012804 (2014),
:doi:`10.1103/PhysRevE.89.012804`, :arxiv:`1310.4378`.
.. [peixoto-hierarchical-2014] <NAME>, "Hierarchical block structures and high-resolution
model selection in large networks ", Phys. Rev. X 4, 011047 (2014), :doi:`10.1103/PhysRevX.4.011047`,
:arxiv:`1310.4377`.
.. [peixoto-model-2015] <NAME>, "Model selection and hypothesis
testing for large-scale network models with overlapping groups",
Phys. Rev. X 5, 011033 (2015), :doi:`10.1103/PhysRevX.5.011033`,
:arxiv:`1409.3059`.
"""
dl_ent = kwargs.get("dl_ent", False)
if minimize_state is None:
minimize_state = NestedMinimizeState()
mstate = minimize_state
mstate.sync(state)
args = dict(state=state, nsweeps=nsweeps, nmerge_sweeps=nmerge_sweeps,
adaptive_sweeps=adaptive_sweeps, r=r, c=c, epsilon=epsilon,
sequential=sequential, parallel=parallel, dl=dl, dense=dense,
multigraph=multigraph, sparse_thresh=sparse_thresh, min_B=min_B,
max_B=max_B, max_b=max_b, checkpoint=checkpoint,
minimize_state=minimize_state, dl_ent=dl_ent,
confine_layers=confine_layers)
#_Si = state.entropy(dense=dense, multigraph=dense)
dS = 0
if frozen_levels is None:
frozen_levels = set()
while mstate.l >= 0:
l = mstate.l
if mstate.done[l]:
if verbose:
print("level", l, ": skipping", state.levels[l].B)
mstate.l -= 1
continue
Si = state.entropy(dl_ent=dl_ent)
kept = True
if l in frozen_levels:
kept = False
# replace level
if kept:
ddS, kept = replace_level(l, verbose=verbose, **args)
dS += ddS
mstate.clear_mstate()
if _bm_test():
if kept:
assert abs(state.entropy(dl_ent=dl_ent) - Si) < 1e-6, "inconsistent replace at level %d (%g, %g)" % (l, state.entropy(), Si)
state._NestedBlockState__consistency_check("replace level", l)
# delete level
if (kept and l > 0 and l < len(state.levels) - 1 and
not (min_B is not None and l == 1 and state.levels[l].B < min_B)):
Si = state.entropy(dl_ent=dl_ent)
bstates = [state.levels[l-1], state.levels[l], state.levels[l + 1]]
state._NestedBlockState__delete_level(l)
#replace_level(l, **args)
Sf = state.entropy(dl_ent=dl_ent)
mstate.clear_mstate()
if Sf > Si:
state.levels[l - 1] = bstates[0]
state.levels.insert(l, bstates[1])
state.levels[l + 1] = bstates[2]
else:
kept = False
dS += Sf - Si
mstate.delete(l)
if verbose:
print("level", l, ": deleted", (bstates[1].N, bstates[1].B), ", dS:", Sf - Si, len(state.levels))
if _bm_test():
if kept:
assert abs(state.entropy(dl_ent=dl_ent) - Si) < 1e-6, "inconsistent delete at level %d (%g, %g)" % (l, state.entropy(), Si)
state._NestedBlockState__consistency_check("delete complete", l)
# insert new level (duplicate and replace)
if kept and l > 0:
Si = state.entropy(dl_ent=dl_ent)
bstates = [state.levels[l].copy()]
if l < len(state.levels) - 1:
bstates.append(state.levels[l + 1].copy())
if l < len(state.levels) - 2:
bstates.append(state.levels[l + 2].copy())
state._NestedBlockState__duplicate_level(l)
replace_level(l + 1, verbose=False, **args)
Sf = state.entropy(dl_ent=dl_ent)
mstate.clear_mstate()
if Sf >= Si:
del state.levels[l + 1]
for j in range(len(bstates)):
state.levels[l + j] = bstates[j]
if bstates[-1].B == 1:
del state.levels[l + len(bstates):]
else:
kept = False
dS += Sf - Si
mstate.insert(l, state)
l += 1
if verbose:
print("level", l, ": inserted", state.levels[l].B, ", dS:", Sf - Si)
if _bm_test():
state._NestedBlockState__consistency_check("delete", l)
if kept:
assert abs(state.entropy(dl_ent=dl_ent) - Si) < 1e-8, "inconsistent delete at level %d (%g, %g)" % (l, state.entropy(), Si)
mstate.mark_level(l, done=True, state=state)
if not kept:
if l + 1 < len(state.levels):
mstate.mark_level(l + 1, done=False, state=state)
if l > 0:
mstate.mark_level(l - 1, done=False, state=state)
l += 1
else:
if ((l + 1 < len(state.levels) and not mstate.done[l + 1]) or
(l + 1 == len(state.levels) and state.levels[l].B > 1)):
l += 1
else:
l -= 1
if l >= len(state.levels):
l = len(state.levels) - 1
# create a new level at the top with B=1, if necessary
if l == len(state.levels) - 1 and state.levels[l].B > 1:
NB = state.levels[l].B if not state.overlap else 2 * state.levels[l].E
state._NestedBlockState__rebuild_level(l, b=state.levels[l].g.new_vertex_property("int"))
mstate.mark_level(l + 1, done=False, state=state)
l += 1
mstate.l = l
if checkpoint is not None:
checkpoint(None, 0, 0, 0, mstate)
if _bm_test():
state._NestedBlockState__consistency_check("tree sweep step", l)
# _Sf = state.entropy(dense=dense, multigraph=dense, dl_ent=dl_ent)
return dS
def init_nested_state(g, Bs, ec=None, deg_corr=True, overlap=False,
layers=False, confine_layers=False, dl=False, dense=False,
multigraph=True, eweight=None, vweight=None, clabel=None,
nsweeps=10, epsilon=0., r=2, nmerge_sweeps=10,
adaptive_sweeps=True, c=0, sequential=True,
parallel=False, sparse_thresh=100, checkpoint=None,
minimize_state=None, max_BE=1000, verbose=False, **kwargs):
r"""Initializes a nested block hierarchy with sizes given by ``Bs``.
Parameters
----------
g : :class:`~graph_tool.Graph`
The graph being modelled.
Bs : list of ``int`` (optional, default: ``None``)
Number of blocks for each hierarchy level.
deg_corr : ``bool`` (optional, default: ``True``)
If ``True``, the degree-corrected version of the blockmodel ensemble will
be used in the bottom level, otherwise the traditional variant will be used.
dense : ``bool`` (optional, default: ``False``)
If ``True``, the "dense" variant of the entropy will be computed.
eweight : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Edge weights (i.e. multiplicity).
vweight : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Vertex weights (i.e. multiplicity).
nsweeps : ``int`` (optional, default: ``10``)
The number of sweeps done after each merge step to reach the local
minimum.
epsilon : ``float`` (optional, default: ``0``)
Convergence criterion for ``adaptive_sweeps``.
r : ``float`` (optional, default: ``2.``)
Agglomeration ratio for the merging steps. Each merge step will attempt
to find the best partition into :math:`B_{i-1} / r` blocks, where
:math:`B_{i-1}` is the number of blocks in the last step.
nmerge_sweeps : `int` (optional, default: `10`)
The number of merge sweeps done, where in each sweep a better merge
candidate is searched for every block.
c : ``float`` (optional, default: ``0.``)
This parameter specifies how often fully random moves are attempted,
instead of more likely moves based on the inferred block partition.
For ``c == 0``, no fully random moves are attempted, and for ``c == inf``
they are always attempted.
sequential : ``bool`` (optional, default: ``True``)
If ``True``, the move attempts on the vertices are done in sequential
random order. Otherwise a total of `N` move attempts are made, where
`N` is the number of vertices, where each vertex can be selected with
equal probability.
sparse_thresh : ``int`` (optional, default: ``100``)
If the number of nodes in the higher level multigraphs exceeds this
number, the sparse entropy will be used to find the best partition,
but the dense entropy will be used to compare different partitions.
checkpoint : function (optional, default: ``None``)
If provided, this function will be called after each call to
:func:`mcmc_sweep`. This can be used to store the current state, so it
can be continued later. The function must have the following signature:
.. code-block:: python
def checkpoint(state, S, delta, nmoves, minimize_state):
...
where `state` is either a :class:`~graph_tool.community.NestedBlockState`
instance or ``None``, `S` is the current entropy value, `delta` is
the entropy difference in the last MCMC sweep, and `nmoves` is the
number of accepted block membership moves. The ``minimize_state``
argument is a :class:`NestedMinimizeState` instance which specifies
the current state of the algorithm, which can be stored via :mod:`pickle`,
and supplied via the ``minimize_state`` option below to continue from an
interrupted run.
This function will also be called when the MCMC has finished for the
current value of :math:`B`, in which case ``state == None``, and the
remaining parameters will be zero, except the last.
minimize_state : :class:`NestedMinimizeState` (optional, default: ``None``)
If provided, this will specify an exact point of execution from which
the algorithm will continue. The expected object is a
:class:`NestedMinimizeState` instance which will be passed to the
callback of the ``checkpoint`` option above, and can be stored by
:mod:`pickle`.
verbose : ``bool`` (optional, default: ``False``)
If ``True``, verbose information is displayed.
Returns
-------
state : :class:`~graph_tool.community.NestedBlockState`
The initialized nested state.
Notes
-----
This algorithm performs an agglomerative heuristic on each level of the
network, via the function :func:`~graph_tool.community.multilevel_minimize`.
This algorithm has worst-case complexity of :math:`O(N\ln^2 N \times L)`,
where :math:`N` is the number of nodes in the network, and :math:`L` is
the depth of the hierarchy.
References
----------
.. [peixoto-efficient-2014] <NAME>, "Efficient Monte Carlo and greedy
heuristic for the inference of stochastic block models", Phys. Rev. E 89, 012804 (2014),
:doi:`10.1103/PhysRevE.89.012804`, :arxiv:`1310.4378`.
.. [peixoto-hierarchical-2014] <NAME>, "Hierarchical block structures and high-resolution
model selection in large networks ", Phys. Rev. X 4, 011047 (2014), :doi:`10.1103/PhysRevX.4.011047`,
:arxiv:`1310.4377`.
.. [peixoto-model-2015] <NAME>, "Model selection and hypothesis
testing for large-scale network models with overlapping groups",
Phys. Rev. X 5, 011033 (2015), :doi:`10.1103/PhysRevX.5.011033`,
:arxiv:`1409.3059`.
"""
dl_ent = kwargs.get("dl_ent", False)
if minimize_state is None:
minimize_state = NestedMinimizeState()
mstate = minimize_state
state = NestedBlockState(g, ec=ec, layers=layers, eweight=eweight,
vweight=vweight, Bs=[1], deg_corr=deg_corr,
overlap=overlap, clabel=clabel)
chkp = get_checkpoint_wrap(checkpoint, state, minimize_state, dl_ent)
bg = g
ecount = eweight
for l, B in enumerate(Bs):
ba = None
if l < len(mstate.bs):
ba = mstate.bs[l]
else:
if l == 0:
if ec is None:
if state.overlap:
bstate = OverlapBlockState(bg, B=bg.num_vertices(), #b=bg.vertex_index.copy("int"),
vweight=vweight,
eweight=ecount,
deg_corr=deg_corr != False,
#clabel=clabel,
max_BE=max_BE)
else:
bstate = BlockState(bg, B=bg.num_vertices(), #b=bg.vertex_index.copy("int"),
vweight=vweight,
eweight=ecount,
deg_corr=deg_corr != False,
#clabel=clabel,
max_BE=max_BE)
else:
if overlap:
if confine_layers:
be = init_layer_confined(bg, ec)
B_init = None
else:
be = None
B_init = 2 * g.num_edges()
else:
be = None
B_init = g.num_vertices()
bstate = CovariateBlockState(bg, ec=ec,
layers=layers,
B=B_init, #b=bg.vertex_index.copy("int"),
b=be,
vweight=vweight,
eweight=ecount,
deg_corr=deg_corr != False,
overlap=overlap,
#clabel=clabel,
max_BE=max_BE)
else:
bstate = state.levels[l-1].get_block_state(b=ba,
overlap=overlap == "full",
deg_corr=deg_corr == "full")[0]
if B == 1:
bstate.copy(b=bstate.g.new_vertex_property("int").a)
else:
bstate = multilevel_minimize(bstate, B, nsweeps=nsweeps,
epsilon=epsilon,
r=r, nmerge_sweeps=nmerge_sweeps,
adaptive_sweeps=adaptive_sweeps,
greedy=True, c=c, dl=dl,
dense=(l > 0 and g.num_vertices() < sparse_thresh) or dense,
multigraph=(l > 0 and g.num_vertices() < sparse_thresh) or multigraph,
sequential=sequential,
parallel=parallel,
verbose=verbose != False,
checkpoint=chkp,
minimize_state=minimize_state.minimize_state)
ba = array(bstate.b.fa)
mstate.bs.append(ba)
minimize_state.clear_mstate()
state._NestedBlockState__rebuild_level(len(state.levels) - 1, b=ba)
if ec is None:
bg = state.levels[l].bg
ecount = state.levels[l].mrs
else:
bg, ecount, ec = state.levels[l].get_bg()
for l, B in enumerate(Bs):
if l + 1 < len(state.levels):
assert state.levels[l].B == state.levels[l + 1].N, (state.levels[l].B, state.levels[l + 1].N)
minimize_state.clear()
mstate.sync(state)
if checkpoint is not None:
checkpoint(None, 0, 0, 0, mstate)
return state
def minimize_nested_blockmodel_dl(g, Bs=None, bs=None, min_B=None,
max_B=None, max_b=None, deg_corr=True,
overlap=False, ec=None, layers=False,
confine_layers=False, nonoverlap_init=False,
dl=True, multigraph=True, dense=False,
eweight=None, vweight=None, clabel=None,
frozen_levels=None, nsweeps=10,
adaptive_sweeps=True, epsilon=1e-3, c=0,
nmerge_sweeps=10, r=2, sparse_thresh=100,
sequential=True, parallel=False,
verbose=False, checkpoint=None,
minimize_state=None, **kwargs):
r"""Find the block hierarchy of an unspecified size which minimizes the description
length of the network, according to the nested stochastic blockmodel ensemble which
best describes it.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph being used.
Bs : list of ``int`` (optional, default: ``None``)
Initial number of blocks for each hierarchy level.
bs : list of :class:`~graph_tool.PropertyMap` or :class:`~numpy.ndarray` instances (optional, default: ``None``)
Initial block labels on the vertices, for each hierarchy level.
min_B : ``int`` (optional, default: ``None``)
Minimum number of blocks at the lowest level.
max_B : ``int`` (optional, default: ``None``)
Maximum number of blocks at the lowest level.
max_b : ``int`` (optional, default: ``None``)
Block partition used for the maximum number of blocks at the lowest
level.
deg_corr : ``bool`` (optional, default: ``True``)
If ``True``, the degree-corrected version of the blockmodel ensemble
will be used in the bottom level, otherwise the traditional variant will
be used.
overlap : ``bool`` (optional, default: ``False``)
If ``True``, the mixed-membership version of the blockmodel will be used.
ec : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
If provided, this should be an edge :class:`~graph_tool.PropertyMap`
containing edge covariates that will split the network in discrete
layers.
layers : ``bool`` (optional, default: ``False``)
If ``True``, and ``ec`` is not ``None``, the "independent layers"
version of the model is used, instead of the "edge covariates" version.
confine_layers : ``bool`` (optional, default: ``False``)
If ``True``, and ``ec`` is not ``None`` and ``overlap == True``, the
half edges will only be moved in such a way that inside each layer the
group membership remains non-overlapping.
nonoverlap_init : ``bool`` (optional, default: ``False``)
If ``True``, and ``overlap == True``, the minimization starts by first
fitting the non-overlapping model, and using that as a starting state.
dl : ``bool`` (optional, default: ``True``)
If ``True``, the change in the whole description length will be
considered after each vertex move, not only the entropy.
multigraph : ``bool`` (optional, default: ``False``)
If ``True``, the multigraph entropy will be used.
dense : ``bool`` (optional, default: ``False``)
If ``True``, the "dense" variant of the entropy will be computed.
eweight : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Edge weights (i.e. multiplicity).
vweight : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Vertex weights (i.e. multiplicity).
clabel : :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Constraint labels on the vertices. If supplied, vertices with different
label values will not be clustered in the same group.
frozen_levels : :class:`list` (optional, default: ``None``)
List of levels (``int``s) which will remain unmodified during the
algorithm.
nsweeps : ``int`` (optional, default: ``10``)
The number of sweeps done after each merge step to reach the local
minimum.
epsilon : ``float`` (optional, default: ``0``)
The number of sweeps necessary for the local minimum will
be estimated to be enough so that no more than ``epsilon * N`` nodes
change their states in the last ``nsweeps`` sweeps.
c : ``float`` (optional, default: ``0.``)
This parameter specifies how often fully random moves are attempted,
instead of more likely moves based on the inferred block partition.
For ``c == 0``, no fully random moves are attempted, and for ``c == inf``
they are always attempted.
nmerge_sweeps : `int` (optional, default: `10`)
The number of merge sweeps done, where in each sweep a better merge
candidate is searched for every block.
r : ``float`` (optional, default: ``2.``)
Agglomeration ratio for the merging steps. Each merge step will attempt
to find the best partition into :math:`B_{i-1} / r` blocks, where
:math:`B_{i-1}` is the number of blocks in the last step.
sparse_thresh : ``int`` (optional, default: ``100``)
If the number of blocks at some level is larger than this value, the
sparse entropy will be used to find the best partition, but the dense
entropy will be used to compare different partitions.
sequential : ``bool`` (optional, default: ``True``)
If ``True``, the move attempts on the vertices are done in sequential
random order. Otherwise a total of ``N`` move attempts are made, where
`N` is the number of vertices, where each vertex can be selected with
equal probability.
checkpoint : function (optional, default: ``None``)
If provided, this function will be called after each call to
:func:`mcmc_sweep`. This can be used to store the current state, so it
can be continued later. The function must have the following signature:
.. code-block:: python
def checkpoint(state, L, delta, nmoves, minimize_state):
...
where `state` is either a :class:`~graph_tool.community.NestedBlockState`
instance or ``None``, `L` is the current description length, ``delta`` is
the entropy difference in the last MCMC sweep, and ``nmoves`` is the
number of accepted block membership moves. The ``minimize_state``
argument is a :class:`~graph_tool.community.NestedMinimizeState`
instance which specifies the current state of the algorithm, which can
be stored via :mod:`pickle`, and supplied via the ``minimize_state``
option below to continue from an interrupted run.
This function will also be called when the MCMC has finished for the
current value of :math:`B`, in which case ``state == None``, and the
remaining parameters will be zero, except the last.
minimize_state : :class:`MinimizeState` (optional, default: ``None``)
If provided, this will specify an exact point of execution from which
the algorithm will continue. The expected object is a
:class:`~graph_tool.community.NestedMinimizeState`
instance which will be passed to the callback of the ``checkpoint``
option above, and can be stored by :mod:`pickle`.
verbose : ``bool`` (optional, default: ``False``)
If ``True``, verbose information is displayed.
Returns
-------
state : :class:`~graph_tool.community.NestedBlockState`
The nested block state.
Notes
-----
This algorithm attempts to find a block partition hierarchy of an unspecified size
which minimizes the description length of the network,
.. math::
\Sigma = \mathcal{L}_{t/c} + \mathcal{S}_n,
where :math:`\mathcal{S}_{n}` is the nested blockmodel entropy given by
.. math::
\mathcal{S}_n = \mathcal{S}_{t/c}(\{e^0_{rs}\}, \{n^0_r\}) + \sum_{l=1}^LS_m(\{e^l_{rs}\}, \{n^l_r\}).
with :math:`\mathcal{S}_{t/c}` and :math:`\mathcal{S}_{m}` described in the docstring of
:meth:`~graph_tool.community.BlockState.entropy`, and :math:`\{e^l_{rs}\}`
and :math:`\{n^l_r\}` are the edge and node counts at hierarchical level :math:`l`.
Additionally :math:`\mathcal{L}_{t/c}` is the information necessary to
describe the block partitions, i.e. :math:`\mathcal{L}_t=\sum_{l=0}^L\mathcal{L}^l_t`, with
.. math::
\mathcal{L}^l_t = \ln\left(\!\!{B_l\choose B_{l-1}}\!\!\right) + \ln B_{l-1}! - \sum_r \ln n_r^l!.
See [peixoto-hierarchical-2014]_ for details on the algorithm.
This algorithm has a complexity of :math:`O(N \ln^2 N)`, where :math:`N`
is the number of nodes in the network.
Examples
--------
.. testsetup:: nested_mdl
gt.seed_rng(42)
np.random.seed(42)
.. doctest:: nested_mdl
>>> g = gt.collection.data["power"]
>>> state = gt.minimize_nested_blockmodel_dl(g, deg_corr=True)
>>> gt.draw_hierarchy(state, output="power_nested_mdl.pdf")
(...)
.. testcleanup:: nested_mdl
gt.draw_hierarchy(state, output="power_nested_mdl.png")
.. figure:: power_nested_mdl.*
:align: center
Hierarchical Block partition of a power-grid network, which minimizes
the description length of the network according to the nested
(degree-corrected) stochastic blockmodel.
.. doctest:: nested_mdl_overlap
>>> g = gt.collection.data["celegansneural"]
>>> state = gt.minimize_nested_blockmodel_dl(g, deg_corr=True, overlap=True,
... nonoverlap_init=False, dl=True)
>>> gt.draw_hierarchy(state, output="celegans_nested_mdl_overlap.pdf")
(...)
.. testcleanup:: nested_mdl_overlap
gt.draw_hierarchy(state, output="celegans_nested_mdl_overlap.png")
.. figure:: celegans_nested_mdl_overlap.*
:align: center
Overlapping block partition of the C. elegans neural network, which
minimizes the description length of the network according to the nested
overlapping stochastic blockmodel.
References
----------
.. [peixoto-hierarchical-2014] <NAME>, "Hierarchical block structures and high-resolution
model selection in large networks ", Phys. Rev. X 4, 011047 (2014), :doi:`10.1103/PhysRevX.4.011047`,
:arxiv:`1310.4377`.
.. [peixoto-efficient-2014] <NAME>, "Efficient Monte Carlo and greedy
heuristic for the inference of stochastic block models", Phys. Rev. E 89, 012804 (2014),
:doi:`10.1103/PhysRevE.89.012804`, :arxiv:`1310.4378`.
.. [peixoto-model-2015] <NAME>, "Model selection and hypothesis
testing for large-scale network models with overlapping groups",
Phys. Rev. X 5, 011033 (2015), :doi:`10.1103/PhysRevX.5.011033`,
:arxiv:`1409.3059`.
.. [peixoto-inferring-2015] <NAME>, "Inferring the mesoscale
structure of layered, edge-valued and time-varying networks",
:arXiv:`1504.02381`
"""
dl_ent = kwargs.get("dl_ent", False)
if minimize_state is None:
minimize_state = NestedMinimizeState()
if overlap and nonoverlap_init and minimize_state.init and bs is None:
if verbose:
print("Non-overlapping initialization...")
state = minimize_nested_blockmodel_dl(g, Bs=Bs, bs=bs,
min_B=min_B,
max_B=max_B,
ec=ec, layers=layers,
deg_corr=deg_corr, overlap=False,
dl=dl, dense=dense,
multigraph=multigraph,
eweight=eweight,
vweight=vweight,
clabel=clabel if isinstance(clabel, PropertyMap) else None,
nsweeps=nsweeps,
adaptive_sweeps=adaptive_sweeps,
epsilon=epsilon, c=c,
nmerge_sweeps=nmerge_sweeps, r=r,
sparse_thresh=sparse_thresh,
sequential=sequential,
parallel=parallel,
verbose=verbose,
checkpoint=checkpoint,
minimize_state=minimize_state,
dl_ent=dl_ent)
if overlap != "full":
if clabel is not None:
bstate = state.levels[0].copy(overlap=True, clabel=clabel)
else:
bstate = state.levels[0].copy(overlap=True,
clabel=g.new_vertex_property("int"))
unilevel_minimize(bstate, nsweeps=nsweeps, epsilon=epsilon, c=c,
nmerge_sweeps=nmerge_sweeps, dl=dl,
sequential=sequential, parallel=parallel,
confine_layers=confine_layers)
bs = [array(s.b.a) for s in state.levels]
bs[0] = array(bstate.b.a)
del bstate
else:
bstates = [bstate.copy(overlap=True) for bstate in state.levels]
bs = [array(s.b.a) for s in bstates]
del bstates
if nonoverlap_init != "full":
bs = [bs[0], zeros(bs[0].max() + 1, dtype=bs[0].dtype)]
Bs = [b.max() + 1 for b in bs]
max_B = Bs[0]
max_b = bs[0].copy()
minimize_state.clear()
minimize_state.init = False
if verbose:
print("Overlapping minimization starting from:")
state.print_summary()
del state
if Bs is None:
if minimize_state is not None:
Bs = [ba.max() + 1 for ba in minimize_state.bs]
if len(Bs) == 0:
Bs = [1]
else:
Bs = [1]
if bs is not None:
Bs = [ba.max() + 1 for ba in bs]
if Bs[-1] > 1:
Bs += [1]
if bs is None:
state = init_nested_state(g, Bs=Bs, ec=ec, layers=layers,
confine_layers=confine_layers,
deg_corr=deg_corr, overlap=overlap,
eweight=eweight, vweight=vweight,
clabel=clabel, verbose=verbose,
nsweeps=nsweeps, nmerge_sweeps=nmerge_sweeps,
adaptive_sweeps=adaptive_sweeps,
dl=dl, dense=dense, multigraph=multigraph,
epsilon=epsilon, sparse_thresh=sparse_thresh,
sequential=sequential,
parallel=parallel,
checkpoint=checkpoint,
minimize_state=minimize_state, dl_ent=dl_ent)
else:
state = NestedBlockState(g, ec=ec, layers=layers, bs=bs,
deg_corr=deg_corr, overlap=overlap,
eweight=eweight, vweight=vweight,
clabel=clabel)
minimize_state.sync(state)
chkp = get_checkpoint_wrap(checkpoint, state, minimize_state, dl_ent)
dS = nested_tree_sweep(state,
min_B=min_B,
max_B=max_B,
max_b=max_b,
verbose=verbose,
nsweeps=nsweeps,
nmerge_sweeps=nmerge_sweeps,
adaptive_sweeps=adaptive_sweeps,
r=r, epsilon=epsilon,
dense=dense, dl=dl,
multigraph=multigraph,
sequential=sequential,
parallel=parallel,
sparse_thresh=sparse_thresh,
checkpoint=chkp,
minimize_state=minimize_state,
frozen_levels=frozen_levels,
dl_ent=dl_ent,
confine_layers=confine_layers)
return state
def get_hierarchy_tree(state, empty_branches=True):
r"""Obtain the nested hierarchical levels as a tree.
This transforms a :class:`~graph_tool.NestedBlockState` instance into a
single :class:`~graph_tool.Graph` instance containing the hierarchy tree.
Returns
-------
tree : :class:`~graph_tool.Graph`
A directed graph, where vertices are blocks, and a directed edge points
from an upper to a lower level in the hierarchy.
label : :class:`~graph_tool.PropertyMap`
A vertex property map containing the block label for each node.
order : :class:`~graph_tool.PropertyMap`
A vertex property map containing the relative ordering of each layer
according to the total degree of the groups at the specific levels.
empty_branches : ``bool`` (optional, default: ``True``)
If ``empty_branches == False``, dangling branches at the upper layers
will be pruned.
"""
bstack = state.get_bstack()
g = bstack[0]
b = g.vp["b"]
bstack = bstack[1:]
t = Graph()
t.add_vertex(g.num_vertices())
label = t.vertex_index.copy("int")
order = t.own_property(g.degree_property_map("total").copy())
last_pos = 0
for l, s in enumerate(bstack):
pos = t.num_vertices()
t.add_vertex(s.num_vertices())
label.a[-s.num_vertices():] = arange(s.num_vertices())
# relative ordering based on total degree
count = s.ep["count"].copy("double")
for e in s.edges():
if e.source() == e.target():
count[e] /= 2
vs = [t.vertex(vi) for vi in range(pos, t.num_vertices())]
vs = sorted(vs, key=lambda v: (s.vertex(int(v) - pos).out_degree(count) +
s.vertex(int(v) - pos).in_degree(count)))
for vi, v in enumerate(vs):
order[v] = vi
for vi, v in enumerate(g.vertices()):
w = t.vertex(vi + last_pos)
u = t.vertex(b[v] + pos)
t.add_edge(u, w)
last_pos = pos
g = s
if g.num_vertices() == 1:
break
b = g.vp["b"]
if not empty_branches:
vmask = t.new_vertex_property("bool")
vmask.a = True
for vi in range(state.g.num_vertices(), t.num_vertices()):
v = t.vertex(vi)
if v.out_degree() == 0:
vmask[v] = False
while v.in_degree() > 0:
v = list(v.in_neighbours())[0]
vmask[v] = False
vmask[v] = True
t = GraphView(t, vfilt=vmask)
t.vp["label"] = label
t = Graph(t, prune=True)
label = t.vp["label"]
del t.vp["label"]
return t, label, order
```
#### File: src/graph_tool/decorators.py
```python
from __future__ import division, absolute_import, print_function
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright 2006-2015 <NAME>"
__license__ = "GPL version 3 or above"
import inspect
import functools
import sys
################################################################################
# Decorators
# Some useful function decorators which will be used
################################################################################
# exec statement in python 2.7 and exec() function in 3.2 are mutually exclusive
if sys.hexversion > 0x03000000:
def exec_function(source, filename, global_map):
exec(compile(source, filename, "exec"), global_map)
else:
eval(compile("""\
def exec_function(source, filename, global_map):
exec compile(source, filename, "exec") in global_map
""","<exec_function>", "exec"))
def _wraps(func):
"""This decorator works like the functools.wraps meta-decorator, but
also preserves the function's argument signature. This uses eval, and is
thus a bit of a hack, but there no better way I know of to do this."""
def decorate(f):
argspec = inspect.getargspec(func)
___wrap_defaults = defaults = argspec[-1]
if defaults is not None:
def_string = ["___wrap_defaults[%d]" % d for
d in range(len(defaults))]
def_names = argspec[0][-len(defaults):]
else:
def_string = None
def_names = None
args_call = inspect.formatargspec(argspec[0], defaults=def_names)
argspec = inspect.formatargspec(argspec[0], defaults=def_string)
argspec = argspec.lstrip("(").rstrip(")")
wf = "def %s(%s):\n return f%s\n" % \
(func.__name__, argspec, args_call)
if def_string is not None:
for d in def_string:
wf = wf.replace("'%s'" % d, "%s" % d)
for d in def_names:
wf = wf.replace("'%s'" % d, "%s" % d)
exec_function(wf, __file__, locals())
return functools.wraps(func)(locals()[func.__name__])
return decorate
def _attrs(**kwds):
"""Decorator which adds arbitrary attributes to methods"""
def decorate(f):
for k in kwds:
setattr(f, k, kwds[k])
return f
return decorate
def _limit_args(allowed_vals):
"""Decorator which will limit the values of given arguments to a specified
list of allowed values, and raise TypeError exception if the given value
doesn't belong. 'allowed_vals' is a dict containing the allowed value list
for each limited function argument."""
def decorate(func):
@_wraps(func)
def wrap(*args, **kwargs):
arg_names = inspect.getargspec(func)[0]
arguments = list(zip(arg_names, args))
arguments += [(k, kwargs[k]) for k in list(kwargs.keys())]
for a in arguments:
if a[0] in allowed_vals:
if a[1] not in allowed_vals[a[0]]:
raise TypeError("value for '%s' must be one of: %s" % \
(a[0], ", ".join(allowed_vals[a[0]])))
return func(*args, **kwargs)
return wrap
return decorate
def _require(arg_name, *allowed_types):
"""Decorator that lets you annotate function definitions with argument type
requirements. These type requirements are automatically checked by the
system at function invocation time."""
def make_wrapper(f):
if hasattr(f, "wrapped_args"):
wrapped_args = f.wrapped_args
else:
code = f.__code__
wrapped_args = list(code.co_varnames[:code.co_argcount])
try:
arg_index = wrapped_args.index(arg_name)
except ValueError:
raise NameError(arg_name)
@_wraps(f)
def wrapper(*args, **kwargs):
if len(args) > arg_index:
arg = args[arg_index]
if not isinstance(arg, allowed_types):
type_list = " or ".join(str(allowed_type) \
for allowed_type in allowed_types)
raise TypeError("Expected '%s' to be %s; was %s." % \
(arg_name, type_list, type(arg)))
else:
if arg_name in kwargs:
arg = kwargs[arg_name]
if not isinstance(arg, allowed_types):
type_list = " or ".join(str(allowed_type) \
for allowed_type in \
allowed_types)
raise TypeError("Expected '%s' to be %s; was %s." %\
(arg_name, type_list, type(arg)))
return f(*args, **kwargs)
wrapper.wrapped_args = wrapped_args
return wrapper
return make_wrapper
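# Illustrative usage sketch (not part of the original module). The names
# `season`, `flavor` and `scale` below are hypothetical; they only show how the
# decorators defined above are meant to be combined:
#
#     @_limit_args({"flavor": ["sweet", "sour"]})
#     @_require("scale", int, float)
#     def season(flavor, scale=1.0):
#         return flavor, scale
#
#     season("sweet", 2)        # ok
#     season("bitter", 2)       # raises TypeError (value not in allowed list)
#     season("sour", "a lot")   # raises TypeError (wrong argument type)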
```
#### File: graph_tool/run_action/inline.py
```python
from __future__ import division, absolute_import, print_function
import sys, string, hashlib, os.path, re, glob
from .. import *
from .. import libgraph_tool_core
import numpy
from .. dl_import import dl_flags
import warnings
try:
import scipy.weave
except (ImportError, AttributeError) as e:
msg = "Error importing scipy.weave module'%s'; run_action.inline() will not work!" % str(e)
warnings.filterwarnings("always", msg, ImportWarning)
warnings.warn(msg, ImportWarning)
# sys.path can be dirty and in unicode! :-p
sys_path = [str(d) for d in sys.path if os.path.isdir(d)]
prefix = None
for d in [p + "/graph_tool" for p in sys_path]:
if os.path.exists(d):
prefix = d
break
inc_prefix = prefix + "/include"
cxxflags = libgraph_tool_core.mod_info().cxxflags + " -I%s" % inc_prefix + \
" -I%s" % inc_prefix + "/boost-workaround"
# this is the code template which defines the action function object
support_template = open(prefix + "/run_action/run_action_support.hh").read()
code_template = open(prefix + "/run_action/run_action_template.hh").read()
# hash all the headers to force recompilation if code changes
headers_hash = ""
incs = glob.glob(inc_prefix + "/*")
while len(incs) > 0:
inc = incs[0]
del incs[0]
if os.path.isdir(inc):
incs += glob.glob(inc + "/*")
else:
headers_hash = hashlib.md5(headers_hash.encode("utf-8") + open(inc, "rb").read()).hexdigest()
# property map types
props = """
typedef GraphInterface::vertex_index_map_t vertex_index_t;
typedef GraphInterface::edge_index_map_t edge_index_t;
typedef prop_bind_t<GraphInterface::vertex_index_map_t> vertex_prop_t;
typedef prop_bind_t<GraphInterface::edge_index_map_t> edge_prop_t;
typedef prop_bind_t<ConstantPropertyMap<size_t,graph_property_tag> > graph_prop_t;
"""
def clean_prop_type(t):
return t.replace(" ", "_").replace("::", "_")\
.replace("<", "_").replace(">", "_").\
replace("__", "_")
for d in ["vertex", "edge", "graph"]:
for t in value_types():
props += "typedef %s_prop_t::as<%s >::type %sprop_%s_t;\n" % \
(d, t.replace("bool", "uint8_t"), d[0], clean_prop_type(t))
def get_graph_type(g):
return libgraph_tool_core.get_graph_type(g._Graph__graph)
def inline(code, arg_names=None, local_dict=None,
global_dict=None, force=False, compiler="gcc", verbose=False,
auto_downcast=1, support_code="", libraries=None,
library_dirs=None, extra_compile_args=None,
runtime_library_dirs=None, extra_objects=None,
extra_link_args=None, mask_ret=None, debug=False):
"""Compile (if necessary) and run the C++ code specified by 'code', using :mod:`~scipy.weave`.
The (possibly modified) variables in 'arg_names' are returned.
See :func:`scipy.weave.inline` for detailed parameter documentation.
Notes
-----
Graphs and property maps are automatically converted to appropriate [Boost]_
graph types. For convenience, the graph types are automatically typedef'd to
`${name}_graph_t`, where ${name} is the graph's variable name passed to
`arg_names`. Property map types are typedef'd to `vprop_${val_type}_t`,
`eprop_${val_type}_t` or `gprop_${val_type}_t`, for vertex, edge or graph
properties, where ${val_type} specifies the value type (e.g. int, bool,
double, etc.). In the case of vector types, the "<" and ">" symbols are
replaced by underscores ("_").
Examples
--------
>>> from numpy.random import seed
>>> seed(42)
>>> g = gt.random_graph(100, lambda: (3, 3))
>>> nv = 0
>>> ret = gt.inline("nv = num_vertices(g);", ['g', 'nv'])
>>> print(ret["nv"])
100
>>> prop = g.new_vertex_property("vector<double>")
>>> prop[g.vertex(0)] = [1.0, 4.2]
>>> val = 0.0
>>> ret = gt.inline("val = prop[vertex(0,g)][1];", ['g', 'prop', 'val'])
>>> print(ret["val"])
4.2
References
----------
.. [Boost] http://www.boost.org/libs/graph/doc/table_of_contents.html
.. [Weave] http://www.scipy.org/Weave
"""
if arg_names == None:
arg_names = []
if libraries == None:
libraries = []
if library_dirs == None:
library_dirs = []
if extra_compile_args == None:
extra_compile_args = []
if runtime_library_dirs == None:
runtime_library_dirs = []
if extra_objects == None:
extra_objects = []
if extra_link_args == None:
extra_link_args = []
if mask_ret == None:
mask_ret = []
# we need to get the locals and globals of the _calling_ function. Thus, we
# need to go deeper into the call stack
call_frame = sys._getframe(1)
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
# convert variables to boost::python::object, except some known convertible
# types
arg_def = props
arg_conv = ""
arg_alias = []
alias_dict = {}
for arg in arg_names:
if arg not in list(local_dict.keys()) and arg not in list(global_dict.keys()):
raise ValueError("undefined variable: " + arg)
if arg in list(local_dict.keys()):
arg_val = local_dict[arg]
else:
arg_val = global_dict[arg]
if issubclass(type(arg_val), Graph):
alias = "__gt__" + arg
gi = "__gt__" + arg + "__gi"
graph_type = get_graph_type(arg_val)
gi_val = arg_val._Graph__graph
arg_def += "typedef %s %s_graph_t;\n" % (graph_type, arg)
arg_def += "GraphInterface& %s = python::extract<GraphInterface&>(%s);\n" % \
(gi, alias)
arg_def += "%s_graph_t& %s = *boost::any_cast<%s*>(%s.GetGraphView());\n" % \
(arg, arg, graph_type, gi)
arg_alias.append(alias)
alias_dict[alias] = gi_val
elif type(arg_val) == PropertyMap:
alias = "__gt__" + arg
if arg_val == arg_val.get_graph().vertex_index:
prop_name = "GraphInterface::vertex_index_map_t"
elif arg_val == arg_val.get_graph().edge_index:
prop_name = "GraphInterface::edge_index_map_t"
else:
prop_name = "%sprop_%s_t" % \
(arg_val.key_type(),
clean_prop_type(arg_val.value_type()))
arg_def += "%s %s;\n" % (prop_name, arg)
arg_conv += "%s = get_prop<%s>(%s);\n" % \
(arg, prop_name, alias)
arg_alias.append(alias)
alias_dict[alias] = arg_val
elif type(arg_val) not in [int, bool, float, string, numpy.ndarray]:
alias = "__gt__" + arg
obj_type = "python::object"
if type(arg_val) == list:
obj_type = "python::list"
elif type(arg_val) == dict:
obj_type = "python::dict"
elif type(arg_val) == tuple:
obj_type = "python::tuple"
arg_def += "%s %s;\n" % (obj_type, arg)
arg_conv += "%s = %s(python::object(python::handle<>" % (arg, obj_type) + \
"(python::borrowed((PyObject*)(%s)))));\n" % alias
arg_alias.append(alias)
alias_dict[alias] = arg_val
elif type(arg_val) == bool:
#weave is dumb with bools
alias = "__gt__" + arg
arg_def += "bool %s;\n" % arg
arg_conv += "%s = python::extract<bool>(python::object(python::handle<>" % arg + \
"(python::borrowed((PyObject*)(%s)))));\n" % alias
arg_alias.append(alias)
alias_dict[alias] = arg_val
else:
arg_alias.append(arg)
if arg in list(local_dict.keys()):
alias_dict[arg] = local_dict[arg]
else:
alias_dict[arg] = global_dict[arg]
# handle returned values
return_vals = ""
for arg in arg_names:
if arg in list(local_dict.keys()):
arg_val = local_dict[arg]
else:
arg_val = global_dict[arg]
if arg not in mask_ret and \
type(arg_val) not in [numpy.ndarray, PropertyMap] and \
not issubclass(type(arg_val), Graph):
return_vals += 'return_vals["%s"] = %s;\n' % (arg, arg)
support_code += globals()["support_template"]
# set debug flag and disable optimization in debug mode
compile_args = [cxxflags] + extra_compile_args
if debug:
compile_args = [re.sub("-O[^ ]*", "", x) for x in compile_args] + ["-g"]
# insert a hash value into the code below, to force recompilation when
# support_code (and module version) changes
text = support_code + code + " ".join(libraries + library_dirs +
[cxxflags] + \
extra_compile_args +\
extra_objects + \
extra_link_args) + \
headers_hash + __version__
support_hash = hashlib.md5(text.encode("ascii")).hexdigest()
code += "\n// support code hash: " + support_hash
inline_code = string.Template(globals()["code_template"]).\
substitute(var_defs=arg_def, var_extract=arg_conv,
code=code, return_vals=return_vals)
# RTLD_GLOBAL needs to be set in dlopen() if we want typeinfo and
# friends to work properly across DSO boundaries. See
# http://gcc.gnu.org/faq.html#dso
orig_dlopen_flags = sys.getdlopenflags()
sys.setdlopenflags(dl_flags)
# call weave and pass all the updated kw arguments
ret_vals = \
scipy.weave.inline(inline_code, arg_alias, force=int(force),
local_dict=alias_dict, global_dict=global_dict,
compiler=compiler, verbose=int(verbose),
auto_downcast=auto_downcast,
support_code=support_code,
libraries=libraries,
library_dirs=sys_path + library_dirs,
extra_compile_args=compile_args,
runtime_library_dirs=runtime_library_dirs,
extra_objects=extra_objects,
extra_link_args=["-Wl,-E "] + extra_link_args)
# check if exception was thrown
if ret_vals["__exception_thrown"]:
libgraph_tool_core.raise_error(ret_vals["__exception_error"])
else:
del ret_vals["__exception_thrown"]
del ret_vals["__exception_error"]
sys.setdlopenflags(orig_dlopen_flags) # reset dlopen to normal case to
# avoid unnecessary symbol collision
# set return vals
for arg in arg_names:
if arg in ret_vals:
if arg in local_dict:
local_dict[arg] = ret_vals[arg]
else:
global_dict[arg] = ret_vals[arg]
return ret_vals
```
#### File: graph_tool/stats/__init__.py
```python
from __future__ import division, absolute_import, print_function
from .. dl_import import dl_import
dl_import("from . import libgraph_tool_stats")
from .. import _degree, _prop, _get_rng, GraphView, PropertyMap
from numpy import *
import numpy
import sys
__all__ = ["vertex_hist", "edge_hist", "vertex_average", "edge_average",
"label_parallel_edges", "remove_parallel_edges",
"label_self_loops", "remove_self_loops", "remove_labeled_edges",
"distance_histogram"]
def vertex_hist(g, deg, bins=[0, 1], float_count=True):
"""
Return the vertex histogram of the given degree type or property.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
deg : string or :class:`~graph_tool.PropertyMap`
Degree or property to be used for the histogram. It can be either "in",
"out" or "total", for in-, out-, or total degree of the vertices. It can
also be a vertex property map.
bins : list of bins (optional, default: [0, 1])
List of bins to be used for the histogram. The values given represent
the edges of the bins (i.e. lower and upper bounds). If the list
contains two values, this will be used to automatically create an
appropriate bin range, with a constant width given by the second value,
and starting from the first value.
float_count : bool (optional, default: True)
If True, the counts in each histogram bin will be returned as floats. If
False, they will be returned as integers.
Returns
-------
counts : :class:`~numpy.ndarray`
The bin counts.
bins : :class:`~numpy.ndarray`
The bin edges.
See Also
--------
edge_hist: Edge histograms.
vertex_average: Average of vertex properties, degrees.
edge_average: Average of edge properties.
distance_histogram : Shortest-distance histogram.
Notes
-----
The algorithm runs in :math:`O(|V|)` time.
If enabled during compilation, this algorithm runs in parallel.
Examples
--------
.. testsetup::
import numpy.random
numpy.random.seed(42)
gt.seed_rng(42)
>>> from numpy.random import poisson
>>> g = gt.random_graph(1000, lambda: (poisson(5), poisson(5)))
>>> print(gt.vertex_hist(g, "out"))
[array([ 7., 33., 91., 145., 165., 164., 152., 115., 62.,
29., 28., 6., 1., 1., 0., 1.]), array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], dtype=uint64)]
"""
ret = libgraph_tool_stats.\
get_vertex_histogram(g._Graph__graph, _degree(g, deg),
[float(x) for x in bins])
return [array(ret[0], dtype="float64") if float_count else ret[0], ret[1]]
def edge_hist(g, eprop, bins=[0, 1], float_count=True):
"""
Return the edge histogram of the given property.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
eprop : :class:`~graph_tool.PropertyMap`
Edge property to be used for the histogram.
bins : list of bins (optional, default: [0, 1])
List of bins to be used for the histogram. The values given represent
the edges of the bins (i.e. lower and upper bounds). If the list
contains two values, this will be used to automatically create an
appropriate bin range, with a constant width given by the second value,
and starting from the first value.
float_count : bool (optional, default: True)
If True, the counts in each histogram bin will be returned as floats. If
False, they will be returned as integers.
Returns
-------
counts : :class:`~numpy.ndarray`
The bin counts.
bins : :class:`~numpy.ndarray`
The bin edges.
See Also
--------
vertex_hist : Vertex histograms.
vertex_average : Average of vertex properties, degrees.
edge_average : Average of edge properties.
distance_histogram : Shortest-distance histogram.
Notes
-----
The algorithm runs in :math:`O(|E|)` time.
If enabled during compilation, this algorithm runs in parallel.
Examples
--------
.. testsetup::
import numpy.random
numpy.random.seed(42)
gt.seed_rng(42)
>>> from numpy import arange
>>> from numpy.random import random
>>> g = gt.random_graph(1000, lambda: (5, 5))
>>> eprop = g.new_edge_property("double")
>>> eprop.get_array()[:] = random(g.num_edges())
>>> print(gt.edge_hist(g, eprop, linspace(0, 1, 11)))
[array([ 501., 441., 478., 480., 506., 494., 507., 535., 499., 559.]), array([ 0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ])]
"""
ret = libgraph_tool_stats.\
get_edge_histogram(g._Graph__graph, _prop("e", g, eprop),
[float(x) for x in bins])
return [array(ret[0], dtype="float64") if float_count else ret[0], ret[1]]
def vertex_average(g, deg):
"""
Return the average of the given degree or vertex property.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
deg : string or :class:`~graph_tool.PropertyMap`
        Degree or property to be used for the average. It can be either "in",
"out" or "total", for in-, out-, or total degree of the vertices. It can
also be a vertex property map.
Returns
-------
average : float
The average of the given degree or property.
std : float
The standard deviation of the average.
See Also
--------
vertex_hist : Vertex histograms.
edge_hist : Edge histograms.
edge_average : Average of edge properties.
distance_histogram : Shortest-distance histogram.
Notes
-----
The algorithm runs in :math:`O(|V|)` time.
If enabled during compilation, this algorithm runs in parallel.
Examples
--------
.. testsetup::
import numpy.random
numpy.random.seed(42)
gt.seed_rng(42)
>>> from numpy.random import poisson
>>> g = gt.random_graph(1000, lambda: (poisson(5), poisson(5)))
>>> print(gt.vertex_average(g, "in"))
(4.975, 0.0686758691244603)
"""
if isinstance(deg, PropertyMap) and "string" in deg.value_type():
raise ValueError("Cannot calculate average of property type: " + deg.value_type())
a, aa, count = libgraph_tool_stats.\
get_vertex_average(g._Graph__graph, _degree(g, deg))
try:
a = array(a.a)
aa = array(aa.a)
except AttributeError:
pass
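    # a and aa appear to hold the running sum and sum of squares, so the two lines
    # below give the mean and its standard error, sqrt((E[x^2] - E[x]^2) / N).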
a /= count
aa = sqrt((aa / count - a ** 2) / count)
return a, aa
def edge_average(g, eprop):
"""
    Return the average of the given edge property.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
eprop : :class:`~graph_tool.PropertyMap`
        Edge property to be used for the average.
Returns
-------
average : float
The average of the given property.
std : float
The standard deviation of the average.
See Also
--------
vertex_hist : Vertex histograms.
edge_hist : Edge histograms.
vertex_average : Average of vertex degree, properties.
distance_histogram : Shortest-distance histogram.
Notes
-----
The algorithm runs in :math:`O(|E|)` time.
If enabled during compilation, this algorithm runs in parallel.
Examples
--------
.. testsetup::
import numpy.random
numpy.random.seed(42)
gt.seed_rng(42)
>>> from numpy import arange
>>> from numpy.random import random
>>> g = gt.random_graph(1000, lambda: (5, 5))
>>> eprop = g.new_edge_property("double")
>>> eprop.get_array()[:] = random(g.num_edges())
>>> print(gt.edge_average(g, eprop))
(0.4989741369720412, 0.004101065927783255)
"""
if "string" in eprop.value_type():
raise ValueError("Cannot calculate average of property type: " + eprop.value_type())
g = GraphView(g, directed=True)
a, aa, count = libgraph_tool_stats.\
get_edge_average(g._Graph__graph, _prop("e", g, eprop))
try:
a = array(a.a)
aa = array(aa.a)
except AttributeError:
pass
a /= count
aa = sqrt((aa / count - a ** 2) / count)
return a, aa
def remove_labeled_edges(g, label):
"""Remove every edge `e` such that `label[e] != 0`."""
u = GraphView(g, directed=True, reversed=g.is_reversed(),
skip_properties=True)
libgraph_tool_stats.\
remove_labeled_edges(u._Graph__graph, _prop("e", g, label))
def label_parallel_edges(g, mark_only=False, eprop=None):
r"""Label edges which are parallel, i.e, have the same source and target
vertices. For each parallel edge set :math:`PE`, the labelling starts from 0
to :math:`|PE|-1`. If `mark_only==True`, all parallel edges are simply
marked with the value 1. If the `eprop` parameter is given (a
:class:`~graph_tool.PropertyMap`), the labelling is stored there."""
if eprop is None:
if mark_only:
eprop = g.new_edge_property("bool")
else:
eprop = g.new_edge_property("int32_t")
libgraph_tool_stats.\
label_parallel_edges(g._Graph__graph, _prop("e", g, eprop),
mark_only)
return eprop
def remove_parallel_edges(g):
"""Remove all parallel edges from the graph. Only one edge from each
parallel edge set is left."""
eprop = label_parallel_edges(g)
remove_labeled_edges(g, eprop)
def label_self_loops(g, mark_only=False, eprop=None):
"""Label edges which are self-loops, i.e, the source and target vertices are
the same. For each self-loop edge set :math:`SL`, the labelling starts from 0
to :math:`|SL|-1`. If `mark_only == True`, self-loops are labeled with 1
and others with 0. If the `eprop` parameter is given
(a :class:`~graph_tool.PropertyMap`), the labelling is stored there."""
if eprop is None:
if mark_only:
eprop = g.new_edge_property("bool")
else:
eprop = g.new_edge_property("int32_t")
libgraph_tool_stats.\
label_self_loops(g._Graph__graph, _prop("e", g, eprop),
mark_only)
return eprop
def remove_self_loops(g):
"""Remove all self-loops edges from the graph."""
eprop = label_self_loops(g)
remove_labeled_edges(g, eprop)
def distance_histogram(g, weight=None, bins=[0, 1], samples=None,
float_count=True):
r"""
Return the shortest-distance histogram for each vertex pair in the graph.
Parameters
----------
g : :class:`Graph`
Graph to be used.
weight : :class:`~graph_tool.PropertyMap` (optional, default: None)
Edge weights.
bins : list of bins (optional, default: [0, 1])
List of bins to be used for the histogram. The values given represent
the edges of the bins (i.e. lower and upper bounds). If the list
contains two values, this will be used to automatically create an
appropriate bin range, with a constant width given by the second value,
and starting from the first value.
samples : int (optional, default: None)
If supplied, the distances will be randomly sampled from a number of
        source vertices given by this parameter. If `samples == None` (default),
all pairs are used.
float_count : bool (optional, default: True)
If True, the counts in each histogram bin will be returned as floats. If
False, they will be returned as integers.
Returns
-------
counts : :class:`~numpy.ndarray`
The bin counts.
bins : :class:`~numpy.ndarray`
The bin edges.
See Also
--------
vertex_hist : Vertex histograms.
edge_hist : Edge histograms.
vertex_average : Average of vertex degree, properties.
    edge_average : Average of edge properties.
Notes
-----
The algorithm runs in :math:`O(V^2)` time, or :math:`O(V^2\log V)` if
`weight != None`. If `samples` is supplied, the complexities are
:math:`O(\text{samples}\times V)` and
:math:`O(\text{samples}\times V\log V)`, respectively.
If enabled during compilation, this algorithm runs in parallel.
Examples
--------
.. testsetup::
import numpy.random
numpy.random.seed(42)
gt.seed_rng(42)
>>> g = gt.random_graph(100, lambda: (3, 3))
>>> hist = gt.distance_histogram(g)
>>> print(hist)
[array([ 0., 300., 865., 2214., 3857., 2480., 184.]), array([0, 1, 2, 3, 4, 5, 6, 7], dtype=uint64)]
>>> hist = gt.distance_histogram(g, samples=10)
>>> print(hist)
[array([ 0., 30., 88., 226., 391., 240., 15.]), array([0, 1, 2, 3, 4, 5, 6, 7], dtype=uint64)]
"""
if samples != None:
ret = libgraph_tool_stats.\
sampled_distance_histogram(g._Graph__graph,
_prop("e", g, weight),
[float(x) for x in bins],
samples, _get_rng())
else:
ret = libgraph_tool_stats.\
distance_histogram(g._Graph__graph, _prop("e", g, weight), bins)
return [array(ret[0], dtype="float64") if float_count else ret[0], ret[1]]
```
#### File: site-packages/graphviz/_compat.py
```python
import sys
PY2 = sys.version_info[0] == 2
if PY2: # pragma: no cover
text_type = unicode
def iteritems(d):
return d.iteritems()
else: # pragma: no cover
text_type = str
def iteritems(d):
return iter(d.items())
``` |
{
"source": "johan-kallstrom/bayesian-learning-course",
"score": 3
} |
#### File: bayesian-learning-course/bayesian_learning/bandits.py
```python
import numpy as np
class BernoulliBandit:
def __init__(self, probs):
self.probs = probs
self.initial_probs = probs.copy()
self.n_arms = len(probs)
def draw(self, arm):
assert arm < self.n_arms, "Arm outside range: %d" % self.n_arms
reward = np.random.binomial(n=1, p=self.probs[arm])
expected_regret = np.max(self.probs) - self.probs[arm]
return reward, expected_regret
class NonStationaryBernoulliBandit(BernoulliBandit):
def __init__(self, probs, total_draws):
super().__init__(probs)
assert 3 == self.n_arms, "Currently 3 arms is expected"
self.n_draws = 0
self.total_draws = total_draws
    def draw(self, arm):
        reward, expected_regret = super().draw(arm)
        # advance the draw counter and let the arm probabilities drift over time
        self.n_draws += 1
        self._update_probs(self.n_draws)
        return reward, expected_regret
def _update_probs(self, n_draws):
self.probs[0] = self.initial_probs[0] + (n_draws / self.total_draws) * (self.initial_probs[2] - self.initial_probs[0])
self.probs[2] = self.initial_probs[2] + (n_draws / self.total_draws) * (self.initial_probs[0] - self.initial_probs[2])
# print((self.initial_probs[2] - self.initial_probs[0]))
# print((self.initial_probs[0] - self.initial_probs[2]))
# print((n_draws / self.total_draws))
# print(self.probs)
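# Illustrative usage sketch (not part of the original module; assumes `probs`
# is a numpy array of per-arm success probabilities):
#
#     bandit = BernoulliBandit(np.array([0.1, 0.5, 0.8]))
#     reward, expected_regret = bandit.draw(arm=2)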
```
#### File: bayesian-learning-course/bayesian_learning/bayes_ucb.py
```python
import numpy as np
from collections import deque
from scipy.stats import beta
class BayesUcb:
def __init__(self, priors):
self.priors = priors.copy()
self.n_arms = len(priors)
self.reset()
def select_arm(self):
if self.warmup_arm < self.n_arms:
arm = self.warmup_arm
self.warmup_arm += 1
else:
qs = np.zeros(shape=(self.n_arms,))
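            # Bayes-UCB rule: score each arm by the (1 - 1/t) upper quantile of its
            # Beta posterior and play the arm whose quantile is highest.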
for i, posterior in enumerate(self.posteriors):
p = 1 - 1 / self.time_step
                q = beta.ppf(p, a=posterior[0], b=posterior[1], loc=0, scale=1)
qs[i] = q
arm = np.argmax(qs)
self.time_step += 1
return arm
def learn(self, arm, reward):
# Update alpha and beta in Beta distribution
# alpha = alpha + reward
self.posteriors[arm][0] += reward
# beta = beta + abs(reward - 1)
self.posteriors[arm][1] += abs(reward - 1)
def reset(self):
self.posteriors = self.priors.copy()
self.warmup_arm = 0
self.time_step = 0
class SlidingWindowBayesUcb:
def __init__(self, priors, window_length=2000):
self.priors = priors.copy()
self.n_arms = len(priors)
self.window_length = window_length
self.reset()
def select_arm(self):
if self.warmup_arm < self.n_arms:
arm = self.warmup_arm
self.warmup_arm += 1
else:
qs = np.zeros(shape=(self.n_arms,))
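            # Same quantile rule as BayesUcb, but each arm's Beta posterior is rebuilt
            # from the prior plus only the rewards currently inside the sliding window.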
for i in range(self.n_arms):
p = 1 - 1 / self.time_step
q = beta.ppf(p, a=self.priors[i][0]+np.sum(self.recent_rewards[i]), b=self.priors[i][1]+len(self.recent_rewards[i])-np.sum(self.recent_rewards[i]), loc=0, scale=1)
qs[i] = q
arm = np.argmax(qs)
self.time_step += 1
return arm
def learn(self, arm, reward):
# Store recent rewards
self.recent_rewards[arm].append(reward)
def reset(self):
self.posteriors = self.priors.copy()
self.warmup_arm = 0
self.time_step = 0
self.recent_rewards = [deque(maxlen=self.window_length) for _ in range(self.n_arms)]
``` |
{
"source": "johan-kallstrom/gym-mo",
"score": 2
} |
#### File: envs/gridworlds/mo_gathering_env.py
```python
import time
from gym_mo.envs.gridworlds import gridworld_base
from gym_mo.envs.gridworlds.mo_gridworld_base import MOGridworld
from gym_mo.envs.gridworlds.gridworld_base import GridObject, HunterAgent
import numpy as np
GATHERING_MAPPING = {
'#': GridObject(True, False, 0, (255.0, 255.0, 255.0), 1),
'o': GridObject(True, True, 0, (0.0, 255.0, 0.0), 2),
'p': GridObject(True, True, 1, (255.0, 0.0, 0.0), 3),
'q': GridObject(True, True, 0, (255.0, 255.0, 0.0), 4),
' ': None
}
GATHERING_MAP = [
' ',
' ',
' ',
' ',
' ',
' ',
' ',
' ',
]
class MOGatheringEnv(MOGridworld):
def __init__(self,
from_pixels=True,
agent_start=[0,0],
agent_color=(0.0, 0.0, 255.0),
preference=np.array([-1,-5,+20,-20,-20,+0]),
random_items=['p','o','p','o','p','o','q','q'],
random_items_frame=2,
agents=[]):
agent0 = HunterAgent(3, True, False, 0, (255.0, 0.0, 255.0), 5)
agent0.set_position([7,7])
GATHERING_AGENTS = [agent0]
super(MOGatheringEnv, self).__init__(map=GATHERING_MAP,
object_mapping=GATHERING_MAPPING,
random_items=random_items,
random_items_frame=random_items_frame,
from_pixels=from_pixels,
init_agents=GATHERING_AGENTS,
agent_start=agent_start,
agent_color=agent_color,
preference=preference,
max_steps=30, include_agents=False)
if __name__=="__main__":
my_grid = MOGatheringEnv(from_pixels=True)
done = False
my_grid.reset()
while not done:
_, r, done, _ = my_grid.step(my_grid.action_space.sample())
my_grid.render()
time.sleep(0.5)
``` |
{
"source": "johan-kallstrom/multiagent-particle-envs",
"score": 3
} |
#### File: multiagent/scenarios/simple_hockey.py
```python
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = 2
num_adversaries = 1
num_landmarks = 5
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
if i < num_adversaries:
agent.adversary = True
agent.color = np.array([0.75, 0.25, 0.25])
else:
agent.adversary = False
agent.color = np.array([0.25, 0.25, 0.75])
# add landmarks for goal posts and puck
goal_posts = [[-0.25, -1.0],
[-0.25, 1.0],
[0.25, -1.0],
[0.25, 1.0]]
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
if i > 0:
landmark.collide = True
landmark.movable = False
landmark.state.p_pos = np.array(goal_posts[i-1])
landmark.state.p_vel = np.zeros(world.dim_p)
else:
landmark.collide = True
landmark.movable = True
# add landmarks for rink boundary
#world.landmarks += self.set_boundaries(world)
# make initial conditions
self.reset_world(world)
return world
def set_boundaries(self, world):
boundary_list = []
landmark_size = 1
edge = 1 + landmark_size
num_landmarks = int(edge * 2 / landmark_size)
for x_pos in [-edge, edge]:
for i in range(num_landmarks):
l = Landmark()
l.state.p_pos = np.array([x_pos, -1 + i * landmark_size])
boundary_list.append(l)
for y_pos in [-edge, edge]:
for i in range(num_landmarks):
l = Landmark()
l.state.p_pos = np.array([-1 + i * landmark_size, y_pos])
boundary_list.append(l)
for i, l in enumerate(boundary_list):
l.name = 'boundary %d' % i
l.collide = True
l.movable = False
l.boundary = True
l.color = np.array([0.75, 0.75, 0.75])
l.size = landmark_size
l.state.p_vel = np.zeros(world.dim_p)
return boundary_list
def reset_world(self, world):
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
if i > 0:
landmark.color = np.array([0.7, 0.7, 0.7])
else:
landmark.color = np.array([0.1, 0.1, 0.1])
landmark.index = i
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
world.landmarks[0].state.p_pos = np.random.uniform(-1, +1, world.dim_p)
world.landmarks[0].state.p_vel = np.zeros(world.dim_p)
# return all agents of the blue team
def blue_agents(self, world):
return [agent for agent in world.agents if not agent.adversary]
# return all agents of the red team
def red_agents(self, world):
return [agent for agent in world.agents if agent.adversary]
def reward(self, agent, world):
# Agents are rewarded based on team they belong to
return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)
def agent_reward(self, agent, world):
# reward for blue team agent
return 0.0
def adversary_reward(self, agent, world):
# reward for red team agent
return 0.0
def observation(self, agent, world):
# get positions/vel of all entities in this agent's reference frame
entity_pos = []
entity_vel = []
for entity in world.landmarks: # world.entities:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
if entity.movable:
entity_vel.append(entity.state.p_vel)
# get positions/vel of all other agents in this agent's reference frame
other_pos = []
other_vel = []
for other in world.agents:
if other is agent: continue
other_pos.append(other.state.p_pos - agent.state.p_pos)
other_vel.append(other.state.p_vel)
return np.concatenate([agent.state.p_vel] + entity_pos + entity_vel + other_pos + other_vel)
``` |
{
"source": "johankirsten/fdtdempy",
"score": 2
} |
#### File: johankirsten/fdtdempy/example.py
```python
from fdtdempy.fields import DipoleCurrentField
from fdtdempy.fields import AggregatedField
from fdtdempy.fields import ZeroField
from fdtdempy.fields import StimulatedField
from fdtdempy.core import Dimension
from fdtdempy.core import Simulation
import math
def Experiment():
dipoleCurrentField = DipoleCurrentField()
dipoleCurrentField.SetCurrentDirection(1)
dipoleCurrentField.SetAmplitude(1)
dipoleCurrentField.SetFrequency(3 * math.pow(10, 9))
dipoleCurrentField.SetdX(0.001)
dipoleCurrentField.CenterXIndex = 100
dipoleCurrentField.CenterYIndex = 27
dipoleCurrentField.CenterZIndex = 27
dipoleCurrentField.LengthIndex = 50
dipoleCurrentField.Orientation = 0
jx = AggregatedField()
jx.CalculatedFieldList.append(dipoleCurrentField)
jy = ZeroField()
jz = ZeroField()
ex = StimulatedField()
ex.Name = "Ex"
ex.FilePath = "C:\\Temp\\python\\Ex.csv"
ex.xDimension = Dimension()
ex.xDimension.Step = 2
ex.xDimension.Begin = 0
ex.xDimension.End = 200
ex.yDimension = Dimension()
ex.yDimension.Step = 2
ex.yDimension.Begin = -1
ex.yDimension.End = 61
ex.zDimension = Dimension()
ex.zDimension.Step = 2
ex.zDimension.Begin = -1
ex.zDimension.End = 61
ex.WriteXIndexBegin = 100
ex.WriteXIndexEnd = 102
ex.WriteYIndexBegin = 1
ex.WriteYIndexEnd = 59
ex.WriteZIndexBegin = 27
ex.WriteZIndexEnd = 29
ex.Init()
ey = StimulatedField()
ey.Name = "Ey"
ey.FilePath = "C:\\Temp\\python\\Ey.csv"
ey.xDimension = Dimension()
ey.xDimension.Step = 2
ey.xDimension.Begin = -1
ey.xDimension.End = 201
ey.yDimension = Dimension()
ey.yDimension.Step = 2
ey.yDimension.Begin = 0
ey.yDimension.End = 60
ey.zDimension = Dimension()
ey.zDimension.Step = 2
ey.zDimension.Begin = -1
ey.zDimension.End = 61
ey.Init()
ez = StimulatedField()
ez.Name = "Ez"
ez.FilePath = "C:\\Temp\\python\\Ez.csv"
ez.xDimension = Dimension()
ez.xDimension.Step = 2
ez.xDimension.Begin = -1
ez.xDimension.End = 201
ez.yDimension = Dimension()
ez.yDimension.Step = 2
ez.yDimension.Begin = -1
ez.yDimension.End = 61
ez.zDimension = Dimension()
ez.zDimension.Step = 2
ez.zDimension.Begin = 0
ez.zDimension.End = 60
ez.Init()
hx = StimulatedField()
hx.Name = "Hx"
hx.FilePath = "C:\\Temp\\python\\Hx.csv"
hx.xDimension = Dimension()
hx.xDimension.Step = 2
hx.xDimension.Begin = 1
hx.xDimension.End = 199
hx.yDimension = Dimension()
hx.yDimension.Step = 2
hx.yDimension.Begin = 0
hx.yDimension.End = 60
hx.zDimension = Dimension()
hx.zDimension.Step = 2
hx.zDimension.Begin = 0
hx.zDimension.End = 60
hx.Init()
hy = StimulatedField()
hy.Name = "Hy"
hy.FilePath = "C:\\Temp\\python\\Hy.csv"
hy.xDimension = Dimension()
hy.xDimension.Step = 2
hy.xDimension.Begin = 0
hy.xDimension.End = 200
hy.yDimension = Dimension()
hy.yDimension.Step = 2
hy.yDimension.Begin = 1
hy.yDimension.End = 59
hy.zDimension = Dimension()
hy.zDimension.Step = 2
hy.zDimension.Begin = 0
hy.zDimension.End = 60
hy.Init()
hz = StimulatedField()
hz.Name = "Hz"
hz.FilePath = "C:\\Temp\\python\\Hz.csv"
hz.xDimension = Dimension()
hz.xDimension.Step = 2
hz.xDimension.Begin = 0
hz.xDimension.End = 200
hz.yDimension = Dimension()
hz.yDimension.Step = 2
hz.yDimension.Begin = 0
hz.yDimension.End = 60
hz.zDimension = Dimension()
hz.zDimension.Step = 2
hz.zDimension.Begin = 1
hz.zDimension.End = 59
hz.WriteXIndexBegin = 100
hz.WriteXIndexEnd = 102
hz.WriteYIndexBegin = 0
hz.WriteYIndexEnd = 60
hz.WriteZIndexBegin = 27
hz.WriteZIndexEnd = 29
hz.Init()
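    # Assemble and run the simulation: the 3 GHz dipole current defined above drives
    # the grid, and the E/H field slices configured above are written to the CSV
    # files given by each field's FilePath.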
simulation = Simulation()
simulation.TimeSteps = 200
simulation.Jx = jx
simulation.Jy = jy
simulation.Jz = jz
simulation.Ex = ex
simulation.Ey = ey
simulation.Ez = ez
simulation.Hx = hx
simulation.Hy = hy
simulation.Hz = hz
simulation.Init(0.001)
simulation.Run()
Experiment()
print("Press any key to continue")
input()
``` |
{
"source": "JohanKJIP/algorithms",
"score": 4
} |
#### File: algorithms/heap/fibonacci_heap.py
```python
import math
import sys
"""
A Fibonacci heap is a collection of rooted trees that are min-heap ordered. That is, each tree obeys
the min-heap property: the key of a node is greater than or equal to the key of its parent.
Using Fibonacci heaps for priority queues improves the asymptotic running time of important algorithms,
such as Dijkstra's algorithm for computing the shortest path between two nodes in a graph, compared to
the same algorithm using other slower priority queue data structures.
Time complexity vs. binary heap:
| | Bin tree (worst-case) | Fib tree (amortized) |
|-------------- |----------------------- |---------------------- |
| Make-Heap | Θ(1) | Θ(1) |
| Insert | Θ(logn) | Θ(1) |
| Minimum | Θ(1) | Θ(1) |
| Extract-min | Θ(logn) | O(logn) |
| Merge/union | Θ(n) | Θ(1) |
| Decrease key | Θ(logn) | Θ(1) |
| Delete | Θ(logn) | O(logn) |
However, notice that these times are amortized and that memory consumption can be high. Consequently,
Fibonacci heaps have a reputation for being slow in practice due to large memory consumption per
node and high constant factors on all operations.
Resource:
Introduction to Algorithms, third edition.
Chapter 19, Fibonacci heaps
"""
class FibonacciHeap:
class Node:
def __init__(self, key):
self.key = key
self.parent = None
self.child = None
self.left = None
self.right = None
self.degree = 0
self.mark = False
def __init__(self):
# the root list is a list of rooted trees
# the min_node is the node with lowest key value in the heap
self.min_node = self.root_list = None
self.total_nodes = 0
def _append_root(self, node):
"""
Append a node to the end of the
root list.
"""
last_node = self.root_list.left
node.right = self.root_list
node.left = last_node
last_node.right = node
self.root_list.left = node
def _remove_root(self, node):
"""
Remove the node from the head list.
"""
# nothing to remove
if node == None or self.root_list == None:
return
# only one node
if node == self.root_list and node.left == self.root_list and node.right == self.root_list:
self.root_list = None
return
# length of root_list >= 2
node.left.right = node.right
node.right.left = node.left
# update root list reference if the
# removed node was the reference
if node == self.root_list:
# replace the head contents with the node to the left
# eliminating the node
self.root_list = node.right
return node
def iterate(self, head):
"""
Iterate the fib heap.
"""
node = stop = head
flag = False
while True:
if node == stop and flag is True:
break
elif node == stop:
flag = True
yield node
node = node.right
def find_min(self):
"""
Return the minimum node in the heap.
"""
return self.min_node
def merge(self, heap2):
"""
Merge two fib heaps. It works by placing heap2's root list
at the end of this heap's root list and connecting the heads
and tails.
"""
if heap2.root_list == None:
return
if self.root_list == None:
self.root_list = heap2.root_list
self.min_node = heap2.min_node
self.total_nodes = heap2.total_nodes
return
heap2_tail = heap2.root_list.left
tail = self.root_list.left
# the tail of heap 2 is now the end of the list
self.root_list.left = heap2_tail
heap2_tail.right = self.root_list
# heap2 starts at the end of the old list
tail.right = heap2.root_list
heap2.root_list.left = tail
if self.min_node is None or (
heap2.root_list != None and heap2.min_node.key < self.min_node.key
):
self.min_node = heap2.min_node
self.total_nodes += heap2.total_nodes
def insert(self, key):
"""
Insert a node into the heap.
"""
node = self.Node(key)
node.right = node
node.left = node
if self.min_node is None:
self.root_list = node
self.min_node = node
else:
self._append_root(node)
# node with key lower than the min_node is the new min_node
if node.key < self.min_node.key:
self.min_node = node
self.total_nodes += 1
return node
def extract_min_node(self):
"""
Return and remove the minimum node
in the tree.
"""
z = self.min_node
if z != None:
# add children to the root list
if z.child != None:
children = [x for x in self.iterate(z.child)]
                for child in children:
                    # promoted children become roots and must not keep a stale parent pointer
                    child.parent = None
                    self._append_root(child)
self._remove_root(z)
# only node and no children
if z == z.right:
self.min_node = None
else:
self.min_node = z.right
self._consolidate()
self.total_nodes -= 1
return z
def _consolidate(self):
"""
Combines roots of the same degree, consolidating
the list into an unordered list of binomial trees.
"""
A = [None] * self.total_nodes
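        # A[d] caches the root of degree d seen so far; roots of equal degree are
        # linked together until every degree occurs at most once.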
# process root list
root_nodes = [x for x in self.iterate(self.root_list)]
for root in root_nodes:
x = root
d = x.degree
while A[d] != None:
y = A[d]
if x.key > y.key:
# exchange x and y to ensure x is root
# after linking them
temp = x
x, y = y, temp
self._link(y, x)
A[d] = None
d += 1
A[d] = x
# find new min node
self.min_node = None
for a in A:
if a != None:
if self.min_node == None:
self.min_node = a
else:
if a.key < self.min_node.key:
self.min_node = a
def _append_child(self, parent, child):
"""
Append a child to parent.
"""
if parent.child == None:
parent.child = child
child.left = child
child.right = child
else:
p_child = parent.child
last_node = p_child.left
child.right = p_child
child.left = last_node
last_node.right = child
p_child.left = child
parent.degree += 1
child.parent = parent
def _remove_child(self, parent, child):
"""
Remove a child from parent.
"""
if parent.child == parent.child.right:
parent.child = None
elif parent.child == child:
parent.child = child.right
child.right.parent = parent
child.left.right = child.right
child.right.left = child.left
parent.degree -= 1
def _link(self, y, x):
"""
Link child x to parent y.
"""
self._remove_root(y)
# make y a child of x
self._append_child(x, y)
y.mark = False
def _cut(self, x, y):
"""
Cut the link between x and y and place
x in the root list.
"""
self._remove_child(y, x)
self._append_root(x)
if x.key < self.min_node.key:
self.min_node = x
x.parent = None
x.mark = False
def _cascading_cut(self, y):
"""
Cascading cut of y to obtain good time bounds.
"""
z = y.parent
if z != None:
if y.mark == False:
y.mark = True
else:
                # cut y from its parent and keep cascading up the tree
                self._cut(y, z)
                self._cascading_cut(z)
def decrease_key(self, node, key):
"""
Decrease the key of a node in the heap.
"""
if key > node.key:
raise Exception("Key value larger than the nodes key")
node.key = key
parent = node.parent
if parent != None and node.key < parent.key:
self._cut(node, parent)
self._cascading_cut(parent)
if key < self.min_node.key:
self.min_node = node
def delete(self, node):
"""
Delete a node from the heap.
"""
self.decrease_key(node, -sys.maxsize - 1)
self.extract_min_node()
def get_all_nodes(self):
"""
Get all nodes in the heap in a list.
"""
return self._get_nodes(self.root_list)
def _get_nodes(self, node):
"""
Get all neighbours of node and recursively find children to
those nodes and place them into a list.
"""
nodes = []
for n in self.iterate(node):
nodes.append(n)
if n.child:
nodes += self._get_nodes(n.child)
return nodes
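if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): build a heap, peek at
    # the minimum, decrease a key and pop the two smallest keys.
    heap = FibonacciHeap()
    nodes = [heap.insert(k) for k in (7, 3, 9, 1)]
    print(heap.find_min().key)          # 1
    heap.decrease_key(nodes[2], 0)      # 9 -> 0 becomes the new minimum
    print(heap.extract_min_node().key)  # 0
    print(heap.extract_min_node().key)  # 1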
```
#### File: algorithms/tests/test_heap.py
```python
from algorithms.heap import (
BinaryHeap,
get_skyline,
max_sliding_window,
k_closest,
fibonacci_heap
)
import unittest
class TestBinaryHeap(unittest.TestCase):
"""
Test suite for the binary_heap data structures
"""
def setUp(self):
self.min_heap = BinaryHeap()
self.min_heap.insert(4)
self.min_heap.insert(50)
self.min_heap.insert(7)
self.min_heap.insert(55)
self.min_heap.insert(90)
self.min_heap.insert(87)
def test_insert(self):
# Before insert 2: [0, 4, 50, 7, 55, 90, 87]
# After insert: [0, 2, 50, 4, 55, 90, 87, 7]
self.min_heap.insert(2)
self.assertEqual([0, 2, 50, 4, 55, 90, 87, 7],
self.min_heap.heap)
self.assertEqual(7, self.min_heap.currentSize)
def test_remove_min(self):
ret = self.min_heap.remove_min()
# Before remove_min : [0, 4, 50, 7, 55, 90, 87]
# After remove_min: [7, 50, 87, 55, 90]
# Test return value
self.assertEqual(4, ret)
self.assertEqual([0, 7, 50, 87, 55, 90],
self.min_heap.heap)
self.assertEqual(5, self.min_heap.currentSize)
class TestSuite(unittest.TestCase):
def test_get_skyline(self):
buildings = [[2, 9, 10], [3, 7, 15], [5, 12, 12],
[15, 20, 10], [19, 24, 8]]
# Expect output
output = [[2, 10], [3, 15], [7, 12], [12, 0], [15, 10],
[20, 8], [24, 0]]
self.assertEqual(output, get_skyline(buildings))
def test_max_sliding_window(self):
nums = [1, 3, -1, -3, 5, 3, 6, 7]
self.assertEqual([3, 3, 5, 5, 6, 7], max_sliding_window(nums, 3))
def test_k_closest_points(self):
points = [(1, 0), (2, 3), (5, 2), (1, 1), (2, 8), (10, 2), (-1, 0), (-2, -2)]
self.assertEqual([(-1, 0), (1, 0)], k_closest(points, 2))
self.assertEqual([(1, 1), (-1, 0), (1, 0)], k_closest(points, 3))
self.assertEqual([(-2, -2), (1, 1), (1, 0), (-1, 0)], k_closest(points, 4))
self.assertEqual([(10, 2), (2, 8), (5, 2), (-2, -2), (2, 3),
(1, 0), (-1, 0), (1, 1)], k_closest(points, 8))
class TestFibonacciHeap(unittest.TestCase):
"""
Test the fibonacci heap and all the interfaces of the data structure
"""
def test_fibonacci_heap_find_min(self):
"""
Test that the find_min method of the fibonacci heap returns:
1. None if the heap is empty
2. the smallest data node if there is data in the heap
"""
fh = fibonacci_heap.FibonacciHeap()
# test case 1
self.assertEqual(None, fh.find_min())
data = [4, 5, 7, 1, 3, 6, 10, 30]
for x in data:
fh.insert(x)
# test case 2
self.assertEqual(1, fh.find_min().key)
def test_fibonacci_heap_extract_min(self):
"""
Test that the extract_min_node method returns the node with
the smallest data from the heap and removes it from the heap
"""
fh = fibonacci_heap.FibonacciHeap()
data = [4, 5, 7, 1, 3, 6, 10, 30]
for x in data:
fh.insert(x)
self.assertEqual(fh.total_nodes, len(data))
self.assertEqual(fh.find_min().key, 1)
n = fh.extract_min_node()
self.assertEqual(n.key, 1)
self.assertEqual(fh.total_nodes, len(data) - 1)
self.assertEqual(fh.find_min().key, 3)
def test_fibonacci_heap_insert(self):
"""
        Test that the insert method inserts an element and does:
1. if the heap is empty
1.1. sets the min_node to the inserted node
1.2. sets the size of the heap to 1
2. if the heap is not empty
2.1. increase the size of the heap by 1
2.2. leave the min_node unchanged if the node is larger
2.3. change the min_node if the node is smaller than min_node
"""
fh = fibonacci_heap.FibonacciHeap()
self.assertEqual(fh.total_nodes, 0)
self.assertEqual(fh.find_min(), None)
# test case 1:
fh.insert(2)
self.assertEqual(fh.total_nodes, 1)
self.assertEqual(fh.find_min().key, 2)
# test case 2.1 and 2.2:
fh.insert(4)
self.assertEqual(fh.total_nodes, 2)
self.assertEqual(fh.find_min().key, 2)
# test case 2.1 and 2.3:
fh.insert(1)
self.assertEqual(fh.total_nodes, 3)
self.assertEqual(fh.find_min().key, 1)
def test_fibonacci_heap_decrease_key(self):
"""
Test the decrease_key method of the fibonacci heap
1. Should return the new min value after decreased key.
2. Verifies that the heap looks as expected after decreased key.
        3. Checks to see that the heap does not still contain the value
that has been decreased.
        4. Expects an exception to be thrown when the decrease key value is
larger than min.
5. Decreases the key of a child so that it is smaller than the parent,
then verifies that the node with decreased key (which is now min_node) is in root_list.
"""
fh = fibonacci_heap.FibonacciHeap()
data = [4, 7, 6, 3, 5]
for x in data:
fh.insert(x)
fh.decrease_key(fh.find_min(), 2)
# test case 1
self.assertEqual(fh.find_min().key, 2)
# test case 2: Positive
decrease_data = [4, 7, 6, 2, 5]
for x in fh.iterate(fh.root_list):
self.assertTrue(x.key in decrease_data)
# test case 3: Negative
nodes = fh.get_all_nodes()
for i in nodes:
self.assertFalse(i.key == 3)
# test case 4: Exception
with self.assertRaises(Exception):
fh.decrease_key(fh.find_min(), 8)
# test case 5: After extract
fh.extract_min_node()
self.assertEqual(fh.root_list.child.right.key, 5)
fh.decrease_key(fh.root_list.child, 1)
self.assertEqual(fh.root_list.child.key, 5)
self.assertEqual(fh.find_min().key, 1)
self.assertTrue(fh.find_min() in fh.iterate(fh.root_list))
# test case 6: Only one child
fh2 = fibonacci_heap.FibonacciHeap()
fh2.insert(4)
fh2.insert(7)
fh2.extract_min_node()
fh2.decrease_key(fh2.find_min(), 2)
self.assertEqual(fh2.root_list.child, None)
def test_fibonacci_heap_merge(self):
"""
Test that the merge method of the fibonacci heap merges two
        separate heaps correctly
        1. the resulting heap's root_list should be a concatenation
of heap1's and heap2's root_lists
2. merging a heap with another empty heap should not change
the original heap
        3. merging an empty heap with another non-empty heap should
change the heap's min_node to the min_node of the merged
heap
"""
a = fibonacci_heap.FibonacciHeap()
b = fibonacci_heap.FibonacciHeap()
empty_heap = fibonacci_heap.FibonacciHeap()
# Fill two heaps with data
a_data = [4, 2, 6, 3, 5]
for x in a_data:
a.insert(x)
b_data = [10, 12, 16, 100]
for x in b_data:
b.insert(x)
# test case 1
a.merge(b)
for x in a.iterate(a.root_list):
self.assertTrue(x.key in a_data or x.key in b_data)
# test case 2
min_key_before = int(b.find_min().key)
numel_before = b.total_nodes
b.merge(empty_heap)
self.assertEqual(b.find_min().key, min_key_before)
self.assertEqual(b.total_nodes, numel_before)
# test case 3
self.assertEqual(empty_heap.find_min(), None)
empty_heap.merge(b)
self.assertEqual(b.find_min().key, empty_heap.find_min().key)
self.assertEqual(empty_heap.total_nodes, b.total_nodes)
def test_fibonacci_heap_delete(self):
"""
Test that the delete method of the fibonacci heap deletes nodes
from the heap with test cases
1. delete a root element and keep structure of tree
2. delete a child in the tree and keep structure of tree
"""
fh = fibonacci_heap.FibonacciHeap()
data = [1, 10, 25, 0, 2]
for x in data:
fh.insert(x)
# At this point, all elements of the tree are root elements
self.assertEqual(fh.total_nodes, 5)
# test case 1
del_node_key = fh.root_list.key
fh.delete(fh.root_list)
self.assertEqual(fh.total_nodes, 4)
nodes = fh.get_all_nodes()
for node in nodes:
self.assertTrue(node.key != del_node_key)
# test case 2
del_node_key = fh.root_list.child.key
fh.delete(fh.root_list.child)
self.assertEqual(fh.total_nodes, 3)
nodes = fh.get_all_nodes()
for node in nodes:
self.assertTrue(node.key != del_node_key)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "johanlahti/urban-lu-model",
"score": 3
} |
#### File: urban-lu-model/script/MacroDemand.py
```python
from osgeo import gdal
import numpy
import random
import Utils
def checkMacroDemand(thisYearsMacroDemand):
tot = 0
for luNr in thisYearsMacroDemand.keys():
tot += thisYearsMacroDemand[luNr]
return tot
def applyMacroDemand(macroDemand, year, luNrsDyn, luNrsStat, potArrs, arrLU):
""" Make the new land use map by changing the required cells for current time step,
for the dynamic land uses. """
n = 8 # neighborhood size
thisYearsMacroDemand = macroDemand[year]
#lastYearsMacroDemand = macroDemand[year-1]
# Make a new arr for the new land use array, where all dynamic LU are replaced by number 1.
newArrLU = Utils.findAndReplace(arrLU.copy(), find=luNrsDyn, replaceWith=1, within=[(n,n),(-n,-n)])
# Cut away the surrounding "frame" of neighborhood cells which are not supposed to be calculated.
for nr in potArrs.keys():
potArrs[nr] = potArrs[nr][n:-n, n:-n]
print "max", potArrs[nr].max()
# This raster keeps track of which land use has been changed already so that it wont change again.
arrLUChange = numpy.zeros(arrLU.shape, dtype=numpy.integer) # I would like int0/unit0 here instead
# Iterate
while checkMacroDemand(thisYearsMacroDemand)>0:
mx = 0 # int Note! Maybe this should be initialized as None?
luNr = None # int
tempMax = None # int
# Get max value of all potential arrays and find out which array (LuNr) holds it
for tLu in potArrs.keys():
# If the land use demand is satisfied for one land use,
# then don't try to find the potential for this LU. When
            # all LU are satisfied, the loop will stop automatically (see while-statement)
if thisYearsMacroDemand[tLu]<=0:
continue
tempMax = potArrs[tLu].max()
if tempMax > mx:
mx = tempMax # save current max val
luNr = tLu # save lu (which has the highest potential)
if luNr==None:
print "Breaking when macroDemandStatus is: %i" %(checkMacroDemand(thisYearsMacroDemand))
break
# Find out the xy-location of the max value
#print "lu", lu, "tempMax", tempMax, "mx", mx, "luNr", luNr
potArr = potArrs[luNr] # get potArr for this land use
sortedArr = potArr.argsort() # sort it according to potential
rowIndex = 0
maxValList = []
        for column in sortedArr: # (each entry is a row of column indices sorted by potential)
# Get the column index which has the max value, for row nr = rowIndex
mxColIndex = column[-1]
# get the max value, of the argsorted array, for comparison with max
val = potArr[rowIndex, mxColIndex]
if val == mx: # if there is more than one max-value... choose randomly
maxValList.append((rowIndex, mxColIndex))
rowIndex+=1
# One (or more) locations of the max potential value found,
# inserted into the list maxValList. In case many values - pick a random.
random.shuffle(maxValList)
row, col = maxValList[0]
luRow, luCol = row+n, col+n # Add the neighborhood size to the row and col.
#print "maxVal=", potArr[row, col]
potArr[row, col] = -999 # make sure it's not again selected for land use change.
# Don't allow LU change if LU has already been assigned once.
if arrLUChange[luRow, luCol]==1:
continue
nrExistingLU = arrLU[luRow, luCol]
if nrExistingLU not in luNrsStat:
# Allow land use change
newArrLU[luRow, luCol] = luNr
arrLUChange[luRow, luCol] = 1 # register as "changed"
# If replacing dynamic land use (which is not its own), increment the replaced LU demand by one.
if nrExistingLU in luNrsDyn and nrExistingLU!=luNr:
thisYearsMacroDemand[nrExistingLU] += 1
            # Always decrement the value when satisfying LU demand.
thisYearsMacroDemand[luNr] -= 1
# ...and decrease loop counters value by 1.
# If all demand satisfied for this land use (empty) - take away lu potential arr
# This means, it won't look for LU change potential in this array anymore.
#if thisYearsMacroDemand[luNr] <= 0:
#print "del potArrs[%i]" % luNr
#del potArrs[luNr]
else:
# Continue, without subtracting from the demand counter - since the location of
# existing land use was not allowed to change
continue
#print macroDemandStatus
# Areas not assigned any dynamic land use gets value nr 1 - i.e. same as vacant lu.
return newArrLU
def makeMacroDemandDict(macroDemandNodes):
""" output should look like this:
dict = {1998 : {3 : 187, 4 : 459}, 1999 : {3 : 193, 4 : 498}}"""
pass
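    # A hedged sketch of one possible implementation (the structure of
    # `macroDemandNodes` is not shown in this file and is assumed here to be an
    # iterable of (year, landUseNr, amount) triples):
    #
    #     demand = {}
    #     for year, luNr, amount in macroDemandNodes:
    #         demand.setdefault(year, {})[luNr] = amount
    #     return demand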
if __name__=='__main__':
pass
```
#### File: urban-lu-model/script/MicroInfluence.py
```python
import numpy
import Constants
def getNeighbourPotential(tLu, infl, statLU, luArr, row, col):
"""Calculates the CA neighbourhood potential of the input
land use type (identified by cell number). This is the "pure" CA influence.
    This is later added up with road influence, inherent suitability and zoning. """
# tLu: the land use type (integer) for which the potential
# raster is calculated. One potential raster is made for each
# land use type.
# centerCellVal: is the existing land use type (integer) at the center cell.
#
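    # Note added for clarity (assumed structure, not taken from Constants.py):
    # Constants.distances is expected to map each neighbourhood distance to a
    # list of (row, col) offsets, e.g. {1: [(-1, 0), (0, -1), (0, 1), (1, 0)], ...},
    # and infl to be a nested dict indexed as infl[targetLU][existingLU][distance].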
centerCellVal = luArr[row][col]
if centerCellVal in statLU or \
centerCellVal == 0: # 0 = outside region
return -99999
pot = 0
dists = Constants.distances.keys()
dists.sort()
# iterate through all distances
for dist in dists:
# Get the cells' coordinates for each distance unit
for cellXY in Constants.distances[dist]:
# Fetch the value of one cell from the neighbourhood
iLu = luArr[row+cellXY[0]][col+cellXY[1]]
# Test if any influence was defined between these land uses
# at this distance
try:
fromCellInfluence = infl[tLu][iLu][dist]
except:
# If not, continue with the next cell (next loop)
pass
else:
pot += fromCellInfluence
#if centerCellVal == tLu: # add inertia value if land use exists already
#pot += infl[tLu][-1]
try:
# Inertia is a negative value. Gives (negative) infl from existing lu on candidate lu.
inertia = infl[tLu][centerCellVal][0]
pot += inertia
except: pass
return pot
def makePotentialArrs(luArr, rows, cols, infl, dynKeys, statLU):
""" Makes the potential arrays for each dynamic land use. """
nSize = 8
potArrs = {} # container for the potential arrays
# Make a potential array for each dynamic land use
for tLu in dynKeys:
nArr = numpy.zeros((rows, cols), dtype=numpy.integer)
# Iterate through the land use array (at time t) and
# do it once for each targetLU. Then add it to the collection.
for row in range(nSize, rows-nSize):
for col in range(nSize, cols-nSize):
pot = getNeighbourPotential(tLu, infl, statLU, luArr, row, col)
# Assign neighbourhood potential to the cell
# if it's not unreplaceable...
# if pot != -9999:
nArr[row][col] = pot
#kRoads = 0.80
#kN = 0.20
potArr = nArr
# Add the potential array to the collection
potArrs[tLu] = potArr
return potArrs
``` |
{
"source": "johan--/LittleBeeGeo",
"score": 2
} |
#### File: app/http_handlers/g_json_handler.py
```python
from app.constants import S_OK, S_ERR
import random
import math
import base64
import time
import ujson as json
from app import cfg
from app import util
def g_json_handler():
db_result = util.db_find('bee', {})
return db_result
```
#### File: app/http_handlers/get_google_address_handler.py
```python
from app.constants import S_OK, S_ERR
import random
import math
import base64
import time
import ujson as json
from app import cfg
from app import util
def get_google_address_handler(params):
n_db_result = util._int(params.get('n', 1))
db_result = util.db_find_it('bee_csv', {'is_processed_address': {'$ne': True}}, {'_id': False, 'csv_key': True, 'google_address': True, 'address': True, 'county_and_town': True})
db_result_total = db_result.count()
db_result = db_result.limit(n_db_result)
db_result = list(db_result)
return {"status": "OK", "total": db_result_total, "result": db_result}
```
#### File: app/http_handlers/post_ad_version_handler.py
```python
from app.constants import S_OK, S_ERR
import random
import math
import base64
import time
import ujson as json
from app import cfg
from app import util
def post_ad_version_handler(params):
csv_key = params.get('csv_key', '')
if not csv_key:
return {"success": False, "error_msg": "no csv_key"}
ad_versions = params.get('ad_versions', [])
cfg.logger.debug('to update ad_versions: csv_key: %s ad_versions: %s', csv_key, ad_versions)
util.db_update('bee_csv', {'csv_key': csv_key}, {'ad_versions': ad_versions, "is_processed_ad_version": True})
db_result = util.db_find_one('bee_csv', {"csv_key": csv_key}, {"_id": False, "town": True, "count": True, "deliver_time": True, "deliver_date": True, "save_time": True, "geo": True, "county": True, "address": True, "user_name": True, "is_processed_ad_version": True, "is_processed_address": True, "csv_key": True, "ad_versions": True, "version_text": True, "memo": True, "deliver_status": True})
the_id = str(db_result.get('save_time', 0)) + "_" + util.uuid()
db_result['the_id'] = the_id
if db_result.get('is_processed_ad_version', False) and db_result.get('is_processed_address', False):
util.db_update('bee', {'csv_key': csv_key}, db_result)
```
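For reference, a minimal sketch of the payload `post_ad_version_handler` expects, inferred only from the keys it reads above; the `csv_key` value and version labels below are invented.

```python
# Hypothetical example payload for post_ad_version_handler (values invented).
example_params = {
    "csv_key": "some-csv-key",       # hypothetical value
    "ad_versions": ["A", "B"],       # hypothetical version labels
}
# post_ad_version_handler(example_params) marks the bee_csv row as processed;
# once the address is also processed, the row is copied into the 'bee' collection.
```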
#### File: johan--/LittleBeeGeo/parser.py
```python
import json
import urllib2
import requests
def outputResult(outfile):
ofd = open(outfile, 'w')
c = 0
ofd.write("[")
for k, comp in companys.iteritems():
        if c > 0: ofd.write(", \n")  # separator between items (skip before the first)
ofd.write("%s" % json.dumps(comp, ensure_ascii=False, encoding='utf-8'))
c += 1
ofd.write("]")
ofd.close()
def parseDocs():
# url = "https://spreadsheets.google.com/pub?key=0Ah-9opsSMy6LdDZBeEdpb0Z2SUlocWE0YVZoU0hmS2c&output=csv"
url = "https://docs.google.com/spreadsheet/fm?id=t6AxGioFvIIhqa4aVhSHfKg.09543047388341478353.8325993045666016498&fmcmd=5&gid=0"
'''
values = { 'encodeURIComponent': '1',
'step': '1',
'firstin': '1',
'off':'1',
'TYPEK':target }
'''
#data = {}
#req_data = urllib.urlencode(data)
#res = requests.get(url)
#print res.content
with open('ccc.csv', "r") as fd:
lines = fd.readlines()
begin = True
for line in lines:
if begin:
begin = False
continue
tokens = line.strip().split(',')
road = tokens[2]
num1 = tokens[3]
num2 = tokens[4]
print "road: (%s, %s)" % (repr(road), road.__class__.__name__)
print "num1: (%s, %s)" % (repr(num1), num1.__class__.__name__)
road_uni = unicode(road, 'utf-8', 'ignore')
num1_uni = unicode(num1, 'utf-8', 'ignore')
num2_uni = unicode(num2, 'utf-8', 'ignore')
addr1 = road_uni + num1_uni + u'路'
addr2 = road_uni + num2_uni + u'路'
#addr1 = urllib2.quote(addr1.encode('utf-8'))
#addr2 = urllib2.quote(addr2.encode('utf-8'))
print addr1.encode('utf-8')
print addr2.encode('utf-8')
res1 = requests.get("http://maps.googleapis.com/maps/api/geocode/json?address=%s&sensor=false" % addr1)
res2 = requests.get("http://maps.googleapis.com/maps/api/geocode/json?address=%s&sensor=false" % addr2)
#print res1.content
print "-----------------------------------------------"
#print res2.content
j1 = json.loads(res1.content)
j2 = json.loads(res2.content)
lat1 = j1["results"][0]["geometry"]["location"]["lat"]
lat2 = j2["results"][0]["geometry"]["location"]["lat"]
lng1 = j1["results"][0]["geometry"]["location"]["lng"]
lng2 = j2["results"][0]["geometry"]["location"]["lng"]
print "loc1: %s, %s loc2: %s, %s" %(lng1, lat1, lng2, lat2)
if __name__ == '__main__':
#parseDirectors('sii')
parseDocs()
```
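The parser above reads the Google Geocoding API response through `j["results"][0]["geometry"]["location"]`; for reference, a minimal sketch of that response shape (the coordinates are invented):

```python
# Illustrative subset of a Google Geocoding API response as used by parseDocs();
# the coordinates are made up.
example_response = {
    "status": "OK",
    "results": [
        {"geometry": {"location": {"lat": 25.0330, "lng": 121.5654}}},
    ],
}
lat = example_response["results"][0]["geometry"]["location"]["lat"]
lng = example_response["results"][0]["geometry"]["location"]["lng"]
```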
#### File: LittleBeeGeo/simpleParser/parser.py
```python
import json
import urllib2
import requests
import time
import datetime
def toTimestamp(raw):
tokens = raw.split(',')
def strToDate(rawdate):
d = datetime.datetime.strptime(rawdate, '%Y/%m/%d')
return time.mktime(d.timetuple()) + 1e-6 * d.microsecond
def toUnicode(string):
return unicode(string, 'utf-8', 'ignore')
def parseDocs(lines): # input: line array
# url = "https://spreadsheets.google.com/pub?key=0Ah-9opsSMy6LdDZBeEdpb0Z2SUlocWE0YVZoU0hmS2c&output=csv"
result = []
#url = "https://docs.google.com/spreadsheet/fm?id=t6AxGioFvIIhqa4aVhSHfKg.09543047388341478353.8325993045666016498&fmcmd=5&gid=0"
begin = True
for line in lines:
if begin:
begin = False
continue
tokens = line.strip().split(',')
if tokens:
time = tokens[0]
name = tokens[1]
agenda = tokens[2]
city = tokens[3]
road = tokens[4]
num1 = tokens[5]
num2 = tokens[6]
amount = tokens[7]
rawdate = tokens[8]
deliver_status = tokens[9]
note = tokens[10]
#print "road: (%s, %s)" % (repr(road), road.__class__.__name__)
#print "num1: (%s, %s)" % (repr(num1), num1.__class__.__name__)
# make address
name_uni = unicode(name, 'utf-8', 'ignore')
agenda_uni = unicode(agenda, 'utf-8', 'ignore')
city_uni = unicode(city, 'utf-8', 'ignore')
road_uni = unicode(road, 'utf-8', 'ignore')
num1_uni = unicode(num1, 'utf-8', 'ignore')
num2_uni = unicode(num2, 'utf-8', 'ignore')
dt_uni = toUnicode(deliver_status)
note_uni = toUnicode(note)
addr1 = city_uni + road_uni + num1_uni + u'路'
addr2 = city_uni + road_uni + num2_uni + u'路'
print addr1.encode('utf-8')
print addr2.encode('utf-8')
res1 = requests.get("http://maps.googleapis.com/maps/api/geocode/json?address=%s&sensor=false" % addr1)
res2 = requests.get("http://maps.googleapis.com/maps/api/geocode/json?address=%s&sensor=false" % addr2)
#print res1.content
print "-----------------------------------------------"
#print res2.content
j1 = json.loads(res1.content)
j2 = json.loads(res2.content)
lat1 = j1["results"][0]["geometry"]["location"]["lat"]
lat2 = j2["results"][0]["geometry"]["location"]["lat"]
lng1 = j1["results"][0]["geometry"]["location"]["lng"]
lng2 = j2["results"][0]["geometry"]["location"]["lng"]
print "loc1: %s, %s loc2: %s, %s" %(lng1, lat1, lng2, lat2)
geoinfo = { "type": "LineString",
"coordinates": [[lng1, lat1], [lng2, lat2]]}
item = { "save-time": strToDate(rawdate),
"agenda": agenda_uni,
"user_name": name_uni,
"county":city_uni,
"address": road_uni,
"start_number": int(num1),
"end_number": int(num2),
"deliver_time": strToDate(rawdate),
"deliver_status": dt_uni,
"memo": note_uni,
"geo": geoinfo,
"extension": {}}
result.append(item)
with open("deliverData.json", "w") as fd_out:
json.dump(result, fd_out)
fd_out.close()
if __name__ == '__main__':
with open('ccc2.csv', "r") as fd:
lines = fd.readlines()
parseDocs(lines)
``` |
{
"source": "johanloones/olympic-hero",
"score": 4
} |
#### File: johanloones/olympic-hero/code.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
#Code starts here
#Code starts here
#Load the dataframe from the path using pd.read_csv() and store the dataframe in a variable called 'data'.
data=pd.read_csv(path)
print(data)
#In the dataframe, rename the column Total to Total_Medals
data.rename(columns={'Total':'Total_Medals'},inplace=True)
print(data)
#Display first 10 records using "head()" function to take a look at the dataframe.
print(data.head(10))
# --------------
#Code starts here
#Create a new column Better_Event that stores 'Summer','Winter' or 'Both' based on the comparison between the total medals won in the Summer event and the Winter event
#(i.e. comparison between the Total_Summer and Total_Winter columns) using the "np.where()" function.
data['Better_Event']=np.where(data['Total_Summer']==data['Total_Winter'],'Both',(np.where(data['Total_Summer']>data['Total_Winter'],'Summer','Winter')))
#Find out which has been a better event with respect to all the performing countries by using value_counts() function and
#store it in a new variable called 'better_event'.
better_event=data['Better_Event'].value_counts().idxmax()
print(better_event)
# --------------
#Code starts here
#Create a new dataframe subset called 'top_countries' with the columns ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] only
top_countries=data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
print(top_countries)
#Drop the last row from 'top_countries'(The last row contains the sum of the medals)
top_countries.drop(top_countries.tail(1).index,axis=0,inplace=True)
print(top_countries)
#Create a function called 'top_ten' that:
#Takes the dataframe 'top_countries' and a column name as parameters.
def top_ten(x,y):
#Creates a new empty list called 'country_list'
country_list=[]
#Find the top 10 values for that particular column(for e.g. 'Total_Summer') using "nlargest()" function
#From the dataframe returned by nlargest function, slices the Country_Name column and stores it in the 'country_list' list
country_list=x.nlargest(columns=y,n=10).Country_Name
#Returns the 'country_list'
print(country_list)
return country_list
#Call the 'top_ten()' function for the three columns :Total_Summer,Total_Winter and Total_Medals and
#store their respective results in lists called 'top_10_summer', 'top_10_winter' and 'top_10'
top_10_summer=list(top_ten(top_countries,'Total_Summer'))
#top_10_summer=top_ten(x=top_countries,y=['Total_Summer'])
top_10_winter=list(top_ten(top_countries,'Total_Winter'))
top_10=list(top_ten(top_countries,'Total_Medals'))
#Create a new list 'common' that stores the common elements between the three lists('top_10_summer', 'top_10_winter' and 'top_10')
common=list((set(top_10_summer)) & (set(top_10_winter)) & (set(top_10)))
print(common)
# --------------
#Code starts here
#Take the three previously created lists(top_10_summer, top_10_winter, top_10)
#Subset the dataframe 'data' based on the country names present in the list top_10_summer using "isin()" function on the column
#Country_Name. Store the new subsetted dataframes in 'summer_df'. Do the similar operation using top_10_winter and top_10 and store
#the subset dataframes in 'winter_df' & 'top_df' respectively.
summer_df=data[data['Country_Name'].isin(top_10_summer)]
winter_df=data[data['Country_Name'].isin(top_10_winter)]
top_df=data[data['Country_Name'].isin(top_10)]
#Take each subsetted dataframe and plot a bar graph between the country name and total medal count according to the event
#(For e.g. for 'summer_df' plot a bar graph between Country_Name and Total_Summer)
#Modify the axes info accordingly.
fig,(ax1,ax2,ax3)=plt.subplots(nrows=3,ncols=1,figsize=(10,15))
plt.tight_layout(pad=10, h_pad=None, w_pad=None, rect=None)
# Bar Plot Country_Name and Total_Summer for summer_df
ax1.bar(summer_df['Country_Name'],height=summer_df['Total_Summer'])
ax1.set_xlabel('Country Name')
ax1.set_ylabel('Medal Count')
ax1.tick_params(axis='x',rotation=90)
ax1.set_title('Country Summer Medals Count')
# Bar Plot Country_Name and Total_Winter for winter_df
ax2.bar(winter_df['Country_Name'],height=winter_df['Total_Winter'])
ax2.set_xlabel('Country Name')
ax2.set_ylabel('Medal Count')
ax2.tick_params(axis='x',rotation=90)
ax2.set_title('Country Winter Medals Count')
# Bar Plot Country_Name and Total_Medal for top_df
ax3.bar(top_df['Country_Name'],height=top_df['Total_Medals'])
ax3.set_xlabel('Country Name')
ax3.set_ylabel('Medal Count')
ax3.tick_params(axis='x',rotation=90)
ax3.set_title('Country Total Medals Count')
# --------------
#Code starts here
#In the dataframe 'summer_df'(created in the previous function) , create a new column Golden_Ratio which is the quotient after
#dividing the two columns Gold_Summer and Total_Summer. Find the max value of Golden_Ratio and the country associated with it
#and store them in summer_max_ratio and summer_country_gold respectively.
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio=summer_df['Golden_Ratio'].max()
summer_country_gold=str(list(summer_df['Country_Name'][summer_df['Golden_Ratio'] == summer_max_ratio])[0])
print(summer_max_ratio,summer_country_gold,summer_df.head(5))
#In the dataframe 'winter_df'(created in the previous function) , create a new column Golden_Ratio which is the quotient after
#dividing the two columns Gold_Winter and Total_Winter. Find the max value of Golden_Ratio and the country associated with it
#and store them in 'winter_max_ratio' and 'winter_country_gold' respectively.
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio=winter_df['Golden_Ratio'].max()
winter_country_gold=str(list(winter_df['Country_Name'][winter_df['Golden_Ratio']== winter_max_ratio])[0])
print(winter_max_ratio,winter_country_gold,winter_df.head(5))
#In the dataframe top_df'(created in the previous function) , create a new column Golden_Ratio which is the quotient after
#dividing the two columns Gold_Total and Total_Medals. Find the max value of Golden_Ratio and the country associated with it
#and store them in top_max_ratio' and 'top_country_gold' respectively.
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio=top_df['Golden_Ratio'].max()
top_country_gold=str(list(top_df['Country_Name'][top_df['Golden_Ratio']==top_max_ratio])[0])
print(top_max_ratio,top_country_gold,top_df.head(5))
# --------------
#Code starts here
#Drop the last row from the dataframe(The last row contains the total of all the values calculated vertically)
#and save the result in 'data_1'
data_1=data.drop(data.tail(1).index,axis=0)
print(data_1)
#Update the dataframe 'data_1' to include a new column called Total_Points which is a weighted value where each
#gold medal counts for 3 points, silver medals for 2 points, and bronze medals for 1 point.(i.e. You need to take weighted
#value of Gold_Total, Silver_Total and Bronze_Total)
data_1['Total_Points']=data_1['Gold_Total'].apply(lambda x:x*3)+data_1['Silver_Total'].apply(lambda x:x*2)+data_1['Bronze_Total'].apply(lambda x:x*1)
#Find the max value of Total_Points in 'data_1' and the country assosciated with it and store it in variables 'most_points'
#and 'best_country' respectively.
most_points=data_1['Total_Points'].max()
best_country=str(list(data_1['Country_Name'][data_1['Total_Points']==most_points])[0])
print(data_1.head(5),most_points,best_country)
# --------------
#Code starts here
#Create a single row dataframe called 'best' from 'data' where value of column Country_Name is equal to 'best_country'
#(The variable you created in the previous task)
#list(pd.Series(best_country))
best=data[data['Country_Name'].isin(list(pd.Series(best_country)))]
#Subset 'best' even further by only including the columns : ['Gold_Total','Silver_Total','Bronze_Total']
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
#Create a stacked bar plot of 'best' using "DataFrame.plot.bar()" function
best.plot.bar(stacked=True)
#Name the x-axis as United States using "plt.xlabel()"
plt.xlabel('United States')
#Name the y-axis as Medals Tally using "plt.ylabel()"
plt.ylabel('Medals Tally')
#Rotate the labels of x-axis by 45o using "plt.xticks()"
plt.xticks(rotation=45)
``` |
{
"source": "johanlundberg/eduid-am",
"score": 3
} |
#### File: eduid_am/tests/__init__.py
```python
__author__ = 'leifj'
import sys
import time
import atexit
import random
import shutil
import tempfile
import unittest
import subprocess
import os
import pymongo
class MongoTemporaryInstance(object):
"""Singleton to manage a temporary MongoDB instance
    Use this for testing purposes only. The instance is automatically destroyed
at the end of the program.
"""
_instance = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
atexit.register(cls._instance.shutdown)
return cls._instance
def __init__(self):
self._tmpdir = tempfile.mkdtemp()
self._port = random.randint(40000, 50000)
self._process = subprocess.Popen(['mongod', '--bind_ip', 'localhost',
'--port', str(self._port),
'--dbpath', self._tmpdir,
'--nojournal', '--nohttpinterface',
'--noauth', '--smallfiles',
'--syncdelay', '0',
'--maxConns', '10',
'--nssize', '1', ],
#stdout=open(os.devnull, 'wb'),
#stderr=subprocess.STDOUT)
stdout=sys.stdout,
stderr=sys.stderr,
)
# XXX: wait for the instance to be ready
        # Mongo is usually up almost immediately; we just wait until we can
        # open a connection.
for i in range(10):
time.sleep(0.2)
try:
self._conn = pymongo.Connection('localhost', self._port)
except pymongo.errors.ConnectionFailure:
continue
else:
break
else:
self.shutdown()
assert False, 'Cannot connect to the mongodb test instance'
@property
def conn(self):
return self._conn
@property
def port(self):
return self._port
def shutdown(self):
if self._process:
self._process.terminate()
self._process.wait()
self._process = None
shutil.rmtree(self._tmpdir, ignore_errors=True)
class MongoTestCase(unittest.TestCase):
"""TestCase with an embedded MongoDB temporary instance.
Each test runs on a temporary instance of MongoDB. The instance will
    be listening on a random port between 40000 and 50000.
A test can access the connection using the attribute `conn`.
A test can access the port using the attribute `port`
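    Example (an illustrative sketch; `MyTestCase` and the collection used
    below are made up):

        class MyTestCase(MongoTestCase):
            def test_insert(self):
                db = self.conn['testdb']
                db.docs.insert({'foo': 'bar'})
                assert db.docs.find_one({'foo': 'bar'})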
"""
fixtures = []
def __init__(self, *args, **kwargs):
super(MongoTestCase, self).__init__(*args, **kwargs)
self.db = MongoTemporaryInstance.get_instance()
self.conn = self.db.conn
self.port = self.db.port
def setUp(self):
super(MongoTestCase, self).setUp()
for db_name in self.conn.database_names():
self.conn.drop_database(db_name)
``` |
{
"source": "johanlundberg/pysaml2",
"score": 2
} |
#### File: pysaml2/tests/test_42_enc.py
```python
import re
from contextlib import closing
from saml2.authn_context import INTERNETPROTOCOLPASSWORD
from saml2.server import Server
from saml2.sigver import pre_encryption_part, ASSERT_XPATH, EncryptError
from saml2.sigver import CryptoBackendXmlSec1
from saml2.sigver import pre_encrypt_assertion
from pathutils import xmlsec_path
from pathutils import full_path
__author__ = 'roland'
TMPL_NO_HEADER = """<ns0:EncryptedData xmlns:ns0="http://www.w3.org/2001/04/xmlenc#" xmlns:ns1="http://www.w3.org/2000/09/xmldsig#" Id="{ed_id}" Type="http://www.w3.org/2001/04/xmlenc#Element"><ns0:EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#tripledes-cbc" /><ns1:KeyInfo><ns0:EncryptedKey Id="{ek_id}"><ns0:EncryptionMethod Algorithm="http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p" />{key_info}<ns0:CipherData><ns0:CipherValue /></ns0:CipherData></ns0:EncryptedKey></ns1:KeyInfo><ns0:CipherData><ns0:CipherValue /></ns0:CipherData></ns0:EncryptedData>"""
TMPL = f"<?xml version='1.0' encoding='UTF-8'?>\n{TMPL_NO_HEADER}"
IDENTITY = {"eduPersonAffiliation": ["staff", "member"],
"surName": ["Jeter"], "givenName": ["Derek"],
"mail": ["<EMAIL>"],
"title": ["shortstop"]}
AUTHN = {
"class_ref": INTERNETPROTOCOLPASSWORD,
"authn_auth": "http://www.example.com/login"
}
def test_pre_enc_key_format():
def the_xsd_ID_value_must_start_with_either_a_letter_or_underscore(id):
result = re.match(r"^[a-zA-Z_]", id[0])
return result
def the_xsd_ID_value_may_contain_only_letters_digits_underscores_hyphens_periods(id):
result = re.match(r"^[a-zA-Z0-9._-]*$", id[1:])
return result
tmpl = pre_encryption_part()
for id in (tmpl.id, tmpl.key_info.encrypted_key.id):
assert the_xsd_ID_value_must_start_with_either_a_letter_or_underscore(id)
assert the_xsd_ID_value_may_contain_only_letters_digits_underscores_hyphens_periods(id)
def test_pre_enc_with_pregenerated_key():
tmpl = pre_encryption_part(encrypted_key_id="EK", encrypted_data_id="ED")
expected = TMPL_NO_HEADER.format(
ed_id=tmpl.id,
ek_id=tmpl.key_info.encrypted_key.id,
key_info=''
)
assert str(tmpl) == expected
def test_pre_enc_with_generated_key():
tmpl = pre_encryption_part()
expected = TMPL_NO_HEADER.format(
ed_id=tmpl.id,
ek_id=tmpl.key_info.encrypted_key.id,
key_info=''
)
assert str(tmpl) == expected
def test_pre_enc_with_named_key():
tmpl = pre_encryption_part(key_name="my-rsa-key")
expected = TMPL_NO_HEADER.format(
ed_id=tmpl.id,
ek_id=tmpl.key_info.encrypted_key.id,
key_info='<ns1:KeyInfo><ns1:KeyName>my-rsa-key</ns1:KeyName></ns1:KeyInfo>'
)
assert str(tmpl) == expected
def test_reshuffle_response():
with closing(Server("idp_conf")) as server:
name_id = server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id12")
resp_ = server.create_authn_response(
IDENTITY, "id12", "http://lingon.catalogix.se:8087/",
"urn:mace:example.com:saml:roland:sp", name_id=name_id)
resp2 = pre_encrypt_assertion(resp_)
assert resp2.encrypted_assertion.extension_elements
def test_enc1():
with closing(Server("idp_conf")) as server:
name_id = server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id12")
resp_ = server.create_authn_response(
IDENTITY, "id12", "http://lingon.catalogix.se:8087/",
"urn:mace:example.com:saml:roland:sp", name_id=name_id)
statement = pre_encrypt_assertion(resp_)
tmpl = full_path("enc_tmpl.xml")
# tmpl_file = open(tmpl, "w")
# tmpl_file.write("%s" % pre_encryption_part())
# tmpl_file.close()
data = full_path("pre_enc.xml")
# data_file = open(data, "w")
# data_file.write("%s" % statement)
# data_file.close()
key_type = "des-192"
com_list = [xmlsec_path, "encrypt", "--pubkey-cert-pem", full_path("pubkey.pem"),
"--session-key", key_type, "--xml-data", data,
"--node-xpath", ASSERT_XPATH]
crypto = CryptoBackendXmlSec1(xmlsec_path)
(_stdout, _stderr, output) = crypto._run_xmlsec(com_list, [tmpl])
assert _stderr == ""
assert _stdout == ""
def test_enc2():
crypto = CryptoBackendXmlSec1(xmlsec_path)
with closing(Server("idp_conf")) as server:
name_id = server.ident.transient_nameid(
"urn:mace:example.com:saml:roland:sp", "id12")
resp_ = server.create_authn_response(
IDENTITY, "id12", "http://lingon.catalogix.se:8087/",
"urn:mace:example.com:saml:roland:sp", name_id=name_id)
enc_resp = crypto.encrypt_assertion(resp_, full_path("pubkey.pem"),
pre_encryption_part())
assert enc_resp
if __name__ == "__main__":
test_enc1()
``` |
{
"source": "johanlundberg/python-fido-mds",
"score": 2
} |
#### File: python-fido-mds/scripts/update_metadata.py
```python
import json
import sys
from pathlib import Path
from typing import Optional, Dict, Any, List
from OpenSSL import crypto
from cryptography import x509
from cryptography.x509.oid import NameOID
from jwcrypto import jwk, jws
__author__ = 'lundberg'
METADATA = Path('./fido_alliance_mds.jwt')
ROOT_CERT = Path('./globalsign_root_r3.der')
CN = 'mds.fidoalliance.org'
def load_root_cert(path: Path) -> x509.Certificate:
try:
with open(path, 'rb') as f:
return x509.load_der_x509_certificate(f.read())
except IOError as e:
print(f'Could not open {path}: {e}')
sys.exit(1)
def load_cert_from_str(cert: str) -> x509.Certificate:
raw_cert = f'-----BEGIN CERTIFICATE-----\n{cert}\n-----END CERTIFICATE-----'
return x509.load_pem_x509_certificate(raw_cert.encode())
def get_valid_cert(cert_chain: List[str], cn: str, root_cert: x509.Certificate) -> Optional[x509.Certificate]:
if not cert_chain:
return None
cert_to_check = load_cert_from_str(cert_chain[0]) # first cert is the one used to sign the jwt
# create store and add root cert
store = crypto.X509Store()
store.add_cert(crypto.X509.from_cryptography(root_cert))
# add the rest of the chain to the store
for chain_cert in cert_chain[1:]:
cert = crypto.X509.from_cryptography(load_cert_from_str(chain_cert))
store.add_cert(cert)
ctx = crypto.X509StoreContext(store, crypto.X509.from_cryptography(cert_to_check))
try:
ctx.verify_certificate()
except crypto.X509StoreContextError:
return None
# check if the Common Name matches the verified certificate
if cert_to_check.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value == cn:
return cert_to_check
return None
def load_jwk_from_x5c(x5c: List[str], root_cert: x509.Certificate) -> Optional[jwk.JWK]:
valid_cert = get_valid_cert(cert_chain=x5c, cn=CN, root_cert=root_cert)
if not valid_cert:
return None
try:
_jwk = jwk.JWK.from_pyca(valid_cert.public_key())
return _jwk
except ValueError as e:
print(f'Could not load JWK from certificate chain: {e}')
return None
def load_metadata(path: Path, root_cert: x509.Certificate) -> Optional[Dict[str, Any]]:
_jws = jws.JWS()
try:
with open(path, 'r') as f:
# deserialize jws
_jws.deserialize(raw_jws=f.read())
except IOError as e:
print(f'Could not open {path}: {e}')
except (jws.InvalidJWSObject, IndexError):
print(f'metadata could not be deserialized')
return None
# load JOSE headers
headers = []
if isinstance(_jws.jose_header, list):
for item in _jws.jose_header:
headers.append(item)
elif isinstance(_jws.jose_header, dict):
headers.append(_jws.jose_header)
# verify jws
verified = False
for header in headers:
cert_chain = header.get('x5c', [])
try:
_jwk = load_jwk_from_x5c(x5c=cert_chain, root_cert=root_cert)
_jws.verify(key=_jwk)
verified = True
break
except jws.InvalidJWSSignature:
continue
if verified:
return json.loads(_jws.payload.decode())
return None
def get_metadata(metadata_path: Path, root_cert_path: Path) -> Dict[str, Any]:
root_cert = load_root_cert(path=root_cert_path)
metadata = load_metadata(path=metadata_path, root_cert=root_cert)
return metadata
if __name__ == '__main__':
print(json.dumps(get_metadata(metadata_path=METADATA, root_cert_path=ROOT_CERT), indent=4))
```
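A possible follow-up step, not part of the original script: persisting the verified metadata so it can be bundled with the package. The helper name and output path below are assumptions.

```python
# Hypothetical helper (not in the repository): dump the dict returned by
# get_metadata() to a JSON file.
import json
from pathlib import Path


def save_metadata(metadata: dict, out_path: Path = Path('./metadata.json')) -> None:
    with open(out_path, 'w') as f:
        json.dump(metadata, f, indent=4)
```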
#### File: fido_mds/models/webauthn.py
```python
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import List, Optional, Union
from uuid import UUID
from cryptography import x509
from cryptography.x509 import Certificate
from fido2.cose import ES256, PS256, RS1, RS256, CoseKey, EdDSA
from fido2.ctap2 import AttestationObject
from fido2.utils import websafe_decode
from pydantic import BaseModel, Field, validator
__author__ = 'lundberg'
from fido_mds.helpers import load_raw_cert
class AttestationFormat(str, Enum):
PACKED = 'packed'
FIDO_U2F = 'fido-u2f'
NONE = 'none'
ANDROID_KEY = 'android-key'
ANDROID_SAFETYNET = 'android-safetynet'
TPM = 'tpm'
APPLE = 'apple'
class AttestationConfig(BaseModel):
class Config:
orm_mode = True
arbitrary_types_allowed = True
class AttestationStatementResponseHeader(AttestationConfig):
alg: str
x5c: List[Certificate] = Field(default=[])
@validator('x5c', pre=True)
def validate_x5c(cls, v: List[str]) -> List[Certificate]:
return [load_raw_cert(item) for item in v]
class AttestationStatementResponsePayload(AttestationConfig):
nonce: str
timestampMs: datetime
apk_package_name: str = Field(alias='apkPackageName')
apk_digest_sha256: str = Field(alias='apkDigestSha256')
cts_profile_match: bool = Field(alias='ctsProfileMatch')
apk_certificate_digest_sha256: List[str] = Field(alias='apkCertificateDigestSha256')
basic_integrity: bool = Field(alias='basicIntegrity')
class AttestationStatementResponse(AttestationConfig):
header: AttestationStatementResponseHeader
payload: AttestationStatementResponsePayload
signature: str
class AttestationStatement(AttestationConfig):
alg: Optional[int]
sig: Optional[bytes]
x5c: List[Certificate] = Field(default=[])
ver: Optional[str]
response: Optional[AttestationStatementResponse]
cert_info: Optional[bytes] = Field(alias='certInfo')
pub_area: Optional[bytes] = Field(alias='pubArea')
@validator('x5c', pre=True)
def validate_x5c(cls, v: List[bytes]) -> List[Certificate]:
return [x509.load_der_x509_certificate(item) for item in v]
@validator('response', pre=True)
def validate_response(cls, v: bytes) -> Optional[AttestationStatementResponse]:
header, payload, signature = v.decode(encoding='utf-8').split('.')
return AttestationStatementResponse(
header=AttestationStatementResponseHeader.parse_raw(websafe_decode(header)),
payload=AttestationStatementResponsePayload.parse_raw(websafe_decode(payload)),
signature=signature,
)
class AuthenticatorFlags(AttestationConfig):
attested: bool
user_present: bool
user_verified: bool
extension_data: bool
class CredentialData(AttestationConfig):
aaguid: UUID
credential_id: bytes
public_key: Union[ES256, RS256, PS256, EdDSA, RS1]
@validator('aaguid', pre=True)
def validate_aaguid(cls, v: bytes) -> UUID:
return UUID(bytes=v)
@validator('public_key', pre=True)
    def validate_public_key(cls, v: bytes) -> CoseKey:
return CoseKey.parse(v)
class AuthenticatorData(AttestationConfig):
rp_id_hash: bytes
flags: AuthenticatorFlags
counter: int
credential_data: CredentialData
@validator('flags', pre=True)
def validate_flags(cls, v: int) -> AuthenticatorFlags:
# see https://www.w3.org/TR/webauthn/#table-authData
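        # Worked example (added note): a flags byte of 0x45 (0b01000101) yields
        # user_present=True, user_verified=True, attested=True, extension_data=False.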
user_present = bool(v & 0x01)
user_verified = bool(v & 0x04)
attested = bool(v & 0x40)
extension_data = bool(v & 0x80)
return AuthenticatorFlags(
attested=attested, user_present=user_present, user_verified=user_verified, extension_data=extension_data
)
class Attestation(AttestationConfig):
fmt: AttestationFormat
att_statement: AttestationStatement = Field(alias='attStmt')
auth_data: AuthenticatorData = Field(alias='authData')
ep_att: Optional[bytes]
large_blob_key: Optional[bytes]
raw_attestation_obj: bytes
@property
def aaguid(self) -> Optional[UUID]:
if self.fmt is not AttestationFormat.FIDO_U2F:
return self.auth_data.credential_data.aaguid
return None
@property
def certificate_key_identifier(self) -> Optional[str]:
if self.fmt is AttestationFormat.FIDO_U2F and self.att_statement.x5c:
cki = x509.SubjectKeyIdentifier.from_public_key(self.att_statement.x5c[0].public_key())
return cki.digest.hex()
return None
@property
def attestation_obj(self) -> AttestationObject:
return AttestationObject(self.raw_attestation_obj)
@classmethod
def from_attestation_object(cls, data: AttestationObject) -> Attestation:
d = dict((k.string_key, v) for k, v in data.data.items())
d['raw_attestation_obj'] = bytes(data)
return cls.parse_obj(d)
@classmethod
def from_base64(cls, data: str) -> Attestation:
try:
return cls.from_attestation_object(AttestationObject(websafe_decode(data)))
except AttributeError as e:
raise AttributeError(f'Could not parse attestation: {e}')
```
#### File: fido_mds/tests/test_verify.py
```python
import pytest
from fido2.utils import websafe_decode
from fido_mds.exceptions import MetadataValidationError
from fido_mds.metadata_store import FidoMetadataStore
from fido_mds.models.webauthn import Attestation
from fido_mds.tests.data import IPHONE_12, MICROSOFT_SURFACE_1796, NEXUS_5, YUBIKEY_4, YUBIKEY_5_NFC
__author__ = 'lundberg'
@pytest.mark.parametrize('attestation_obj,client_data', [YUBIKEY_4, YUBIKEY_5_NFC, MICROSOFT_SURFACE_1796, NEXUS_5])
def test_verify(mds: FidoMetadataStore, attestation_obj: str, client_data: str):
att = Attestation.from_base64(attestation_obj)
cd = websafe_decode(client_data)
assert mds.verify_attestation(attestation=att, client_data=cd) is True
# test attestations with short-lived certs so metadata can't be validated
@pytest.mark.parametrize('attestation_obj,client_data', [IPHONE_12])
def test_verify_no_validate(mds: FidoMetadataStore, attestation_obj: str, client_data: str):
att = Attestation.from_base64(attestation_obj)
cd = websafe_decode(client_data)
with pytest.raises(MetadataValidationError):
mds.verify_attestation(attestation=att, client_data=cd)
``` |
{
"source": "johanlundberg/snake-cms",
"score": 2
} |
#### File: cms/search/views.py
```python
from django.shortcuts import render_to_response
from django.contrib.flatpages.models import FlatPage
from django.http import HttpResponseRedirect
def search(request):
query = request.GET.get('q', '')
keyword_results = results = []
if query:
keyword_results = FlatPage.objects.filter(
searchkeyword__keyword__in=query.split()).distinct()
if keyword_results.count() == 1:
return HttpResponseRedirect(keyword_results[0].get_absolute_url())
results = FlatPage.objects.filter(content__icontains=query)
return render_to_response('search/search.html',
{ 'query': query,
'keyword_results': keyword_results,
'results': results })
```
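The `searchkeyword__keyword__in` lookup above implies a `SearchKeyword` model with a `keyword` field and a foreign key to `FlatPage`. Below is a minimal sketch of what that model might look like; the field length and the foreign key name are assumptions, not taken from the repository.

```python
# Minimal sketch of the SearchKeyword model implied by the reverse lookup used
# in the search view; field names other than `keyword` are assumptions.
from django.db import models
from django.contrib.flatpages.models import FlatPage


class SearchKeyword(models.Model):
    keyword = models.CharField(max_length=50)
    page = models.ForeignKey(FlatPage)

    def __unicode__(self):
        return self.keyword
```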
#### File: snake-cms/snakelog/models.py
```python
import datetime
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from tagging.fields import TagField
from markdown import markdown
class Category(models.Model):
title = models.CharField(max_length=250,
help_text='Maximum 250 characters.')
description = models.TextField()
    slug = models.SlugField(unique=True, help_text='Suggested value automatically generated from title. Must be unique.')
class Meta:
ordering = ['title']
verbose_name_plural = "Categories"
def __unicode__(self):
return self.title
def get_absolute_url(self):
return('snakelog_category_detail', (),
{'slug': self.slug})
get_absolute_url = models.permalink(get_absolute_url)
def live_entry_set(self):
from snakelog.models import Entry
return self.entry_set.filter(status=Entry.LIVE_STATUS)
# The Manager has to be created before it is used in the Entry class
class LiveEntryManager(models.Manager):
def get_query_set(self):
return super(LiveEntryManager, self).get_query_set().filter(
status=self.model.LIVE_STATUS)
class Entry(models.Model):
LIVE_STATUS = 0
DRAFT_STATUS = 1
HIDDEN_STATUS = 2
STATUS_CHOICES = (
(LIVE_STATUS, 'Live'),
(DRAFT_STATUS, 'Draft'),
(HIDDEN_STATUS, 'Hidden'),
)
# Core fields
title = models.CharField(max_length=250)
excerpt = models.TextField(blank=True)
body = models.TextField()
pub_date = models.DateTimeField(default=datetime.datetime.now)
# Fields to store generated HTML
excerpt_html = models.TextField(editable = False, blank=True)
body_html = models.TextField(editable = False, blank=True)
# Metadata
author = models.ForeignKey(User)
enable_comments = models.BooleanField(default=True)
featured = models.BooleanField(default=False)
slug = models.SlugField(unique_for_date='pub_date')
status = models.IntegerField(choices=STATUS_CHOICES, default = LIVE_STATUS)
# Categorization
categories = models.ManyToManyField(Category)
tags = TagField(help_text="Separate tags with spaces")
# Database queries
objects = models.Manager() # The first one is default (admin interface uses this one)
live = LiveEntryManager()
class Meta:
verbose_name_plural = "Entries"
ordering = ['-pub_date']
def __unicode__(self):
return self.title
def save(self):
self.body_html = markdown(self.body)
if self.excerpt:
self.excerpt_html = markdown(self.excerpt)
super(Entry, self).save()
# An alternative to get_absolute_url = models.permalink(get_absolute_url) is
# @models.permalink above this function
def get_absolute_url(self):
return ('snakelog_entry_detail', (), {'year': self.pub_date.strftime("%Y"),
'month': self.pub_date.strftime("%b").lower(),
'day': self.pub_date.strftime("%d"),
'slug': self.slug })
get_absolute_url = models.permalink(get_absolute_url)
# Comment moderator for the Entry model
from django.contrib.comments.moderation import CommentModerator, moderator
class EntryModerator(CommentModerator):
email_notification = True
enable_field = 'enable_comments'
auto_moderate_field ='pub_date'
# Auto moderate 14 days after pub_date
moderate_after = 14
moderator.register(Entry, EntryModerator)
# Code from
# http://sciyoshi.com/blog/2008/aug/27/using-akismet-djangos-new-comments-framework/
from django.contrib.comments.signals import comment_was_posted
def on_comment_was_posted(sender, comment, request, *args, **kwargs):
# spam checking can be enabled/disabled per the comment's target Model
#if comment.content_type.model_class() != Entry:
# return
from django.contrib.sites.models import Site
from django.conf import settings
try:
from akismet import Akismet
except:
return
# use TypePad's AntiSpam if the key is specified in settings.py
if hasattr(settings, 'TYPEPAD_ANTISPAM_API_KEY'):
ak = Akismet(
key=settings.TYPEPAD_ANTISPAM_API_KEY,
blog_url='http://%s/' % Site.objects.get(pk=settings.SITE_ID).domain
)
ak.baseurl = 'api.antispam.typepad.com/1.1/'
else:
ak = Akismet(
key=settings.AKISMET_API_KEY,
blog_url='http://%s/' % Site.objects.get(pk=settings.SITE_ID).domain
)
if ak.verify_key():
data = {
'user_ip': request.META.get('REMOTE_ADDR', '127.0.0.1'),
'user_agent': request.META.get('HTTP_USER_AGENT', ''),
'referrer': request.META.get('HTTP_REFERER', ''),
'comment_type': 'comment',
'comment_author': comment.user_name.encode('utf-8'),
}
if ak.comment_check(comment.comment.encode('utf-8'), data=data, build_data=True):
comment.flags.create(
user=comment.content_object.author,
flag='spam'
)
comment.is_public = False
comment.save()
comment_was_posted.connect(on_comment_was_posted)
class Link(models.Model):
# Core fields
title = models.CharField(max_length=250)
url = models.URLField(unique=True)
description = models.TextField(blank=True)
description_html = models.TextField(editable = False, blank=True)
pub_date = models.DateTimeField(default=datetime.datetime.now)
# Meta data
slug = models.SlugField(unique_for_date='pub_date',
help_text = 'Must be unique for the publication date.')
via_name = models.CharField('Via', max_length=250, blank=True,
help_text='The name of the person whose site you spotted the link on. Optional.')
via_url = models.URLField('Via URL', blank=True,
help_text = 'The URL of the site where you spotted the link. Optional.')
tags = TagField(help_text="Separate tags with spaces")
post_elsewhere = models.BooleanField('Post to del.icio.us', default=True,
help_text = 'If checked, this link will be posted both to your weblog and \
to your del.icio.us account.')
enable_comments = models.BooleanField(default=True)
posted_by = models.ForeignKey(User)
class Meta:
ordering = ['-pub_date']
def __unicode__(self):
return self.title
def save(self):
if self.description:
self.description_html = markdown(self.description)
if not self.id and self.post_elsewhere:
import pydelicious
from django.utils.encoding import smart_str
pydelicious.add(settings.DELICIOUS_USER, settings.DELICIOUS_PASSWORD,
smart_str(self.url), smart_str(self.title),
smart_str(self.tags))
super(Link, self).save()
def get_absolute_url(self):
return ('snakelog_link_detail', (), {'year': self.pub_date.strftime("%Y"),
'month': self.pub_date.strftime("%b").lower(),
'day': self.pub_date.strftime("%d"),
'slug': self.slug })
get_absolute_url = models.permalink(get_absolute_url)
``` |
{
"source": "JohanMabille/proteus",
"score": 3
} |
#### File: proteus/proteus/Gauges.py
```python
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
import os
from collections import defaultdict, OrderedDict
from itertools import product
from mpi4py import MPI
from petsc4py import PETSc
import numpy as np
from numpy.linalg import norm
from . import Comm
from .AuxiliaryVariables import AV_base
from .Profiling import logEvent
from proteus.MeshTools import triangleVerticesToNormals, tetrahedronVerticesToNormals, getMeshIntersections
from proteus import Profiling
def PointGauges(gauges, activeTime=None, sampleRate=0, fileName='point_gauges.csv'):
"""Create a set of point gauges that will automatically be serialized
as CSV data to the requested file.
:param gauges: An iterable of "gauges". Each gauge is specified
by a 2-tuple, with the first element in the tuple a
set of fields to be monitored, and the second
element a tuple of the 3-space representations of
the gauge locations.
See the Gauges class for an explanation of the other parameters.
Example::
p = PointGauges(gauges=((('u', 'v'), ((0.5, 0.5, 0), (1, 0.5, 0))),
(('p',), ((0.5, 0.5, 0),))),
activeTime=(0, 2.5),
sampleRate=0.2,
fileName='combined_gauge_0_0.5_sample_all.csv')
This creates a PointGauges object that will monitor the u and v
fields at the locations [0.5, 0.5, 0] and [1, 0.5, 0], and the p
field at [0.5, 0.5, 0] at simulation time between = 0 and 2.5 with
samples taken no more frequently than every 0.2 seconds. Results
will be saved to: combined_gauge_0_0.5_sample_all.csv.
"""
# build up dictionary of location information from gauges
# dictionary of dictionaries, outer dictionary is keyed by location (3-tuple)
# inner dictionaries contain monitored fields, and closest node
# closest_node is None if this process does not own the node
points = OrderedDict()
fields = list()
for gauge in gauges:
gauge_fields, gauge_points = gauge
for field in gauge_fields:
if field not in fields:
fields.append(field)
for point in gauge_points:
# initialize new dictionary of information at this location
if point not in points:
l_d = {'fields': set()}
points[point] = l_d
# add any currently unmonitored fields
points[point]['fields'].update(gauge_fields)
return Gauges(fields, activeTime, sampleRate, fileName, points=points)
def LineGauges(gauges, activeTime=None, sampleRate=0, fileName='line_gauges.csv'):
"""Create a set of line gauges that will automatically be serialized
as CSV data to the requested file. The line gauges will gather
data at every element on the mesh between the two endpoints on
each line.
:param gauges: An iterable of "gauges". Each gauge is specified
by a 2-tuple, with the first element in the tuple a
set of fields to be monitored, and the second
element a list of pairs of endpoints of the gauges
in 3-space representation.
See the Gauges class for an explanation of the other parameters.
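    Example (an illustrative sketch; the fields, endpoints and file name are
    arbitrary)::

        lg = LineGauges(gauges=((('u', 'v'), (((0.0, 0.0, 0.0), (1.0, 1.0, 0.0)),)),),
                        activeTime=(0, 2.5),
                        sampleRate=0.2,
                        fileName='line_gauges_u_v.csv')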
"""
# expand the product of fields and lines for each gauge
lines = list()
fields = list()
for gauge in gauges:
gauge_fields, gauge_lines = gauge
for field in gauge_fields:
if field not in fields:
fields.append(field)
lines.extend(product(gauge_fields, gauge_lines))
return Gauges(fields, activeTime, sampleRate, fileName, lines=lines)
def LineIntegralGauges(gauges, activeTime=None, sampleRate=0, fileName='line_integral_gauges.csv'):
"""Create a set of line integral gauges that will automatically be
serialized as CSV data to the requested file.
:param gauges: An iterable of "gauges". Each gauge is specified
by a 2-tuple, with the first element in the tuple a
set of fields to be monitored, and the second
element a list of pairs of endpoints of the gauges
in 3-space representation.
See the Gauges class for an explanation of the other parameters.
"""
# expand the product of fields and lines for each gauge
lines = list()
fields = list()
for gauge in gauges:
gauge_fields, gauge_lines = gauge
for field in gauge_fields:
if field not in fields:
fields.append(field)
lines.extend(product(gauge_fields, gauge_lines))
return Gauges(fields, activeTime, sampleRate, fileName, lines=lines, integrate=True)
class Gauges(AV_base):
"""Monitor fields at specific values.
This class provides a generic point-wise and line-integral monitor
that can be instantiated and attached to Proteus simulations by
including them in the list of Auxiliary Variables in problem
setup.
Each Gauges instance may contain one or more fields, which may
contain one or more locations to monitor. The monitoring is
defined over a given time and sample rate, and a filename is also
supplied. All results are serialized to a CSV file.
Parallel Implementation Notes: After the gauge has been attached,
all processes are partitioned into Gauge Owners and non-Gauge
Owners. The calculate method is a "no-op" for non-Owners. For
Gauge Owners, all values are computed individually, then
collectively transmitted to the "root" process, which is the only
process responsible for serializing gauge results to disk. This
code has not been aggressively vetted for parallel correctness or
scalability.
"""
def __init__(self, fields, activeTime=None, sampleRate=0, fileName='gauges.csv', points=None, lines=None,
integrate=False):
"""Create a set of gauges that will automatically be serialized as
CSV data to the requested file.
:param activeTime: If not None, a 2-tuple of start time and
end time for which the point gauge is
active.
:param sampleRate: The intervals at which samples should be
measured. Note that this is a rough lower
bound, and that the gauge values could be
computed less frequently depending on the
time integrator. The default value of zero
computes the gauge values at every time
step.
:param fileName: The name of the file to serialize results to.
Data is currently column-formatted, with 10 characters
allotted to the time field, and 45 characters allotted to each
point field.
"""
AV_base.__init__(self)
self.fields = fields
self.activeTime = activeTime
self.sampleRate = sampleRate
self.fileName = fileName
self.points = points if points else OrderedDict()
self.lines = lines if lines else []
self.file = None # only the root process should have a file open
self.flags = {}
self.files = {}
self.outputWriterReady = False
self.last_output = None
self.pointGaugeMats = []
self.field_ids = []
self.dofsVecs = []
self.pointGaugeVecs = []
self.segments = []
self.adapted = False
self.isPointGauge = bool(points)
self.isLineGauge = bool(lines) and not integrate
self.isLineIntegralGauge = bool(lines) and integrate
if not (self.isPointGauge or self.isLineGauge or self.isLineIntegralGauge):
raise ValueError("Need to provide points or lines")
if sum((self.isPointGauge, self.isLineGauge, self.isLineIntegralGauge)) > 1:
raise ValueError("Must be one of point or line gauge but not both")
def getLocalNearestNode(self, location):
# determine local nearest node distance
nearest_node_distance_kdtree, nearest_node_kdtree = self.nodes_kdtree.query(location)
comm = Comm.get().comm.tompi4py()
return comm.rank, nearest_node_kdtree, nearest_node_distance_kdtree
def getLocalElement(self, femSpace, location, node):
"""Given a location and its nearest node, determine if it is on a
local element.
Returns None if location is not on any elements owned by this
process
"""
# search elements that contain the nearest node
patchBoundaryNodes=set()
checkedElements=[]
for eOffset in range(femSpace.mesh.nodeElementOffsets[node], femSpace.mesh.nodeElementOffsets[node + 1]):
eN = femSpace.mesh.nodeElementsArray[eOffset]
checkedElements.append(eN)
patchBoundaryNodes|=set(femSpace.mesh.elementNodesArray[eN])
# evaluate the inverse map for element eN
xi = femSpace.elementMaps.getInverseValue(eN, location)
# query whether xi lies within the reference element
if femSpace.elementMaps.referenceElement.onElement(xi):
return eN
for node in patchBoundaryNodes:
for eOffset in range(femSpace.mesh.nodeElementOffsets[node], femSpace.mesh.nodeElementOffsets[node + 1]):
eN = femSpace.mesh.nodeElementsArray[eOffset]
if eN not in checkedElements:
checkedElements.append(eN)
# evaluate the inverse map for element eN
xi = femSpace.elementMaps.getInverseValue(eN, location)
# query whether xi lies within the reference element
if femSpace.elementMaps.referenceElement.onElement(xi):
return eN
# no elements found
return None
def findNearestNode(self, femSpace, location):
"""Given a gauge location, attempts to locate the most suitable
process for monitoring information about this location, as
well as the node on the process closest to the location.
        Returns a 2-tuple containing an identifier for the closest
        'owning' process as well as the local id of the nearest node
        on that process (None on all other processes).
"""
comm = Comm.get().comm.tompi4py()
comm_rank, nearest_node, nearest_node_distance = self.getLocalNearestNode(location)
local_element = self.getLocalElement(femSpace, location, nearest_node)
# determine global nearest node
haveElement = int(local_element is not None)
global_have_element, owning_proc = comm.allreduce((haveElement, comm.rank),
op=MPI.MAXLOC)
if global_have_element:
logEvent("Gauges on element at location: [%g %g %g] assigned to %d" % (location[0], location[1], location[2],
owning_proc), 3)
else:
# gauge isn't on any of the elements, just use nearest node
global_min_distance, owning_proc = comm.allreduce((nearest_node_distance,comm.rank), op=MPI.MINLOC)
logEvent("Off-element gauge location: [%g %g %g] assigned to %d" % (location[0], location[1], location[2],
owning_proc), 3)
if comm.rank != owning_proc:
nearest_node = None
assert owning_proc is not None
return owning_proc, nearest_node
def buildQuantityRow(self, m, femFun, quantity_id, quantity):
"""Builds up contributions to gauge operator from the underlying
element space
"""
location, node = quantity
# search elements that contain the nearest node
# use nearest node if the location is not found on any elements
localElement = self.getLocalElement(femFun.femSpace, location, node)
if localElement is not None:
for i, psi in enumerate(femFun.femSpace.referenceFiniteElement.localFunctionSpace.basis):
# assign quantity weights here
xi = femFun.femSpace.elementMaps.getInverseValue(localElement, location)
m[quantity_id, femFun.femSpace.dofMap.l2g[localElement, i]] = psi(xi)
else:
# just use nearest node for now if we're given a point outside the domain.
# the ideal thing would be to find the element with the nearest face
m[quantity_id, node] = 1
def initOutputWriter(self):
"""Initialize communication strategy for collective output of gauge
data.
On the root process in this communicator, create a map of
quantity owners and the corresponding location in their
arrays. This process is responsible for collecting gauge data
and saving it to disk.
Gauge data is globally ordered by field, then by location id
(as ordered by globalMeasuredQuantities)
"""
numLocalQuantities = sum([len(self.measuredQuantities[field]) for field in self.fields])
self.localQuantitiesBuf = np.zeros(numLocalQuantities)
if self.gaugeComm.rank != 0:
self.globalQuantitiesBuf = None
self.globalQuantitiesCounts = None
else:
if self.adapted:
if(Profiling.logDir not in self.fileName):
self.fileName = os.path.join(Profiling.logDir, self.fileName)
self.file = open(self.fileName, 'a')
else:
self.fileName = os.path.join(Profiling.logDir, self.fileName)
self.file = open(self.fileName, 'w')
if self.isLineIntegralGauge:
#Only need to set up mapping for point gauges
return
quantityIDs = [0] * self.gaugeComm.size
numGlobalQuantities = sum([len(self.globalMeasuredQuantities[field]) for field in self.fields])
# Assign quantity ids to processors
for field in self.fields:
for id in range(len(self.globalMeasuredQuantities[field])):
location, owningProc = self.globalMeasuredQuantities[field][id]
gaugeProc = self.globalGaugeRanks[owningProc]
quantityID = quantityIDs[gaugeProc]
quantityIDs[gaugeProc] += 1
assert gaugeProc >= 0
self.globalMeasuredQuantities[field][id] = location, gaugeProc, quantityID
logEvent("Gauge for %s[%d] at %e %e %e is at P[%d][%d]" % (field, id, location[0], location[1],
location[2], gaugeProc, quantityID), 5)
logEvent("Quantity IDs:\n%s" % str(quantityIDs), 5)
# determine mapping from global measured quantities to communication buffers
self.globalQuantitiesMap = np.zeros(numGlobalQuantities, dtype=np.int)
i = 0
for field in self.fields:
for location, gaugeProc, quantityID in self.globalMeasuredQuantities[field]:
self.globalQuantitiesMap[i] = sum(quantityIDs[:gaugeProc]) + quantityID
assert self.globalQuantitiesMap[i] < numGlobalQuantities
i += 1
# a couple consistency checks
assert sum(quantityIDs) == numGlobalQuantities
assert all(quantityID > 0 for quantityID in quantityIDs)
# final ids also equal to the counts on each process
self.globalQuantitiesCounts = quantityIDs
self.globalQuantitiesBuf = np.zeros(numGlobalQuantities, dtype=np.double)
logEvent("Global Quantities Map: \n%s" % str(self.globalQuantitiesMap), 5)
self.outputWriterReady = True
def buildGaugeComm(self):
"""Create a communicator composed only of processes that own gauge
quantities.
Collective over global communicator. Builds a local
communicator for collecting all gauge data. This communicator
contains only processes that will contain gauge data.
"""
comm = Comm.get().comm.tompi4py()
gaugeOwners = set()
for field in self.fields:
for location, owningProc in self.globalMeasuredQuantities[field]:
gaugeOwners.update((owningProc,))
self.isGaugeOwner = comm.rank in gaugeOwners
gaugeComm = comm.Split(color=self.isGaugeOwner)
logEvent("Gauge owner: %d" % self.isGaugeOwner, 5)
if self.isGaugeOwner:
self.gaugeComm = gaugeComm
gaugeRank = self.gaugeComm.rank
else:
self.gaugeComm = None
gaugeRank = -1
self.globalGaugeRanks = comm.allgather(gaugeRank)
logEvent("Gauge ranks: \n%s" % str(self.globalGaugeRanks), 5)
def addLineGaugePoints(self, line, line_segments):
"""Add all gauge points from each line into self.points
"""
points = self.points
new_points = {}
field, endpoints = line
comm = Comm.get().comm.tompi4py()
def addPoint(points, field, point):
point = tuple(point)
if point in points:
if self.isLineIntegralGauge:
no_output = points[point]['no_output'] if 'no_output' in points[point] else set()
points[point]['no_output'] = no_output.union(set((field,)) - points[point]['fields'])
points[point]['fields'].update((field,))
else:
ignore1, nearestNode, ignore2 = self.getLocalNearestNode(point)
if self.isLineIntegralGauge:
points[point] = {'fields':set((field,)), 'no_output': set((field,)),
'nearest_node': nearestNode,
'owning_proc': comm.rank}
else:
points[point] = {'fields':set((field,)),
'nearest_node': nearestNode,
'owning_proc': comm.rank}
new_points[point] = points[point]
for segment in line_segments:
logEvent("Processing segment [ %e %e %e ] to [ %e %e %e ]" % (
segment[0][0], segment[0][1], segment[0][2],
segment[1][0], segment[1][1], segment[1][2]), 5)
startPoint, endPoint = segment
# only add both sides of segment to line integral gauges and first segment
if self.isLineIntegralGauge or all(startPoint == endpoints[0]):
addPoint(points, field, startPoint)
addPoint(points, field, endPoint)
if self.isLineGauge:
new_points = comm.gather(new_points)
if comm.rank == 0:
for new_points_i in new_points:
points.update(new_points_i)
# resort points
points = OrderedDict(sorted(points.items()))
self.points = comm.bcast(points)
def identifyMeasuredQuantities(self):
""" build measured quantities, a list of fields
Each field in turn contains a list of gauge locations and their accompanying nearest node
only local quantities are saved
"""
self.measuredQuantities = defaultdict(list)
self.globalMeasuredQuantities = defaultdict(list)
comm = Comm.get().comm.tompi4py()
points = self.points
for point, l_d in points.items():
if 'nearest_node' not in l_d:
# TODO: Clarify assumption here about all fields sharing the same element mesh
field_id = self.fieldNames.index(list(l_d['fields'])[0])
femSpace = self.u[field_id].femSpace
owningProc, nearestNode = self.findNearestNode(femSpace, point)
l_d['nearest_node'] = nearestNode
else:
owningProc = l_d['owning_proc']
# nearestNode only makes sense on owning process
# so even if we have this information, it's not valid for this point
if owningProc == comm.rank:
nearestNode = l_d['nearest_node']
else:
nearestNode = None
for field in l_d['fields']:
self.globalMeasuredQuantities[field].append((point, owningProc))
if nearestNode is not None:
point_id = len(self.measuredQuantities[field])
logEvent("Gauge for %s[%d] at %e %e %e is closest to node %d" % (field, point_id, point[0], point[1],
point[2], nearestNode), 3)
l_d[field] = point_id
self.measuredQuantities[field].append((point, nearestNode))
def buildPointGaugeOperators(self):
""" Build the linear algebra operators needed to compute the point gauges.
The operators are all local since the point gauge measurements are calculated locally.
"""
for field, field_id in zip(self.fields, self.field_ids):
m = PETSc.Mat().create(PETSc.COMM_SELF)
m.setSizes([len(self.measuredQuantities[field]),
self.u[field_id].femSpace.dim])
m.setType('aij')
m.setUp()
# matrices are a list in same order as fields
self.pointGaugeMats.append(m)
# dofs are a list in same order as fields as well
dofs = self.u[field_id].dof
dofsVec = PETSc.Vec().createWithArray(dofs, comm=PETSc.COMM_SELF)
self.dofsVecs.append(dofsVec)
for field, field_id, m in zip(self.fields, self.field_ids, self.pointGaugeMats):
# get the FiniteElementFunction object for this quantity
femFun = self.u[field_id]
for quantity_id, quantity in enumerate(self.measuredQuantities[field]):
location, node = quantity
logEvent("Gauge for: %s at %e %e %e is on local operator row %d" % (field, location[0], location[1],
location[2], quantity_id), 3)
self.buildQuantityRow(m, femFun, quantity_id, quantity)
pointGaugesVec = PETSc.Vec().create(comm=PETSc.COMM_SELF)
pointGaugesVec.setSizes(len(self.measuredQuantities[field]))
pointGaugesVec.setUp()
self.pointGaugeVecs.append(pointGaugesVec)
for m in self.pointGaugeMats:
m.assemble()
def pruneDuplicateSegments(self, endpoints, length_segments):
""" prune duplicate segments across processors
endpoints - a pair of points in 3-space defining the line
length_segments - a pair of intersections augmented by length
this could be optimized
"""
eps = 1e-4
comm = Comm.get().comm.tompi4py()
length_segments = sorted(length_segments)
length_segments = comm.gather(length_segments)
if comm.rank != 0:
selected_segments = None
else:
selected_segments = [[] for i in range(len(length_segments))]
segment_pos = 0
while segment_pos < (1 - eps):
# choose the longest line from those that start at segment_pos
longest_segment = 0, None, None
for proc_rank, proc_length_segments in enumerate(length_segments):
segment_id = 0
for segment_id, length_segment in enumerate(proc_length_segments):
# ignore segments below current position (they will be discarded)
start, end, segment = length_segment
if start < (segment_pos - eps):
continue
# equality test
elif start < (segment_pos + eps):
segment_length = end - start
if segment_length > longest_segment[0]:
longest_segment = segment_length, proc_rank, segment
else:
break
# discard any segments that start before our current position
proc_length_segments[:] = proc_length_segments[segment_id:]
segment_length, proc_rank, segment = longest_segment
if segment_length == 0:
print(segment_pos)
print('segments')
for segment in selected_segments: print(segment)
print('length_segments')
for length_segment in length_segments: print(length_segment)
raise FloatingPointError("Unable to identify next segment while segmenting, are %s in domain?" %
str(endpoints))
logEvent("Identified best segment of length %g on %d: %s" % (segment_length, proc_rank, str(segment)), 9)
selected_segments[proc_rank].append(segment)
segment_pos += segment_length
err = abs(segment_pos - 1)
if err > 1e-8:
msg = "Segmented line %s different from original length by ratio %e\n segments: %s" % (
str(endpoints), err, str(selected_segments))
logEvent(msg, 3)
if err > 10*eps:
raise FloatingPointError(msg)
logEvent("Selected segments: %s" % str(selected_segments), 9)
segments = comm.scatter(selected_segments)
return segments
def getMeshIntersections(self, line):
field, endpoints = line
# get Proteus mesh index for this field
field_id = self.fieldNames.index(field)
femFun = self.u[field_id]
mesh = femFun.femSpace.mesh
referenceElement = femFun.femSpace.elementMaps.referenceElement
if referenceElement.dim == 2 and referenceElement.nNodes == 3:
toPolyhedron = triangleVerticesToNormals
elif referenceElement.dim == 3 and referenceElement.nNodes == 4:
toPolyhedron = tetrahedronVerticesToNormals
else:
raise NotImplementedError("Unable to compute mesh intersections for this element type")
intersections = np.asarray(list(getMeshIntersections(mesh, toPolyhedron, endpoints)), dtype=np.double)
endpoints = np.asarray(endpoints, np.double)
length = norm(endpoints[1] - endpoints[0])
length_segments = [(old_div(norm(i[0]-endpoints[0]),length), old_div(norm(i[1]-endpoints[0]),length), i) for i in intersections]
segments = self.pruneDuplicateSegments(endpoints, length_segments)
return segments
def buildLineIntegralGaugeOperators(self, lines, linesSegments):
""" Build the linear algebra operators needed to compute the line integral gauges.
The operators are local to each process, contributions are currently summed in the output functions.
"""
#create lineIntegralGaugesVec to store contributions to all lines from this process
self.lineIntegralGaugesVec = PETSc.Vec().create(comm=PETSc.COMM_SELF)
self.lineIntegralGaugesVec.setSizes(len(lines))
self.lineIntegralGaugesVec.setUp()
# create lineIntegralGaugeMats to store coefficients mapping contributions from each field
# to the line integral gauges
self.lineIntegralGaugeMats = []
if not self.isLineIntegralGauge:
return
# size of lineIntegralGaugeMats depends on number of local points for each field
for pointGaugesVec in self.pointGaugeVecs:
m = PETSc.Mat().create(comm=PETSc.COMM_SELF)
m.setSizes([len(lines), pointGaugesVec.getSize()])
m.setType('aij')
m.setUp()
self.lineIntegralGaugeMats.append(m)
# Assemble contributions from each point in each line segment
for lineIndex, (line, segments) in enumerate(zip(self.lines, linesSegments)):
field, endpoints = line
fieldIndex = self.fields.index(field)
# Trapezoid Rule to calculate coefficients here
for p1, p2 in segments:
segmentLength = np.linalg.norm(np.asarray(p2)-np.asarray(p1))
for point in p1, p2:
point_data = self.points[tuple(point)]
# only assign coefficients for locally owned points
if field in point_data:
pointID = point_data[field]
self.lineIntegralGaugeMats[fieldIndex].setValue(lineIndex, pointID, old_div(segmentLength,2), addv=True)
for m in self.lineIntegralGaugeMats:
m.assemble()
def attachModel(self, model, ar):
""" Attach this gauge to the given simulation model.
"""
from scipy import spatial
self.model = model
self.fieldNames = model.levelModelList[-1].coefficients.variableNames
self.vertexFlags = model.levelModelList[-1].mesh.nodeMaterialTypes
self.vertices = model.levelModelList[-1].mesh.nodeArray
self.num_owned_nodes = model.levelModelList[-1].mesh.nNodes_global
self.u = model.levelModelList[-1].u
self.timeIntegration = model.levelModelList[-1].timeIntegration
for field in self.fields:
field_id = self.fieldNames.index(field)
self.field_ids.append(field_id)
self.nodes_kdtree = spatial.cKDTree(model.levelModelList[-1].mesh.nodeArray)
linesSegments = []
for line in self.lines:
lineSegments = self.getMeshIntersections(line)
self.addLineGaugePoints(line, lineSegments)
linesSegments.append(lineSegments)
self.identifyMeasuredQuantities()
self.buildGaugeComm()
if self.isGaugeOwner:
self.initOutputWriter()
self.buildPointGaugeOperators()
self.buildLineIntegralGaugeOperators(self.lines, linesSegments)
if self.adapted:
pass
else:
self.outputHeader()
return self
def get_time(self):
""" Returns the current model time"""
return self.timeIntegration.tLast
def outputHeader(self):
""" Outputs a single header for a CSV style file to self.file"""
assert self.isGaugeOwner
if self.gaugeComm.rank == 0:
self.file.write("%10s" % ('time',))
if self.isPointGauge or self.isLineGauge:
for field in self.fields:
for quantity in self.globalMeasuredQuantities[field]:
location, gaugeProc, quantityID = quantity
self.file.write(",%12s [%9.5g %9.5g %9.5g]" % (field, location[0], location[1], location[2]))
elif self.isLineIntegralGauge:
for line in self.lines:
self.file.write(",%12s [%9.5g %9.5g %9.5g] - [%9.5g %9.5g %9.5g]" % (
line[0], line[1][0][0], line[1][0][1], line[1][0][2],
line[1][1][0], line[1][1][1], line[1][1][2]))
self.file.write('\n')
def outputRow(self, time):
""" Outputs a single row of currently calculated gauge data to self.file"""
assert self.isGaugeOwner
if self.isPointGauge or self.isLineGauge:
self.localQuantitiesBuf = np.concatenate([gaugesVec.getArray() for gaugesVec in
self.pointGaugeVecs]).astype(np.double)
logEvent("Sending local array of type %s and shape %s to root on comm %s" % (
str(self.localQuantitiesBuf.dtype), str(self.localQuantitiesBuf.shape), str(self.gaugeComm)), 9)
if self.gaugeComm.rank == 0:
logEvent("Receiving global array of type %s and shape %s on comm %s" % (
str(self.localQuantitiesBuf.dtype), str(self.globalQuantitiesBuf.shape), str(self.gaugeComm)), 9)
self.gaugeComm.Gatherv(sendbuf=[self.localQuantitiesBuf, MPI.DOUBLE],
recvbuf=[self.globalQuantitiesBuf, (self.globalQuantitiesCounts, None),
MPI.DOUBLE], root=0)
self.gaugeComm.Barrier()
if self.isLineIntegralGauge:
lineIntegralGaugeBuf = self.lineIntegralGaugesVec.getArray()
globalLineIntegralGaugeBuf = lineIntegralGaugeBuf.copy()
self.gaugeComm.Reduce(lineIntegralGaugeBuf, globalLineIntegralGaugeBuf, op=MPI.SUM)
else:
globalLineIntegralGaugeBuf = []
if self.gaugeComm.rank == 0:
self.file.write("%25.15e" % time)
if self.isPointGauge or self.isLineGauge:
for id in self.globalQuantitiesMap:
self.file.write(", %43.18e" % (self.globalQuantitiesBuf[id],))
if self.isLineIntegralGauge:
for lineIntegralGauge in globalLineIntegralGaugeBuf:
self.file.write(", %80.18e" % (lineIntegralGauge))
self.file.write('\n')
# disable this for better performance, but risk of data loss on crashes
self.file.flush()
self.last_output = time
def calculate(self):
""" Computes current gauge values, updates open output files
"""
if not self.isGaugeOwner:
return
time = self.get_time()
logEvent("Gauges calculate called at time %g" % time, 4)
# check that gauge is in its active time region
if self.activeTime is not None and (self.activeTime[0] > time or self.activeTime[1] < time):
return
# check that gauge is ready to be sampled again
if self.last_output is not None and time < self.last_output + self.sampleRate:
return
for m, dofsVec, gaugesVec in zip(self.pointGaugeMats, self.dofsVecs, self.pointGaugeVecs):
m.mult(dofsVec, gaugesVec)
# this could be optimized out... but why?
self.lineIntegralGaugesVec.zeroEntries()
for m, dofsVec in zip(self.lineIntegralGaugeMats, self.pointGaugeVecs):
m.multAdd(dofsVec, self.lineIntegralGaugesVec, self.lineIntegralGaugesVec)
self.outputRow(time)
```
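The gauge machinery above is normally driven declaratively from a problem setup rather than called directly. A minimal usage sketch follows; the `PointGauges`/`LineIntegralGauges` constructor names, their keyword arguments, and the import path are assumptions inferred from the attributes read in `attachModel()` and `calculate()` (fields, lines, `activeTime`, `sampleRate`) and are not confirmed by this excerpt.

```python
# Hedged usage sketch -- constructor names and keywords are assumptions; only
# the activeTime/sampleRate checks and the field/line structure appear above.
from proteus.Gauges import PointGauges, LineIntegralGauges  # assumed import path

# Sample pressure and x-velocity at two probe locations, at most every 0.01 s
# of model time and only while 0 <= t <= 10 (see the checks in calculate()).
point_gauges = PointGauges(
    gauges=((('p', 'u'), ((0.5, 0.5, 0.0), (1.0, 0.5, 0.0))),),
    activeTime=(0.0, 10.0),
    sampleRate=0.01,
    fileName='probes.csv')

# Integrate a scalar field along a vertical line, e.g. to track a water column.
line_integral_gauges = LineIntegralGauges(
    gauges=((('vof',), (((0.5, 0.0, 0.0), (0.5, 1.0, 0.0)),)),),
    fileName='column_vof.csv')

# Both objects would then be attached to a model by the framework (attachModel),
# which calls calculate() at the end of every time step.
```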
#### File: JohanMabille/proteus/setup.py
```python
import os
import sys
import setuptools
from distutils import sysconfig
cfg_vars = sysconfig.get_config_vars()
for key, value in cfg_vars.items():
if type(value) == str:
cfg_vars[key] = cfg_vars[key].replace("-Wstrict-prototypes", "")
cfg_vars[key] = cfg_vars[key].replace("-Wall", "-w")
from distutils.core import setup
from Cython.Build import cythonize
from Cython.Distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
## \file setup.py setup.py
# \brief The python script for building proteus
#
# Set the DISTUTILS_DEBUG environment variable to print detailed information while setup.py is running.
#
from proteus import config
from proteus.config import *
###to turn on debugging in c++
##\todo Finishing cleaning up setup.py/setup.cfg, config.py...
PROTEUS_PETSC_EXTRA_LINK_ARGS = getattr(config, 'PROTEUS_PETSC_EXTRA_LINK_ARGS', [])
PROTEUS_PETSC_EXTRA_COMPILE_ARGS = getattr(config, 'PROTEUS_PETSC_EXTRA_COMPILE_ARGS', [])
PROTEUS_CHRONO_CXX_FLAGS = getattr(config, 'PROTEUS_CHRONO_CXX_FLAGS', [])
proteus_install_path = os.path.join(sysconfig.get_python_lib(), 'proteus')
# handle non-system installations
for arg in sys.argv:
if arg.startswith('--root'):
proteus_install_path = proteus_install_path.partition(sys.prefix + '/')[-1]
break
if arg.startswith('--prefix'):
proteus_install_path = proteus_install_path.partition(sys.prefix + '/')[-1]
break
def get_xtensor_include():
return [str(get_pybind_include()),
str(get_pybind_include(user=True)),
str(get_numpy_include()),
os.path.join(sys.prefix, 'include'),
os.path.join(sys.prefix, 'Library', 'include'),
'proteus',
'proteus/xtensor/pybind11/include',
'proteus/xtensor/xtensor-python/include',
'proteus/xtensor/xtensor/include',
'proteus/xtensor/xtl/include']
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
class get_numpy_include(object):
"""Helper class to determine the numpy include path
The purpose of this class is to postpone importing numpy
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self):
pass
def __str__(self):
import numpy as np
return np.get_include()
EXTENSIONS_TO_BUILD = [
Extension("MeshAdaptPUMI.MeshAdapt",
sources = ['proteus/MeshAdaptPUMI/MeshAdapt.pyx', 'proteus/MeshAdaptPUMI/cMeshAdaptPUMI.cpp',
'proteus/MeshAdaptPUMI/MeshConverter.cpp', 'proteus/MeshAdaptPUMI/ParallelMeshConverter.cpp',
'proteus/MeshAdaptPUMI/MeshFields.cpp', 'proteus/MeshAdaptPUMI/SizeField.cpp',
'proteus/MeshAdaptPUMI/DumpMesh.cpp',
'proteus/MeshAdaptPUMI/ErrorResidualMethod.cpp','proteus/MeshAdaptPUMI/VMS.cpp','proteus/MeshAdaptPUMI/createAnalyticGeometry.cpp'],
depends=["proteus/partitioning.h",
"proteus/partitioning.cpp",
"proteus/cpartitioning.pyx",
"proteus/cmeshTools.pxd",
"proteus/mesh.h",
'proteus/mesh.cpp',
'proteus/meshio.cpp'],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H)],
language='c++',
include_dirs=[numpy.get_include(),'include',
'proteus','proteus/MeshAdaptPUMI']+
PROTEUS_SCOREC_INCLUDE_DIRS,
library_dirs=PROTEUS_SCOREC_LIB_DIRS,
libraries=PROTEUS_SCOREC_LIBS,
extra_compile_args=PROTEUS_SCOREC_EXTRA_COMPILE_ARGS+PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_SCOREC_EXTRA_LINK_ARGS+PROTEUS_EXTRA_LINK_ARGS),
Extension(
'mprans.cArgumentsDict',
['proteus/mprans/ArgumentsDict.cpp'],
depends=['proteus/mprans/ArgumentsDict.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cPres',
['proteus/mprans/Pres.cpp'],
depends=['proteus/mprans/Pres.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cPresInit',
['proteus/mprans/PresInit.cpp'],
depends=['proteus/mprans/PresInit.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cPresInc',
['proteus/mprans/PresInc.cpp'],
depends=['proteus/mprans/PresInc.h', 'proteus/mprans/PresInc.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension('mprans.cAddedMass', ['proteus/mprans/AddedMass.cpp'],
depends=['proteus/mprans/AddedMass.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14']),
Extension('mprans.SedClosure', ['proteus/mprans/SedClosure.cpp'],
depends=['proteus/mprans/SedClosure.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14']),
Extension('mprans.cVOF3P', ['proteus/mprans/VOF3P.cpp'],
depends=['proteus/mprans/VOF3P.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14']),
Extension(
'mprans.cVOS3P',
['proteus/mprans/VOS3P.cpp'],
depends=['proteus/mprans/VOS3P.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension('mprans.cNCLS3P', ['proteus/mprans/NCLS3P.cpp'],
depends=['proteus/mprans/NCLS3P.h', 'proteus/mprans/ArgumentsDict.h' , 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14']),
Extension('mprans.cMCorr3P', ['proteus/mprans/MCorr3P.cpp'],
depends=['proteus/mprans/MCorr3P.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
language='c++',
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
extra_link_args=PROTEUS_EXTRA_LINK_ARGS,
define_macros=[('PROTEUS_LAPACK_H',
PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',
PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',
PROTEUS_BLAS_H)],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB],
),
Extension(
'mprans.cRANS3PSed',
['proteus/mprans/RANS3PSed.cpp'],
depends=['proteus/mprans/RANS3PSed.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cRANS3PSed2D',
['proteus/mprans/RANS3PSed2D.cpp'],
depends=['proteus/mprans/RANS3PSed2D.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'richards.cRichards',
['proteus/richards/cRichards.cpp'],
depends=['proteus/richards/Richards.h', 'proteus/mprans/ArgumentsDict.h' ,'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
language='c++',
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
),
Extension(
'elastoplastic.cElastoPlastic',
['proteus/elastoplastic/cElastoPlastic.cpp'],
define_macros=[('PROTEUS_LAPACK_H',
PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',
PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',
PROTEUS_BLAS_H)],
depends=['proteus/elastoplastic/ElastoPlastic.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
language='c++',
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB]
),
Extension(
'mprans.cRANS3PF',
['proteus/mprans/RANS3PF.cpp'],
depends=['proteus/mprans/RANS3PF.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cRANS3PF2D',
['proteus/mprans/RANS3PF2D.cpp'],
depends=['proteus/mprans/RANS3PF2D.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension("Isosurface",['proteus/Isosurface.pyx'],
language='c',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus'],
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("BoundaryConditions",['proteus/BoundaryConditions.py'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.BoundaryConditions",['proteus/mprans/BoundaryConditions.py','proteus/mprans/BoundaryConditions.pxd'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("mprans.MeshSmoothing",['proteus/mprans/MeshSmoothing.pyx'],
language='c++',
include_dirs=[numpy.get_include(),'proteus',PROTEUS_INCLUDE_DIR],
libraries=['stdc++','m'],
extra_compile_args=["-std=c++11","-mavx"]),
Extension("mprans.cMoveMeshMonitor",['proteus/mprans/cMoveMeshMonitor.pyx'],
language='c++',
include_dirs=[numpy.get_include(),'proteus',PROTEUS_INCLUDE_DIR],
libraries=['stdc++','m'],
extra_compile_args=["-std=c++11","-mavx"]),
Extension("mbd.CouplingFSI",
sources=['proteus/mbd/CouplingFSI.pyx',
'proteus/mbd/CouplingFSI.pxd',
'proteus/mbd/ChVariablesBodyAddedMass.cpp',
'proteus/mbd/ChBodyAddedMass.cpp',
'proteus/mbd/ChronoHeaders.pxd'],
depends=['proteus/mbd/ProtChBody.h',
'proteus/mbd/ProtChMoorings.h'],
language='c++',
include_dirs=[numpy.get_include(),
'proteus',
PROTEUS_INCLUDE_DIR,
PROTEUS_INCLUDE_DIR+'/eigen3',
PROTEUS_CHRONO_INCLUDE_DIR,
PROTEUS_CHRONO_INCLUDE_DIR+'/chrono',
PROTEUS_CHRONO_INCLUDE_DIR+'/chrono/collision/bullet',],
library_dirs=[PROTEUS_CHRONO_LIB_DIR],
libraries=['ChronoEngine',
'stdc++',
'm'],
extra_compile_args=PROTEUS_CHRONO_CXX_FLAGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("WaveTools",['proteus/WaveTools.py'],
depends=['proteus/WaveTools.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus']),
Extension("fenton.Fenton",
sources=['proteus/fenton/Fenton.pyx',
'proteus/fenton/Solve.cpp',
'proteus/fenton/Dpythag.cpp',
'proteus/fenton/Dsvbksb.cpp',
'proteus/fenton/Dsvdcmp.cpp',
'proteus/fenton/Inout.cpp',
'proteus/fenton/Subroutines.cpp',
'proteus/fenton/Util.cpp',],
language='c++',
include_dirs=[numpy.get_include(),
'proteus',
PROTEUS_INCLUDE_DIR,
PROTEUS_NCURSES_INCLUDE_DIR,],
library_dirs=[PROTEUS_NCURSES_LIB_DIR,],
libraries=['ncurses','stdc++','m'],
extra_compile_args=["-std=c++11"]),
Extension(
'cADR',
['proteus/ADR.cpp'],
depends=['proteus/ADR.h', 'proteus/mprans/ArgumentsDict.h', 'proteus/ModelFactory.h', 'proteus/CompKernel.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'
),
Extension("subsurfaceTransportFunctions",['proteus/subsurfaceTransportFunctions.pyx'],
include_dirs=[numpy.get_include(),'proteus'],
extra_compile_args=PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("equivalent_polynomials",['proteus/equivalent_polynomials.pyx'],
depends=['proteus/equivalent_polynomials.pxd',
'proteus/equivalent_polynomials.h',
'proteus/equivalent_polynomials_utils.h',
'proteus/equivalent_polynomials_coefficients.h',
'proteus/equivalent_polynomials_coefficients_quad.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus'],),
Extension('cfemIntegrals',
['proteus/cfemIntegrals.pyx',
'proteus/femIntegrals.c',
'proteus/postprocessing.c'],
depends=['proteus/femIntegrals.h'],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
include_dirs=[numpy.get_include(),'proteus',
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("csparsity",['proteus/csparsity.pyx', 'proteus/sparsity.cpp'],
depends=['proteus/sparsity.h'],
language='c++',
extra_compile_args=PROTEUS_OPT,
include_dirs=[numpy.get_include(),'proteus'],),
Extension("cmeshTools",
['proteus/cmeshTools.pyx', 'proteus/mesh.cpp', 'proteus/meshio.cpp'],
language='c++',
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
library_dirs=[PROTEUS_DAETK_LIB_DIR]+PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m',PROTEUS_DAETK_LIB]+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT),
Extension('ctransportCoefficients',
['proteus/ctransportCoefficients.pyx','proteus/transportCoefficients.c'],
include_dirs=[numpy.get_include(),'proteus'],
depends=["proteus/transportCoefficients.h"],
language="c",
libraries=['m']),
Extension('csubgridError',
['proteus/csubgridError.pyx','proteus/subgridError.c'],
depends=["proteus/subgridError.h"],
language="c",
include_dirs=[numpy.get_include(),'proteus'],
libraries=['m'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension('cshockCapturing',
['proteus/cshockCapturing.pyx','proteus/shockCapturing.c'],
depends=["proteus/shockCapturing.h"],
language="c",
include_dirs=[numpy.get_include(),'proteus'],
libraries=['m'],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension('superluWrappers',
['proteus/superluWrappers.pyx'],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
language="c",
include_dirs=[numpy.get_include(),
'proteus',
PROTEUS_SUPERLU_INCLUDE_DIR],
library_dirs=[PROTEUS_SUPERLU_LIB_DIR,
PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_SUPERLU_LIB,
PROTEUS_LAPACK_LIB,PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("csmoothers",["proteus/csmoothers.pyx", "proteus/smoothers.c"],
define_macros=[('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
language="c",
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR,
],
library_dirs=[PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_SUPERLU_LIB_DIR,
PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_SUPERLU_LIB,
PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS),
Extension("canalyticalSolutions",["proteus/canalyticalSolutions.pyx", "proteus/analyticalSolutions.c"],
depends=["proteus/analyticalSolutions.h"],
extra_compile_args=PROTEUS_OPT,
language="c", include_dirs=[numpy.get_include(), 'proteus']),
Extension("clapack",
["proteus/clapack.pyx"],
depends=["proteus/proteus_lapack.h","proteus/proteus_blas.h"],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS,
language="c",
include_dirs=[numpy.get_include(), 'proteus',
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB]),
Extension("cpostprocessing",
["proteus/cpostprocessing.pyx","proteus/postprocessing.c"],
depends=["proteus/postprocessing.h","proteus/postprocessing.pxd"],
define_macros=[('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS,
language="c",
include_dirs=[numpy.get_include(), 'proteus',
PROTEUS_LAPACK_INCLUDE_DIR,
PROTEUS_BLAS_INCLUDE_DIR],
library_dirs=[PROTEUS_LAPACK_LIB_DIR,PROTEUS_BLAS_LIB_DIR],
libraries=['m',
PROTEUS_LAPACK_LIB,
PROTEUS_BLAS_LIB]),
Extension('cnumericalFlux',
['proteus/cnumericalFlux.pyx','proteus/numericalFlux.c'],
depends=["proteus/numericalFlux.h"],
extra_compile_args=PROTEUS_OPT,
language="c", include_dirs=[numpy.get_include(), 'proteus']),
Extension('ctimeIntegration',
['proteus/ctimeIntegration.pyx','proteus/timeIntegration.c'],
depends=["proteus/timeIntegration.h"],
extra_compile_args=PROTEUS_OPT,
language="c", include_dirs=[numpy.get_include(), 'proteus']),
Extension("cTwophaseDarcyCoefficients",
["proteus/cTwophaseDarcyCoefficients.pyx",
"proteus/SubsurfaceTransportCoefficients.cpp"],
depends=["proteus/SubsurfaceTransportCoefficients.h",
"proteus/pskRelations.h",
"proteus/pskRelations.pxd",
"proteus/densityRelations.h",
"proteus/twophaseDarcyCoefficients.pxd",
"proteus/twophaseDarcyCoefficients.h"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("cSubsurfaceTransportCoefficients",
["proteus/cSubsurfaceTransportCoefficients.pyx","proteus/SubsurfaceTransportCoefficients.cpp"],
depends=["proteus/SubsurfaceTransportCoefficients.pxd",
"proteus/SubsurfaceTransportCoefficients.h"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("cpskRelations",["proteus/cpskRelations.pyx"],
depends=["proteus/pskRelations.pxd",
"proteus/pskRelations.h"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("cpartitioning",["proteus/cpartitioning.pyx",
"proteus/partitioning.cpp",
'proteus/mesh.cpp',
'proteus/meshio.cpp',],
depends=["proteus/partitioning.h",
"proteus/partitioning.cpp",
"proteus/cpartitioning.pyx",
"proteus/cmeshTools.pxd",
"proteus/mesh.h",
'proteus/mesh.cpp',
'proteus/meshio.cpp'],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
language="c++",
library_dirs=PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m']+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
),
Extension("flcbdfWrappers",["proteus/flcbdfWrappers.pyx"],
language="c++",
depends=["proteus/flcbdfWrappers.pxd"],
define_macros=[('PROTEUS_TRIANGLE_H',PROTEUS_TRIANGLE_H),
('PROTEUS_SUPERLU_H',PROTEUS_SUPERLU_H),
('CMRVEC_BOUNDS_CHECK',1),
('MV_VECTOR_BOUNDS_CHECK',1),
('PETSCVEC_BOUNDS_CHECK',1),
('F77_POST_UNDERSCORE',1),
('USE_BLAS',1)],
include_dirs=['proteus',
numpy.get_include(),
PROTEUS_SUPERLU_INCLUDE_DIR,
PROTEUS_TRIANGLE_INCLUDE_DIR,
PROTEUS_DAETK_INCLUDE_DIR,
PROTEUS_HDF5_INCLUDE_DIR] + \
PROTEUS_PETSC_INCLUDE_DIRS + \
PROTEUS_MPI_INCLUDE_DIRS,
library_dirs=[PROTEUS_DAETK_LIB_DIR]+PROTEUS_PETSC_LIB_DIRS+PROTEUS_MPI_LIB_DIRS+PROTEUS_HDF5_LIB_DIRS,
libraries=['hdf5','stdc++','m',PROTEUS_DAETK_LIB]+PROTEUS_PETSC_LIBS+PROTEUS_MPI_LIBS+PROTEUS_HDF5_LIBS,
extra_link_args=PROTEUS_EXTRA_LINK_ARGS + PROTEUS_PETSC_EXTRA_LINK_ARGS,
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS + PROTEUS_PETSC_EXTRA_COMPILE_ARGS+PROTEUS_OPT,
),
Extension(
'mprans.cCLSVOF',
['proteus/mprans/CLSVOF.cpp'],
depends=["proteus/mprans/CLSVOF.h", "proteus/mprans/CLSVOF.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cNCLS',
['proteus/mprans/NCLS.cpp'],
depends=["proteus/mprans/NCLS.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cMCorr',
['proteus/mprans/MCorr.cpp'],
depends=["proteus/mprans/MCorr.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"] + [
"proteus/equivalent_polynomials.h",
"proteus/equivalent_polynomials_utils.h",
"proteus/equivalent_polynomials_coefficients.h",
'proteus/equivalent_polynomials_coefficients_quad.h'],
define_macros=[('PROTEUS_LAPACK_H',PROTEUS_LAPACK_H),
('PROTEUS_LAPACK_INTEGER',PROTEUS_LAPACK_INTEGER),
('PROTEUS_BLAS_H',PROTEUS_BLAS_H)],
include_dirs=get_xtensor_include(),
library_dirs=[PROTEUS_LAPACK_LIB_DIR,
PROTEUS_BLAS_LIB_DIR],
libraries=['m',PROTEUS_LAPACK_LIB,PROTEUS_BLAS_LIB],
extra_compile_args=PROTEUS_EXTRA_COMPILE_ARGS+PROTEUS_OPT+['-std=c++14'],
extra_link_args=PROTEUS_EXTRA_LINK_ARGS,
language='c++'),
Extension(
'mprans.cRANS2P',
['proteus/mprans/RANS2P.cpp'],
depends=["proteus/mprans/RANS2P.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/MixedModelFactory.h","proteus/CompKernel.h"] + [
"proteus/equivalent_polynomials.h",
"proteus/equivalent_polynomials_utils.h",
"proteus/equivalent_polynomials_coefficients.h",
'proteus/equivalent_polynomials_coefficients_quad.h'],
include_dirs=get_xtensor_include()+PROTEUS_MPI_INCLUDE_DIRS,
extra_compile_args=PROTEUS_OPT+PROTEUS_MPI_LIB_DIRS+['-std=c++14'],
libraries=PROTEUS_MPI_LIBS,
language='c++'),
Extension(
'mprans.cRANS2P_IB',
['proteus/mprans/RANS2P_IB.cpp'],
depends=["proteus/mprans/RANS2P_IB.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/MixedModelFactory.h","proteus/CompKernel.h"] + [
"proteus/equivalent_polynomials.h",
"proteus/equivalent_polynomials_utils.h",
"proteus/equivalent_polynomials_coefficients.h",
'proteus/equivalent_polynomials_coefficients_quad.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cRANS2P2D',
['proteus/mprans/RANS2P2D.cpp'],
depends=["proteus/mprans/RANS2P2D.h"] + ["proteus/MixedModelFactory.h","proteus/CompKernel.h"] + [
"proteus/equivalent_polynomials.h",
"proteus/equivalent_polynomials_utils.h",
"proteus/equivalent_polynomials_coefficients.h",
'proteus/equivalent_polynomials_coefficients_quad.h'],
include_dirs=get_xtensor_include() + PROTEUS_MPI_INCLUDE_DIRS,
extra_compile_args=PROTEUS_OPT+PROTEUS_MPI_LIB_DIRS+['-std=c++14'],
libraries=PROTEUS_MPI_LIBS,
language='c++'),
Extension(
'mprans.cRDLS',
['proteus/mprans/RDLS.cpp'],
depends=["proteus/mprans/RDLS.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"] + [
"proteus/equivalent_polynomials.h",
"proteus/equivalent_polynomials_utils.h",
"proteus/equivalent_polynomials_coefficients.h",
'proteus/equivalent_polynomials_coefficients_quad.h'],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cVOF',
['proteus/mprans/VOF.cpp'],
depends=["proteus/mprans/VOF.h", "proteus/mprans/ArgumentsDict.h", "proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cMoveMesh',
['proteus/mprans/MoveMesh.cpp'],
depends=["proteus/mprans/MoveMesh.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cMoveMesh2D',
['proteus/mprans/MoveMesh2D.cpp'],
depends=["proteus/mprans/MoveMesh2D.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cSW2D',
['proteus/mprans/SW2D.cpp'],
depends=["proteus/mprans/SW2D.h", "proteus/mprans/SW2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cSW2DCV',
['proteus/mprans/SW2DCV.cpp'],
depends=["proteus/mprans/SW2DCV.h", "proteus/mprans/ArgumentsDict.h", "proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cGN_SW2DCV',
['proteus/mprans/GN_SW2DCV.cpp'],
depends=["proteus/mprans/GN_SW2DCV.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cKappa',
['proteus/mprans/Kappa.cpp'],
depends=["proteus/mprans/Kappa.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cKappa2D',
['proteus/mprans/Kappa2D.cpp'],
depends=["proteus/mprans/Kappa2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cDissipation',
['proteus/mprans/Dissipation.cpp'],
depends=["proteus/mprans/Dissipation.h", "proteus/mprans/ArgumentsDict.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
Extension(
'mprans.cDissipation2D',
['proteus/mprans/Dissipation2D.cpp'],
depends=["proteus/mprans/Dissipation2D.h"] + ["proteus/ModelFactory.h","proteus/CompKernel.h"],
include_dirs=get_xtensor_include(),
extra_compile_args=PROTEUS_OPT+['-std=c++14'],
language='c++'),
]
def setup_given_extensions(extensions):
setup(name='proteus',
version='1.7.5dev',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
],
description='Python tools for multiphysics modeling',
author='The <NAME>',
author_email='<EMAIL>',
url='http://proteustoolkit.org',
packages = ['proteus',
'proteus.fenton',
'proteus.mprans',
'proteus.richards',
'proteus.elastoplastic',
'proteus.mbd',
'proteus.test_utils',
'proteus.config',
'proteus.tests',
'proteus.tests.ci',
'proteus.tests.griffiths_lane_6',
'proteus.tests.levelset',
'proteus.tests.linalgebra_tests',
'proteus.tests.LS_with_edgeBased_EV',
'proteus.tests.LS_with_edgeBased_EV.VOF',
'proteus.tests.LS_with_edgeBased_EV.NCLS',
'proteus.tests.BernsteinPolynomials',
'proteus.tests.BernsteinPolynomials.poisson_eqn',
'proteus.tests.elliptic_redist',
'proteus.tests.elliptic_redist.RDLS',
'proteus.tests.elliptic_redist.RDLS3P',
'proteus.tests.surface_tension',
'proteus.tests.surface_tension.rising_bubble_rans3p',
'proteus.tests.CLSVOF',
'proteus.tests.CLSVOF.disc_ICs',
'proteus.tests.CLSVOF.with_RANS2P',
'proteus.tests.CLSVOF.with_RANS3PF',
'proteus.tests.CLSVOF.pure_level_set',
'proteus.TwoPhaseFlow',
'proteus.TwoPhaseFlow.utils',
'proteus.tests.TwoPhaseFlow',
'proteus.tests.matrix_constructor',
'proteus.tests.matrix_constructor.import_modules',
'proteus.SWFlow',
'proteus.SWFlow.utils',
'proteus.tests.SWFlow',
'proteus.MeshAdaptPUMI',
'proteus.MeshAdaptPUMI',
'proteus.tests.MeshAdaptPUMI',
'proteus.tests.MeshAdaptPUMI.gauge_compare.dambreak_Colagrossi_2D',
'proteus.tests.mesh_tests',
'proteus.tests.mesh_tests.import_modules',
'proteus.tests.periodic',
'proteus.tests.periodic.petsc',
'proteus.tests.periodic.comparison_files',
'proteus.tests.poisson_2d',
'proteus.tests.post_processing',
'proteus.tests.post_processing.import_modules',
'proteus.tests.ProjScheme_with_EV',
'proteus.tests.single_phase_gw',
'proteus.tests.solver_tests',
'proteus.tests.solver_tests.import_modules',
'proteus.tests.solver_tests.comparison_files',
'proteus.tests.cylinder2D',
'proteus.tests.cylinder2D.conforming_rans2p',
'proteus.tests.cylinder2D.conforming_rans3p',
'proteus.tests.cylinder2D.ibm_method',
'proteus.tests.cylinder2D.ibm_rans2p',
'proteus.tests.cylinder2D.ibm_rans2p_3D',
'proteus.tests.cylinder2D.sbm_method',
'proteus.tests.cylinder2D.sbm_3Dmesh',
'proteus.tests.HotStart_3P',
'proteus.tests.AddedMass',
'proteus.tests.FSI',
'proteus.tests.MoveMeshMonitor',
'proteus.tests.wave_tests',
],
cmdclass = {'build_ext':build_ext},
ext_package='proteus',
ext_modules=extensions,
data_files=[(proteus_install_path,
['proteus/proteus_blas.h',
'proteus/proteus_lapack.h',
'proteus/proteus_superlu.h',
'proteus/ModelFactory.h',
'proteus/CompKernel.h'
]),
(os.path.join(proteus_install_path,'tests'),
['proteus/tests/hex_cube_3x3.xmf',
'proteus/tests/hex_cube_3x3.h5',
'proteus/tests/sparse_mat_ex.mtx']),
(os.path.join(proteus_install_path,'tests','linalgebra_tests'),
['proteus/tests/linalgebra_tests/sparse_mat_1.txt',
'proteus/tests/linalgebra_tests/jac.bin']),
(os.path.join(proteus_install_path,'tests','griffiths_lane_6'),
[]),
(os.path.join(proteus_install_path,'tests','levelset'),
[]),
(os.path.join(proteus_install_path,'tests','ci','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','LS_with_edgeBased_EV','VOF','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','LS_with_edgeBased_EV','NCLS','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','BernsteinPolynomials','poisson_eqn','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','surface_tension','rising_bubble_rans3p','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','CLSVOF','disc_ICs','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','CLSVOF','pure_level_set','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','CLSVOF','with_RANS2P','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','CLSVOF','with_RANS3PF','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','TwoPhaseFlow','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','solver_tests','import_modules'),
[]),
(os.path.join(proteus_install_path,'tests','mesh_tests','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','solver_tests_slow','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','matrix_constructor','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','periodic','petsc'),
[]),
(os.path.join(proteus_install_path,'tests','periodic','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','post_processing','import_modules'),
[]),
(os.path.join(proteus_install_path,'tests','post_processing','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','matrix_constructor','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','solver_tests_slow','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','solver_tests_slow','import_modules'),
[]),
(os.path.join(proteus_install_path,'tests','solver_tests_mprans','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','MeshAdaptPUMI'),
[]),
(os.path.join(proteus_install_path,'tests','MeshAdaptPUMI','gauge_compare','dambreak_Colagrossi_2D'),
[]),
(os.path.join(proteus_install_path,'tests','poisson_2d'),
[]),
(os.path.join(proteus_install_path,'tests','cylinder2D','conforming_rans3p','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','cylinder2D','ibm_method','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','cylinder2D','ibm_rans2p','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','cylinder2D','ibm_rans2p_3D','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','cylinder2D','sbm_method','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','cylinder2D','sbm_3Dmesh','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','cylinder2D','conforming_rans2p','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','HotStart_3P','comparison_files'),
[]),
(os.path.join(proteus_install_path,'tests','AddedMass'),
[]),
(os.path.join(proteus_install_path,'tests','FSI'),
[]),
(os.path.join(proteus_install_path,'tests','MoveMeshMonitor'),
[]),
(os.path.join(proteus_install_path,'tests','wave_tests'),
[])
],
scripts = ['scripts/parun','scripts/gf2poly','scripts/gatherArchives.py','scripts/qtm','scripts/waves2xmf','scripts/povgen.py',
'scripts/velocity2xmf','scripts/run_script_garnet','scripts/run_script_diamond',
'scripts/run_script_lonestar','scripts/run_script_ranger','scripts/run_script_mpiexec','scripts/gatherTimes','scripts/clearh5.py',
'scripts/runSWEs.py'],
requires=['numpy']
)
def setup_extensions_in_sequential():
setup_given_extensions(EXTENSIONS_TO_BUILD)
def setup_extensions_in_parallel():
import multiprocessing, logging
mp = multiprocessing.get_context('fork')
logger = mp.log_to_stderr()
logger.setLevel(logging.INFO)
mp.log_to_stderr()
pool = mp.Pool(processes=int(os.getenv('N')))
EXTENSIONS=[[e] for e in EXTENSIONS_TO_BUILD]
pool.imap(setup_given_extensions, EXTENSIONS)
pool.close()
pool.join()
if "build_ext" in sys.argv:
setup_extensions_in_parallel()
else:
setup_extensions_in_sequential()
``` |
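One detail worth calling out from the file above: `get_pybind_include` and `get_numpy_include` postpone importing pybind11 and numpy until they are converted to strings, so those build dependencies only need to be importable once the compiler actually asks for the include paths (their docstrings state this intent). A stripped-down illustration of the pattern, with an illustrative class name that is not part of the file:

```python
# Minimal sketch of the lazy include-path pattern (illustrative name only).
class LazyNumpyInclude:
    """Resolves numpy's include directory only when stringified."""
    def __str__(self):
        import numpy as np  # deferred until the path is actually needed
        return np.get_include()

include_dirs = [LazyNumpyInclude()]      # constructing this imports nothing
print([str(p) for p in include_dirs])    # now numpy is imported and the path emitted
```

Separately, the parallel build path (`setup_extensions_in_parallel`) runs whenever `build_ext` is requested and sizes its process pool from the `N` environment variable, so `N` must be exported with the desired worker count (`int(os.getenv('N'))` raises a `TypeError` otherwise).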
{
"source": "JohanMabille/robotframework-lsp",
"score": 2
} |
#### File: robotframework_ls/server_api/client.py
```python
from typing import Optional
from robocorp_ls_core.client_base import LanguageServerClientBase
from robocorp_ls_core.protocols import IIdMessageMatcher
class SubprocessDiedError(Exception):
pass
class RobotFrameworkApiClient(LanguageServerClientBase):
def __init__(self, writer, reader, server_process):
LanguageServerClientBase.__init__(self, writer, reader)
self.server_process = server_process
self._check_process_alive()
self._version = None
def _check_process_alive(self, raise_exception=True):
returncode = self.server_process.poll()
if returncode is not None:
if raise_exception:
raise SubprocessDiedError(
"Process has already exited. Stderr: %s"
% (self.server_process.stderr.read())
)
return False
return True
def initialize(
self, msg_id=None, process_id=None, root_uri=u"", workspace_folders=()
):
from robocorp_ls_core.options import NO_TIMEOUT, USE_TIMEOUTS
self._check_process_alive()
msg_id = msg_id if msg_id is not None else self.next_id()
return self.request(
{
"jsonrpc": "2.0",
"id": msg_id,
"method": "initialize",
"params": {
"processId": process_id,
"rootUri": root_uri,
"workspaceFolders": workspace_folders,
},
},
timeout=30 if USE_TIMEOUTS else NO_TIMEOUT,
)
def get_version(self):
"""
        :return: The server's version string, cached after the first successful request, or an error message if the server did not answer.
"""
if self._version is None:
self._check_process_alive()
msg_id = self.next_id()
msg = self.request(
{"jsonrpc": "2.0", "id": msg_id, "method": "version"}, None
)
if msg is None:
self._check_process_alive()
return "Unable to get version."
version = msg.get("result", "N/A")
self._version = version
return self._version
def lint(self, doc_uri) -> list:
self._check_process_alive()
msg_id = self.next_id()
return self.request(
{
"jsonrpc": "2.0",
"id": msg_id,
"method": "lint",
"params": {"doc_uri": doc_uri},
},
default=[],
)
def request_lint(self, doc_uri: str) -> Optional[IIdMessageMatcher]:
"""
:Note: async complete.
"""
return self.request_async(self._build_msg("lint", doc_uri=doc_uri))
def forward(self, method_name, params):
self._check_process_alive()
msg_id = self.next_id()
return self.request(
{"jsonrpc": "2.0", "id": msg_id, "method": method_name, "params": params}
)
def forward_async(self, method_name, params) -> Optional[IIdMessageMatcher]:
"""
:Note: async complete.
"""
self._check_process_alive()
msg_id = self.next_id()
return self.request_async(
{"jsonrpc": "2.0", "id": msg_id, "method": method_name, "params": params}
)
def open(self, uri, version, source):
self.forward(
"textDocument/didOpen",
{"textDocument": {"uri": uri, "version": version, "text": source}},
)
def _build_msg(self, method_name, **params):
self._check_process_alive()
msg_id = self.next_id()
return {"jsonrpc": "2.0", "id": msg_id, "method": method_name, "params": params}
def request_section_name_complete(
self, doc_uri, line, col
) -> Optional[IIdMessageMatcher]:
"""
:Note: async complete.
"""
return self.request_async(
self._build_msg("sectionNameComplete", doc_uri=doc_uri, line=line, col=col)
)
def request_keyword_complete(
self, doc_uri, line, col
) -> Optional[IIdMessageMatcher]:
"""
:Note: async complete.
"""
return self.request_async(
self._build_msg("keywordComplete", doc_uri=doc_uri, line=line, col=col)
)
def request_complete_all(self, doc_uri, line, col) -> Optional[IIdMessageMatcher]:
"""
Completes: sectionName, keyword, variables
:Note: async complete.
"""
return self.request_async(
self._build_msg("completeAll", doc_uri=doc_uri, line=line, col=col)
)
def request_find_definition(
self, doc_uri, line, col
) -> Optional[IIdMessageMatcher]:
"""
:Note: async complete.
"""
return self.request_async(
self._build_msg("findDefinition", doc_uri=doc_uri, line=line, col=col)
)
def request_source_format(
self, text_document, options
) -> Optional[IIdMessageMatcher]:
"""
:Note: async complete.
"""
return self.request_async(
self._build_msg("codeFormat", text_document=text_document, options=options)
)
def request_signature_help(self, doc_uri, line, col) -> Optional[IIdMessageMatcher]:
"""
:Note: async complete.
"""
return self.request_async(
self._build_msg("signatureHelp", doc_uri=doc_uri, line=line, col=col)
)
def request_workspace_symbols(
self, query: Optional[str] = None
) -> Optional[IIdMessageMatcher]:
"""
:Note: async complete.
"""
return self.request_async(self._build_msg("workspaceSymbols", query=query))
def request_cancel(self, message_id):
self._check_process_alive()
self.write(
{
"jsonrpc": "2.0",
"method": "$/cancelRequest",
"params": dict(id=message_id),
}
)
def __typecheckself__(self) -> None:
from robocorp_ls_core.protocols import check_implements
from robocorp_ls_core.protocols import IRobotFrameworkApiClient
_: IRobotFrameworkApiClient = check_implements(self)
``` |
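A hedged sketch of how this client can be exercised once the language-server subprocess and its stdio streams exist; the `writer`/`reader`/`server_process` arguments and the workspace URI are placeholders, and only methods defined above are used.

```python
# Usage sketch (assumption: the caller has already spawned the server process
# and wrapped its streams; that wiring lives elsewhere in the repository).
def lint_one_document(writer, reader, server_process, doc_uri, source):
    client = RobotFrameworkApiClient(writer, reader, server_process)
    client.initialize(process_id=None, root_uri="file:///tmp/workspace")
    print("server version:", client.get_version())

    # The API server needs the document contents before it can lint them.
    client.open(doc_uri, version=1, source=source)

    # Synchronous variant; request_lint() returns an IIdMessageMatcher for the
    # asynchronous flow used by the language server itself.
    diagnostics = client.lint(doc_uri)
    for diagnostic in diagnostics:
        print(diagnostic)
    return diagnostics
```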
{
"source": "JohanMabille/xeus-python-shell",
"score": 2
} |
#### File: xeus-python-shell/xeus_python_shell/debugger_utils.py
```python
import re
from IPython.core.getipython import get_ipython
# This import is required to have the next ones working...
from debugpy.server import api # noqa
from _pydevd_bundle import pydevd_frame_utils
from _pydevd_bundle.pydevd_suspended_frames import (
SuspendedFramesManager, _FramesTracker
)
class _FakeCode:
def __init__(self, co_filename, co_name):
self.co_filename = co_filename
self.co_name = co_name
class _FakeFrame:
def __init__(self, f_code, f_globals, f_locals):
self.f_code = f_code
self.f_globals = f_globals
self.f_locals = f_locals
self.f_back = None
class _DummyPyDB:
def __init__(self):
from _pydevd_bundle.pydevd_api import PyDevdAPI
self.variable_presentation = PyDevdAPI.VariablePresentation()
class VariableExplorer:
def __init__(self):
self.suspended_frame_manager = SuspendedFramesManager()
self.py_db = _DummyPyDB()
self.tracker = _FramesTracker(self.suspended_frame_manager, self.py_db)
self.frame = None
def track(self):
ip = get_ipython()
var = ip.user_ns
self.frame = _FakeFrame(
_FakeCode(
'<module>', ip.compile.get_filename('sys._getframe()')
),
var, var
)
self.tracker.track(
'thread1',
pydevd_frame_utils.create_frames_list_from_frame(self.frame)
)
def untrack_all(self):
self.tracker.untrack_all()
def get_children_variables(self, variable_ref=None):
var_ref = variable_ref
if not var_ref:
var_ref = id(self.frame)
variables = self.suspended_frame_manager.get_variable(var_ref)
return [x.get_var_data() for x in variables.get_children_variables()]
class XDebugger:
def __init__(self):
self.variable_explorer = VariableExplorer()
def _accept_variable(self, variable_name):
forbid_list = [
'__name__',
'__doc__',
'__package__',
'__loader__',
'__spec__',
'__annotations__',
'__builtins__',
'__builtin__',
'__display__',
'get_ipython',
'debugpy',
'exit',
'quit',
'In',
'Out',
'_oh',
'_dh',
'_',
'__',
'___'
]
cond = variable_name not in forbid_list
cond = cond and not bool(re.search(r'^_\d', variable_name))
cond = cond and variable_name[0:2] != '_i'
return cond
def build_variables_response(self, request, variables):
var_list = [
var for var in variables
if self._accept_variable(var['name'])
]
reply = {
'seq': request['seq'],
'type': 'response',
'request_seq': request['seq'],
'success': True,
'command': request['command'],
'body': {
'variables': var_list
}
}
return reply
def inspect_variables(self, message):
self.variable_explorer.untrack_all()
        # It looks like the implementation of untrack_all in ptvsd
        # destroys objects we need in track(). We have no choice but to
        # reinstantiate the object.
self.variable_explorer = VariableExplorer()
self.variable_explorer.track()
variables = self.variable_explorer.get_children_variables()
return self.build_variables_response(message, variables)
def variables(self, message):
        # This intentionally handles only the case where the code
# did not hit a breakpoint
variables = self.variable_explorer.get_children_variables(
message['arguments']['variablesReference']
)
return self.build_variables_response(message, variables)
``` |
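A rough sketch of driving `XDebugger.inspect_variables` with a hand-built DAP-style request. The request shape is inferred from the fields the methods above actually read (`seq`, `command`, `arguments`), the reply keys come from `build_variables_response`, and the per-variable keys (`name`, `value`, `variablesReference`) are assumptions about what pydevd's `get_var_data()` returns. It only works inside a live IPython session, since `track()` calls `get_ipython()`.

```python
# Sketch only: requires an IPython session so get_ipython() returns a shell.
debugger = XDebugger()

request = {"seq": 1, "type": "request", "command": "variables", "arguments": {}}
reply = debugger.inspect_variables(request)
for var in reply["body"]["variables"]:
    # 'name'/'value' keys are assumed from pydevd's variable data.
    print(var["name"], "=", var.get("value"))

# Drilling into an expandable entry reuses the reference it carries:
ref = reply["body"]["variables"][0].get("variablesReference")
if ref:
    child_request = {"seq": 2, "type": "request", "command": "variables",
                     "arguments": {"variablesReference": ref}}
    children = debugger.variables(child_request)  # same response shape as above
```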
{
"source": "Johan-Mi/MathBot",
"score": 3
} |
#### File: Johan-Mi/MathBot/main.py
```python
import math
import discord
from lark import Lark, Transformer
client = discord.Client()
PREFIX = "="
class CalcTransformer(Transformer): # pylint: disable=too-few-public-methods
"""Transformer for the parser."""
@staticmethod
def _num(args):
return float(args[0])
@staticmethod
def _add(args):
return args[0] + args[1]
@staticmethod
def _sub(args):
return args[0] - args[1]
@staticmethod
def _mul(args):
return args[0] * args[1]
@staticmethod
def _div(args):
return args[0] / args[1]
@staticmethod
def _negate(args):
return -args[0]
@staticmethod
def _exp(args):
return args[0]**args[1]
@staticmethod
def _func_call(args):
def unknown_func(_):
raise NameError(f"Function {args[0]}() does not exist")
return {
"sqrt": math.sqrt,
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"asin": math.asin,
"acos": math.acos,
"atan": math.atan,
"log": math.log10,
"ln": math.log,
"abs": abs,
}.get(args[0], unknown_func)(args[1])
parser = Lark.open("grammar.lark", parser="lalr", transformer=CalcTransformer)
@client.event
async def on_ready():
"""Lets you know when the bot starts."""
print(f"Discord version: {discord.__version__}")
print(f"Logged in as {client.user}")
@client.event
async def on_message(message):
"""Responds when someone else sends a message."""
if message.author == client.user:
return
if message.content.startswith(PREFIX):
expr = message.content[len(PREFIX):]
try:
result = f"{expr} = {parser.parse(expr)}"
except ZeroDivisionError:
result = "Tried to divide by zero!"
except Exception as err:
result = str(err)
await message.channel.send(result)
def main():
"""Runs the bot with the token from the file called 'token'."""
with open("token") as token_file:
        token = token_file.read().strip()  # drop a trailing newline so login does not fail
client.run(token)
if __name__ == "__main__":
main()
``` |
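The grammar the bot actually loads lives in `grammar.lark`, which is not part of this excerpt, so its rule names cannot be confirmed here. The snippet below is a self-contained illustration of the same setup — an LALR parser with an inline `Transformer` — using its own assumed grammar and rule names rather than the bot's.

```python
# Self-contained illustration of the Lark pattern used above (parser="lalr"
# with an inline transformer). This grammar is an assumption for demonstration
# only; it is not the repository's grammar.lark.
from lark import Lark, Transformer

DEMO_GRAMMAR = r"""
    ?start: expr
    ?expr: term
         | expr "+" term -> add
         | expr "-" term -> sub
    ?term: number
         | term "*" number -> mul
    number: NUMBER
    %import common.NUMBER
    %import common.WS
    %ignore WS
"""

class DemoTransformer(Transformer):
    def number(self, args):
        return float(args[0])
    def add(self, args):
        return args[0] + args[1]
    def sub(self, args):
        return args[0] - args[1]
    def mul(self, args):
        return args[0] * args[1]

demo_parser = Lark(DEMO_GRAMMAR, parser="lalr", transformer=DemoTransformer())
print(demo_parser.parse("1 + 2 * 3 - 4"))  # -> 3.0
```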
{
"source": "Johan-Mi/Misc",
"score": 3
} |
#### File: BfToC/Python/main.py
```python
import re
from enum import Enum
class Token(Enum):
Input, Output, Inc, Move, LoopBegin, LoopEnd, Set = range(7)
def main():
with open('../program.bf') as prog_file:
prog = re.sub(r'[^-\+,\.<>\[\]]', '', prog_file.read())
prog = prog.replace('[-]', '0')
output = []
i = 0
while i < len(prog):
if prog[i] == '0':
output.append((Token.Set, 0))
elif prog[i] == ',':
output.append((Token.Input, None))
elif prog[i] == '.':
output.append((Token.Output, None))
elif prog[i] == '[':
output.append((Token.LoopBegin, None))
elif prog[i] == ']':
output.append((Token.LoopEnd, None))
elif prog[i] in '+-':
change_amount = 0
while prog[i] in '+-':
change_amount += 1 if prog[i] == '+' else -1
i += 1
i -= 1
output.append((Token.Inc, change_amount))
elif prog[i] in '<>':
change_amount = 0
while prog[i] in '<>':
change_amount += 1 if prog[i] == '>' else -1
i += 1
i -= 1
output.append((Token.Move, change_amount))
i += 1
with open('../output.c', 'w') as out:
out.write('#include <stdio.h>\n#include <stdlib.h>\n\nint main(void) {\n\tunsigned short ptr = 0;\n\tunsigned char memory[65536] = {0};\n\n')
indentation = 1
for token, value in output:
if token == Token.Set:
line = 'memory[ptr] = %d;' % value
elif token == Token.Input:
line = 'memory[ptr] = getchar();'
elif token == Token.Output:
line = 'putchar(memory[ptr]);'
elif token == Token.Inc:
if value == 1:
line = 'memory[ptr]++;'
elif value == -1:
line = 'memory[ptr]--;'
elif value > 0:
line = 'memory[ptr] += %d;' % value
else:
line = 'memory[ptr] -= %d;' % -value
elif token == Token.Move:
if value == 1:
line = 'ptr++;'
elif value == -1:
line = 'ptr--;'
elif value > 0:
line = 'ptr += %d;' % value
else:
line = 'ptr -= %d;' % -value
elif token == Token.LoopBegin:
line = 'while(memory[ptr]) {'
elif token == Token.LoopEnd:
line = '}'
indentation -= 1
out.write('\t' * indentation + line + '\n')
if token == Token.LoopBegin:
indentation += 1
out.write('\n\treturn EXIT_SUCCESS;\n}')
if __name__ == "__main__":
main()
```
#### File: Misc/FileTransfer/send.py
```python
import socket
def main():
filename = input('File name: ')
    host = input('IP address of receiver (Blank for localhost): ')
if not host:
host = '127.0.0.1'
    port = input('Port of receiver (Blank for 65432): ')
if port:
port = int(port)
else:
port = 65432
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((host, port))
with open(filename, 'rb') as f:
data = f.read(1024)
while data:
s.send(data)
data = f.read(1024)
if __name__ == "__main__":
main()
```
#### File: Misc/FourInARow/main.py
```python
EMPTY = 0
RED = 1
WHITE = 2
FULL = 3
def print_board(board):
"""Prints a four in a row board."""
print("\033[0m\033[2J\033[3J\033[;H┌───────┐")
for i in range(6):
print("│", end="")
for j in range(7):
if board[j][i] == EMPTY:
print("\033[0m◯", end="")
elif board[j][i] == RED:
print("\033[31m●", end="")
else:
print("\033[37m●", end="")
print("\033[0m│")
print("└1234567┘\n")
def check_win(board, turn):
"""Checks if a player has won the game."""
for i in range(6):
for j in range(4):
if all(board[j + k][i] == turn for k in range(4)):
return turn
for i in range(3):
for j in range(7):
if all(board[j][i + k] == turn for k in range(4)):
return turn
for j in range(4):
if all(board[j + k][i + k] == turn for k in range(4)) \
or all(board[j + k][i + 3 - k] == turn for k in range(4)):
return turn
if not any(EMPTY in i for i in board):
return FULL
return None
def main():
"""Creates a board and lets two people play against each other."""
board = []
for _ in range(7):
board.append([EMPTY] * 6)
turn = WHITE
while True:
print_board(board)
while True:
try:
x_pos = int(input("RED: " if turn == RED else "WHITE: "))
except ValueError:
continue
if 0 < x_pos < 8:
x_pos -= 1
if board[x_pos][0] == EMPTY:
y_pos = board[x_pos].count(EMPTY) - 1
board[x_pos][y_pos] = turn
break
res = check_win(board, turn)
if res == turn:
print_board(board)
print("Red won!" if turn == RED else "White won!")
break
if res == FULL:
print_board(board)
print("Draw!")
break
turn = RED if turn == WHITE else WHITE
if __name__ == "__main__":
main()
``` |
{
"source": "Johan-Mi/OwOifierBot",
"score": 3
} |
#### File: Johan-Mi/OwOifierBot/main.py
```python
from re import sub
from random import randint, choice
import discord
client = discord.Client()
faces = ("owo", "òwó", "ówò", "ºwº", "OwO", "ÒwÓ", "ÓwÒ", "oωo", "òωó", "óωò",
"ºωº", "OωO", "ÒωÓ", "ÓωÒ", "uwu", "ùwú", "úwù", "uωu", "ùωú", "úωù",
"UwU", "ÙwÚ", "ÚwÙ", "UωU", "ÙωÚ", "ÚωÙ", "·w·", "(·w·)", "(·ω·)",
";;w;;", ";;ω;;", ">w<", ">ω<", "≥w≤", "≥ω≤", "^w^", "^ω^", "ǭwǭ",
"ǭωǭ", "^-^", "(≧∇≦)", "(´·ω·`)")
@client.event
async def on_ready():
"""Lets you know when the bot starts."""
print(f"Discord version: {discord.__version__}")
print(f"Logged in as {client.user}")
@client.event
async def on_message(message):
    """Responds when someone else sends a message."""
if message.author == client.user:
return
if randint(0, 10) == 0:
txt = message.content
txt = sub("(?:r|l)", "w", txt)
txt = sub("(?:R|L)", "W", txt)
txt = sub("([nN])([aeiouAEIOU])", "\\1y\\2", txt)
        txt = txt.replace("ove", "uv")
txt = sub("\\!", f" {choice(faces)}", txt)
await message.channel.send(txt)
def main():
"""Runs the bot with the token from the file called 'token'."""
with open("token") as token_file:
token = token_file.read()
client.run(token)
if __name__ == "__main__":
main()
``` |
{
"source": "Johan-Mi/SudokuSolver",
"score": 4
} |
#### File: SudokuSolver/Python/main.py
```python
import sys
def print_board(board):
"""Prints a 9x9 list of numbers as a sudoku board."""
for i in range(9):
for j in range(9):
print(board[i][j] or " ", end="")
if j in (2, 5):
print("│", end="")
print()
if i in (2, 5):
print("───┼───┼───")
print()
def solve(board, x_pos, y_pos):
"""Solves a sudoku board, given the x and y coordinates to start on."""
while board[y_pos][x_pos]:
if x_pos == 8 and y_pos == 8:
return True
x_pos += 1
if x_pos == 9:
y_pos += 1
x_pos = 0
possible = set(range(1, 10))
for i in range(9):
possible.discard(board[y_pos][i])
possible.discard(board[i][x_pos])
possible.discard(
board[y_pos - y_pos % 3 + i % 3][x_pos - x_pos % 3 + i // 3])
next_x = (x_pos + 1) % 9
next_y = y_pos + (x_pos == 8)
for num in possible:
board[y_pos][x_pos] = num
if (x_pos == 8 and y_pos == 8) or solve(board, next_x, next_y):
return True
board[y_pos][x_pos] = 0
return False
def main():
"""Reads a sudoku board from a specified file and solves it."""
if len(sys.argv) != 2:
sys.exit(f"Usage: {sys.argv[0]} (name of sudoku file)")
with open(sys.argv[1]) as sudoku_file:
file_content = sudoku_file.read()
board = [[None for i in range(9)] for j in range(9)]
for i in range(9):
for j in range(9):
curr_char = file_content[i * 10 + j]
board[i][j] = 0 if curr_char in " .-_" else int(curr_char)
print_board(board)
if solve(board, 0, 0):
print_board(board)
else:
sys.exit("No solution found")
if __name__ == "__main__":
main()
``` |
{
"source": "johanmodin/poe-timeless-jewel-multitool",
"score": 2
} |
#### File: poe-timeless-jewel-multitool/bot/tree_navigator.py
```python
import logging
import cv2
import numpy as np
import pytesseract
import os
import time
import json
import re
from multiprocessing import Pool
from Levenshtein import distance
from .input_handler import InputHandler
from .grabscreen import grab_screen
from .utils import get_config, filter_mod
# This is a position of the inventory as fraction of the resolution
OWN_INVENTORY_ORIGIN = (0.6769531, 0.567361)
# These are the sockets positions as measured on 2560x1440 resolution
# with X_SCALE and Y_SCALE applied, i.e., scale * SOCKETS[i] is the i:th
# socket's absolute pixel position with origin in the middle of the skill tree.
# I think the SCALE variables are in fact useless and are relics from the
# positions initially being measured at a view which wasn't zoomed out maximally
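# As a rough sketch (an assumption derived from _tree_pos_to_xy below, not part
# of the original measurements): with the in-game position at the tree origin,
# socket i ends up on screen at roughly
#   screen_x = SOCKETS[i][0] * X_SCALE * px_multiplier + resolution_x / 2
#   screen_y = SOCKETS[i][1] * Y_SCALE * px_multiplier + resolution_y / 2
# where px_multiplier = resolution_x / 2560.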
SOCKETS = {
1: (-650.565, -376.013),
2: (648.905, -396.45),
3: (6.3354, 765.658),
4: (-1700.9, 2424.17),
5: (-2800.66, -215.34),
6: (-1435.02, -2635.39),
7: (1855.53, -2360.1),
8: (2835.84, 230.5361),
9: (1225.37, 2625.76),
10: (-120.12471, 5195.44),
11: (-3580.19, 5905.92),
12: (-5395.86, 2120.42),
13: (-6030.95, -115.7007),
14: (-5400.59, -1985.18),
15: (-3035.14, -5400.87),
16: (160.10728, -5196.32),
17: (3382.05, -5195.21),
18: (5730.2, -1625.75),
19: (6465.24, 190.3341),
20: (5542.76, 1690.07),
21: (3322.76, 6090.5),
}
# The offsets are specified in the same fashion as SOCKETS and are rough
# guesses which allow us to move to the general area and later refine the
# position of the socket through template matching
SOCKET_MOVE_OFFSET = {
1: (0, 150),
2: (0, 150),
3: (0, 200),
4: (0, 150),
5: (-300, 200),
6: (-100, 150),
7: (-150, 0),
8: (0, -150),
9: (-100, -125),
10: (170, 0),
11: (-400, -900),
12: (0, 300),
13: (400, 200),
14: (-250, -150),
15: (-100, -150),
16: (150, -150),
17: (150, 500), #
18: (-300, 400),
19: (-1000, -150),
20: (-500, 500),
21: (100, -1000),
}
# Scalers for the SOCKETS positions to convert them to 2560x1440 pixel positions
X_SCALE = 0.2
Y_SCALE = 0.2
CIRCLE_EFFECTIVE_RADIUS = 300
IMAGE_FOLDER = "data/images/"
# We're using a lot of template matching and all templates are defined here
# with matching thresholds (scores) and sizes per resolution
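# For example (a sketch of how these entries are consumed by _match_image
# further down; "screen", "template" and "mask" are illustrative variables):
#   res = cv2.matchTemplate(screen, template, cv2.TM_CCORR_NORMED, mask=mask)
#   hits = np.where(res >= TEMPLATES["Notable.png"]["1440p_threshold"])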
TEMPLATES = {
"AmbidexterityCluster.png": {
"1440p_size": (34, 34),
"1440p_threshold": 0.95,
"1080p_size": (26, 26),
"1080p_threshold": 0.95,
},
"FreeSpace.png": {
"1440p_size": (41, 41),
"1440p_threshold": 0.98,
"1080p_size": (30, 30),
"1080p_threshold": 0.98,
},
"Notable.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.89,
"1080p_size": (23, 23),
"1080p_threshold": 0.85,
},
"NotableAllocated.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.93,
"1080p_size": (23, 23),
"1080p_threshold": 0.90,
},
"Jewel.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.92,
"1080p_size": (23, 23),
"1080p_threshold": 0.92,
},
"JewelSocketed.png": {
"1440p_size": (30, 30),
"1440p_threshold": 0.9,
"1080p_size": (23, 23),
"1080p_threshold": 0.9,
},
"LargeJewel.png": {
"1440p_size": (39, 39),
"1440p_threshold": 0.9,
"1080p_size": (30, 30),
"1080p_threshold": 0.88,
},
"LargeJewelSocketed.png": {
"1440p_size": (39, 39),
"1440p_threshold": 0.9,
"1080p_size": (30, 30),
"1080p_threshold": 0.88,
},
"Skill.png": {
"1440p_size": (21, 21),
"1440p_threshold": 0.87,
"1080p_size": (15, 15),
"1080p_threshold": 0.91,
},
"SkillAllocated.png": {
"1440p_size": (21, 21),
"1440p_threshold": 0.93,
"1080p_size": (15, 15),
"1080p_threshold": 0.91,
},
}
# Defines the position of the text box which is cropped out and OCR'd per node
TXT_BOX = {"x": 32, "y": 0, "w": 900, "h": 320}
mod_files = {
"passives": "data/passives.json",
"passivesAlt": "data/passivesAlternatives.json",
"passivesAdd": "data/passivesAdditions.json",
"passivesVaalAdd": "data/passivesVaalAdditions.json",
}
class TreeNavigator:
def __init__(self, resolution, halt_value):
self.resolution = resolution
self.input_handler = InputHandler(self.resolution)
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
datefmt="[%H:%M:%S %d-%m-%Y]",
)
self.log = logging.getLogger("tree_nav")
self.config = get_config("tree_nav")
        self.find_mod_value_re = re.compile(r"(\(?(?:[0-9]*\.?[0-9]-?)+\)?)")
self.nonalpha_re = re.compile("[^a-zA-Z]")
self.origin_pos = (self.resolution[0] / 2, self.resolution[1] / 2)
self.ingame_pos = [0, 0]
self.px_multiplier = self.resolution[0] / 2560
self.resolution_prefix = str(self.resolution[1]) + "p_"
self.templates_and_masks = self.load_templates()
self.passive_mods, self.passive_names = self.generate_good_strings(mod_files)
self.passive_nodes = list(self.passive_mods.keys()) + list(
self.passive_names.keys()
)
self.halt = halt_value
self.first_run = True
def _run(self):
return not bool(self.halt.value)
def eval_jewel(self, item_location):
self.ingame_pos = [0, 0]
item_name, item_desc = self._setup(item_location, copy=True)
pool = Pool(self.config["ocr_threads"])
jobs = {}
if self.first_run:
            # We just initiated the module and are not sure where we are
# Thus, we better rectify our position estimate before starting
self._refind_position(SOCKETS[1])
self.first_run = False
for socket_id in sorted(SOCKETS.keys()):
if not self._run():
return None, None, None
found_socket = self._move_screen_to_socket(socket_id)
if not found_socket and socket_id == 1:
self.log.info("We are lost - trying to find known location")
# We just initiated the search and have no clue where we are
# Thus, we better rectify our position estimate before starting
self._refind_position(SOCKETS[1])
socket_nodes = self._analyze_nodes(socket_id)
            # Convert stats for the socket from image to lines in a separate process
self.log.info("Performing asynchronous OCR")
jobs[socket_id] = pool.map_async(OCR.node_to_strings, socket_nodes)
self.log.info("Analyzed socket %s" % socket_id)
# Return to socket 1 to ease next search
self._move_to_tree_pos_using_spaces(SOCKETS[1])
self._setup(item_location)
self.log.info("Waiting for last OCR to finish")
item_stats = [
{
"socket_id": socket_id,
"socket_nodes": self._filter_ocr_lines(
jobs[socket_id].get(timeout=300)
),
}
for socket_id in jobs
]
pool.close()
pool.join()
return item_name, item_desc, item_stats
def load_templates(self, threshold=128):
templates_and_masks = {}
for template_name in TEMPLATES.keys():
template_path = os.path.join(IMAGE_FOLDER, template_name)
img = cv2.imread(template_path, cv2.IMREAD_UNCHANGED)
size = TEMPLATES[template_name][self.resolution_prefix + "size"]
channels = cv2.split(img)
mask = None
if len(channels) > 3:
mask = np.array(channels[3])
mask[mask <= threshold] = 0
mask[mask > threshold] = 255
mask = cv2.resize(mask, size)
img = cv2.imread(template_path, 0)
img = cv2.resize(img, size)
templates_and_masks[template_name] = {"image": img, "mask": mask}
return templates_and_masks
def _move_screen_to_socket(self, socket_id):
self.log.debug("Moving close to socket %s" % socket_id)
move_offset_tx, move_offset_ty = SOCKET_MOVE_OFFSET[socket_id]
move_offset = self._tree_pos_to_xy(
[move_offset_tx, move_offset_ty], offset=True
)
socket_tx, socket_ty = SOCKETS[socket_id]
socket_xy = self._tree_pos_to_xy([socket_tx, socket_ty])
compensation_offset = self._find_socket(socket_xy)
if compensation_offset is None:
found_socket = False
compensation_offset = [0, 0]
else:
found_socket = True
self.log.debug("Compensated navigation with %s" % compensation_offset)
move_to = [
socket_xy[0] + compensation_offset[0] + move_offset[0],
socket_xy[1] + compensation_offset[1] + move_offset[1],
]
x_offset = move_to[0] - self.resolution[0] / 2
y_offset = move_to[1] - self.resolution[1] / 2
self.input_handler.click(
*move_to, *move_to, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(self.origin_pos[0], self.origin_pos[1], speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
self.ingame_pos = [socket_tx + move_offset_tx, socket_ty + move_offset_ty]
return found_socket
def _refind_position(self, desired_tree_pos):
# If the current location has been determined to be incorrect
# we can go to the bottom right corner and find a cluster close
# to socket 21, namely the Ambidexterity cluster
# This is a known location, which can then be used to calculate
# our way to a desired position
self.log.debug("Centering screen position")
# Correct our tree position to a known value
self._locate_screen_using_ambidexterity()
# Find our way to the desired position
self._move_to_tree_pos_using_spaces(desired_tree_pos)
def _move_to_tree_pos_using_spaces(self, desired_tree_pos, max_position_error=5):
dx = desired_tree_pos[0] - self.ingame_pos[0]
dy = desired_tree_pos[1] - self.ingame_pos[1]
self.log.debug("Moving to tree pos using spaces. Deltas: ({}, {})".format(dx, dy))
while (abs(dx) + abs(dy)) > max_position_error:
# Choose quadrant to find spaces in based on dx, dy
right, bottom = dx >= 0, dy >= 0
if right and not bottom:
quadrant = 0
elif not right and not bottom:
quadrant = 1
elif not right and bottom:
quadrant = 2
elif right and bottom:
quadrant = 3
# Find empty spaces that we can drag from
spaces = self._find_empty_space(quadrant)
if spaces is None:
raise ValueError("Could not find an empty space, quitting.")
# Choose a random empty space for maximum drag
chosen_space = spaces[np.random.randint(spaces.shape[0])]
# How far to drag the window to end up in the optimal place
screen_move_x, screen_move_y = self._tree_pos_to_xy([dx, dy],
offset=True)
# Calculate where our drag should end up to perform the move
drag_x = chosen_space[0] - screen_move_x
drag_y = chosen_space[1] - screen_move_y
# We should only drag within the screen's resolution
# Additionally, we use 100px margin to not trigger tree scroll
drag_x = np.clip(drag_x, 100, self.resolution[0] - 100)
drag_y = np.clip(drag_y, 100, self.resolution[1] - 100)
# Drag
self.input_handler.click(
*chosen_space, *chosen_space, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(drag_x, drag_y, speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
# Calculate how far we've actually moved
effective_move_x = chosen_space[0] - drag_x
effective_move_y = chosen_space[1] - drag_y
# Update our internal tree position
self.ingame_pos = self._add_xy_offset_to_tree_pos(
[effective_move_x, effective_move_y]
)
# Figure out how much we have left to move
dx = desired_tree_pos[0] - self.ingame_pos[0]
dy = desired_tree_pos[1] - self.ingame_pos[1]
def _locate_screen_using_ambidexterity(self):
# Essentially, this is _move_to_tree_pos_using_spaces but
# only used to find the tree position by navigating to a known point
self.log.debug("Moving to ambidexterity")
ambidexterity_position = None
assumed_ambidexterity_position = (0.25234375, 0.20555556)
while ambidexterity_position is None:
# Find empty spaces that we can drag from
spaces = self._find_empty_space(3)
if spaces is None:
raise ValueError("Could not find an empty space, quitting.")
# Choose the farthest empty space for maximum drag
chosen_space = spaces[np.argmax(spaces.sum(axis=1))]
# An arbitrary position in the top left region
drag_location = (200, 200)
# Drag
self.input_handler.click(
*chosen_space, *chosen_space, button=None, raw=True, speed_factor=1
)
self.input_handler.drag(drag_location[0], drag_location[1], speed_factor=1)
self.input_handler.rnd_sleep(min=200, mean=300, sigma=100)
# Are we there yet?
# i.e., have we reached Ambidexterity, which in that case is at
# roughly (646, 296) in absolute 1440p screen px position
ambidexterity_position = self._find_icon(
assumed_ambidexterity_position, "AmbidexterityCluster.png"
)
# Ambidexterity is located (-560, 850) from socket 21
# Thus, this plus any (scaled) offset found by the template matcher is
# our tree position
self.ingame_pos = [
SOCKETS[21][0]
- 560
+ ambidexterity_position[0] / (X_SCALE * self.px_multiplier),
SOCKETS[21][1]
+ 850
+ ambidexterity_position[1] / (Y_SCALE * self.px_multiplier),
]
def _find_empty_space(self, quadrant):
# Finds empty spaces that can be used to drag the screen
# Used to recenter the screen
# The quadrant argument is an int in [0, 1, 2, 3], corresponding to
# [top-right, top-left, bottom-left, bottom-right]
quadrant_translation = {0: [0.5, 0], 1: [0, 0], 2: [0, 0.5], 3: [0.5, 0.5]}
fractional_lt = quadrant_translation[quadrant]
lt = [
int(fractional_lt[0] * self.resolution[0]),
int(fractional_lt[1] * self.resolution[1]),
]
rb = [int(lt[0] + self.resolution[0] / 2),
int(lt[1] + self.resolution[1] / 2)]
searched_area = grab_screen(tuple(lt + rb))
searched_area = cv2.cvtColor(searched_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros_like(searched_area)
centered_coordinates = self._match_image(searched_area, "FreeSpace.png")
locations[tuple(centered_coordinates)] = 1
rel_space_pos_yx = np.argwhere(locations == 1)
rel_space_pos = rel_space_pos_yx.T[::-1].T
if len(rel_space_pos) == 0:
self.log.warning("Could not find any free spaces in tree!")
return None
screen_space_pos = rel_space_pos + lt
# remove positions that are close to edges as these trigger scroll
screen_space_pos = screen_space_pos[(screen_space_pos[:, 0] > 100) &
(screen_space_pos[:, 1] > 100) &
(screen_space_pos[:, 0] < self.resolution[0] - 100) &
(screen_space_pos[:, 1] < self.resolution[1] - 100)]
return screen_space_pos
def _find_icon(self, assumed_position, icon_name):
        # Finds the ambidexterity cluster icon in the region it sits in
# if we are at the bottom-right corner of the tree
# The exact location is used to refine our knowledge of our position
abs_assumed_position = (
assumed_position[0] * self.resolution[0],
assumed_position[1] * self.resolution[1],
)
margin_side = int(0.05 * self.resolution[0])
lt = [
int(abs_assumed_position[0] - margin_side / 2),
int(abs_assumed_position[1] - margin_side / 2),
]
rb = [
int(abs_assumed_position[0] + margin_side / 2),
int(abs_assumed_position[1] + margin_side / 2),
]
searched_area = grab_screen(tuple(lt + rb))
searched_area = cv2.cvtColor(searched_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros((margin_side, margin_side))
centered_coordinates = self._match_image(searched_area, icon_name)
locations[tuple(centered_coordinates)] = 1
rel_icon_pos_yx = np.argwhere(locations == 1)
rel_icon_pos = rel_icon_pos_yx.T[::-1].T
if len(rel_icon_pos) == 0:
return None
icon_offset = [
int(rel_icon_pos[0][0] - margin_side / 2 + abs_assumed_position[0]),
int(rel_icon_pos[0][1] - margin_side / 2 + abs_assumed_position[1]),
]
return icon_offset
def _click_socket(self, socket_pos, insert=True):
self.log.debug("Clicking socket")
xy = socket_pos
lt = [xy[0] - 5 * self.px_multiplier, xy[1] - 5 * self.px_multiplier]
rb = [xy[0] + 5 * self.px_multiplier, xy[1] + 5 * self.px_multiplier]
if insert:
self.input_handler.click(*lt, *rb, button="left", raw=True)
else:
self.input_handler.click(*lt, *rb, button="right", raw=True)
self.input_handler.rnd_sleep(min=200, mean=300)
def _tree_pos_to_xy(self, pos, offset=False):
if offset:
return [
pos[0] * X_SCALE * self.px_multiplier,
pos[1] * Y_SCALE * self.px_multiplier,
]
uncentered_xy = [
(pos[0] - self.ingame_pos[0]) * X_SCALE * self.px_multiplier,
(pos[1] - self.ingame_pos[1]) * Y_SCALE * self.px_multiplier,
]
xy = [
int(uncentered_xy[0] + self.origin_pos[0]),
int(uncentered_xy[1] + self.origin_pos[1]),
]
return xy
def _add_xy_offset_to_tree_pos(self, offset):
tree_pos = [
self.ingame_pos[0] + offset[0] / (X_SCALE * self.px_multiplier),
self.ingame_pos[1] + offset[1] / (Y_SCALE * self.px_multiplier),
]
return tree_pos
def _analyze_nodes(self, socket_id):
self.log.info("Analyzing nodes for socket id %s" % socket_id)
nodes = []
node_locations, socket_pos = self._find_nodes(socket_id)
self.log.debug(
"Found %s nodes for socket id %s" % (len(node_locations), socket_id)
)
self._click_socket(socket_pos)
for location in node_locations:
if not self._run():
return
node_stats = self._get_node_data(location)
node = {
"location": self._socket_offset_pos(socket_pos, location),
"stats": node_stats,
}
nodes.append(node)
self._click_socket(socket_pos, insert=False)
return nodes
def _socket_offset_pos(self, socket_pos, node_location):
circle_radius = CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier
return [
(node_location[0] - socket_pos[0]) / circle_radius,
(node_location[1] - socket_pos[1]) / circle_radius,
]
def _filter_ocr_lines(self, nodes_lines, max_dist=4):
filtered_nodes = []
for node in nodes_lines:
names = []
mods = []
for line in node["stats"]:
filtered_line = self._filter_nonalpha(line)
if len(filtered_line) < 4 or filtered_line == "Unallocated":
continue
if filtered_line in self.passive_names:
names.append(self.passive_names[filtered_line])
elif filtered_line in self.passive_mods:
filtered_mod, value = filter_mod(line, regex=self.nonalpha_re)
new_mod = re.sub(
self.find_mod_value_re,
str(value),
self.passive_mods[filtered_line],
count=1,
)
mods.append(new_mod)
else:
# Sometimes the OCR might return strange results. If so,
# as a last resort, check levenshtein distance to closest
# node. This shouldn't happen often.
best_distance = 99999999999
best_match = None
for possible_mod in self.passive_nodes:
d = distance(filtered_line, possible_mod)
if d < best_distance:
best_distance = d
best_match = possible_mod
if best_distance > max_dist:
continue
if best_match in self.passive_names:
names.append(self.passive_names[best_match])
elif best_match in self.passive_mods:
filtered_mod, value = filter_mod(line, regex=self.nonalpha_re)
new_mod = re.sub(
self.find_mod_value_re,
str(value),
self.passive_mods[best_match],
count=1,
)
mods.append(new_mod)
if mods:
filtered_nodes.append(
{"location": node["location"], "name": names, "mods": mods}
)
return filtered_nodes
def _find_nodes(self, socket_id):
self.input_handler.click(0.5, 0.07, 0.51, 0.083, button=None)
socket_pos = self._tree_pos_to_xy(SOCKETS[socket_id])
socket_offset = self._find_socket(socket_pos)
if socket_offset is None:
found_socket = False
socket_offset = [0, 0]
else:
found_socket = True
self.log.debug("Jewel socket offset correction: %s" % socket_offset)
socket_pos[0] += socket_offset[0]
socket_pos[1] += socket_offset[1]
        # Add some margin so that we don't accidentally cut any nodes off
margin = 20 * self.px_multiplier
x1 = int(socket_pos[0] - CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier - margin)
y1 = int(socket_pos[1] - CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier - margin)
x2 = int(x1 + 2 * CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier + 2 * margin)
y2 = int(y1 + 2 * CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier + 2 * margin)
nodes = self._get_node_locations_from_screen((x1, y1, x2, y2))
nodes = self._filter_nodes(nodes, socket_pos)
return nodes, socket_pos
def _find_socket(self, socket_pos, side_len=100):
lt = [int(socket_pos[0] - side_len / 2), int(socket_pos[1] - side_len / 2)]
rb = [lt[0] + side_len, lt[1] + side_len]
socket_area = grab_screen(tuple(lt + rb))
socket_area = cv2.cvtColor(socket_area, cv2.COLOR_BGR2GRAY)
locations = np.zeros((side_len, side_len))
for template_name in [
"Jewel.png",
"JewelSocketed.png",
"LargeJewel.png",
"LargeJewelSocketed.png",
]:
centered_coordinates = self._match_image(socket_area, template_name)
locations[tuple(centered_coordinates)] = 1
rel_node_pos_yx = np.argwhere(locations == 1)
rel_node_pos = rel_node_pos_yx.T[::-1].T
if len(rel_node_pos) == 0:
self.log.warning("Could not find any jewel socket for compensating offset!")
return None
socket_offset = [
int(rel_node_pos[0][0] - side_len / 2),
int(rel_node_pos[0][1] - side_len / 2),
]
return socket_offset
def _filter_nodes(self, nodes, socket_pos, duplicate_min_dist=10):
# filter duplicate nodes
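        # Representing each (x, y) coordinate as a complex number makes the full
        # pairwise Euclidean distance matrix a single abs(z.T - z) computation.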
kept_node_indices = [len(nodes) - 1]
z = np.array([[complex(c[0], c[1]) for c in nodes]])
dist_matrix = abs(z.T - z)
for node_idx in range(len(nodes) - 1):
if np.min(dist_matrix[node_idx + 1 :, node_idx]) >= duplicate_min_dist:
kept_node_indices.append(node_idx)
nodes = np.array(nodes)
nodes = nodes[kept_node_indices, :]
# filter nodes outside jewel socket radius
distances_to_socket = np.sqrt(np.sum((nodes - socket_pos) ** 2, axis=1))
nodes = nodes[
distances_to_socket <= CIRCLE_EFFECTIVE_RADIUS * self.px_multiplier
]
return nodes
def _get_node_locations_from_screen(self, box):
jewel_area_bgr = grab_screen(box)
jewel_area_gray = cv2.cvtColor(jewel_area_bgr, cv2.COLOR_BGR2GRAY)
locations = np.zeros((box[2] - box[0], box[3] - box[1]))
for template_name in [
"Notable.png",
"NotableAllocated.png",
"Skill.png",
"SkillAllocated.png",
]:
centered_coordinates = self._match_image(jewel_area_gray, template_name)
locations[tuple(centered_coordinates)] = 1
rel_node_pos_yx = np.argwhere(locations == 1)
rel_node_pos = rel_node_pos_yx.T[::-1].T
abs_node_pos = rel_node_pos + [box[0], box[1]]
return abs_node_pos
def _match_image(self, screen, template_name):
template = self.templates_and_masks[template_name]["image"]
mask = self.templates_and_masks[template_name]["mask"]
res = cv2.matchTemplate(screen, template, cv2.TM_CCORR_NORMED, mask=mask)
coordinates = np.where(
res >= TEMPLATES[template_name][self.resolution_prefix + "threshold"]
)
icon_size = (
int(TEMPLATES[template_name][self.resolution_prefix + "size"][0]),
int(TEMPLATES[template_name][self.resolution_prefix + "size"][1]),
)
icon_center_offset = [int(icon_size[0] / 2), int(icon_size[1] / 2)]
centered_coordinates = [
coordinates[0] + icon_center_offset[0],
coordinates[1] + icon_center_offset[1],
]
return centered_coordinates
def _get_node_data(self, location):
self.log.debug("Getting node stats at location %s" % location)
lt = [
location[0] - 7 * self.px_multiplier,
location[1] - 7 * self.px_multiplier,
]
rb = [
location[0] + 7 * self.px_multiplier,
location[1] + 7 * self.px_multiplier,
]
self.input_handler.click(
*lt,
*rb,
button=None,
raw=True,
speed_factor=self.config["node_search_speed_factor"]
)
textbox_lt = location + [TXT_BOX["x"], TXT_BOX["y"]]
textbox_rb = textbox_lt + [
int(TXT_BOX["w"] * self.px_multiplier),
int(TXT_BOX["h"] * self.px_multiplier),
]
jewel_area_bgr = grab_screen(tuple(np.concatenate([textbox_lt, textbox_rb])))
return jewel_area_bgr
def _setup(self, item_location, copy=False):
item_desc = None
item_name = None
self.input_handler.click_hotkey("p")
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
self.input_handler.click_hotkey("i")
if copy:
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
item = self.input_handler.inventory_copy(
*item_location, OWN_INVENTORY_ORIGIN, speed_factor=2
)
item_desc = item.split("\n")[9].strip()
item_name = item.split("\n")[1].strip()
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
self.input_handler.inventory_click(*item_location, OWN_INVENTORY_ORIGIN)
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
self.input_handler.click_hotkey("i")
self.input_handler.rnd_sleep(min=150, mean=200, sigma=100)
return item_name, item_desc
def generate_good_strings(self, files):
mods = {}
names = {}
for name in files:
path = files[name]
with open(path) as f:
data = json.load(f)
if isinstance(data, dict):
names.update({self._filter_nonalpha(k): k for k in data.keys()})
for key in data.keys():
if isinstance(data[key]["passives"][0], list):
mods.update(
{
self._filter_nonalpha(e): e
for e in data[key]["passives"][0]
}
)
else:
mods.update(
{
self._filter_nonalpha(e): e
for e in data[key]["passives"]
}
)
else:
mods.update({self._filter_nonalpha(e): e for e in data})
mods.pop("", None)
return mods, names
def _filter_nonalpha(self, value):
return re.sub(self.nonalpha_re, "", value)
# Adapted from https://github.com/klayveR/python-poe-timeless-jewel
class OCR:
@staticmethod
def clahe(img, clip_limit=2.0, grid_size=(8, 8)):
clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=grid_size)
return clahe.apply(img)
@staticmethod
def getFilteredImage(src):
srcH, srcW = src.shape[:2]
src = cv2.resize(src, (int(srcW * 2), int(srcH * 2)))
# HSV thresholding to get rid of as much background as possible
src = cv2.cvtColor(src, cv2.COLOR_BGRA2BGR)
hsv = cv2.cvtColor(src.copy(), cv2.COLOR_BGR2HSV)
# Define 2 masks and combine them
# mask1 for blue affix text
# mask2 for yellow passive node name
lower_blue = np.array([80, 10, 40])
upper_blue = np.array([130, 180, 255])
lower_yellow = np.array([10, 10, 190])
upper_yellow = np.array([30, 200, 255])
mask1 = cv2.inRange(hsv, lower_blue, upper_blue)
mask2 = cv2.inRange(hsv, lower_yellow, upper_yellow)
mask = cv2.bitwise_or(mask1, mask2)
result = cv2.bitwise_and(src, src, mask=mask)
b, g, r = cv2.split(result)
b = OCR.clahe(b, 5, (5, 5))
inverse = cv2.bitwise_not(b)
return inverse
@staticmethod
def imageToStringArray(img):
t = pytesseract.image_to_string(img, lang="eng", config="--oem 3 --psm 12 poe")
t = t.replace("\n\n", "\n")
lines = t.split("\n")
return lines
@staticmethod
def node_to_strings(node):
img = node["stats"]
filt_img = OCR.getFilteredImage(img)
text = OCR.imageToStringArray(filt_img)
return {"location": node["location"], "stats": text}
``` |
{
"source": "johann1000/grpc",
"score": 3
} |
#### File: framework/helpers/rand.py
```python
import random
import string
# Alphanumeric characters, similar to regex [:alnum:] class, [a-zA-Z0-9]
ALPHANUM = string.ascii_letters + string.digits
# Lowercase alphanumeric characters: [a-z0-9]
# Use ALPHANUM_LOWERCASE alphabet when case-sensitivity is a concern.
ALPHANUM_LOWERCASE = string.ascii_lowercase + string.digits
def rand_string(length: int = 8, *, lowercase: bool = False) -> str:
"""Return random alphanumeric string of given length.
Space for default arguments: alphabet^length
lowercase and uppercase = (26*2 + 10)^8 = 2.18e14 = 218 trillion.
lowercase only = (26 + 10)^8 = 2.8e12 = 2.8 trillion.
"""
alphabet = ALPHANUM_LOWERCASE if lowercase else ALPHANUM
return ''.join(random.choices(population=alphabet, k=length))
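# Example usage (illustrative only; the outputs shown are hypothetical since
# the result is random):
#   rand_string()                   # e.g. 'aZ3kQ9xP'
#   rand_string(12, lowercase=True) # e.g. 'k3n0x9w2q7ab'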
``` |
{
"source": "Johann13/fanzone_twitch_bot",
"score": 3
} |
#### File: fanzone_twitch_bot/config/config_loader.py
```python
import json
class QuoteConfig(object):
def __init__(self, json_data: dict):
self.spreadsheet = json_data['QUOTES_SPREADSHEET']
self.quotes_worksheet_name = json_data['QUOTES_WORKSHEET']
self.google_service_accounts = json_data['GOOGLE_SERVICE_ACCOUNT']
@classmethod
def from_file(cls, file_name: str):
data = json.load(open(file_name))
return cls(data)
pass
class RaffleConfig(object):
def __init__(self, json_data: dict):
self.enter_raffle_command = json_data['ENTER_RAFFLE_COMMAND']
@classmethod
def from_file(cls, file_name: str):
data = json.load(open(file_name))
return cls(data)
pass
class TwitchConfig(object):
def __init__(self, json_data: dict):
self.oauth = json_data['TWITCH_TMI_OAUTH']
self.client_id = json_data['TWITCH_CLIENT_ID']
self.secret = json_data['TWITCH_SECRET']
@classmethod
def from_file(cls, file_name: str):
data = json.load(open(file_name))
return cls(data)
pass
class BotConfig(object):
def __init__(self, json_data: dict):
self.channel = json_data['CHANNEL']
self.use_quote_bot = json_data['USE_QUOTE_MODULE']
self.use_raffle_bot = json_data['USE_RAFFLE_MODULE']
@classmethod
def from_file(cls, file_name: str):
data = json.load(open(file_name))
return cls(data)
pass
class FiZoneBotConfig(object):
def __init__(self,
bot_config: BotConfig,
twitch_config: TwitchConfig,
quote_config: QuoteConfig,
raffle_config: RaffleConfig,
):
self.bot_config = bot_config
self.twitch_config = twitch_config
self.quote_config = quote_config
self.raffle_config = raffle_config
@classmethod
def from_file(cls,
bot_config_file_name: str,
twitch_config_file_name: str,
quote_config_file_name: str,
raffle_config_file_name: str,
):
return cls(
bot_config=BotConfig.from_file(bot_config_file_name),
twitch_config=TwitchConfig.from_file(twitch_config_file_name),
quote_config=QuoteConfig.from_file(quote_config_file_name),
raffle_config=RaffleConfig.from_file(raffle_config_file_name),
)
pass
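# Example usage (a sketch; the JSON file names below are hypothetical):
#   config = FiZoneBotConfig.from_file(
#       bot_config_file_name='bot_config.json',
#       twitch_config_file_name='twitch_config.json',
#       quote_config_file_name='quote_config.json',
#       raffle_config_file_name='raffle_config.json',
#   )
#   print(config.bot_config.channel)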
```
#### File: Johann13/fanzone_twitch_bot/fizone_bot.py
```python
import asyncio
import datetime
import pytz
import twitchio
from twitchAPI import Twitch
from twitchio.ext import commands
from config.config_loader import FiZoneBotConfig
class FiZoneBot(commands.Bot):
def __init__(self, config: FiZoneBotConfig):
self.config = config
self.oauth_token = config.twitch_config.oauth
self._channel = config.bot_config.channel
self.twitch = Twitch(config.twitch_config.client_id, config.twitch_config.secret)
super().__init__(
token=config.twitch_config.oauth,
prefix='!',
initial_channels=[self._channel]
)
async def event_ready(self):
# We are logged in and ready to chat and use commands...
print(f'Logged in as | {self.nick}')
await asyncio.sleep(3)
channel: twitchio.Channel = self.get_channel('ostof')
if channel is not None:
if channel.name == 'fionn':
await channel.send('fionBot Bot ready! fionBot')
elif channel.name == 'zoeyproasheck':
await channel.send('MrDestructoid Bot ready! MrDestructoid')
else:
await channel.send('MrDestructoid Bot ready! MrDestructoid')
else:
print('channel is none')
@commands.command(name='uptime')
async def uptime(self, ctx: commands.Context):
streams = self.twitch.get_streams(
user_login=[
self._channel
]
)['data']
if len(streams) > 0:
stream = streams[0]
started_at = stream['started_at']
tzname = pytz.timezone(started_at.tzname())
now = datetime.datetime.now(tz=tzname)
            diff = now - started_at
await ctx.send(f'@{ctx.author.name} We have been live for {str(datetime.timedelta(seconds=diff.seconds))}')
pass
@commands.command(name='close')
async def close_bot(self, ctx: commands.Context):
if ctx.message.author is not None:
if ctx.message.author.name.lower() == 'ostof':
await ctx.send('stopping bot')
await self.close()
else:
                await ctx.reply('You are not authorized to stop the bot yogP')
pass
pass
``` |
{
"source": "Johann13/Twitch_Follows_Comparison",
"score": 3
} |
#### File: Johann13/Twitch_Follows_Comparison/models.py
```python
import time
from datetime import datetime
import requests
class TwitchCredentials:
def __init__(self, client_id, secret, token):
self.client_id = client_id
self.secret = secret
self.token = token
pass
def __str__(self):
return self.client_id
def __repr__(self):
return f'TwitchCredentials({self.client_id})'
class TwitchResponse:
def __init__(self, response: requests.Response):
secs = int(round(time.time()))
self.json = response.json()
self.rate_limit_reset = int(response.headers['ratelimit-reset'])
self.diff = self.rate_limit_reset - secs
self.rate_limit_remaining = int(response.headers['ratelimit-remaining'])
self.rate_limit_limit = int(response.headers['ratelimit-limit'])
self.should_sleep = self.rate_limit_remaining <= 0
self.has_error = 'error' in self.json
if 'error' in self.json:
self.error = self.json['error']
else:
self.error = None
pass
def __repr__(self):
return f'TwitchResponse({self.json})'
def __str__(self):
return f'{self.json}'
class TwitchUser:
def __init__(self, twitch_id, twitch_name):
self.twitch_id = twitch_id
self.twitch_name = twitch_name
def __repr__(self):
return f'TwitchUser({self.twitch_id},{self.twitch_name})'
def __str__(self):
return self.to_string()
def to_string(self):
return f'{self.twitch_id} {self.twitch_name}'
class SimpleFollow:
def __init__(self, twitch_id: str, year: str, month: str, day: str):
self.twitch_id = twitch_id
self.y = year
self.m = month
self.d = day
pass
@classmethod
def from_line(cls, line: str):
words = list(line.split(' '))
        # Assumes the line format "<twitch_id> <YYYY-MM-DD>" (an assumption about
        # the stored file format); split the date to match the constructor.
        year, month, day = words[1].split('-')
        return cls(words[0], year, month, day)
def get_date(self):
        return datetime.strptime(f'{self.y}-{self.m}-{self.d}', '%Y-%m-%d')
def get_day(self):
date = self.get_date()
return f'{date.year}-{str(date.month).zfill(2)}-{str(date.day).zfill(2)}'
class TwitchFollowRelation:
def __init__(self, index: int, from_id: str, from_name: str,
to_id: str, to_name: str,
followed_at: str, page: str):
self.index = index
self.from_user = TwitchUser(from_id, from_name)
self.to_user = TwitchUser(to_id, to_name)
self.from_id = from_id
self.from_name = from_name
self.to_id = to_id
self.to_name = to_name
self.followed_at = followed_at
s = self.followed_at
s = s.split('T')[0]
l = s.split('-')
self.y = l[0]
self.m = l[1]
self.d = l[2]
self.page = page
pass
@classmethod
def from_api(cls, data: {str: str}, page: str):
from_id = data['from_id']
from_name = data['from_name']
to_id = data['to_id']
to_name = data['to_name']
followed_at = data['followed_at']
page = page
return cls(0, from_id, from_name, to_id, to_name, followed_at, page)
@classmethod
def from_line(cls, line: str):
words = list(map(lambda s: s.replace(' ', ''), line.split(' ')))
if len(words) > 7:
words = list(filter(lambda s: s != '', words))
index = int(words[0])
from_id = words[1].replace('\n', '')
from_name = words[2].replace('\n', '')
to_id = words[3].replace('\n', '')
to_name = words[4].replace('\n', '')
followed_at = words[5].replace('\n', '')
if len(words) == 7:
page = words[6].replace('\n', '')
else:
page = None
return cls(index, from_id, from_name, to_id, to_name, followed_at, page)
def get_date(self):
return datetime.strptime(self.followed_at, '%Y-%m-%dT%H:%M:%SZ')
def get_day(self):
date = self.get_date()
return f'{date.year}-{str(date.month).zfill(2)}-{str(date.day).zfill(2)}'
def __repr__(self):
return f'TwitchFollowRelation({self.from_name},{self.to_name})'
def __str__(self):
return self.to_string()
def to_string(self):
return f'{self.from_id} {self.from_name} {self.to_id} {self.to_name} {self.followed_at} {self.page}'
def __eq__(self, other):
        if isinstance(other, TwitchFollowRelation):
            return self.from_id == other.from_id and self.to_id == other.to_id
        return False
def __hash__(self):
return int(f'{self.from_id}{self.to_id}')
```
#### File: Johann13/Twitch_Follows_Comparison/over_time.py
```python
import csv
from channel import channel_list
from models import SimpleFollow
from write_twitch_csv import load_twitch_follows_from_csv
def _data(start_year: int, end_year: int) -> [(str, str)]:
return [(f'{y}', f'{m}'.rjust(2, '0')) for y in range(start_year, end_year + 1, 1) for m in range(1, 13, 1)]
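# For example (illustrative): _data(2011, 2011) returns
# [('2011', '01'), ('2011', '02'), ..., ('2011', '12')].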
def _over_time(channel: (str, str)):
twitch_id, twitch_name = channel
follows: [SimpleFollow] = load_twitch_follows_from_csv(twitch_id)
dates: [(str, str)] = _data(2011, 2020)
return [len(list(filter(lambda f: f.y == y and f.m == m, follows))) for (y, m) in dates]
def growth(channel: [(str, str)]):
dates: [(str, str)] = _data(2011, 2020)
header: [str] = [f'{m}.{y}' for (y, m) in dates]
header.insert(0, '')
csv_file = open('data/growth.csv', 'w', newline='')
csv_file2 = open('data/total_over_time.csv', 'w', newline='')
w = csv.writer(csv_file,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
w2 = csv.writer(csv_file2,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
w.writerow(header)
w2.writerow(header)
for c in channel:
timeline = _over_time(c)
row = [c[1]]
row2 = [c[1]]
timeline2 = [sum(timeline[:(i + 1)]) for i in range(len(timeline))]
for t in timeline:
row.append(t)
for t in timeline2:
row2.append(t)
w.writerow(row)
w2.writerow(row2)
csv_file.close()
csv_file2.close()
pass
if __name__ == '__main__':
growth(channel_list)
``` |
{
"source": "johannabi/MA-ConceptMining",
"score": 3
} |
#### File: MA-ConceptMining/exploration/type_token_ratio.py
```python
from inout import inputoutput
import spacy
from nltk.tokenize import sent_tokenize
import os
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s')
def get_german_fiction_tokens(limit, nlp):
logging.info('get german fiction tokens')
dir = '../data/corpus-of-german-language-fiction/corpus-of-german-fiction-txt'
all_tokens = list()
tokens = list()
ratio_sum = 0
rounds = 0
for file in os.listdir(dir):
with open(dir + '/' + file, 'r', encoding='utf-8') as f:
content = f.read()
sent = sent_tokenize(content)
for s in sent:
doc = nlp(s)
for t in doc:
lemma = t.lemma_.lower()
if lemma.upper().isupper():
tokens.append(lemma)
if len(tokens) == 1000:
ratio_sum += compute_ratio(tokens)
all_tokens.extend(tokens)
tokens = list()
rounds += 1
if len(all_tokens) >= limit:
ratio_avg = ratio_sum / rounds
types = set(all_tokens)
logging.info(str(len(all_tokens)) + ' tokens ' + str(len(types)) + ' types')
logging.info('corpus size:' + str(len(all_tokens)) + '\naverage ratio: ' + str(ratio_avg))
return
# ratio_sum += compute_ratio(tokens)
def get_jobads_tokens(jobads, nlp):
logging.info('get jobad tokens')
# corpus_size = 0
all_tokens = list()
tokens = list()
ratio_sum = 0
rounds = 0
for job in jobads:
doc = nlp(job)
for t in doc:
lemma = t.lemma_.lower()
if lemma.upper().isupper():
tokens.append(lemma)
if len(tokens) == 1000:
ratio_sum += compute_ratio(tokens)
all_tokens.extend(tokens)
tokens = list()
rounds += 1
# print(all_tokens)
ratio_sum += compute_ratio(tokens)
all_tokens.extend(tokens)
types = set(all_tokens)
logging.info(str(len(all_tokens)) + ' tokens ' + str(len(types)) + ' types')
ratio_avg = ratio_sum / rounds
logging.info('corpus size:' + str(len(all_tokens)) + '\naverage ratio: ' + str(ratio_avg) + '\n\n')
return all_tokens
def compute_ratio(tokens):
types = set(tokens)
ratio = len(types) / len(tokens)
return ratio
logging.info('read jobads from sqlite')
sentences = inputoutput.read_jobads_content('../data/jobads/text_kernel_jobads.db')
logging.info(str(len(sentences)) + ' jobads')
nlp = spacy.load('de_core_news_sm')
jobad_tokens = get_jobads_tokens(sentences, nlp)
fiction_tokens = get_german_fiction_tokens(len(jobad_tokens), nlp)
``` |
{
"source": "johannah-23/motulator",
"score": 3
} |
#### File: control/im/scalar.py
```python
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from control.common import RateLimiter, PWM
from helpers import abc2complex, complex2abc
# %%
class ScalarCtrl:
"""
V/Hz control algorithm with the stator current feedback.
"""
def __init__(self, pars, datalog):
# Parameters
self.pars = pars
# Compute the breakdown slip angular frequency
w_rb = (pars.L_M + pars.L_sgm)/pars.L_sgm*(pars.R_R/pars.L_M)
# LPF bandwidth
self.alpha_f = .1*w_rb
# States
self.i_s0 = 0
self.theta_s = 0
# Instantiate classes
self.pwm = PWM(pars)
self.rate_limiter = RateLimiter(pars)
self.datalog = datalog
def __call__(self, w_m_ref, i_s_abc, u_dc):
"""
Main control loop.
Parameters
----------
w_m_ref : float
Rotor speed reference (in electrical rad/s).
i_s_abc : ndarray, shape (3,)
Phase currents.
u_dc : float
DC-bus voltage.
Returns
-------
d_abc_ref : ndarray, shape (3,)
Duty ratio references.
T_s : float
Sampling period.
"""
T_s = self.pars.T_s
# Get the states
theta_s = self.theta_s
i_s0 = self.i_s0
# Limit the rate of change of the speed reference (in electrical rad/s)
w_m0 = self.rate_limiter(w_m_ref)
# Space vector transformation
i_s = np.exp(-1j*theta_s)*abc2complex(i_s_abc)
w_s = self.stator_freq(w_m0, i_s, i_s0)
u_s_ref = self.voltage_reference(w_s, w_m0, i_s, i_s0)
# Compute the duty ratios
d_abc_ref, u_s = self.pwm.output(u_s_ref, u_dc, theta_s, 0)
# Update the states
self.update(w_s, i_s)
# Data logger
self.datalog.save([i_s0, i_s, u_s, w_m_ref, w_m0, w_s, theta_s,
u_dc, self.pars.psi_s_nom, T_s])
return d_abc_ref, T_s
def stator_freq(self, w_m_ref, i_s, i_s0):
"""
Computes the dynamic stator frequency reference used in
the coordinate transformations.
"""
k_w = self.pars.k_w
R_R = self.pars.R_R
L_sgm = self.pars.L_sgm
psi_s_nom = self.pars.psi_s_nom
# Operating-point quantities are marked with zero
psi_R0 = psi_s_nom - L_sgm*i_s0
psi_R0_sqr = np.abs(psi_R0)**2
# Compute the dynamic stator frequency
if psi_R0_sqr > 0:
# Operating-point slip
w_r0 = R_R*np.imag(i_s0*np.conj(psi_R0))/psi_R0_sqr
# Operating-point stator frequency
w_s0 = w_m_ref + w_r0
# Dynamic frequency
err = R_R*np.imag((i_s0 - i_s)*np.conj(psi_R0))/psi_R0_sqr
w_s = w_s0 + k_w*err
else:
w_s = 0
return w_s
def voltage_reference(self, w_s, w_m0, i_s, i_s0):
"""
Compute the stator voltage reference in synchronous coordinates.
"""
R_s = self.pars.R_s
L_sgm = self.pars.L_sgm
alpha = self.pars.R_R/self.pars.L_M
k_u = self.pars.k_u
psi_s_nom = self.pars.psi_s_nom
# Nominal magnetizing current
i_sd_nom = psi_s_nom/(self.pars.L_M + L_sgm)
# Operating-point current for RI compensation
i_s_ref0 = i_sd_nom + 1j*i_s0.imag
# Term -R_s omitted to avoid problems due to the voltage saturation
# k = -R_s + k_u*L_sgm*(alpha + 1j*w_m0)
k = k_u*L_sgm*(alpha + 1j*w_m0)
u_s_ref = R_s*i_s_ref0 + 1j*w_s*psi_s_nom + k*(i_s0 - i_s)
return u_s_ref
def update(self, w_s, i_s):
"""
Update the states.
"""
T_s = self.pars.T_s
self.i_s0 += T_s*self.alpha_f*(i_s - self.i_s0)
self.theta_s += T_s*w_s # Integrate
self.theta_s = np.mod(self.theta_s, 2*np.pi) # Limit to [0, 2*pi]
def __str__(self):
desc = ('V/Hz control:\n'
' R_s={} R_R={} L_sgm={} L_M={}\n'
' k_u={:.1f} k_w={:.1f} alpha_f=2*pi*{:.1f}')
return desc.format(self.pars.R_s, self.pars.R_R,
self.pars.L_sgm, self.pars.L_M,
self.pars.k_u, self.pars.k_w, self.alpha_f)
# %%
class Datalogger:
"""
This class contains a data logger.
"""
def __init__(self):
"""
Initialize the attributes.
"""
self.t = []
self.i_s0 = []
self.i_s = []
self.u_s = []
self.w_m_ref = []
self.w_m = []
self.w_s = []
self.theta_s = []
self.u_dc = []
self.psi_s_nom = []
self.u_ss, self.i_ss = 0j, 0j
def save(self, data):
"""
Saves the solution.
Parameters
----------
        data : list
            Signals to be logged, in the order they are unpacked below.
"""
(i_s0, i_s, u_s, w_m_ref, w_m, w_s,
theta_s, u_dc, psi_s_nom, T_s) = data
try:
t_new = self.t[-1] + T_s
except IndexError:
t_new = 0 # At the first step t = []
self.t.extend([t_new])
self.i_s0.extend([i_s0])
self.i_s.extend([i_s])
self.u_s.extend([u_s])
self.w_m_ref.extend([w_m_ref])
self.w_m.extend([w_m])
self.w_s.extend([w_s])
self.theta_s.extend([theta_s])
self.u_dc.extend([u_dc])
self.psi_s_nom.extend([psi_s_nom])
def post_process(self):
"""
Transforms the lists to the ndarray format and post-process them.
"""
self.i_s0 = np.asarray(self.i_s0)
self.i_s = np.asarray(self.i_s)
self.u_s = np.asarray(self.u_s)
self.w_m_ref = np.asarray(self.w_m_ref)
self.w_m = np.asarray(self.w_m)
self.w_s = np.asarray(self.w_s)
self.theta_s = np.asarray(self.theta_s)
self.u_dc = np.asarray(self.u_dc)
self.psi_s_nom = np.asarray(self.psi_s_nom)
self.u_ss = np.exp(1j*self.theta_s)*self.u_s
self.i_ss = np.exp(1j*self.theta_s)*self.i_s
def plot(self, mdl):
"""
Plots some example figures.
Parameters
----------
mdl : instance of a class
Continuous-time solution.
"""
# Continuous-time data
data = mdl.datalog
# Plotting parameters
plt.rcParams['axes.prop_cycle'] = cycler(color='brgcmyk')
plt.rcParams['lines.linewidth'] = 1.
plt.rcParams.update({"text.usetex": True,
"font.family": "serif",
"font.sans-serif": ["Computer Modern Roman"]})
t_range = (0, self.t[-1])
t_zoom = (1.2, 1.225)
# Plots speeds and torques
fig1, (ax1, ax2) = plt.subplots(2, 1)
ax1.step(self.t, self.w_m_ref, '--', where='post')
ax1.plot(data.t, data.w_m)
ax1.step(self.t, self.w_m, where='post')
ax1.legend([r'$\omega_\mathrm{m,ref}$',
r'$\omega_\mathrm{m}$',
r'$\omega_\mathrm{m0}$'])
ax1.set_xlim(t_range)
ax1.set_ylabel('Angular speed (rad/s)')
ax2.plot(data.t, data.T_L, '--')
ax2.plot(data.t, data.T_M)
ax2.set_xlim(t_range)
ax2.legend([r'$\tau_\mathrm{L}$', r'$\tau_\mathrm{m}$'])
ax2.set_ylabel('Torque (Nm)')
ax2.set_xlabel('Time (s)')
# Plots currents components and flux magnitudes
fig2, (ax1, ax2, ax3) = plt.subplots(3, 1)
ax1.step(self.t, self.i_s0.real, '--', where='post')
ax1.step(self.t, self.i_s.real, where='post')
ax1.step(self.t, self.i_s0.imag, '--', where='post')
ax1.step(self.t, self.i_s.imag, where='post')
ax1.set_ylabel('Current (A)')
ax1.legend([r'$i_\mathrm{sd0}$', r'$i_\mathrm{sd}$',
r'$i_\mathrm{sq0}$', r'$i_\mathrm{sq}$'])
ax1.set_xlim(t_range)
ax2.step(self.t, self.psi_s_nom, '--', where='post')
ax2.plot(data.t, np.abs(data.psi_ss))
ax2.plot(data.t, np.abs(data.psi_Rs))
ax2.set_xlim(t_range)
ax2.legend([r'$\psi_\mathrm{s,ref}$',
r'$\psi_\mathrm{s}$', r'$\psi_\mathrm{R}$'])
ax2.set_ylabel('Flux linkage (Vs)')
ax3.step(self.t, np.abs(self.u_s), where='post')
ax3.step(self.t, self.u_dc/np.sqrt(3), '--', where='post')
ax3.set_ylabel('Voltage (V)')
ax3.set_xlim(t_range)
ax3.legend([r'$u_\mathrm{s}$', r'$u_\mathrm{dc}/\sqrt{3}$'])
ax3.set_xlabel('Time (s)')
if mdl.pwm is not None:
# Plots a zoomed view of voltages and currents
fig3, (ax1, ax2) = plt.subplots(2, 1)
ax1.plot(data.t, data.u_ss.real)
ax1.plot(self.t, self.u_ss.real)
ax1.set_xlim(t_zoom)
ax1.legend([r'$u_\mathrm{sa}$', r'$\hat u_\mathrm{sa}$'])
ax1.set_ylabel('Voltage (V)')
ax2.plot(data.t, complex2abc(data.i_ss).T)
ax2.step(self.t, self.i_ss.real, where='post')
ax2.set_xlim(t_zoom)
ax2.set_ylim(-10, 10)
ax2.legend([r'$i_\mathrm{a}$', r'$i_\mathrm{b}$',
r'$i_\mathrm{c}$'])
ax2.set_ylabel('Current (A)')
ax2.set_xlabel('Time (s)')
else:
fig3 = None
# Plots the DC bus and grid-side variables (if data exists)
try:
data.i_L
except AttributeError:
data.i_L = None
if data.i_L is not None:
fig4, (ax1, ax2) = plt.subplots(2, 1)
ax1.plot(data.t, data.u_di)
ax1.plot(data.t, data.u_dc)
ax1.plot(data.t, complex2abc(data.u_g).T)
ax1.set_xlim(t_zoom)
ax1.legend([r'$u_\mathrm{di}$',
r'$u_\mathrm{dc}$',
r'$u_\mathrm{ga}$'])
ax1.set_ylabel('Voltage (V)')
ax2.plot(data.t, data.i_L)
ax2.plot(data.t, data.i_dc)
ax2.plot(data.t, data.i_g.real)
ax2.set_xlim(t_zoom)
ax2.legend([r'$i_\mathrm{L}$',
r'$i_\mathrm{dc}$',
r'$i_\mathrm{ga}$'])
ax2.set_ylabel('Current (A)')
ax2.set_xlabel('Time (s)')
else:
fig4 = None
plt.show()
# plt.savefig('test.pdf')
return fig1, fig2, fig3, fig4
``` |
{
"source": "johannah/balloon-learning-environment",
"score": 2
} |
#### File: balloon-learning-environment/balloon_learning_environment/acme_utils.py
```python
r"""Acme utils.
"""
import functools
from typing import Any, Dict, Optional
from acme import adders
from acme import core
from acme import wrappers
from acme.agents.jax import dqn
from acme.jax import networks as networks_lib
from acme.jax import utils
from balloon_learning_environment.agents import marco_polo_exploration
from balloon_learning_environment.agents import networks
from balloon_learning_environment.agents import random_walk_agent
from balloon_learning_environment.env import balloon_env
from balloon_learning_environment.env import simulator_data
from balloon_learning_environment.utils import units
import dm_env
from flax import linen as nn
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
def _balloon_is_within_radius(state: simulator_data.SimulatorState,
radius: units.Distance,
max_episode_length: int) -> float:
balloon_state = state.balloon_state
return (units.relative_distance(balloon_state.x, balloon_state.y) <=
radius) / max_episode_length
def create_env(is_eval: bool, max_episode_length: int) -> dm_env.Environment:
"""Creates a BLE environment."""
env = balloon_env.BalloonEnv()
if is_eval:
env = balloon_env.BalloonEnv(
reward_function=functools.partial(
_balloon_is_within_radius,
radius=env.radius,
max_episode_length=max_episode_length))
env = wrappers.gym_wrapper.GymWrapper(env)
env = wrappers.step_limit.StepLimitWrapper(
env, step_limit=max_episode_length)
env = wrappers.SinglePrecisionWrapper(env)
return env
class QuantileNetwork(nn.Module):
"""Network used to compute the agent's return quantiles."""
num_actions: int
num_layers: int
hidden_units: int
num_atoms: int = 51
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x: jnp.ndarray):
ble_quantile_network = networks.QuantileNetwork(self.num_actions,
self.num_layers,
self.hidden_units,
self.num_atoms,
self.inputs_preprocessed)
def batched_network(x):
return ble_quantile_network(x)
# Make network batched, since this is what Acme expects.
output = jax.vmap(batched_network)(x)
return {'q_dist': output.logits, 'q_values': output.q_values}
class CombinedActor(core.Actor):
"""Combines Acme's actor with MarcoPoloExploration exploration actor."""
def __init__(
self,
actor: core.Actor,
exploration_actor: marco_polo_exploration.MarcoPoloExploration,
):
self._actor = actor
self._exploration_actor = exploration_actor
def select_action(self, observation: networks_lib.Observation):
action = self._actor.select_action(observation)
action = self._exploration_actor.step(0, observation, action)
return np.array(action, dtype=np.int32)
def observe_first(self, timestep: dm_env.TimeStep):
self._actor.observe_first(timestep)
self._exploration_actor.begin_episode(timestep.observation, 42)
def observe(self, action: networks_lib.Action,
next_timestep: dm_env.TimeStep):
self._actor.observe(action, next_timestep)
def update(self, wait: bool = False):
self._actor.update(wait)
def marco_polo_actor(make_actor_fn):
"""Wraps make_actor_fn to include MarcoPoloExploration."""
def make_actor(
random_key: networks_lib.PRNGKey,
policy_network,
adder: Optional[adders.Adder] = None,
variable_source: Optional[core.VariableSource] = None,
):
original_actor = make_actor_fn(random_key, policy_network, adder,
variable_source)
if adder is None: # eval actor
return original_actor
exploration = marco_polo_exploration.MarcoPoloExploration(
num_actions=3,
observation_shape=(1099,),
exploratory_episode_probability=0.8,
exploratory_agent_constructor=random_walk_agent.RandomWalkAgent)
return CombinedActor(original_actor, exploration)
return make_actor
def create_dqn(params: Dict[str, Any]):
"""Creates necessary components to run Acme's DQN."""
use_marco_polo_exploration = params.pop('marco_polo_exploration', False)
update_period = 4
target_update_period = 100
adaptive_learning_rate = params.pop('adaptive_learning_rate', False)
config = dqn.DQNConfig(**params)
config.discount = 0.993
config.n_step = 5
config.min_replay_size = 500
config.target_update_period = target_update_period // update_period
config.adam_eps = 0.00002
config.max_replay_size = 2000000
config.batch_size = 32
config.samples_per_insert = config.batch_size / update_period
config.prefetch_size = 0 # Somehow prefetching makes it much slower.
if adaptive_learning_rate:
config.learning_rate = optax.linear_schedule(
init_value=2e-6, end_value=4e-7,
transition_steps=5_000_000 // config.batch_size)
else:
config.learning_rate = 2e-6
num_atoms = 51
def make_networks(env_spec):
q_network = QuantileNetwork(num_actions=3, num_layers=8,
hidden_units=600, num_atoms=num_atoms)
dummy_obs = utils.tile_nested(utils.zeros_like(env_spec.observations), 1)
dqn_network = networks_lib.FeedForwardNetwork(
lambda key: q_network.init(key, dummy_obs), q_network.apply)
return dqn_network
def dqn_logger():
return None
loss_fn = dqn.QrDqn(num_atoms=num_atoms, huber_param=1)
rl_agent = dqn.DQNBuilder(
config=config, loss_fn=loss_fn, logger_fn=dqn_logger)
def behavior_policy(dqn_network):
def policy(params: networks_lib.Params, key: jnp.ndarray,
observation: jnp.ndarray, epsilon: float) -> jnp.ndarray:
observation = jnp.expand_dims(observation, axis=0) # add batch dim
action_values = dqn_network.apply(params, observation)['q_values']
action_values = jnp.squeeze(action_values, axis=0) # remove batch dim
result = rlax.epsilon_greedy(epsilon).sample(key, action_values)
return result
return policy
def eval_policy(dqn_network):
def policy(params: networks_lib.Params, key: jnp.ndarray,
observation: jnp.ndarray, _) -> jnp.ndarray:
observation = jnp.expand_dims(observation, axis=0) # add batch dim
action_values = dqn_network.apply(params, observation)['q_values']
action_values = jnp.squeeze(action_values, axis=0) # remove batch dim
result = rlax.epsilon_greedy(0).sample(key, action_values)
return result
return policy
if use_marco_polo_exploration:
rl_agent.make_actor = marco_polo_actor(rl_agent.make_actor)
return rl_agent, config, make_networks, behavior_policy, eval_policy
```
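The `behavior_policy`/`eval_policy` closures above reduce to an epsilon-greedy choice over the network's Q-values. A minimal standalone sketch of that final step, with illustrative values rather than anything produced by the actual agent:

```python
# Sketch of the epsilon-greedy sampling used by behavior_policy/eval_policy above.
# The Q-values here are made up; in the agent they come from dqn_network.apply(...).
import jax
import jax.numpy as jnp
import rlax

key = jax.random.PRNGKey(0)
q_values = jnp.array([0.1, 0.7, 0.2])  # one value per action (down, stay, up)

explore_action = rlax.epsilon_greedy(0.1).sample(key, q_values)  # mostly argmax, 10% random
greedy_action = rlax.epsilon_greedy(0.0).sample(key, q_values)   # eval: always argmax (-> 1)
```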
#### File: balloon_learning_environment/agents/dqn_agent.py
```python
import functools
from typing import Optional, Sequence, Union
from absl import logging
from balloon_learning_environment.agents import agent
from balloon_learning_environment.agents import dopamine_utils
from dopamine.jax.agents.dqn import dqn_agent
from flax import linen as nn
import gin
import jax.numpy as jnp
import numpy as np
@gin.configurable(allowlist=['network', 'checkpoint_duration'])
class DQNAgent(agent.Agent, dqn_agent.JaxDQNAgent):
"""A wrapper for training the Dopamine DQN agent."""
def __init__(self,
num_actions: int,
observation_shape: Sequence[int],
*, # Everything after this is a keyword-only argument.
seed: Optional[int] = None,
network: nn.Module = gin.REQUIRED,
checkpoint_duration: Optional[int] = gin.REQUIRED):
"""Create the DQN Agent.
Args:
num_actions: Number of actions.
observation_shape: Shape of input observations.
seed: Optional seed for the PRNG.
network: Network to use for training and inference.
checkpoint_duration: Optional duration of checkpoints for garbage
collection.
"""
self._checkpoint_duration = checkpoint_duration
# Although Python MRO goes from left to right, we call each __init__
# function explicitly as opposed to using `super()` (which would just call
# agent.Agent's init) to avoid confusion.
agent.Agent.__init__(self, num_actions, observation_shape)
dqn_agent.JaxDQNAgent.__init__(
self,
num_actions,
observation_shape=observation_shape,
observation_dtype=jnp.float32,
stack_size=1,
network=functools.partial(network, is_dopamine=True),
seed=seed)
def begin_episode(self, observation: np.ndarray) -> int:
return dqn_agent.JaxDQNAgent.begin_episode(self, observation)
def step(self, reward: float, observation: np.ndarray) -> int:
return dqn_agent.JaxDQNAgent.step(self, reward, observation)
def _train_step(self):
# We override this method to log using flax's (eager) tensorboard.
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
self.optimizer_state, self.online_params, loss = dqn_agent.train(
self.network_def,
self.online_params,
self.target_network_params,
self.optimizer,
self.optimizer_state,
self.replay_elements['state'],
self.replay_elements['action'],
self.replay_elements['next_state'],
self.replay_elements['reward'],
self.replay_elements['terminal'],
self.cumulative_gamma,
self._loss_type)
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
self.summary_writer.scalar('HuberLoss', loss, self.training_steps)
self.summary_writer.flush()
if self.training_steps % self.target_update_period == 0:
self._sync_weights()
self.training_steps += 1
def end_episode(self, reward: float, terminal: bool = True) -> None:
dqn_agent.JaxDQNAgent.end_episode(self, reward, terminal)
def set_mode(self, mode: Union[agent.AgentMode, str]) -> None:
mode = agent.AgentMode(mode)
if mode == agent.AgentMode.TRAIN:
self.eval_mode = False
else:
self.eval_mode = True
def save_checkpoint(self, checkpoint_dir: str, iteration_number: int) -> None:
"""Checkpoint agent parameters as a pickled dict."""
# Try to create checkpoint directory if it doesn't exist.
dopamine_utils.save_checkpoint(
checkpoint_dir, iteration_number,
functools.partial(dqn_agent.JaxDQNAgent.bundle_and_checkpoint,
self))
# Get rid of old checkpoints if necessary.
if self._checkpoint_duration is not None:
dopamine_utils.clean_up_old_checkpoints(
checkpoint_dir, iteration_number,
checkpoint_duration=self._checkpoint_duration)
def load_checkpoint(self, checkpoint_dir: str, iteration_number: int) -> None:
"""Checkpoint agent parameters as a pickled dict."""
dopamine_utils.load_checkpoint(
checkpoint_dir, iteration_number,
functools.partial(dqn_agent.JaxDQNAgent.unbundle, self))
def reload_latest_checkpoint(self, checkpoint_dir: str) -> int:
latest_episode = dopamine_utils.get_latest_checkpoint(checkpoint_dir)
if latest_episode < 0:
logging.warning('Unable to reload checkpoint at %s', checkpoint_dir)
return -1
try:
self.load_checkpoint(checkpoint_dir, latest_episode)
logging.info('Will restart training from episode %d', latest_episode)
return latest_episode
except ValueError:
logging.warning('Unable to reload checkpoint at %s', checkpoint_dir)
return -1
```
#### File: env/balloon/acs_test.py
```python
from absl.testing import absltest
from absl.testing import parameterized
from balloon_learning_environment.env.balloon import acs
from balloon_learning_environment.utils import units
class AcsTest(parameterized.TestCase):
@parameterized.named_parameters(
dict(testcase_name='at_min', pressure_ratio=1.0, power=100.0,
comparator='eq'),
dict(testcase_name='at_mid', pressure_ratio=1.2, power=300.0,
comparator='eq'),
dict(testcase_name='at_max', pressure_ratio=1.35, power=400.0,
comparator='eq'),
dict(testcase_name='below_min', pressure_ratio=0.01, power=100.0,
comparator='lt'),
dict(testcase_name='above_max', pressure_ratio=2.0, power=400.0,
comparator='gt'))
def test_get_most_efficient_power(self, pressure_ratio, power, comparator):
if comparator == 'eq':
comparator = self.assertEqual
elif comparator == 'lt':
comparator = self.assertLessEqual
else:
comparator = self.assertGreaterEqual
comparator(acs.get_most_efficient_power(pressure_ratio).watts, power)
@parameterized.named_parameters(
dict(testcase_name='at_min', pressure_ratio=1.05, power=100.0,
efficiency=0.4, comparator='eq'),
dict(testcase_name='at_max', pressure_ratio=1.35, power=400.0,
efficiency=0.13, comparator='eq'),
dict(testcase_name='below_min', pressure_ratio=0.01, power=10.0,
efficiency=0.4, comparator='gt'),
dict(testcase_name='above_max', pressure_ratio=2.0, power=500.0,
efficiency=0.13, comparator='lt'))
def test_get_fan_efficiency(self, pressure_ratio, power, efficiency,
comparator):
if comparator == 'eq':
comparator = self.assertEqual
elif comparator == 'lt':
comparator = self.assertLessEqual
else:
comparator = self.assertGreaterEqual
comparator(acs.get_fan_efficiency(pressure_ratio, units.Power(watts=power)),
efficiency)
def test_get_mass_flow(self):
self.assertEqual(
acs.get_mass_flow(units.Power(watts=3.6), 10.0), 0.01)
if __name__ == '__main__':
absltest.main()
```
#### File: env/balloon/altitude_safety.py
```python
import enum
import logging
from balloon_learning_environment.env.balloon import control
from balloon_learning_environment.env.balloon import standard_atmosphere
from balloon_learning_environment.utils import units
import transitions
# TODO(joshgreaves): This may require some tuning.
BUFFER = units.Distance(feet=500.0)
RESTART_HYSTERESIS = units.Distance(feet=500.0)
MIN_ALTITUDE = units.Distance(feet=50_000.0)
class _AltitudeState(enum.Enum):
NOMINAL = 0
LOW = 1
VERY_LOW = 2
# Note: Transitions are applied in the order of the first match.
# '*' is a catch-all, and applies to any state.
_ALTITUDE_SAFETY_TRANSITIONS = (
dict(trigger='very_low', source='*', dest=_AltitudeState.VERY_LOW),
dict(trigger='low', source='*', dest=_AltitudeState.LOW),
dict(
trigger='low_nominal',
source=(_AltitudeState.VERY_LOW, _AltitudeState.LOW),
dest=_AltitudeState.LOW),
dict(
trigger='low_nominal',
source=_AltitudeState.NOMINAL,
dest=_AltitudeState.NOMINAL),
dict(trigger='nominal', source='*', dest=_AltitudeState.NOMINAL),
)
class AltitudeSafetyLayer:
"""A safety layer that prevents balloons navigating to unsafe altitudes."""
def __init__(self):
self._state_machine = transitions.Machine(
states=_AltitudeState,
transitions=_ALTITUDE_SAFETY_TRANSITIONS,
initial=_AltitudeState.NOMINAL)
logging.getLogger('transitions').setLevel(logging.WARNING)
def get_action(self, action: control.AltitudeControlCommand,
atmosphere: standard_atmosphere.Atmosphere,
pressure: float) -> control.AltitudeControlCommand:
"""Gets the action recommended by the safety layer.
Args:
action: The action the controller has supplied to the balloon.
atmosphere: The atmospheric conditions the balloon is flying in.
pressure: The current pressure of the balloon.
Returns:
An action the safety layer recommends.
"""
altitude = atmosphere.at_pressure(pressure).height
self._transition_state(altitude)
if self._state_machine.state == _AltitudeState.VERY_LOW:
# If the balloon is too low, make it ascend.
return control.AltitudeControlCommand.UP
elif self._state_machine.state == _AltitudeState.LOW:
# If the balloon is almost too low, don't let it go lower.
if action == control.AltitudeControlCommand.DOWN:
return control.AltitudeControlCommand.STAY
return action
@property
def navigation_is_paused(self):
return self._state_machine.state != _AltitudeState.NOMINAL
def _transition_state(self, altitude: units.Distance):
if altitude < MIN_ALTITUDE:
self._state_machine.very_low()
elif altitude < MIN_ALTITUDE + BUFFER:
self._state_machine.low()
elif altitude < MIN_ALTITUDE + BUFFER + RESTART_HYSTERESIS:
self._state_machine.low_nominal()
else:
self._state_machine.nominal()
```
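Putting numbers to the constants above, the state machine partitions altitude into four bands. A throwaway sketch (values in feet, derived from MIN_ALTITUDE, BUFFER, and RESTART_HYSTERESIS; not part of the module):

```python
# Bands implied by MIN_ALTITUDE=50,000 ft, BUFFER=500 ft, RESTART_HYSTERESIS=500 ft.
def classify_altitude_ft(altitude_ft: float) -> str:
    if altitude_ft < 50_000:
        return 'very_low'     # safety layer forces UP
    if altitude_ft < 50_500:
        return 'low'          # DOWN is replaced by STAY
    if altitude_ft < 51_000:
        return 'low_nominal'  # remains paused if it was already LOW/VERY_LOW
    return 'nominal'          # normal control resumes
```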
#### File: env/balloon/altitude_safety_test.py
```python
from absl.testing import absltest
from absl.testing import parameterized
from balloon_learning_environment.env.balloon import altitude_safety
from balloon_learning_environment.env.balloon import control
from balloon_learning_environment.env.balloon import standard_atmosphere
from balloon_learning_environment.utils import units
import jax
class AltitudeSafetyTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.atmosphere = standard_atmosphere.Atmosphere(jax.random.PRNGKey(0))
very_low_altitude = (
altitude_safety.MIN_ALTITUDE - units.Distance(feet=100.0))
low_altitude = (altitude_safety.MIN_ALTITUDE + altitude_safety.BUFFER / 2.0)
low_nominal_altitude = (
altitude_safety.MIN_ALTITUDE + altitude_safety.BUFFER +
altitude_safety.RESTART_HYSTERESIS / 2.0)
nominal_altitude = (
altitude_safety.MIN_ALTITUDE + altitude_safety.BUFFER +
altitude_safety.RESTART_HYSTERESIS + units.Distance(feet=100.0))
self.very_low_altitude_pressure = self.atmosphere.at_height(
very_low_altitude).pressure
self.low_altitude_pressure = self.atmosphere.at_height(
low_altitude).pressure
self.low_nominal_altitude_pressure = self.atmosphere.at_height(
low_nominal_altitude).pressure
self.nominal_altitude_pressure = self.atmosphere.at_height(
nominal_altitude).pressure
self.pressures = {
'very_low_altitude_pressure': self.very_low_altitude_pressure,
'low_altitude_pressure': self.low_altitude_pressure,
'low_nominal_altitude_pressure': self.low_nominal_altitude_pressure,
'nominal_altitude_pressure': self.nominal_altitude_pressure
}
@parameterized.named_parameters(
dict(
          testcase_name='very_low_altitude_advises_up',
pressure='very_low_altitude_pressure',
action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='low_altitude_advises_stay',
pressure='low_altitude_pressure',
action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.STAY),
dict(
testcase_name='nominal_altitude_allows_action',
pressure='nominal_altitude_pressure',
action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.DOWN),
dict(
testcase_name='low_altitude_allows_up_action',
pressure='low_altitude_pressure',
action=control.AltitudeControlCommand.UP,
expected_action=control.AltitudeControlCommand.UP))
def test_safety_layer_gives_correct_action(
self, pressure: str, action: control.AltitudeControlCommand,
expected_action: control.AltitudeControlCommand):
asl = altitude_safety.AltitudeSafetyLayer()
pressure = self.pressures[pressure]
action = asl.get_action(action, self.atmosphere, pressure)
self.assertEqual(action, expected_action)
@parameterized.named_parameters(
dict(
testcase_name='very_low_altitude_is_paused',
pressure='very_low_altitude_pressure',
expected=True),
dict(
testcase_name='low_altitude_is_paused',
pressure='low_altitude_pressure',
expected=True),
dict(
testcase_name='nominal_altitude_is_not_paused',
pressure='nominal_altitude_pressure',
expected=False))
def test_navigation_is_paused_is_calculated_correctly(self, pressure: str,
expected: bool):
asl = altitude_safety.AltitudeSafetyLayer()
pressure = self.pressures[pressure]
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
pressure)
self.assertEqual(asl.navigation_is_paused, expected)
def test_increasing_altitude_below_hysteresis_does_not_resume_control(self):
asl = altitude_safety.AltitudeSafetyLayer()
# Sets state to LOW.
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
self.low_altitude_pressure)
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
self.low_nominal_altitude_pressure)
self.assertTrue(asl.navigation_is_paused)
def test_increasing_altitude_above_hysteresis_resumes_control(self):
asl = altitude_safety.AltitudeSafetyLayer()
# Sets state to LOW.
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
self.low_altitude_pressure)
asl.get_action(control.AltitudeControlCommand.DOWN, self.atmosphere,
self.nominal_altitude_pressure)
self.assertFalse(asl.navigation_is_paused)
if __name__ == '__main__':
absltest.main()
```
#### File: env/balloon/envelope_safety_test.py
```python
from absl.testing import absltest
from absl.testing import parameterized
from balloon_learning_environment.env.balloon import control
from balloon_learning_environment.env.balloon import envelope_safety
from balloon_learning_environment.env.balloon import standard_atmosphere
from balloon_learning_environment.utils import test_helpers
import jax
class EnvelopeSafetyTest(parameterized.TestCase):
@parameterized.named_parameters(
dict(
testcase_name='superpressure_low_critical_down',
superpressure=50.0,
input_action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='superpressure_low_critical_stay',
superpressure=50.0,
input_action=control.AltitudeControlCommand.STAY,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='superpressure_low_critical_up',
superpressure=50.0,
input_action=control.AltitudeControlCommand.UP,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='superpressure_low_down',
superpressure=200.0,
input_action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.STAY),
dict(
testcase_name='superpressure_low_stay',
superpressure=200.0,
input_action=control.AltitudeControlCommand.STAY,
expected_action=control.AltitudeControlCommand.STAY),
dict(
testcase_name='superpressure_low_up',
superpressure=200.0,
input_action=control.AltitudeControlCommand.UP,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='superpressure_ok_down',
superpressure=1000.0,
input_action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.DOWN),
dict(
testcase_name='superpressure_ok_stay',
superpressure=1000.0,
input_action=control.AltitudeControlCommand.STAY,
expected_action=control.AltitudeControlCommand.STAY),
dict(
testcase_name='superpressure_ok_up',
superpressure=1000.0,
input_action=control.AltitudeControlCommand.UP,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='superpressure_high_down',
superpressure=2180.0,
input_action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.STAY),
dict(
testcase_name='superpressure_high_stay',
superpressure=2180.0,
input_action=control.AltitudeControlCommand.STAY,
expected_action=control.AltitudeControlCommand.STAY),
dict(
testcase_name='superpressure_high_up',
superpressure=2180.0,
input_action=control.AltitudeControlCommand.UP,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='superpressure_high_critical_down',
superpressure=2280.0,
input_action=control.AltitudeControlCommand.DOWN,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='superpressure_high_critical_stay',
superpressure=2280.0,
input_action=control.AltitudeControlCommand.STAY,
expected_action=control.AltitudeControlCommand.UP),
dict(
testcase_name='superpressure_high_critical_up',
superpressure=2280.0,
input_action=control.AltitudeControlCommand.UP,
expected_action=control.AltitudeControlCommand.UP),
)
def test_envelope_safety_layer_alters_actions_correctly(
self, superpressure: float, input_action: control.AltitudeControlCommand,
expected_action: control.AltitudeControlCommand):
atmosphere = standard_atmosphere.Atmosphere(jax.random.PRNGKey(0))
b = test_helpers.create_balloon(atmosphere=atmosphere)
envelope_safety_layer = envelope_safety.EnvelopeSafetyLayer(
b.state.envelope_max_superpressure)
action = envelope_safety_layer.get_action(input_action, superpressure)
self.assertEqual(action, expected_action)
if __name__ == '__main__':
absltest.main()
```
#### File: balloon_learning_environment/env/balloon_env_test.py
```python
import datetime as dt
import functools
import random
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from balloon_learning_environment.env import balloon_arena
from balloon_learning_environment.env import balloon_env
from balloon_learning_environment.env import features
from balloon_learning_environment.env.balloon import standard_atmosphere
from balloon_learning_environment.utils import constants
from balloon_learning_environment.utils import test_helpers
from balloon_learning_environment.utils import units
import jax
import numpy as np
START_DATE_TIME = units.datetime(2013, 3, 25, 9, 25, 32)
class BalloonEnvTest(parameterized.TestCase):
def setUp(self):
super(BalloonEnvTest, self).setUp()
test_helpers.bind_environment_gin_parameters(seed=0)
self.atmosphere = standard_atmosphere.Atmosphere(jax.random.PRNGKey(0))
self.create_balloon = functools.partial(
test_helpers.create_balloon, atmosphere=self.atmosphere)
def test_observation_space_matches_observation(self):
env = balloon_env.BalloonEnv()
shape = env.observation_space.sample().shape
# Test the shape from reset
observation = env.reset()
self.assertEqual(observation.shape, shape)
# Test the shape from multiple environment steps
for _ in range(100):
obs, _, _, _ = env.step(random.randrange(3))
self.assertEqual(obs.shape, shape)
def test_out_of_power(self):
env = balloon_env.BalloonEnv()
env.arena._balloon = self.create_balloon(
date_time=units.datetime(2021, 9, 9, 0)) # Nighttime.
for _ in range(10):
env.arena._balloon.state.battery_charge = (
env.arena._balloon.state.battery_capacity)
_, _, is_terminal, info = env.step(random.randrange(3))
self.assertFalse(is_terminal)
self.assertFalse(info['out_of_power'])
# Create an out of battery situation.
env.arena._balloon.state.battery_charge = (
1e-7 * env.arena._balloon.state.battery_capacity)
_, _, is_terminal, info = env.step(random.randrange(3))
self.assertTrue(is_terminal)
self.assertTrue(info['out_of_power'])
def test_time_elapsed(self):
arena = balloon_arena.BalloonArena(features.PerciatelliFeatureConstructor)
time_elapsed = dt.timedelta()
test_helpers.bind_environment_gin_parameters(arena=arena, seed=1)
env = balloon_env.BalloonEnv()
for _ in range(10):
_, _, _, info = env.step(0)
time_elapsed += constants.AGENT_TIME_STEP
self.assertEqual(info['time_elapsed'], time_elapsed)
@parameterized.named_parameters(
dict(testcase_name='near_center', radius=50.0, x_km=1.0, y_km=-1.0),
dict(testcase_name='near_border_1', radius=50.0, x_km=49.99, y_km=0.0),
dict(testcase_name='near_border_2', radius=50.0, x_km=0.0, y_km=-49.99),
dict(testcase_name='near_border_3', radius=50.0, x_km=-35.355, y_km=35.3),
dict(testcase_name='10km_near_border', radius=10.0, x_km=-9.99, y_km=0.0))
def test_reward_in_radius_should_be_one(self, radius, x_km, y_km):
x = units.Distance(km=x_km)
y = units.Distance(km=y_km)
balloon_state = self.create_balloon(x, y).state
arena = balloon_arena.BalloonArena(features.PerciatelliFeatureConstructor)
arena.get_balloon_state = mock.MagicMock(return_value=balloon_state)
test_helpers.bind_environment_gin_parameters(
seed=0,
station_keeping_radius_km=radius,
reward_dropoff=0.0,
arena=arena)
env = balloon_env.BalloonEnv()
_, reward, _, _ = env.step(0)
self.assertEqual(reward, 1.0)
@parameterized.named_parameters(
dict(testcase_name='zero_drop', radius_km=50.0, angle=0.6, dropoff=0.0),
dict(
testcase_name='nonzero_drop', radius_km=50.0, angle=1.3, dropoff=0.4),
dict(testcase_name='10km_radius', radius_km=10.0, angle=2.1, dropoff=0.0))
def test_reward_is_equal_to_dropoff_immediately_outside_radius(
self, radius_km: float, angle: float, dropoff: float):
# Calculate the x, y coordinates in meters just outside the radius at angle
outside_radius_distance = units.Distance(km=radius_km + 0.1)
x_pos = outside_radius_distance * np.cos(angle)
y_pos = outside_radius_distance * np.sin(angle)
balloon_state = self.create_balloon(x_pos, y_pos).state
arena = balloon_arena.BalloonArena(features.PerciatelliFeatureConstructor)
arena.get_balloon_state = mock.MagicMock(return_value=balloon_state)
test_helpers.bind_environment_gin_parameters(
seed=0,
station_keeping_radius_km=radius_km,
reward_dropoff=dropoff,
arena=arena)
env = balloon_env.BalloonEnv()
_, reward, _, _ = env.step(0)
self.assertAlmostEqual(reward, dropoff, delta=0.001)
def test_reward_is_half_after_decay_distance(self):
# 51 km from origin, 1 km from border
x1, y1 = units.Distance(m=47_548.69), units.Distance(m=18_442.39)
# 101 km from origin, 51 km from border
x2, y2 = units.Distance(m=94_165.06), units.Distance(m=36_523.16)
balloon_state1 = self.create_balloon(x=x1, y=y1).state
balloon_state2 = self.create_balloon(x=x2, y=y2).state
arena1 = balloon_arena.BalloonArena(features.PerciatelliFeatureConstructor)
arena1.get_balloon_state = mock.MagicMock(return_value=balloon_state1)
arena2 = balloon_arena.BalloonArena(features.PerciatelliFeatureConstructor)
arena2.get_balloon_state = mock.MagicMock(return_value=balloon_state2)
test_helpers.bind_environment_gin_parameters(
seed=0,
station_keeping_radius_km=50.0,
reward_dropoff=1.0,
reward_halflife=50.0,
arena=arena1)
env1 = balloon_env.BalloonEnv()
test_helpers.bind_environment_gin_parameters(
seed=0,
station_keeping_radius_km=50.0,
reward_dropoff=1.0,
reward_halflife=50.0,
arena=arena2)
env2 = balloon_env.BalloonEnv()
_, reward1, _, _ = env1.step(0)
_, reward2, _, _ = env2.step(0)
self.assertAlmostEqual(reward1 * 0.5, reward2, delta=0.001)
@parameterized.named_parameters(
dict(
testcase_name='excess_energy_down',
excess_energy=True,
action=0,
expected_reward=1.0),
dict(
testcase_name='excess_energy_stay',
excess_energy=True,
action=1,
expected_reward=1.0),
dict(
testcase_name='no_excess_energy_down',
excess_energy=False,
action=0,
expected_reward=0.95),
dict(
testcase_name='no_excess_energy_stay',
excess_energy=False,
action=1,
expected_reward=1.0))
  def test_power_regularization_is_applied_correctly_to_reward(
self, excess_energy: bool, action: int, expected_reward: float):
# Mock the distance function to always return 0, so base reward is 1.0.
with mock.patch.object(units, 'relative_distance',
mock.MagicMock(return_value=units.Distance(m=0.0))):
test_helpers.bind_environment_gin_parameters(seed=0)
env = balloon_env.BalloonEnv()
type(env.arena.get_balloon_state()).excess_energy = mock.PropertyMock(
return_value=excess_energy)
_, reward, _, _ = env.step(action)
self.assertAlmostEqual(reward, expected_reward, places=2)
def test_seeding_gives_deterministic_initial_balloon_state(self):
test_helpers.bind_environment_gin_parameters(seed=123)
env1 = balloon_env.BalloonEnv()
env2 = balloon_env.BalloonEnv()
balloon_state1 = env1.get_simulator_state().balloon_state
balloon_state2 = env2.get_simulator_state().balloon_state
self.assertEqual(balloon_state1, balloon_state2)
def test_different_seed_gives_different_initial_balloon_state(self):
test_helpers.bind_environment_gin_parameters(seed=124)
env1 = balloon_env.BalloonEnv()
test_helpers.bind_environment_gin_parameters(seed=125)
env2 = balloon_env.BalloonEnv()
balloon_state1 = env1.get_simulator_state().balloon_state
balloon_state2 = env2.get_simulator_state().balloon_state
self.assertNotEqual(balloon_state1, balloon_state2)
def test_seeding_gives_deterministic_trajectory(self):
test_helpers.bind_environment_gin_parameters(seed=1)
env1 = balloon_env.BalloonEnv()
env2 = balloon_env.BalloonEnv()
for action in (0, 0, 0, 2, 2, 2, 2, 1, 1, 1, 1, 0):
env1.step(action)
env2.step(action)
balloon_state1 = env1.get_simulator_state().balloon_state
balloon_state2 = env2.get_simulator_state().balloon_state
self.assertEqual(balloon_state1, balloon_state2)
if __name__ == '__main__':
absltest.main()
```
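As a quick sanity check on the hard-coded coordinates in `test_reward_is_half_after_decay_distance`, the Euclidean norms do land at 51 km and 101 km from the origin (a throwaway verification, not part of the test suite):

```python
import numpy as np

print(np.hypot(47_548.69, 18_442.39) / 1000.0)  # ~51.0 km, i.e. 1 km outside the 50 km radius
print(np.hypot(94_165.06, 36_523.16) / 1000.0)  # ~101.0 km, i.e. 51 km outside the radius
```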
#### File: env/balloon/power_safety_test.py
```python
import datetime as dt
from absl.testing import absltest
from absl.testing import parameterized
from balloon_learning_environment.env.balloon import control
from balloon_learning_environment.env.balloon import power_safety
from balloon_learning_environment.env.balloon import standard_atmosphere
from balloon_learning_environment.utils import test_helpers
from balloon_learning_environment.utils import units
import jax
import s2sphere as s2
_NIGHTTIME_HOTEL_LOAD = units.Power(watts=183.7)
_BATTERY_CAPACITY = units.Energy(watt_hours=2000.0)
class PowerSafetyTest(parameterized.TestCase):
@parameterized.named_parameters(
dict(
testcase_name='night_low_power_prevents_action',
date_time=units.datetime(2021, 6, 1, 0),
battery_charge_percent=0.1,
expected_action=control.AltitudeControlCommand.STAY),
dict(
testcase_name='night_high_power_allows_action',
date_time=units.datetime(2021, 6, 1, 0),
battery_charge_percent=1.0,
expected_action=control.AltitudeControlCommand.DOWN),
dict(
testcase_name='day_allows_action',
date_time=units.datetime(2021, 6, 1, 12),
battery_charge_percent=0.1,
expected_action=control.AltitudeControlCommand.DOWN))
def test_power_safety_layer_correctly_modifies_actions(
self, date_time: dt.datetime, battery_charge_percent: float,
expected_action: control.AltitudeControlCommand):
# Initialize balloon at midnight.
safety_layer = power_safety.PowerSafetyLayer(
s2.LatLng.from_degrees(0.0, 0.0), date_time)
action = safety_layer.get_action(control.AltitudeControlCommand.DOWN,
date_time, _NIGHTTIME_HOTEL_LOAD,
_BATTERY_CAPACITY * battery_charge_percent,
_BATTERY_CAPACITY)
self.assertEqual(action, expected_action)
def test_power_safety_layer_correctly_forecasts_battery_charge(self):
# We predict sunrise at 5:43 at our latlng and altitude.
# Initialize at 0:43, exactly 5 hours before sunrise.
date_time = units.datetime(2021, 8, 26, 0, 43)
safety_layer1 = power_safety.PowerSafetyLayer(
s2.LatLng.from_degrees(0.0, 0.0), date_time)
safety_layer2 = power_safety.PowerSafetyLayer(
s2.LatLng.from_degrees(0.0, 0.0), date_time)
# Use round numbers to see when we will fall below 2.5% charge by sunrise.
# After 5 hours 30 mins (sunrise + hysteresis) we will have
# battery_charge - 5.5 watt_hours charge.
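    # Concretely: a 1 W hotel load over 5.5 h costs 5.5 Wh, and the 2.5% threshold on a
    # 100 Wh battery is 2.5 Wh. So 7.9 - 5.5 = 2.4 Wh trips the safety layer (STAY),
    # while 8.1 - 5.5 = 2.6 Wh clears it (DOWN passes through).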
action1 = safety_layer1.get_action(
control.AltitudeControlCommand.DOWN,
date_time,
nighttime_power_load=units.Power(watts=1.0),
battery_charge=units.Energy(watt_hours=7.9),
battery_capacity=units.Energy(watt_hours=100.0))
action2 = safety_layer2.get_action(
control.AltitudeControlCommand.DOWN,
date_time,
nighttime_power_load=units.Power(watts=1.0),
battery_charge=units.Energy(watt_hours=8.1),
battery_capacity=units.Energy(watt_hours=100.0))
self.assertEqual(action1, control.AltitudeControlCommand.STAY)
self.assertEqual(action2, control.AltitudeControlCommand.DOWN)
def test_power_safety_prevents_acting_on_low_power_at_night(self):
# Create a balloon with 10% power at midnight.
atmosphere = standard_atmosphere.Atmosphere(jax.random.PRNGKey(0))
b = test_helpers.create_balloon(
power_percent=0.1,
date_time=units.datetime(2020, 1, 1, 0, 0, 0),
atmosphere=atmosphere)
power_safety_layer = power_safety.PowerSafetyLayer(b.state.latlng,
b.state.date_time)
for action in control.AltitudeControlCommand:
with self.subTest(action.name):
effective_action = power_safety_layer.get_action(
action, b.state.date_time, b.state.nighttime_power_load,
b.state.battery_charge, b.state.battery_capacity)
# Safety layer only prevents balloons from going down.
if action == control.AltitudeControlCommand.DOWN:
expected_action = control.AltitudeControlCommand.STAY
else:
expected_action = action
self.assertEqual(effective_action, expected_action)
if __name__ == '__main__':
absltest.main()
```
#### File: env/balloon/pressure_range_builder_test.py
```python
import functools
from absl.testing import absltest
from balloon_learning_environment.env.balloon import altitude_safety
from balloon_learning_environment.env.balloon import pressure_range_builder
from balloon_learning_environment.env.balloon import standard_atmosphere
from balloon_learning_environment.utils import test_helpers
import jax
class AltitudeRangeBuilderTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.atmosphere = standard_atmosphere.Atmosphere(jax.random.PRNGKey(0))
self.create_balloon = functools.partial(
test_helpers.create_balloon, atmosphere=self.atmosphere)
def test_get_pressure_range_returns_valid_range(self):
b = self.create_balloon()
pressure_range = pressure_range_builder.get_pressure_range(
b.state, self.atmosphere)
self.assertIsInstance(pressure_range,
pressure_range_builder.AccessiblePressureRange)
self.assertBetween(pressure_range.min_pressure, 1000.0, 100_000.0)
self.assertBetween(pressure_range.max_pressure, 1000.0, 100_000.0)
def test_get_pressure_range_returns_min_pressure_below_max_pressure(self):
b = self.create_balloon()
pressure_range = pressure_range_builder.get_pressure_range(
b.state, self.atmosphere)
self.assertLess(pressure_range.min_pressure, pressure_range.max_pressure)
def test_get_pressure_range_returns_max_pressure_above_min_altitude(self):
b = self.create_balloon()
pressure_range = pressure_range_builder.get_pressure_range(
b.state, self.atmosphere)
self.assertLessEqual(
pressure_range.max_pressure,
self.atmosphere.at_height(altitude_safety.MIN_ALTITUDE).pressure)
# TODO(joshgreaves): Add more tests when the pressure ranges are as expected.
if __name__ == '__main__':
absltest.main()
```
#### File: env/balloon/stable_init.py
```python
import dataclasses
import datetime as dt
from balloon_learning_environment.env.balloon import balloon
from balloon_learning_environment.env.balloon import solar
from balloon_learning_environment.env.balloon import standard_atmosphere
from balloon_learning_environment.env.balloon import thermal
from balloon_learning_environment.utils import constants
import numpy as np
import s2sphere as s2
@dataclasses.dataclass
class StableParams:
ambient_temperature: float
internal_temperature: float
mols_air: float
envelope_volume: float
superpressure: float
def calculate_stable_params_for_pressure(
pressure: float, envelope_volume_base: float,
envelope_volume_dv_pressure: float, envelope_mass: float,
payload_mass: float, mols_lift_gas: float, latlng: s2.LatLng,
date_time: dt.datetime, upwelling_infrared: float,
atmosphere: standard_atmosphere.Atmosphere) -> StableParams:
"""Calculates stable parameter values for the ambient pressure.
This calculates the internal and external temperature for a balloon
at the specified pressure, as well as the mols air in the ballonet,
envelope volume, and superpressure required to float at the specified
ambient temperature.
Args:
pressure: Ambient pressure of the balloon [Pa].
envelope_volume_base: The y-intercept for the balloon envelope volume
model [m^3].
envelope_volume_dv_pressure: The slope for the balloon envelope volume
model.
envelope_mass: Mass of the balloon envelope [kg].
payload_mass: The mass of the payload. The term payload here refers to
all parts of the flight system other than the balloon envelope [kg].
mols_lift_gas: Mols of helium within the balloon envelope [mols].
latlng: The current latitude and longitude of the balloon.
date_time: The current date and time of the balloon.
upwelling_infrared: The upwelling infrared value.
atmosphere: The current atmosphere state.
Returns:
    A StableParams dataclass with the ambient temperature [K], internal
    temperature [K], mols air in the ballonet [mols], envelope volume [m^3],
    and superpressure [Pa].
"""
ambient_temperature = atmosphere.at_pressure(pressure).temperature
# ---- Cold start mols air in envelope ----
# Compute the mols gas in balloon that gives the desired pressure.
# This comes from rho * V = m, where:
#
#. ambient_pressure * air_molar_mass
# rho = ----------------------------------
# universal_gas_const * ambient_temp
#
# m = (mass_envelope + mass_payload +
# helium_molar_mass * mols_helium +
# air_molar_mass * mols_air)
#
# Then, you just solve for mols_air to get the following equation.
mols_air = (
(pressure * constants.DRY_AIR_MOLAR_MASS * envelope_volume_base /
(constants.UNIVERSAL_GAS_CONSTANT * ambient_temperature) -
envelope_mass - payload_mass - constants.HE_MOLAR_MASS * mols_lift_gas)
/ constants.DRY_AIR_MOLAR_MASS)
# TODO(joshgreaves): Warning or Exception for initializing out of range?
mols_air = np.clip(mols_air, 0.0, None)
# ---- Cold start internal temperature ----
internal_temperature = 206.0 # [K] pick an average value to start search.
solar_elevation, _, solar_flux = solar.solar_calculator(latlng, date_time)
# Apply a few iterations of Newton-Raphson to find where the rate of
# change of temperature is close to 0.
delta_temp = 0.01
for _ in range(10):
# Note: we use envelope_volume_base rather than envelope_volume, since
# empirically it doesn't make much of a difference, and the envelope
# volume isn't calculated until the temperature is calculated.
d_internal_temp1 = thermal.d_balloon_temperature_dt(
envelope_volume_base, envelope_mass,
internal_temperature - delta_temp / 2, ambient_temperature, pressure,
solar_elevation, solar_flux, upwelling_infrared)
d_internal_temp2 = thermal.d_balloon_temperature_dt(
envelope_volume_base, envelope_mass,
internal_temperature + delta_temp / 2, ambient_temperature, pressure,
solar_elevation, solar_flux, upwelling_infrared)
  # d2_internal_temp is the second derivative of temperature w.r.t. time.
d2_internal_temp = (d_internal_temp2 - d_internal_temp1) / delta_temp
mean_d_internal_temp = (d_internal_temp1 + d_internal_temp2) / 2.0
if abs(d2_internal_temp) > 0.0:
internal_temperature -= (mean_d_internal_temp / d2_internal_temp)
if abs(mean_d_internal_temp) < 1e-5:
break
# ---- Cold start superpressure ----
envelope_volume, superpressure = (
balloon.Balloon.calculate_superpressure_and_volume(
mols_lift_gas, mols_air, internal_temperature, pressure,
envelope_volume_base, envelope_volume_dv_pressure))
return StableParams(ambient_temperature, internal_temperature, mols_air,
envelope_volume, superpressure)
def cold_start_to_stable_params(
balloon_state: balloon.BalloonState,
atmosphere: standard_atmosphere.Atmosphere) -> None:
"""Sets parameters to stable values for the ambient pressure.
The pressure altitude of the balloon depends on a number of variables,
such as the number of mols of air in the ballonet, the temperature
of air and gas inside the envelope, and the superpressure. To have
a balloon float at a specific pressure level, these parameters should
be updated to match the specified ambient pressure.
Args:
balloon_state: The balloon state to update with stable params.
atmosphere: The current atmosphere the balloon is flying in.
"""
stable_params = calculate_stable_params_for_pressure(
balloon_state.pressure, balloon_state.envelope_volume_base,
balloon_state.envelope_volume_dv_pressure, balloon_state.envelope_mass,
balloon_state.payload_mass, balloon_state.mols_lift_gas,
balloon_state.latlng, balloon_state.date_time,
balloon_state.upwelling_infrared, atmosphere)
balloon_state.ambient_temperature = stable_params.ambient_temperature
balloon_state.internal_temperature = stable_params.internal_temperature
balloon_state.mols_air = stable_params.mols_air
balloon_state.envelope_volume = stable_params.envelope_volume
balloon_state.superpressure = stable_params.superpressure
```
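For readability, the buoyancy balance encoded in the `mols_air` comment block above, and its solution for the ballonet air, can be written as (same symbols as the code):

```latex
\frac{P\,M_{\mathrm{air}}}{R\,T}\,V
  = m_{\mathrm{env}} + m_{\mathrm{payload}} + M_{\mathrm{He}}\,n_{\mathrm{He}} + M_{\mathrm{air}}\,n_{\mathrm{air}}
\quad\Longrightarrow\quad
n_{\mathrm{air}} = \frac{\dfrac{P\,M_{\mathrm{air}}\,V}{R\,T}
  - m_{\mathrm{env}} - m_{\mathrm{payload}} - M_{\mathrm{He}}\,n_{\mathrm{He}}}{M_{\mathrm{air}}}
```

Here P is the ambient pressure, T the ambient temperature, V the base envelope volume, and R the universal gas constant, matching the expression that is assigned to `mols_air` and then clipped at zero.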
#### File: balloon_learning_environment/env/gym.py
```python
import contextlib
def register_env() -> None:
"""Register the Gym environment."""
# We need to import Gym's registration module inline or else we'll
# get a circular dependency that will result in an error when importing gym
from gym.envs import registration # pylint: disable=g-import-not-at-top
env_id = 'BalloonLearningEnvironment-v0'
env_entry_point = 'balloon_learning_environment.env.balloon_env:BalloonEnv'
# We guard registration by checking if our env is already registered
  # This is necessary because the plugin system will load our module
# which also calls this function. If multiple `register()` calls are
# made this will result in a warning to the user.
registered = env_id in registration.registry.env_specs
if not registered:
with contextlib.ExitStack() as stack:
# This is a workaround for Gym 0.21 which didn't support
# registering into the root namespace with the plugin system.
if hasattr(registration, 'namespace'):
stack.enter_context(registration.namespace(None))
registration.register(id=env_id, entry_point=env_entry_point)
```
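A minimal usage sketch (assuming the package is importable; when installed via pip, the `gym.envs` entry point declared in setup.py triggers this registration automatically):

```python
import gym
from balloon_learning_environment.env import gym as ble_gym

ble_gym.register_env()  # safe to call explicitly; skipped if the id is already registered
env = gym.make('BalloonLearningEnvironment-v0')
observation = env.reset()
```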
#### File: balloon_learning_environment/metrics/collector_dispatcher_test.py
```python
from absl import flags
from absl.testing import absltest
from balloon_learning_environment.metrics import collector
from balloon_learning_environment.metrics import collector_dispatcher
from balloon_learning_environment.metrics import statistics_instance
class CollectorDispatcherTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._na = 5
self._tmpdir = flags.FLAGS.test_tmpdir
def test_with_no_collectors(self):
# This test verifies that we can run successfully with no collectors.
metrics = collector_dispatcher.CollectorDispatcher(
self._tmpdir, self._na, [], 0)
metrics.pre_training()
for _ in range(4):
metrics.begin_episode()
for _ in range(10):
metrics.step(statistics_instance.StatisticsInstance(0, 0, 0, False))
metrics.end_episode(
statistics_instance.StatisticsInstance(0, 0, 0, False))
metrics.end_training()
def test_with_simple_collector(self):
# Create a simple collector that keeps track of received statistics.
logged_stats = []
class SimpleCollector(collector.Collector):
def get_name(self) -> str:
return 'simple'
def pre_training(self) -> None:
pass
def begin_episode(self) -> None:
logged_stats.append([])
def step(self, statistics) -> None:
logged_stats[-1].append(statistics)
def end_episode(self, statistics) -> None:
logged_stats[-1].append(statistics)
def end_training(self) -> None:
pass
# Create a simple collector that tracks method calls.
counts = {
'pre_training': 0,
'begin_episode': 0,
'step': 0,
'end_episode': 0,
'end_training': 0,
}
class CountCollector(collector.Collector):
def get_name(self) -> str:
return 'count'
def pre_training(self) -> None:
counts['pre_training'] += 1
def begin_episode(self) -> None:
counts['begin_episode'] += 1
def step(self, statistics) -> None:
counts['step'] += 1
def end_episode(self, unused_statistics) -> None:
counts['end_episode'] += 1
def end_training(self) -> None:
counts['end_training'] += 1
# Run a collection loop.
metrics = collector_dispatcher.CollectorDispatcher(
self._tmpdir, self._na, [SimpleCollector, CountCollector], 0)
metrics.pre_training()
expected_stats = []
num_episodes = 4
num_steps = 10
for _ in range(num_episodes):
metrics.begin_episode()
expected_stats.append([])
for j in range(num_steps):
stat = statistics_instance.StatisticsInstance(
step=j, action=num_steps-j, reward=j, terminal=False)
metrics.step(stat)
expected_stats[-1].append(stat)
stat = statistics_instance.StatisticsInstance(
step=num_steps, action=0, reward=num_steps, terminal=True)
metrics.end_episode(stat)
expected_stats[-1].append(stat)
metrics.end_training()
self.assertEqual(
counts,
{'pre_training': 1, 'begin_episode': num_episodes,
'step': num_episodes * num_steps, 'end_episode': num_episodes,
'end_training': 1})
self.assertEqual(expected_stats, logged_stats)
if __name__ == '__main__':
absltest.main()
```
#### File: balloon_learning_environment/metrics/collector_test.py
```python
import os.path as osp
from absl import flags
from absl.testing import absltest
from balloon_learning_environment.metrics import collector
# A simple subclass that implements the abstract methods.
class SimpleCollector(collector.Collector):
def get_name(self) -> str:
return 'simple'
def pre_training(self) -> None:
pass
def begin_episode(self) -> None:
pass
def step(self, unused_statistics) -> None:
pass
def end_episode(self, unused_statistics) -> None:
pass
def end_training(self) -> None:
pass
class CollectorTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._na = 5
self._tmpdir = flags.FLAGS.test_tmpdir
def test_instantiate_abstract_class(self):
# It is not possible to instantiate Collector as it has abstract methods.
with self.assertRaises(TypeError):
collector.Collector(self._tmpdir, self._na, 'fail')
def test_valid_subclass(self):
simple_collector = SimpleCollector(self._tmpdir, self._na, 0)
self.assertEqual(simple_collector._base_dir,
osp.join(self._tmpdir, 'metrics/simple'))
self.assertEqual(self._na, simple_collector._num_actions)
self.assertTrue(osp.exists(simple_collector._base_dir))
def test_valid_subclass_with_no_basedir(self):
simple_collector = SimpleCollector(None, self._na, 0)
self.assertIsNone(simple_collector._base_dir)
self.assertEqual(self._na, simple_collector._num_actions)
if __name__ == '__main__':
absltest.main()
```
#### File: balloon_learning_environment/models/models.py
```python
from importlib import resources
import os
from typing import Optional
import gin
import tensorflow as tf
_MODEL_ROOT = 'balloon_learning_environment/models/'
_OFFLINE_SKIES22_RELATIVE_PATH = os.path.join(
_MODEL_ROOT, 'offlineskies22_decoder.msgpack')
_PERCIATELLI44_RELATIVE_PATH = os.path.join(
_MODEL_ROOT, 'perciatelli44.pb')
@gin.configurable
def load_offlineskies22(path: Optional[str] = None) -> bytes:
"""Loads offlineskies22 serialized wind VAE parameters.
There are three places this function looks:
1. At the path specified, if one is specified.
2. Under the models package using importlib.resources. It should be
found there if the code was installed with pip.
3. Relative to the project root. It should be found there if running
from a freshly cloned repo.
Args:
path: An optional path to load the VAE weights from.
Returns:
The serialized VAE weights as bytes.
Raises:
ValueError: if a path is specified but the weights can't be loaded.
RuntimeError: if the weights couldn't be found in any of the
specified locations.
"""
# Attempt 1: Load from path, if specified.
# If a path is specified, we expect it is a good path.
if path is not None:
try:
with tf.io.gfile.GFile(path, 'rb') as f:
return f.read()
except tf.errors.NotFoundError:
raise ValueError(f'offlineskies22 checkpoint not found at {path}')
# Attempt 2: Load from location expected in the built wheel.
try:
with resources.open_binary('balloon_learning_environment.models',
'offlineskies22_decoder.msgpack') as f:
return f.read()
except FileNotFoundError:
pass
# Attempt 3: Load from the path relative to the source root.
try:
with tf.io.gfile.GFile(_OFFLINE_SKIES22_RELATIVE_PATH, 'rb') as f:
return f.read()
except tf.errors.NotFoundError:
pass
raise RuntimeError(
'Unable to load wind VAE checkpoint from the expected locations.')
@gin.configurable
def load_perciatelli44(path: Optional[str] = None) -> bytes:
"""Loads Perciatelli44.pb as bytes.
There are three places this function looks:
1. At the path specified, if one is specified.
2. Under the models package using importlib.resources. It should be
found there if the code was installed with pip.
3. Relative to the project root. It should be found there if running
from a freshly cloned repo.
Args:
    path: An optional path to load the Perciatelli44 network from.
  Returns:
    The serialized Perciatelli44 network as bytes.
Raises:
ValueError: if a path is specified but the weights can't be loaded.
RuntimeError: if the weights couldn't be found in any of the
specified locations.
"""
# Attempt 1: Load from path, if specified.
# If a path is specified, we expect it is a good path.
if path is not None:
try:
with tf.io.gfile.GFile(path, 'rb') as f:
return f.read()
except tf.errors.NotFoundError:
raise ValueError(f'perciatelli44 checkpoint not found at {path}')
# Attempt 2: Load from location expected in the built wheel.
try:
with resources.open_binary('balloon_learning_environment.models',
'perciatelli44.pb') as f:
return f.read()
except FileNotFoundError:
pass
# Attempt 3: Load from the path relative to the source root.
try:
with tf.io.gfile.GFile(_PERCIATELLI44_RELATIVE_PATH, 'rb') as f:
return f.read()
except FileNotFoundError:
pass
raise RuntimeError(
'Unable to load Perciatelli44 checkpoint from the expected locations.')
```
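Both loaders return raw bytes and share the same three-location fallback; a minimal usage sketch (assumes the checkpoint files are present in one of the documented locations):

```python
from balloon_learning_environment.models import models

vae_bytes = models.load_offlineskies22()    # serialized wind VAE decoder (msgpack)
policy_bytes = models.load_perciatelli44()  # serialized Perciatelli44 network (.pb)
print(len(vae_bytes), len(policy_bytes))    # just confirms something non-empty was loaded
```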
#### File: balloon_learning_environment/utils/transforms.py
```python
import numpy as np
def linear_rescale_with_extrapolation(x: float,
vmin: float,
vmax: float) -> float:
"""Returns x normalized between [vmin, vmax], with possible extrapolation."""
if vmax <= vmin:
raise ValueError('Interval must be such that vmax > vmin.')
else:
return (x - vmin) / (vmax - vmin)
def undo_linear_rescale_with_extrapolation(x: float, vmin: float,
vmax: float) -> float:
"""Computes the input of linear_rescale_with_extrapolation given output."""
if vmax <= vmin:
raise ValueError('Interval must be such that vmax > vmin.')
return vmin + x * (vmax - vmin)
def linear_rescale_with_saturation(x: float, vmin: float, vmax: float) -> float:
"""Returns x normalized in [0, 1]."""
y = linear_rescale_with_extrapolation(x, vmin, vmax)
return np.clip(y, 0.0, 1.0).item()
def squash_to_unit_interval(x: float, constant: float) -> float:
"""Scales non-negative x to be in range [0, 1], with a squash."""
if constant <= 0:
raise ValueError('Squash constant must be greater than zero.')
if x < 0:
raise ValueError('Squash can only be performed on a positive value.')
return x / (x + constant)
def undo_squash_to_unit_interval(x: float, constant: float) -> float:
"""Computes the input value of squash_to_unit_interval given the output."""
if constant <= 0:
raise ValueError('Squash constant must be greater than zero.')
  if x < 0 or x >= 1:
raise ValueError('Undo squash can only be performed on a value in [0, 1).')
return (x * constant) / (1 - x)
```
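A small round-trip sketch for the helpers above (arbitrary values, shown only to make the rescale/squash inverses concrete):

```python
from balloon_learning_environment.utils import transforms

y = transforms.linear_rescale_with_extrapolation(15.0, vmin=10.0, vmax=20.0)    # 0.5
x = transforms.undo_linear_rescale_with_extrapolation(y, vmin=10.0, vmax=20.0)  # 15.0

s = transforms.squash_to_unit_interval(3.0, constant=1.0)      # 3 / (3 + 1) = 0.75
x2 = transforms.undo_squash_to_unit_interval(s, constant=1.0)  # 0.75 / (1 - 0.75) = 3.0
```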
#### File: johannah/balloon-learning-environment/setup.py
```python
import os
import pathlib
import setuptools
from setuptools.command import build_py
from setuptools.command import develop
current_directory = pathlib.Path(__file__).parent
description = (current_directory / 'README.md').read_text()
core_requirements = [
'absl-py',
'dopamine-rl >= 4.0.0',
'flax',
'gin-config',
'gym',
'jax >= 0.2.28',
'jaxlib >= 0.1.76',
'opensimplex <= 0.3.0',
's2sphere',
'scikit-learn',
'tensorflow',
'tensorflow-probability',
'transitions',
]
acme_requirements = [
'dm-acme',
'dm-haiku',
'dm-reverb',
'dm-sonnet',
'rlax',
'xmanager',
]
def generate_requirements_file(path=None):
"""Generates requirements.txt file needed for running Acme.
It is used by Launchpad GCP runtime to generate Acme requirements to be
installed inside the docker image. Acme itself is not installed from pypi,
but instead sources are copied over to reflect any local changes made to
the codebase.
Args:
path: path to the requirements.txt file to generate.
"""
if not path:
path = os.path.join(os.path.dirname(__file__), 'acme_requirements.txt')
with open(path, 'w') as f:
for package in set(core_requirements + acme_requirements):
f.write(f'{package}\n')
class BuildPy(build_py.build_py):
def run(self):
generate_requirements_file()
build_py.build_py.run(self)
class Develop(develop.develop):
def run(self):
generate_requirements_file()
develop.develop.run(self)
cmdclass = {
'build_py': BuildPy,
'develop': Develop,
}
entry_points = {
'gym.envs': [
'__root__=balloon_learning_environment.env.gym:register_env'
]
}
setuptools.setup(
name='balloon_learning_environment',
long_description=description,
long_description_content_type='text/markdown',
version='1.0.1',
cmdclass=cmdclass,
packages=setuptools.find_packages(),
install_requires=core_requirements,
extras_require={
'acme': acme_requirements,
},
package_data={
'': ['*.msgpack', '*.pb', '*.gin'],
},
entry_points=entry_points,
python_requires='>=3.7',
)
``` |
{
"source": "johannah/DH",
"score": 2
} |
#### File: johannah/DH/move_robot.py
```python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import robosuite
import imageio
import numpy as np
import os
from glob import glob
from copy import deepcopy
import pickle
import json
from imageio import mimwrite
from replay_buffer import ReplayBuffer, compress_frame
from torch.utils.tensorboard import SummaryWriter
import torch
import robosuite.utils.macros as macros
torch.set_num_threads(3)
import TD3_kinematic
from dh_utils import seed_everything, normalize_joints, skip_state_keys, robotDH
from utils import build_replay_buffer, build_env, build_model, plot_replay, get_rot_mat, MAX_RELATIVE_ACTION
from IPython import embed
def run(env, replay_buffer, cfg, cam_dim, savebase):
robot_name = cfg['robot']['robots'][0]
env_type = cfg['experiment']['env_type']
num_steps = 0
total_steps = replay_buffer.max_size-1
use_frames = cam_dim[0] > 0
if use_frames:
print('recording camera: %s'%args.camera)
h, w, c = cam_dim
torques = []
rewards = []
while num_steps < total_steps:
done = False
state, body = env.reset()
if use_frames:
frame_compressed = compress_frame(env.render(camera_name=args.camera, height=h, width=w))
ep_reward = 0
e_step = 0
while not done:# and e_step < args.max_eval_timesteps:
action = np.array([0.00001, .0001, -.0001, .01, .0, .0, 0, 0])
next_state, next_body, reward, done, info = env.step(action) # take a random action
ep_reward += reward
if use_frames:
next_frame_compressed = compress_frame(env.render(camera_name=args.camera, height=h, width=w))
replay_buffer.add(state, body, action, reward, next_state, next_body, done,
frame_compressed=frame_compressed,
next_frame_compressed=next_frame_compressed)
frame_compressed = next_frame_compressed
else:
replay_buffer.add(state, body, action, reward, next_state, next_body, done)
if e_step > 100:
done = True
torques.append(env.env.robots[0].torques)
print(next_body[:7] - body[:7])
print(torques[-1])
state = next_state
body = next_body
num_steps+=1
e_step+=1
rewards.append(ep_reward)
replay_buffer.torques = torques
return rewards, replay_buffer
def rollout():
print(cfg)
if "kinematic_function" in cfg['experiment'].keys():
kinematic_fn = cfg['experiment']['kinematic_function']
print("setting kinematic function", kinematic_fn)
robot_name = cfg['robot']['robots'][0]
if 'robot_dh' in cfg['robot'].keys():
robot_dh_name = cfg['robot']['robot_dh']
else:
robot_dh_name = cfg['robot']['robots'][0]
env_type = cfg['experiment']['env_type']
env = build_env(cfg['robot'], cfg['robot']['frame_stack'], skip_state_keys=skip_state_keys, env_type=env_type, default_camera=args.camera)
if 'eval_seed' in cfg['experiment'].keys():
eval_seed = cfg['experiment']['eval_seed'] + 1000
else:
eval_seed = cfg['experiment']['seed'] + 1000
    if args.frames:
        cam_dim = (240, 240, 3)
    else:
        cam_dim = (0, 0, 0)
if 'eval_replay_buffer_size' in cfg['experiment'].keys():
eval_replay_buffer_size = cfg['experiment']['eval_replay_buffer_size']
else:
eval_replay_buffer_size = env.max_timesteps*args.num_eval_episodes
print('running eval for %s steps'%eval_replay_buffer_size)
savebase = '_show_%06d'%(eval_replay_buffer_size)
replay_file = savebase+'.pkl'
movie_file = savebase+'_%s.mp4' %args.camera
#if not os.path.exists(replay_file):
if 1:
replay_buffer = build_replay_buffer(cfg, env, eval_replay_buffer_size, cam_dim, eval_seed)
rewards, replay_buffer = run(env, replay_buffer, cfg, cam_dim, savebase)
pickle.dump(replay_buffer, open(replay_file, 'wb'))
plt.figure()
plt.plot(rewards)
plt.title('eval episode rewards')
plt.savefig(savebase+'.png')
else:
replay_buffer = pickle.load(open(replay_file, 'rb'))
plot_replay(env, replay_buffer, savebase, frames=args.frames)
if __name__ == '__main__':
import argparse
from glob import glob
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', default='experiments/base_robosuite.cfg')
parser.add_argument('--eval', action='store_true', default=False)
parser.add_argument('--frames', action='store_true', default=False)
parser.add_argument('--camera', default='', choices=['default', 'frontview', 'sideview', 'birdview', 'agentview'])
parser.add_argument('--num_eval_episodes', default=2, type=int)
# parser.add_argument('--max_eval_timesteps', default=100, type=int)
args = parser.parse_args()
# keys that are robot specific
cfg = json.load(open(args.cfg))
rollout()
```
#### File: johannah/DH/utils.py
```python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from copy import deepcopy
import sys
from imageio import imwrite
import math
import os
import random
from collections import deque
import numpy as np
import scipy.linalg as sp_la
from imageio import mimwrite
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributions as pyd
from skimage.util.shape import view_as_windows
import gym
import gym.spaces as spaces
import robosuite
import robosuite.utils.transform_utils as T
import robosuite.utils.macros as macros
macros.IMAGE_CONVENTION = 'opencv'
from robosuite.utils.transform_utils import mat2quat
from dm_control import suite
import TD3, TD3_kinematic, TD3_kinematic_critic
from replay_buffer import ReplayBuffer, compress_frame
from robosuite.utils.dh_parameters import robotDH
#from dh_utils import quaternion_matrix, quaternion_from_matrix, robot_attributes, normalize_joints
from IPython import embed;
#MAX_RELATIVE_ANGLE = np.pi/16
class eval_mode(object):
def __init__(self, *models):
self.models = models
def __enter__(self):
self.prev_states = []
for model in self.models:
self.prev_states.append(model.training)
model.train(False)
def __exit__(self, *args):
for model, state in zip(self.models, self.prev_states):
model.train(state)
return False
def soft_update_params(net, target_net, tau):
for param, target_param in zip(net.parameters(), target_net.parameters()):
target_param.data.copy_(tau * param.data +
(1 - tau) * target_param.data)
def set_seed_everywhere(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
def make_dir(*path_parts):
dir_path = os.path.join(*path_parts)
try:
os.mkdir(dir_path)
except OSError:
pass
return dir_path
def tie_weights(src, trg):
assert type(src) == type(trg)
trg.weight = src.weight
trg.bias = src.bias
def weight_init(m):
"""Custom weight init for Conv2D and Linear layers."""
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
gain = nn.init.calculate_gain('relu')
nn.init.orthogonal_(m.weight.data, gain)
if hasattr(m.bias, 'data'):
m.bias.data.fill_(0.0)
def mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):
if hidden_depth == 0:
mods = [nn.Linear(input_dim, output_dim)]
else:
mods = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
for i in range(hidden_depth - 1):
mods += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)]
mods.append(nn.Linear(hidden_dim, output_dim))
if output_mod is not None:
mods.append(output_mod)
trunk = nn.Sequential(*mods)
return trunk
def to_np(t):
if t is None:
return None
elif t.nelement() == 0:
return np.array([])
else:
return t.cpu().detach().numpy()
def dm_site_pose_in_base_from_name(physics, root_body, name):
"""
A helper function that takes in a named data field and returns the pose
of that object in the base frame.
Args:
name (str): Name of site in sim to grab pose
Returns:
np.array: (4,4) array corresponding to the pose of @name in the base frame
"""
pos_in_world = physics.named.data.xpos[name]
rot_in_world = physics.named.data.xmat[name].reshape((3, 3))
pose_in_world = T.make_pose(pos_in_world, rot_in_world)
base_pos_in_world = physics.named.data.xpos[root_body]
base_rot_in_world = physics.named.data.xmat[root_body].reshape((3, 3))
base_pose_in_world = T.make_pose(base_pos_in_world, base_rot_in_world)
world_pose_in_base = T.pose_inv(base_pose_in_world)
pose_in_base = T.pose_in_A_to_pose_in_B(pose_in_world, world_pose_in_base)
return pose_in_base
def site_pose_in_base_from_name(sim, root_body, name):
"""
A helper function that takes in a named data field and returns the pose
of that object in the base frame.
Args:
name (str): Name of site in sim to grab pose
Returns:
np.array: (4,4) array corresponding to the pose of @name in the base frame
"""
pos_in_world = sim.data.get_site_xpos(name)
rot_in_world = sim.data.get_site_xmat(name).reshape((3, 3))
pose_in_world = T.make_pose(pos_in_world, rot_in_world)
base_pos_in_world = sim.data.get_body_xpos(root_body)
base_rot_in_world = sim.data.get_body_xmat(root_body).reshape((3, 3))
base_pose_in_world = T.make_pose(base_pos_in_world, base_rot_in_world)
world_pose_in_base = T.pose_inv(base_pose_in_world)
pose_in_base = T.pose_in_A_to_pose_in_B(pose_in_world, world_pose_in_base)
return pose_in_base
class EnvStack():
def __init__(self, env, k, skip_state_keys=[], env_type='robosuite', default_camera='', xpos_targets='', bpos="root"):
assert env_type in ['robosuite', 'dm_control']
"""
xpos_targets - env positions to grab
"""
# dm_control named.data to use for eef position
# see https://github.com/deepmind/dm_control/blob/5ca4094e963236d0b7b3b1829f9097ad865ebabe/dm_control/suite/reacher.py#L66 for example:
env.reset()
self.bpos = bpos
self.xpos_targets = xpos_targets
self.env_type = env_type
self.env = env
self.k = k
self._body = deque([], maxlen=k)
self._state = deque([], maxlen=k)
self.body_shape = k*len(self.make_body())
if self.env_type == 'robosuite':
self.control_min = self.env.action_spec[0].min()
self.control_max = self.env.action_spec[1].max()
self.control_shape = self.env.action_spec[0].shape[0]
self.max_timesteps = self.env.horizon
self.sim = self.env.sim
if default_camera == '':
self.default_camera = 'frontview'
self.base_matrix = np.array([[0,1,0,0],
[1,0,0,0],
[0,0,-1,0],
[0,0,0,1]])
self.bpos = self.base_matrix[:3, 3]
self.bori = T.mat2quat(self.base_matrix)
# damping_ratio, kp, action
self.n_joints = len(self.env.robots[0].controller.qpos_index)
if self.env.robots[0].controller.impedance_mode == 'fixed':
self.joint_indexes = np.arange(self.n_joints).astype(np.int)
elif self.env.robots[0].controller.impedance_mode == 'variable':
self.joint_indexes = np.arange(self.n_joints*2, self.n_joints*3).astype(np.int)
            elif self.env.robots[0].controller.impedance_mode == 'variable_kp':
                # assumed fix: the original repeated the 'variable' condition; this index range matches the (kp, action) layout of variable_kp
                self.joint_indexes = np.arange(self.n_joints, self.n_joints*2).astype(np.int)
elif self.env_type == 'dm_control':
self.control_min = self.env.action_spec().minimum[0]
self.control_max = self.env.action_spec().maximum[0]
self.control_shape = self.env.action_spec().shape[0]
self.max_timesteps = int(self.env._step_limit)
self.sim = self.env.physics
if default_camera == '':
self.default_camera = -1
self.base_matrix = np.eye(4)
# TODO hardcoded for reacher
# eye is right rot for reachr
#self.base_matrix[:3, :3] = self.env.physics.named.data.geom_xmat['root'].reshape(3,3)
self.bpos = self.env.physics.named.data.geom_xpos['root']
self.base_matrix[:3, 3] = self.bpos
#self.base_matrix[1,1] = 1 # TODO FOUND EXPERIMENTALLY FOR REACHER
self.bori = quaternion_from_matrix(self.base_matrix)
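            # note: quaternion_from_matrix comes from the dh_utils import that is commented out above;
            # this dm_control branch assumes that helper is importable at runtime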
total_size = 0
self.skip_state_keys = skip_state_keys
self.obs_keys = [o for o in list(self.env.observation_spec().keys()) if o not in self.skip_state_keys]
self.obs_sizes = {}
self.obs_specs = {}
for i, j in self.env.observation_spec().items():
if i in self.obs_keys:
if type(j) in [int, np.bool]: s = 1
else:
l = len(j.shape)
if l == 0: s = 1
elif l == 1: s = j.shape[0]
elif l == 2: s = (j.shape[0]*j.shape[1])
else:
print("write code to handle this shape",j.shape); sys.exit()
total_size +=s
self.obs_sizes[i] = s
self.obs_specs[i] = j
self.observation_space = spaces.Box(-np.inf, np.inf, (total_size*k, ))
def render(self, camera_name='', height=240, width=240, depth=False):
if camera_name == '':
camera_name = self.default_camera
if self.env_type == 'dm_control':
frame = self.sim.render(camera_id=camera_name, height=height, width=width, depth=depth)
elif self.env_type == 'robosuite':
frame = self.sim.render(camera_name=camera_name, height=height, width=width, depth=depth)[::-1]
return frame
def make_obs(self, obs):
a = []
for i in self.obs_keys:
o = obs[i]
if type(o) in [np.ndarray, np.array, list]:
o = np.ravel(o)
else:
o = np.array([o])
a.append(o)
return np.concatenate(a)
def make_body(self):
if self.env_type == 'dm_control':
# TODO hardcoded arm debug reacher
bxqs = self.env.physics.data.qpos
# TODO FIX
#for t in self.xpos_targets:
# pos_in_world = self.env.physics.named.data.geom_xpos[t]
# rot_in_world = self.env.physics.named.data.geom_xmat[t].reshape((3, 3))
# targ_rmat = T.make_pose(pos_in_world, rot_in_world).reshape(16)
# bxqs = np.hstack((bxqs, pos_in_world, targ_rmat))
if self.env_type == 'robosuite':
r = self.env.robots[0]
nj = len(r._joint_positions)
bxqs = np.zeros(((nj + (19 * len(self.xpos_targets)))))
bxqs[:nj] = deepcopy(r._joint_positions)
idx = nj
for t in self.xpos_targets:
#sim_eef_pose = deepcopy(env.robots[0].pose_in_base_from_name(t)
sid = self.env.sim.model.site_name2id(t)
rmat = site_pose_in_base_from_name(self.env.sim, r.robot_model.root_body, t)
bxqs[idx:idx+3] = deepcopy(self.env.sim.data.site_xpos[sid])
bxqs[idx+3:idx+19] = deepcopy(rmat.reshape(16))
idx += 19
#bxqs = np.hstack((bxqs, self.env.sim.data.site_xpos[sid], rmat.reshape(16)))
#bxq = np.hstack((r._joint_positions, self.env.sim.data.site_xpos[r.eef_site_id], self.env.sim.data.get_body_xquat[r.eef_site_id]))
#bxq = np.hstack((r._joint_positions, self.env.sim.data.site_xpos[r.eef_site_id], r.pose_in_base_from_name('gripper0_eef').reshape(16)))
# joint pos, eef in world frame, grip site in base frame
#bx = np.hstack((r.eef_pos(), r.eef_quat()))
return bxqs
def reset(self):
o = self.env.reset()
if self.env_type == 'dm_control':
o = o.observation
o = self.make_obs(o)
b = self.make_body()
for _ in range(self.k):
self._state.append(o)
self._body.append(b)
return self._get_obs(), self._get_body()
def step(self, action):
if self.env_type == 'robosuite':
state, reward, done, info = self.env.step(action)
elif self.env_type == 'dm_control':
o = self.env.step(action)
done = o.step_type.last()
state = o.observation
reward = o.reward
info = o.step_type
self._state.append(self.make_obs(state))
self._body.append(self.make_body())
return self._get_obs(), self._get_body(), reward, done, info
def _get_obs(self):
assert len(self._state) == self.k
return np.concatenate(list(self._state), axis=0)
def _get_body(self):
assert len(self._body) == self.k
return np.concatenate(list(self._body), axis=0)
def build_env(cfg, k, skip_state_keys, env_type='robosuite', default_camera=''):
if env_type == 'robosuite':
if 'controller_config_file' in cfg.keys():
cfg_file = os.path.split(cfg['controller_config_file'])[1]
cfg_path = os.path.join(os.path.split(robosuite.__file__)[0], 'controllers', 'config', cfg_file)
print('loading controller from', cfg_path)
controller_configs = robosuite.load_controller_config(custom_fpath=cfg_path)
else:
print('loading DEFAULT controller')
controller_configs = robosuite.load_controller_config(default_controller=cfg['controller'])
#from robosuite.models.grippers import JacoThreeFingerGripper
#gripper = JacoThreeFingerGripper
env = robosuite.make(env_name=cfg['env_name'],
robots=cfg['robots'],
controller_configs=controller_configs,
use_camera_obs=cfg['use_camera_obs'],
use_object_obs=cfg['use_object_obs'],
reward_shaping=cfg['reward_shaping'],
camera_names=cfg['camera_names'],
horizon=cfg['horizon'],
control_freq=cfg['control_freq'],
ignore_done=False,
hard_reset=False,
reward_scale=1.0,
has_offscreen_renderer=True,
has_renderer=False,
)
elif env_type == 'dm_control':
env = suite.load(cfg['robots'][0], cfg['env_name'])
xpos_targets = cfg['xpos_targets']
env = EnvStack(env, k=k, skip_state_keys=skip_state_keys, env_type=env_type, default_camera=default_camera, xpos_targets=xpos_targets)
return env
def build_model(policy_name, env, cfg):
state_dim = env.observation_space.shape[0]
action_dim = env.control_shape
body_dim = env.body_shape
# 1 for open/close gripper
min_action = env.env.robots[0].action_limits[0]
max_action = env.env.robots[0].action_limits[1]
if policy_name == 'TD3':
kwargs = {'tau':0.005,
'action_dim':action_dim, 'state_dim':state_dim, 'body_dim':body_dim,
'policy_noise':0.2, 'max_policy_action':max_action,
'noise_clip':0.5, 'policy_freq':2,
'discount':0.99, 'max_action':max_action, 'min_action':min_action}
policy = TD3.TD3(**kwargs)
if policy_name == 'TD3_kinematic':
kwargs = {'tau':0.005,
'action_dim':action_dim, 'state_dim':state_dim, 'body_dim':body_dim,
'policy_noise':0.2, 'max_policy_action':max_action,
'noise_clip':0.5, 'policy_freq':2,
'discount':0.99, 'max_action':max_action, 'min_action':min_action}
policy = TD3_kinematic.TD3(**kwargs)
if policy_name == 'TD3_kinematic_critic':
robot_name = env.env.robots[0].name
device = cfg['experiment']['device']
robot_dh = robotDH(robot_name, device)
joint_indexes = env.joint_indexes
kwargs = {'tau':0.005,
'action_dim':action_dim, 'state_dim':state_dim, 'body_dim':body_dim,
'joint_indexes':joint_indexes, 'robot_dh':robot_dh,
'policy_noise':0.2, 'max_policy_action':max_action,
'noise_clip':0.5, 'policy_freq':2,
                'discount':0.99, 'max_action':max_action, 'min_action':min_action, 'device':device}
policy = TD3_kinematic_critic.TD3(**kwargs)
return policy, kwargs
def build_replay_buffer(cfg, env, max_size, cam_dim, seed):
env_type = cfg['experiment']['env_type']
state_dim = env.observation_space.shape[0]
action_dim = env.control_shape
body_dim = env.body_shape
replay_buffer = ReplayBuffer(state_dim, body_dim, action_dim,
max_size=max_size,
cam_dim=cam_dim,
seed=seed)
# this is a bit hacky! TODO
replay_buffer.k = env.k
replay_buffer.obs_keys = env.obs_keys
replay_buffer.obs_sizes = env.obs_sizes
replay_buffer.obs_specs = env.obs_specs
replay_buffer.max_timesteps = env.max_timesteps
replay_buffer.xpos_targets = env.xpos_targets
replay_buffer.cfg = cfg
replay_buffer.base_pos = env.bpos
replay_buffer.base_ori = env.bori
# hard code orientation
# TODO add conversion to rotation matrix
replay_buffer.base_matrix = env.base_matrix
return replay_buffer
def get_replay_state_dict(replay_buffer, use_states=[]):
# find eef position according to DH
n, ss = replay_buffer.states.shape
k = replay_buffer.k
idx = (k-1)*(ss//k) # start at most recent observation
state_data = {'state':np.empty((n,0))}
next_state_data = {'next_state':np.empty((n,0))}
for key in replay_buffer.obs_keys:
o_size = replay_buffer.obs_sizes[key]
state_data[key] = replay_buffer.states[:, idx:idx+o_size]
next_state_data[key] = replay_buffer.next_states[:, idx:idx+o_size]
if key in use_states:
state_data['state'] = np.hstack((state_data['state'], state_data[key]))
next_state_data['next_state'] = np.hstack((next_state_data['next_state'], next_state_data[key]))
idx += o_size
return state_data, next_state_data
def get_rot_mat(alpha, beta, gamma):
""" alpha is yaw counterclockwise rotation around x axis
beta is pitch counterclockwise rotation around y axis
gamma is roll counterclockwise rotation around z axis
"""
R1 = np.array([[1, 0, 0], [0, np.cos(alpha), -np.sin(alpha)], [0, np.sin(alpha), np.cos(alpha)]])
R2 = np.array([[np.cos(beta), 0, np.sin(beta)], [0, 1, 0], [-np.sin(beta), 0, np.cos(beta)]])
R3 = np.array([[np.cos(gamma),-np.sin(gamma), 0], [np.sin(gamma), np.cos(gamma), 0], [0, 0, 1]])
    return np.dot(R3, np.dot(R2, R1))  # np.dot(R3, R2, R1) would treat R1 as the output buffer, not as a third factor
def plot_replay(env, replay_buffer, savebase, frames=False):
# env.reset()
# joint_positions = np.array([
# [-6.27,3.27,5.17,3.24,0.234,3.54,3.14], # sky
# [ 0,np.pi,np.pi,5.17,0.234,3.54,np.pi/2], #
# [ np.pi/2,np.pi,np.pi,5.17,0.234,3.54,np.pi/2], #
# [ np.pi,np.pi,np.pi,5.17,0.234,3.54,np.pi/2], #
# [ (2*np.pi)/3,np.pi,np.pi,5.17,0.234,3.54,np.pi/2], #
# [ 2*np.pi,np.pi,np.pi,5.17,0.234,3.54,np.pi/2], #
# [-np.pi*2,np.pi,5.17,.5, 0.234,3.54,np.pi/2],
# [np.deg2rad(180), np.deg2rad(270), np.deg2rad(90), np.deg2rad(270), np.deg2rad(270), np.deg2rad(270), np.deg2rad(270)], # tech doc
# [4.71, 2.61, 0, .5, 6.28, 3.7, 3.14], # sleep
# [-6.27,1,5.17,3.24,0.234,3.54,3.14], # out
# ])
#
if 'robot_dh' in replay_buffer.cfg['robot'].keys():
robot_name = replay_buffer.cfg['robot']['robot_dh']
else:
robot_name = replay_buffer.cfg['robot']['robots'][0]
rdh = robotDH(robot_name, 'cpu')
bm = replay_buffer.base_matrix
nt = replay_buffer.bodies.shape[0]
if robot_name.lower() == 'jaco':
#bm = np.eye(4)
#bm[:3, :3] = get_rot_mat(alpha=0., beta=np.pi, gamma=np.pi)
# position is right, but orientation is wrong
n_joints = 7
elif 'reacher' in robot_name.lower():
n_joints = 2
elif 'panda' in robot_name.lower():
n_joints = 7
elif 'sawyer' in robot_name.lower():
bm = np.eye(4)
n_joints = 7
joint_positions = replay_buffer.bodies[:,:n_joints]
if 'panda' in robot_name.lower():
# todo need an extra transform for flange
bm = np.eye(4)
#bm[:3, :3] = get_rot_mat(alpha=0.0, beta=0, gamma=np.pi)
joint_positions = np.hstack((joint_positions, np.zeros((nt, 1))))
#bm[:3, :3] = get_rot_mat(alpha=np.pi, beta=np.pi, gamma=np.pi)
#pm[1,1] = -1
print("BM", bm)
#true_rmat = []
#true_frames = []
#for cnt, t in enumerate(joint_positions):
# env.sim.data.qpos[:7] = t
# env.step(np.zeros(8))
# true_rmat.append(env.make_body()[-16:].reshape(4,4))
# imwrite(savebase+'_%0d.png'%cnt, env.render())
# r = env.env.robots[0]
# grip_rmat = site_pose_in_base_from_name(env.env.sim, r.robot_model.root_body, 'robot0_link1')
# print('T1', grip_rmat, T.mat2euler(grip_rmat))
# rdh.np_angle2ee(bm, joint_positions[cnt][None])
# # joint pos, eef in world frame, grip site in base frame
##embed()
#true_rmat = np.array(true_rmat)
true_rmat = replay_buffer.bodies[:,n_joints+3:n_joints+3+16].reshape(nt, 4,4)
true_pos = true_rmat[:,:3,3]
true_euler = np.array([T.mat2euler(a) for a in true_rmat])
true_quat = np.array([T.mat2quat(a) for a in true_rmat])
dh_rmat = rdh.np_angle2ee(joint_positions)
dh_pos = dh_rmat[:,:3,3]
dh_euler = np.array([T.mat2euler(a) for a in dh_rmat])
dh_quat = np.array([T.mat2quat(a) for a in dh_rmat])
f, ax = plt.subplots(3, figsize=(10,18))
xdiff = true_pos[:,0]-dh_pos[:,0]
ydiff = true_pos[:,1]-dh_pos[:,1]
zdiff = true_pos[:,2]-dh_pos[:,2]
print('max xyzdiff', np.abs(xdiff).max(), np.abs(ydiff).max(), np.abs(zdiff).max())
ax[0].plot(true_pos[:,0], label='state')
ax[0].plot(dh_pos[:,0], label='dh calc')
ax[0].plot(xdiff, label='diff')
ax[0].set_title('posx: max diff %.04f'%np.abs(xdiff).max())
ax[0].legend()
ax[1].plot(true_pos[:,1])
ax[1].plot(dh_pos[:,1])
ax[1].plot(ydiff)
ax[1].set_title('posy: max diff %.04f'%np.abs(ydiff).max())
ax[2].plot(true_pos[:,2])
ax[2].plot(dh_pos[:,2])
ax[2].plot(zdiff)
ax[2].set_title('posz: max diff %.04f'%np.abs(zdiff).max())
plt.savefig(savebase+'pos.png')
print('saving', savebase+'pos.png')
f, ax = plt.subplots(4, figsize=(10,18))
qxdiff = true_quat[:,0]-dh_quat[:,0]
qydiff = true_quat[:,1]-dh_quat[:,1]
qzdiff = true_quat[:,2]-dh_quat[:,2]
qwdiff = true_quat[:,3]-dh_quat[:,3]
print('max qxyzwdiff',np.abs(qxdiff).max(), np.abs(qydiff).max(), np.abs(qzdiff).max(), np.abs(qwdiff).max())
ax[0].plot(true_quat[:,0], label='sqx')
ax[0].plot(dh_quat[:,0], label='dhqx')
ax[0].plot(qxdiff, label='diff')
ax[0].set_title('qx: max diff %.04f'%np.abs(qxdiff).max())
ax[0].legend()
ax[1].plot(true_quat[:,1], label='sqy')
ax[1].plot(dh_quat[:,1], label='dhqy')
ax[1].plot(qydiff)
ax[1].set_title('qy: max diff %.04f'%np.abs(qydiff).max())
ax[2].plot(true_quat[:,2], label='sqz')
ax[2].plot(dh_quat[:,2], label='dhqz')
ax[2].plot(qzdiff)
ax[2].set_title('qz: max diff %.04f'%np.abs(qzdiff).max())
ax[3].plot(true_quat[:,3])
ax[3].plot(dh_quat[:,3])
ax[3].plot(qwdiff)
ax[3].set_title('qw: max diff %.04f'%np.abs(qwdiff).max())
plt.savefig(savebase+'quat.png')
print('saving', savebase+'quat.png')
exdiff = true_euler[:,0]-dh_euler[:,0]
eydiff = true_euler[:,1]-dh_euler[:,1]
ezdiff = true_euler[:,2]-dh_euler[:,2]
    print('max euler xyz diff', np.abs(exdiff).max(), np.abs(eydiff).max(), np.abs(ezdiff).max())
f, ax = plt.subplots(3, figsize=(10,18))
ax[0].plot(true_euler[:,0], label='sqx')
ax[0].plot(dh_euler[:,0], label='dhqx')
ax[0].plot(exdiff, label='diff')
ax[0].legend()
ax[1].plot(true_euler[:,1])
ax[1].plot(dh_euler[:,1])
ax[1].plot(eydiff)
ax[2].plot(true_euler[:,2])
ax[2].plot(dh_euler[:,2])
ax[2].plot(ezdiff)
plt.savefig(savebase+'euler.png')
if frames:
frames = [replay_buffer.undo_frame_compression(replay_buffer.frames[f]) for f in np.arange(len(replay_buffer.frames))]
mimwrite(savebase+'.mp4', frames)
print('writing', savebase+'.mp4')
class TanhTransform(pyd.transforms.Transform):
domain = pyd.constraints.real
codomain = pyd.constraints.interval(-1.0, 1.0)
bijective = True
sign = +1
def __init__(self, cache_size=1):
super().__init__(cache_size=cache_size)
@staticmethod
def atanh(x):
return 0.5 * (x.log1p() - (-x).log1p())
def __eq__(self, other):
return isinstance(other, TanhTransform)
def _call(self, x):
return x.tanh()
def _inverse(self, y):
# We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
# one should use `cache_size=1` instead
return self.atanh(y)
def log_abs_det_jacobian(self, x, y):
# We use a formula that is more numerically stable, see details in the following link
# https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
def __init__(self, loc, scale):
self.loc = loc
self.scale = scale
self.base_dist = pyd.Normal(loc, scale)
transforms = [TanhTransform()]
super().__init__(self.base_dist, transforms)
@property
def mean(self):
mu = self.loc
for tr in self.transforms:
mu = tr(mu)
return mu
``` |
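
A note on the stacking pattern used by `EnvStack` above: the last `k` observation and body vectors are held in fixed-length deques and concatenated on every `reset`/`step`. A minimal, framework-free sketch of that pattern (the `FrameStacker` class and the shapes below are illustrative only, not part of this repo):

```python
from collections import deque
import numpy as np

class FrameStacker:
    """Keep the k most recent observations and return their concatenation,
    mirroring what EnvStack does with self._state and self._body."""
    def __init__(self, k):
        self.k = k
        self._state = deque([], maxlen=k)

    def reset(self, obs):
        # fill the deque with copies of the first observation so the output size is constant
        for _ in range(self.k):
            self._state.append(obs)
        return self._get()

    def step(self, obs):
        self._state.append(obs)
        return self._get()

    def _get(self):
        assert len(self._state) == self.k
        return np.concatenate(list(self._state), axis=0)

stacker = FrameStacker(k=4)
first = stacker.reset(np.zeros(3))
after = stacker.step(np.ones(3))
assert first.shape == (12,) and after.shape == (12,)
```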
{
"source": "johannah/drift-predict",
"score": 3
} |
#### File: johannah/drift-predict/download_fft_data.py
```python
import os
from glob import glob
import datetime
from IPython import embed
datadir = 'data/'
webbase = 'https://oceanofthings.darpa.mil/docs/Sample%20Data/'
# file look like: 'challenge_1-day_sofar_20211109_day8JSON.json'
filebase = 'challenge_1-day_sofar_%s_day%sJSON.json'
# https://oceanofthings.darpa.mil/docs/Sample%20Data/challenge_1-day_sofar_20211111_day10JSON.json
def download_data():
if not os.path.exists(datadir):
os.makedirs(datadir)
start_day = datetime.date(2021, 11, 2)
today = datetime.date.today()
assert start_day < today
for comp_day in range(30):
data_day = start_day + datetime.timedelta(days=comp_day)
if data_day <= today:
fname = filebase%(data_day.strftime('%Y%m%d'), comp_day+1)
if not os.path.exists(os.path.join(datadir, fname)):
get_file = webbase + fname
cmd = 'wget %s -P %s'%(get_file, datadir)
print(cmd)
os.system(cmd)
if __name__ == '__main__':
download_data()
```
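
The script builds one download URL per challenge day from `filebase` and the hard-coded start date. A small illustration of how a single entry expands, using the values from the comments above:

```python
import datetime

filebase = 'challenge_1-day_sofar_%s_day%sJSON.json'
start_day = datetime.date(2021, 11, 2)

comp_day = 7  # zero-based index, so this is challenge day 8
data_day = start_day + datetime.timedelta(days=comp_day)
fname = filebase % (data_day.strftime('%Y%m%d'), comp_day + 1)
assert fname == 'challenge_1-day_sofar_20211109_day8JSON.json'
```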
#### File: johannah/drift-predict/simulate.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import json
import os
from glob import glob
from download_fft_data import download_data
import datetime
import plotly.express as px
from copy import deepcopy
import netCDF4 as nc
import pickle
import time
import pytz
from haversine import haversine, Unit, inverse_haversine, Direction
from IPython import embed
from opendrift.models.openberg import OpenBerg
from opendrift.models.oceandrift import OceanDrift
from opendrift.models.leeway import Leeway
from opendrift.models.physics_methods import wind_drift_factor_from_trajectory, distance_between_trajectories
from utils import load_environment_data, make_datetimes_from_args, load_hindcast_environment_data
from utils import load_drifter_data, plot_spot_tracks
def simulate_spot(spot, start_datetime=None, end_datetime=None, start_at_drifter=False, end_at_drifter=False, plot_plot=False, plot_gif=False, num_seeds=100, seed_radius=10, wind_drift_factor_max=.02, model_type='OceanDrift', object_type=26):
# create random wind drift factors
# mean wind drift factor is found to be 0.041
# min wind drift factor is found to be 0.014
# max wind drift factor is found to be 0.16
spot_df = track_df[track_df['spotterId'] == spot]
samples = spot_df.index
ts_col = 'ts_utc'
timestamps = [x for x in spot_df[ts_col].dt.tz_localize(None)]
drifter_lons = np.array(spot_df['longitude'])
drifter_lats = np.array(spot_df['latitude'])
if model_type == 'OceanDrift':
ot = OceanDrift(loglevel=80) # lower log is more verbose
# Prevent mixing elements downwards
ot.set_config('drift:vertical_mixing', False)
if model_type == 'Leeway':
ot = Leeway(loglevel=20)
[ot.add_reader(r) for r in readers]
# TODO fine-tune these. 0.01 seemed too small
ot.set_config('drift:horizontal_diffusivity', .1) # m2/s
ot.set_config('drift:current_uncertainty', .1) # m2/s
ot.set_config('drift:wind_uncertainty', .1) # m2/s
# find nearest timestep to start
if start_at_drifter:
start_datetime = timestamps[0]
else:
# no tz in opendrift
start_datetime = start_datetime.replace(tzinfo=None)
if end_at_drifter:
end_datetime = timestamps[-1]
else:
end_datetime = end_datetime.replace(tzinfo=None)
# seed from time nearest to start time's location of drifters
diff_time = abs(start_time-spot_df.index)
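    # note: start_time here is the module-level (tz-aware) variable set in __main__, not the
    # start_datetime argument (whose tzinfo was stripped above); the same global is reused when seeding below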
drift_ts_index = np.argmin(diff_time)
drift_ts = spot_df.index[drift_ts_index]
if np.abs(start_time-drift_ts) > datetime.timedelta(hours=1):
print("NO NEAR TIME DRIFTER", drift_ts, spot_df.loc[drift_ts]['spotterId'])
return
if end_datetime < start_datetime:
print('ending before starting')
return
try:
start_lon = spot_df.loc[drift_ts]['longitude']
start_lat = spot_df.loc[drift_ts]['latitude']
if model_type == 'OceanDrift':
wind_drift_factor = np.linspace(0.001, wind_drift_factor_max, num_seeds)
ot.seed_elements(start_lon, start_lat, radius=seed_radius, number=num_seeds,
time=start_time.replace(tzinfo=None),
wind_drift_factor=wind_drift_factor)
# time step should be in seconds
if model_type == 'Leeway':
ot.seed_elements(start_lon, start_lat, radius=seed_radius, number=num_seeds,
time=start_time.replace(tzinfo=None),
object_type=object_type)
ot.run(end_time=end_datetime.replace(tzinfo=None), time_step=datetime.timedelta(hours=1),
time_step_output=datetime.timedelta(hours=1), outfile=os.path.join(spot_dir, spot + '.nc'))
drifter_dict = {'time': timestamps, 'lon': drifter_lons, 'lat': drifter_lats,
'label': '%s Drifter'%spot, 'color': 'orangered', 'linewidth': 2.2, 'linestyle':':', 'markerstyle':'.', 'markersize': 40}
# Drifter track is shown in red, and simulated trajectories are shown in gray.
motion_background = ['x_sea_water_velocity', 'y_sea_water_velocity']
ot.history.dump(os.path.join(spot_dir, spot+'.npy'))
except Exception as e:
print(e)
if plot_plot:
try:
ot.plot(filename=os.path.join(spot_dir, '%s.png'%spot), buffer=.01, fast=True, cmap='viridis', drifter=drifter_dict, linewidth=1.3, background=motion_background)
except Exception as e:
print(e)
if plot_gif:
try:
ot.animation(filename=os.path.join(spot_dir, '%s.gif'%spot), background=motion_background, buffer=.3, fast=True, drifter=drifter_dict, show_trajectories=True, surface_only=True)
except Exception as e:
print(e)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', default=1110)
parser.add_argument('--load-dir', default='', help='load partially-complete experiment from this dir')
parser.add_argument('--data-dir', default='data', help='load environmental and drifter data from this dir')
parser.add_argument('--save-dir', default='results', help='save results in this dir')
parser.add_argument('--model-type', default='Leeway', help='type of model', choices=['OceanDrift', 'Leeway'])
parser.add_argument('--object-type', default=70, help='type of model', choices=[69, 70, 71, 72]) # bait boxes
parser.add_argument('--num-seeds', default=100, type=int, help='num particles to simulate')
parser.add_argument('--seed-radius', default=500, type=int, help='meters squared region to seed particles in simulation')
parser.add_argument('--wind-drift-factor-max', '-wdm', default=0.06, type=float, help='max wind drift factor to use when seeding particles. default was found experimentally with get_wind_drift_factor.py')
parser.add_argument('--start-year', default=2021, type=int)
parser.add_argument('--start-month', default=11, type=int)
parser.add_argument('--start-day', default=22, type=int)
parser.add_argument('--start-hour', default=17, type=int)
parser.add_argument('--future-days', '-fd', default=10, type=int)
parser.add_argument('--test-spots', default=-1, help='number of random spots to run. if negative, all spots will be evaluated')
parser.add_argument('--start-at-drifter', '-sd', action='store_true', default=False, help='start simulation at drifter start')
parser.add_argument('--end-at-drifter', '-ed', action='store_true', default=False, help='end simulation at drifter start')
parser.add_argument('--hindcast', action='store_true', default=False, help='use hindcast rather than forecast data')
parser.add_argument('--plot', action='store_true', default=False, help='write plot')
parser.add_argument('--gif', action='store_true', default=False, help='write gif')
parser.add_argument('--use-ncep', '-n', action='store_true', default=False, help='include ncep data - wind data')
parser.add_argument('--use-ww3', '-w', action='store_true', default=False, help='include ww3 - 8 day wave forecast')
parser.add_argument('--use-gfs', '-g', action='store_true', default=False, help='include gfs - 14 day wind forecast')
parser.add_argument('--use-rtofs', '-r', action='store_true', default=False, help='include rtofs current forecasts')
args = parser.parse_args()
now = datetime.datetime.now(pytz.UTC)
# ALL TIMES IN UTC
load_from_dir = ''
if args.load_dir != '':
load_from_dir = args.load_dir
spot_dir = args.load_dir
# reload w same args
args = pickle.load( open(os.path.join(spot_dir, 'args.pkl'), 'rb'))
np.random.seed(args.seed)
now_str = now.strftime("%Y%m%d-%H%M")
start_time, start_str, end_time, end_str = make_datetimes_from_args(args)
if load_from_dir == '':
if args.model_type == 'Leeway':
model_name = args.model_type + str(args.object_type)
else:
model_name = 'WD%.02f_'%(args.wind_drift_factor_max) + args.model_type
spot_dir = os.path.join(args.save_dir, 'spots_N%s_S%s_E%s_DS%s_DE%s_R%sG%sW%sN%s_%s'%(now_str,
start_str, end_str, int(args.start_at_drifter), int(args.end_at_drifter),
int(args.use_rtofs), int(args.use_gfs), int(args.use_ww3), int(args.use_ncep),
model_name))
if not os.path.exists(spot_dir):
os.makedirs(spot_dir)
os.makedirs(os.path.join(spot_dir, 'python'))
cmd = 'cp *.py %s/' %os.path.join(spot_dir, 'python')
os.system(cmd)
pickle.dump(args, open(os.path.join(spot_dir, 'args.pkl'), 'wb'))
#track_df, wave_df = load_drifter_data(search_path='data/challenge*day*JSON.json', start_date=start_time, end_date=end_time)
#track_df, wave_df = load_drifter_data(search_path='data/challenge*day*JSON.json')
track_df = pd.read_csv(os.path.join(args.data_dir, 'challenge_30-day_sofar_20211102_csv.csv'))
track_df['ts'] = track_df['timestamp']
track_df['ts_utc'] = pd.to_datetime(track_df['ts'])
track_df.index = track_df['ts_utc']
spot_names = sorted(track_df['spotterId'].unique())
# sample a number for testing
if args.test_spots > 0:
spot_names = np.random.choice(spot_names, args.test_spots)
print(spot_names)
if args.hindcast:
readers = load_hindcast_environment_data()
else:
readers = load_environment_data(args.data_dir, start_time, use_gfs=True, use_ncep=args.use_ncep, use_ww3=args.use_ww3, use_rtofs=True)
for spot in spot_names:
if not os.path.exists(os.path.join(spot_dir, spot + '.nc')):
simulate_spot(spot, start_datetime=start_time, end_datetime=end_time,\
start_at_drifter=args.start_at_drifter, end_at_drifter=args.end_at_drifter, \
plot_plot=args.plot, plot_gif=args.gif, num_seeds=args.num_seeds,
seed_radius=args.seed_radius, wind_drift_factor_max=args.wind_drift_factor_max,
model_type=args.model_type, object_type=args.object_type)
``` |
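
`simulate_spot` writes each spot's simulated particle history to disk; a natural follow-up is scoring those particles against the observed spotter track. A hedged sketch of one such score using the `haversine` package already imported above (`mean_separation_km` and the coordinate arrays are made-up placeholders, not part of this repo):

```python
import numpy as np
from haversine import haversine, Unit

def mean_separation_km(sim_lats, sim_lons, obs_lats, obs_lons):
    """Average great-circle distance between matched simulated and observed positions."""
    dists = [haversine((slat, slon), (olat, olon), unit=Unit.KILOMETERS)
             for slat, slon, olat, olon in zip(sim_lats, sim_lons, obs_lats, obs_lons)]
    return float(np.mean(dists))

# hypothetical example: a simulated track sitting ~0.1 degree east of the drifter
obs_lats, obs_lons = np.array([20.0, 20.1]), np.array([-150.0, -150.0])
sim_lats, sim_lons = obs_lats, obs_lons + 0.1
print(mean_separation_km(sim_lats, sim_lons, obs_lats, obs_lons))  # roughly 10 km at this latitude
```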
{
"source": "johannah/iceview",
"score": 3
} |
#### File: iceview/iceview/features.py
```python
class ZernikeMoments:
def __init__(self, radius):
"""
:param radius: the maximum radius for the Zernike polynomials, in pixels
"""
from mahotas.features import zernike_moments
self.radius = radius
def detect_and_extract(self, image):
self.moments = zernike_moments(image, self.radius)
def detect_and_extract(detector, img):
try:
detector.detect_and_extract(img)
except IndexError as e:
print("ERROR: %s" %e)
print("Perhaps not enought keypoints were found. Check image size.")
print("Exiting")
raise SystemExit
keypoints = detector.keypoints
descriptors = detector.descriptors
return keypoints, descriptors
``` |
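
`detect_and_extract` works with any detector that exposes a `detect_and_extract` method plus `keypoints` and `descriptors` attributes, such as scikit-image's `ORB`. A hedged usage sketch (the ORB settings and test image are arbitrary, and the helper's import path is assumed from the file location above):

```python
from skimage.data import camera
from skimage.feature import ORB

from iceview.features import detect_and_extract  # the helper defined above

detector = ORB(n_keypoints=200)
keypoints, descriptors = detect_and_extract(detector, camera())
print(keypoints.shape, descriptors.shape)  # (n, 2) keypoint coordinates, (n, 256) binary descriptors
```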
{
"source": "johannah/interview_practice",
"score": 4
} |
#### File: interview_practice/sort/insertion_sort.py
```python
def insertion_sort(array):
for i in range(1, len(array)):
key = array[i]
j = i -1
while (j>=0 and array[j]>key):
# scoot
array[j+1] = array[j]
j-=1
array[j+1] = key
return array
arr = [3,-9,5, 100,-2, 294,5,56]
sarr = insertion_sort(arr)
print(sarr, sorted(arr))
assert(sarr == sorted(arr))
```
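
A quick property-style check of the routine above against Python's built-in sort on random inputs (test sizes are arbitrary):

```python
import random

# insertion_sort is the function defined above
for _ in range(100):
    data = [random.randint(-1000, 1000) for _ in range(random.randint(0, 50))]
    assert insertion_sort(list(data)) == sorted(data)
print("insertion_sort matches sorted() on 100 random lists")
```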
#### File: interview_practice/sort/quicksort.py
```python
def partition(array, low, high):
i = low-1
pivot = array[high]
for j in range(low, high):
if array[j] < pivot:
i+=1
array[i], array[j] = array[j], array[i]
pi = i+1
array[pi], array[high] = array[high], array[pi]
return pi, array
def quicksort(array, low=None, high=None):
    if low is None:
        low = 0
    if high is None:
        high = len(array)-1
if low < high:
        # pi is partitioning index, array[pi] is at correct place
pi, array = partition(array, low, high)
array = quicksort(array, low, pi-1)
array = quicksort(array, pi+1, high)
return array
arr = [2354,2,35,-22,34,2,344]
sarr = quicksort(arr)
print(sarr)
assert sarr == sorted(arr)
``` |
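
The partition above always pivots on the last element, so already-sorted input degrades to O(n^2) comparisons and deep recursion. A hedged sketch of a randomized-pivot variant that reuses the same `partition` helper defined above:

```python
import random

def randomized_quicksort(array, low=None, high=None):
    if low is None:
        low = 0
    if high is None:
        high = len(array) - 1
    if low < high:
        # swap a randomly chosen element into the pivot position before partitioning
        r = random.randint(low, high)
        array[r], array[high] = array[high], array[r]
        pi, array = partition(array, low, high)
        array = randomized_quicksort(array, low, pi - 1)
        array = randomized_quicksort(array, pi + 1, high)
    return array

data = list(range(200))  # worst case for the fixed last-element pivot
assert randomized_quicksort(list(data)) == data
```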
{
"source": "johannah/robosuite",
"score": 3
} |
#### File: robosuite/demos/demo_robot_movement.py
```python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from copy import deepcopy
import argparse
from robosuite.utils import transform_utils
from robosuite.wrappers import VisualizationWrapper
import os
import sys
import json
import imageio
import colorsys
import random
from IPython import embed
import numpy as np
np.set_printoptions(suppress=True)
import matplotlib.cm as cm
from PIL import Image
from robosuite.models.robots.manipulators import DEFAULT_INIT_QPOS, REAL_INIT_QPOS
import robosuite as suite
from robosuite.controllers import load_controller_config
# all_joints_move.npz finger_joints_move.npz joint_0_full_revolution.npz tool_orientation.npz
real_robot_data = ['joint_0_full_revolution', 'all_joints_move',
'tool_orientation']#, 'finger_joints_move']
def pose_in_base_from_name(env, name):
"""
A helper function that takes in a named data field and returns the pose
of that object in the base frame.
Args:
name (str): Name of body in sim to grab pose
Returns:
np.array: (4,4) array corresponding to the pose of @name in the base frame
"""
pos_in_world = env.sim.data.get_body_xpos(name)
rot_in_world = env.sim.data.get_body_xmat(name).reshape((3, 3))
pose_in_world = transform_utils.make_pose(pos_in_world, rot_in_world)
base_pos_in_world = env.sim.data.get_body_xpos(env.robot_model.root_body)
base_rot_in_world = env.sim.data.get_body_xmat(env.robot_model.root_body).reshape((3, 3))
base_pose_in_world = transform_utils.make_pose(base_pos_in_world, base_rot_in_world)
world_pose_in_base = transform_utils.pose_inv(base_pose_in_world)
    pose_in_base = transform_utils.pose_in_A_to_pose_in_B(pose_in_world, world_pose_in_base)
return pose_in_base
def get_sim_posquat(env):
sim_eef_pose = deepcopy(env.robots[0].pose_in_base_from_name('gripper0_eef'))
sim_eef_pos = deepcopy(sim_eef_pose)[:3, 3]
sim_eef_quat = deepcopy(transform_utils.mat2quat(sim_eef_pose))
return sim_eef_pos, sim_eef_quat
def get_sim2real_posquat(env):
sim_eef_pose = deepcopy(env.robots[0].pose_in_base_from_name('gripper0_eef'))
angle = np.deg2rad(-90)
direction_axis = [0, 0, 1]
rotation_matrix = transform_utils.rotation_matrix(angle, direction_axis)
sim_pose_rotated = np.dot(rotation_matrix, sim_eef_pose)
sim_eef_pos_rotated = deepcopy(sim_pose_rotated)[:3, 3]
sim_eef_quat_rotated = deepcopy(transform_utils.mat2quat(sim_pose_rotated))
return sim_eef_pos_rotated, sim_eef_quat_rotated
def get_real2sim_posquat(pos, quat):
real_eef_pose = transform_utils.pose2mat((pos,quat))
angle = np.deg2rad(90)
direction_axis = [0, 0, 1]
rotation_matrix = transform_utils.rotation_matrix(angle, direction_axis)
real_pose_rotated = np.dot(rotation_matrix, real_eef_pose)
real_eef_pos_rotated = deepcopy(real_pose_rotated)[:3, 3]
real_eef_quat_rotated = deepcopy(transform_utils.mat2quat(real_pose_rotated))
return real_eef_pos_rotated, real_eef_quat_rotated
def run_sim_test(type_test):
real_robot_joints = []
sim_robot_joints = []
target_robot_joints = []
real_robot_eef_rframe = []
real_robot_eef_sframe = []
sim_robot_eef_rframe = []
sim_robot_eef_sframe = []
# Create dict to hold options that will be passed to env creation call
options = {}
# Choose environment and add it to options
options["env_name"] = "Lift"
options["robots"] = ["Jaco"]
# Choose camera
camera = "frontview"
n_joints = 7
write_path = os.path.join('datasets', type_test)
# load data
# latin1 allows us to load python2
real_robot = np.load(os.path.join('datasets', type_test + '.npz'), allow_pickle=True, encoding='latin1')
real_eef_pos = real_robot['eef_pos']
#real_joint_pos = np.mod(real_robot['joint_pos'], 4*np.pi)
real_joint_pos = real_robot['joint_pos']
real_actions = real_robot['actions']
init_qpos = list(real_joint_pos[0][:7])
# Choose controller
controller_file = "jaco_joint_position_5hz.json"
controller_fpath = os.path.join(
os.path.split(suite.__file__)[0], 'controllers', 'config',
controller_file)
print('loading controller from', controller_fpath)
# Load the desired controller
options["controller_configs"] = load_controller_config(custom_fpath=controller_fpath)
#options['initial_qposes'] = [init_qpos]
control_freq = 2
n_steps = len(real_actions)
# initialize the task
env = suite.make(
**options,
has_renderer=False,
has_offscreen_renderer=True,
ignore_done=True,
use_camera_obs=True,
control_freq=control_freq,
camera_names=camera,
camera_heights=512,
camera_widths=512,
)
site = 'gripper0_grip_site'
env = VisualizationWrapper(env)
env.reset()
env.robots[0].set_robot_joint_positions(init_qpos)
env.robots[0].controller.update_initial_joints(init_qpos)
video_writer = imageio.get_writer(write_path + '.mp4', fps=2)
eef_site_id = env.robots[0].eef_site_id
# Get action limits
low, high = env.action_spec
#env.robots[0].set_robot_joint_positions(init_real[:7])
sim_joint_pos = env.sim.data.qpos[env.robots[0]._ref_joint_pos_indexes]
for t in range(n_steps-1):
#action = np.deg2rad(real_actions[t-1])
action = real_joint_pos[t,:7]-sim_joint_pos
if len(action) == 7:
action = np.hstack((action, [0]))
obs, reward, done, _ = env.step(action)
video_img = obs['%s_image'%camera][::-1]
video_writer.append_data(video_img)
# get simulator position and quaternion in real robot frame
sim_eef_pos_rframe, sim_eef_quat_rframe = get_sim2real_posquat(env)
sim_eef_pos_sframe, sim_eef_quat_sframe = get_sim_posquat(env)
sim_joint_pos = env.sim.data.qpos[env.robots[0]._ref_joint_pos_indexes]
sim_goal_joint_pos = env.robots[0].controller.goal_qpos
sim_robot_eef_rframe.append(deepcopy(np.hstack((sim_eef_pos_rframe, sim_eef_quat_rframe))))
sim_robot_eef_sframe.append(deepcopy(np.hstack((sim_eef_pos_sframe, sim_eef_quat_sframe))))
sim_robot_joints.append(deepcopy(sim_joint_pos))
target_robot_joints.append(deepcopy(sim_goal_joint_pos))
real_eef_pos_sframe, real_eef_quat_sframe = get_real2sim_posquat(real_eef_pos[t,:3], real_eef_pos[t,3:7])
real_robot_eef_rframe.append(real_eef_pos[t])
real_robot_eef_sframe.append(deepcopy(np.hstack((real_eef_pos_sframe, real_eef_quat_sframe))))
real_robot_joints.append(real_joint_pos[t])
f, ax = plt.subplots(7, figsize=(10,20))
real_robot_eef_rframe = np.array(real_robot_eef_rframe)
real_robot_eef_sframe = np.array(real_robot_eef_sframe)
sim_robot_eef_rframe = np.array(sim_robot_eef_rframe)
sim_robot_eef_sframe = np.array(sim_robot_eef_sframe)
y = np.arange(len(real_robot_eef_rframe))
vmin = -np.pi
vmax = np.pi
for i in range(7):
if not i:
ax[i].scatter(y, real_robot_eef_rframe[:,i] , marker='o', s=4, c='r', label='robot_rframe')
ax[i].scatter(y, real_robot_eef_sframe[:,i] , marker='o', s=4, c='k', label='robot_sframe')
ax[i].scatter(y, sim_robot_eef_rframe[:,i] , marker='o', s=4, c='g', label='sim_rframe')
ax[i].scatter(y, sim_robot_eef_sframe[:,i] , marker='o', s=4, c='b', label='sim_sframe')
else:
ax[i].scatter(y, real_robot_eef_rframe[:,i] , marker='o', s=4, c='r')
ax[i].scatter(y, real_robot_eef_sframe[:,i] , marker='o', s=4, c='k')
ax[i].scatter(y, sim_robot_eef_rframe[:,i] , marker='o', s=4, c='g' )
ax[i].scatter(y, sim_robot_eef_sframe[:,i] , marker='o', s=4, c='b' )
ax[i].plot(real_robot_eef_rframe[:, i], c='r')
ax[i].plot(real_robot_eef_sframe[:, i], c='k')
ax[i].plot( sim_robot_eef_rframe[:, i], c='g' )
ax[i].plot( sim_robot_eef_sframe[:, i], c='b' )
for i in range(4, 7):
ax[i].set_ylim([vmin, vmax])
ax[0].set_title('x'); ax[1].set_title('y'); ax[2].set_title('z')
ax[3].set_title('qx'); ax[4].set_title('qy'); ax[5].set_title('qz'); ax[6].set_title('qw')
ax[0].legend()
plt.savefig(write_path + '_eef.png')
plt.close()
f, ax = plt.subplots(7, figsize=(10,20))
real_robot_joints = np.array(real_robot_joints)
sim_robot_joints = np.array(sim_robot_joints)
target_robot_joints = np.array(target_robot_joints)
vmin = -4*np.pi
vmax = 4*np.pi
for i in range(7):
ax[i].set_title(i)
if not i:
ax[i].plot(real_robot_joints[:,i], c='b', label='real')
ax[i].plot(sim_robot_joints[:,i], c='k', label='sim')
ax[i].plot(target_robot_joints[:,i], c='c', label='goal')
else:
ax[i].plot(real_robot_joints[:,i], c='b')
ax[i].plot(sim_robot_joints[:,i], c='k')
ax[i].plot(target_robot_joints[:,i], c='c')
ax[i].scatter(y, real_robot_joints[:,i], s=2, c='b')
ax[i].scatter(y, sim_robot_joints[:,i], s=2, c='k')
ax[i].scatter(y, target_robot_joints[:,i], s=2, c='c')
for x in range(7):
ax[x].set_ylim([vmin, vmax])
ax[0].legend()
plt.savefig(write_path + '_joints.png')
plt.close()
video_writer.close()
print("Video saved to {}".format(write_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--type-test", type=str, default="all", help="type",
choices=real_robot_data)
args = parser.parse_args()
if args.type_test == 'all':
for type_test in real_robot_data:
run_sim_test(type_test)
else:
run_sim_test(args.type_test)
```
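
`get_sim2real_posquat` and `get_real2sim_posquat` differ only by a ±90° rotation about the base z axis, so chaining the two rotations should recover the original pose. A short round-trip check of that convention (pose values are arbitrary; requires a robosuite install for `transform_utils`):

```python
import numpy as np
from robosuite.utils import transform_utils

pos = np.array([0.3, -0.2, 0.5])
quat = np.array([0.0, 0.0, 0.0, 1.0])  # identity orientation in (x, y, z, w)
pose = transform_utils.pose2mat((pos, quat))

rot_fwd = transform_utils.rotation_matrix(np.deg2rad(-90), [0, 0, 1])   # sim -> real
rot_back = transform_utils.rotation_matrix(np.deg2rad(90), [0, 0, 1])   # real -> sim

round_trip = np.dot(rot_back, np.dot(rot_fwd, pose))
assert np.allclose(round_trip, pose, atol=1e-6)
```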
#### File: robots/manipulators/jaco_robot.py
```python
import numpy as np
from robosuite.models.robots.manipulators.manipulator_model import ManipulatorModel
from robosuite.utils.mjcf_utils import xml_path_completion
DEFAULT_INIT_QPOS = np.array([3.192, 3.680, -0.000, 1.170, 0.050, 3.760, 3.142])
# Jaco Real home pose
REAL_INIT_QPOS = np.array([4.942, 2.842, 0.0011, 0.758, 4.6368, 4.492, 5.0244])
DOWN_INIT_QPOS = np.array([4.992, 3.680, -0.000, 1.170, 0.050, 3.760, 3.142])
class Jaco(ManipulatorModel):
"""
Jaco is a kind and assistive robot created by Kinova
Args:
idn (int or str): Number or some other unique identification string for this robot instance
"""
def __init__(self, idn=0, init_qpos=DOWN_INIT_QPOS):
super().__init__(xml_path_completion("robots/jaco/robot.xml"), idn=idn)
self.set_init_qpos(init_qpos)
@property
def default_mount(self):
return "RethinkMount"
@property
def default_gripper(self):
return "JacoThreeFingerGripper"
@property
def default_controller_config(self):
return "default_jaco"
def set_init_qpos(self, init_qpos):
self.init_qpos = init_qpos
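        # note: this sets an instance attribute named init_qpos, which shadows the
        # init_qpos method defined below once set_init_qpos has been called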
def init_qpos(self):
## default position
#return np.array([3.192, 3.680, -0.000, 1.170, 0.050, 3.760, 3.142])
# Jaco Real home pose
#return np.array([4.942, 2.842, 0.0011, 0.758, 4.6368, 4.492, 5.0244])
#return np.array([4.708, 2.619, 0.000, 0.521, 6.279, 3.714, 3.14])
#np.array([4.872, 3.055, 0.5, 1.294, 4.497, 4.343, 5.0])
return self.init_qpos
@property
def base_xpos_offset(self):
return {
"bins": (-0.5, -0.1, 0),
"empty": (-0.6, 0, 0),
"table": lambda table_length: (-0.16 - table_length / 2, 0, 0),
}
@property
def top_offset(self):
return np.array((0, 0, 1.0))
@property
def _horizontal_radius(self):
return 0.5
@property
def arm_type(self):
return "single"
``` |
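
With the initial joint configuration passed to the constructor, switching the simulated arm between the default pose and the recorded real-robot home pose is a one-line change. A hedged usage sketch (assumes this modified Jaco model and the exported pose constants are on the robosuite path, as in the demo script above):

```python
import numpy as np
from robosuite.models.robots.manipulators import REAL_INIT_QPOS
from robosuite.models.robots.manipulators.jaco_robot import Jaco

# start the simulated arm in the recorded real-robot home pose instead of the default
jaco = Jaco(idn=0, init_qpos=REAL_INIT_QPOS)
assert np.allclose(jaco.init_qpos, REAL_INIT_QPOS)
```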
{
"source": "johannah/robotics-rl-srl",
"score": 3
} |
#### File: rl_baselines/models/sac_models.py
```python
import torch as th
import torch.nn as nn
import torch.nn.functional as F
def encodeOneHot(tensor, n_dim):
"""
One hot encoding for a given tensor
:param tensor: (th Tensor)
:param n_dim: (int) Number of dimensions
:return: (th.Tensor)
"""
encoded_tensor = th.Tensor(tensor.shape[0], n_dim).zero_().to(tensor.device)
return encoded_tensor.scatter_(1, tensor, 1.)
class NatureCNN(nn.Module):
"""
CNN from Nature paper.
:param n_channels: (int)
"""
def __init__(self, n_channels):
super(NatureCNN, self).__init__()
self.conv_layers = nn.Sequential(
nn.Conv2d(n_channels, 32, kernel_size=8, stride=4),
nn.ReLU(inplace=True),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
)
self.fc = nn.Linear(36864, 512)
def forward(self, x):
x = self.conv_layers(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc(x))
return x
class MLPPolicy(nn.Module):
"""
:param input_dim: (int)
:param out_dim: (int)
:param hidden_dim: (int)
"""
def __init__(self, input_dim, out_dim, hidden_dim=128):
super(MLPPolicy, self).__init__()
self.policy_net = nn.Sequential(
nn.Linear(int(input_dim), hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True),
)
self.mean_head = nn.Linear(hidden_dim, int(out_dim))
self.logstd_head = nn.Linear(hidden_dim, int(out_dim))
def forward(self, x):
x = self.policy_net(x)
return self.mean_head(x), self.logstd_head(x)
class MLPValueNetwork(nn.Module):
"""
:param input_dim: (int)
:param hidden_dim: (int)
"""
def __init__(self, input_dim, hidden_dim=128):
super(MLPValueNetwork, self).__init__()
self.value_net = nn.Sequential(
nn.Linear(int(input_dim), hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, 1)
)
def forward(self, x):
return self.value_net(x)
class MLPQValueNetwork(nn.Module):
"""
:param input_dim: (int)
:param n_actions: (int)
:param continuous_actions: (bool)
:param hidden_dim: (int)
"""
def __init__(self, input_dim, n_actions, continuous_actions, hidden_dim=128):
super(MLPQValueNetwork, self).__init__()
self.continuous_actions = continuous_actions
self.n_actions = n_actions
self.q_value_net = nn.Sequential(
nn.Linear(int(input_dim) + int(n_actions), hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, 1)
)
def forward(self, obs, action):
"""
:param obs: (th.Tensor)
:param action: (th.Tensor)
:return: (th.Tensor)
"""
if not self.continuous_actions:
action = encodeOneHot(action.unsqueeze(1).long(), self.n_actions)
return self.q_value_net(th.cat([obs, action], dim=1))
```
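
`MLPQValueNetwork` one-hot encodes discrete actions with `encodeOneHot` before concatenating them with the observation, so discrete and continuous control can share the same critic architecture. A quick shape check using the classes defined above (dimensions are arbitrary):

```python
import torch as th

obs_dim, n_actions, batch = 8, 4, 5
qnet = MLPQValueNetwork(obs_dim, n_actions, continuous_actions=False)

obs = th.randn(batch, obs_dim)
actions = th.randint(0, n_actions, (batch,)).float()  # discrete action indices
q_values = qnet(obs, actions)
assert q_values.shape == (batch, 1)
```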
#### File: robotics-rl-srl/rl_baselines/utils.py
```python
from collections import OrderedDict
from multiprocessing import Queue, Process
import numpy as np
import tensorflow as tf
import torch as th
from stable_baselines.common.vec_env import VecEnv, VecNormalize, DummyVecEnv, SubprocVecEnv, VecFrameStack
from environments import ThreadingType
from environments.utils import makeEnv, dynamicEnvLoad
from rl_baselines.visualize import loadCsv
from srl_zoo.utils import printYellow, printGreen
from state_representation.models import loadSRLModel, getSRLDim
def createTensorflowSession():
"""
Create tensorflow session with specific argument
to prevent it from taking all gpu memory
"""
# Let Tensorflow choose the device
config = tf.ConfigProto(allow_soft_placement=True)
# Prevent tensorflow from taking all the gpu memory
config.gpu_options.allow_growth = True
tf.Session(config=config).__enter__()
def computeMeanReward(log_dir, last_n_episodes, is_es=False, return_n_episodes=False):
"""
Compute the mean reward for the last n episodes
:param log_dir: (str)
:param last_n_episodes: (int)
:param is_es: (bool)
:param return_n_episodes: (bool)
:return: (bool, numpy array or tuple when return_n_episodes is True)
"""
result, _ = loadCsv(log_dir, is_es=is_es)
if len(result) == 0:
return False, 0
y = np.array(result)[:, 1]
if return_n_episodes:
return True, (y[-last_n_episodes:].mean(), len(y))
return True, y[-last_n_episodes:].mean()
def isJsonSafe(data):
"""
Check if an object is json serializable
:param data: (python object)
:return: (bool)
"""
if data is None:
return True
elif isinstance(data, (bool, int, float, str)):
return True
elif isinstance(data, (tuple, list)):
return all(isJsonSafe(x) for x in data)
elif isinstance(data, dict):
return all(isinstance(k, str) and isJsonSafe(v) for k, v in data.items())
return False
def filterJSONSerializableObjects(input_dict):
"""
    Filter and sort entries of a dictionary
to save it as a json
:param input_dict: (dict)
:return: (OrderedDict)
"""
output_dict = OrderedDict()
for key in sorted(input_dict.keys()):
if isJsonSafe(input_dict[key]):
output_dict[key] = input_dict[key]
return output_dict
class CustomDummyVecEnv(VecEnv):
"""Dummy class in order to use FrameStack with SAC"""
def __init__(self, env_fns):
"""
:param env_fns: ([function])
"""
assert len(env_fns) == 1, "This dummy class does not support multiprocessing"
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
self.env = self.envs[0]
self.actions = None
self.obs = None
self.reward, self.done, self.infos = None, None, None
def step_wait(self):
self.obs, self.reward, self.done, self.infos = self.env.step(self.actions[0])
return np.copy(self.obs[None]), self.reward, [self.done], [self.infos]
def step_async(self, actions):
"""
:param actions: ([int])
"""
self.actions = actions
def reset(self):
return self.env.reset()
def close(self):
return
def get_images(self):
return [env.render(mode='rgb_array') for env in self.envs]
class WrapFrameStack(VecFrameStack):
"""
Wrap VecFrameStack in order to be usable with SAC
and scale output if necessary
"""
def __init__(self, venv, n_stack, normalize=True):
super(WrapFrameStack, self).__init__(venv, n_stack)
self.factor = 255.0 if normalize else 1
def step(self, action):
self.step_async([action])
stackedobs, rewards, dones, infos = self.step_wait()
return stackedobs[0] / self.factor, rewards, dones[0], infos[0]
def reset(self):
"""
Reset all environments
"""
stackedobs = super(WrapFrameStack, self).reset()
return stackedobs[0] / self.factor
def get_original_obs(self):
"""
Hack to use VecNormalize
:return: (numpy float)
"""
return self.venv.get_original_obs()
def saveRunningAverage(self, path):
"""
Hack to use VecNormalize
:param path: (str) path to log dir
"""
self.venv.save_running_average(path)
def loadRunningAverage(self, path):
"""
Hack to use VecNormalize
:param path: (str) path to log dir
"""
self.venv.load_running_average(path)
# Compatibility with stable-baselines
save_running_average = saveRunningAverage
load_running_average = loadRunningAverage
class MultiprocessSRLModel:
"""
Allows multiple environments to use a single SRL model
:param num_cpu: (int) the number of environments that will spawn
:param env_id: (str) the environment id string
:param env_kwargs: (dict)
"""
def __init__(self, num_cpu, env_id, env_kwargs):
# Create a duplex pipe between env and srl model, where all the inputs are unified and the origin
        # marked with an index number
self.pipe = (Queue(), [Queue() for _ in range(num_cpu)])
module_env, class_name, _ = dynamicEnvLoad(env_id)
# we need to know the expected dim output of the SRL model, before it is created
self.state_dim = getSRLDim(env_kwargs.get("srl_model_path", None), module_env.__dict__[class_name])
self.p = Process(target=self._run, args=(env_kwargs,))
self.p.daemon = True
self.p.start()
def _run(self, env_kwargs):
# this is to control the number of CPUs that torch is allowed to use.
# By default it will use all CPUs, even with GPU acceleration
th.set_num_threads(1)
self.model = loadSRLModel(env_kwargs.get("srl_model_path", None), th.cuda.is_available(), self.state_dim,
env_object=None)
# run until the end of the caller thread
while True:
# pop an item, get state, and return to sender.
env_id, var = self.pipe[0].get()
self.pipe[1][env_id].put(self.model.getState(var, env_id=env_id))
def createEnvs(args, allow_early_resets=False, env_kwargs=None, load_path_normalise=None):
"""
:param args: (argparse.Namespace Object)
    :param allow_early_resets: (bool) Allow reset before the environment is done, usually used in ES to halt the envs
:param env_kwargs: (dict) The extra arguments for the environment
:param load_path_normalise: (str) the path to loading the rolling average, None if not available or wanted.
:return: (Gym VecEnv)
"""
# imported here to prevent cyclic imports
from environments.registry import registered_env
from state_representation.registry import registered_srl, SRLType
assert not (registered_env[args.env][3] is ThreadingType.NONE and args.num_cpu != 1), \
"Error: cannot have more than 1 CPU for the environment {}".format(args.env)
if env_kwargs is not None and registered_srl[args.srl_model][0] == SRLType.SRL:
srl_model = MultiprocessSRLModel(args.num_cpu, args.env, env_kwargs)
env_kwargs["state_dim"] = srl_model.state_dim
env_kwargs["srl_pipe"] = srl_model.pipe
envs = [makeEnv(args.env, args.seed, i, args.log_dir, allow_early_resets=allow_early_resets, env_kwargs=env_kwargs)
for i in range(args.num_cpu)]
if len(envs) == 1:
# No need for subprocesses when having only one env
envs = DummyVecEnv(envs)
else:
envs = SubprocVecEnv(envs)
envs = VecFrameStack(envs, args.num_stack)
if args.srl_model != "raw_pixels":
printYellow("Using MLP policy because working on state representation")
envs = VecNormalize(envs, norm_obs=True, norm_reward=False)
envs = loadRunningAverage(envs, load_path_normalise=load_path_normalise)
return envs
def loadRunningAverage(envs, load_path_normalise=None):
if load_path_normalise is not None:
try:
printGreen("Loading saved running average")
envs.load_running_average(load_path_normalise)
envs.training = False
except FileNotFoundError:
envs.training = True
printYellow("Running Average files not found for VecNormalize, switching to training mode")
return envs
def softmax(x):
"""
Numerically stable implementation of softmax.
:param x: (numpy float)
:return: (numpy float)
"""
e_x = np.exp(x.T - np.max(x.T, axis=0))
return (e_x / e_x.sum(axis=0)).T
``` |
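
The `softmax` helper subtracts the per-row maximum before exponentiating, which keeps it finite on large logits where the naive formula overflows to NaN. A quick check (the naive line intentionally triggers an overflow warning):

```python
import numpy as np

logits = np.array([[1000.0, 1001.0, 1002.0]])
naive = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)  # overflows, yielding NaN
stable = softmax(logits)  # the helper defined above
assert np.isnan(naive).any()
assert np.allclose(stable.sum(axis=1), 1.0)
```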
{
"source": "johannah/ssm",
"score": 3
} |
#### File: ssm/ssm/preprocessing.py
```python
from tqdm.auto import trange
import autograd.numpy as np
from sklearn.decomposition import PCA
def pca_with_imputation(D, datas, masks, num_iters=20):
datas = [datas] if not isinstance(datas, (list, tuple)) else datas
if masks is not None:
masks = [masks] if not isinstance(masks, (list, tuple)) else masks
assert np.all([m.shape == d.shape for d, m in zip(datas, masks)])
else:
masks = [np.ones_like(data, dtype=bool) for data in datas]
# Flatten the data and masks
data = np.concatenate(datas)
mask = np.concatenate(masks)
if np.any(~mask):
# Fill in missing data with mean to start
fulldata = data.copy()
for n in range(fulldata.shape[1]):
fulldata[~mask[:,n], n] = fulldata[mask[:,n], n].mean()
for itr in range(num_iters):
# Run PCA on imputed data
pca = PCA(D)
x = pca.fit_transform(fulldata)
# Fill in missing data with PCA predictions
pred = pca.inverse_transform(x)
fulldata[~mask] = pred[~mask]
else:
pca = PCA(D)
x = pca.fit_transform(data)
# Unpack xs
xs = np.split(x, np.cumsum([len(data) for data in datas])[:-1])
assert len(xs) == len(datas)
assert all([x.shape[0] == data.shape[0] for x, data in zip(xs, datas)])
return pca, xs
def factor_analysis_with_imputation(D, datas, masks=None, num_iters=50):
datas = [datas] if not isinstance(datas, (list, tuple)) else datas
if masks is not None:
masks = [masks] if not isinstance(masks, (list, tuple)) else masks
assert np.all([m.shape == d.shape for d, m in zip(datas, masks)])
else:
masks = [np.ones_like(data, dtype=bool) for data in datas]
N = datas[0].shape[1]
# Make the factor analysis model
from pybasicbayes.models import FactorAnalysis
fa = FactorAnalysis(N, D, alpha_0=1e-3, beta_0=1e-3)
fa.regression.sigmasq_flat = np.ones(N)
for data, mask in zip(datas, masks):
fa.add_data(data, mask=mask)
fa.set_empirical_mean()
# Fit with EM
lls = [fa.log_likelihood()]
pbar = trange(num_iters)
pbar.set_description("Itr {} LP: {:.1f}".format(0, lls[-1]))
for itr in pbar:
fa.EM_step()
lls.append(fa.log_likelihood())
pbar.set_description("Itr {} LP: {:.1f}".format(itr, lls[-1]))
pbar.update(1)
lls = np.array(lls)
# Get the continuous states and rotate them with SVD
# so that the emission matrix C is orthogonal and sorted
# in order of decreasing explained variance
xs = [states.Z for states in fa.data_list]
C, S, VT = np.linalg.svd(fa.W, full_matrices=False)
xhats = [x.dot(VT.T) for x in xs]
# Test that we got this right
for x, xhat in zip(xs, xhats):
y = x.dot(fa.W.T) + fa.mean
yhat = xhat.dot((C * S).T) + fa.mean
assert np.allclose(y, yhat)
# Strip out the data from the factor analysis model,
# update the emission matrix
fa.regression.A = C * S
fa.data_list = []
return fa, xhats, lls
def interpolate_data(data, mask):
"""
Interpolate over missing entries
"""
assert data.shape == mask.shape and mask.dtype == bool
T, N = data.shape
interp_data = data.copy()
if np.any(~mask):
for n in range(N):
if np.sum(mask[:,n]) >= 2:
t_missing = np.arange(T)[~mask[:,n]]
t_given = np.arange(T)[mask[:,n]]
y_given = data[mask[:,n], n]
interp_data[~mask[:,n], n] = np.interp(t_missing, t_given, y_given)
else:
# Can't do much if we don't see anything... just set it to zero
interp_data[~mask[:,n], n] = 0
return interp_data
def trend_filter(data, npoly=1, nexp=0):
"""
Subtract a linear trend from the data
"""
from sklearn.linear_model import LinearRegression
lr = LinearRegression(fit_intercept=True)
T = data.shape[0]
t = np.arange(T)
# Create feature matrix
features = np.zeros((T, npoly + nexp))
# Polynomial of given order (npoly)
for k in range(npoly):
features[:, k] = t**(k+1)
# Exponential functions (logarithmically spaced)
for k in range(nexp):
tau = T / (k+1)
features[:, npoly+k] = np.exp(-t / tau)
lr.fit(features, data)
trend = lr.predict(features)
return data - trend
def standardize(data, mask):
data2 = data.copy()
data2[~mask] = np.nan
m = np.nanmean(data2, axis=0)
s = np.nanstd(data2, axis=0)
s[~np.any(mask, axis=0)] = 1
y = (data - m) / s
y[~mask] = 0
assert np.all(np.isfinite(y))
return y
``` |
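
`pca_with_imputation` alternates PCA fits with re-imputation of the masked entries, so it degrades gracefully as the missing rate grows. A minimal synthetic example (dimensions, noise level, and missing rate are arbitrary):

```python
import numpy as np

# pca_with_imputation is the function defined above (ssm.preprocessing)
rng = np.random.RandomState(0)
T, N, D = 500, 10, 3

# low-rank data plus a little noise
latents = rng.randn(T, D)
loading = rng.randn(D, N)
data = latents @ loading + 0.1 * rng.randn(T, N)

# randomly drop roughly 20% of the entries
mask = rng.rand(T, N) > 0.2

pca, (x,) = pca_with_imputation(D, data, mask, num_iters=10)
print(x.shape)                       # (500, 3)
print(pca.explained_variance_ratio_)
```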
{
"source": "johannah/trajectories",
"score": 2
} |
#### File: trajectories/trajectories/utils.py
```python
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils import weight_norm as wn
import numpy as np
from IPython import embed
def to_scalar(arr):
if type(arr) == list:
return [x.cpu().data.tolist() for x in arr]
else:
return arr.cpu().data.tolist()
def get_cuts(length,window_size):
if window_size<length:
st_pts = list(np.arange(0,length,window_size,dtype=np.int))
end_pts = st_pts[1:]
if end_pts[-1] != length:
end_pts.append(length)
else:
print("cutting start")
st_pts = st_pts[:-1]
return zip(st_pts, end_pts)
else:
return zip([0], [length])
def concat_elu(x):
""" like concatenated ReLU (http://arxiv.org/abs/1603.05201), but then with ELU """
# Pytorch ordering
axis = len(x.size()) - 3
return F.elu(torch.cat([x, -x], dim=axis))
def log_sum_exp(x):
""" numerically stable log_sum_exp implementation that prevents overflow """
# TF ordering
axis = len(x.size()) - 1
m, _ = torch.max(x, dim=axis)
m2, _ = torch.max(x, dim=axis, keepdim=True)
return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))
def log_prob_from_logits(x):
""" numerically stable log_softmax implementation that prevents overflow """
# TF ordering
axis = len(x.size()) - 1
m, _ = torch.max(x, dim=axis, keepdim=True)
return x - m - torch.log(torch.sum(torch.exp(x - m), dim=axis, keepdim=True))
def discretized_mix_logistic_loss(prediction, target, nr_mix=10, use_cuda=False):
""" log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval """
# Pytorch ordering
l = prediction
x = target
x = x.permute(0, 2, 3, 1)
l = l.permute(0, 2, 3, 1)
xs = [int(y) for y in x.size()]
ls = [int(y) for y in l.size()]
# here and below: unpacking the params of the mixture of logistics
#nr_mix = int(ls[-1] / 10)
# l is prediction
logit_probs = l[:, :, :, :nr_mix]
l = l[:, :, :, nr_mix:].contiguous().view(xs + [nr_mix*2]) # 3--changed to 1 for mean, scale, coef
means = l[:, :, :, :, :nr_mix]
# log_scales = torch.max(l[:, :, :, :, nr_mix:2 * nr_mix], -7.)
log_scales = torch.clamp(l[:, :, :, :, nr_mix:2 * nr_mix], min=-7.)
#coeffs = F.tanh(l[:, :, :, :, 2 * nr_mix:3 * nr_mix])
# here and below: getting the means and adjusting them based on preceding
# sub-pixels
x = x.contiguous()
if use_cuda:
x = x.unsqueeze(-1) + Variable(torch.zeros(xs + [nr_mix]).cuda(), requires_grad=False)
else:
x = x.unsqueeze(-1) + Variable(torch.zeros(xs + [nr_mix]), requires_grad=False)
# ugggghhh
# m2 = (means[:, :, :, 1, :] + coeffs[:, :, :, 0, :]
# * x[:, :, :, 0, :]).view(xs[0], xs[1], xs[2], 1, nr_mix)
# m3 = (means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * x[:, :, :, 0, :] +
# coeffs[:, :, :, 2, :] * x[:, :, :, 1, :]).view(xs[0], xs[1], xs[2], 1, nr_mix)
#
# means = torch.cat((means[:, :, :, 0, :].unsqueeze(3), m2, m3), dim=3)
centered_x = x - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1. / 255.)
cdf_plus = F.sigmoid(plus_in)
min_in = inv_stdv * (centered_x - 1. / 255.)
cdf_min = F.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
log_cdf_plus = plus_in - F.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
log_one_minus_cdf_min = -F.softplus(min_in)
cdf_delta = cdf_plus - cdf_min # probability for all other cases
mid_in = inv_stdv * centered_x
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2. * F.softplus(mid_in)
# now select the right output: left edge case, right edge case, normal
# case, extremely low prob case (doesn't actually happen for us)
# this is what we are really doing, but using the robust version below for extreme cases in other applications and to avoid NaN issue with tf.select()
# log_probs = tf.select(x < -0.999, log_cdf_plus, tf.select(x > 0.999, log_one_minus_cdf_min, tf.log(cdf_delta)))
# robust version, that still works if probabilities are below 1e-5 (which never happens in our code)
# tensorflow backpropagates through tf.select() by multiplying with zero instead of selecting: this requires use to use some ugly tricks to avoid potential NaNs
# the 1e-12 in tf.maximum(cdf_delta, 1e-12) is never actually used as output, it's purely there to get around the tf.select() gradient issue
# if the probability on a sub-pixel is below 1e-5, we use an approximation
# based on the assumption that the log-density is constant in the bin of
# the observed sub-pixel value
inner_inner_cond = (cdf_delta > 1e-5).float()
inner_inner_out = inner_inner_cond * torch.log(torch.clamp(cdf_delta, min=1e-12)) + (1. - inner_inner_cond) * (log_pdf_mid - np.log(127.5))
inner_cond = (x > 0.999).float()
inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out
cond = (x < -0.999).float()
log_probs = cond * log_cdf_plus + (1. - cond) * inner_out
log_probs = torch.sum(log_probs, dim=3) + log_prob_from_logits(logit_probs)
lse = log_sum_exp(log_probs)
# hacky hack mask to weight cars and frogs
masked = (target[:,0,:,:]>-.98).float()*lse
out = lse+masked
return -out.mean()
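# Minimal shape sketch (illustrative assumption, not taken from the training scripts):
# for a single-channel target scaled to [-1, 1] and nr_mix=10, prediction carries
# nr_mix mixture logits plus 2*nr_mix (mean, log-scale) channels per pixel, i.e.
# prediction: (N, 3*nr_mix, H, W) and target: (N, 1, H, W) in Pytorch ordering.
# loss = discretized_mix_logistic_loss(prediction, target, nr_mix=10)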
def discretized_mix_logistic_loss_1d(x, l, use_cuda=False):
""" log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval """
# Pytorch ordering
x = x.permute(0, 2, 3, 1)
l = l.permute(0, 2, 3, 1)
xs = [int(y) for y in x.size()]
ls = [int(y) for y in l.size()]
# here and below: unpacking the params of the mixture of logistics
nr_mix = int(ls[-1] / 3)
logit_probs = l[:, :, :, :nr_mix]
l = l[:, :, :, nr_mix:].contiguous().view(xs + [nr_mix * 2]) # 2 for mean, scale
means = l[:, :, :, :, :nr_mix]
log_scales = torch.clamp(l[:, :, :, :, nr_mix:2 * nr_mix], min=-7.)
# here and below: getting the means and adjusting them based on preceding
# sub-pixels
x = x.contiguous()
if use_cuda:
x = x.unsqueeze(-1) + Variable(torch.zeros(xs + [nr_mix]).cuda(), requires_grad=False)
else:
x = x.unsqueeze(-1) + Variable(torch.zeros(xs + [nr_mix]), requires_grad=False)
# means = torch.cat((means[:, :, :, 0, :].unsqueeze(3), m2, m3), dim=3)
centered_x = x - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1. / 255.)
cdf_plus = F.sigmoid(plus_in)
min_in = inv_stdv * (centered_x - 1. / 255.)
cdf_min = F.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
log_cdf_plus = plus_in - F.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
log_one_minus_cdf_min = -F.softplus(min_in)
cdf_delta = cdf_plus - cdf_min # probability for all other cases
mid_in = inv_stdv * centered_x
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2. * F.softplus(mid_in)
inner_inner_cond = (cdf_delta > 1e-5).float()
inner_inner_out = inner_inner_cond * torch.log(torch.clamp(cdf_delta, min=1e-12)) + (1. - inner_inner_cond) * (log_pdf_mid - np.log(127.5))
inner_cond = (x > 0.999).float()
inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out
cond = (x < -0.999).float()
log_probs = cond * log_cdf_plus + (1. - cond) * inner_out
log_probs = torch.sum(log_probs, dim=3) + log_prob_from_logits(logit_probs)
return -torch.sum(log_sum_exp(log_probs))
def to_one_hot(tensor, n, fill_with=1.):
# we perform one-hot encoding with respect to the last axis
one_hot = torch.FloatTensor(tensor.size() + (n,)).zero_()
if tensor.is_cuda : one_hot = one_hot.cuda()
one_hot.scatter_(len(tensor.size()), tensor.unsqueeze(-1), fill_with)
return Variable(one_hot)
def sample_from_discretized_mix_logistic_1d(l, nr_mix):
# Pytorch ordering
l = l.permute(0, 2, 3, 1)
ls = [int(y) for y in l.size()]
xs = ls[:-1] + [1] #[3]
# unpack parameters
logit_probs = l[:, :, :, :nr_mix]
l = l[:, :, :, nr_mix:].contiguous().view(xs + [nr_mix * 2]) # for mean, scale
# sample mixture indicator from softmax
temp = torch.FloatTensor(logit_probs.size())
if l.is_cuda : temp = temp.cuda()
temp.uniform_(1e-5, 1. - 1e-5)
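# Gumbel-max trick: adding -log(-log(U)) noise to the logits and taking the argmax
# below samples a mixture component index from the categorical given by logit_probs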
temp = logit_probs.data - torch.log(- torch.log(temp))
_, argmax = temp.max(dim=3)
one_hot = to_one_hot(argmax, nr_mix)
sel = one_hot.view(xs[:-1] + [1, nr_mix])
# select logistic parameters
means = torch.sum(l[:, :, :, :, :nr_mix] * sel, dim=4)
log_scales = torch.clamp(torch.sum(
l[:, :, :, :, nr_mix:2 * nr_mix] * sel, dim=4), min=-7.)
u = torch.FloatTensor(means.size())
if l.is_cuda : u = u.cuda()
u.uniform_(1e-5, 1. - 1e-5)
u = Variable(u)
x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))
x0 = torch.clamp(torch.clamp(x[:, :, :, 0], min=-1.), max=1.)
out = x0.unsqueeze(1)
return out
def sample_from_discretized_mix_logistic(l, nr_mix, only_mean=True, deterministic=False):
# Pytorch ordering
l = l.permute(0, 2, 3, 1)
ls = [int(y) for y in l.size()]
xs = ls[:-1] + [1]
# unpack parameters
logit_probs = l[:, :, :, :nr_mix]
l = l[:, :, :, nr_mix:].contiguous().view(xs + [nr_mix * 2])
# sample mixture indicator from softmax
temp = torch.FloatTensor(logit_probs.size())
if l.is_cuda : temp = temp.cuda()
temp.uniform_(1e-5, 1. - 1e-5)
# hack to make deterministic JRH
# could also just take argmax of logit_probs
if deterministic:
temp = temp*0.0+0.5
temp = logit_probs.data - torch.log(- torch.log(temp))
_, argmax = temp.max(dim=3)
one_hot = to_one_hot(argmax, nr_mix)
sel = one_hot.view(xs[:-1] + [1, nr_mix])
# select logistic parameters
means = torch.sum(l[:, :, :, :, :nr_mix] * sel, dim=4)
log_scales = torch.clamp(torch.sum(
l[:, :, :, :, nr_mix:2 * nr_mix] * sel, dim=4), min=-7.)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
u = torch.FloatTensor(means.size())
if l.is_cuda : u = u.cuda()
u.uniform_(1e-5, 1. - 1e-5)
# hack to make deterministic JRH
if deterministic:
u= u*0.0+0.5
u = Variable(u)
if only_mean:
x = means
else:
x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))
out = torch.clamp(torch.clamp(x,min=-1.),max=1.)
#x0 = torch.clamp(torch.clamp(x[:, :, :, 0], min=-1.), max=1.)
#x1 = torch.clamp(torch.clamp(
# x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, min=-1.), max=1.)
#x2 = torch.clamp(torch.clamp(
# x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, min=-1.), max=1.)
#out = torch.cat([x0.view(xs[:-1] + [1]), x1.view(xs[:-1] + [1]), x2.view(xs[:-1] + [1])], dim=3)
# put back in Pytorch ordering
out = out.permute(0, 3, 1, 2)
return out
''' utilities for shifting the image around, efficient alternative to masking convolutions '''
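# Example of the shift semantics: down_shift drops the bottom row and zero-pads a new top
# row, right_shift drops the rightmost column and zero-pads a new left column, so each
# output location only depends on pixels above / to the left of it.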
def down_shift(x, pad=None):
# Pytorch ordering
xs = [int(y) for y in x.size()]
# when downshifting, the last row is removed
x = x[:, :, :xs[2] - 1, :]
# padding left, padding right, padding top, padding bottom
pad = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad
return pad(x)
def right_shift(x, pad=None):
# Pytorch ordering
xs = [int(y) for y in x.size()]
# when righshifting, the last column is removed
x = x[:, :, :, :xs[3] - 1]
# padding left, padding right, padding top, padding bottom
pad = nn.ZeroPad2d((1, 0, 0, 0)) if pad is None else pad
return pad(x)
def load_part_of_model(model, path):
params = torch.load(path)
added = 0
for name, param in params.items():
if name in model.state_dict().keys():
try :
model.state_dict()[name].copy_(param)
added += 1
except Exception as e:
print(e)
pass
print('added %s of params:' % (added / float(len(model.state_dict().keys()))))
```
#### File: trajectories/trajectories/vqvae.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from copy import deepcopy
from IPython import embed
class AutoEncoder(nn.Module):
def __init__(self, num_clusters=512, encoder_output_size=32, nr_logistic_mix=10):
super(AutoEncoder, self).__init__()
self.nr_logistic_mix = nr_logistic_mix
data_channels_size = 1
# the encoder_output_size is the size of the vector that is compressed
# with vector quantization. if it is too large, vector quantization
# becomes more difficult. if it is too small, then the conv net has less
# capacity.
# 64 - the network seems to train fairly well in only one epoch -
# 16 - the network was able to perform nearly perfectly after 100 epochs
# the compression factor can be thought of as follows for an input space
# of 40x40x1 and z output of 10x10x9 (512 = 2**9 = 9 bits)
# (40x40x1x8)/(10x10x9) = 12800/900 = 14.22
self.name = 'vqvae4layer'
num_mixture = 2*self.nr_logistic_mix*data_channels_size+self.nr_logistic_mix
self.encoder = nn.Sequential(
nn.Conv2d(in_channels=data_channels_size,
out_channels=16,
kernel_size=4,
stride=2, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.Conv2d(in_channels=16,
out_channels=32,
kernel_size=4,
stride=2, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Conv2d(in_channels=32,
out_channels=42,
kernel_size=4,
stride=2, padding=1),
nn.BatchNorm2d(42),
nn.ReLU(True),
nn.Conv2d(in_channels=42,
out_channels=encoder_output_size,
kernel_size=1,
stride=1, padding=0),
nn.BatchNorm2d(encoder_output_size),
)
## vq embedding scheme
self.embedding = nn.Embedding(num_clusters, encoder_output_size)
# common scaling for embeddings - variance roughly scales with num_clusters
self.embedding.weight.data.copy_(1./num_clusters *
torch.randn(num_clusters,encoder_output_size))
self.decoder = nn.Sequential(
nn.Conv2d(in_channels=encoder_output_size,
out_channels=42,
kernel_size=1,
stride=1, padding=0),
nn.BatchNorm2d(42),
nn.ReLU(True),
# applies a 2d transposed convolution operator over input image
# composed of several input planes. Can be seen as gradient of Conv2d
# with respsct to its input. also known as fractionally-strided conv.
nn.ConvTranspose2d(in_channels=42,
out_channels=32,
kernel_size=4,
stride=2, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.ConvTranspose2d(in_channels=32,
out_channels=16,
kernel_size=4,
stride=2, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.ConvTranspose2d(in_channels=16,
out_channels=num_mixture,
kernel_size=4,
stride=2, padding=1),
#nn.Sigmoid()
)
def forward(self, x):
# get continuous output directly from encoder
z_e_x = self.encoder(x)
# NCHW is the order in the encoder
# (num, channels, height, width)
N, C, H, W = z_e_x.size()
# need NHWC instead of default NCHW for easier computations
z_e_x_transposed = z_e_x.permute(0,2,3,1)
# needs C,K
emb = self.embedding.weight.transpose(0,1)
# broadcast to determine distance from encoder output to clusters
# NHWC -> NHWCK
measure = z_e_x_transposed.unsqueeze(4) - emb[None, None, None]
# square each element, then sum over channels
dists = torch.pow(measure, 2).sum(-2)
# pytorch gives real min and arg min - select argmin
# this is the closest k for each sample - Equation 1
# latents is an array of integer cluster indices with shape (N, H, W)
latents = dists.min(-1)[1]
# look up cluster centers
z_q_x = self.embedding(latents.view(latents.size(0), -1))
# back to NCHW (orig) - now cluster centers/class
z_q_x = z_q_x.view(N, H, W, C).permute(0, 3, 1, 2)
# put quantized data through decoder
x_tilde = self.decoder(z_q_x)
return x_tilde, z_e_x, z_q_x, latents
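# Minimal usage sketch (the 40x40 single-channel input size is an assumption taken from
# the comments above): with num_clusters=512, encoder_output_size=32 and nr_logistic_mix=10,
# an input of shape (N, 1, 40, 40) gives z_e_x and z_q_x of shape (N, 32, 5, 5),
# latents of shape (N, 5, 5) and x_tilde of shape (N, 30, 40, 40).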
``` |
{
"source": "Johanna-hub/wpt",
"score": 2
} |
#### File: sec-metadata/resources/post-to-owner.py
```python
import json
def main(request, response):
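# Echo the request's Sec-Fetch-* headers back to the opening/embedding page via postMessage
# so the test harness can assert which fetch metadata the browser attached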
headers = [("Content-Type", "text/html")]
body = """
<!DOCTYPE html>
<script>
var data = %s;
if (window.opener)
window.opener.postMessage(data, "*");
if (window.top != window)
window.top.postMessage(data, "*");
</script>
""" % json.dumps({
"dest": request.headers.get("sec-fetch-dest", ""),
"mode": request.headers.get("sec-fetch-mode", ""),
"site": request.headers.get("sec-fetch-site", ""),
"user": request.headers.get("sec-fetch-user", ""),
})
return headers, body
```
#### File: manifest/tests/test_item.py
```python
import pytest
from ..item import URLManifestItem, TestharnessTest
@pytest.mark.parametrize("path", [
"a.https.c",
"a.b.https.c",
"a.https.b.c",
"a.b.https.c.d",
"a.serviceworker.c",
"a.b.serviceworker.c",
"a.serviceworker.b.c",
"a.b.serviceworker.c.d",
])
def test_url_https(path):
m = URLManifestItem("/foobar", "/" + path, "/", "/foo.bar/" + path)
assert m.https is True
@pytest.mark.parametrize("path", [
"https",
"a.https",
"a.b.https",
"https.a",
"https.a.b",
"a.bhttps.c",
"a.httpsb.c",
"serviceworker",
"a.serviceworker",
"a.b.serviceworker",
"serviceworker.a",
"serviceworker.a.b",
"a.bserviceworker.c",
"a.serviceworkerb.c",
])
def test_url_not_https(path):
m = URLManifestItem("/foobar", "/" + path, "/", "/foo.bar/" + path)
assert m.https is False
def test_testharness_meta_key_includes_jsshell():
a = TestharnessTest("/foobar", "/foo", "/foo.bar", "/foo.bar/foo",
jsshell=False, script_metadata=[])
b = TestharnessTest("/foobar", "/foo", "/foo.bar", "/foo.bar/foo",
jsshell=True, script_metadata=[])
assert a.meta_key() != b.meta_key()
@pytest.mark.parametrize("script_metadata", [
None,
[],
[('script', '/resources/WebIDLParser.js'), ('script', '/resources/idlharness.js')],
[[u'script', u'/resources/WebIDLParser.js'], [u'script', u'/resources/idlharness.js']],
])
def test_testharness_hashable_script_metadata(script_metadata):
a = TestharnessTest("/",
"BackgroundSync/interfaces.https.any.js",
"/",
"/BackgroundSync/interfaces.https.any.js",
script_metadata=script_metadata)
assert hash(a) is not None
```
#### File: tools/wpt/install.py
```python
import argparse
import browser
import sys
latest_channels = {
'firefox': 'nightly',
'chrome': 'dev',
'safari': 'preview',
'servo': 'nightly'
}
channel_by_name = {
'stable': 'stable',
'release': 'stable',
'beta': 'beta',
'nightly': latest_channels,
'dev': latest_channels,
'preview': latest_channels,
'experimental': latest_channels,
}
def get_parser():
parser = argparse.ArgumentParser(description="""Install a given browser or webdriver frontend.
For convenience the release channel of the browser accepts various spellings,
but we actually support at most three variants; whatever the latest development
release is (e.g. Firefox nightly or Chrome dev), the latest beta release, and
the most recent stable release.""")
parser.add_argument('browser', choices=['firefox', 'chrome', 'servo'],
help='name of web browser product')
parser.add_argument('component', choices=['browser', 'webdriver'],
help='name of component')
parser.add_argument('--channel', choices=channel_by_name.keys(),
default="nightly", help='Name of browser release channel. '
'"stable" and "release" are synonyms for the latest browser stable release,'
'"nightly", "dev", "experimental", and "preview" are all synonyms for '
'the latest available development release. For WebDriver installs, '
'we attempt to select an appropriate, compatible, version for the '
'latest browser release on the selected channel.')
parser.add_argument('-d', '--destination',
help='filesystem directory to place the component')
return parser
def get_channel(browser, channel):
channel = channel_by_name[channel]
if isinstance(channel, dict):
channel = channel.get(browser)
return channel
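# Example: get_channel("firefox", "preview") resolves to "nightly", while
# get_channel("chrome", "release") resolves to "stable".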
def run(venv, **kwargs):
browser = kwargs["browser"]
destination = kwargs["destination"]
channel = get_channel(browser, kwargs["channel"])
if channel != kwargs["channel"]:
print("Interpreting channel '%s' as '%s'" % (kwargs["channel"],
channel))
if destination is None:
if venv:
if kwargs["component"] == "browser":
destination = venv.path
else:
destination = venv.bin_path
else:
raise argparse.ArgumentError(None,
"No --destination argument, and no default for the environment")
install(browser, kwargs["component"], destination, channel)
def install(name, component, destination, channel="nightly", logger=None):
if logger is None:
import logging
logger = logging.getLogger("install")
if component == 'webdriver':
method = 'install_webdriver'
else:
method = 'install'
subclass = getattr(browser, name.title())
sys.stdout.write('Now installing %s %s...\n' % (name, component))
path = getattr(subclass(logger), method)(dest=destination, channel=channel)
if path:
sys.stdout.write('Binary installed as %s\n' % (path,))
```
#### File: wptrunner/tests/test_testloader.py
```python
from __future__ import unicode_literals
import os
import sys
import tempfile
import pytest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from mozlog import structured
from wptrunner.testloader import TestFilter as Filter
from .test_wpttest import make_mock_manifest
structured.set_default_logger(structured.structuredlog.StructuredLogger("TestLoader"))
include_ini = """\
skip: true
[test_\u53F0]
skip: false
"""
@pytest.mark.xfail(sys.platform == "win32",
reason="NamedTemporaryFile cannot be reopened on Win32")
def test_filter_unicode():
tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10),
("test", "c", 10))
with tempfile.NamedTemporaryFile("wb", suffix=".ini") as f:
f.write(include_ini.encode('utf-8'))
f.flush()
Filter(manifest_path=f.name, test_manifests=tests)
``` |
{
"source": "JohannaLatt/SHM-Kinect",
"score": 2
} |
#### File: SHM-Kinect/Simulator/index.py
```python
import data_formatter as DataFormatter
import messaging as Messaging
from messaging import MSG_TO_SERVER_KEYS
import threading
import argparse
import sys
import time
import os
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--source", help="source for data ('kinect', 'stanford' or 'cornell', default is cornell)")
parser.add_argument('-f', "--filename", help='name of sample data file (including file extension)')
parser.add_argument("-v", "--verbose", action="store_true", help="increase output verbosity")
args = parser.parse_args()
# Sample data
sample_tracking_data = []
path = './data'
if args.source is not None:
if args.source.lower() == 'stanford':
path += '/sample-stanford/'
if args.filename is None:
path += 'sample_squat.txt'
else:
path += args.filename
elif args.source.lower() == 'cornell':
path += '/sample-cornell/'
if args.filename is None:
path += 'sample.txt'
else:
path += args.filename
else:
path += '/sample-kinect/'
if args.filename is None:
path += 'sample.log'
else:
path += args.filename
else:
path += '/sample-kinect/'
if args.filename is None:
path += 'sample.log'
else:
path += args.filename
sample_tracking_data = open(path).read().splitlines()
# Initiate Messaging
Messaging.init(args.verbose)
# Simulation Thread
thread = threading.Thread(target=Messaging.start_sending)
thread.daemon = True
thread.start()
stop_simulating = threading.Event()
stop_simulating.set()
# Simulate tracking
def simulate_tracking():
while True:
for tracking_data_item in sample_tracking_data:
# Busy-wait while the simulation is paused
while stop_simulating.is_set():
pass
# Format the data according to its source
data_to_send = ""
if args.source is not None:
if args.source.lower() == 'stanford':
data_to_send = DataFormatter.format_stanford(tracking_data_item)
elif args.source.lower() == 'cornell':
data_to_send = DataFormatter.format_cornell(tracking_data_item)
else:
data_to_send = tracking_data_item
else:
data_to_send = tracking_data_item
# Send the data to the server
if data_to_send != "":
Messaging.send(MSG_TO_SERVER_KEYS.TRACKING_DATA.name, data_to_send)
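# Throttle to roughly 20 messages per second so the replay resembles a live sensor feed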
time.sleep(.05)
simulation_thread = threading.Thread(target=simulate_tracking)
simulation_thread.start()
# Run the program from the terminal
while(True):
input_key = input("Press q to quit, t to start the tracking simulation and p to pause the simulation...\n").strip()
if input_key == "q":
stop_simulating.set()
Messaging.send(MSG_TO_SERVER_KEYS.TRACKING_LOST.name, '')
print('Exiting..')
os._exit(0)
elif input_key == "p":
stop_simulating.set()
Messaging.send(MSG_TO_SERVER_KEYS.TRACKING_LOST.name, '')
elif input_key == "t":
if stop_simulating is not None:
print('Started tracking')
Messaging.send(MSG_TO_SERVER_KEYS.TRACKING_STARTED.name, '')
stop_simulating.clear()
else:
continue
``` |
{
"source": "JohannaLatt/SHM-Mirror",
"score": 2
} |
#### File: JohannaLatt/SHM-Mirror/rendering.py
```python
from utils.enums import MSG_TO_MIRROR_KEYS, MSG_FROM_MIRROR_KEYS
from rendering_widgets.abstract_gui_base import AbstractGUIBase
import configparser
import json
import importlib
def init_gui(Messaging):
print('[Rendering][info] Initializing GUI')
# Read which GUIBase should be used from the config-file
Config = configparser.ConfigParser()
Config.read('./config/mirror_config.ini')
gui_base_path = Config.get('GUIBase', 'path_name').strip()
gui_base_class = Config.get('GUIBase', 'class_name').strip()
gui_base_module = importlib.import_module(gui_base_path)
class_ = getattr(gui_base_module, gui_base_class)
global gui
gui = class_()
# Make sure that the chosen GUIBase implements the AbstractGUIBase-interface
if not issubclass(type(gui), AbstractGUIBase):
print("\033[91m[Error][Rendering] The chosen GUIBase in the config-file does not implement the AbstractGUIBase - aborting\033[91m")
else:
Messaging.send(MSG_FROM_MIRROR_KEYS.MIRROR_READY.name, '')
gui.run()
# Called by kivy
def render(view, data):
try:
gui
except NameError:
print('[Rendering][warning] Message discarded, rendering not initialized yet')
else:
if view == MSG_TO_MIRROR_KEYS.TEXT.name:
data = decode_data(data)
gui.show_text(data)
elif view == MSG_TO_MIRROR_KEYS.CLEAR_SKELETON.name:
gui.clear_skeleton()
elif view == MSG_TO_MIRROR_KEYS.RENDER_SKELETON.name:
data = decode_data(data)
gui.render_skeleton_data(data)
elif view == MSG_TO_MIRROR_KEYS.CHANGE_SKELETON_COLOR.name:
data = decode_data(data)
gui.change_joint_or_bone_color(data)
elif view == MSG_TO_MIRROR_KEYS.UPDATE_GRAPHS.name:
data = decode_data(data)
gui.update_graps(data)
else:
print('[Rendering][warning] %r is not a suported view' % view)
def decode_data(data):
try:
data = json.loads(data)
except json.decoder.JSONDecodeError:
print('[Rendering][warning] Message discarded, could not decode json: {}'.format(data))
else:
return data
```
#### File: rendering_widgets/Kivy/label_renderer.py
```python
from .animated_label import AnimatedLabel
# ANIMATION
FADE_IN = "fade_in"
STAY = "stay"
FADE_OUT = "fade_out"
class LabelRenderer():
def __init__(self, gui_base):
# Store the root to be able to add and remove labels
self.root = gui_base.root
# Store the SkeletonWidget to be able to retrieve joint positions
self.skeleton_widget = gui_base.skeleton_widget
# Dict to store reused labels
self.existing_labels = {}
def check_text_arguments(self, data):
if "text" not in data:
return False
data["text"] = str(data["text"])
if "font_size" not in data:
data["font_size"] = 40
if "halign" not in data:
data["halign"] = "left"
if "position" not in data:
data["position"] = {"x": 0.5, "y": 0.9}
if "color" not in data:
data["color"] = (1, 1, 1, 1)
if "animation" not in data:
data["animation"] = {}
data["animation"]["fade_in"] = 2
data["animation"]["stay"] = 5
data["animation"]["fade_out"] = 2
else:
if "fade_in" not in data["animation"]:
data["animation"]["fade_in"] = 2
if "stay" not in data["animation"]:
data["animation"]["stay"] = 5
if "fade_out" not in data["animation"]:
data["animation"]["fade_out"] = 2
return True
def show_text(self, data):
# Check the data to make sure it has the necessary arguments
is_valid_data = self.check_text_arguments(data)
if not is_valid_data:
print("[LabelRenderer][warning] Received invalid static text data - discarding")
return
# Calculate the label's position if it is dynamic
if isinstance(data["position"], str):
# Dynamic label at a joint's position
pos = self.skeleton_widget.get_percentage_joint_pos(data["position"])
x_pos = pos[0]
y_pos = pos[1]
# Slightly adjust the text to the left or right so it's visible
if "Left" in data["position"]:
x_pos -= 0.024
else:
x_pos += 0.024
data["position"] = {"x": x_pos, "y": y_pos}
# We do NOT want the text bounding box to extend to the whole screen
data["size_hint"] = (0, 0)
else:
# We do want the text bounding box to extend to the whole screen
data["size_hint"] = (1, 1)
# Check if there is an ID being sent, ie the label might exist already
if "id" in data:
# Label exists already
if data["id"] in self.existing_labels:
label = self.existing_labels[data["id"]]
self.update_existing_label(label, data, data["position"])
# We need a new label and save a reference to it via the ID
else:
label = AnimatedLabel(text=data["text"], pos_hint=data["position"], color=data["color"], font_size=data["font_size"], size_hint=data["size_hint"], halign=data["halign"])
label.set_id(data["id"])
self.existing_labels[data["id"]] = label
self.root.add_widget(label)
self.animate_and_remove_label(label, {FADE_IN: data["animation"][FADE_IN], STAY: data["animation"][STAY], FADE_OUT: data["animation"][FADE_OUT]})
# We need a new label that we do not need to save for later reference
else:
label = AnimatedLabel(text=data["text"], pos_hint=data["position"], color=data["color"], font_size=data["font_size"], size_hint=data["size_hint"], halign=data["halign"])
self.root.add_widget(label)
self.animate_and_remove_label(label, {FADE_IN: data["animation"][FADE_IN], STAY: data["animation"][STAY], FADE_OUT: data["animation"][FADE_OUT]})
def update_existing_label(self, label, data, pos):
# Stop possible animations
label.cancel_animations()
# Set the text and re-animate the text, skipping the fade-in
label.set_text(data["text"])
label.set_color(data["color"])
label.set_pos_hint(pos)
self.animate_and_remove_label(label, {FADE_IN: 0, STAY: data["animation"][STAY], FADE_OUT: data["animation"][FADE_OUT]})
def animate_and_remove_label(self, label, animation_data):
# Define function to remove label from root
def remove_label(animation, label):
self.root.remove_widget(label)
# Remove reference to the label
if label.get_id() in self.existing_labels:
del self.existing_labels[label.get_id()]
label.fade_in_and_out(animation_data[FADE_IN], animation_data[STAY], animation_data[FADE_OUT], remove_label)
``` |
{
"source": "JohannaLatt/SHM-Server",
"score": 2
} |
#### File: main/module_evaluate_squat/evaluate_squat_module.py
```python
from Server.modules.abstract_main_module import AbstractMainModule
from Server.user import USER_STATE, EXERCISE
from Server.utils.enums import MSG_TO_MIRROR_KEYS, USER_JOINTS, KINECT_BONES
from Server.utils.utils import angle_between, get_angle_between_bones, get_vector_of_bone, get_color_at_angle
import json
import numpy as np
from collections import deque
import configparser
class EvaluateSquatModule(AbstractMainModule):
timeseries_length = 10
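# number of recent frames kept in the deques below to smooth noisy joint data before warning the user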
def __init__(self, Messaging, queue, User):
super().__init__(Messaging, queue, User)
self.evaluating = False
# Config
Config = configparser.ConfigParser()
Config.read('././config/mirror_config.ini')
# Colors
self.color_wrong = (0, .7, 1, .7) # red
self.color_correct = (.33, .7, 1, .7) # green
# Keep track of UI changes to easily clean them if needed
self.text_ids = set()
self.colored_bones = set()
self.colored_joints = set()
# Shoulder data
self.rounded_shoulder_warning_angle = Config.getint('EvaluateSquatModule', 'rounded_shoulder_warning_angle', fallback=20)
self.text_id_shoulder_1 = "shoulder_evaluation_1"
self.text_id_shoulder_2 = "shoulder_evaluation_2"
self.shoulder_left_angle_over_time = deque(maxlen=self.timeseries_length)
self.shoulder_right_angle_over_time = deque(maxlen=self.timeseries_length)
# Knee angle data
self.repetitions_until_check = Config.getint('EvaluateSquatModule', 'repetitions_until_check', fallback=4)
self.max_knee_angle_for_warning = Config.getint('EvaluateSquatModule', 'max_knee_angle_for_warning', fallback=90)
self.text_id_knees = "knee_evaluation"
self.text_id_knees_min_1 = "knee_min_evaluation_1"
self.text_id_knees_min_2 = "knee_min_evaluation_2"
self.min_knee_angle_in_current_rep = 180
self.min_knee_angle_over_time = deque(maxlen=self.repetitions_until_check)
self.showing_knee_warning = False
# Looking straight ahead
self.tilted_sideways_head_min_warning_angle = Config.getint('EvaluateSquatModule', 'tilted_sideways_head_min_warning_angle', fallback=5)
self.tilted_up_down_head_min_warning_angle = Config.getint('EvaluateSquatModule', 'tilted_up_down_head_min_warning_angle', fallback=5)
self.text_id_straight_1 = "head_evaluation_1"
self.text_id_straight_2 = "head_evaluation_2"
self.head_tilted_up_down_over_time = deque(maxlen=self.timeseries_length)
self.head_tilted_sideways_over_time = deque(maxlen=self.timeseries_length)
self.showing_head_warning = False
# Knees behind toes
self.knee_behind_toes_tolerance = Config.getint('EvaluateSquatModule', 'knee_behind_toes_tolerance', fallback=20)
self.text_id_knee_toes_1 = "knee_toes_evaluation_1"
self.text_id_knee_toes_2 = "knee_toes_evaluation_2"
self.knee_over_toe_left = deque(maxlen=self.timeseries_length)
self.knee_over_toe_right = deque(maxlen=self.timeseries_length)
# summary
self.reset_summary_variables()
self.text_id_summary_1 = "summary_1"
self.text_id_summary_2 = "summary_2"
self.text_id_summary_3 = "summary_3"
self.text_id_summary_4 = "summary_4"
self.text_id_summary_5 = "summary_5"
def user_skeleton_updated(self, user):
super().user_skeleton_updated(user)
# Check if the user is currently doing a squat
if user.get_user_state() is USER_STATE.EXERCISING and user.get_exercise() is EXERCISE.SQUAT:
self.evaluating = True
self.joints = user.get_joints()
self.bones = user.get_bones()
if len(self.joints) == 0 or len(self.bones) == 0:
return # Data not ready yet
self.show_knee_angles()
self.check_straight_shoulders()
self.check_body_behind_toes()
self.check_facing_forward()
elif self.evaluating:
self.clean_UI()
self.reset_skeleton_color()
self.reset_variables()
def user_state_updated(self, user):
super().user_state_updated(user)
current_rep = user.get_current_repetitions()
# Show a summary when the user walks away
if user.get_user_state() is USER_STATE.NONE and current_rep > 0:
average_knee_angle = int(np.mean(np.asarray(self.min_knee_angles)))
head_warnings = sum(self.head_warnings_per_rep)
shoulder_warnings = sum(self.shoulder_warnings_per_rep)
toe_warnings = sum(self.toe_warnings_per_rep)
self.show_message_at_position("Good job! You finished {} repetitions!".format(current_rep), self.text_id_summary_1, halign="center", position={"x": 0, "y": 0.1}, stay=4)
self.show_message_at_position("Your average knee angle was {}°".format(average_knee_angle), self.text_id_summary_2, halign="center", position={"x": 0, "y": 0.04}, stay=4)
self.show_message_at_position("Your head was not straight in {} repetitions.".format(head_warnings), self.text_id_summary_3, halign="center", position={"x": 0, "y": -0.02}, stay=4)
self.show_message_at_position("Your shoulders were rounded in {} repetitions".format(shoulder_warnings), self.text_id_summary_4, halign="center", position={"x": 0, "y": -0.08}, stay=4)
self.show_message_at_position("Your toes were in front of your knees in {} repetitions".format(toe_warnings), self.text_id_summary_5, halign="center", position={"x": 0, "y": -0.14}, stay=4)
self.reset_summary_variables()
def user_finished_repetition(self, user):
super().user_finished_repetition(user)
current_rep = user.get_current_repetitions()
# Save the maximum angle the user reached in this rep
self.min_knee_angle_over_time.append(self.min_knee_angle_in_current_rep)
self.min_knee_angles.append(self.min_knee_angle_in_current_rep)
self.min_knee_angle_in_current_rep = 180
# Summary (did the user get a warning in this rep)
self.head_warnings_per_rep.append(self.head_warning_in_current_rep)
self.shoulder_warnings_per_rep.append(self.shoulder_warning_in_current_rep)
self.toe_warnings_per_rep.append(self.toe_warning_in_current_rep)
# See if the user continuously does not go low (check that after every
# x repetitions)
if current_rep % self.repetitions_until_check != 0:
return
average_min_knee_angle = int(np.mean(np.asarray(self.min_knee_angle_over_time)))
if average_min_knee_angle > self.max_knee_angle_for_warning:
self.show_message_at_position("Your average knee angle is only {}°".format(average_min_knee_angle), self.text_id_knees_min_1, position={"x":-0.02, "y":-0.10}, stay=3)
self.show_message_at_position("Try and go lower! ", self.text_id_knees_min_2, position={"x":-0.02, "y":-0.16}, stay=3)
def tracking_lost(self):
print("[EvaluateSquatModule][info] Cleaning up")
super().tracking_lost()
self.clean_UI()
self.reset_skeleton_color()
self.reset_variables()
def reset_variables(self):
self.shoulder_left_angle_over_time.clear()
self.shoulder_right_angle_over_time.clear()
self.min_knee_angle_over_time.clear()
self.min_knee_angle_in_current_rep = 180
self.head_tilted_up_down_over_time.clear()
self.head_tilted_sideways_over_time.clear()
self.evaluating = False
def reset_summary_variables(self):
self.min_knee_angles = []
self.head_warning_in_current_rep = False
self.head_warnings_per_rep = []
self.shoulder_warning_in_current_rep = False
self.shoulder_warnings_per_rep = []
self.toe_warning_in_current_rep = False
self.toe_warnings_per_rep = []
def clean_UI(self):
for text_ids in self.text_ids.copy():
self.hide_message_at_position(text_ids)
def reset_skeleton_color(self):
for colored_bone in self.colored_bones.copy():
self.change_joint_or_bone_color('bone', colored_bone, '')
for colored_joint in self.colored_joints.copy():
self.change_joint_or_bone_color('joint', colored_joint, '')
def show_knee_angles(self):
# Calculate the angle between the thigh and shin
right_knee_angle = 180 - int(get_angle_between_bones(self.joints, self.bones, KINECT_BONES.ThighRight, KINECT_BONES.ShinRight))
left_knee_angle = 180 - int(get_angle_between_bones(self.joints, self.bones, KINECT_BONES.ThighLeft, KINECT_BONES.ShinLeft))
# Sanity check
if right_knee_angle == 0 or left_knee_angle == 0:
return
if right_knee_angle < 130 or left_knee_angle < 130:
self.showing_knee_warning = True
# Show angles
self.show_message_at_joint(str(right_knee_angle) + "°", USER_JOINTS.KneeRight.name)
self.show_message_at_joint(str(left_knee_angle) + "°", USER_JOINTS.KneeLeft.name)
# Show colored feedback
left_color = get_color_at_angle(left_knee_angle, 70, 130, self.color_correct, self.color_wrong)
self.change_joint_or_bone_color('joint', USER_JOINTS.KneeLeft.name, left_color)
right_color = get_color_at_angle(right_knee_angle, 70, 130, self.color_correct, self.color_wrong)
self.change_joint_or_bone_color('joint', USER_JOINTS.KneeRight.name, right_color)
elif self.showing_knee_warning:
self.showing_knee_warning = False
self.hide_message_at_joint(USER_JOINTS.KneeRight.name)
self.hide_message_at_joint(USER_JOINTS.KneeLeft.name)
self.change_joint_or_bone_color('joint', USER_JOINTS.KneeLeft.name, '')
self.change_joint_or_bone_color('joint', USER_JOINTS.KneeRight.name, '')
# Save the angle over time
angle_over_time = np.mean([right_knee_angle, left_knee_angle])
if angle_over_time < self.min_knee_angle_in_current_rep:
self.min_knee_angle_in_current_rep = angle_over_time
# The shoulders should be pushed out and the user shouldn't round his shoulders
def check_straight_shoulders(self):
x_axis = (1, 0, 0)
# Calculate the angle of the left shoulder
left_shoulder_angle = self.clean_angle(angle_between(x_axis, get_vector_of_bone(self.joints, self.bones, KINECT_BONES.ClavicleLeft)))
self.shoulder_left_angle_over_time.append(left_shoulder_angle)
# Find out whether the angle is in front of the user (ie rounded shoulders)
# or behind the user (ie open shoulders)
left_in_front_of_user = self.joints[USER_JOINTS.ShoulderLeft.name][2] < self.joints[USER_JOINTS.SpineShoulder.name][2]
left_shoulder_okay = False
# Calculate the angle of the right shoulder
right_shoulder_angle = self.clean_angle(angle_between(x_axis, get_vector_of_bone(self.joints, self.bones, KINECT_BONES.ClavicleRight)))
self.shoulder_right_angle_over_time.append(right_shoulder_angle)
right_in_front_of_user = self.joints[USER_JOINTS.ShoulderRight.name][2] < self.joints[USER_JOINTS.SpineShoulder.name][2]
right_shoulder_okay = False
if len(self.shoulder_left_angle_over_time) < self.timeseries_length:
# not enough data yet
return
# Left shoulder isn't straight over period of 10 frames - show warning
if np.mean(np.asarray(self.shoulder_left_angle_over_time)) > self.rounded_shoulder_warning_angle and left_in_front_of_user:
self.show_message_at_joint(str(left_shoulder_angle) + "°", USER_JOINTS.ShoulderLeft.name)
self.change_joint_or_bone_color('joint', USER_JOINTS.SpineShoulder.name, self.color_wrong)
self.change_joint_or_bone_color('bone', KINECT_BONES.ClavicleLeft.name, self.color_wrong)
self.change_joint_or_bone_color('joint', USER_JOINTS.ShoulderLeft.name, self.color_wrong)
self.show_shoulder_warning()
else:
left_shoulder_okay = True
# Right shoulder isn't straight over period of 10 frames - show warning
if np.mean(np.asarray(self.shoulder_right_angle_over_time)) > self.rounded_shoulder_warning_angle and right_in_front_of_user:
self.show_message_at_joint(str(right_shoulder_angle) + "°", USER_JOINTS.ShoulderRight.name)
self.change_joint_or_bone_color('joint', USER_JOINTS.SpineShoulder.name, self.color_wrong)
self.change_joint_or_bone_color('bone', KINECT_BONES.ClavicleRight.name, self.color_wrong)
self.change_joint_or_bone_color('joint', USER_JOINTS.ShoulderRight.name, self.color_wrong)
self.show_shoulder_warning()
else:
right_shoulder_okay = True
# Summary
if not left_shoulder_okay or not right_shoulder_okay:
self.shoulder_warning_in_current_rep = True
# Cleanup
if left_shoulder_okay and right_shoulder_okay and USER_JOINTS.SpineShoulder.name in self.colored_joints:
self.change_joint_or_bone_color('joint', USER_JOINTS.SpineShoulder.name, '')
if left_shoulder_okay and KINECT_BONES.ClavicleLeft.name in self.colored_bones:
self.hide_message_at_joint(USER_JOINTS.ShoulderLeft.name)
self.change_joint_or_bone_color('bone', KINECT_BONES.ClavicleLeft.name, '')
self.change_joint_or_bone_color('joint', USER_JOINTS.ShoulderLeft.name, '')
if right_shoulder_okay and KINECT_BONES.ClavicleRight.name in self.colored_bones:
self.hide_message_at_joint(USER_JOINTS.ShoulderRight.name)
self.change_joint_or_bone_color('bone', KINECT_BONES.ClavicleRight.name, '')
self.change_joint_or_bone_color('joint', USER_JOINTS.ShoulderRight.name, '')
def show_shoulder_warning(self):
self.show_message_at_position("Make sure to push your shoulders back,", self.text_id_shoulder_1, position={"x":-0.02, "y":0.33}, stay=3)
self.show_message_at_position("right now they are rounded!", self.text_id_shoulder_2, position={"x":-0.02, "y":0.27}, stay=3)
# Check whether the user's knees stay above or behind the toes at all times,
# i.e. the z-component of the knee is always greater than the toes
def check_body_behind_toes(self):
# Left leg
left_knee_behind_toes = self.joints[USER_JOINTS.KneeLeft.name][2] + self.knee_behind_toes_tolerance - self.joints[USER_JOINTS.FootLeft.name][2]
self.knee_over_toe_left.append(bool(left_knee_behind_toes < 0))
left_knee_okay = False
# Right leg
right_knee_behind_toes = self.joints[USER_JOINTS.KneeRight.name][2] + self.knee_behind_toes_tolerance - self.joints[USER_JOINTS.FootRight.name][2]
self.knee_over_toe_right.append(bool(right_knee_behind_toes < 0))
right_knee_okay = False
if len(self.knee_over_toe_left) < self.timeseries_length:
# not enough data yet
return
# If 70% of the last frames were wrong knee positions...
if (self.knee_over_toe_left.count(True)/self.timeseries_length) > 0.7:
left_leg_color = get_color_at_angle(left_knee_behind_toes, -200, 0, self.color_correct, self.color_wrong)
self.change_left_leg_color(left_leg_color)
self.show_knee_toe_warning()
else:
left_knee_okay = True
# If 70% of the last frames were wrong knee positions...
if (self.knee_over_toe_right.count(True)/self.timeseries_length) > 0.7:
right_leg_color = get_color_at_angle(right_knee_behind_toes, -200, 0, self.color_correct, self.color_wrong)
self.change_right_leg_color(right_leg_color)
self.show_knee_toe_warning()
else:
right_knee_okay = True
# Summary
if not left_knee_okay or not right_knee_okay:
self.toe_warning_in_current_rep = True
# Cleanup
if left_knee_okay and KINECT_BONES.FootLeft.name in self.colored_bones:
self.change_left_leg_color('')
if right_knee_okay and KINECT_BONES.FootRight.name in self.colored_bones:
self.change_right_leg_color('')
def show_knee_toe_warning(self):
self.show_message_at_position("Your knee should not be in front", self.text_id_knee_toes_1, position={"x":-0.02, "y":-0.3}, stay=2)
self.show_message_at_position("of your toes, try and push your hips further out!", self.text_id_knee_toes_2, position={"x":-0.02, "y":-0.36}, stay=2)
def change_right_leg_color(self, color):
self.change_joint_or_bone_color('bone', KINECT_BONES.ShinRight.name, color)
self.change_joint_or_bone_color('bone', KINECT_BONES.FootRight.name, color)
self.change_joint_or_bone_color('joint', USER_JOINTS.AnkleRight.name, color)
self.change_joint_or_bone_color('joint', USER_JOINTS.FootRight.name, color)
def change_left_leg_color(self, color):
self.change_joint_or_bone_color('bone', KINECT_BONES.ShinLeft.name, color)
self.change_joint_or_bone_color('bone', KINECT_BONES.FootLeft.name, color)
self.change_joint_or_bone_color('joint', USER_JOINTS.AnkleLeft.name, color)
self.change_joint_or_bone_color('joint', USER_JOINTS.FootLeft.name, color)
# Check whether the user is always facing forward which prevents
# spine pain and damage.
# For that, the vector through head and neck ideally has to be
# perpendicualr to the x-axis (head not tilted sideways) and perpendicular
# to the z-axis (head not tilted up or down)
def check_facing_forward(self):
head_vector = get_vector_of_bone(self.joints, self.bones, KINECT_BONES.Head)
# Check for x-axis perpendicularity
x_axis = (1, 0, 0)
tilted_sideways = 90 - self.clean_angle(angle_between(x_axis, head_vector))
if tilted_sideways > self.tilted_sideways_head_min_warning_angle:
self.head_tilted_sideways_over_time.append(True)
else:
self.head_tilted_sideways_over_time.append(False)
# Check for z-perpendicularity
z_axis = (0, 0, 1)
tilted_up_down = 90 - self.clean_angle(angle_between(z_axis, head_vector))
if tilted_up_down > self.tilted_up_down_head_min_warning_angle:
self.head_tilted_up_down_over_time.append(True)
else:
self.head_tilted_up_down_over_time.append(False)
if len(self.head_tilted_sideways_over_time) < self.timeseries_length:
# not enough data yet
return
# If 70% of the last frames were wrong head positions...
if (self.head_tilted_up_down_over_time.count(True)/self.timeseries_length) > 0.7:
msg = str(tilted_up_down) + "°"
self.show_message_at_joint(msg, USER_JOINTS.Head.name)
head_color = get_color_at_angle(tilted_up_down, 0, self.tilted_up_down_head_min_warning_angle + 5, self.color_correct, self.color_wrong)
self.set_head_color(head_color)
self.head_warning_in_current_rep = True
if not self.showing_head_warning:
self.showing_head_warning = True
direction = "up" if self.joints[USER_JOINTS.Head.name][2] > self.joints[USER_JOINTS.Neck.name][2] else "down"
self.show_message_at_position("Your head is tilted {},".format(direction), self.text_id_straight_1, position={"x":-0.02, "y":0.46}, stay=2)
self.show_message_at_position("try and look straight ahead!", self.text_id_straight_2, position={"x":-0.02, "y":0.40}, stay=2)
elif (self.head_tilted_sideways_over_time.count(True)/self.timeseries_length) > 0.7:
msg = str(tilted_sideways) + "°"
self.show_message_at_joint(msg, USER_JOINTS.Head.name)
head_color = get_color_at_angle(tilted_sideways, 0, self.tilted_sideways_head_min_warning_angle + 5, self.color_correct, self.color_wrong)
self.set_head_color(head_color)
self.head_warning_in_current_rep = True
if not self.showing_head_warning:
self.showing_head_warning = True
self.show_message_at_position("Your head is tilted sideways,", self.text_id_straight_1, position={"x":-0.02, "y":0.46}, stay=2)
self.show_message_at_position("try and look straight ahead!", self.text_id_straight_2, position={"x":-0.02, "y":0.40}, stay=2)
elif self.showing_head_warning:
self.showing_head_warning = False
self.hide_message_at_position(self.text_id_straight_1)
self.hide_message_at_position(self.text_id_straight_2)
self.hide_message_at_joint(USER_JOINTS.Head.name)
self.set_head_color('')
def set_head_color(self, color):
self.change_joint_or_bone_color('joint', KINECT_BONES.Neck.name, color)
self.change_joint_or_bone_color('joint', KINECT_BONES.Head.name, color)
self.change_joint_or_bone_color('bone', USER_JOINTS.Head.name, color)
def clean_angle(self, angle):
if angle > 90:
angle = 180 - angle
return int(angle)
def show_message_at_joint(self, text, joint):
self.Messaging.send_message(MSG_TO_MIRROR_KEYS.TEXT.name,
json.dumps({
"text": text,
"id": joint,
"font_size": 30,
"position": joint,
"animation": {
"fade_in": 0.5,
"stay": 10000,
"fade_out": 1}
}))
self.text_ids.add(joint)
def hide_message_at_joint(self, joint):
self.Messaging.send_message(MSG_TO_MIRROR_KEYS.TEXT.name,
json.dumps({
"text": "",
"id": joint,
"position": joint
}))
if joint in self.text_ids:
self.text_ids.remove(joint)
def show_message_at_position(self, text, id, position, stay=10000, halign="right"):
self.Messaging.send_text_to_mirror(text, id=id, position=position, stay=stay, halign=halign)
self.text_ids.add(id)
def hide_message_at_position(self, id):
self.Messaging.hide_text_message(id)
if id in self.text_ids:
self.text_ids.remove(id)
def change_joint_or_bone_color(self, type, name, color):
if type =='joint':
if color == '' and name in self.colored_joints:
self.colored_joints.remove(name)
elif color != '':
self.colored_joints.add(name)
elif type =='bone':
if color == '' and name in self.colored_bones:
self.colored_bones.remove(name)
elif color != '':
self.colored_bones.add(name)
self.Messaging.send_message(MSG_TO_MIRROR_KEYS.CHANGE_SKELETON_COLOR.name,
json.dumps({
"type": type,
"name": name,
"color": color
}))
```
#### File: Server/utils/utils.py
```python
from numpy import (dot, arccos, linalg, clip, degrees)
import numpy as np
# Math
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / linalg.norm(vector)
def angle_between(v1, v2):
""" Returns the angle in degrees between vectors 'v1' and 'v2' """
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return degrees(arccos(clip(dot(v1_u, v2_u), -1.0, 1.0)))
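# Example: angle_between((1, 0, 0), (0, 1, 0)) returns 90.0 and angle_between((1, 0, 0), (1, 0, 0)) returns 0.0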
# Skeleton-specific utils
def get_vector_of_bone(joints, bones, bone):
return (joints[bones[bone][0].name][0] - joints[bones[bone][1].name][0], # x
joints[bones[bone][0].name][1] - joints[bones[bone][1].name][1], # y
joints[bones[bone][0].name][2] - joints[bones[bone][1].name][2]) # z
def get_angle_between_bones(joints, bones, bone_a, bone_b):
vector_a = get_vector_of_bone(joints, bones, bone_a)
vector_b = get_vector_of_bone(joints, bones, bone_b)
angle = np.around(angle_between(vector_a, vector_b), decimals=1)
return angle
# Colors
def get_color_at_angle(angle, angle_min, angle_max, color_low, color_high):
''' Returns a color interpolated between color_low and color_high depending
on where the input angle falls between angle_min and angle_max.
Angles at or below angle_min map to color_low, at or above angle_max to color_high. '''
# Transform the angle to a value between 0 and 1
if angle < angle_min:
t = 0
elif angle > angle_max:
t = 1
else:
t = (angle - angle_min) / (angle_max - angle_min)
# Calculate the interpolated color
return lerp_hsv(color_low, color_high, t)
def lerp_hsv(color_a, color_b, t):
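# Interpolates two HSV(+alpha) colors, taking the shorter way around the hue circle
# so the blend never sweeps through unrelated hues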
# Hue interpolation
d = color_b[0] - color_a[0]
if color_a[0] > color_b[0]:
# Swap (a.h, b.h)
h3 = color_b[0]
color_b = (color_a[0], color_b[1], color_b[2], color_b[3])
color_a = (h3, color_a[1], color_a[2], color_a[3])
d = -d
t = 1 - t
if d > 0.5: # 180deg
color_a = (color_a[0] + 1, color_a[1], color_a[2], color_a[3]) # 360deg
h = (color_a[0] + t * (color_b[0] - color_a[0]) ) % 1 # 360deg
if d <= 0.5: # 180deg
h = color_a[0] + t * d
return(h,
color_a[1] + t * (color_b[1]-color_a[1]), # S
color_a[2] + t * (color_b[2]-color_a[2]), # V
color_a[3] + t * (color_b[3]-color_a[3]) # A
)
``` |
{
"source": "johannamay/ILIAS---Test-Generator",
"score": 2
} |
#### File: ILIAS---Test-Generator/Analyse_Tool/read_data_test.py
```python
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from helper_functions import ResultDict
import custom_irt as irt
class IliasDataPlotter:
def __init__(self, filename, nr_questions=8):
self.data = pd.read_excel(filename).dropna()
self.nr_questions = nr_questions
@property
def questions(self):
return self.data.iloc[:, -self.nr_questions:]
def fix_question_length(self, question, maxlen=25):
if len(question) > maxlen:
return question[:(maxlen - 3)] + "..."
else:
return question
def plot_used_time(self):
minutes = [dp.minute + dp.second / 60 for dp in pd.to_datetime(self.data["Bearbeitungsdauer"])]
sns.boxplot(pd.Series(minutes, name="Bearbeitungszeit in Minuten"), orient="v")
def plot_result(self, min=0.0, max=10.0):
results = self.data["Testergebnis in Punkten"]
sns.distplot(results, kde=False)
def plot_question_dists(self):
question_df = self.questions
fig, axes = plt.subplots(int(self.nr_questions/3) + 1, 3,
figsize=(10, 10))
plt.subplots_adjust(wspace=0.4, hspace=0.5)
for i, question in enumerate(question_df):
data = question_df[question]
sns.distplot(data,
kde=False,
bins=np.arange(0, max(data)+2),
ax=axes[i % 3, int(i / 3)],
axlabel=False)
axes[i % 3, int(i / 3)].set_title(self.fix_question_length(question))
plt.show()
class ExamDataPlotter:
def __init__(self, filename, max_points):
cols = ["ZW 0", "ZW 1", "ZW 2", "", "P 0", "P 1", "P 2", "P 3"]
self.data = pd.read_csv(filename, delimiter=";", skiprows=2, names=cols, header=0)
self.max_points = max_points
def boxplot(self, size=(12, 6), color=None):
plt.figure(figsize=size)
sns.boxplot(data=self.data, palette=color)
plt.ylabel("Klausurpunkte")
plt.xlabel("Anzahl absolvierter Zwischenleistungen")
def violinplot(self, size=(12, 6), color=None):
plt.figure(figsize=size)
sns.violinplot(data=self.data, palette=color)
plt.ylabel("Klausurpunkte")
plt.xlabel("Anzahl absolvierter Zwischenleistungen")
class IliasParser:
def __init__(self, filename):
non_question_cols = 19 # number of columns in the export file that are not questions
self.df_dict = pd.read_excel(filename, sheet_name=None)
self.nr_questions = len(self.df_dict["Testergebnisse"].keys()) - non_question_cols
@property
def test_results(self):
# Missing values are filled in below; different methods are needed depending on the column
df_full = self.df_dict["Testergebnisse"].fillna(value=0)
df_statistics = self.df_dict["Testergebnisse"].loc[:, :"Durchlauf"].fillna(method="ffill")
df_full.update(df_statistics, overwrite=True)
return df_full.set_index("Name")
def _get_correct_entry(self, df, name):
# Small helper function, since more than one row per person sometimes has to be processed
final_rating_row = df.loc[name]["<NAME>"]
if type(final_rating_row) is pd.Series:
boolean_comprehension = df.loc[name]["Durchlauf"] == final_rating_row[0]
return df.loc[name][boolean_comprehension].iloc[0]
else:
return df.loc[name]
def _unique_test_results(self):
# Selects the correct entry for each person in the overview sheet.
# ILIAS creates extra rows when a person completes several test runs
unique_df = {}
df = self.test_results
for name in df.index:
if name in unique_df:
continue
unique_df[name] = self._get_correct_entry(df, name)
return pd.DataFrame(unique_df).T
def _answers_per_sheet(self):
answer_sheets = list(self.df_dict.keys())[1:]
for i, name in enumerate(answer_sheets):
df = self.df_dict[name]
df.columns = ["Question", "Answer"]
df = df.set_index("Question")
df.to_csv(f"./answer_sheets/{i}.csv")
def _answers_single_sheet(self):
df = self.df_dict["Auswertung für alle Benutzer"]
df.columns = ["Question", "Answer"]
user = {}
j = 0
for i, line in tqdm(df.iterrows()):
user[i] = line
if type(line["Question"]) is str:
if "Ergebnisse von Testdurchlauf" in line["Question"]:
user = pd.DataFrame(user).T.iloc[:-1]
user.reset_index(drop=True, inplace=True)
user.to_csv(f"./answer_sheets/{j}.csv")
j += 1
user = {}
def _create_answer_log(self):
if not "answer_sheets" in os.listdir():
os.makedirs("./answer_sheets/")
if "Auswertung für alle Benutzer" in self.df_dict.keys():
self._answers_single_sheet()
else:
self._answers_per_sheet()
def _create_results_dict(self):
dir = "./answer_sheets/"
results = os.listdir(dir)
result_dict = ResultDict()
unique_id = 0
for (student_id, file) in tqdm(enumerate(results)):
table = pd.read_csv(dir + file, index_col=0)
for row in table.iterrows():
if row[1].Question is np.nan and row[1].Answer is np.nan:
# deletes empty rows from file and skips loop execution
continue
if row[1].Question in ("Formelfrage", "Single Choice", "Multiple Choice"):
# identifies current question
current_question = row[1].Answer
unique_id = 0 # the unique id helps if ILIAS does not return any variables as the question name
continue
result_dict.append(current_question, row[1], unique_id, student_id)
unique_id += 1
result_dict.save()
def export(self, name):
df = self._unique_test_results()
self._create_answer_log()
df.to_csv(f"{name}.csv")
print(f"Test results saved as {name}.csv!")
def export_anon(self, name):
df = self._unique_test_results()
self._create_answer_log()
df.reset_index(drop=True, inplace=True)
df["Benutzername"] = range(len(df))
df["Matrikelnummer"] = range(len(df))
df.to_csv(f"{name}.csv")
print(f"Anonymous test results saved as {name}.csv!")
class PaperDataPlotter:
def __init__(self, filename):
self.df_dict = pd.read_excel(filename, sheet_name=None)
self.keys = list(self.df_dict.keys())
self.max_points = [41, 43, 50, 40, 40, 40]
self.font_size = 2
self.color = "Blues"
self.size = (24, 12)
for key in self.df_dict:
self.df_dict[key].drop(self.df_dict[key].index[0:3], inplace=True)
self.df_dict[key].columns = self.df_dict[key].iloc[0]
self.df_dict[key].drop(self.df_dict[key].index[0], inplace=True)
def boxplot(self, key=0):
sns.set(font_scale=self.font_size)
plt.figure(figsize=self.size)
data = self.df_dict[self.keys[key]] / self.max_points[key]
data.columns = data.columns.fillna(value="")
ax = sns.boxplot(data=data, palette=self.color)
nobs = self.df_dict[self.keys[key]].count(axis=0)
median = data.median(axis=0)
pos = range(len(nobs))
half = len(nobs)/2
for tick, label in zip(pos, ax.get_xticklabels()):
if tick > half:
col = "w"
else:
col = "k"
ax.text(pos[tick], median[tick] + 0.005, f"n={nobs[tick]}",
horizontalalignment='center', size='x-small', color=col, weight='semibold')
ax.set(ylim=(0, 1))
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment="right")
ax.text(pos[0], 0.95, "Zwischentests",
horizontalalignment='left', size='large', color='k', weight='semibold')
ax.text(pos[10], 0.95, "Praktika",
horizontalalignment='left', size='large', color='k', weight='semibold')
plt.ylabel("Erreichte relative Klausurpunktzahl")
plt.xlabel("Anzahl absolvierter Zwischenleistungen")
def boxplot_p(self, key=4):
sns.set(font_scale=self.font_size)
plt.figure(figsize=self.size)
data = self.df_dict[self.keys[key]]
data.columns = data.columns.fillna(value="")
ax = sns.boxplot(data=data, palette=self.color)
nobs = self.df_dict[self.keys[key]].count(axis=0)
median = data.median(axis=0)
pos = range(len(nobs))
half = len(nobs)/2
for tick, label in zip(pos, ax.get_xticklabels()):
if tick > half:
col = "w"
else:
col = "k"
ax.text(pos[tick], median[tick] + 0.005, f"n={nobs[tick]}",
horizontalalignment='center', size='x-small', color=col, weight='semibold')
ax.set(ylim=(0, 1))
ax.text(pos[0], 0.95, "Praktika",
horizontalalignment='left', size='large', color='k', weight='semibold')
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment="right")
plt.ylabel("Erreichte relative Klausurpunktzahl")
plt.xlabel("Anzahl absolvierter Praktika")
def boxplot_bp(self, key=4):
sns.set(font_scale=self.font_size)
plt.figure(figsize=self.size)
data = self.df_dict[self.keys[key]]
data.columns = data.columns.fillna(value="")
ax = sns.boxplot(data=data, palette=self.color)
nobs = self.df_dict[self.keys[key]].count(axis=0)
median = data.median(axis=0)
pos = range(len(nobs))
half = len(nobs)/2
for tick, label in zip(pos, ax.get_xticklabels()):
if tick > half:
col = "w"
else:
col = "k"
ax.text(pos[tick], median[tick] + 0.005, f"n={nobs[tick]}",
horizontalalignment='center', size='x-small', color=col, weight='semibold')
ax.set(ylim=(0, 1))
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment="right")
ax.text(pos[0], 0.95, "Bonuspunkte",
horizontalalignment='left', size='large', color='k', weight='semibold')
plt.ylabel("Erreichte relative Klausurpunktzahl")
plt.xlabel("Anzahl erreichter Bonuspunkte")
class IRT_Plotter:
def __init__(self, filename):
self.data = pd.read_excel(filename)
self._learn()
def _learn(self):
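        # Estimate person abilities (thetas) and item parameters a, b, c, d from the response
        # matrix via irt.estimate_thetas(); parameters are stored per exercise (column).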
print("Berechnung der IRT-Variablen gestartet, dieser Prozess kann einige Minuten in Anspruch nehmen!")
self.thetas, abcd = irt.estimate_thetas(self.data, verbose=True)
self.abcd = pd.DataFrame(abcd)
self.abcd.columns = ["a", "b", "c", "d"]
self.abcd.index = self.data.columns
print("Berechnung der IRT-Variablen abgeschlossen!")
def _4pl_model(self, x, abcd):
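        # Note: the active expression below appears to differ from the usual four-parameter
        # logistic form d + (a - d) / (1 + x/c)**b, which is kept in the commented-out
        # alternative; it is left unchanged here.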
return abcd["c"]*((abcd["a"]-abcd["d"])/(self._complex((float(x - abcd["d"]) - 1) ** float(1/abcd["b"]))))
#return abcd["d"] + (abcd["a"] - abcd["d"]) / (1 + self._complex(x / abcd["c"]))**abcd["b"]
def _complex(self, c):
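        # Return the magnitude of a complex number, carrying over the sign of its real part.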
dir = 1
if c.real < 0:
dir = -1
return np.sqrt(c.real**2 + c.imag**2) * dir
def show_thetas(self):
sns.set(font_scale=1.5)
plt.figure(figsize=(10, 5))
plt.plot(np.sort(np.squeeze(self.thetas)))
def show_a(self):
sns.set(font_scale=1.5)
plt.figure(figsize=(14, 7))
self.abcd["a"].plot.bar()
def show_b(self):
sns.set(font_scale=1.5)
plt.figure(figsize=(14, 7))
self.abcd["b"].plot.bar()
def show_c(self):
sns.set(font_scale=1.5)
plt.figure(figsize=(14, 7))
self.abcd["c"].plot.bar()
def show_d(self):
sns.set(font_scale=1.5)
plt.figure(figsize=(14, 7))
self.abcd["d"].plot.bar()
def show_model_curve(self, exercise):
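        # Plot the modelled response curve of a single exercise for x in [0, 1].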
xs = np.arange(0, 1.1, 0.1)
abcd = self.abcd.loc[exercise]
y = []
for x in xs:
y.append(self._4pl_model(x, abcd))
sns.set(font_scale=1.5)
plt.figure(figsize=(14, 7))
pd.DataFrame(y, xs).plot()
plt.show()
def show_model_curve2(self, exercise):
xs = np.arange(0, 1.1, 0.1)
abcd = self.abcd.loc[exercise]
y = []
for x in xs:
y.append(self._4pl_model(x, abcd))
sns.set(font_scale=1.5)
plt.figure(figsize=(14, 7))
pd.DataFrame(y, xs).plot(xlim=(0, 1), ylim=(0, 1))
plt.show()
def show_all_models(self):
exercises = list(self.data.columns)
for e in exercises:
self.show_model_curve2(e)
if __name__ == "__main__":
#ilias = IliasParser("Zwischentest_3__Wasserkocher_results.xlsx")
#df = ilias._unique_test_results()
#ilias.export_anon("fn")
    #self = ilias # for easier debugging
e = IRT_Plotter("Klausur.xlsx")
#e.show_model_curve("1a")
self=e
```
#### File: ILIAS---Test-Generator/Test_Generator_Module/test_generator_modul_datenbanken_anzeigen.py
```python
from tkinter import ttk
from tkinter import *
from tkinter.ttk import *
import pandas as pd
import sqlite3
import tkinter as tk
from tkscrolledframe import ScrolledFrame  # scrollable frame (scroll bars)
class MainGUI:
def __init__(self, sql_database_name, sql_table_name):
        # self.master = master  (would also require passing (self, master) to __init__)
#conn = sqlite3.connect(sql_database_name + '.db')
conn = sqlite3.connect(sql_database_name)
df = pd.read_sql_query("SELECT *, oid FROM " + sql_table_name, conn)
cursor = conn.cursor()
cursor.execute("SELECT *, oid FROM " + sql_table_name)
self.db_records = cursor.fetchall()
self.db_records_listing = []
for self.db_record in self.db_records:
self.db_records_listing.append(len(self.db_records))
print("Anzahl DB Einträge: " + str(len(self.db_records_listing)))
conn.commit()
conn.close()
win = tk.Tk()
win.title('Datenbank - Anzahl der Einträge: ' + str(len(self.db_records_listing)))
# scrollable Frame
self.sf_database = ScrolledFrame(win, width=500, height=500)
self.sf_database.pack(expand=1, fill="both")
# Create a frame within the ScrolledFrame
self.db_inner_frame = self.sf_database.display_widget(Frame)
#win.resizable(width=0, height=0)
self.tree = ttk.Treeview(self.db_inner_frame, selectmode="browse", height=30)
self.tree.pack(fill="both", expand = 1)
#self.tree.pack(side='left')
#vsb = ttk.Scrollbar(win, orient="vertical", command=self.tree.yview)
#vsb.pack(side='right', fill='y')
#self.tree.configure(yscrollcommand=vsb.set)
hsb = ttk.Scrollbar(win, orient="horizontal", command=self.tree.xview)
hsb.pack(side='bottom', fill='x')
self.tree['show'] = 'headings'
self.tree["columns"] = df.columns.values.tolist()
for i, header in enumerate(df.columns.values.tolist()):
self.tree.column(i, width=100)
self.tree.heading(i, text=header)
for row in df.iterrows():
self.tree.insert("", 'end', values=list(row[1]))
# self.tree.bind("<Button-3>", self.preClick)
# self.tree.bind("<Button-1>", self.onLeft)
#self.tree["displaycolumns"] = df.columns.values.tolist()[0:(len(self.db_records_listing)-7)]
        # show all column entries in the database overview
self.tree["displaycolumns"] = df.columns.values.tolist()
```
#### File: ILIAS---Test-Generator/Test_Generator_Module/test_generator_modul_test_einstellungen.py
```python
from tkinter import *
from tkscrolledframe import ScrolledFrame  # scrollable frame (scroll bars)
import sqlite3
import xml.etree.ElementTree as ET
from datetime import datetime
import datetime
import os
class Test_Einstellungen_GUI:
def __init__(self, project_root_path, test_qti_file_path_output):
        # project root path
self.project_root_path = project_root_path
        # path to the qti (XML) file of the generated test
self.test_qti_file_path_output = test_qti_file_path_output
        # names of the database and its table
self.settings_database = "test_settings_profiles_db.db"
self.settings_database_table = "my_profiles_table"
        # path to the database
self.settings_database_path = os.path.normpath(os.path.join(self.project_root_path, "Test_Generator_Datenbanken", self.settings_database))
# New Window must be "Toplevel" not "Tk()" in order to get Radiobuttons to work properly
self.test_settings_window = Toplevel()
self.test_settings_window.title("Test Einstellungen verwalten")
# Create a ScrolledFrame widget
self.sf_test_settings = ScrolledFrame(self.test_settings_window, width=300,
height=300)
self.sf_test_settings.pack(expand=1, fill="both")
        # Bind the arrow keys and scroll wheel
        ### binding them has no effect here and only raises (negligible) errors
# self.sf_test_settings.bind_arrow_keys(app)
# self.sf_test_settings.bind_scroll_wheel(app)
# Create a frame within the ScrolledFrame
self.test_settings = self.sf_test_settings.display_widget(Frame)
self.frame1 = LabelFrame(self.test_settings, text="Test Einstellungen", padx=5, pady=5)
self.frame1.grid(row=0, column=0, padx=20, pady=10, sticky=NW)
self.frame2 = LabelFrame(self.test_settings, text="Test Einstellungen", padx=5, pady=5)
self.frame2.grid(row=0, column=1, padx=20, pady=10, sticky=NW)
self.frame3 = LabelFrame(self.test_settings, text="Test Einstellungen", padx=5, pady=5)
self.frame3.grid(row=0, column=2, padx=20, pady=10, sticky=NW)
self.res12_min_listbox_label = Label(self.frame1, text="EINSTELLUNGEN DES TESTS",
font=('Helvetica', 10, 'bold'))
self.res12_min_listbox_label.grid(row=0, column=0, sticky=W, padx=10, pady=(20, 0))
self.res90_min_listbox_label = Label(self.frame1, text="Test-Titel")
self.res90_min_listbox_label.grid(row=1, column=0, sticky=W, padx=10)
self.res91_max_listbox_label = Label(self.frame1, text="Beschreibung")
self.res91_max_listbox_label.grid(row=2, column=0, sticky=W, padx=10)
self.res1_max_listbox_label = Label(self.frame1, text="Auswahl der Testfragen")
self.res1_max_listbox_label.grid(row=4, column=0, sticky=W, padx=10)
self.res1_prec_listbox_label = Label(self.frame1, text="Datenschutz")
self.res1_prec_listbox_label.grid(row=7, column=0, sticky=W, padx=10)
self.res1_tol_listbox_label = Label(self.frame1, text="VERFÜGBARKEIT", font=('Helvetica', 10, 'bold'))
self.res1_tol_listbox_label.grid(row=9, column=0, sticky=W, padx=10, pady=(20, 0))
self.res1_points_listbox_label = Label(self.frame1, text="Online --- not working")
self.res1_points_listbox_label.grid(row=10, column=0, sticky=W, padx=10)
self.res13_points_listbox_label = Label(self.frame1,
text="Zeitlich begrenzte Verfügbarkeit --- not working")
self.res13_points_listbox_label.grid(row=11, column=0, sticky=W, padx=10)
self.res22_tol_listbox_label = Label(self.frame1, text="INFORMATIONEN ZUM EINSTIEG",
font=('Helvetica', 10, 'bold'))
self.res22_tol_listbox_label.grid(row=14, column=0, sticky=W, padx=10, pady=(20, 0))
self.res23_points_listbox_label = Label(self.frame1, text="Einleitung")
self.res23_points_listbox_label.grid(row=15, column=0, sticky=W, padx=10)
self.res24_points_listbox_label = Label(self.frame1, text="Testeigenschaften anzeigen")
self.res24_points_listbox_label.grid(row=16, column=0, sticky=W, padx=10)
self.res31_tol_listbox_label = Label(self.frame1, text="DURCHFÜHRUNG: ZUGANG", font=('Helvetica', 10, 'bold'))
self.res31_tol_listbox_label.grid(row=17, column=0, sticky=W, padx=10, pady=(20, 0))
self.test_time_year_label = Label(self.frame1, text="Jahr")
self.test_time_year_label.grid(row=17, column=1, sticky=W)
self.test_time_month_label = Label(self.frame1, text="Mon.")
self.test_time_month_label.grid(row=17, column=1, sticky=W, padx=35)
self.test_time_day_label = Label(self.frame1, text="Tag")
self.test_time_day_label.grid(row=17, column=1, sticky=W, padx=70)
self.test_time_hour_label = Label(self.frame1, text="Std.")
self.test_time_hour_label.grid(row=17, column=1, sticky=W, padx=105)
self.test_time_minute_label = Label(self.frame1, text="Min.")
self.test_time_minute_label.grid(row=17, column=1, sticky=W, padx=140)
self.res32_points_listbox_label = Label(self.frame1, text="Test-Start")
self.res32_points_listbox_label.grid(row=18, column=0, sticky=W, padx=10)
self.res33_points_listbox_label = Label(self.frame1, text="Test-Ende")
self.res33_points_listbox_label.grid(row=19, column=0, sticky=W, padx=10)
self.res34_tol_listbox_label = Label(self.frame1, text="Test-Passwort")
self.res34_tol_listbox_label.grid(row=20, column=0, sticky=W, padx=10)
self.res35_points_listbox_label = Label(self.frame1, text="Nur ausgewählte Teilnehmer")
self.res35_points_listbox_label.grid(row=21, column=0, sticky=W, padx=10)
self.res36_points_listbox_label = Label(self.frame1, text="Anzahl gleichzeitiger Teilnehmer begrenzen")
self.res36_points_listbox_label.grid(row=22, column=0, sticky=W, padx=10)
        self.res37_points_listbox_label = Label(self.frame1, text="Inaktivitätszeit der Teilnehmer (in Sek.)")
self.res37_points_listbox_label.grid(row=23, column=0, sticky=W, padx=30)
self.res41_tol_listbox_label = Label(self.frame1, text="DURCHFÜHRUNG: STEUERUNG TESTDURCHLAUF",
font=('Helvetica', 10, 'bold'))
self.res41_tol_listbox_label.grid(row=24, column=0, sticky=W, padx=10, pady=(20, 0))
self.res42_points_listbox_label = Label(self.frame1, text="Anzahl von Testdurchläufen begrenzen")
self.res42_points_listbox_label.grid(row=25, column=0, sticky=W, padx=10)
self.res43_points_listbox_label = Label(self.frame1, text="Wartezeit zwischen Durchläufen erzwingen")
self.res43_points_listbox_label.grid(row=26, column=0, sticky=W, padx=10)
self.res44_tol_listbox_label = Label(self.frame1, text="Bearbeitungsdauer begrenzen")
self.res44_tol_listbox_label.grid(row=27, column=0, sticky=W, padx=10)
self.res44_tol_listbox_label = Label(self.frame1, text="Bearbeitungsdauer (in Min).")
self.res44_tol_listbox_label.grid(row=28, column=0, sticky=W, padx=30)
self.res44_tol_listbox_label = Label(self.frame1, text="Max. Bearbeitungsdauer für jeden Testlauf zurücksetzen")
self.res44_tol_listbox_label.grid(row=29, column=0, sticky=W, padx=30)
self.res45_points_listbox_label = Label(self.frame1, text="Prüfungsansicht")
self.res45_points_listbox_label.grid(row=30, column=0, sticky=W, padx=10)
self.res45_1_points_listbox_label = Label(self.frame1, text="Titel des Tests")
self.res45_1_points_listbox_label.grid(row=31, column=0, sticky=W, padx=30)
self.res45_2_points_listbox_label = Label(self.frame1, text="Name des Teilnehmers")
self.res45_2_points_listbox_label.grid(row=32, column=0, sticky=W, padx=30)
self.res46_points_listbox_label = Label(self.frame1, text="ILIAS-Prüfungsnummer anzeigen")
self.res46_points_listbox_label.grid(row=33, column=0, sticky=W, padx=10)
self.res51_tol_listbox_label = Label(self.frame2, text="DURCHFÜHRUNG: VERHALTEN DER FRAGE",
font=('Helvetica', 10, 'bold'))
self.res51_tol_listbox_label.grid(row=0, column=2, sticky=W, padx=10, pady=(20, 0))
self.res52_points_listbox_label = Label(self.frame2, text="Anzeige der Fragentitel")
self.res52_points_listbox_label.grid(row=1, column=2, sticky=W, padx=10)
self.res53_points_listbox_label = Label(self.frame2, text="Automatisches speichern")
self.res53_points_listbox_label.grid(row=4, column=2, sticky=W, padx=10)
self.res54_tol_listbox_label = Label(self.frame2, text="Fragen mischen")
self.res54_tol_listbox_label.grid(row=5, column=2, sticky=W, padx=10)
self.res55_points_listbox_label = Label(self.frame2, text="Lösungshinweise")
self.res55_points_listbox_label.grid(row=6, column=2, sticky=W, padx=10)
self.res56_points_listbox_label = Label(self.frame2, text="Direkte Rückmeldung --- not working")
self.res56_points_listbox_label.grid(row=7, column=2, sticky=W, padx=10)
self.res57_tol_listbox_label = Label(self.frame2, text="Teilnehmerantworten")
self.res57_tol_listbox_label.grid(row=8, column=2, sticky=W, padx=10)
self.res58_points_listbox_label = Label(self.frame2, text="Verpflichtende Fragen")
self.res58_points_listbox_label.grid(row=12, column=2, sticky=W, padx=10)
self.res61_tol_listbox_label = Label(self.frame2, text="DURCHFÜHRUNG: FUNKTIONEN FÜR TEILNEHMER",
font=('Helvetica', 10, 'bold'))
self.res61_tol_listbox_label.grid(row=13, column=2, sticky=W, padx=10, pady=(20, 0))
self.res62_points_listbox_label = Label(self.frame2, text="Verwendung vorheriger Lösungen")
self.res62_points_listbox_label.grid(row=14, column=2, sticky=W, padx=10)
self.res63_points_listbox_label = Label(self.frame2, text="\"Test unterbrechen\" anzeigen")
self.res63_points_listbox_label.grid(row=15, column=2, sticky=W, padx=10)
self.res64_tol_listbox_label = Label(self.frame2, text="Nicht beantwortete Fragen")
self.res64_tol_listbox_label.grid(row=16, column=2, sticky=W, padx=10)
self.res65_points_listbox_label = Label(self.frame2, text="Fragenliste und Bearbeitungsstand anzeigen")
self.res65_points_listbox_label.grid(row=18, column=2, sticky=W, padx=10)
self.res66_points_listbox_label = Label(self.frame2, text="Fragen markieren")
self.res66_points_listbox_label.grid(row=19, column=2, sticky=W, padx=10)
self.res71_tol_listbox_label = Label(self.frame2, text="TEST ABSCHLIESSEN", font=('Helvetica', 10, 'bold'))
self.res71_tol_listbox_label.grid(row=20, column=2, sticky=W, padx=10, pady=(20, 0))
self.res72_points_listbox_label = Label(self.frame2, text="Übersicht gegebener Antworten")
self.res72_points_listbox_label.grid(row=21, column=2, sticky=W, padx=10)
self.res73_points_listbox_label = Label(self.frame2, text="Abschließende Bemerkung")
self.res73_points_listbox_label.grid(row=22, column=2, sticky=W, padx=10)
self.res74_tol_listbox_label = Label(self.frame2, text="Weiterleitung")
self.res74_tol_listbox_label.grid(row=23, column=2, sticky=W, padx=10)
self.res75_points_listbox_label = Label(self.frame2, text="Benachrichtigung")
self.res75_points_listbox_label.grid(row=24, column=2, sticky=W, padx=10)
        # --------------------------- DEFINE CHECKBOXES WITH ENTRIES ---------------------------------------
# --------------------------- CHECKBOXES ---------------------------------------
self.var_online = IntVar()
self.check_online = Checkbutton(self.frame1, text="", variable=self.var_online, onvalue=1, offvalue=0)
self.check_online.deselect()
self.check_online.grid(row=10, column=1, sticky=W)
self.var_time_limited = IntVar()
self.time_limited_start_label = Label(self.frame1, text="Start")
self.time_limited_start_day_label = Label(self.frame1, text="Tag")
self.time_limited_start_day_entry = Entry(self.frame1, width=3)
self.time_limited_start_month_label = Label(self.frame1, text="Mo")
self.time_limited_start_month_entry = Entry(self.frame1, width=3)
self.time_limited_start_year_label = Label(self.frame1, text="Jahr")
self.time_limited_start_year_entry = Entry(self.frame1, width=4)
self.time_limited_start_hour_label = Label(self.frame1, text="Std")
self.time_limited_start_hour_entry = Entry(self.frame1, width=3)
self.time_limited_start_minute_label = Label(self.frame1, text="Min")
self.time_limited_start_minute_entry = Entry(self.frame1, width=3)
self.time_limited_end_label = Label(self.frame1, text="Ende")
self.time_limited_end_day_label = Label(self.frame1, text="Tag")
self.time_limited_end_day_entry = Entry(self.frame1, width=3)
self.time_limited_end_month_label = Label(self.frame1, text="Mo")
self.time_limited_end_month_entry = Entry(self.frame1, width=3)
self.time_limited_end_year_label = Label(self.frame1, text="Jahr")
self.time_limited_end_year_entry = Entry(self.frame1, width=4)
self.time_limited_end_hour_label = Label(self.frame1, text="Std")
self.time_limited_end_hour_entry = Entry(self.frame1, width=3)
self.time_limited_end_minute_label = Label(self.frame1, text="Min")
self.time_limited_end_minute_entry = Entry(self.frame1, width=3)
# self.entry.grid(row=11, column=1, sticky=W, padx=20)
self.check_time_limited = Checkbutton(self.frame1, text="", variable=self.var_time_limited, onvalue=1,
offvalue=0,
command=lambda
v=self.var_time_limited: Test_Einstellungen_GUI.show_entry_time_limited_start(
self, v))
self.check_time_limited.deselect()
self.check_time_limited.grid(row=11, column=1, sticky=W)
self.var_introduction = IntVar()
self.check_introduction = Checkbutton(self.frame1, text="", variable=self.var_introduction, onvalue=1,
offvalue=0,
command=lambda
v=self.var_introduction: Test_Einstellungen_GUI.show_introduction_textfield(
self, v))
self.check_introduction.deselect()
self.check_introduction.grid(row=15, column=1, sticky=W)
self.var_test_prop = IntVar()
self.check_test_prop = Checkbutton(self.frame1, text="", variable=self.var_test_prop, onvalue=1, offvalue=0)
self.check_test_prop.deselect()
self.check_test_prop.grid(row=16, column=1, sticky=W)
# self.var_test_password = IntVar()
# self.check_test_password = Checkbutton(self.frame1, text="", variable=self.var_test_password, onvalue=1, offvalue=0)
# self.check_test_password.deselect()
# self.check_test_password.grid(row=20, column=1, sticky=W)
self.var_specific_users = IntVar()
self.check_specific_users = Checkbutton(self.frame1, text="", variable=self.var_specific_users, onvalue=1,
offvalue=0)
self.check_specific_users.deselect()
self.check_specific_users.grid(row=21, column=1, sticky=W)
# self.var_fixed_users = IntVar()
# self.check_fixed_users = Checkbutton(self.frame1, text="", variable=self.var_fixed_users, onvalue=1, offvalue=0)
# self.check_fixed_users.deselect()
# self.check_fixed_users.grid(row=22, column=1, sticky=W)
# self.var_limit_test_runs = IntVar()
# self.check_limit_test_runs = Checkbutton(self.frame1, text="", variable=self.var_limit_test_runs, onvalue=1, offvalue=0)
# self.check_limit_test_runs.deselect()
# self.check_limit_test_runs.grid(row=22, column=1, sticky=W)
# self.var_time_betw_test_runs = IntVar()
# self.check_time_betw_test_runs = Checkbutton(self.frame1, text="", variable=self.var_time_betw_test_runs, onvalue=1, offvalue=0)
# self.check_time_betw_test_runs.deselect()
# self.check_time_betw_test_runs.grid(row=25, column=1, sticky=W)
self.var_processing_time = IntVar()
self.check_processing_time = Checkbutton(self.frame1, text="", variable=self.var_processing_time, onvalue=1,
offvalue=0)
self.check_processing_time.deselect()
self.check_processing_time.grid(row=27, column=1, sticky=W)
self.var_processing_time_reset = IntVar()
self.check_processing_time_reset = Checkbutton(self.frame1, text="", variable=self.var_processing_time_reset,
onvalue=1, offvalue=0)
self.check_processing_time_reset.deselect()
self.check_processing_time_reset.grid(row=29, column=1, sticky=W)
self.var_examview = IntVar()
self.check_examview = Checkbutton(self.frame1, text="", variable=self.var_examview, onvalue=1, offvalue=0)
self.check_examview.deselect()
self.check_examview.grid(row=30, column=1, sticky=W)
self.var_examview_test_title = IntVar()
self.check_examview_test_title = Checkbutton(self.frame1, text="", variable=self.var_examview_test_title,
onvalue=1, offvalue=0)
self.check_examview_test_title.deselect()
self.check_examview_test_title.grid(row=31, column=1, sticky=W)
self.var_examview_user_name = IntVar()
self.check_examview_user_name = Checkbutton(self.frame1, text="", variable=self.var_examview_user_name,
onvalue=1, offvalue=0)
self.check_examview_user_name.deselect()
self.check_examview_user_name.grid(row=32, column=1, sticky=W)
self.var_show_ilias_nr = IntVar()
self.check_show_ilias_nr = Checkbutton(self.frame1, text="", variable=self.var_show_ilias_nr, onvalue=1,
offvalue=0)
self.check_show_ilias_nr.deselect()
self.check_show_ilias_nr.grid(row=33, column=1, sticky=W)
self.var_autosave = IntVar()
self.check_autosave = Checkbutton(self.frame2, text="", variable=self.var_autosave, onvalue=1, offvalue=0,
command=lambda v=self.var_autosave: Test_Einstellungen_GUI.enable_autosave(self,
v))
self.check_autosave_interval_label = Label(self.frame2, text="Speicherintervall (in Sek.):")
self.check_autosave_interval_entry = Entry(self.frame2, width=10)
self.check_autosave.deselect()
self.check_autosave.grid(row=4, column=3, sticky=W)
self.var_mix_questions = IntVar()
self.check_mix_questions = Checkbutton(self.frame2, text="", variable=self.var_mix_questions, onvalue=1,
offvalue=0)
self.check_mix_questions.deselect()
self.check_mix_questions.grid(row=5, column=3, sticky=W)
self.var_show_solution_notes = IntVar()
self.check_show_solution_notes = Checkbutton(self.frame2, text="", variable=self.var_show_solution_notes,
onvalue=1, offvalue=0)
self.check_show_solution_notes.deselect()
self.check_show_solution_notes.grid(row=6, column=3, sticky=W)
self.var_direct_response = IntVar()
self.check_direct_response = Checkbutton(self.frame2, text="", variable=self.var_direct_response, onvalue=1,
offvalue=0)
self.check_direct_response.deselect()
self.check_direct_response.grid(row=7, column=3, sticky=W)
self.var_mandatory_questions = IntVar()
self.check_mandatory_questions = Checkbutton(self.frame2, text="", variable=self.var_mandatory_questions,
onvalue=1, offvalue=0)
self.check_mandatory_questions.deselect()
self.check_mandatory_questions.grid(row=12, column=3, sticky=W)
self.var_use_previous_solution = IntVar()
self.check_use_previous_solution = Checkbutton(self.frame2, text="", variable=self.var_use_previous_solution,
onvalue=1, offvalue=0)
self.check_use_previous_solution.deselect()
self.check_use_previous_solution.grid(row=14, column=3, sticky=W)
self.var_show_test_cancel = IntVar()
self.check_show_test_cancel = Checkbutton(self.frame2, text="", variable=self.var_show_test_cancel, onvalue=1,
offvalue=0)
self.check_show_test_cancel.deselect()
self.check_show_test_cancel.grid(row=15, column=3, sticky=W)
self.var_show_question_list_process_status = IntVar()
self.check_show_question_list_process_status = Checkbutton(self.frame2, text="",
variable=self.var_show_question_list_process_status,
onvalue=1, offvalue=0)
self.check_show_question_list_process_status.deselect()
self.check_show_question_list_process_status.grid(row=18, column=3, sticky=W)
self.var_question_mark = IntVar()
self.check_question_mark = Checkbutton(self.frame2, text="", variable=self.var_question_mark, onvalue=1,
offvalue=0)
self.check_question_mark.deselect()
self.check_question_mark.grid(row=19, column=3, sticky=W)
self.var_overview_answers = IntVar()
self.check_overview_answers = Checkbutton(self.frame2, text="", variable=self.var_overview_answers, onvalue=1,
offvalue=0)
self.check_overview_answers.grid(row=21, column=3, sticky=W)
self.var_show_end_comment = IntVar()
self.check_show_end_comment = Checkbutton(self.frame2, text="", variable=self.var_show_end_comment, onvalue=1,
offvalue=0,
command=lambda
v=self.var_show_end_comment: Test_Einstellungen_GUI.show_concluding_remarks(
self, v))
self.check_show_end_comment.deselect()
self.check_show_end_comment.grid(row=22, column=3, sticky=W)
self.var_forwarding = IntVar()
self.check_forwarding = Checkbutton(self.frame2, text="", variable=self.var_forwarding, onvalue=1, offvalue=0)
self.check_forwarding.deselect()
self.check_forwarding.grid(row=23, column=3, sticky=W)
self.var_notification = IntVar()
self.check_notification = Checkbutton(self.frame2, text="", variable=self.var_notification, onvalue=1,
offvalue=0)
self.check_notification.deselect()
self.check_notification.grid(row=24, column=3, sticky=W)
# --------------------------- RADIO BUTTONS ---------------------------------------
self.select_question = IntVar()
self.select_question.set(0)
self.select_question_radiobtn1 = Radiobutton(self.frame1, text="Fest definierte Fragenauswahl",
variable=self.select_question, value=0)
self.select_question_radiobtn1.grid(row=4, column=1, pady=0, sticky=W) # FIXED_QUEST_SET
self.select_question_radiobtn2 = Radiobutton(self.frame1, text="Zufällige Fragenauswahl",
variable=self.select_question, value=1)
self.select_question_radiobtn2.grid(row=5, column=1, pady=0, sticky=W) # RANDOM_QUEST_SET
self.select_question_radiobtn3 = Radiobutton(self.frame1,
text="Wiedervorlagemodus - alle Fragen eines Fragenpools",
variable=self.select_question, value=2)
self.select_question_radiobtn3.grid(row=6, column=1, pady=0, sticky=W) # DYNAMIC_QUEST_SET
self.select_anonym = IntVar()
self.select_anonym.set(0)
self.select_anonym_radiobtn1 = Radiobutton(self.frame1, text="Testergebnisse ohne Namen",
variable=self.select_anonym, value=0, borderwidth=0,
command=self.select_anonym.get())
self.select_anonym_radiobtn1.grid(row=7, column=1, pady=0, sticky=W)
self.select_anonym_radiobtn2 = Radiobutton(self.frame1, text="Testergebnisse mit Namen",
variable=self.select_anonym, value=1, borderwidth=0,
command=self.select_anonym.get())
self.select_anonym_radiobtn2.grid(row=8, column=1, pady=0, sticky=W)
self.select_show_question_title = IntVar()
self.select_show_question_title.set(0)
self.select_show_question_title_radiobtn1 = Radiobutton(self.frame2, text="Fragentitel und erreichbare Punkte",
variable=self.select_show_question_title, value=0,
borderwidth=0,
command=self.select_show_question_title.get())
self.select_show_question_title_radiobtn1.grid(row=1, column=3, pady=0, sticky=W)
self.select_show_question_title_radiobtn2 = Radiobutton(self.frame2, text="Nur Fragentitel",
variable=self.select_show_question_title, value=1,
borderwidth=0,
command=self.select_show_question_title.get())
self.select_show_question_title_radiobtn2.grid(row=2, column=3, pady=0, sticky=W)
self.select_show_question_title_radiobtn3 = Radiobutton(self.frame2,
text="Weder Fragentitel noch erreichbare Punkte",
variable=self.select_show_question_title, value=2,
borderwidth=0,
command=self.select_show_question_title.get())
self.select_show_question_title_radiobtn3.grid(row=3, column=3, pady=0, sticky=W)
self.select_user_response = IntVar()
self.select_user_response.set(0)
self.select_user_response_radiobtn1 = Radiobutton(self.frame2,
text="Antworten während des Testdurchlaufs nicht festschreiben",
variable=self.select_user_response, value=0, borderwidth=0,
command=self.select_user_response.get())
self.select_user_response_radiobtn1.grid(row=8, column=3, pady=0, sticky=W)
self.select_user_response_radiobtn2 = Radiobutton(self.frame2,
text="Antworten bei Anzeige der Rückmeldung festschreiben",
variable=self.select_user_response, value=1, borderwidth=0,
command=self.select_user_response.get())
self.select_user_response_radiobtn2.grid(row=9, column=3, pady=0, sticky=W)
self.select_user_response_radiobtn3 = Radiobutton(self.frame2,
text="Antworten bei Anzeige der Folgefrage festschreiben",
variable=self.select_user_response, value=2, borderwidth=0,
command=self.select_user_response.get())
self.select_user_response_radiobtn3.grid(row=10, column=3, pady=0, sticky=W)
self.select_user_response_radiobtn4 = Radiobutton(self.frame2,
text="Antworten mit der Anzeige von Rückmeldungen oder der Folgefrage festschreiben",
variable=self.select_user_response, value=3, borderwidth=0,
command=self.select_user_response.get())
self.select_user_response_radiobtn4.grid(row=11, column=3, pady=0, sticky=W)
self.select_not_answered_questions = IntVar()
self.select_not_answered_questions.set(0)
self.select_not_answered_questions_radiobtn1 = Radiobutton(self.frame2,
text="Nicht beantwortete Fragen bleiben an ihrem Platz",
variable=self.select_not_answered_questions, value=0,
borderwidth=0,
command=self.select_not_answered_questions.get())
self.select_not_answered_questions_radiobtn1.grid(row=16, column=3, pady=0, sticky=W)
self.select_not_answered_questions_radiobtn2 = Radiobutton(self.frame2,
text="Nicht beantwortete Fragen werden ans Testende gesschoben",
variable=self.select_not_answered_questions, value=1,
borderwidth=0,
command=self.select_not_answered_questions.get())
self.select_not_answered_questions_radiobtn2.grid(row=17, column=3, pady=0, sticky=W)
# --------------------------- ENTRY BOXES ---------------------------------------
self.titel_entry = Entry(self.frame1, width=47)
self.titel_entry.grid(row=1, column=1)
self.introduction_bar = Scrollbar(self.frame1)
self.introduction_infobox = Text(self.frame1, height=4, width=40, font=('Helvetica', 9))
self.test_start_year_entry = Entry(self.frame1, width=5)
self.test_start_year_entry.grid(row=18, column=1, sticky=W)
self.test_start_year_entry.insert(0, "YYYY")
self.test_start_month_entry = Entry(self.frame1, width=5)
self.test_start_month_entry.grid(row=18, column=1, sticky=W, padx=35)
self.test_start_month_entry.insert(0, "MM")
self.test_start_day_entry = Entry(self.frame1, width=5)
self.test_start_day_entry.grid(row=18, column=1, sticky=W, padx=70)
self.test_start_day_entry.insert(0, "DD")
self.test_start_hour_entry = Entry(self.frame1, width=5)
self.test_start_hour_entry.grid(row=18, column=1, sticky=W, padx=105)
self.test_start_hour_entry.insert(0, "HH")
self.test_start_minute_entry = Entry(self.frame1, width=5)
self.test_start_minute_entry.grid(row=18, column=1, sticky=W, padx=140)
self.test_start_minute_entry.insert(0, "mm")
self.test_end_year_entry = Entry(self.frame1, width=5)
self.test_end_year_entry.grid(row=19, column=1, sticky=W, pady=5)
self.test_end_year_entry.insert(0, "YYYY")
self.test_end_month_entry = Entry(self.frame1, width=5)
self.test_end_month_entry.grid(row=19, column=1, sticky=W, padx=35)
self.test_end_month_entry.insert(0, "MM")
self.test_end_day_entry = Entry(self.frame1, width=5)
self.test_end_day_entry.grid(row=19, column=1, sticky=W, padx=70)
self.test_end_day_entry.insert(0, "DD")
self.test_end_hour_entry = Entry(self.frame1, width=5)
self.test_end_hour_entry.grid(row=19, column=1, sticky=W, padx=105)
self.test_end_hour_entry.insert(0, "HH")
self.test_end_minute_entry = Entry(self.frame1, width=5)
self.test_end_minute_entry.grid(row=19, column=1, sticky=W, padx=140)
self.test_end_minute_entry.insert(0, "mm")
self.test_password_entry = Entry(self.frame1, width=20)
self.test_password_entry.grid(row=20, column=1, sticky=W, pady=3)
self.description_bar = Scrollbar(self.frame1)
self.description_infobox = Text(self.frame1, height=4, width=40, font=('Helvetica', 9))
self.description_bar.grid(row=2, column=2)
self.description_infobox.grid(row=2, column=1, pady=10)
self.description_bar.config(command=self.description_infobox.yview)
self.description_infobox.config(yscrollcommand=self.description_bar.set)
self.limit_users_max_amount_entry = Entry(self.frame1, width=5)
self.limit_users_max_amount_entry.grid(row=22, column=1, sticky=W)
self.inactivity_time_for_users_entry = Entry(self.frame1, width=5)
self.inactivity_time_for_users_entry.grid(row=23, column=1, sticky=W)
self.inactivity_time_for_users_entry.insert(0, "300")
self.limit_test_runs_entry = Entry(self.frame1, width=10)
self.limit_test_runs_entry.grid(row=25, column=1, sticky=W)
self.limit_test_runs_entry.insert(0, "3")
self.limit_time_betw_test_runs_month_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_month_entry.grid(row=26, column=1, sticky=W, pady=5)
self.limit_time_betw_test_runs_month_entry.insert(0, "MM")
self.limit_time_betw_test_runs_day_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_day_entry.grid(row=26, column=1, sticky=W, padx=35)
self.limit_time_betw_test_runs_day_entry.insert(0, "DD")
self.limit_time_betw_test_runs_hour_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_hour_entry.grid(row=26, column=1, sticky=W, padx=70)
self.limit_time_betw_test_runs_hour_entry.insert(0, "HH")
self.limit_time_betw_test_runs_minute_entry = Entry(self.frame1, width=5)
self.limit_time_betw_test_runs_minute_entry.grid(row=26, column=1, sticky=W, padx=105)
self.limit_time_betw_test_runs_minute_entry.insert(0, "mm")
self.limit_processing_time_minutes_entry = Entry(self.frame1, width=5)
self.limit_processing_time_minutes_entry.grid(row=28, column=1, sticky=W)
self.limit_processing_time_minutes_entry.insert(0, "90")
self.concluding_remarks_bar = Scrollbar(self.frame2)
self.concluding_remarks_infobox = Text(self.frame2, height=4, width=40, font=('Helvetica', 9))
self.profile_name_label = Label(self.frame3, text="Speichern unter...")
self.profile_name_label.grid(row=0, column=0)
self.profile_name_entry = Entry(self.frame3, width=15)
self.profile_name_entry.grid(row=0, column=1)
# self.profile_oid_label = Label(self.frame3, text="Choose oid to delete")
# self.profile_oid_label.grid(row=4, column=0)
self.profile_oid_entry = Entry(self.frame3, width=10)
self.profile_oid_entry.grid(row=4, column=1)
self.load_settings_entry = Entry(self.frame3, width=10)
self.load_settings_entry.grid(row=3, column=1)
# self.delete_settings_btn = Button(self.frame3, text="Delete Profile from ID", command=Test_Einstellungen_GUI.profile_save_settings(self))
# self.delete_settings_btn.grid(row=4, column=0)
self.profile_oid_listbox_label = Label(self.frame3, text=" DB\nID")
self.profile_oid_listbox_label.grid(row=1, column=4, sticky=W)
self.profile_name_listbox_label = Label(self.frame3, text="Name")
self.profile_name_listbox_label.grid(row=1, column=5, sticky=W)
self.my_listbox_profile_oid = Listbox(self.frame3, width=5)
self.my_listbox_profile_oid.grid(row=2, column=4, sticky=W)
self.my_listbox_profile_name = Listbox(self.frame3, width=15)
self.my_listbox_profile_name.grid(row=2, column=5, sticky=W)
self.save_settings_btn = Button(self.frame3, text="Speichern", command=lambda: Test_Einstellungen_GUI.profile_save_settings(self))
self.save_settings_btn.grid(row=2, column=0)
self.load_settings_btn = Button(self.frame3, text="Profil laden", command=lambda: Test_Einstellungen_GUI.profile_load_settings(self))
self.load_settings_btn.grid(row=3, column=0)
self.delete_profile_btn = Button(self.frame3, text="Profil löschen", command=lambda: Test_Einstellungen_GUI.profile_delete(self))
self.delete_profile_btn.grid(row=4, column=0)
self.show_profiles_btn = Button(self.frame3, text="Alle gespeicherten Profile anzeigen", command=lambda: Test_Einstellungen_GUI.profile_show_db(self))
self.show_profiles_btn.grid(row=5, column=0)
#self.create_profile_btn = Button(self.frame3, text="Create Profile-Settings", command=lambda: Test_Einstellungen_GUI.create_settings(self))
#self.create_profile_btn.grid(row=6, column=0)
#Test_Einstellungen_GUI.create_settings(self, self.settings_database_path, self.settings_database_table, self.settings_db_profile_name)
def show_entry_time_limited_start(self, var):
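        # Show or hide the start/end date-time entry widgets depending on the
        # "zeitlich begrenzte Verfügbarkeit" checkbox state.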
if var.get() == 0:
self.time_limited_start_label.grid_forget()
self.time_limited_start_year_label.grid_forget()
self.time_limited_start_year_entry.grid_forget()
self.time_limited_start_month_label.grid_forget()
self.time_limited_start_month_entry.grid_forget()
self.time_limited_start_day_label.grid_forget()
self.time_limited_start_day_entry.grid_forget()
self.time_limited_start_hour_label.grid_forget()
self.time_limited_start_hour_entry.grid_forget()
self.time_limited_start_minute_label.grid_forget()
self.time_limited_start_minute_entry.grid_forget()
self.time_limited_end_label.grid_forget()
self.time_limited_end_year_label.grid_forget()
self.time_limited_end_year_entry.grid_forget()
self.time_limited_end_month_label.grid_forget()
self.time_limited_end_month_entry.grid_forget()
self.time_limited_end_day_label.grid_forget()
self.time_limited_end_day_entry.grid_forget()
self.time_limited_end_hour_label.grid_forget()
self.time_limited_end_hour_entry.grid_forget()
self.time_limited_end_minute_label.grid_forget()
self.time_limited_end_minute_entry.grid_forget()
else:
self.time_limited_start_label.grid(row=10, column=1, sticky=W, padx=50)
self.time_limited_start_day_label.grid(row=11, column=1, sticky=W, padx=30)
self.time_limited_start_month_label.grid(row=11, column=1, sticky=W, padx=55)
self.time_limited_start_year_label.grid(row=11, column=1, sticky=W, padx=80)
self.time_limited_start_hour_label.grid(row=11, column=1, sticky=W, padx=110)
self.time_limited_start_minute_label.grid(row=11, column=1, sticky=W, padx=135)
self.time_limited_end_label.grid(row=10, column=1, sticky=E, padx=50)
self.time_limited_end_day_label.grid(row=11, column=1, sticky=E, padx=110)
self.time_limited_end_month_label.grid(row=11, column=1, sticky=E, padx=85)
self.time_limited_end_year_label.grid(row=11, column=1, sticky=E, padx=55)
self.time_limited_end_hour_label.grid(row=11, column=1, sticky=E, padx=30)
self.time_limited_end_minute_label.grid(row=11, column=1, sticky=E, padx=5)
self.time_limited_start_day_entry.grid(row=12, column=1, sticky=W, padx=30)
self.time_limited_start_month_entry.grid(row=12, column=1, sticky=W, padx=55)
self.time_limited_start_year_entry.grid(row=12, column=1, sticky=W, padx=80)
self.time_limited_start_hour_entry.grid(row=12, column=1, sticky=W, padx=110)
self.time_limited_start_minute_entry.grid(row=12, column=1, sticky=W, padx=135)
self.time_limited_end_day_entry.grid(row=12, column=1, sticky=E, padx=110)
self.time_limited_end_month_entry.grid(row=12, column=1, sticky=E, padx=85)
self.time_limited_end_year_entry.grid(row=12, column=1, sticky=E, padx=55)
self.time_limited_end_hour_entry.grid(row=12, column=1, sticky=E, padx=30)
self.time_limited_end_minute_entry.grid(row=12, column=1, sticky=E, padx=5)
def show_introduction_textfield(self, introduction_var):
print(introduction_var.get())
if introduction_var.get() == 0:
self.introduction_bar.grid_forget()
self.introduction_infobox.grid_forget()
else:
self.introduction_bar.grid(row=15, column=1, sticky=E)
self.introduction_infobox.grid(row=15, column=1, padx=30)
self.introduction_bar.config(command=self.introduction_infobox.yview)
self.introduction_infobox.config(yscrollcommand=self.introduction_bar.set)
def enable_autosave(self, var):
if var.get() == 0:
self.check_autosave_interval_entry.grid_forget()
self.check_autosave_interval_label.grid_forget()
else:
self.check_autosave_interval_entry.grid(row=4, column=3, padx=10)
self.check_autosave_interval_label.grid(row=4, column=3, padx=50, sticky=W)
def show_concluding_remarks(self, var):
if var.get() == 0:
self.concluding_remarks_bar.grid_forget()
self.concluding_remarks_infobox.grid_forget()
else:
self.concluding_remarks_bar.grid(row=22, column=3, sticky=E)
self.concluding_remarks_infobox.grid(row=22, column=3, padx=30)
self.concluding_remarks_bar.config(command=self.concluding_remarks_infobox.yview)
self.concluding_remarks_infobox.config(yscrollcommand=self.concluding_remarks_bar.set)
def profile_show_db(self):
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
c.execute("SELECT *, oid FROM " + self.settings_database_table)
profile_records = c.fetchall()
# Clear List Boxes
self.my_listbox_profile_name.delete(0, END)
self.my_listbox_profile_oid.delete(0, END)
# Loop thru Results
for profile_record in profile_records:
self.my_listbox_profile_name.insert(END, profile_record[0])
self.my_listbox_profile_oid.insert(END, profile_record[len(profile_record) - 1])
self.profile_records_len = len(profile_records)
# print(profile_records[len(profile_records)-1])
conn.commit()
conn.close()
print("LOOP THROUGH... SHOW PROFILES!")
def profile_save_settings(self):
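        # Store the current state of all settings widgets as a new row in the SQLite profile
        # table, using named placeholders that match the column names.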
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
# Insert into Table
c.execute(
"INSERT INTO " + self.settings_database_table + " VALUES ("
":profile_name, :entry_description, :radio_select_question, :radio_select_anonymous, :check_online, :check_time_limited, "
":check_introduction, :entry_introduction, :check_test_properties, "
":entry_test_start_year, :entry_test_start_month, :entry_test_start_day, :entry_test_start_hour, :entry_test_start_minute,"
":entry_test_end_year, :entry_test_end_month, :entry_test_end_day, :entry_test_end_hour, :entry_test_end_minute,"
":entry_test_password, :check_specific_users, :entry_limit_users, :entry_user_inactivity, :entry_limit_test_runs,"
":entry_limit_time_betw_test_run_month, :entry_limit_time_betw_test_run_day, :entry_limit_time_betw_test_run_hour, :entry_limit_time_betw_test_run_minute,"
":check_processing_time, :entry_processing_time_in_minutes, :check_processing_time_reset,"
":check_examview, :check_examview_titel, :check_examview_username, :check_show_ilias_nr,"
":radio_select_show_question_title, :check_autosave, :entry_autosave_interval, :check_mix_questions, :check_show_solution_notes, :check_direct_response,"
":radio_select_user_response, :check_mandatory_questions, :check_use_previous_solution, :check_show_test_cancel, :radio_select_not_answered_questions,"
":check_show_question_list_process_status, :check_question_mark, :check_overview_answers, :check_show_end_comment, :entry_end_comment, :check_forwarding, :check_notification)",
{
'profile_name': self.profile_name_entry.get(),
'entry_description': self.description_infobox.get("1.0", 'end-1c'),
'radio_select_question': self.select_question.get(),
'radio_select_anonymous': self.select_anonym.get(),
'check_online': self.var_online.get(),
'check_time_limited': self.var_time_limited.get(),
'check_introduction': self.var_introduction.get(),
'entry_introduction': self.introduction_infobox.get("1.0", 'end-1c'),
'check_test_properties': self.var_test_prop.get(),
'entry_test_start_year': self.test_start_year_entry.get(),
'entry_test_start_month': self.test_start_month_entry.get(),
'entry_test_start_day': self.test_start_day_entry.get(),
'entry_test_start_hour': self.test_start_hour_entry.get(),
'entry_test_start_minute': self.test_start_minute_entry.get(),
'entry_test_end_year': self.test_end_year_entry.get(),
'entry_test_end_month': self.test_end_month_entry.get(),
'entry_test_end_day': self.test_end_day_entry.get(),
'entry_test_end_hour': self.test_end_hour_entry.get(),
'entry_test_end_minute': self.test_end_minute_entry.get(),
'entry_test_password': self.test_password_entry.get(),
'check_specific_users': self.var_specific_users.get(),
'entry_limit_users': self.limit_users_max_amount_entry.get(),
'entry_user_inactivity': self.inactivity_time_for_users_entry.get(),
'entry_limit_test_runs': self.limit_test_runs_entry.get(),
'entry_limit_time_betw_test_run_month': self.limit_time_betw_test_runs_month_entry.get(),
'entry_limit_time_betw_test_run_day': self.limit_time_betw_test_runs_day_entry.get(),
'entry_limit_time_betw_test_run_hour': self.limit_time_betw_test_runs_hour_entry.get(),
'entry_limit_time_betw_test_run_minute': self.limit_time_betw_test_runs_minute_entry.get(),
'check_processing_time': self.var_processing_time.get(),
'entry_processing_time_in_minutes': self.limit_processing_time_minutes_entry.get(),
'check_processing_time_reset': self.var_processing_time_reset.get(),
'check_examview': self.var_examview.get(),
'check_examview_titel': self.var_examview_test_title.get(),
'check_examview_username': self.var_examview_user_name.get(),
'check_show_ilias_nr': self.var_show_ilias_nr.get(),
'radio_select_show_question_title': self.select_show_question_title.get(),
'check_autosave': self.var_autosave.get(),
'entry_autosave_interval': self.check_autosave_interval_entry.get(),
'check_mix_questions': self.var_mix_questions.get(),
'check_show_solution_notes': self.var_show_solution_notes.get(),
'check_direct_response': self.var_direct_response.get(),
'radio_select_user_response': self.select_user_response.get(),
'check_mandatory_questions': self.var_mandatory_questions.get(),
'check_use_previous_solution': self.var_use_previous_solution.get(),
'check_show_test_cancel': self.var_show_test_cancel.get(),
'radio_select_not_answered_questions': self.select_not_answered_questions.get(),
'check_show_question_list_process_status': self.var_show_question_list_process_status.get(),
'check_question_mark': self.var_question_mark.get(),
'check_overview_answers': self.var_overview_answers.get(),
'check_show_end_comment': self.var_show_end_comment.get(),
'entry_end_comment': self.concluding_remarks_infobox.get("1.0", 'end-1c'),
'check_forwarding': self.var_forwarding.get(),
'check_notification': self.var_notification.get()
}
)
conn.commit()
conn.close()
print("GOT VALUES")
def profile_load_settings(self):
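        # Load the profile whose oid is given in load_settings_entry and restore all widgets
        # from the stored (positional) column values.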
print("LOAD")
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
c.execute("SELECT * FROM " + self.settings_database_table + " WHERE oid =" + self.load_settings_entry.get())
profile_records = c.fetchall()
# Loop thru Results
for profile_record in profile_records:
            # restore the profile name (profile_record[0]) into the entry field
            self.profile_name_entry.delete(0, END)
            self.profile_name_entry.insert(0, profile_record[0])
self.description_infobox.delete('1.0', END)
self.description_infobox.insert('1.0', profile_record[1])
self.select_question.set(profile_record[2])
self.select_anonym.set(profile_record[3])
self.var_online.set(profile_record[4])
self.var_time_limited.set(profile_record[5])
self.var_introduction.set(profile_record[6])
self.introduction_infobox.delete('1.0', END)
self.introduction_infobox.insert('1.0', profile_record[7])
self.var_test_prop.set(profile_record[8])
self.test_start_year_entry.delete(0, END)
self.test_start_year_entry.insert(0, profile_record[9])
self.test_start_month_entry.delete(0, END)
self.test_start_month_entry.insert(0, profile_record[10])
self.test_start_day_entry.delete(0, END)
self.test_start_day_entry.insert(0, profile_record[11])
self.test_start_hour_entry.delete(0, END)
self.test_start_hour_entry.insert(0, profile_record[12])
self.test_start_minute_entry.delete(0, END)
self.test_start_minute_entry.insert(0, profile_record[13])
self.test_end_year_entry.delete(0, END)
self.test_end_year_entry.insert(0, profile_record[14])
self.test_end_month_entry.delete(0, END)
self.test_end_month_entry.insert(0, profile_record[15])
self.test_end_day_entry.delete(0, END)
self.test_end_day_entry.insert(0, profile_record[16])
self.test_end_hour_entry.delete(0, END)
self.test_end_hour_entry.insert(0, profile_record[17])
self.test_end_minute_entry.delete(0, END)
self.test_end_minute_entry.insert(0, profile_record[18])
self.test_password_entry.delete(0, END)
self.test_password_entry.insert(0, profile_record[19])
self.var_specific_users.set(profile_record[20])
self.limit_users_max_amount_entry.delete(0, END)
self.limit_users_max_amount_entry.insert(0, profile_record[21])
self.inactivity_time_for_users_entry.delete(0, END)
self.inactivity_time_for_users_entry.insert(0, profile_record[22])
self.limit_test_runs_entry.delete(0, END)
self.limit_test_runs_entry.insert(0, profile_record[23])
self.limit_time_betw_test_runs_month_entry.delete(0, END)
self.limit_time_betw_test_runs_month_entry.insert(0, profile_record[24])
self.limit_time_betw_test_runs_day_entry.delete(0, END)
self.limit_time_betw_test_runs_day_entry.insert(0, profile_record[25])
self.limit_time_betw_test_runs_hour_entry.delete(0, END)
self.limit_time_betw_test_runs_hour_entry.insert(0, profile_record[26])
self.limit_time_betw_test_runs_minute_entry.delete(0, END)
self.limit_time_betw_test_runs_minute_entry.insert(0, profile_record[27])
self.var_processing_time.set(profile_record[28])
self.limit_processing_time_minutes_entry.delete(0, END)
self.limit_processing_time_minutes_entry.insert(0, profile_record[29])
self.var_processing_time_reset.set(profile_record[30])
self.var_examview.set(profile_record[31])
self.var_examview_test_title.set(profile_record[32])
self.var_examview_user_name.set(profile_record[33])
self.var_show_ilias_nr.set(profile_record[34])
self.select_show_question_title.set(profile_record[35])
self.var_autosave.set(profile_record[36])
self.check_autosave_interval_entry.delete(0, END)
self.check_autosave_interval_entry.insert(0, profile_record[37])
self.var_mix_questions.set(profile_record[38])
self.var_show_solution_notes.set(profile_record[39])
self.var_direct_response.set(profile_record[40])
self.select_user_response.set(profile_record[41])
self.var_mandatory_questions.set(profile_record[42])
self.var_use_previous_solution.set(profile_record[43])
self.var_show_test_cancel.set(profile_record[44])
self.select_not_answered_questions.set(profile_record[45])
self.var_show_question_list_process_status.set(profile_record[46])
self.var_question_mark.set(profile_record[47])
self.var_overview_answers.set(profile_record[48])
self.var_show_end_comment.set(profile_record[49])
self.concluding_remarks_infobox.delete('1.0', END)
self.concluding_remarks_infobox.insert('1.0', profile_record[50])
self.var_forwarding.set(profile_record[51])
self.var_notification.set(profile_record[52])
conn.commit()
conn.close()
def profile_delete(self):
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
c.execute("DELETE from " + self.settings_database_table + " WHERE oid= " + self.profile_oid_entry.get())
# self.profile_oid_entry(0, END)
conn.commit()
conn.close()
def profile_delete_last(self):
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
self.profile_oid_entry.insert(0, self.profile_records_len)
c.execute("DELETE from " + self.settings_database_table + " WHERE oid= " + self.profile_oid_entry.get())
print("LAST DB ENTRY DELETED")
# self.profile_oid_entry(0, END)
conn.commit()
conn.close()
    # To create the test settings, the Toplevel (test settings window) must be open
def create_settings(self, settings_database_path, settings_database_table, selected_settings_db_profile_name):
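        # Read the profile selected by name from the settings database, map column names to
        # indices, and patch the test's QTI XML (description, duration, metadata fields).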
self.settings_database_path = settings_database_path
self.settings_database_table = settings_database_table
self.settings_db_profile_name = selected_settings_db_profile_name
print("=======")
print(self.settings_database_path)
print(self.settings_database_table)
print(self.settings_db_profile_name)
print("=======")
        ###################### CREATE DATABASE ENTRIES AND INDEX DICT ###################
        # build a dictionary from two lists
self.settings_db_find_entries = []
self.settings_db_find_indexes = []
self.settings_db_column_names_list = []
self.settings_collection_of_question_titles = []
connect = sqlite3.connect(self.settings_database_path)
cursor = connect.execute('select * from ' + self.settings_database_table)
self.settings_db_column_names_list = list(map(lambda x: x[0], cursor.description))
self.db_column_names_string = ', :'.join(self.settings_db_column_names_list)
self.db_column_names_string = ":" + self.db_column_names_string
for i in range(len(self.settings_db_column_names_list)):
self.settings_db_find_indexes.append(i)
"""
# Durch list(map(lambdax: x[0])) werden die Spaltennamen aus der DB ausgelesen
cursor = conn.execute('select * from ' + self.ff_database_table)
db_column_names_list = list(map(lambda x: x[0], cursor.description))
db_column_names_string = ', :'.join(db_column_names_list)
db_column_names_string = ":" + db_column_names_string
"""
self.settings_db_entry_to_index_dict = dict(
zip((self.settings_db_column_names_list), (self.settings_db_find_indexes)))
connect.commit()
connect.close()
#####
        # connect to the database
conn = sqlite3.connect(self.settings_database_path)
c = conn.cursor()
#c.execute("SELECT * FROM " + self.settings_database_table + " WHERE profile_name =" + self.settings_db_profile_name)
c.execute("SELECT * FROM " + self.settings_database_table)
profile_records = c.fetchall()
# Loop through Results
for profile_record in profile_records:
if profile_record[self.settings_db_entry_to_index_dict["profile_name"]] == self.settings_db_profile_name:
self.profile_name = profile_record[self.settings_db_entry_to_index_dict["profile_name"]]
self.description = profile_record[self.settings_db_entry_to_index_dict["entry_description"]]
self.question_type = profile_record[self.settings_db_entry_to_index_dict["radio_select_question"]]
self.anonym = profile_record[self.settings_db_entry_to_index_dict["radio_select_anonymous"]]
self.online = profile_record[self.settings_db_entry_to_index_dict["check_online"]]
self.time_limited = profile_record[self.settings_db_entry_to_index_dict["check_time_limited"]]
self.introduction = profile_record[self.settings_db_entry_to_index_dict["check_introduction"]]
self.introduction_infobox = profile_record[self.settings_db_entry_to_index_dict["entry_introduction"]]
self.test_prop = profile_record[self.settings_db_entry_to_index_dict["check_test_properties"]]
self.test_start_year = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_year"]]
self.test_start_month = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_month"]]
self.test_start_day = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_day"]]
self.test_start_hour = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_hour"]]
self.test_start_minute = profile_record[self.settings_db_entry_to_index_dict["entry_test_start_minute"]]
self.test_end_year = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_year"]]
self.test_end_month = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_month"]]
self.test_end_day = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_day"]]
self.test_end_hour = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_hour"]]
self.test_end_minute = profile_record[self.settings_db_entry_to_index_dict["entry_test_end_minute"]]
self.test_password = profile_record[self.settings_db_entry_to_index_dict["entry_test_password"]]
self.specific_users = profile_record[self.settings_db_entry_to_index_dict["check_specific_users"]]
self.limit_users_max = profile_record[self.settings_db_entry_to_index_dict["entry_limit_users"]]
self.inactivity_time_for_users = profile_record[self.settings_db_entry_to_index_dict["entry_user_inactivity"]]
self.limit_test_runs = profile_record[self.settings_db_entry_to_index_dict["entry_limit_test_runs"]]
self.limit_time_betw_test_runs_month = profile_record[self.settings_db_entry_to_index_dict["entry_limit_time_betw_test_run_month"]]
self.limit_time_betw_test_runs_day = profile_record[self.settings_db_entry_to_index_dict["entry_limit_time_betw_test_run_day"]]
self.limit_time_betw_test_runs_hour = profile_record[self.settings_db_entry_to_index_dict["entry_limit_time_betw_test_run_hour"]]
self.limit_time_betw_test_runs_minute = profile_record[self.settings_db_entry_to_index_dict["entry_limit_time_betw_test_run_minute"]]
self.processing_time = profile_record[self.settings_db_entry_to_index_dict["check_processing_time"]]
self.limit_processing_time_minutes = profile_record[self.settings_db_entry_to_index_dict["entry_processing_time_in_minutes"]]
self.processing_time_reset = profile_record[self.settings_db_entry_to_index_dict["check_processing_time_reset"]]
self.examview = profile_record[self.settings_db_entry_to_index_dict["check_examview"]]
self.examview_test_title = profile_record[self.settings_db_entry_to_index_dict["check_examview_titel"]]
self.examview_user_name = profile_record[self.settings_db_entry_to_index_dict["check_examview_username"]]
self.show_ilias_nr = profile_record[self.settings_db_entry_to_index_dict["check_show_ilias_nr"]]
self.select_show_question_title = profile_record[self.settings_db_entry_to_index_dict["radio_select_show_question_title"]]
self.autosave = profile_record[self.settings_db_entry_to_index_dict["check_autosave"]]
self.autosave_interval = profile_record[self.settings_db_entry_to_index_dict["entry_autosave_interval"]]
self.mix_questions = profile_record[self.settings_db_entry_to_index_dict["check_mix_questions"]]
self.show_solution_notes = profile_record[self.settings_db_entry_to_index_dict["check_show_solution_notes"]]
self.direct_response = profile_record[self.settings_db_entry_to_index_dict["check_direct_response"]]
self.select_user_response = profile_record[self.settings_db_entry_to_index_dict["radio_select_user_response"]]
self.mandatory_questions = profile_record[self.settings_db_entry_to_index_dict["check_mandatory_questions"]]
self.use_previous_solution = profile_record[self.settings_db_entry_to_index_dict["check_use_previous_solution"]]
self.show_test_cancel = profile_record[self.settings_db_entry_to_index_dict["check_show_test_cancel"]]
self.select_not_answered_questions = profile_record[self.settings_db_entry_to_index_dict["radio_select_not_answered_questions"]]
self.show_question_list_process_status = profile_record[self.settings_db_entry_to_index_dict["check_show_question_list_process_status"]]
self.question_mark = profile_record[self.settings_db_entry_to_index_dict["check_question_mark"]]
self.overview_answers = profile_record[self.settings_db_entry_to_index_dict["check_overview_answers"]]
self.show_end_comment = profile_record[self.settings_db_entry_to_index_dict["check_show_end_comment"]]
self.concluding_remarks_infobox = profile_record[self.settings_db_entry_to_index_dict["entry_end_comment"]]
self.forwarding = profile_record[self.settings_db_entry_to_index_dict["check_forwarding"]]
self.notification = profile_record[self.settings_db_entry_to_index_dict["check_notification"]]
self.mytree = ET.parse(self.test_qti_file_path_output)
self.myroot = self.mytree.getroot()
# hours_from_minutes = str(datetime.timedelta(minutes=int(self.limit_processing_time_minutes)))
self.duration_time = int(self.limit_processing_time_minutes)
self.duration_time_hours = self.duration_time // 60
self.duration_time_minutes = self.duration_time % 60
# Format of duration: P0Y0M0DT1H30M0S
self.duration = "P0Y0M0DT" + str(self.duration_time_hours) + "H" + str(self.duration_time_minutes) + "M0S"
for qticomment in self.myroot.iter('qticomment'):
qticomment.text = self.description
break
for duration in self.myroot.iter('duration'):
duration.text = self.duration
break
questestinterop = ET.Element('questestinterop')
assessment = ET.SubElement(questestinterop, 'assessment')
qticomment = ET.SubElement(assessment, 'qticomment')
qticomment.text = self.description
for qtimetadatafield in self.myroot.iter('qtimetadatafield'):
if qtimetadatafield.find('fieldlabel').text == "anonymity":
qtimetadatafield.find('fieldentry').text = self.anonym
if self.anonym == "":
qtimetadatafield.find('fieldentry').text = "0"
print("NO ENTRY IN <ANONYM>")
if qtimetadatafield.find('fieldlabel').text == "question_set_type":
if self.question_type == 0:
qtimetadatafield.find('fieldentry').text = "FIXED_QUEST_SET"
# print("WRITE FIXED-Question")
elif self.question_type == 1:
qtimetadatafield.find('fieldentry').text = "RANDOM_QUEST_SET"
# print("WRITE RANDOM-Question")
elif self.question_type == 2:
qtimetadatafield.find('fieldentry').text = "DYNAMIC_QUEST_SET"
# print("WRITE DYNAMIC-Question")
else:
qtimetadatafield.find('fieldentry').text = "FIXED_QUEST_SET"
print("NO ENTRY IN <QUESTION_TYPE> ")
# if qtimetadatafield.find('fieldlabel').text == "author":
# qtimetadatafield.find('fieldentry').text = str(Formelfrage.autor_entry.get())
if qtimetadatafield.find('fieldlabel').text == "reset_processing_time":
qtimetadatafield.find('fieldentry').text = str(self.processing_time_reset)
if self.processing_time_reset == "":
qtimetadatafield.find('fieldentry').text = "0"
print("NO ENTRY IN <RESET PROCESSING TIME>")
if qtimetadatafield.find('fieldlabel').text == "password":
qtimetadatafield.find('fieldentry').text = str(self.test_password)
if qtimetadatafield.find('fieldlabel').text == "allowedUsers":
qtimetadatafield.find('fieldentry').text = str(self.limit_users_max)
if qtimetadatafield.find('fieldlabel').text == "allowedUsersTimeGap":
qtimetadatafield.find('fieldentry').text = str(self.inactivity_time_for_users)
if qtimetadatafield.find('fieldlabel').text == "nr_of_tries":
qtimetadatafield.find('fieldentry').text = str(self.limit_test_runs)
if qtimetadatafield.find('fieldlabel').text == "pass_waiting":
qtimetadatafield.find('fieldentry').text = str(self.limit_time_betw_test_runs_month) + ":0" + str(
self.limit_time_betw_test_runs_day) + ":" + str(
self.limit_time_betw_test_runs_hour) + ":" + str(self.limit_time_betw_test_runs_minute) + ":00"
if self.limit_time_betw_test_runs_month == "MM":
qtimetadatafield.find('fieldentry').text = "00:000:00:00:00"
print(
" >WARNING< NO limit_time_betw_test_runs SET.. --> set limit_time to \"00:000:00:00:00\" ")
# Exam view: all three checkboxes (title + view): "7" / two checkboxes (title) = "3" / two checkboxes (name) = "5" / one checkbox = "1" / "0" -> disabled
if qtimetadatafield.find('fieldlabel').text == "kiosk":
# Check the most specific combinations first, otherwise the "3"/"5"/"7" branches are unreachable
if self.examview == 0:
qtimetadatafield.find('fieldentry').text = "0"
elif self.examview == 1 and self.examview_user_name == 1 and self.examview_test_title == 1:
qtimetadatafield.find('fieldentry').text = "7"
elif self.examview == 1 and self.examview_user_name == 1:
qtimetadatafield.find('fieldentry').text = "5"
elif self.examview == 1 and self.examview_test_title == 1:
qtimetadatafield.find('fieldentry').text = "3"
elif self.examview == 1:
qtimetadatafield.find('fieldentry').text = "1"
# if qtimetadatafield.find('fieldlabel').text == "use_previous_answers":
# qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "title_output":
# qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "examid_in_test_pass":
# qtimetadatafield.find('fieldentry').text = "0"
# if qtimetadatafield.find('fieldlabel').text == "show_summary":
# qtimetadatafield.find('fieldentry').text = "0"
if qtimetadatafield.find('fieldlabel').text == "show_cancel":
qtimetadatafield.find('fieldentry').text = str(self.show_test_cancel)
# if qtimetadatafield.find('fieldlabel').text == "show_marker":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "fixed_participants":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "showinfo":
# qtimetadatafield.find('fieldentry').text = "99"
if qtimetadatafield.find('fieldlabel').text == "shuffle_questions":
qtimetadatafield.find('fieldentry').text = str(self.mix_questions)
if qtimetadatafield.find('fieldlabel').text == "processing_time":
# self.minutes = self.limit_processing_time_minutes
hours_from_minutes = str(datetime.timedelta(minutes=int(self.limit_processing_time_minutes)))
print("len_min_to_hours: " + str(hours_from_minutes))
qtimetadatafield.find('fieldentry').text = "0" + hours_from_minutes
if qtimetadatafield.find('fieldlabel').text == "enable_examview":
qtimetadatafield.find('fieldentry').text = str(self.examview)
# if qtimetadatafield.find('fieldlabel').text == "show_examview_pdf":
# qtimetadatafield.find('fieldentry').text = "99"
if qtimetadatafield.find('fieldlabel').text == "starting_time":
qtimetadatafield.find('fieldentry').text = "P" + str(self.test_start_year) + "Y" + str(
self.test_start_month) + "M" + str(self.test_start_day) + "DT" + str(
self.test_start_hour) + "H" + str(self.test_start_minute) + "M" + "0S"
if self.test_start_year == "YYYY":
qtimetadatafield.find('fieldentry').text = "P2020Y1M1DT00H0M0S"
print(" >WARNING< NO STARTING TIME SET.. --> set START to \"P2020Y1M1DT00H0M0S\"")
if qtimetadatafield.find('fieldlabel').text == "ending_time":
qtimetadatafield.find('fieldentry').text = "P" + str(self.test_end_year) + "Y" + str(self.test_end_month) + "M" + str(self.test_end_day) + "DT" + str(self.test_end_hour) + "H" + str(self.test_end_minute) + "M" + "0S"
if self.test_end_year == "YYYY":
qtimetadatafield.find('fieldentry').text = "P2020Y12M30DT00H0M0S"
print(" >WARNING< NO ENDING TIME SET.. --> set END to \"P2020Y12M30DT00H0M0S\"")
if qtimetadatafield.find('fieldlabel').text == "autosave":
qtimetadatafield.find('fieldentry').text = str(self.autosave)
if qtimetadatafield.find('fieldlabel').text == "autosave_ival":
qtimetadatafield.find('fieldentry').text = str(self.autosave_interval)
# if qtimetadatafield.find('fieldlabel').text == "offer_question_hints":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "obligations_enabled":
# qtimetadatafield.find('fieldentry').text = "99"
if qtimetadatafield.find('fieldlabel').text == "enable_processing_time":
qtimetadatafield.find('fieldentry').text = str(self.processing_time)
# if qtimetadatafield.find('fieldlabel').text == "mark_step_0":
# qtimetadatafield.find('fieldentry').text = "99"
# if qtimetadatafield.find('fieldlabel').text == "mark_step_1":
# qtimetadatafield.find('fieldentry').text = "99"
# tree = ET.ElementTree(questestinterop)
# tree.write("WORKED_neuerAnfang.xml")
print("Write Test_Settings to File --- ",self.profile_name)
self.mytree.write(self.test_qti_file_path_output)
print("Create Test WITH Test_settings")
``` |
{
"source": "JohannaMoose/EITN41-Advanced-Websecurity",
"score": 3
} |
#### File: com/company/sob.py
```python
import hashlib
import os
import random
import struct
class SMP(object):
def __init__(self, secret=None):
self.p = 2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919
self.modOrder = (self.p - 1) / 2
self.g = 2
self.match = False
if type(secret) is str:
# Encode the string as a hex value
self.secret = int(secret.encode('hex'), 16)
elif type(secret) is int or type(secret) is long:
self.secret = secret
else:
raise TypeError("Secret must be an int or a string. Got type: " + str(type(secret)))
def step1(self):
self.b2 = createRandomExponent()
self.b3 = createRandomExponent()
self.g2 = pow(self.g, self.b2, self.p)
self.g3 = pow(self.g, self.b3, self.p)
(c1, d1) = self.createLogProof('1', self.b2)
(c2, d2) = self.createLogProof('2', self.b3)
# Send g2a, g3a, c1, d1, c2, d2
return packList(self.g2, self.g3, c1, d1, c2, d2)
def step1ForB(self, buffer):
(g2a, g3a, c1, d1, c2, d2) = unpackList(buffer)
if not self.isValidArgument(g2a) or not self.isValidArgument(g3a):
raise ValueError("Invalid g2a/g3a values")
if not self.checkLogProof('1', g2a, c1, d1):
raise ValueError("Proof 1 check failed")
if not self.checkLogProof('2', g3a, c2, d2):
raise ValueError("Proof 2 check failed")
self.g2a = g2a
self.g3a = g3a
self.b2 = createRandomExponent()
self.b3 = createRandomExponent()
b = createRandomExponent()
self.g2 = pow(self.g, self.b2, self.p)
self.g3 = pow(self.g, self.b3, self.p)
(c3, d3) = self.createLogProof('3', self.b2)
(c4, d4) = self.createLogProof('4', self.b3)
self.gb2 = pow(self.g2a, self.b2, self.p)
self.gb3 = pow(self.g3a, self.b3, self.p)
self.pb = pow(self.gb3, b, self.p)
self.qb = mulm(pow(self.g, b, self.p), pow(self.gb2, self.secret, self.p), self.p)
(c5, d5, d6) = self.createCoordsProof('5', self.gb2, self.gb3, b)
# Sends g2b, g3b, pb, qb, all the c's and d's
return packList(self.g2, self.g3, self.pb, self.qb, c3, d3, c4, d4, c5, d5, d6)
def step3(self, buffer):
(g2b, g3b, pb, qb, c3, d3, c4, d4, c5, d5, d6) = unpackList(buffer)
if not self.isValidArgument(g2b) or not self.isValidArgument(g3b) or \
not self.isValidArgument(pb) or not self.isValidArgument(qb):
raise ValueError("Invalid g2b/g3b/pb/qb values")
if not self.checkLogProof('3', g2b, c3, d3):
raise ValueError("Proof 3 check failed")
if not self.checkLogProof('4', g3b, c4, d4):
raise ValueError("Proof 4 check failed")
self.g2b = g2b
self.g3b = g3b
self.ga2 = pow(self.g2b, self.b2, self.p)
self.ga3 = pow(self.g3b, self.b3, self.p)
if not self.checkCoordsProof('5', c5, d5, d6, self.ga2, self.ga3, pb, qb):
raise ValueError("Proof 5 check failed")
s = createRandomExponent()
self.qb = qb
self.pb = pb
self.pa = pow(self.ga3, s, self.p)
self.qa = mulm(pow(self.g, s, self.p), pow(self.ga2, self.secret, self.p), self.p)
(c6, d7, d8) = self.createCoordsProof('6', self.ga2, self.ga3, s)
inv = self.invm(qb)
self.ra = pow(mulm(self.qa, inv, self.p), self.b3, self.p)
(c7, d9) = self.createEqualLogsProof('7', self.qa, inv, self.b3)
# Sends pa, qa, ra, c6, d7, d8, c7, d9
return packList(self.pa, self.qa, self.ra, c6, d7, d8, c7, d9)
def step4(self, buffer):
(pa, qa, ra, c6, d7, d8, c7, d9) = unpackList(buffer)
if not self.isValidArgument(pa) or not self.isValidArgument(qa) or not self.isValidArgument(ra):
raise ValueError("Invalid pa/qa/ra values")
if not self.checkCoordsProof('6', c6, d7, d8, self.gb2, self.gb3, pa, qa):
raise ValueError("Proof 6 check failed")
if not self.checkEqualLogs('7', c7, d9, self.g3a, mulm(qa, self.invm(self.qb), self.p), ra):
raise ValueError("Proof 7 check failed")
inv = self.invm(self.qb)
rb = pow(mulm(qa, inv, self.p), self.b3, self.p)
(c8, d10) = self.createEqualLogsProof('8', qa, inv, self.b3)
rab = pow(ra, self.b3, self.p)
inv = self.invm(self.pb)
if rab == mulm(pa, inv, self.p):
self.match = True
# Send rb, c8, d10
return packList(rb, c8, d10)
def step5(self, buffer):
(rb, c8, d10) = unpackList(buffer)
if not self.isValidArgument(rb):
raise ValueError("Invalid rb values")
if not self.checkEqualLogs('8', c8, d10, self.g3b, mulm(self.qa, self.invm(self.qb), self.p), rb):
raise ValueError("Proof 8 check failed")
rab = pow(rb, self.b3, self.p)
inv = self.invm(self.pb)
if rab == mulm(self.pa, inv, self.p):
self.match = True
def createLogProof(self, version, x):
randExponent = createRandomExponent()
c = sha256(version + str(pow(self.g, randExponent, self.p)))
d = (randExponent - mulm(x, c, self.modOrder)) % self.modOrder
return (c, d)
def checkLogProof(self, version, g, c, d):
gd = pow(self.g, d, self.p)
gc = pow(g, c, self.p)
gdgc = gd * gc % self.p
return (sha256(version + str(gdgc)) == c)
def createCoordsProof(self, version, g2, g3, r):
r1 = createRandomExponent()
r2 = createRandomExponent()
tmp1 = pow(g3, r1, self.p)
tmp2 = mulm(pow(self.g, r1, self.p), pow(g2, r2, self.p), self.p)
c = sha256(version + str(tmp1) + str(tmp2))
# TODO: make a subm function
d1 = (r1 - mulm(r, c, self.modOrder)) % self.modOrder
d2 = (r2 - mulm(self.secret, c, self.modOrder)) % self.modOrder
return (c, d1, d2)
def checkCoordsProof(self, version, c, d1, d2, g2, g3, p, q):
tmp1 = mulm(pow(g3, d1, self.p), pow(p, c, self.p), self.p)
tmp2 = mulm(mulm(pow(self.g, d1, self.p), pow(g2, d2, self.p), self.p), pow(q, c, self.p), self.p)
cprime = sha256(version + str(tmp1) + str(tmp2))
return (c == cprime)
def createEqualLogsProof(self, version, qa, qb, x):
r = createRandomExponent()
tmp1 = pow(self.g, r, self.p)
qab = mulm(qa, qb, self.p)
tmp2 = pow(qab, r, self.p)
c = sha256(version + str(tmp1) + str(tmp2))
tmp1 = mulm(x, c, self.modOrder)
d = (r - tmp1) % self.modOrder
return (c, d)
def checkEqualLogs(self, version, c, d, g3, qab, r):
tmp1 = mulm(pow(self.g, d, self.p), pow(g3, c, self.p), self.p)
tmp2 = mulm(pow(qab, d, self.p), pow(r, c, self.p), self.p)
cprime = sha256(version + str(tmp1) + str(tmp2))
return (c == cprime)
def invm(self, x):
return pow(x, self.p - 2, self.p)
def isValidArgument(self, val):
return (val >= 2 and val <= self.p - 2)
def packList(*items):
buffer = ''
# For each item in the list, convert it to a byte string and add its length as a prefix
for item in items:
bytes = longToBytes(item)
buffer += struct.pack('!I', len(bytes)) + bytes
return buffer
def unpackList(buffer):
items = []
index = 0
while index < len(buffer):
# Get the length of the long (4 byte int before the actual long)
length = struct.unpack('!I', buffer[index:index+4])[0]
index += 4
# Convert the data back to a long and add it to the list
item = bytesToLong(buffer[index:index+length])
items.append(item)
index += length
return items
def bytesToLong(bytes):
length = len(bytes)
string = 0
for i in range(length):
string += byteToLong(bytes[i:i+1]) << 8*(length-i-1)
return string
def longToBytes(long):
bytes = ''
while long != 0:
bytes = longToByte(long & 0xff) + bytes
long >>= 8
return bytes
def byteToLong(byte):
return struct.unpack('B', byte)[0]
def longToByte(long):
return struct.pack('B', long)
def mulm(x, y, mod):
return x * y % mod
def createRandomExponent():
return random.getrandbits(192*8)
def sha256(message):
return long(hashlib.sha256(str(message)).hexdigest(), 16)
``` |
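A minimal two-party walkthrough may help make the message flow of the `SMP` class above concrete. This is a sketch rather than part of the original repository: it assumes the module is importable as `sob` (per the file header) and, like the module itself, Python 2 semantics (`long`, byte strings).

```python
# Hedged usage sketch for the SMP class above (assumptions: Python 2, module name 'sob').
from sob import SMP

alice = SMP('shared secret')   # both parties must use the same low-entropy secret
bob = SMP('shared secret')

msg1 = alice.step1()           # Alice -> Bob: g2a, g3a + log proofs
msg2 = bob.step1ForB(msg1)     # Bob -> Alice: g2b, g3b, pb, qb + proofs
msg3 = alice.step3(msg2)       # Alice -> Bob: pa, qa, ra + proofs
msg4 = bob.step4(msg3)         # Bob -> Alice: rb + proof; Bob sets bob.match
alice.step5(msg4)              # Alice sets alice.match

print(alice.match, bob.match)  # both True only if the secrets are equal
```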
{
"source": "JohannaMW/potato",
"score": 2
} |
#### File: potato/potato_blog/models.py
```python
from django.db import models
from scaffold import settings
class Comment(models.Model):
body = models.TextField(blank=True, null=True)
title = models.CharField(max_length=120, blank=True, null=True)
def __unicode__(self):
return u"{}".format(self.text)
class Post(models.Model):
created = models.DateField(auto_now_add=True)
title = models.CharField(max_length=120)
category = models.CharField(max_length=120)
body = models.TextField()
author = models.CharField(max_length=200)
comment = models.ForeignKey(Comment, related_name='post', blank=True, null=True)
def __unicode__(self):
return u"{}".format(self.title)
``` |
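A brief, hypothetical ORM sketch of how these models relate; it is not from the original project, and the import path is inferred from the file header.

```python
# Hypothetical usage of the models above, e.g. from `python manage.py shell`.
from potato_blog.models import Comment, Post

comment = Comment.objects.create(title="First!", body="Nice write-up.")
post = Post.objects.create(
    title="Hello potato",
    category="general",
    body="Post body text",
    author="Johanna",
    comment=comment,   # Post holds the FK, so several posts can point at one Comment
)
print(post)            # __unicode__ -> the post title
```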
{
"source": "johannapeltarion/community-code",
"score": 3
} |
#### File: community-code/image_classification/resize_and_bundle_images.py
```python
import os
from glob import glob
import argparse
from PIL import Image
import pandas as pd
import zipfile36 as zipfile
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', default='./data',
help="Directory containing sub-directories with images")
parser.add_argument('--output_path', default='./data/resized',
help="Directory to write output to, should not exist")
parser.add_argument('--zip_filename', default='data.zip',
help="Filename of the output zip bundle file")
parser.add_argument('--new_width', default=300,
help="Width to resize all images to")
parser.add_argument('--new_height', default=200,
help="Height to resize all images to")
def zipdir(path, ziph):
for root, dirs, files in os.walk(path):
for file in files:
# Skip zip archives so the bundle being written does not try to include itself
if file.endswith('.zip'):
continue
ziph.write(os.path.join(root, file))
def main():
args = parser.parse_args()
if os.path.exists(args.output_path):
raise ValueError('Output path already exists', args.output_path)
os.makedirs(args.output_path)
# Include images of type jpg and png
images_full_path = glob(os.path.join(args.input_path, '*', '*.jpg')) \
+ glob(os.path.join(args.input_path, '*', '*.png'))
print("Num images found: ", len(images_full_path))
images = []
classes = []
for i in images_full_path:
print(i)
img_type = i.split('/')[-1].split('.')[-1] # 'jpg' or 'png'
i_rel_path = os.path.join(*i.split('/')[-2:]) # path including 'class/file'
class_name = i_rel_path.split('/')[0]
# Create class directory
if not os.path.exists(os.path.join(args.output_path, class_name)):
os.makedirs(os.path.join(args.output_path, class_name))
# Open image, resize and save in new path
im = Image.open(i)
if im.mode not in ['RGB', 'RGBA']:
continue
im = im.convert('RGB')
new_img = im.resize((int(args.new_width), int(args.new_height)))
new_img_rel_path = i_rel_path.split('.')[0] + "_resized." + img_type
new_img_path = os.path.join(args.output_path, new_img_rel_path)
new_img.save(new_img_path, quality=95)
# Save img relative path and class for index.csv file
images.append(new_img_rel_path)
classes.append(class_name)
# Save index.csv file, one row per image
dataset_index = pd.DataFrame({'image': images, 'class': classes})
dataset_index.to_csv(os.path.join(args.output_path,'index.csv'), index=False)
# Create zip file with index.csv and resized images
zipf = zipfile.ZipFile(os.path.join(args.output_path, args.zip_filename),
'w',
zipfile.ZIP_DEFLATED)
zipdir(args.output_path, zipf)
zipf.close()
if __name__ == '__main__':
main()
```
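A hypothetical invocation of the script above; the paths are placeholders and the flags simply mirror the argparse definitions.

```python
# Hypothetical command line for resize_and_bundle_images.py (paths are placeholders):
#
#   python resize_and_bundle_images.py \
#       --input_path ./data \
#       --output_path ./data/resized \
#       --zip_filename data.zip \
#       --new_width 300 --new_height 200
#
# --output_path must not exist yet; the script creates it, writes the resized
# images into per-class subfolders, an index.csv with one (image, class) row
# per file, and finally the zip bundle.
```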
#### File: community-code/solar_panels/solar_preprocess.py
```python
import pandas as pd
import numpy as np
import glob
import cv2
import os
import shutil
from fire import Fire
from keras.applications import vgg19
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from sklearn.model_selection import train_test_split
def remove_old_output(index_target_file_path, image_target_dir):
try:
os.remove(index_target_file_path)
image_files = glob.glob(os.path.join(image_target_dir,'*'))
for f in image_files:
os.remove(f)
except:
pass
try:
# Create the target directory only if it does not exist yet
if not os.path.isdir(image_target_dir):
os.makedirs(image_target_dir)
except:
print('Cannot create target directory: %s' % (image_target_dir))
exit(1)
def get_csv(index_source_file_path):
return pd.read_csv(index_source_file_path,
delim_whitespace=True,
header = None,
names=["image", "prob", "type"])
def convert_to_rgb(df, root_dir, image_target_dir, target_size):
for index, row in df.iterrows():
image_source_path = os.path.join(root_dir, row['image'])
rgb_image = cv2.imread(image_source_path)
if target_size != 300:
rgb_image = cv2.resize(rgb_image, (target_size, target_size))
image_target_path = os.path.join(image_target_dir, row['image'].split('/')[1])
assert(cv2.imwrite(image_target_path, rgb_image) == True)
print('Converted images to RGB...')
def convert_to_npy(df, root_dir, image_target_dir, target_size):
for index, row in df.iterrows():
image_source_path = os.path.join(root_dir, row['image'])
rgb_image = cv2.imread(image_source_path)
if target_size != 300:
rgb_image = cv2.resize(rgb_image, (target_size, target_size))
np_data = np.array(rgb_image, dtype='f4', order='C')
np_data_target_path = os.path.join(image_target_dir, row['image'].split('/')[1])
np_data_target_path = np_data_target_path.replace('.png', '')
np.save(np_data_target_path + '.npy', np_data)
print('Converted images to NumPy...')
def convert_to_npy_vgg19(df, root_dir, image_target_dir, target_size):
for index, row in df.iterrows():
image_source_path = os.path.join(root_dir, row['image'])
image = load_img(image_source_path, target_size=(target_size, target_size))
numpy_image = img_to_array(image)
image_batch = np.expand_dims(numpy_image, axis=0)
np_data = vgg19.preprocess_input(image_batch.copy())
np_data_target_path = os.path.join(image_target_dir, row['image'].split('/')[1])
np_data_target_path = np_data_target_path.replace('.png', '')
np.save(np_data_target_path + '.npy', np_data[0,:,:,:])
print('Converted images to VGG19 normalized NumPy...')
def save_as_csv(df, index_target_file_path, delimiter=','):
df.to_csv(index_target_file_path, sep=delimiter, encoding='utf-8', index=False)
def create_subsets(df, stratify_on_type):
if stratify_on_type == True:
df["strata"] = df["prob"].map(str) + df["type"]
train_data, validate_data = train_test_split(df, test_size=0.25, random_state=42, stratify=df[['strata']])
train_data = train_data.drop(['strata'], axis=1)
validate_data = validate_data.drop(['strata'], axis=1)
else:
train_data, validate_data = train_test_split(df, test_size=0.25, random_state=42, stratify=df[['prob']])
print('Training samples: ' + str(len(train_data.values)))
print('Validation samples: ' + str(len(validate_data.values)))
train_data.insert(loc=3, column='subset', value='T')
validate_data.insert(loc=3, column='subset', value='V')
return train_data.append(validate_data, ignore_index=True)
def balance_classes(df):
df_sample_training = df[(df['subset'] == 'T') & (df['prob'] == 1.0)]
df_sample_validation = df[(df['subset'] == 'V') & (df['prob'] == 1.0)]
print('Upsampled defects...')
return pd.concat([df, df_sample_training, df_sample_validation], axis=0, sort=False)
def add_binary_label(df):
# Add column for use in classification models
df['prob_binary'] = df['prob'].apply(lambda x: 0 if x==0.0 else 1)
print('Added binary label...')
return df
def add_rotated_samples(df, root_dir, image_target_dir):
for index, row in df.iterrows():
image_source_path = os.path.join(root_dir, row['image'])
image = cv2.imread(image_source_path)
rotated_image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
rotate_filename = 'rot_' + row['image'].split('/')[1]
image_target_path = os.path.join(image_target_dir, rotate_filename)
assert (cv2.imwrite(image_target_path, rotated_image) == True)
df2 = df.copy()
df2['image'].replace({'cell': 'rot_cell'}, inplace=True, regex=True)
print('Added rotated duplicates...')
return pd.concat([df, df2])
def create_zip(archive_base_dir):
shutil.make_archive(base_name=archive_base_dir,
format="zip",
root_dir=archive_base_dir,
base_dir=archive_base_dir)
print('Zip file: %s.zip' % archive_base_dir)
def run(root_dir,
rotate=False,
stratify_on_type=False,
image_as_np=False,
image_as_vgg19=True,
balance=False,
target_size=300):
archive_base_dir = os.path.join(root_dir, 'preprocessed')
image_target_dir = os.path.join(root_dir, 'preprocessed/images')
index_source_file_path = os.path.join(root_dir, 'labels.csv')
index_target_file_path = os.path.join(root_dir, 'preprocessed/index.csv')
remove_old_output(index_target_file_path, image_target_dir)
df = get_csv(index_source_file_path)
if image_as_vgg19 == True:
convert_to_npy_vgg19(df, root_dir, image_target_dir, target_size)
df['image'] = df['image'].str.replace('.png', '.npy')
elif image_as_np == True:
convert_to_npy(df, root_dir, image_target_dir, target_size)
df['image'] = df['image'].str.replace('.png', '.npy')
else:
convert_to_rgb(df, root_dir, image_target_dir, target_size)
df = create_subsets(df, stratify_on_type)
if rotate == True:
df = add_rotated_samples(df, root_dir, image_target_dir)
if balance == True:
df = balance_classes(df)
df = add_binary_label(df)
print('Total samples: ' + str(len(df.values)))
save_as_csv(df, index_target_file_path)
create_zip(archive_base_dir)
print('Done.')
if __name__ == "__main__":
Fire(run)
``` |
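A hypothetical invocation of the Fire-wrapped `run` function above; paths and flag values are placeholders.

```python
# Hypothetical command line for solar_preprocess.py (python-fire exposes run()'s
# parameters as flags; the paths and values below are placeholders):
#
#   python solar_preprocess.py --root_dir ./elpv_data --rotate True --balance True
#
# root_dir is expected to contain labels.csv plus the images it references;
# the script writes root_dir/preprocessed (images + index.csv) and a
# preprocessed.zip archive next to it.
```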
{
"source": "JohannaRahm/microDL",
"score": 3
} |
#### File: micro_dl/cli/dataset_pooling.py
```python
import argparse
import os
import yaml
import pandas as pd
from shutil import copy, copy2
import micro_dl.utils.aux_utils as aux_utils
import micro_dl.utils.meta_utils as meta_utils
def parse_args():
"""Parse command line arguments
In python namespaces are implemented as dictionaries
:return: namespace containing the arguments passed.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--config',
type=str,
help='path to dataset pooling yaml configuration file',
)
args = parser.parse_args()
return args
def pool_dataset(config):
"""
:param str config: path to the yaml config file with pooling options
:return:
"""
config_fname = config
with open(config_fname, 'r') as f:
pool_config = yaml.safe_load(f)
dst_dir = pool_config['destination']
num_workers = pool_config['num_workers']
pool_mode = pool_config['pool_mode']
frames_meta_dst_path = os.path.join(dst_dir, 'frames_meta.csv')
ints_meta_dst_path = os.path.join(dst_dir, 'intensity_meta.csv')
pos_idx_cur = 0
os.makedirs(dst_dir, exist_ok=True)
if os.path.exists(frames_meta_dst_path) and pool_mode == 'add':
frames_meta_dst = pd.read_csv(frames_meta_dst_path, index_col=0)
ints_meta_dst = pd.read_csv(ints_meta_dst_path, index_col=0)
pos_idx_cur = frames_meta_dst['pos_idx'].max() + 1
else:
frames_meta_dst = aux_utils.make_dataframe(nbr_rows=None)
ints_meta_dst = pd.DataFrame()
for src_key in pool_config:
if 'source' in src_key:
src_dir = pool_config[src_key]['dir']
src_pos_ids = pool_config[src_key]['pos_ids']
frames_meta_src = meta_utils.frames_meta_generator(
src_dir,
name_parser=pool_config['name_parser'],
)
ints_meta_src = meta_utils.ints_meta_generator(
src_dir,
name_parser=pool_config['name_parser'],
num_workers=num_workers,
)
if src_pos_ids == 'all':
src_pos_ids = frames_meta_src['pos_idx'].unique()
src_pos_ids.sort()
pos_idx_map = dict(zip(src_pos_ids, range(pos_idx_cur, pos_idx_cur + len(src_pos_ids))))
# select positions to pool and update their indices
frames_meta_src_new = frames_meta_src.copy()
frames_meta_src_new = frames_meta_src_new[frames_meta_src['pos_idx'].isin(src_pos_ids)]
frames_meta_src_new['pos_idx'] = frames_meta_src_new['pos_idx'].map(pos_idx_map)
ints_meta_src_new = ints_meta_src.copy()
ints_meta_src_new = ints_meta_src_new[ints_meta_src['pos_idx'].isin(src_pos_ids)]
ints_meta_src_new['pos_idx'] = ints_meta_src_new['pos_idx'].map(pos_idx_map)
# update file names and copy the files
for row_idx in list(frames_meta_src_new.index):
meta_row = frames_meta_src_new.loc[row_idx]
im_name_dst = aux_utils.get_sms_im_name(
time_idx=meta_row['time_idx'],
channel_name=meta_row['channel_name'],
slice_idx=meta_row['slice_idx'],
pos_idx=meta_row['pos_idx'],
ext='.tif',
)
frames_meta_src_new.loc[row_idx, 'file_name'] = im_name_dst
im_name_src = frames_meta_src.loc[row_idx, 'file_name']
# copy(os.path.join(src_dir, im_name_src),
# os.path.join(dst_dir, im_name_dst))
os.link(os.path.join(src_dir, im_name_src),
os.path.join(dst_dir, im_name_dst))
frames_meta_dst = frames_meta_dst.append(
frames_meta_src_new,
ignore_index=True,
)
ints_meta_dst = ints_meta_dst.append(
ints_meta_src_new,
ignore_index=True,
)
pos_idx_cur = pos_idx_map[src_pos_ids[-1]] + 1
frames_meta_dst.to_csv(frames_meta_dst_path, sep=",")
ints_meta_dst.to_csv(ints_meta_dst_path, sep=",")
if __name__ == '__main__':
args = parse_args()
pool_dataset(args.config)
```
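The pooling script expects a YAML config whose keys can be inferred from `pool_dataset` above. The sketch below builds such a config programmatically; all paths and values are assumptions, not taken from the original repository.

```python
# Hypothetical pooling config for dataset_pooling.py; keys follow pool_dataset()
# above, values and paths are placeholders.
import yaml

pool_config = {
    "destination": "/path/to/pooled_dataset",
    "num_workers": 4,
    "pool_mode": "new",              # anything other than 'add' starts a fresh pool
    "name_parser": "parse_sms_name",
    "source_1": {"dir": "/path/to/dataset_a", "pos_ids": "all"},
    "source_2": {"dir": "/path/to/dataset_b", "pos_ids": [0, 1, 2]},
}

with open("pool_config.yml", "w") as f:
    yaml.safe_dump(pool_config, f)
# Then: python dataset_pooling.py --config pool_config.yml
```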
#### File: micro_dl/cli/metrics_script.py
```python
import argparse
import numpy as np
import os
import pandas as pd
import yaml
import micro_dl.inference.evaluation_metrics as metrics
import micro_dl.utils.aux_utils as aux_utils
import micro_dl.utils.preprocess_utils as preprocess_utils
import micro_dl.utils.image_utils as image_utils
import micro_dl.utils.normalize as normalize
def parse_args():
"""Parse command line arguments
In python namespaces are implemented as dictionaries
:return: namespace containing the arguments passed.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir',
type=str,
required=True,
help='Directory containing model weights, config and csv files',
)
parser.add_argument(
'--model_fname',
type=str,
default=None,
help='File name of weights in model dir (.hdf5). If None grab newest.',
)
parser.add_argument(
'--test_data',
dest='test_data',
action='store_true',
help="Use test indices in split_samples.json",
)
parser.add_argument(
'--all_data',
dest='test_data',
action='store_false',
)
parser.set_defaults(test_data=True)
parser.add_argument(
'--image_dir',
type=str,
required=True,
help="Directory containing target images",
)
parser.add_argument(
'--metrics',
type=str,
required=True,
nargs='*',
help='Metrics for model evaluation'
)
parser.add_argument(
'--orientations',
type=str,
default='xyz',
nargs='*',
help='Evaluate metrics along these orientations (xy, xz, yz, xyz)'
)
parser.add_argument(
'--name_parser',
type=str,
default='parse_sms_name',
help="The function in aux_utils that will parse the file name for indices",
)
return parser.parse_args()
def compute_metrics(model_dir,
image_dir,
metrics_list,
orientations_list,
test_data=True,
name_parser='parse_sms_name'):
"""
Compute specified metrics for given orientations for predictions, which
are assumed to be stored in model_dir/predictions. Targets are stored in
image_dir.
Writes metrics csv files for each orientation in model_dir/predictions.
:param str model_dir: Assumed to contain config, split_samples.json and
subdirectory predictions/
:param str image_dir: Directory containing target images with frames_meta.csv
:param list metrics_list: See inference/evaluation_metrics.py for options
:param list orientations_list: Any subset of {xy, xz, yz, xyz}
(see evaluation_metrics)
:param bool test_data: Uses test indices in split_samples.json,
otherwise all indices
:param str name_parser: Type of name parser (default or parse_idx_from_name)
"""
# Load config file
config_name = os.path.join(model_dir, 'config.yml')
with open(config_name, 'r') as f:
config = yaml.safe_load(f)
preprocess_config = preprocess_utils.get_preprocess_config(config['dataset']['data_dir'])
# Load frames metadata and determine indices
frames_meta = pd.read_csv(os.path.join(image_dir, 'frames_meta.csv'))
if isinstance(metrics_list, str):
metrics_list = [metrics_list]
metrics_inst = metrics.MetricsEstimator(metrics_list=metrics_list)
split_idx_name = config['dataset']['split_by_column']
if test_data:
idx_fname = os.path.join(model_dir, 'split_samples.json')
try:
split_samples = aux_utils.read_json(idx_fname)
test_ids = np.sort(split_samples['test'])
except FileNotFoundError:
print("No split_samples file. Will predict all images in dir.")
test_ids = np.sort(np.unique(frames_meta[split_idx_name]))
else:
test_ids = np.sort(np.unique(frames_meta[split_idx_name]))
# Find other indices to iterate over than split index name
# E.g. if split is position, we also need to iterate over time and slice
test_meta = pd.read_csv(os.path.join(model_dir, 'test_metadata.csv'))
metadata_ids = {split_idx_name: test_ids}
iter_ids = ['slice_idx', 'pos_idx', 'time_idx']
for id in iter_ids:
if id != split_idx_name:
metadata_ids[id] = np.sort(np.unique(test_meta[id]))
# Create image subdirectory to write predicted images
pred_dir = os.path.join(model_dir, 'predictions')
target_channel = config['dataset']['target_channels'][0]
# If network depth is > 3 determine depth margins for +-z
depth = 1
if 'depth' in config['network']:
depth = config['network']['depth']
normalize_im = 'stack'
if 'normalize_im' in preprocess_config:
normalize_im = preprocess_config['normalize_im']
elif 'normalize_im' in preprocess_config['tile']:
normalize_im = preprocess_config['tile']['normalize_im']
# Get channel name and extension for predictions
parse_func = aux_utils.import_object('utils.aux_utils', name_parser, 'function')
pred_fnames = [f for f in os.listdir(pred_dir) if f.startswith('im')]
meta_row = parse_func(pred_fnames[0])
pred_channel = meta_row['channel_idx']
_, ext = os.path.splitext(pred_fnames[0])
if isinstance(orientations_list, str):
orientations_list = [orientations_list]
available_orientations = {'xy', 'xz', 'yz', 'xyz'}
assert set(orientations_list).issubset(available_orientations), \
"Orientations must be subset of {}".format(available_orientations)
fn_mapping = {
'xy': metrics_inst.estimate_xy_metrics,
'xz': metrics_inst.estimate_xz_metrics,
'yz': metrics_inst.estimate_yz_metrics,
'xyz': metrics_inst.estimate_xyz_metrics,
}
metrics_mapping = {
'xy': metrics_inst.get_metrics_xy,
'xz': metrics_inst.get_metrics_xz,
'yz': metrics_inst.get_metrics_yz,
'xyz': metrics_inst.get_metrics_xyz,
}
df_mapping = {
'xy': pd.DataFrame(),
'xz': pd.DataFrame(),
'yz': pd.DataFrame(),
'xyz': pd.DataFrame(),
}
# Iterate over all indices for test data
for time_idx in metadata_ids['time_idx']:
for pos_idx in metadata_ids['pos_idx']:
target_stack = []
pred_stack = []
for slice_idx in metadata_ids['slice_idx']:
im_idx = aux_utils.get_meta_idx(
frames_metadata=frames_meta,
time_idx=time_idx,
channel_idx=target_channel,
slice_idx=slice_idx,
pos_idx=pos_idx,
)
target_fname = os.path.join(
image_dir,
frames_meta.loc[im_idx, 'file_name'],
)
im_target = image_utils.read_image(target_fname)
im_target = im_target.astype(np.float32)
pred_fname = aux_utils.get_im_name(
time_idx=time_idx,
channel_idx=pred_channel,
slice_idx=slice_idx,
pos_idx=pos_idx,
ext=ext,
)
pred_fname = os.path.join(pred_dir, pred_fname)
im_pred = image_utils.read_image(pred_fname)
# Un-zscore the predicted image. Necessary before computing SSIM
# if normalize_im is not None:
# if normalize_im in ['dataset', 'volume', 'slice']:
# zscore_median = frames_meta.loc[im_idx, 'zscore_median']
# zscore_iqr = frames_meta.loc[im_idx, 'zscore_iqr']
# else:
# zscore_median = np.nanmean(im_target)
# zscore_iqr = np.nanstd(im_target)
# im_pred = normalize.unzscore(im_pred, zscore_median, zscore_iqr)
target_stack.append(im_target)
pred_stack.append(im_pred)
target_stack = np.squeeze(np.dstack(target_stack)).astype(np.float32)
pred_stack = np.squeeze(np.stack(pred_stack, axis=-1)).astype(np.float32)
pred_name = "t{}_p{}".format(time_idx, pos_idx)
for orientation in orientations_list:
print('Compute {} metrics...'.format(orientation))
metric_fn = fn_mapping[orientation]
metric_fn(
target=target_stack,
prediction=pred_stack,
pred_name=pred_name,
)
df_mapping[orientation] = df_mapping[orientation].append(
metrics_mapping[orientation](),
ignore_index=True,
)
# Save non-empty dataframes
for orientation in orientations_list:
metrics_df = df_mapping[orientation]
df_name = 'metrics_{}.csv'.format(orientation)
metrics_name = os.path.join(pred_dir, df_name)
metrics_df.to_csv(metrics_name, sep=",", index=False)
if __name__ == '__main__':
args = parse_args()
compute_metrics(
model_dir=args.model_dir,
image_dir=args.image_dir,
metrics_list=args.metrics,
orientations_list=args.orientations,
test_data=args.test_data,
name_parser=args.name_parser,
)
```
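Equivalently to the CLI above, `compute_metrics` can be called directly. This is a sketch: the import path follows the file header and all paths are placeholders.

```python
# Hedged sketch: direct call of compute_metrics() defined above.
from micro_dl.cli.metrics_script import compute_metrics

compute_metrics(
    model_dir="/path/to/model_dir",       # config.yml, split_samples.json, predictions/
    image_dir="/path/to/target_images",   # images + frames_meta.csv
    metrics_list=["ssim", "corr", "mse"],
    orientations_list=["xy", "xyz"],
    test_data=True,                       # restrict to test indices from split_samples.json
)
# Writes metrics_xy.csv and metrics_xyz.csv into model_dir/predictions/
```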
#### File: micro_dl/inference/evaluation_metrics.py
```python
import functools
import numpy as np
import pandas as pd
from skimage.measure import compare_ssim as ssim
import sklearn.metrics
from scipy.stats import pearsonr
def mask_decorator(metric_function):
"""Decorator for masking the metrics
:param function metric_function: a python function that takes target and
prediction arrays as input
:return function wrapper_metric_function: input function that returns
metrics and masked_metrics if mask was passed as param to input function
"""
@functools.wraps(metric_function)
def wrapper_metric_function(**kwargs):
"""Expected inputs cur_target, prediction, mask
:param dict kwargs: with keys target, prediction and mask all of which
are np.arrays
"""
metric = metric_function(target=kwargs['target'],
prediction=kwargs['prediction'])
if 'mask' in kwargs:
mask = kwargs['mask']
cur_target = kwargs['target']
cur_pred = kwargs['prediction']
masked_metric = metric_function(target=cur_target[mask],
prediction=cur_pred[mask])
return [metric, masked_metric]
return metric
return wrapper_metric_function
@mask_decorator
def mse_metric(target, prediction):
"""MSE of target and prediction
:param np.array target: ground truth array
:param np.array prediction: model prediction
:return float mean squared error
"""
return np.mean((target - prediction) ** 2)
@mask_decorator
def mae_metric(target, prediction):
"""MAE of target and prediction
:param np.array target: ground truth array
:param np.array prediction: model prediction
:return float mean absolute error
"""
return np.mean(np.abs(target - prediction))
@mask_decorator
def r2_metric(target, prediction):
"""Coefficient of determination of target and prediction
:param np.array target: ground truth array
:param np.array prediction: model prediction
:return float coefficient of determination
"""
ss_res = np.sum((target - prediction) ** 2)
ss_tot = np.sum((target - np.mean(target)) ** 2)
cur_r2 = 1 - (ss_res / (ss_tot + 1e-8))
return cur_r2
@mask_decorator
def corr_metric(target, prediction):
"""Pearson correlation of target and prediction
:param np.array target: ground truth array
:param np.array prediction: model prediction
:return float Pearson correlation
"""
cur_corr = pearsonr(target.flatten(), prediction.flatten())[0]
return cur_corr
def ssim_metric(target,
prediction,
mask=None,
win_size=21):
"""
Structural similarity index (SSIM) of target and prediction.
Window size is not passed into function so make sure tiles
are never smaller than default win_size.
:param np.array target: ground truth array
:param np.array prediction: model prediction
:param np.array/None mask: Mask
:param int win_size: window size for computing local SSIM
:return float/list ssim and ssim_masked
"""
if mask is None:
cur_ssim = ssim(
target,
prediction,
win_size=win_size,
data_range=target.max() - target.min(),
)
return cur_ssim
else:
cur_ssim, cur_ssim_img = ssim(
target,
prediction,
data_range=target.max() - target.min(),
full=True,
)
cur_ssim_masked = np.mean(cur_ssim_img[mask])
return [cur_ssim, cur_ssim_masked]
def accuracy_metric(target, prediction):
"""Accuracy of binary target and prediction.
Not using mask decorator for binary data evaluation.
:param np.array target: ground truth array
:param np.array prediction: model prediction
:return float Accuracy: Accuracy for binarized data
"""
target_bin = binarize_array(target)
pred_bin = binarize_array(prediction)
return sklearn.metrics.accuracy_score(target_bin, pred_bin)
def dice_metric(target, prediction):
"""Dice similarity coefficient (F1 score) of binary target and prediction.
Reports global metric.
Not using mask decorator for binary data evaluation.
:param np.array target: ground truth array
:param np.array prediction: model prediction
:return float dice: Dice for binarized data
"""
target_bin = binarize_array(target)
pred_bin = binarize_array(prediction)
return sklearn.metrics.f1_score(target_bin, pred_bin, average='micro')
def binarize_array(im):
"""Binarize image
:param np.array im: Prediction or target array
:return np.array im_bin: Flattened and binarized array
"""
im_bin = (im.flatten() / im.max()) > .5
return im_bin.astype(np.uint8)
class MetricsEstimator:
"""Estimate metrics for evaluating a trained model"""
def __init__(self,
metrics_list,
masked_metrics=False):
"""
Init. After instantiating the class you can call metrics estimation
in xy, xz, yz and xyz orientations assuming images are of shape xyz.
The first three indices will iterate over planes whereas xyz will
generate one global 3D metric.
:param list metrics_list: list of strings with name of metrics
Currently available metrics:
'ssim' - Structural similarity index
'corr' - Correlation
'r2' - R squared (coefficient of determination)
'mse' - Mean squared error
'mae' - Mean absolute error
'acc' - Accuracy (for binary data, no masks)
'dice' - Dice similarity coefficient (for binary data, no masks)
:param bool masked_metrics: get the metrics for the masked region
"""
available_metrics = {'ssim', 'corr', 'r2', 'mse', 'mae', 'acc', 'dice'}
assert set(metrics_list).issubset(available_metrics), \
'only ssim, r2, corr, mse, mae, acc, dice are currently supported'
self.metrics_list = metrics_list
self.pd_col_names = metrics_list.copy()
self.masked_metrics = masked_metrics
self.metrics_xyz = None
self.metrics_xy = None
self.metrics_xz = None
self.metrics_yz = None
# No masking for evaluating segmentations (which are masks)
if 'acc' in metrics_list or 'dice' in metrics_list:
assert not self.masked_metrics, \
"Don't use masked metrics if evaluating segmentation"
if self.masked_metrics:
self.pd_col_names.append('vol_frac')
for metric in metrics_list:
cur_col_name = '{}_masked'.format(metric)
self.pd_col_names.append(cur_col_name)
self.pd_col_names.append('pred_name')
self.fn_mapping = {
'mae_metric': mae_metric,
'mse_metric': mse_metric,
'r2_metric': r2_metric,
'corr_metric': corr_metric,
'ssim_metric': ssim_metric,
'acc_metric': accuracy_metric,
'dice_metric': dice_metric,
}
def get_metrics_xyz(self):
"""Return 3D metrics"""
return self.metrics_xyz
def get_metrics_xy(self):
"""Return xy metrics"""
return self.metrics_xy
def get_metrics_xz(self):
"""Return xz metrics"""
return self.metrics_xz
def get_metrics_yz(self):
"""Return yz metrics"""
return self.metrics_yz
@staticmethod
def mask_to_bool(mask):
"""
If mask exists and is not boolean, convert.
Assume mask values == 0 is background
:param np.array mask: Mask
:return np.array mask: Mask with boolean dtype
"""
if mask is not None:
if mask.dtype != 'bool':
mask = mask > 0
return mask
@staticmethod
def assert_input(target,
prediction,
pred_name,
mask=None):
assert isinstance(pred_name, str), \
'more than one pred_name is passed. Only one target-pred pair ' \
'is handled per function call'
assert target.shape == prediction.shape, \
'The shape of target and prediction are not same: {}, {}'.format(
target.shape, prediction.shape
)
assert target.dtype == prediction.dtype, \
'The dtype of target and prediction are not same: {}, {}'.format(
target.dtype, prediction.dtype
)
if mask is not None:
assert target.shape == mask.shape, \
'The shape of target and mask are not same: {}, {}'.format(
target.shape, mask.shape
)
assert mask.dtype == 'bool', 'mask is not boolean'
def compute_metrics_row(self,
target,
prediction,
pred_name,
mask):
"""
Compute one row in metrics dataframe.
:param np.array target: ground truth
:param np.array prediction: model prediction
:param str pred_name: filename used for saving model prediction
:param np.array mask: binary mask with foreground / background
:return: dict metrics_row: a row for a metrics dataframe
"""
metrics_row = dict.fromkeys(self.pd_col_names)
metrics_row['pred_name'] = pred_name
for metric_name in self.metrics_list:
metric_fn_name = '{}_metric'.format(metric_name)
metric_fn = self.fn_mapping[metric_fn_name]
if self.masked_metrics:
cur_metric_list = metric_fn(
target=target,
prediction=prediction,
mask=mask,
)
vol_frac = np.mean(mask)
metrics_row['vol_frac'] = vol_frac
metrics_row[metric_name] = cur_metric_list[0]
metric_name = '{}_masked'.format(metric_name)
metrics_row[metric_name] = cur_metric_list[1]
else:
cur_metric = metric_fn(
target=target,
prediction=prediction,
)
metrics_row[metric_name] = cur_metric
return metrics_row
def estimate_xyz_metrics(self,
target,
prediction,
pred_name,
mask=None):
"""
Estimate 3D metrics for the current input, target pair
:param np.array target: ground truth
:param np.array prediction: model prediction
:param str pred_name: filename used for saving model prediction
:param np.array mask: binary mask with foreground / background
"""
mask = self.mask_to_bool(mask)
self.assert_input(target, prediction, pred_name, mask)
self.metrics_xyz = pd.DataFrame(columns=self.pd_col_names)
metrics_row = self.compute_metrics_row(
target=target,
prediction=prediction,
pred_name=pred_name,
mask=mask,
)
# Append to existing dataframe
self.metrics_xyz = self.metrics_xyz.append(
metrics_row,
ignore_index=True,
)
print('metrics xyz')
print(self.metrics_xyz)
def estimate_xy_metrics(self,
target,
prediction,
pred_name,
mask=None):
"""
Estimate metrics for the current input, target pair
along each xy slice (in plane)
:param np.array target: ground truth
:param np.array prediction: model prediction
:param str/list pred_name: filename(s) used for saving model prediction
:param np.array mask: binary mask with foreground / background
"""
mask = self.mask_to_bool(mask)
self.assert_input(target, prediction, pred_name, mask)
if len(target.shape) == 2:
target = target[..., np.newaxis]
prediction = prediction[..., np.newaxis]
self.metrics_xy = pd.DataFrame(columns=self.pd_col_names)
# Loop through slices
for slice_idx in range(target.shape[2]):
slice_name = "{}_xy{}".format(pred_name, slice_idx)
cur_mask = mask[..., slice_idx] if mask is not None else None
metrics_row = self.compute_metrics_row(
target=target[..., slice_idx],
prediction=prediction[..., slice_idx],
pred_name=slice_name,
mask=cur_mask,
)
# Append to existing dataframe
self.metrics_xy = self.metrics_xy.append(
metrics_row,
ignore_index=True,
)
def estimate_xz_metrics(self,
target,
prediction,
pred_name,
mask=None):
"""
Estimate metrics for the current input, target pair
along each xz slice
:param np.array target: ground truth
:param np.array prediction: model prediction
:param str pred_name: filename used for saving model prediction
:param np.array mask: binary mask with foreground / background
"""
mask = self.mask_to_bool(mask)
self.assert_input(target, prediction, pred_name, mask)
assert len(target.shape) == 3, 'Dataset is assumed to be 3D'
self.metrics_xz = pd.DataFrame(columns=self.pd_col_names)
# Loop through slices
for slice_idx in range(target.shape[0]):
slice_name = "{}_xz{}".format(pred_name, slice_idx)
cur_mask = mask[slice_idx, ...] if mask is not None else None
metrics_row = self.compute_metrics_row(
target=target[slice_idx, ...],
prediction=prediction[slice_idx, ...],
pred_name=slice_name,
mask=cur_mask,
)
# Append to existing dataframe
self.metrics_xz = self.metrics_xz.append(
metrics_row,
ignore_index=True,
)
def estimate_yz_metrics(self,
target,
prediction,
pred_name,
mask=None):
"""
Estimate metrics for the current input, target pair
along each yz slice
:param np.array target: ground truth
:param np.array prediction: model prediction
:param str pred_name: filename used for saving model prediction
:param np.array mask: binary mask with foreground / background
"""
mask = self.mask_to_bool(mask)
self.assert_input(target, prediction, pred_name, mask)
assert len(target.shape) == 3, 'Dataset is assumed to be 3D'
self.metrics_yz = pd.DataFrame(columns=self.pd_col_names)
# Loop through slices
for slice_idx in range(target.shape[1]):
slice_name = "{}_yz{}".format(pred_name, slice_idx)
cur_mask = mask[:, slice_idx, :] if mask is not None else None
metrics_row = self.compute_metrics_row(
target=target[:, slice_idx, :],
prediction=prediction[:, slice_idx, :],
pred_name=slice_name,
mask=cur_mask,
)
# Append to existing dataframe
self.metrics_yz = self.metrics_yz.append(
metrics_row,
ignore_index=True,
)
```
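A small synthetic example of `MetricsEstimator`; this is a sketch, not part of the module, and the import path follows the file header.

```python
# Synthetic sanity check for MetricsEstimator above; arrays are random placeholders.
import numpy as np
from micro_dl.inference.evaluation_metrics import MetricsEstimator

target = np.random.rand(64, 64, 8).astype(np.float32)
prediction = (target + 0.01 * np.random.rand(64, 64, 8)).astype(np.float32)

estimator = MetricsEstimator(metrics_list=['mse', 'mae', 'corr'])

# Per xy-slice metrics: one dataframe row per z slice
estimator.estimate_xy_metrics(target=target, prediction=prediction, pred_name='t0_p0')
print(estimator.get_metrics_xy())

# Single global 3D row
estimator.estimate_xyz_metrics(target=target, prediction=prediction, pred_name='t0_p0')
print(estimator.get_metrics_xyz())
```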
#### File: micro_dl/input/inference_dataset.py
```python
import keras
import numpy as np
import os
import pandas as pd
import micro_dl.utils.aux_utils as aux_utils
import micro_dl.utils.image_utils as image_utils
class InferenceDataSet(keras.utils.Sequence):
"""Dataset class for model inference"""
def __init__(self,
image_dir,
inference_config,
dataset_config,
network_config,
split_col_ids,
preprocess_config=None,
image_format='zyx',
mask_dir=None,
flat_field_dir=None,
crop2base=True):
"""Init
:param str image_dir: dir containing images AND NOT TILES!
:param dict dataset_config: dict with dataset related params
:param dict network_config: dict with network related params
:param tuple split_col_ids: How to split up the dataset for inference:
for frames_meta: (str split column name, list split row indices)
:param str image_format: xyz or zyx format
:param str/None mask_dir: If inference targets are masks stored in a
different directory than the image dir. Assumes the directory contains
a frames_meta.csv containing mask channels (which will be target channels
in the inference config) z, t, p indices matching the ones in image_dir
:param str flat_field_dir: Directory with flat field images
"""
self.image_dir = image_dir
self.target_dir = image_dir
self.frames_meta = aux_utils.read_meta(self.image_dir)
self.flat_field_dir = flat_field_dir
if mask_dir is not None:
self.target_dir = mask_dir
# Append mask meta to frames meta
mask_meta = aux_utils.read_meta(mask_dir)
self.frames_meta = self.frames_meta.append(
mask_meta,
ignore_index=True,
)
# Use only indices selected for inference
(split_col, split_ids) = split_col_ids
meta_ids = self.frames_meta[split_col].isin(split_ids)
self.frames_meta = self.frames_meta[meta_ids]
assert image_format in {'xyz', 'zyx'}, \
"Image format should be xyz or zyx, not {}".format(image_format)
self.image_format = image_format
# Check if model task (regression or segmentation) is specified
self.model_task = 'regression'
if 'model_task' in dataset_config:
self.model_task = dataset_config['model_task']
assert self.model_task in {'regression', 'segmentation'}, \
"Model task must be either 'segmentation' or 'regression'"
normalize_im = 'stack'
if preprocess_config is not None:
if 'normalize' in preprocess_config:
if 'normalize_im' in preprocess_config['normalize']:
normalize_im = preprocess_config['normalize']['normalize_im']
elif 'normalize_im' in preprocess_config:
normalize_im = preprocess_config['normalize_im']
elif 'normalize_im' in preprocess_config['tile']:
normalize_im = preprocess_config['tile']['normalize_im']
self.normalize_im = normalize_im
# assume input and target channels are the same as training if not specified
self.input_channels = dataset_config['input_channels']
self.target_channels = dataset_config['target_channels']
slice_ids = self.frames_meta['slice_idx'].unique()
pos_ids = self.frames_meta['pos_idx'].unique()
time_ids = self.frames_meta['time_idx'].unique()
# overwrite default parameters from train config
if 'dataset' in inference_config:
if 'input_channels' in inference_config['dataset']:
self.input_channels = inference_config['dataset']['input_channels']
if 'target_channels' in inference_config['dataset']:
self.target_channels = inference_config['dataset']['target_channels']
if 'slice_ids' in inference_config['dataset']:
slice_ids = inference_config['dataset']['slice_ids']
if 'pos_ids' in inference_config['dataset']:
pos_ids = inference_config['dataset']['pos_ids']
if 'time_ids' in inference_config['dataset']:
time_ids = inference_config['dataset']['time_ids']
if not set(self.target_channels) <= set(self.frames_meta['channel_idx'].unique()):
raise ValueError('target channels are out of range. Add "mask" to config if target channel is mask')
# get a subset of frames meta for only one channel to easily
# extract indices (pos, time, slice) to iterate over
self.inf_frames_meta = aux_utils.get_sub_meta(
self.frames_meta,
time_ids=time_ids,
pos_ids=pos_ids,
slice_ids=slice_ids,
channel_ids=self.target_channels)
self.depth = 1
self.target_depth = 1
# adjust slice margins if stacktostack or stackto2d
network_cls = network_config['class']
if network_cls in ['UNetStackTo2D', 'UNetStackToStack']:
self.depth = network_config['depth']
self.adjust_slice_indices()
# if Unet2D 4D tensor, remove the singleton dimension, else 5D
self.squeeze = False
if network_cls == 'UNet2D':
self.squeeze = True
self.im_3d = False
if network_cls == 'UNet3D':
self.im_3d = True
self.data_format = 'channels_first'
if 'data_format' in network_config:
self.data_format = network_config['data_format']
# check if sorted values look right
self.inf_frames_meta = self.inf_frames_meta.sort_values(
['pos_idx', 'slice_idx'],
ascending=[True, True],
)
self.inf_frames_meta = self.inf_frames_meta.reset_index(drop=True)
self.num_samples = len(self.inf_frames_meta)
self.crop2base = crop2base
def adjust_slice_indices(self):
"""
Adjust slice indices if model is UNetStackTo2D or UNetStackToStack.
These networks will have a depth > 1.
Adjust inf_frames_meta only as we'll need all the indices to load
stack with depth > 1.
"""
margin = self.depth // 2
# Drop indices on both margins
max_slice_idx = self.inf_frames_meta['slice_idx'].max() + 1
min_slice_idx = self.inf_frames_meta['slice_idx'].min()
drop_idx = list(range(max_slice_idx - margin, max_slice_idx)) + \
list(range(min_slice_idx, min_slice_idx + margin))
df_drop_idx = self.inf_frames_meta.index[
self.inf_frames_meta['slice_idx'].isin(drop_idx),
]
self.inf_frames_meta.drop(df_drop_idx, inplace=True)
# Drop indices below margin
df_drop_idx = self.inf_frames_meta.index[
self.inf_frames_meta['slice_idx'].isin(list(range(margin)))
]
self.inf_frames_meta.drop(df_drop_idx, inplace=True)
def get_iteration_meta(self):
"""
Get the dataframe containing indices for one channel for
inference iterations.
:return pandas Dataframe inf_frames_meta: Metadata and indices for
first target channel
"""
return self.inf_frames_meta
def __len__(self):
"""
Get the total number of samples inference is performed on.
:return int num_samples: Number of inference samples
"""
return self.num_samples
def _get_image(self,
input_dir,
cur_row,
channel_ids,
depth,
normalize_im,
is_mask=False):
"""
Assemble one input or target tensor
:param str input_dir: Directory containing images or targets
:param pd.Series cur_row: Current row in frames_meta
:param int/list channel_ids: Channel indices
:param int depth: Stack depth
:param str/None normalize_im: normalization option for images
:param bool is_mask: whether the images are binary masks
:return np.array (3D / 4D) im_stack: Image stack
"""
im_stack = []
for channel_idx in channel_ids:
flat_field_im = None
if self.flat_field_dir is not None:
assert normalize_im in [None, 'stack'],\
"flat field correction currently only supports " \
"None or 'stack' option for 'normalize_im'"
flat_field_fname = os.path.join(
self.flat_field_dir,
'flat-field_channel-{}.npy'.format(channel_idx)
)
flat_field_im = np.load(flat_field_fname)
# Load image with given indices
im = image_utils.preprocess_imstack(
frames_metadata=self.frames_meta,
input_dir=input_dir,
depth=depth,
time_idx=cur_row['time_idx'],
channel_idx=channel_idx,
slice_idx=cur_row['slice_idx'],
pos_idx=cur_row['pos_idx'],
flat_field_im=flat_field_im,
normalize_im=normalize_im,
)
# Crop image to nearest factor of two in xy
if self.crop2base:
im = image_utils.crop2base(im) # crop_z=self.im_3d)
# Make sure image format is right and squeeze for 2D models
if self.image_format == 'zyx' and len(im.shape) > 2:
im = np.transpose(im, [2, 0, 1])
if self.squeeze:
im = np.squeeze(im)
im_stack.append(im)
# stack for channel dimension
if self.data_format == 'channels_first':
im_stack = np.stack(im_stack)
else:
im_stack = np.stack(im_stack, axis=self.n_dims - 2)
# binarize the target images for segmentation task
if is_mask:
im_stack = im_stack > 0
# Make sure all images have the same dtype
im_stack = im_stack.astype(np.float32)
return im_stack
def __getitem__(self, index):
"""
Get a batch of data, input and target stacks, for inference.
:param int index: Iteration index (looped through linearly in inference)
:return np.array input_stack: Input image stack with dimensionality
matching model
:return np.array target_stack: Target image stack for model inference
"""
# Get indices for current inference iteration
cur_row = self.inf_frames_meta.iloc[index]
# binarize the target images for segmentation task
is_mask = False
# if self.model_task == 'segmentation':
# is_mask = True
# Get input and target stacks for inference
input_stack = self._get_image(
input_dir=self.image_dir,
cur_row=cur_row,
channel_ids=self.input_channels,
depth=self.depth,
normalize_im=self.normalize_im,
)
target_stack = self._get_image(
input_dir=self.target_dir,
cur_row=cur_row,
channel_ids=self.target_channels,
depth=self.target_depth,
normalize_im=None,
is_mask=is_mask,
)
# Add batch dimension
input_stack = np.expand_dims(input_stack, axis=0)
target_stack = np.expand_dims(target_stack, axis=0)
return input_stack, target_stack
```
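A quick way to sanity-check the margin handling in `adjust_slice_indices` above is to replay the same drop logic on a toy metadata table. This is an illustrative sketch only; the column names mirror `inf_frames_meta`, and the depth of 5 is an assumed example value, not taken from any particular network config.
```python
import pandas as pd

# Toy metadata: one position with slices 0..9, as inf_frames_meta would hold them.
inf_frames_meta = pd.DataFrame({
    "pos_idx": [0] * 10,
    "slice_idx": list(range(10)),
})

depth = 5            # assumed UNetStackTo2D depth for illustration
margin = depth // 2  # 2 slices are unusable on each end of the stack

max_slice_idx = inf_frames_meta["slice_idx"].max() + 1
min_slice_idx = inf_frames_meta["slice_idx"].min()
drop_idx = list(range(max_slice_idx - margin, max_slice_idx)) + \
    list(range(min_slice_idx, min_slice_idx + margin))
kept = inf_frames_meta[~inf_frames_meta["slice_idx"].isin(drop_idx)]
print(kept["slice_idx"].tolist())  # [2, 3, 4, 5, 6, 7]
```
Only the centre slices remain as prediction targets, while `frames_meta` keeps every slice so that full input stacks of depth 5 can still be assembled around them.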
#### File: micro_dl/utils/meta_utils.py
```python
import itertools
import micro_dl.utils.aux_utils as aux_utils
import micro_dl.utils.mp_utils as mp_utils
import os
import pandas as pd
import sys
def frames_meta_generator(
input_dir,
order='cztp',
name_parser='parse_sms_name',
):
"""
Generate metadata from file names for preprocessing.
    Will write found data in frames_meta.csv in the input directory.
Assumed default file naming convention is for 'parse_idx_from_name':
dir_name
|
|- im_c***_z***_t***_p***.png
|- im_c***_z***_t***_p***.png
c is channel
z is slice in stack (z)
t is time
p is position (FOV)
Other naming convention for 'parse_sms_name':
img_channelname_t***_p***_z***.tif for parse_sms_name
:param str input_dir: path to input directory containing images
:param str order: Order in which file name encodes cztp
:param str name_parser: Function in aux_utils for parsing indices from file name
"""
parse_func = aux_utils.import_object('utils.aux_utils', name_parser, 'function')
im_names = aux_utils.get_sorted_names(input_dir)
frames_meta = aux_utils.make_dataframe(nbr_rows=len(im_names))
channel_names = []
# Fill dataframe with rows from image names
for i in range(len(im_names)):
kwargs = {"im_name": im_names[i]}
if name_parser == 'parse_idx_from_name':
kwargs["order"] = order
elif name_parser == 'parse_sms_name':
kwargs["channel_names"] = channel_names
meta_row = parse_func(**kwargs)
meta_row['dir_name'] = input_dir
frames_meta.loc[i] = meta_row
# Write metadata
frames_meta_filename = os.path.join(input_dir, 'frames_meta.csv')
frames_meta.to_csv(frames_meta_filename, sep=",")
return frames_meta
def ints_meta_generator(
input_dir,
num_workers=4,
block_size=256,
flat_field_dir=None,
channel_ids=-1,
):
"""
Generate pixel intensity metadata for estimating image normalization
parameters during preprocessing step. Pixels are sub-sampled from the image
    following a grid pattern defined by block_size for efficient estimation of
    the median and interquartile range. Grid sampling is preferred over random
    sampling in this case due to the spatial correlation in images.
    Will write found data in intensity_meta.csv in the input directory.
Assumed default file naming convention is:
dir_name
|
|- im_c***_z***_t***_p***.png
|- im_c***_z***_t***_p***.png
c is channel
z is slice in stack (z)
t is time
p is position (FOV)
Other naming convention is:
img_channelname_t***_p***_z***.tif for parse_sms_name
:param str input_dir: path to input directory containing images
:param int num_workers: number of workers for multiprocessing
:param int block_size: block size for the grid sampling pattern. Default value works
well for 2048 X 2048 images.
:param str flat_field_dir: Directory containing flatfield images
:param list/int channel_ids: Channel indices to process
"""
if block_size is None:
block_size = 256
frames_metadata = aux_utils.read_meta(input_dir)
if not isinstance(channel_ids, list):
# Use all channels
channel_ids = frames_metadata['channel_idx'].unique()
mp_fn_args = []
# Fill dataframe with rows from image names
for i, meta_row in frames_metadata.iterrows():
im_path = os.path.join(input_dir, meta_row['file_name'])
ff_path = None
if flat_field_dir is not None:
channel_idx = meta_row['channel_idx']
if isinstance(channel_idx, (int, float)) and channel_idx in channel_ids:
ff_path = os.path.join(
flat_field_dir,
'flat-field_channel-{}.npy'.format(channel_idx)
)
mp_fn_args.append((im_path, ff_path, block_size, meta_row))
im_ints_list = mp_utils.mp_sample_im_pixels(mp_fn_args, num_workers)
im_ints_list = list(itertools.chain.from_iterable(im_ints_list))
ints_meta = pd.DataFrame.from_dict(im_ints_list)
ints_meta_filename = os.path.join(input_dir, 'intensity_meta.csv')
ints_meta.to_csv(ints_meta_filename, sep=",")
def mask_meta_generator(
input_dir,
num_workers=4,
):
"""
    Generate mask metadata, e.g. foreground fraction, for each mask image.
    The foreground fraction is later used to select which slices contribute
    to the intensity statistics when computing normalization parameters.
    Will write found data in mask_meta.csv in the input directory.
Assumed default file naming convention is:
dir_name
|
|- im_c***_z***_t***_p***.png
|- im_c***_z***_t***_p***.png
c is channel
z is slice in stack (z)
t is time
p is position (FOV)
Other naming convention is:
img_channelname_t***_p***_z***.tif for parse_sms_name
:param str input_dir: path to input directory containing images
:param int num_workers: number of workers for multiprocessing
:return pd.DataFrame mask_meta: Metadata with mask info
"""
frames_metadata = aux_utils.read_meta(input_dir)
mp_fn_args = []
# Fill dataframe with rows from image names
for i, meta_row in frames_metadata.iterrows():
meta_row['dir_name'] = input_dir
im_path = os.path.join(input_dir, meta_row['file_name'])
mp_fn_args.append((im_path, meta_row))
meta_row_list = mp_utils.mp_wrapper(
mp_utils.get_mask_meta_row,
mp_fn_args,
num_workers,
)
mask_meta = pd.DataFrame.from_dict(meta_row_list)
mask_meta_filename = os.path.join(input_dir, 'mask_meta.csv')
mask_meta.to_csv(mask_meta_filename, sep=",")
return mask_meta
def compute_zscore_params(frames_meta,
ints_meta,
input_dir,
normalize_im,
min_fraction=0.99):
"""
Get zscore median and interquartile range
:param pd.DataFrame frames_meta: Dataframe containing all metadata
:param pd.DataFrame ints_meta: Metadata containing intensity statistics
        for each z-slice and foreground fraction for masks
:param str input_dir: Directory containing images
:param None or str normalize_im: normalization scheme for input images
:param float min_fraction: Minimum foreground fraction (in case of masks)
for computing intensity statistics.
:return pd.DataFrame frames_meta: Dataframe containing all metadata
:return pd.DataFrame ints_meta: Metadata containing intensity statistics
        for each z-slice
"""
assert normalize_im in [None, 'slice', 'volume', 'dataset'], \
'normalize_im must be None or "slice" or "volume" or "dataset"'
if normalize_im is None:
# No normalization
frames_meta['zscore_median'] = 0
frames_meta['zscore_iqr'] = 1
        return frames_meta, ints_meta
elif normalize_im == 'dataset':
agg_cols = ['time_idx', 'channel_idx', 'dir_name']
elif normalize_im == 'volume':
agg_cols = ['time_idx', 'channel_idx', 'dir_name', 'pos_idx']
else:
agg_cols = ['time_idx', 'channel_idx', 'dir_name', 'pos_idx', 'slice_idx']
# median and inter-quartile range are more robust than mean and std
ints_meta_sub = ints_meta[ints_meta['fg_frac'] >= min_fraction]
ints_agg_median = \
ints_meta_sub[agg_cols + ['intensity']].groupby(agg_cols).median()
ints_agg_hq = \
ints_meta_sub[agg_cols + ['intensity']].groupby(agg_cols).quantile(0.75)
ints_agg_lq = \
ints_meta_sub[agg_cols + ['intensity']].groupby(agg_cols).quantile(0.25)
ints_agg = ints_agg_median
ints_agg.columns = ['zscore_median']
ints_agg['zscore_iqr'] = ints_agg_hq['intensity'] - ints_agg_lq['intensity']
ints_agg.reset_index(inplace=True)
cols_to_merge = frames_meta.columns[[
col not in ['zscore_median', 'zscore_iqr']
for col in frames_meta.columns]]
frames_meta = pd.merge(
frames_meta[cols_to_merge],
ints_agg,
how='left',
on=agg_cols,
)
if frames_meta['zscore_median'].isnull().values.any():
raise ValueError('Found NaN in normalization parameters. \
min_fraction might be too low or images might be corrupted.')
frames_meta_filename = os.path.join(input_dir, 'frames_meta.csv')
frames_meta.to_csv(frames_meta_filename, sep=",")
cols_to_merge = ints_meta.columns[[
col not in ['zscore_median', 'zscore_iqr']
for col in ints_meta.columns]]
ints_meta = pd.merge(
ints_meta[cols_to_merge],
ints_agg,
how='left',
on=agg_cols,
)
ints_meta['intensity_norm'] = \
(ints_meta['intensity'] - ints_meta['zscore_median']) / \
(ints_meta['zscore_iqr'] + sys.float_info.epsilon)
return frames_meta, ints_meta
```
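The z-scoring that `compute_zscore_params` prepares is a robust variant: images are later centred on the aggregated median and scaled by the interquartile range rather than mean and standard deviation. Below is a minimal sketch of that transform on made-up intensity values (illustration only, not the module's API).
```python
import numpy as np

intensity = np.array([100.0, 110.0, 120.0, 130.0, 5000.0])  # one bright outlier

zscore_median = np.median(intensity)
zscore_iqr = np.percentile(intensity, 75) - np.percentile(intensity, 25)

eps = np.finfo(np.float32).eps
intensity_norm = (intensity - zscore_median) / (zscore_iqr + eps)
print(zscore_median, zscore_iqr, intensity_norm.round(2))
# 120.0 20.0 [ -1.   -0.5   0.    0.5 244. ]
```
Because the median and IQR ignore the outlier, the bulk of the pixel values keeps a sensible scale, which is why these two statistics are aggregated instead of mean and standard deviation.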
#### File: tests/cli/metrics_script_tests.py
```python
import cv2
import nose.tools
import numpy as np
import os
import pandas as pd
from testfixtures import TempDirectory
import unittest
from unittest.mock import patch
import yaml
import micro_dl.cli.metrics_script as metrics_script
import micro_dl.utils.aux_utils as aux_utils
import micro_dl.utils.normalize as normalize
class TestMetricsScript(unittest.TestCase):
def setUp(self):
"""
Set up a directory with some images to generate frames_meta.csv for
"""
self.tempdir = TempDirectory()
self.temp_dir = self.tempdir.path
self.model_dir = os.path.join(self.temp_dir, 'model_dir')
self.pred_dir = os.path.join(self.model_dir, 'predictions')
self.image_dir = os.path.join(self.temp_dir, 'image_dir')
self.tempdir.makedir(self.model_dir)
self.tempdir.makedir(self.pred_dir)
self.tempdir.makedir(self.image_dir)
# Write images
self.time_idx = 5
self.pos_idx = 7
self.im = 1500 * np.ones((30, 20), dtype=np.uint16)
im_add = np.zeros((30, 20), dtype=np.uint16)
im_add[15:, :] = 10
self.ext = '.tif'
# Start frames meta file
self.meta_name = 'frames_meta.csv'
self.frames_meta = aux_utils.make_dataframe()
for c in range(3):
for z in range(5, 10):
im_name = aux_utils.get_im_name(
channel_idx=c,
slice_idx=z,
time_idx=self.time_idx,
pos_idx=self.pos_idx,
ext=self.ext,
)
cv2.imwrite(os.path.join(self.image_dir, im_name), self.im)
if c == 2:
norm_im = normalize.zscore(self.im + im_add).astype(np.float32)
cv2.imwrite(
os.path.join(self.pred_dir, im_name),
norm_im,
)
self.frames_meta = self.frames_meta.append(
aux_utils.parse_idx_from_name(im_name),
ignore_index=True,
)
# Write metadata
self.frames_meta.to_csv(
os.path.join(self.image_dir, self.meta_name),
sep=',',
)
# Write as test metadata in model dir too
self.frames_meta.to_csv(
os.path.join(self.model_dir, 'test_metadata.csv'),
sep=',',
)
# Write split samples
split_idx_fname = os.path.join(self.model_dir, 'split_samples.json')
split_samples = {'test': [5, 6, 7, 8, 9]}
aux_utils.write_json(split_samples, split_idx_fname)
# Write config in model dir
config = {
'dataset': {
'input_channels': [0, 1],
'target_channels': [2],
'split_by_column': 'slice_idx',
'data_dir': self.image_dir
},
'network': {}
}
config_name = os.path.join(self.model_dir, 'config.yml')
with open(config_name, 'w') as outfile:
yaml.dump(config, outfile, default_flow_style=False)
# Write preprocess config
pp_config = {
'normalize_im': 'stack',
}
processing_info = [{'processing_time': 5,
'config': pp_config}]
config_name = os.path.join(self.image_dir, 'preprocessing_info.json')
aux_utils.write_json(processing_info, config_name)
def tearDown(self):
"""
Tear down temporary folder and file structure
"""
TempDirectory.cleanup_all()
nose.tools.assert_equal(os.path.isdir(self.temp_dir), False)
def test_parse_args(self):
with patch('argparse._sys.argv',
['python',
'--model_dir', self.model_dir,
'--image_dir', self.image_dir,
'--metrics', 'ssim', 'corr',
'--orientations', 'xy', 'xz']):
parsed_args = metrics_script.parse_args()
self.assertEqual(parsed_args.model_dir, self.model_dir)
self.assertTrue(parsed_args.test_data)
self.assertEqual(parsed_args.image_dir, self.image_dir)
self.assertListEqual(parsed_args.metrics, ['ssim', 'corr'])
self.assertListEqual(parsed_args.orientations, ['xy', 'xz'])
@nose.tools.raises(BaseException)
def test_parse_args_no_input(self):
with patch('argparse._sys.argv',
['python',
'--model_dir', self.model_dir]):
metrics_script.parse_args()
def test_compute_metrics(self):
metrics_script.compute_metrics(
model_dir=self.model_dir,
image_dir=self.image_dir,
metrics_list=['mse', 'mae'],
orientations_list=['xy', 'xyz'],
name_parser='parse_idx_from_name',
)
metrics_xy = pd.read_csv(os.path.join(self.pred_dir, 'metrics_xy.csv'))
self.assertTupleEqual(metrics_xy.shape, (5, 3))
for i, row in metrics_xy.iterrows():
expected_name = 't5_p7_xy{}'.format(i)
self.assertEqual(row.pred_name, expected_name)
# TODO: Double check values below
# self.assertEqual(row.mse, 1.0)
# self.assertEqual(row.mae, 1.0)
# Same for xyz
metrics_xyz = pd.read_csv(
os.path.join(self.pred_dir, 'metrics_xyz.csv'),
)
self.assertTupleEqual(metrics_xyz.shape, (1, 3))
# self.assertEqual(metrics_xyz.loc[0, 'mse'], 1.0)
# self.assertEqual(metrics_xyz.loc[0, 'mae'], 1.0)
self.assertEqual(metrics_xyz.loc[0, 'pred_name'], 't5_p7')
```
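The test above drives `compute_metrics` directly rather than through the command line. A hedged usage sketch mirroring that call, with placeholder paths (the keyword arguments are copied from the test, not from the script's full signature):
```python
import micro_dl.cli.metrics_script as metrics_script

# model_dir is expected to contain config.yml, split_samples.json,
# test_metadata.csv and a predictions/ folder, as set up in setUp() above.
metrics_script.compute_metrics(
    model_dir="/path/to/model_dir",   # placeholder
    image_dir="/path/to/image_dir",   # placeholder
    metrics_list=["mse", "mae"],
    orientations_list=["xy", "xyz"],
    name_parser="parse_idx_from_name",
)
# Per-slice results land in predictions/metrics_xy.csv, per-volume results
# in predictions/metrics_xyz.csv.
```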
#### File: tests/preprocessing/estimate_flat_field_tests.py
```python
import cv2
import itertools
import nose.tools
import numpy as np
import os
from testfixtures import TempDirectory
import unittest
import micro_dl.preprocessing.estimate_flat_field as flat_field
import micro_dl.utils.aux_utils as aux_utils
class TestEstimateFlatField(unittest.TestCase):
def setUp(self):
"""
Set up directories with input images for flatfield correction
"""
self.tempdir = TempDirectory()
self.temp_path = self.tempdir.path
self.image_dir = self.temp_path
self.output_dir = os.path.join(self.temp_path, 'out_dir')
self.tempdir.makedir(self.output_dir)
# Start frames meta file
self.meta_name = 'frames_meta.csv'
self.frames_meta = aux_utils.make_dataframe()
# Write images
self.time_idx = 0
self.pos_ids = [7, 8]
self.channel_ids = [2, 3]
self.slice_ids = [0, 1, 2]
self.im = 1500 * np.ones((20, 15), dtype=np.uint16)
self.im[10:, 10:] = 3000
for c in self.channel_ids:
for p in self.pos_ids:
for z in self.slice_ids:
im_name = aux_utils.get_im_name(
channel_idx=c,
slice_idx=z,
time_idx=self.time_idx,
pos_idx=p,
)
im = self.im + c * 100
cv2.imwrite(os.path.join(self.temp_path, im_name),
im)
meta_row = aux_utils.parse_idx_from_name(im_name)
meta_row['mean'] = np.nanmean(im)
meta_row['std'] = np.nanstd(im)
self.frames_meta = self.frames_meta.append(
meta_row,
ignore_index=True,
)
# Write metadata
self.frames_meta.to_csv(
os.path.join(self.image_dir, self.meta_name),
sep=',',
)
self.flat_field_dir = os.path.join(
self.output_dir,
'flat_field_images',
)
# Create flatfield class instance
self.flatfield_inst = flat_field.FlatFieldEstimator2D(
input_dir=self.image_dir,
output_dir=self.output_dir,
channel_ids=self.channel_ids,
slice_ids=self.slice_ids,
block_size=5,
)
def tearDown(self):
"""
Tear down temporary folder and file structure
"""
TempDirectory.cleanup_all()
nose.tools.assert_equal(os.path.isdir(self.temp_path), False)
def test_init(self):
"""
Check that an instance was created correctly
"""
self.assertEqual(self.flatfield_inst.input_dir, self.image_dir)
self.assertEqual(self.flatfield_inst.output_dir, self.output_dir)
self.assertEqual(
self.flatfield_inst.flat_field_dir,
self.flat_field_dir,
)
self.assertListEqual(self.flatfield_inst.slice_ids, self.slice_ids)
self.assertListEqual(self.flatfield_inst.channels_ids, self.channel_ids)
self.assertEqual(self.flatfield_inst.block_size, 5)
def test_get_flat_field_dir(self):
ff_dir = self.flatfield_inst.get_flat_field_dir()
self.assertEqual(self.flat_field_dir, ff_dir)
def test_estimate_flat_field(self):
self.flatfield_inst.estimate_flat_field()
flatfields = os.listdir(self.flat_field_dir)
# Make sure list is sorted
flatfields.sort()
for i, c in enumerate(self.channel_ids):
file_name = 'flat-field_channel-{}.npy'.format(c)
self.assertEqual(flatfields[i], file_name)
ff = np.load(os.path.join(self.flat_field_dir, file_name))
self.assertLessEqual(ff.max(), 5.)
self.assertLessEqual(0.1, ff.min())
self.assertTupleEqual(ff.shape, self.im.shape)
def test_sample_block_medians(self):
coords, vals = self.flatfield_inst.sample_block_medians(
im=self.im,
)
# Image shape is 20 x 15, so center coordinates will be:
xc = [2, 7, 12, 17]
yc = [2, 7, 12]
coord_iterator = itertools.product(yc, xc)
# Check that generated center coords are correct
for i, (y, x) in enumerate(coord_iterator):
self.assertEqual(x, coords[i, 0])
self.assertEqual(y, coords[i, 1])
# Check that values are correct
# all should be 1500 except the last 2
expected_vals = [1500] * 10 + [3000] * 2
self.assertListEqual(list(vals), expected_vals)
@nose.tools.raises(AssertionError)
def test_sample_wrong_size_block_medians(self):
self.flatfield_inst.block_size = 15
coords, vals = self.flatfield_inst.sample_block_medians(
im=self.im,
)
def test_get_flatfield(self):
test_im = np.zeros((30, 20), np.uint8) + 100
test_im[:, 10:] = 200
flatfield = self.flatfield_inst.get_flatfield(test_im)
self.assertTupleEqual(flatfield.shape, (30, 20))
self.assertLessEqual(flatfield.max(), 2)
self.assertLessEqual(0.1, flatfield.min())
def test_get_flatfield_no_norm(self):
test_im = np.zeros((30, 20), np.uint8) + 100
test_im[:, 10:] = 200
flatfield = self.flatfield_inst.get_flatfield(
im=test_im,
normalize=False,
)
self.assertTupleEqual(flatfield.shape, (30, 20))
self.assertLessEqual(flatfield.max(), 250)
self.assertLessEqual(50, flatfield.min())
@nose.tools.raises(AssertionError)
def test_get_flatfield_small_im(self):
test_im = np.zeros((10, 15), np.uint8) + 100
flatfield = self.flatfield_inst.get_flatfield(test_im)
@nose.tools.raises(ValueError)
def test_get_flatfield_neg_values(self):
test_im = np.zeros((30, 20), np.int)
test_im[15:, 5:] = -100
flatfield = self.flatfield_inst.get_flatfield(test_im)
``` |
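For context, the flat-field image these tests validate is a smooth, roughly unit-mean gain map; correction is typically applied by dividing the raw image by it. A toy sketch of that idea (illustration only, not the estimator's actual fitting code):
```python
import numpy as np

# Raw image with a smooth shading gradient (brighter towards the right).
raw = np.tile(np.linspace(800.0, 1200.0, 20), (30, 1))

# A stand-in flat-field, normalized to mean ~1 as the tests above expect
# (values end up roughly within [0.1, 2]).
flatfield = raw / raw.mean()

corrected = raw / flatfield
print(round(float(corrected.std()), 6))  # ~0.0: the shading is gone
```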
{
"source": "JohannaRahm/picasso",
"score": 2
} |
#### File: picasso/gui/average3.py
```python
import os.path
import sys
import traceback
import colorsys
import matplotlib.pyplot as plt
import numba
import numpy as np
import scipy
from scipy import signal
from PyQt5 import QtCore, QtGui, QtWidgets
from .. import io, lib, render
from numpy.lib.recfunctions import stack_arrays
from cmath import rect, phase
from tqdm import tqdm
import scipy.ndimage.filters
DEFAULT_OVERSAMPLING = 1.0
INITIAL_REL_MAXIMUM = 2.0
ZOOM = 10 / 7
N_GROUP_COLORS = 8
@numba.jit(nopython=True, nogil=True)
def render_hist(x, y, oversampling, t_min, t_max):
n_pixel = int(np.ceil(oversampling * (t_max - t_min)))
in_view = (x > t_min) & (y > t_min) & (x < t_max) & (y < t_max)
x = x[in_view]
y = y[in_view]
x = oversampling * (x - t_min)
y = oversampling * (y - t_min)
image = np.zeros((n_pixel, n_pixel), dtype=np.float32)
render._fill(image, x, y)
return len(x), image
@numba.jit(nopython=True, nogil=True)
def render_histxyz(a, b, oversampling, a_min, a_max, b_min, b_max):
n_pixel_a = int(np.ceil(oversampling * (a_max - a_min)))
n_pixel_b = int(np.ceil(oversampling * (b_max - b_min)))
in_view = (a > a_min) & (b > b_min) & (a < a_max) & (b < b_max)
a = a[in_view]
b = b[in_view]
a = oversampling * (a - a_min)
b = oversampling * (b - b_min)
image = np.zeros((n_pixel_b, n_pixel_a), dtype=np.float32)
render._fill(image, a, b)
return len(a), image
def rotate_axis(axis, vx, vy, vz, angle, pixelsize):
if axis == "z":
vx_rot = np.cos(angle) * vx - np.sin(angle) * vy
vy_rot = np.sin(angle) * vx + np.cos(angle) * vy
vz_rot = vz
elif axis == "y":
vx_rot = np.cos(angle) * vx + np.sin(angle) * np.divide(vz, pixelsize)
vy_rot = vy
vz_rot = -np.sin(angle) * vx * pixelsize + np.cos(angle) * vz
elif axis == "x":
vx_rot = vx
vy_rot = np.cos(angle) * vy - np.sin(angle) * np.divide(vz, pixelsize)
vz_rot = np.sin(angle) * vy * pixelsize + np.cos(angle) * vz
return vx_rot, vy_rot, vz_rot
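# Hedged usage note (not part of the original module): rotating the point
# (x, y, z) = (1, 0, 0) by 90 degrees about the z axis,
# rotate_axis("z", 1.0, 0.0, 0.0, np.pi / 2, 130), returns roughly
# (0.0, 1.0, 0.0). For rotations about x or y, pixelsize converts z (in nm)
# into camera pixels before mixing it with x or y, and back again afterwards.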
def compute_xcorr(CF_image_avg, image):
F_image = np.fft.fft2(image)
xcorr = np.fft.fftshift(np.real(np.fft.ifft2((F_image * CF_image_avg))))
return xcorr
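# Hedged sketch (not part of the original module): compute_xcorr expects the
# conjugated FFT of a reference image (CF_image_avg elsewhere in this file);
# the fftshifted correlation peak, measured from the image centre, gives the
# integer (dy, dx) shift that best aligns another image with that reference.
def _example_shift_from_xcorr(reference, image):
    """Illustration only: return the (dy, dx) shift maximising compute_xcorr."""
    CF_reference = np.conj(np.fft.fft2(reference))
    xcorr = compute_xcorr(CF_reference, image)
    peak_y, peak_x = np.unravel_index(np.argmax(xcorr), xcorr.shape)
    center_y, center_x = xcorr.shape[0] // 2, xcorr.shape[1] // 2
    return peak_y - center_y, peak_x - center_x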
class ParametersDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Parameters")
self.setModal(False)
grid = QtWidgets.QGridLayout(self)
grid.addWidget(QtWidgets.QLabel("Oversampling:"), 0, 0)
self.oversampling = QtWidgets.QDoubleSpinBox()
self.oversampling.setRange(1, 200)
self.oversampling.setValue(DEFAULT_OVERSAMPLING)
self.oversampling.setDecimals(1)
self.oversampling.setKeyboardTracking(False)
self.oversampling.valueChanged.connect(self.window.updateLayout)
grid.addWidget(self.oversampling, 0, 1)
self.iterations = QtWidgets.QSpinBox()
self.iterations.setRange(1, 1)
self.iterations.setValue(1)
class View(QtWidgets.QLabel):
def __init__(self, window):
super().__init__()
self.window = window
self.setMinimumSize(1, 1)
self.setAlignment(QtCore.Qt.AlignCenter)
self.setAcceptDrops(True)
self._pixmap = None
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
self.open(path)
def resizeEvent(self, event):
if self._pixmap is not None:
self.set_pixmap(self._pixmap)
def set_image(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap("magma")(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype("uint8")
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
self._pixmap = QtGui.QPixmap.fromImage(qimage)
self.set_pixmap(self._pixmap)
def set_pixmap(self, pixmap):
self.setPixmap(
pixmap.scaled(
self.width(),
self.height(),
QtCore.Qt.KeepAspectRatio,
QtCore.Qt.FastTransformation,
)
)
def update_image(self, *args):
oversampling = self.window.parameters_dialog.oversampling.value()
t_min = -self.r
t_max = self.r
N_avg, image_avg = render.render_hist(
self.locs, oversampling, t_min, t_min, t_max, t_max
)
self.set_image(image_avg)
class DatasetDialog(QtWidgets.QDialog):
def __init__(self, window):
super().__init__(window)
self.window = window
self.setWindowTitle("Datasets")
self.setModal(False)
self.layout = QtWidgets.QVBoxLayout()
self.checks = []
self.setLayout(self.layout)
def add_entry(self, path):
c = QtWidgets.QCheckBox(path)
self.layout.addWidget(c)
self.checks.append(c)
self.checks[-1].setChecked(True)
class Window(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("Picasso: Average3")
self.resize(1024, 512)
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, "icons", "average.ico")
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setAcceptDrops(True)
self.parameters_dialog = ParametersDialog(self)
self.dataset_dialog = DatasetDialog(self)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu("File")
open_action = file_menu.addAction("Open")
open_action.setShortcut(QtGui.QKeySequence.Open)
open_action.triggered.connect(self.open)
file_menu.addAction(open_action)
save_action = file_menu.addAction("Save")
save_action.setShortcut(QtGui.QKeySequence.Save)
save_action.triggered.connect(self.save)
file_menu.addAction(save_action)
process_menu = menu_bar.addMenu("Process")
parameters_action = process_menu.addAction("Parameters")
parameters_action.setShortcut("Ctrl+P")
parameters_action.triggered.connect(self.parameters_dialog.show)
dataset_action = process_menu.addAction("Datasets")
dataset_action.triggered.connect(self.dataset_dialog.show)
self.status_bar = self.statusBar()
self._pixmap = None
self.locs = []
self.z_state = []
self.group_index = []
self.infos = []
self.locs_paths = []
self._mode = "Zoom"
self._pan = False
self._size_hint = (768, 768)
self.n_locs = 0
self._picks = []
self.index_blocks = []
self._drift = []
# Define DisplaySettingsDialog
self.viewxy = QtWidgets.QLabel("")
self.viewxz = QtWidgets.QLabel("")
self.viewyz = QtWidgets.QLabel("")
self.viewcp = QtWidgets.QLabel("")
minsize = 512
self.viewxy.setFixedWidth(minsize)
self.viewxy.setFixedHeight(minsize)
self.viewxz.setFixedWidth(minsize)
self.viewxz.setFixedHeight(minsize)
self.viewyz.setFixedWidth(minsize)
self.viewyz.setFixedHeight(minsize)
self.viewcp.setFixedWidth(minsize)
self.viewcp.setFixedHeight(minsize)
# Define layout
display_groupbox = QtWidgets.QGroupBox("Display")
displaygrid = QtWidgets.QGridLayout(display_groupbox)
displaygrid.addWidget(QtWidgets.QLabel("XY"), 0, 0)
displaygrid.addWidget(self.viewxy, 1, 0)
displaygrid.addWidget(QtWidgets.QLabel("XZ"), 0, 1)
displaygrid.addWidget(self.viewxz, 1, 1)
displaygrid.addWidget(QtWidgets.QLabel("YZ"), 2, 0)
displaygrid.addWidget(self.viewyz, 3, 0)
displaygrid.addWidget(QtWidgets.QLabel("CP"), 2, 1)
displaygrid.addWidget(self.viewcp, 3, 1)
button_groupbox = QtWidgets.QGroupBox("Buttons")
buttongrid = QtWidgets.QGridLayout(button_groupbox)
rotation_groupbox = QtWidgets.QGroupBox("Rotation + Translation")
rotationgrid = QtWidgets.QGridLayout(rotation_groupbox)
centerofmassbtn = QtWidgets.QPushButton("Center of Mass XYZ")
axis_groupbox = QtWidgets.QGroupBox("Axis")
axisgrid = QtWidgets.QGridLayout(axis_groupbox)
self.x_axisbtn = QtWidgets.QRadioButton("X")
self.y_axisbtn = QtWidgets.QRadioButton("Y")
self.z_axisbtn = QtWidgets.QRadioButton("Z")
self.z_axisbtn.setChecked(True)
axisgrid.addWidget(self.x_axisbtn, 0, 0)
axisgrid.addWidget(self.y_axisbtn, 0, 1)
axisgrid.addWidget(self.z_axisbtn, 0, 2)
proj_groupbox = QtWidgets.QGroupBox("Projection")
projgrid = QtWidgets.QGridLayout(proj_groupbox)
self.xy_projbtn = QtWidgets.QRadioButton("XY")
self.yz_projbtn = QtWidgets.QRadioButton("YZ")
self.xz_projbtn = QtWidgets.QRadioButton("XZ")
self.xy_projbtn.setChecked(True)
projgrid.addWidget(self.xy_projbtn, 0, 0)
projgrid.addWidget(self.yz_projbtn, 0, 1)
projgrid.addWidget(self.xz_projbtn, 0, 2)
rotatebtn = QtWidgets.QPushButton("Rotate")
self.radio_sym = QtWidgets.QRadioButton("x symmetry")
self.symEdit = QtWidgets.QSpinBox()
self.symEdit.setRange(2, 100)
self.symEdit.setValue(8)
self.radio_sym_custom = QtWidgets.QRadioButton("custom symmetry")
self.symcustomEdit = QtWidgets.QLineEdit("90,180,270")
deg_groupbox = QtWidgets.QGroupBox("Degrees")
deggrid = QtWidgets.QGridLayout(deg_groupbox)
self.full_degbtn = QtWidgets.QRadioButton("Full")
self.part_degbtn = QtWidgets.QRadioButton("Part")
        self.degEdit = QtWidgets.QSpinBox()
self.degEdit.setRange(1, 10)
self.degEdit.setValue(5)
deggrid.addWidget(self.full_degbtn, 0, 0)
deggrid.addWidget(self.part_degbtn, 0, 1)
deggrid.addWidget(self.degEdit, 0, 2)
self.full_degbtn.setChecked(True)
# Rotation Groupbox
rotationgrid.addWidget(axis_groupbox, 0, 0, 1, 2)
rotationgrid.addWidget(proj_groupbox, 1, 0, 1, 2)
rotationgrid.addWidget(deg_groupbox, 2, 0, 1, 2)
rotationgrid.addWidget(rotatebtn, 3, 0, 1, 2)
rotationgrid.addWidget(self.symEdit, 4, 0)
rotationgrid.addWidget(self.radio_sym, 4, 1)
rotationgrid.addWidget(self.radio_sym_custom, 5, 0)
rotationgrid.addWidget(self.symcustomEdit, 5, 1)
buttongrid.addWidget(centerofmassbtn, 0, 0)
buttongrid.addWidget(rotation_groupbox, 1, 0)
centerofmassbtn.clicked.connect(self.centerofmass)
rotatebtn.clicked.connect(self.rotate_groups)
self.translatebtn = QtWidgets.QCheckBox("Translate only")
self.flipbtn = QtWidgets.QCheckBox("Consider flipped structures")
self.alignxbtn = QtWidgets.QPushButton("Align X")
self.alignybtn = QtWidgets.QPushButton("Align Y")
self.alignzzbtn = QtWidgets.QPushButton("Align Z_Z")
self.alignzybtn = QtWidgets.QPushButton("Align Z_Y")
self.translatexbtn = QtWidgets.QPushButton("Translate X")
self.translateybtn = QtWidgets.QPushButton("Translate Y")
self.translatezbtn = QtWidgets.QPushButton("Translate Z")
self.rotatexy_convbtn = QtWidgets.QPushButton("Rotate XY - Convolution")
self.scorebtn = QtWidgets.QPushButton("Calculate Score")
operate_groupbox = QtWidgets.QGroupBox("Operate")
operategrid = QtWidgets.QGridLayout(operate_groupbox)
rotationgrid.addWidget(self.translatebtn, 7, 0)
rotationgrid.addWidget(self.flipbtn, 8, 0)
self.x_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("x-Range (Px)"), 9, 0)
rotationgrid.addWidget(self.x_range, 9, 1)
self.y_range = QtWidgets.QLineEdit("-3,3")
rotationgrid.addWidget(QtWidgets.QLabel("y-Range (Px)"), 10, 0)
rotationgrid.addWidget(self.y_range, 10, 1)
self.z_range = QtWidgets.QLineEdit("-1000,1000")
rotationgrid.addWidget(QtWidgets.QLabel("z-Range (nm)"), 11, 0)
rotationgrid.addWidget(self.z_range, 11, 1)
self.z_range.textChanged.connect(self.adjust_z)
self.x_range.textChanged.connect(self.adjust_xy)
self.y_range.textChanged.connect(self.adjust_xy)
operategrid.addWidget(self.alignxbtn, 0, 1)
operategrid.addWidget(self.alignybtn, 1, 1)
operategrid.addWidget(self.alignzzbtn, 2, 1)
operategrid.addWidget(self.alignzybtn, 3, 1)
operategrid.addWidget(self.translatexbtn, 0, 0)
operategrid.addWidget(self.translateybtn, 1, 0)
operategrid.addWidget(self.translatezbtn, 2, 0)
operategrid.addWidget(self.rotatexy_convbtn, 4, 0)
operategrid.addWidget(self.scorebtn, 4, 1)
self.rotatexy_convbtn.clicked.connect(self.rotatexy_convolution)
self.alignxbtn.clicked.connect(self.align_x)
self.alignybtn.clicked.connect(self.align_y)
self.alignzzbtn.clicked.connect(self.align_zz)
self.alignzybtn.clicked.connect(self.align_zy)
self.translatexbtn.clicked.connect(self.translate_x)
self.translateybtn.clicked.connect(self.translate_y)
self.translatezbtn.clicked.connect(self.translate_z)
self.scorebtn.clicked.connect(self.calculate_score)
buttongrid.addWidget(operate_groupbox, 2, 0)
self.contrastEdit = QtWidgets.QDoubleSpinBox()
self.contrastEdit.setDecimals(1)
self.contrastEdit.setRange(0, 10)
self.contrastEdit.setValue(0.5)
self.contrastEdit.setSingleStep(0.1)
self.contrastEdit.valueChanged.connect(self.updateLayout)
self.grid = QtWidgets.QGridLayout()
self.grid.addWidget(display_groupbox, 0, 0, 2, 1)
self.grid.addWidget(button_groupbox, 0, 1, 1, 1)
contrast_groupbox = QtWidgets.QGroupBox("Contrast")
contrastgrid = QtWidgets.QGridLayout(contrast_groupbox)
contrastgrid.addWidget(self.contrastEdit)
buttongrid.addWidget(contrast_groupbox)
MODEL_X_DEFAULT = "0,20,40,60,0,20,40,60,0,20,40,60"
MODEL_Y_DEFAULT = "0,20,40,0,20,40,0,20,40,0,20,40"
MODEL_Z_DEFAULT = "0,0,0,0,0,0,0,0,0,0,0,0"
self.modelchk = QtWidgets.QCheckBox("Use Model")
self.model_x = QtWidgets.QLineEdit(MODEL_X_DEFAULT)
self.model_y = QtWidgets.QLineEdit(MODEL_Y_DEFAULT)
self.model_z = QtWidgets.QLineEdit(MODEL_Z_DEFAULT)
self.model_preview_btn = QtWidgets.QPushButton("Preview")
self.model_preview_btn.clicked.connect(self.model_preview)
self.modelblurEdit = QtWidgets.QDoubleSpinBox()
self.modelblurEdit.setDecimals(1)
self.modelblurEdit.setRange(0, 10)
self.modelblurEdit.setValue(0.5)
self.modelblurEdit.setSingleStep(0.1)
self.pixelsizeEdit = QtWidgets.QSpinBox()
self.pixelsizeEdit.setRange(1, 999)
self.pixelsizeEdit.setValue(130)
model_groupbox = QtWidgets.QGroupBox("Model")
modelgrid = QtWidgets.QGridLayout(model_groupbox)
modelgrid.addWidget(self.modelchk, 0, 0)
modelgrid.addWidget(QtWidgets.QLabel("X-Coordinates"), 1, 0)
modelgrid.addWidget(self.model_x, 1, 1)
modelgrid.addWidget(QtWidgets.QLabel("Y-Coordinates"), 2, 0)
modelgrid.addWidget(self.model_y, 2, 1)
modelgrid.addWidget(QtWidgets.QLabel("Z-Coordinates"), 3, 0)
modelgrid.addWidget(self.model_z, 3, 1)
modelgrid.addWidget(QtWidgets.QLabel("Blur:"), 4, 0)
modelgrid.addWidget(self.modelblurEdit, 4, 1)
modelgrid.addWidget(QtWidgets.QLabel("Pixelsize:"), 5, 0)
modelgrid.addWidget(self.pixelsizeEdit, 5, 1)
modelgrid.addWidget(self.model_preview_btn, 6, 0)
modelgrid.addWidget(self.modelchk, 6, 1)
buttongrid.addWidget(model_groupbox)
mainWidget = QtWidgets.QWidget()
mainWidget.setLayout(self.grid)
self.setCentralWidget(mainWidget)
self.status_bar.showMessage("Average3 ready.")
def open(self):
path, exe = QtWidgets.QFileDialog.getOpenFileName(
self, "Open localizations", filter="*.hdf5"
)
if path:
self.add(path)
def save(self, path):
n_channels = len(self.locs)
for i in range(n_channels):
cx = self.infos[i][0]["Width"] / 2
cy = self.infos[i][0]["Height"] / 2
out_locs = self.locs[i].copy()
out_locs.x += cx
out_locs.y += cy
info = self.infos[i] + [{"Generated by": "Picasso Average3"}]
if not self.z_state[i]:
out_locs = lib.remove_from_rec(out_locs, "z")
out_path = os.path.splitext(self.locs_paths[i])[0] + "_avg3.hdf5"
path, exe = QtWidgets.QFileDialog.getSaveFileName(
self, "Save localizations", out_path, filter="*.hdf5"
)
io.save_locs(path, out_locs, info)
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
path = urls[0].toLocalFile()
ext = os.path.splitext(path)[1].lower()
if ext == ".hdf5":
print("Opening {} ..".format(path))
self.add(path)
def add(self, path, rendermode=True):
try:
locs, info = io.load_locs(path, qt_parent=self)
except io.NoMetadataFileError:
return
if len(self.locs) == 0:
self.pixelsize = 0
if not hasattr(locs, "group"):
msgBox = QtWidgets.QMessageBox(self)
msgBox.setWindowTitle("Error")
msgBox.setText(
("Datafile does not contain group information."
" Please load file with picked localizations.")
)
msgBox.exec_()
else:
locs = lib.ensure_sanity(locs, info)
if not hasattr(locs, "z"):
locs = lib.append_to_rec(locs, locs.x.copy(), "z")
self.pixelsize = 1
has_z = False
else:
has_z = True
if self.pixelsize == 0:
pixelsize, ok = QtWidgets.QInputDialog.getInt(
self,
"Pixelsize Dialog",
"Please enter the pixelsize in nm",
130,
)
if ok:
self.pixelsize = pixelsize
else:
self.pixelsize = 130
self.locs.append(locs)
self.z_state.append(has_z)
self.infos.append(info)
self.locs_paths.append(path)
self.index_blocks.append(None)
self._drift.append(None)
self.dataset_dialog.add_entry(path)
self.dataset_dialog.checks[-1].stateChanged.connect(
self.updateLayout
)
cx = self.infos[-1][0]["Width"] / 2
cy = self.infos[-1][0]["Height"] / 2
self.locs[-1].x -= cx
self.locs[-1].y -= cy
if len(self.locs) == 1:
self.median_lp = np.mean(
[np.median(locs.lpx), np.median(locs.lpy)]
)
if hasattr(locs, "group"):
groups = np.unique(locs.group)
groupcopy = locs.group.copy()
for i in range(len(groups)):
groupcopy[locs.group == groups[i]] = i
np.random.shuffle(groups)
groups %= N_GROUP_COLORS
self.group_color = groups[groupcopy]
            if rendermode:
                self.fit_in_view(autoscale=True)
            else:
                if rendermode:
                    self.update_scene()
self.oversampling = 1
if len(self.locs) == 1:
self.t_min = np.min([np.min(locs.x), np.min(locs.y)])
self.t_max = np.max([np.max(locs.x), np.max(locs.y)])
self.z_min = np.min(locs.z)
self.z_max = np.max(locs.z)
else:
self.t_min = np.min(
[np.min(locs.x), np.min(locs.y), self.t_min]
)
self.t_max = np.max(
[np.max(locs.x), np.max(locs.y), self.t_max]
)
self.z_min = np.min([np.min(locs.z), self.z_min])
                self.z_max = np.max([np.max(locs.z), self.z_max])
if len(self.locs) == 1:
print("Dataset loaded from {}.".format(path))
else:
print(
("Dataset loaded from {},"
" Total number of datasets {}.").format(
path, len(self.locs)
)
)
# CREATE GROUP INDEX
if hasattr(locs, "group"):
groups = np.unique(locs.group)
n_groups = len(groups)
n_locs = len(locs)
group_index = scipy.sparse.lil_matrix(
(n_groups, n_locs), dtype=np.bool
)
progress = lib.ProgressDialog(
"Creating group index", 0, len(groups), self
)
progress.set_value(0)
for i, group in enumerate(groups):
index = np.where(locs.group == group)[0]
group_index[i, index] = True
progress.set_value(i + 1)
self.group_index.append(group_index)
self.n_groups = n_groups
os.chdir(os.path.dirname(path))
self.calculate_radii()
self.oversampling = 4
self.updateLayout()
def updateLayout(self):
if len(self.locs) > 0:
pixmap1, pixmap2, pixmap3 = self.hist_multi_channel(self.locs)
self.viewxy.setPixmap(pixmap1)
self.viewxz.setPixmap(pixmap2)
self.viewyz.setPixmap(pixmap3)
def centerofmass_all(self):
# Align all by center of mass
n_channels = len(self.locs)
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
# stack arrays
sel_locs_x = self.locs[j].x
sel_locs_y = self.locs[j].y
sel_locs_z = self.locs[j].z
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
out_locs_x = stack_arrays(out_locs_x, asrecarray=True, usemask=False)
out_locs_y = stack_arrays(out_locs_y, asrecarray=True, usemask=False)
out_locs_z = stack_arrays(out_locs_z, asrecarray=True, usemask=False)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
self.locs[j].x -= mean_x
self.locs[j].y -= mean_y
self.locs[j].z -= mean_z
def calculate_radii(self):
# CALCULATE PROPER R VALUES
n_channels = len(self.locs)
self.r = 0
self.r_z = 0
for j in range(n_channels):
self.r = np.max(
[
3
* np.sqrt(
np.mean(self.locs[j].x ** 2 + self.locs[j].y ** 2)
),
self.r,
]
)
self.r_z = np.max(
[5 * np.sqrt(np.mean(self.locs[j].z ** 2)), self.r_z]
)
self.t_min = -self.r
self.t_max = self.r
self.z_min = -self.r_z
self.z_max = self.r_z
self.z_min_load = self.z_min.copy()
self.z_max_load = self.z_max.copy()
def centerofmass(self):
print("Aligning by center of mass.. ", end="", flush=True)
n_groups = self.n_groups
n_channels = len(self.locs)
progress = lib.ProgressDialog(
"Aligning by center of mass", 0, n_groups, self
)
progress.set_value(0)
for i in range(n_groups):
out_locs_x = []
out_locs_y = []
out_locs_z = []
for j in range(n_channels):
sel_locs_x = []
sel_locs_y = []
sel_locs_z = []
index = self.group_index[j][i, :].nonzero()[1]
# stack arrays
sel_locs_x = self.locs[j].x[index]
sel_locs_y = self.locs[j].y[index]
sel_locs_z = self.locs[j].z[index]
out_locs_x.append(sel_locs_x)
out_locs_y.append(sel_locs_y)
out_locs_z.append(sel_locs_z)
progress.set_value(i + 1)
out_locs_x = stack_arrays(
out_locs_x, asrecarray=True, usemask=False
)
out_locs_y = stack_arrays(
out_locs_y, asrecarray=True, usemask=False
)
out_locs_z = stack_arrays(
out_locs_z, asrecarray=True, usemask=False
)
mean_x = np.mean(out_locs_x)
mean_y = np.mean(out_locs_y)
mean_z = np.mean(out_locs_z)
for j in range(n_channels):
index = self.group_index[j][i, :].nonzero()[1]
self.locs[j].x[index] -= mean_x
self.locs[j].y[index] -= mean_y
self.locs[j].z[index] -= mean_z
self.calculate_radii()
self.updateLayout()
print("Complete.")
def histtoImage(self, image):
cmap = np.uint8(np.round(255 * plt.get_cmap("magma")(np.arange(256))))
image /= image.max()
image = np.minimum(image, 1.0)
image = np.round(255 * image).astype("uint8")
Y, X = image.shape
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(
self.viewxy.width(),
np.round(self.viewxy.height() * Y / X),
QtCore.Qt.KeepAspectRatioByExpanding,
)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def hist_multi_channel(self, locs):
oversampling = self.parameters_dialog.oversampling.value()
self.oversampling = oversampling
if locs is None:
locs = self.locs
n_channels = len(locs)
hues = np.arange(0, 1, 1 / n_channels)
colors = [colorsys.hsv_to_rgb(_, 1, 1) for _ in hues]
renderings = []
for i in range(n_channels):
if self.dataset_dialog.checks[i].isChecked():
renderings.append(
render.render_hist3d(
locs[i],
oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
)
images = np.array([_[1] for _ in renderings])
pixmap1 = self.pixmap_from_colors(images, colors, 2)
pixmap2 = self.pixmap_from_colors(images, colors, 0)
pixmap3 = self.pixmap_from_colors(images, colors, 1)
return pixmap1, pixmap2, pixmap3
def pixmap_from_colors(self, images, colors, axisval):
if axisval == 2:
image = [np.sum(_, axis=axisval) for _ in images]
else:
image = [np.transpose(np.sum(_, axis=axisval)) for _ in images]
image = np.array([self.scale_contrast(_) for _ in image])
Y, X = image.shape[1:]
bgra = np.zeros((Y, X, 4), dtype=np.float32)
        for color, im in zip(colors, image):
            bgra[:, :, 0] += color[2] * im
            bgra[:, :, 1] += color[1] * im
            bgra[:, :, 2] += color[0] * im
bgra = np.minimum(bgra, 1)
self._bgra = self.to_8bit(bgra)
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
qimage = qimage.scaled(
self.viewxy.width(),
np.round(self.viewxy.height() * Y / X),
QtCore.Qt.KeepAspectRatioByExpanding,
)
pixmap = QtGui.QPixmap.fromImage(qimage)
return pixmap
def align_x(self):
print("Align X")
self.align_all("x")
def align_y(self):
print("Align Y")
self.align_all("y")
def align_zz(self):
print("Align Z")
self.align_all("zz")
def align_zy(self):
print("Align Z")
self.align_all("zy")
def translate_x(self):
print("Translate X")
self.translate("x")
def translate_y(self):
print("Translate Y")
self.translate("y")
def translate_z(self):
print("Translate Z")
self.translate("z")
def translate(self, translateaxis):
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
if translateaxis == "x":
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
elif translateaxis == "y":
image = [np.sum(_, axis=2) for _ in images]
signalimg = [np.sum(_, axis=1) for _ in image]
elif translateaxis == "z":
image = [np.sum(_, axis=1) for _ in images]
signalimg = [np.sum(_, axis=0) for _ in image]
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
for element in signalimg:
plt.plot(element)
n_groups = self.group_index[0].shape[0]
print("Translating..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.translate_group(signalimg, i, translateaxis)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(), width, height, QtGui.QImage.Format_ARGB32
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
self.centerofmass_all()
self.updateLayout()
self.status_bar.showMessage("Done!")
def translate_group(self, signalimg, group, translateaxis):
n_channels = len(self.locs)
all_xcorr = np.zeros((1, n_channels))
all_da = np.zeros((1, n_channels))
if translateaxis == "x":
proplane = "xy"
elif translateaxis == "y":
proplane = "xy"
elif translateaxis == "z":
proplane = "xz"
plotmode = 0
for j in range(n_channels):
if plotmode:
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
plt.plot(signalimg[j])
ax2 = fig.add_subplot(1, 3, 2)
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
plane = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
) #
if translateaxis == "x":
projection = np.sum(plane, axis=0)
elif translateaxis == "y":
projection = np.sum(plane, axis=1)
elif translateaxis == "z":
projection = np.sum(plane, axis=1)
if plotmode:
plt.plot(projection)
# print('Step X')
# ax3 = fig.add_subplot(1,3,3)
# plt.imshow(plane, interpolation='nearest', cmap=plt.cm.ocean)
corrval = np.max(signal.correlate(signalimg[j], projection))
shiftval = (
np.argmax(signal.correlate(signalimg[j], projection))
- len(signalimg[j])
+ 1
)
all_xcorr[0, j] = corrval
all_da[0, j] = shiftval / self.oversampling
if plotmode:
plt.show()
        # pick the shift with the biggest cross-correlation value from the table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
dafinal = np.mean(all_da[maximumcc, :])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
if translateaxis == "x":
self.locs[j].x[index] += dafinal
elif translateaxis == "y":
self.locs[j].y[index] += dafinal
elif translateaxis == "z":
self.locs[j].z[index] += dafinal * self.pixelsize
def adjust_z(self):
z_range_str = np.asarray((self.z_range.text()).split(","))
z_range = []
for element in z_range_str:
try:
z_range.append(float(element))
except ValueError:
pass
z_min = z_range[0]
z_max = z_range[1]
self.z_min = np.max([z_min, self.z_min_load])
self.z_max = np.min([z_max, self.z_max_load])
print("Z min {}, Z max {}".format(self.z_min, self.z_max))
self.updateLayout()
def adjust_xy(self):
x_range_str = np.asarray((self.x_range.text()).split(","))
x_range = []
for element in x_range_str:
try:
x_range.append(float(element))
except ValueError:
pass
x_min = x_range[0]
x_max = x_range[1]
self.x_min = np.max([x_min, self.t_min])
self.x_max = np.min([x_max, self.t_max])
print("X min {}, X max {}".format(self.x_min, self.x_max))
y_range_str = np.asarray((self.y_range.text()).split(","))
y_range = []
for element in y_range_str:
try:
y_range.append(float(element))
except ValueError:
pass
y_min = y_range[0]
y_max = y_range[1]
self.y_min = np.max([y_min, self.t_min])
self.y_max = np.min([y_max, self.t_max])
print("Y min {}, Y max {}".format(self.y_min, self.y_max))
self.updateLayout()
def rotatexy_convolution_group(
self, CF_image_avg, angles, group, rotaxis, proplane
):
n_channels = len(self.locs)
n_angles = len(angles)
all_xcorr = np.zeros((n_angles, n_channels))
all_da = np.zeros((n_angles, n_channels))
all_db = np.zeros((n_angles, n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
if self.translatebtn.isChecked():
angles = [0]
n_angles = 1
for k in range(n_angles):
angle = angles[k]
# rotate locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
)
# calculate cross-correlation
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image, interpolation="nearest", cmap=plt.cm.ocean
)
plt.colorbar()
plt.show()
plt.waitforbuttonpress()
xcorr = np.sum(np.multiply(CF_image_avg[j], image))
all_xcorr[k, j] = xcorr
        # pick the angle with the biggest cross-correlation value from the table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
rotfinal = angles[maximumcc]
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
rotfinal,
self.pixelsize,
)
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
def rotatexy_convolution(self):
# TODO: re-write ths with kwargs at some point
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2 * np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(
-degree / 360 * 2 * np.pi, degree / 360 * 2 * np.pi, a_step
)
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = "yz"
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = "xz"
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
        # Modify the CF image to account for symmetry
if self.radio_sym.isChecked():
print("Using symmetry.")
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for i in range(symmetry - 1):
image[0] += scipy.ndimage.interpolation.rotate(
imageold,
((i + 1) * 360 / symmetry),
axes=(1, 0),
reshape=False,
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.radio_sym_custom.isChecked():
print("Using custom symmetry.")
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(","))
print(symmetry_txt)
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(
imageold, float(degree), axes=(1, 0), reshape=False
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
CF_image_avg = image
print("Convolving..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.rotatexy_convolution_group(
CF_image_avg, angles, i, rotaxis, proplane
)
self.updateLayout()
self.status_bar.showMessage("Done!")
def rotate_groups(self):
# Read out values from radiobuttons
# TODO: maybe re-write this with kwargs
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
n_groups = self.group_index[0].shape[0]
a_step = np.arcsin(1 / (self.oversampling * self.r))
if self.full_degbtn.isChecked():
angles = np.arange(0, 2 * np.pi, a_step)
elif self.part_degbtn.isChecked():
degree = self.degEdit.value()
angles = np.arange(
-degree / 360 * 2 * np.pi, degree / 360 * 2 * np.pi, a_step
)
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
images = np.array([_[1] for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
image = [np.sum(_, axis=2) for _ in images]
elif self.yz_projbtn.isChecked():
proplane = "yz"
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif self.xz_projbtn.isChecked():
proplane = "xz"
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
if self.radio_sym.isChecked():
print("Radio sym")
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for i in range(symmetry - 1):
image[0] += scipy.ndimage.interpolation.rotate(
imageold,
((i + 1) * 360 / symmetry),
axes=(1, 0),
reshape=False,
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
# TODO: Sort these functions out,
# combine with radio_sym / also for convolving.
if self.radio_sym_custom.isChecked():
print("Using custom symmetry.")
symmetry_txt = np.asarray((self.symcustomEdit.text()).split(","))
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 2, 1)
symmetry = self.symEdit.value()
ax1.set_aspect("equal")
imageold = image[0].copy()
plt.imshow(imageold, interpolation="nearest", cmap=plt.cm.ocean)
# rotate image
for degree in symmetry_txt:
image[0] += scipy.ndimage.interpolation.rotate(
imageold, float(degree), axes=(1, 0), reshape=False
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(image[0], interpolation="nearest", cmap=plt.cm.ocean)
fig.canvas.draw()
size = fig.canvas.size()
width, height = size.width(), size.height()
im = QtGui.QImage(
fig.canvas.buffer_rgba(),
width,
height,
QtGui.QImage.Format_ARGB32,
)
self.viewcp.setPixmap((QtGui.QPixmap(im)))
self.viewcp.setAlignment(QtCore.Qt.AlignCenter)
plt.close(fig)
if self.modelchk.isChecked():
self.generate_template()
image[0] = self.template_img
CF_image_avg = [np.conj(np.fft.fft2(_)) for _ in image]
# n_pixel, _ = image_avg.shape
# image_half = n_pixel / 2
print("Rotating..")
for i in tqdm(range(n_groups)):
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.align_group(CF_image_avg, angles, i, rotaxis, proplane)
self.updateLayout()
self.status_bar.showMessage("Done!")
def getUIstate(self):
rotaxis = []
if self.x_axisbtn.isChecked():
rotaxis = "x"
elif self.y_axisbtn.isChecked():
rotaxis = "y"
elif self.z_axisbtn.isChecked():
rotaxis = "z"
proplane = []
if self.xy_projbtn.isChecked():
proplane = "xy"
elif self.yz_projbtn.isChecked():
proplane = "yz"
elif self.xz_projbtn.isChecked():
proplane = "xz"
return rotaxis, proplane
def projectPlanes(self, images, proplane):
if proplane == "xy":
image = [np.sum(_, axis=2) for _ in images]
elif proplane == "yz":
image = [np.sum(_, axis=1) for _ in images]
image = [_.transpose() for _ in image]
elif proplane == "xz":
image = [(np.sum(_, axis=0)) for _ in images]
image = [_.transpose() for _ in image]
return image
def generate_template(self):
model_x_str = np.asarray((self.model_x.text()).split(","))
model_y_str = np.asarray((self.model_y.text()).split(","))
model_z_str = np.asarray((self.model_z.text()).split(","))
model_x = []
model_y = []
model_z = []
for element in model_x_str:
try:
model_x.append(float(element))
except ValueError:
pass
for element in model_y_str:
try:
model_y.append(float(element))
except ValueError:
pass
for element in model_z_str:
try:
model_z.append(float(element))
except ValueError:
pass
pixelsize = self.pixelsizeEdit.value()
blur = self.modelblurEdit.value()
# Center of mass
model_x = np.array(model_x) / pixelsize
model_y = np.array(model_y) / pixelsize
model_z = np.array(model_z)
model_x = model_x - np.mean(model_x)
model_y = model_y - np.mean(model_y)
model_z = model_z - np.mean(model_z)
rotaxis, proplane = self.getUIstate()
template_img = self.render_planes(
model_x, model_y, model_z, proplane, pixelsize
)
self.template_img = scipy.ndimage.filters.gaussian_filter(
template_img, blur
)
def model_preview(self):
self.generate_template()
# Generate a template image
fig = plt.figure()
plt.title("Preview of Template")
plt.imshow(self.template_img, interpolation="nearest", cmap=plt.cm.hot)
plt.show()
def calculate_score(self):
# Dummy button -> Functionality of rotatebtn for now
# TODO: maybe re-write this with kwargs
self.scores = []
rotaxis, proplane = self.getUIstate()
n_groups = self.group_index[0].shape[0]
renderings = [
render.render_hist3d(
_,
self.oversampling,
self.t_min,
self.t_min,
self.t_max,
self.t_max,
self.z_min,
self.z_max,
self.pixelsize,
)
for _ in self.locs
]
n_locs = sum([_[0] for _ in renderings])
# Make an average and not a sum image here..
images = np.array([_[1] / n_groups for _ in renderings])
# DELIVER CORRECT PROJECTION FOR IMAGE
image = self.projectPlanes(images, proplane)
n_channels = len(image)
print("Calculating score..")
for i in tqdm(range(n_groups)):
channel_score = []
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][i].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
groupimage = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
)
score = np.sum(np.sqrt(groupimage * image[j])) / np.sum(
np.sqrt(groupimage * groupimage)
)
channel_score.append(score)
self.scores.append(channel_score)
self.status_bar.showMessage("Group {} / {}.".format(i, n_groups))
self.status_bar.showMessage(
"Done. Average score: {}".format(np.mean(self.scores))
)
plt.hist(np.array(self.scores), 40)
plt.title(
"Histogram of Scores, Mean: {:.2f}".format(np.mean(self.scores))
)
plt.xlabel("Score")
plt.ylabel("Counts")
plt.show()
def mean_angle(self, deg):
return phase(sum(rect(1, d) for d in deg) / len(deg))
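    # mean_angle is a circular mean: each angle is mapped to a unit phasor,
    # the phasors are averaged, and the phase of the result is returned, which
    # avoids wrap-around artefacts of a plain arithmetic mean.
    # Illustrative check (hypothetical values, not part of this file):
    #   phase(sum(rect(1, d) for d in [0.1, 2 * np.pi - 0.1]) / 2) ~ 0.0,
    #   whereas np.mean([0.1, 2 * np.pi - 0.1]) ~ np.pi.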
def render_planes(self, xdata, ydata, zdata, proplane, pixelsize):
# assign correct renderings for all planes
a_render = []
b_render = []
if proplane == "xy":
a_render = xdata
b_render = ydata
aval_min = self.t_min
aval_max = self.t_max
bval_min = self.t_min
bval_max = self.t_max
elif proplane == "yz":
a_render = ydata
b_render = np.divide(zdata, pixelsize)
aval_min = self.t_min
aval_max = self.t_max
bval_min = np.divide(self.z_min, pixelsize)
bval_max = np.divide(self.z_max, pixelsize)
elif proplane == "xz":
b_render = np.divide(zdata, pixelsize)
a_render = xdata
bval_min = np.divide(self.z_min, pixelsize)
bval_max = np.divide(self.z_max, pixelsize)
aval_min = self.t_min
aval_max = self.t_max
N, plane = render_histxyz(
a_render,
b_render,
self.oversampling,
aval_min,
aval_max,
bval_min,
bval_max,
)
return plane
def align_all(self, alignaxis):
a_step = np.arcsin(1 / (self.oversampling * self.r))
angles = np.arange(0, 2 * np.pi, a_step)
n_channels = len(self.locs)
n_angles = len(angles)
all_corr = np.zeros((n_angles, n_channels))
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
alignimage = []
x_rot = self.locs[j].x
y_rot = self.locs[j].y
z_rot = self.locs[j].z
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
alignimage = []
for k in range(n_angles):
angle = angles[k]
if alignaxis == "zz":
proplane = "yz"
rotaxis = "x"
elif alignaxis == "zy":
proplane = "yz"
rotaxis = "x"
elif alignaxis == "y":
proplane = "xy"
rotaxis = "z"
elif alignaxis == "x":
proplane = "xy"
rotaxis = "z"
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
) # RENDR PLANES WAS BUGGY AT SOME POINT
if alignimage == []:
alignimage = np.zeros(image.shape)
# CREATE ALIGNIMAGE
                    if alignaxis == "zz":
                        alignimage[int(alignimage.shape[0] / 2), :] += 2
                        alignimage[int(alignimage.shape[0] / 2) + 1, :] += 1
                        alignimage[int(alignimage.shape[0] / 2) - 1, :] += 1
                    elif alignaxis == "zy":
                        alignimage[:, int(alignimage.shape[0] / 2)] += 2
                        alignimage[:, int(alignimage.shape[0] / 2) + 1] += 1
                        alignimage[:, int(alignimage.shape[0] / 2) - 1] += 1
                    elif alignaxis == "y":
                        alignimage[:, int(alignimage.shape[1] / 2)] += 2
                        alignimage[:, int(alignimage.shape[1] / 2) - 1] += 1
                        alignimage[:, int(alignimage.shape[1] / 2) + 1] += 1
                    elif alignaxis == "x":
                        alignimage[int(alignimage.shape[0] / 2), :] += 2
                        alignimage[int(alignimage.shape[0] / 2) + 1, :] += 1
                        alignimage[int(alignimage.shape[0] / 2) - 1, :] += 1
all_corr[k, j] = np.sum(np.multiply(alignimage, image))
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image, interpolation="nearest", cmap=plt.cm.ocean
)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_aspect("equal")
plt.imshow(
alignimage,
interpolation="nearest",
cmap=plt.cm.ocean,
)
plt.colorbar()
plt.show()
# value with biggest cc value form table
maximumcc = np.argmax(np.sum(all_corr, axis=1))
rotfinal = angles[maximumcc]
for j in range(n_channels):
x_rot = self.locs[j].x
y_rot = self.locs[j].y
z_rot = self.locs[j].z
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
rotfinal,
self.pixelsize,
)
self.locs[j].x = x_rot
self.locs[j].y = y_rot
self.locs[j].z = z_rot
self.updateLayout()
self.status_bar.showMessage(
"Align on Axis {} complete.".format(alignaxis)
)
def align_group(self, CF_image_avg, angles, group, rotaxis, proplane):
n_channels = len(self.locs)
n_angles = len(angles)
all_xcorr = np.zeros((n_angles, n_channels))
all_da = np.zeros((n_angles, n_channels))
all_db = np.zeros((n_angles, n_channels))
flips = 1
if self.flipbtn.isChecked():
print("Considering flipped structures...")
flips = 2
for f in range(flips):
for j in range(n_channels):
if self.dataset_dialog.checks[j].isChecked():
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
if f == 1: # Flipped round
if proplane == "xy":
x_original = -x_original
elif proplane == "yz":
y_original = -y_original
elif proplane == "xz":
z_original = -z_original
if self.translatebtn.isChecked():
angles = [0]
n_angles = 1
for k in range(n_angles):
angle = angles[k]
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
angle,
self.pixelsize,
)
# render group image for plane
image = self.render_planes(
x_rot, y_rot, z_rot, proplane, self.pixelsize
) # RENDR PLANES WAS BUGGY AT SOME POINT
# calculate cross-correlation
if 0:
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_aspect("equal")
plt.imshow(
image,
interpolation="nearest",
cmap=plt.cm.ocean,
)
plt.colorbar()
plt.show()
plt.waitforbuttonpress()
xcorr = compute_xcorr(CF_image_avg[j], image)
n_pixelb, n_pixela = image.shape
image_halfa = n_pixela / 2 # TODO: CHECK THOSE VALUES
image_halfb = n_pixelb / 2
# find the brightest pixel
b_max, a_max = np.unravel_index(
xcorr.argmax(), xcorr.shape
)
# store the transformation if the correlation
# is larger than before
all_xcorr[k, j] = xcorr[b_max, a_max]
all_db[k, j] = (
np.ceil(b_max - image_halfb) / self.oversampling
)
all_da[k, j] = (
np.ceil(a_max - image_halfa) / self.oversampling
)
flipstate = False
if f == 0:
# value with biggest cc value form table
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
maximumcc_val = np.max(np.sum(all_xcorr, axis=1))
rotfinal = angles[maximumcc]
dafinal = np.mean(all_da[maximumcc, :])
dbfinal = np.mean(all_db[maximumcc, :])
else:
maximumcc_val_f = np.max(np.sum(all_xcorr, axis=1))
if maximumcc_val < maximumcc_val_f:
flipstate = True
maximumcc = np.argmax(np.sum(all_xcorr, axis=1))
rotfinal = angles[maximumcc]
dafinal = np.mean(all_da[maximumcc, :])
dbfinal = np.mean(all_db[maximumcc, :])
for j in range(n_channels):
index = self.group_index[j][group].nonzero()[1]
x_rot = self.locs[j].x[index]
y_rot = self.locs[j].y[index]
z_rot = self.locs[j].z[index]
x_original = x_rot.copy()
y_original = y_rot.copy()
z_original = z_rot.copy()
# rotate and shift image group locs
x_rot, y_rot, z_rot = rotate_axis(
rotaxis,
x_original,
y_original,
z_original,
rotfinal,
self.pixelsize,
)
if flipstate:
if proplane == "xy":
self.locs[j].x[index] = -x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
elif proplane == "yz":
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = -y_rot
self.locs[j].z[index] = z_rot
elif proplane == "xz":
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = -z_rot
else:
self.locs[j].x[index] = x_rot
self.locs[j].y[index] = y_rot
self.locs[j].z[index] = z_rot
# Shift image group locs
if self.translatebtn.isChecked():
dbfinal = 0
if proplane == "xy":
self.locs[j].x[index] -= dafinal
self.locs[j].y[index] -= dbfinal
elif proplane == "yz":
self.locs[j].y[index] -= dafinal
self.locs[j].z[index] -= dbfinal * self.pixelsize
elif proplane == "xz":
self.locs[j].z[index] -= dafinal
self.locs[j].x[index] -= dbfinal * self.pixelsize
def fit_in_view(self, autoscale=False):
movie_height, movie_width = self.movie_size()
viewport = [(0, 0), (movie_height, movie_width)]
self.update_scene(viewport=viewport, autoscale=autoscale)
def movie_size(self):
movie_height = self.max_movie_height()
movie_width = self.max_movie_width()
return (movie_height, movie_width)
def max_movie_height(self):
""" Returns maximum height of all loaded images. """
return max(info[0]["Height"] for info in self.infos)
def max_movie_width(self):
return max([info[0]["Width"] for info in self.infos])
def update_scene(
self, viewport=None, autoscale=False, use_cache=False, picks_only=False
):
n_channels = len(self.locs)
if n_channels:
viewport = viewport or self.viewport
self.draw_scene(
viewport,
autoscale=autoscale,
use_cache=use_cache,
picks_only=picks_only,
)
# self.update_cursor()
def draw_scene(
self, viewport, autoscale=False, use_cache=False, picks_only=False
):
self.viewport = self.adjust_viewport_to_view(viewport)
qimage = self.render_scene(autoscale=autoscale, use_cache=use_cache)
self.qimage = qimage.scaled(
self.viewxy.width(),
self.viewxy.height(),
QtCore.Qt.KeepAspectRatioByExpanding,
)
def adjust_viewport_to_view(self, viewport):
"""
Adds space to a desired viewport so that
it matches the window aspect ratio.
"""
viewport_height = viewport[1][0] - viewport[0][0]
viewport_width = viewport[1][1] - viewport[0][1]
view_height = self.height()
view_width = self.width()
viewport_aspect = viewport_width / viewport_height
view_aspect = view_width / view_height
if view_aspect >= viewport_aspect:
y_min = viewport[0][0]
y_max = viewport[1][0]
x_range = viewport_height * view_aspect
x_margin = (x_range - viewport_width) / 2
x_min = viewport[0][1] - x_margin
x_max = viewport[1][1] + x_margin
else:
x_min = viewport[0][1]
x_max = viewport[1][1]
y_range = viewport_width / view_aspect
y_margin = (y_range - viewport_height) / 2
y_min = viewport[0][0] - y_margin
y_max = viewport[1][0] + y_margin
return [(y_min, x_min), (y_max, x_max)]
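    # Worked example (hypothetical numbers): a 100 x 50 (height x width)
    # viewport shown in a 200 x 200 px widget has viewport_aspect 0.5 and
    # view_aspect 1.0, so the first branch widens the x-range to 100 and pads
    # 25 on each side, returning [(0, -25), (100, 75)].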
def render_scene(
self, autoscale=False, use_cache=False, cache=True, viewport=None
):
kwargs = self.get_render_kwargs(viewport=viewport)
n_channels = len(self.locs)
if n_channels == 1:
self.render_single_channel(
kwargs, autoscale=autoscale, use_cache=use_cache, cache=cache
)
else:
self.render_multi_channel(
kwargs, autoscale=autoscale, use_cache=use_cache, cache=cache
)
self._bgra[:, :, 3].fill(255)
Y, X = self._bgra.shape[:2]
qimage = QtGui.QImage(self._bgra.data, X, Y, QtGui.QImage.Format_RGB32)
return qimage
def get_render_kwargs(
self, viewport=None
): # Dummy for now: TODO: Implement
viewport = [(0, 0), (32, 32)]
return {
"oversampling": 5,
"viewport": viewport,
"blur_method": None,
"min_blur_width": float(0),
}
def render_multi_channel(
self, kwargs, autoscale=False, locs=None, use_cache=False, cache=True
):
if locs is None:
locs = self.locs
n_channels = len(locs)
hues = np.arange(0, 1, 1 / n_channels)
colors = [colorsys.hsv_to_rgb(_, 1, 1) for _ in hues]
if use_cache:
n_locs = self.n_locs
image = self.image
else:
renderings = [render.render(_, **kwargs) for _ in locs]
n_locs = sum([_[0] for _ in renderings])
image = np.array([_[1] for _ in renderings])
if cache:
self.n_locs = n_locs
self.image = image
image = self.scale_contrast(image)
Y, X = image.shape[1:]
bgra = np.zeros((Y, X, 4), dtype=np.float32)
for color, image in zip(colors, image):
bgra[:, :, 0] += color[2] * image
bgra[:, :, 1] += color[1] * image
bgra[:, :, 2] += color[0] * image
bgra = np.minimum(bgra, 1)
self._bgra = self.to_8bit(bgra)
return self._bgra
def render_single_channel(
self, kwargs, autoscale=False, use_cache=False, cache=True
):
locs = self.locs[0]
if hasattr(locs, "group"):
locs = [locs[self.group_color == _] for _ in range(N_GROUP_COLORS)]
return self.render_multi_channel(
kwargs, autoscale=autoscale, locs=locs, use_cache=use_cache
)
if use_cache:
n_locs = self.n_locs
image = self.image
else:
n_locs, image = render.render(locs, **kwargs)
if cache:
self.n_locs = n_locs
self.image = image
image = self.scale_contrast(image, autoscale=autoscale)
image = self.to_8bit(image)
Y, X = image.shape
# cmap = self.window.display_settings_dialog.colormap.currentText()
# TODO: selection of colormap?
cmap = "hot"
cmap = np.uint8(np.round(255 * plt.get_cmap(cmap)(np.arange(256))))
self._bgra = np.zeros((Y, X, 4), dtype=np.uint8, order="C")
self._bgra[..., 0] = cmap[:, 2][image]
self._bgra[..., 1] = cmap[:, 1][image]
self._bgra[..., 2] = cmap[:, 0][image]
return self._bgra
def to_8bit(self, image):
return np.round(255 * image).astype("uint8")
def scale_contrast(self, image, autoscale=False):
if image.ndim == 2:
max_ = image.max()
else:
max_ = min([_.max() for _ in image])
upper = self.contrastEdit.value() * max_
lower = 0
if upper > 0:
image = (image - lower) / (upper - lower)
image[~np.isfinite(image)] = 0
image = np.minimum(image, 1.0)
image = np.maximum(image, 0.0)
return image
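# Note on scale_contrast: the contrast spin box sets the upper clipping point
# as a fraction of the brightest pixel; e.g. with a (hypothetical) value of
# 0.5, everything above half of the maximum saturates to 1.0 before the
# 8-bit conversion in to_8bit.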
def main():
app = QtWidgets.QApplication(sys.argv)
window = Window()
window.show()
def excepthook(type, value, tback):
lib.cancel_dialogs()
message = "".join(traceback.format_exception(type, value, tback))
errorbox = QtWidgets.QMessageBox.critical(
window, "An error occured", message
)
errorbox.exec_()
sys.__excepthook__(type, value, tback)
sys.excepthook = excepthook
sys.exit(app.exec_())
if __name__ == "__main__":
main()
```
#### File: picasso/gui/toraw.py
```python
import sys
import os
import os.path
from PyQt5 import QtCore, QtGui, QtWidgets
import traceback
from .. import io, lib
class TextEdit(QtWidgets.QTextEdit):
def __init__(self, parent=None):
super().__init__(parent)
# self.setAcceptDrops(True)
def canInsertFromMimeData(self, source):
if source.hasUrls():
return True
return False
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
urls = event.mimeData().urls()
paths = [url.toLocalFile() for url in urls]
valid_paths = []
for path in paths:
base, extension = os.path.splitext(path)
if extension.lower() in [".tif", ".tiff"]:
valid_paths.append(path)
for root, dirs, files in os.walk(path):
for name in files:
candidate = os.path.join(root, name)
base, extension = os.path.splitext(candidate)
if extension.lower() in [".tif", ".tiff"]:
valid_paths.append(candidate)
self.set_paths(valid_paths)
def set_paths(self, paths):
for path in paths:
self.append(path)
class Window(QtWidgets.QWidget):
def __init__(self):
super().__init__()
# Init GUI
self.setWindowTitle("Picasso: ToRaw")
self.resize(768, 512)
this_directory = os.path.dirname(os.path.realpath(__file__))
icon_path = os.path.join(this_directory, "icons", "toraw.ico")
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
vbox = QtWidgets.QVBoxLayout()
self.setLayout(vbox)
vbox.addWidget(QtWidgets.QLabel("Files:"))
self.path_edit = TextEdit()
vbox.addWidget(self.path_edit)
hbox = QtWidgets.QHBoxLayout()
vbox.addLayout(hbox)
self.browse_button = QtWidgets.QPushButton("Browse")
self.browse_button.clicked.connect(self.browse)
hbox.addWidget(self.browse_button)
hbox.addStretch(1)
to_raw_button = QtWidgets.QPushButton("To raw")
to_raw_button.clicked.connect(self.to_raw)
hbox.addWidget(to_raw_button)
def browse(self):
paths, exts = QtWidgets.QFileDialog.getOpenFileNames(
self, "Open files to convert", filter="*.tif; **.tiff"
)
self.path_edit.set_paths(paths)
def to_raw(self):
text = self.path_edit.toPlainText()
paths = text.splitlines()
movie_groups = io.get_movie_groups(paths)
n_movies = len(movie_groups)
if n_movies == 1:
text = "Converting 1 movie..."
else:
text = "Converting {} movies...".format(n_movies)
self.progress_dialog = QtWidgets.QProgressDialog(
text, "Cancel", 0, n_movies, self
)
progress_bar = QtWidgets.QProgressBar(self.progress_dialog)
progress_bar.setTextVisible(False)
self.progress_dialog.setBar(progress_bar)
self.progress_dialog.setMaximum(n_movies)
self.progress_dialog.setWindowTitle("Picasso: ToRaw")
self.progress_dialog.setWindowModality(QtCore.Qt.WindowModal)
self.progress_dialog.canceled.connect(self.cancel)
self.progress_dialog.closeEvent = self.cancel
self.worker = Worker(movie_groups)
self.worker.progressMade.connect(self.update_progress)
self.worker.finished.connect(self.on_finished)
self.worker.start()
self.progress_dialog.show()
def cancel(self, event=None):
self.worker.terminate()
def update_progress(self, n_done):
self.progress_dialog.setValue(n_done)
def on_finished(self, done):
self.progress_dialog.close()
QtWidgets.QMessageBox.information(
self, "Picasso: ToRaw", "Conversion complete."
)
class Worker(QtCore.QThread):
progressMade = QtCore.pyqtSignal(int)
finished = QtCore.pyqtSignal(int)
interrupted = QtCore.pyqtSignal()
def __init__(self, movie_groups):
super().__init__()
self.movie_groups = movie_groups
def run(self):
for i, (basename, paths) in enumerate(self.movie_groups.items()):
io.to_raw_combined(basename, paths)
self.progressMade.emit(i + 1)
self.finished.emit(i)
def main():
app = QtWidgets.QApplication(sys.argv)
window = Window()
window.show()
def excepthook(type, value, tback):
lib.cancel_dialogs()
message = "".join(traceback.format_exception(type, value, tback))
errorbox = QtWidgets.QMessageBox.critical(
window, "An error occured", message
)
errorbox.exec_()
sys.__excepthook__(type, value, tback)
sys.excepthook = excepthook
sys.exit(app.exec_())
if __name__ == "__main__":
main()
``` |
{
"source": "johanna-rock/im_ricnn",
"score": 2
} |
#### File: im_ricnn/data_models/objective_func.py
```python
import torch
from enum import Enum
from sklearn.metrics import mean_squared_error
from torch import nn
from torch.nn.modules.loss import _Loss
import numpy as np
from datasets.radar_dataset import DataContent
from run_scripts import print_, device
class DeltaSNR(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='elementwise_mean'):
super(DeltaSNR, self).__init__(size_average, reduce, reduction)
self.data_content = DataContent.COMPLEX_PACKET_RD # extend to others?!
def forward(self, output_re_im, target_re_im, object_mask, noise_mask):
object_mask = object_mask.to(device)
noise_mask = noise_mask.to(device)
sinr_delta_mean = 0
num_packets = target_re_im.shape[0]
if self.data_content is DataContent.COMPLEX_PACKET_RD:
for p in range(num_packets):
output_re_im_packet = output_re_im[p]
target_re_im_packet = target_re_im[p]
sinr_output = sinr_from_re_im_format(output_re_im_packet, object_mask, noise_mask)
sinr_target = sinr_from_re_im_format(target_re_im_packet, object_mask, noise_mask)
sinr_delta_mean += torch.abs(sinr_target - sinr_output)
else:
print_('WARNING: Not implemented yet.')
assert False
sinr_delta_mean /= num_packets
return sinr_delta_mean
class SINRLoss(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='elementwise_mean'):
super(SINRLoss, self).__init__(size_average, reduce, reduction)
self.data_content = DataContent.COMPLEX_PACKET_RD # extend to others?!
def forward(self, output_re_im, target_re_im, object_mask, noise_mask):
object_mask = object_mask.to(device)
noise_mask = noise_mask.to(device)
neg_sinr_mean = 0
num_packets = target_re_im.shape[0]
if self.data_content is DataContent.COMPLEX_PACKET_RD:
for p in range(num_packets):
output_re_im_packet = output_re_im[p]
neg_sinr_mean -= sinr_from_re_im_format(output_re_im_packet, object_mask, noise_mask)
else:
print_('WARNING: Not implemented yet.')
assert False
neg_sinr_mean /= num_packets
return neg_sinr_mean
class MSEWeightedMagPhase(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='elementwise_mean'):
super(MSEWeightedMagPhase, self).__init__(size_average, reduce, reduction)
self.data_content = DataContent.COMPLEX_PACKET_RD # extend to others?!
self.mse = nn.MSELoss()
self.w_mag = 0.0
self.w_phase = 0.0
self.w_re_im = 1.0
self.epoch = 0
def forward(self, output_re_im, target_re_im, object_mask, noise_mask):
object_mask = object_mask.to(device)
loss = 0
num_packets = target_re_im.shape[0]
num_re = int(target_re_im.shape[2] / 2)
if self.data_content is DataContent.COMPLEX_PACKET_RD:
for p in range(num_packets):
output_re_im_packet = output_re_im[p]
target_re_im_packet = target_re_im[p]
output_re_packet = output_re_im_packet[:, :num_re]
output_im_packet = output_re_im_packet[:, num_re:]
target_re_packet = target_re_im_packet[:, :num_re]
target_im_packet = target_re_im_packet[:, num_re:]
output_peaks_re = torch.masked_select(output_re_packet, object_mask)
output_peaks_im = torch.masked_select(output_im_packet, object_mask)
target_peaks_re = torch.masked_select(target_re_packet, object_mask)
target_peaks_im = torch.masked_select(target_im_packet, object_mask)
phase_target = torch.atan(target_peaks_im / target_peaks_re)
phase_output = torch.atan(output_peaks_im / output_peaks_re)
target_max_mag = torch.sqrt(target_re_packet ** 2 + target_im_packet ** 2).view(-1).max()
target_re_packet_log_mag = target_re_packet / target_max_mag
target_im_packet_log_mag = target_im_packet / target_max_mag
target_log_mag = 10 * torch.log10(torch.sqrt(target_re_packet_log_mag ** 2 + target_im_packet_log_mag ** 2))
target_log_mag = torch.masked_select(target_log_mag, object_mask)
output_max_mag = torch.sqrt(output_re_packet ** 2 + output_im_packet ** 2).view(-1).max()
output_re_packet_log_mag = output_re_packet / output_max_mag
output_im_packet_log_mag = output_im_packet / output_max_mag
output_log_mag = 10 * torch.log10(torch.sqrt(output_re_packet_log_mag ** 2 + output_im_packet_log_mag ** 2))
output_log_mag = torch.masked_select(output_log_mag, object_mask)
loss += self.w_re_im * self.mse(output_re_im, target_re_im) +\
self.w_mag * self.mse(output_log_mag, target_log_mag) +\
self.w_phase * self.mse(phase_output, phase_target)
else:
print_('WARNING: Not implemented yet.')
assert False
loss /= num_packets
return loss
def next_epoch(self):
pass
self.epoch += 1
if self.epoch % 10 == 0 and self.w_re_im > 0.4:
self.w_re_im -= 0.1
self.w_mag = (1 - self.w_re_im) / 2
self.w_phase = (1 - self.w_re_im) / 2
class MSE(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='elementwise_mean'):
super(MSE, self).__init__(size_average, reduce, reduction)
self.mse = nn.MSELoss()
def forward(self, output_re_im, target_re_im, object_mask, noise_mask):
return self.mse.forward(output_re_im, target_re_im)
class ObjectiveFunction(Enum):
DELTA_SNR = DeltaSNR()
MSE = MSE()
MSE_MAG_PHASE_WEIGHTED = MSEWeightedMagPhase()
SINR = SINRLoss()
def __call__(self, *args):
return self.value(*args)
@staticmethod
def loss_to_running_loss(batch_loss, batch_size):
return batch_loss * batch_size
@staticmethod
def loss_from_running_loss(running_loss, sample_size):
return running_loss / sample_size
@staticmethod
def from_name(value):
if value == ObjectiveFunction.DELTA_SNR.name:
return ObjectiveFunction.DELTA_SNR
elif value == ObjectiveFunction.MSE.name:
return ObjectiveFunction.MSE
elif value == ObjectiveFunction.MSE_MAG_PHASE_WEIGHTED.name:
return ObjectiveFunction.MSE_MAG_PHASE_WEIGHTED
elif value == ObjectiveFunction.SINR.name:
return ObjectiveFunction.SINR
else:
return None
@staticmethod
def objective_func_name(func):
try:
if func.name in ObjectiveFunction.__members__:
return func.name
else:
return 'None'
except AttributeError:
return 'None'
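# Calling an ObjectiveFunction member forwards to the wrapped loss module via
# __call__, e.g. (hypothetical tensors, following the forward signatures above):
#   loss = ObjectiveFunction.MSE(output_re_im, target_re_im, object_mask, noise_mask)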
def sinr_log_mag(log_mag_rd_target, log_mag_rd_test, object_mask, noise_mask):
return np.average(log_mag_rd_test[object_mask]) - np.average(log_mag_rd_test[noise_mask])
def sinr(rd_target, rd_test, object_mask, noise_mask):
rd_test_mag = np.abs(rd_test)**2
obj_values = rd_test_mag[object_mask]
obj_magnitude = np.average(obj_values)
noise_values = rd_test_mag[noise_mask]
noise_magnitude = np.average(noise_values)
return 10 * np.log10(obj_magnitude / noise_magnitude)
def sinr_1d(cr_target, cr_test, object_mask, noise_mask):
cr_test_mag = np.abs(cr_test)**2
obj_values = cr_test_mag[object_mask]
obj_magnitude = np.average(obj_values)
noise_values = cr_test_mag[noise_mask]
noise_magnitude = np.average(noise_values)
return 10 * np.log10(obj_magnitude / noise_magnitude)
def sinr_from_re_im_format(re_im_packet, obj_mask, noise_mask):
if len(re_im_packet.shape) == 3:
re_im_packet = re_im_packet[0]
num_re = int(re_im_packet.shape[1]/2)
re_packet = re_im_packet[:, :num_re]
im_packet = re_im_packet[:, num_re:]
mag = re_packet ** 2 + im_packet ** 2
obj_values = torch.masked_select(mag, obj_mask)
obj_magnitude = torch.mean(obj_values)
noise_values = torch.masked_select(mag, noise_mask)
noise_magnitude = torch.mean(noise_values)
return 10 * torch.log10(obj_magnitude / noise_magnitude)
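# The "re/im format" used above stacks the real and imaginary parts of a packet
# side by side along the column axis, so num_re = shape[1] // 2. A minimal
# packing sketch (hypothetical helper, not part of this repo):
#   def pack_re_im(rd_complex):  # complex tensor of shape (N_range, N_doppler)
#       return torch.cat([rd_complex.real, rd_complex.imag], dim=1)
# A (256, 128) complex map then becomes a (256, 256) real tensor, which
# sinr_from_re_im_format splits back into its real and imaginary halves.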
def peak_mag_mse(log_mag_rd_target, log_mag_rd_test, object_mask, noise_mask):
obj_values_target = log_mag_rd_target[object_mask]
obj_values_test = log_mag_rd_test[object_mask]
if len(obj_values_target) == 0:
return np.nan
return mean_squared_error(obj_values_target, obj_values_test)
def evm(rd_target, rd_test, object_mask, noise_mask):
obj_values_target = rd_target[object_mask]
obj_values_test = rd_test[object_mask]
if len(obj_values_target) == 0:
return np.nan
evms = np.abs(obj_values_target - obj_values_test) / np.abs(obj_values_target)
return np.average(evms)
def evm_norm(rd_target, rd_test, object_mask, noise_mask):
rd_target_norm = rd_target / np.amax(np.abs(rd_target))
rd_test_norm = rd_test / np.amax(np.abs(rd_test))
obj_values_target = rd_target_norm[object_mask]
obj_values_test = rd_test_norm[object_mask]
if len(obj_values_target) == 0:
return np.nan
evms = np.abs(obj_values_target - obj_values_test) / np.abs(obj_values_target)
return np.average(evms)
def evm_1d(cr_target, cr_test, object_mask, noise_mask):
obj_values_target = cr_target[object_mask]
obj_values_test = cr_test[object_mask]
if len(obj_values_target) == 0:
return np.nan
evms = np.abs(obj_values_target - obj_values_test) / np.abs(obj_values_target)
return np.average(evms)
def evm_1d_norm(cr_target, cr_test, object_mask, noise_mask):
cr_target_norm = cr_target / np.amax(np.abs(cr_target))
cr_test_norm = cr_test / np.amax(np.abs(cr_test))
obj_values_target = cr_target_norm[object_mask]
obj_values_test = cr_test_norm[object_mask]
if len(obj_values_target) == 0:
print_('WARNING: no obj peak targets found in evm_1d_norm!')
return np.nan
evms = np.abs(obj_values_target - obj_values_test) / np.abs(obj_values_target)
return np.average(evms)
def rd_obj_peak_phase_mse(rd_target, rd_test, object_mask, noise_mask):
peaks_target = rd_target[object_mask]
peaks_test = rd_test[object_mask]
if len(peaks_target) == 0:
print_('WARNING: no peaks found for evaluation metric.')
return np.nan
peaks_target_imag = np.imag(peaks_target)
peaks_target_real = np.real(peaks_target)
peaks_phase_target = np.arctan(peaks_target_imag.astype('float') / peaks_target_real.astype('float'))
peaks_test_imag = np.imag(peaks_test)
peaks_test_real = np.real(peaks_test)
peaks_phase_test = np.arctan(peaks_test_imag.astype('float') / peaks_test_real.astype('float'))
phase_mse = mean_squared_error(peaks_phase_target, peaks_phase_test)
return phase_mse
def rd_obj_peak_log_mag_mse(rd_target, rd_test, object_mask, noise_mask):
peaks_target = rd_target[object_mask]
peaks_test = rd_test[object_mask]
if len(peaks_target) == 0:
print_('WARNING: no peaks found for evaluation metric.')
return np.nan
mag_target = np.abs(peaks_target)
mag_test = np.abs(peaks_test)
phase_mse = mean_squared_error(mag_target, mag_test)
return phase_mse
```
#### File: im_ricnn/datasets/radar_dataset.py
```python
import copy
import os
import scipy
import warnings
from enum import Enum
import torch
import numpy as np
from torch.utils.data import Dataset
import scipy.io as spio
from data_models.scaler import Scaler
from run_scripts import print_
from utils.rd_processing import calculate_velocity_fft, calculate_angle_fft, v_vec_fft2, d_vec_fft2, num_angle_fft_bins, \
d_max
def split_indices_for_partitions(num_items, train_ratio=0.5, val_ratio=0.5, test_ratio=0.0):
assert(train_ratio + val_ratio + test_ratio == 1.0)
train_size = int(num_items * train_ratio)
val_size = int(num_items * val_ratio)
indices = list(range(num_items))
train_indices = indices[0:train_size]
val_indices = indices[train_size:train_size + val_size]
test_indices = indices[train_size + val_size:]
return train_indices, val_indices, test_indices
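# Example (hypothetical numbers): split_indices_for_partitions(10, 0.6, 0.2, 0.2)
# returns train [0..5], val [6, 7], test [8, 9]. The split is purely positional,
# so shuffle the items beforehand if random partitions are needed.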
def load_data_for_denoising_ri_ramps(out, dataset):
try: # for real measurements
measurements = out['test_rd'][()].transpose()
# second FFT
num_ramps = measurements.shape[0]
num_fts = measurements.shape[1]
assert (num_ramps % dataset.num_ramps_per_packet == 0)
x = measurements.reshape(num_ramps, 1, num_fts)
filter_mask = np.ones((x.shape[0],), dtype=int)
interference_mask = filter_mask
all_noise_mask = np.ones(measurements.shape, dtype=bool)
return x, x, x, x, x, filter_mask, interference_mask, x,\
all_noise_mask, all_noise_mask, all_noise_mask, all_noise_mask, []
except ValueError:
pass
fft_original = out['s_IF_clean_noise'][()].transpose() # IF clean + IF gaussian noise
fft_interf = out['s_IF'][()].transpose() # IF clean + IF gaussian noise + IF interference
fft_clean = out['s_IF_clean'][()].transpose() # IF clean
interference_mask = out['interference_active_ramp'][()].transpose()
fft_zero_mitigation = out['s_IF_zero_interf_td'][()].transpose()
object_targets = out['objects'][()]
num_ramps = fft_clean.shape[0]
num_fts = fft_clean.shape[1]
num_packets = int(num_ramps / dataset.num_ramps_per_packet)
x = fft_interf.reshape(num_ramps, 1, num_fts)
y = fft_clean.reshape(num_ramps, 1, num_fts)
rd_object_masks = []
aoa_object_masks = []
rd_noise_masks = []
aoa_noise_masks = []
target_angles = []
for p in range(num_packets):
target_ranges = np.array([object_targets[p][4]]).flatten()
target_velocities = np.array([object_targets[p][7]]).flatten()
ta = np.array([object_targets[p][5]]).flatten()
rd_o_masks, rd_n_masks, d_indices, v_indices = calculate_rd_object_and_noise_masks(target_ranges, target_velocities, num_fts, dataset.num_ramps_per_packet)
aoa_o_masks, aoa_n_masks = calculate_aoa_object_and_noise_masks(target_ranges, ta, num_fts, num_angle_fft_bins)
target_angles.append({'d': d_indices, 'v': v_indices, 'a': ta})
rd_object_masks.append(rd_o_masks * dataset.num_ramps_per_packet)
rd_noise_masks.append(rd_n_masks * dataset.num_ramps_per_packet)
aoa_object_masks.append(aoa_o_masks * dataset.num_ramps_per_packet)
aoa_noise_masks.append(aoa_n_masks * dataset.num_ramps_per_packet)
rd_object_masks = np.array(rd_object_masks)
rd_noise_masks = np.array(rd_noise_masks)
aoa_object_masks = np.array(aoa_object_masks)
aoa_noise_masks = np.array(aoa_noise_masks)
filter_mask = np.ones((x.shape[0],), dtype=int)
return x, y, fft_clean, fft_original, fft_interf, filter_mask, interference_mask, fft_zero_mitigation,\
rd_object_masks, rd_noise_masks, aoa_object_masks, aoa_noise_masks, target_angles
def load_data_for_denoising_ri_ramps_training_with_interfered_ramps_only(out, dataset):
try: # for real measurements
measurements = out['test_rd'][()].transpose()
# second FFT
num_ramps = measurements.shape[0]
num_fts = measurements.shape[1]
assert (num_ramps % dataset.num_ramps_per_packet == 0)
x = measurements.reshape(num_ramps, 1, num_fts)
filter_mask = np.ones((x.shape[0],), dtype=int)
interference_mask = filter_mask
all_noise_mask = np.ones(measurements.shape, dtype=bool)
return x, x, x, x, x, filter_mask, interference_mask, x,\
all_noise_mask, all_noise_mask, all_noise_mask, all_noise_mask, []
except ValueError:
pass
fft_original = out['s_IF_clean_noise'][()].transpose() # IF clean + IF gaussian noise
fft_interf = out['s_IF'][()].transpose() # IF clean + IF gaussian noise + IF interference
fft_clean = out['s_IF_clean'][()].transpose() # IF clean
filter_mask = out['interference_active_ramp'][()].transpose()
fft_zero_mitigation = out['s_IF_zero_interf_td'][()].transpose()
interference_mask = filter_mask
object_targets = out['objects'][()]
num_ramps = fft_clean.shape[0]
num_fts = fft_clean.shape[1]
num_packets = int(num_ramps / dataset.num_ramps_per_packet)
x = fft_interf.reshape(num_ramps, 1, num_fts)
y = fft_clean.reshape(num_ramps, 1, num_fts)
rd_object_masks = []
aoa_object_masks = []
rd_noise_masks = []
aoa_noise_masks = []
target_angles = []
for p in range(num_packets):
target_ranges = np.array([object_targets[p][4]]).flatten()
target_velocities = np.array([object_targets[p][7]]).flatten()
ta = np.array([object_targets[p][5]]).flatten()
rd_o_masks, rd_n_masks, d_indices, v_indices = calculate_rd_object_and_noise_masks(target_ranges, target_velocities, num_fts, dataset.num_ramps_per_packet)
aoa_o_masks, aoa_n_masks = calculate_aoa_object_and_noise_masks(target_ranges, ta, num_fts, num_angle_fft_bins)
target_angles.append({'d': d_indices, 'v': v_indices, 'a': ta})
rd_object_masks.append(rd_o_masks * dataset.num_ramps_per_packet)
rd_noise_masks.append(rd_n_masks * dataset.num_ramps_per_packet)
aoa_object_masks.append(aoa_o_masks * dataset.num_ramps_per_packet)
aoa_noise_masks.append(aoa_n_masks * dataset.num_ramps_per_packet)
rd_object_masks = np.array(rd_object_masks)
rd_noise_masks = np.array(rd_noise_masks)
aoa_object_masks = np.array(aoa_object_masks)
aoa_noise_masks = np.array(aoa_noise_masks)
return x, y, fft_clean, fft_original, fft_interf, filter_mask, interference_mask, fft_zero_mitigation,\
rd_object_masks, rd_noise_masks, aoa_object_masks, aoa_noise_masks, []
def load_data_for_denoising_ri_range_doppler_map(out, dataset):
try: # for real measurements
measurements = out['test_rd'][()].transpose()
# second FFT
num_ramps = measurements.shape[0]
assert (num_ramps % dataset.num_ramps_per_packet == 0)
num_packets = int(num_ramps / dataset.num_ramps_per_packet)
rd = []
for p in range(num_packets):
rd.append(calculate_velocity_fft(
measurements[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet]))
rd = np.array(rd)
filter_mask = np.ones((rd.shape[0],), dtype=int)
interference_mask = filter_mask
all_noise_mask = np.ones(measurements.shape, dtype=bool)
return rd, rd, rd, rd, rd, filter_mask, interference_mask, rd,\
all_noise_mask, all_noise_mask, all_noise_mask, all_noise_mask, []
except ValueError:
pass
fft_original = out['s_IF_clean_noise'][()].transpose() # IF clean + IF gaussian noise
fft_interf = out['s_IF'][()].transpose() # IF clean + IF gaussian noise + IF interference
fft_clean = out['s_IF_clean'][()].transpose() # IF clean
fft_zero_mitigation = out['s_IF_zero_interf_td'][()].transpose()
object_targets = out['objects'][()]
# second FFT
num_ramps = fft_original.shape[0]
num_fts = fft_original.shape[1]
assert (num_ramps % dataset.num_ramps_per_packet == 0)
num_packets = int(num_ramps / dataset.num_ramps_per_packet)
rd_original = []
rd_interf = []
rd_clean = []
rd_zero_mitigation = []
rd_object_masks = []
aoa_object_masks = []
# cr_object_masks = []
rd_noise_masks = []
aoa_noise_masks = []
# cr_noise_masks = []
target_angles = []
for p in range(num_packets):
rd_original.append(calculate_velocity_fft(fft_original[p*dataset.num_ramps_per_packet: (p+1)*dataset.num_ramps_per_packet]))
rd_interf.append(calculate_velocity_fft(
fft_interf[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet]))
rd_clean.append(calculate_velocity_fft(
fft_clean[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet]))
rd_zero_mitigation.append(calculate_velocity_fft(
fft_zero_mitigation[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet]))
target_ranges = np.array([object_targets[p][4]]).flatten()
target_velocities = np.array([object_targets[p][7]]).flatten()
ta = np.array([object_targets[p][5]]).flatten()
rd_o_masks, rd_n_masks, d_indices, v_indices = calculate_rd_object_and_noise_masks(target_ranges, target_velocities, num_fts, dataset.num_ramps_per_packet)
aoa_o_masks, aoa_n_masks = calculate_aoa_object_and_noise_masks(target_ranges, ta, num_fts, num_angle_fft_bins)
target_angles.append({'d': d_indices, 'v': v_indices, 'a': ta})
rd_object_masks.append(rd_o_masks)
rd_noise_masks.append(rd_n_masks)
aoa_object_masks.append(aoa_o_masks)
aoa_noise_masks.append(aoa_n_masks)
rd_object_masks = np.array(rd_object_masks)
rd_noise_masks = np.array(rd_noise_masks)
aoa_object_masks = np.array(aoa_object_masks)
aoa_noise_masks = np.array(aoa_noise_masks)
rd_original = np.array(rd_original)
rd_interf = np.array(rd_interf)
rd_clean = np.array(rd_clean)
rd_zero_mitigation = np.array(rd_zero_mitigation)
x = rd_interf
y = rd_clean # y = rd_original
filter_mask = np.ones((x.shape[0],), dtype=int)
interference_mask = filter_mask
return x, y, rd_clean, rd_original, rd_interf, filter_mask, interference_mask, rd_zero_mitigation,\
rd_object_masks, rd_noise_masks, aoa_object_masks, aoa_noise_masks, target_angles
def load_data_for_denoising_log_mag_range_doppler_map(out, dataset):
try: # for real measurements
measurements = out['test_rd'][()].transpose()
# second FFT
num_ramps = measurements.shape[0]
assert (num_ramps % dataset.num_ramps_per_packet == 0)
num_packets = int(num_ramps / dataset.num_ramps_per_packet)
rd = []
for p in range(num_packets):
fft2 = calculate_velocity_fft(
measurements[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet])
fft2 = fft2 / np.amax(np.abs(fft2))
fft2 = 10 * np.log10(np.abs(fft2)**2)
rd.append(fft2)
rd = np.array(rd)
filter_mask = np.ones((rd.shape[0],), dtype=int)
interference_mask = filter_mask
all_noise_mask = np.ones(measurements.shape, dtype=bool)
return rd, rd, rd, rd, rd, filter_mask, interference_mask, rd,\
all_noise_mask, all_noise_mask, all_noise_mask, all_noise_mask, []
except ValueError:
pass
fft_original = out['s_IF_clean_noise'][()].transpose() # IF clean + IF gaussian noise
fft_interf = out['s_IF'][()].transpose() # IF clean + IF gaussian noise + IF interference
fft_clean = out['s_IF_clean'][()].transpose() # IF clean
fft_zero_mitigation = out['s_IF_zero_interf_td'][()].transpose()
object_targets = out['objects'][()]
# second FFT
num_ramps = fft_original.shape[0]
num_fts = fft_original.shape[1]
assert (num_ramps % dataset.num_ramps_per_packet == 0)
num_packets = int(num_ramps / dataset.num_ramps_per_packet)
rd_original = []
rd_interf = []
rd_clean = []
rd_zero_mitigation = []
rd_object_masks = []
aoa_object_masks = []
rd_noise_masks = []
aoa_noise_masks = []
target_angles = []
for p in range(num_packets):
fft2 = calculate_velocity_fft(
fft_original[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet])
fft2 = fft2 / np.amax(np.abs(fft2))
fft2 = 10 * np.log10(np.abs(fft2)**2)
rd_original.append(fft2)
fft2 = calculate_velocity_fft(
fft_interf[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet])
fft2 = fft2 / np.amax(np.abs(fft2))
fft2 = 10 * np.log10(np.abs(fft2)**2)
rd_interf.append(fft2)
fft2 = calculate_velocity_fft(
fft_clean[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet])
fft2 = fft2 / np.amax(np.abs(fft2))
fft2 = 10 * np.log10(np.abs(fft2)**2)
rd_clean.append(fft2)
fft2 = calculate_velocity_fft(
fft_zero_mitigation[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet])
fft2 = fft2 / np.amax(np.abs(fft2))
fft2 = 10 * np.log10(np.abs(fft2)**2)
rd_zero_mitigation.append(fft2)
target_ranges = np.array([object_targets[p][4]]).flatten()
target_velocities = np.array([object_targets[p][7]]).flatten()
        ta = np.array([object_targets[p][5]]).flatten()
rd_o_masks, rd_n_masks, d_indices, v_indices = calculate_rd_object_and_noise_masks(target_ranges, target_velocities, num_fts, dataset.num_ramps_per_packet)
aoa_o_masks, aoa_n_masks = calculate_aoa_object_and_noise_masks(target_ranges, ta, num_fts, num_angle_fft_bins)
target_angles.append({'d': d_indices, 'v': v_indices, 'a': ta})
rd_object_masks.append(rd_o_masks)
rd_noise_masks.append(rd_n_masks)
aoa_object_masks.append(aoa_o_masks)
aoa_noise_masks.append(aoa_n_masks)
rd_original = np.array(rd_original)
rd_interf = np.array(rd_interf)
rd_clean = np.array(rd_clean)
rd_zero_mitigation = np.array(rd_zero_mitigation)
rd_object_masks = np.array(rd_object_masks)
rd_noise_masks = np.array(rd_noise_masks)
aoa_object_masks = np.array(aoa_object_masks)
aoa_noise_masks = np.array(aoa_noise_masks)
x = rd_interf
y = rd_clean
filter_mask = np.ones((x.shape[0],), dtype=int)
interference_mask = filter_mask
return x, y, rd_clean, rd_original, rd_interf, filter_mask, interference_mask, rd_zero_mitigation,\
rd_object_masks, rd_noise_masks, aoa_object_masks, aoa_noise_masks, target_angles
def load_data_for_denoising_ri_angle_map(out, dataset):
num_channels = dataset.get_num_channels()
try: # for real measurements
measurements = out['test_rd'][()].transpose()
num_ramps = measurements.shape[0]
num_fts = measurements.shape[1]
assert (num_ramps % dataset.num_ramps_per_packet == 0)
num_packets = int(num_ramps / dataset.num_ramps_per_packet)
assert (num_packets % num_channels == 0)
aoa_maps = []
aoa_map = np.zeros((num_fts, num_channels), dtype=np.complex128)
for p in range(num_packets):
c = p % num_channels
packet_data = measurements[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet]
aoa_map[:, c] = packet_data[0, :].transpose()
if c == num_channels-1:
aoa_maps.append(calculate_angle_fft(aoa_map))
aoa_maps = np.array(aoa_maps)
filter_mask = np.ones((aoa_maps.shape[0],), dtype=int)
interference_mask = filter_mask
all_noise_mask = np.ones(measurements.shape, dtype=bool)
return aoa_maps, aoa_maps, aoa_maps, aoa_maps, aoa_maps, filter_mask, interference_mask, aoa_maps,\
all_noise_mask, all_noise_mask, all_noise_mask, all_noise_mask, []
except ValueError:
pass
fft_original = out['s_IF_clean_noise'][()].transpose() # IF clean + IF gaussian noise
fft_interf = out['s_IF'][()].transpose() # IF clean + IF gaussian noise + IF interference
fft_clean = out['s_IF_clean'][()].transpose() # IF clean
fft_zero_mitigation = out['s_IF_zero_interf_td'][()].transpose()
object_targets = out['objects'][()]
# angle FFT
num_ramps = fft_original.shape[0]
num_fts = fft_original.shape[1]
num_packets = int(num_ramps / dataset.num_ramps_per_packet)
assert (num_ramps % dataset.num_ramps_per_packet == 0)
assert (num_packets % num_channels == 0)
aoa_original = []
aoa_interf = []
aoa_clean = []
aoa_zero_mitigation = []
aoa_map_original = np.zeros((num_fts, num_channels), dtype=np.complex128)
aoa_map_interf = np.zeros((num_fts, num_channels), dtype=np.complex128)
aoa_map_clean = np.zeros((num_fts, num_channels), dtype=np.complex128)
aoa_map_zero_mitigation = np.zeros((num_fts, num_channels), dtype=np.complex128)
rd_object_masks = []
aoa_object_masks = []
rd_noise_masks = []
aoa_noise_masks = []
target_angles = []
for p in range(num_packets):
c = p % num_channels
packet_data = fft_original[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet]
aoa_map_original[:, c] = packet_data[0, :]
packet_data = fft_interf[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet]
aoa_map_interf[:, c] = packet_data[0, :].transpose()
packet_data = fft_clean[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet]
aoa_map_clean[:, c] = packet_data[0, :]
packet_data = fft_zero_mitigation[p * dataset.num_ramps_per_packet: (p + 1) * dataset.num_ramps_per_packet]
aoa_map_zero_mitigation[:, c] = packet_data[0, :]
if c == num_channels - 1:
aoa_original.append(calculate_angle_fft(aoa_map_original))
aoa_interf.append(calculate_angle_fft(aoa_map_interf))
aoa_clean.append(calculate_angle_fft(aoa_map_clean))
aoa_zero_mitigation.append(calculate_angle_fft(aoa_map_zero_mitigation))
target_ranges = np.array([object_targets[p][4]]).flatten()
target_velocities = np.array([object_targets[p][7]]).flatten()
ta = np.array([object_targets[p][5]]).flatten()
rd_o_masks, rd_n_masks, d_indices, v_indices = calculate_rd_object_and_noise_masks(target_ranges,
target_velocities,
num_fts,
dataset.num_ramps_per_packet)
aoa_o_masks, aoa_n_masks = calculate_aoa_object_and_noise_masks(target_ranges, ta, num_fts,
num_angle_fft_bins)
target_angles.append({'d': d_indices, 'v': v_indices, 'a': ta})
rd_object_masks.append(rd_o_masks)
rd_noise_masks.append(rd_n_masks)
aoa_object_masks.append(aoa_o_masks)
aoa_noise_masks.append(aoa_n_masks)
rd_object_masks = np.array(rd_object_masks)
rd_noise_masks = np.array(rd_noise_masks)
aoa_object_masks = np.array(aoa_object_masks)
aoa_noise_masks = np.array(aoa_noise_masks)
aoa_original = np.array(aoa_original)
aoa_interf = np.array(aoa_interf)
aoa_clean = np.array(aoa_clean)
aoa_zero_mitigation = np.array(aoa_zero_mitigation)
x = aoa_interf
y = aoa_clean
filter_mask = np.ones((x.shape[0],), dtype=int)
interference_mask = filter_mask
return x, y, aoa_clean, aoa_original, aoa_interf, filter_mask, interference_mask, aoa_zero_mitigation, rd_object_masks,\
rd_noise_masks, aoa_object_masks, aoa_noise_masks, target_angles
class DataSource(Enum):
DENOISE_REAL_IMAG_RAMP = load_data_for_denoising_ri_ramps
DENOISE_REAL_IMAG_RD = load_data_for_denoising_ri_range_doppler_map
DENOISE_REAL_IMAG_AOA = load_data_for_denoising_ri_angle_map
DENOISE_LOG_MAG_RD = load_data_for_denoising_log_mag_range_doppler_map
@staticmethod
def from_name(value):
if value == DataSource.DENOISE_REAL_IMAG_RAMP.__name__:
return DataSource.DENOISE_REAL_IMAG_RAMP
elif value == DataSource.DENOISE_REAL_IMAG_RD.__name__:
return DataSource.DENOISE_REAL_IMAG_RD
elif value == DataSource.DENOISE_REAL_IMAG_AOA.__name__:
return DataSource.DENOISE_REAL_IMAG_AOA
elif value == DataSource.DENOISE_LOG_MAG_RD.__name__:
return DataSource.DENOISE_LOG_MAG_RD
@staticmethod
def data_content(value):
if value is DataSource.DENOISE_REAL_IMAG_RAMP:
return DataContent.COMPLEX_RAMP
elif value is DataSource.DENOISE_REAL_IMAG_RD:
return DataContent.COMPLEX_PACKET_RD
elif value is DataSource.DENOISE_REAL_IMAG_AOA:
return DataContent.COMPLEX_PACKET_AOA
elif value is DataSource.DENOISE_LOG_MAG_RD:
return DataContent.REAL_PACKET_RD
class DatasetPartition(Enum):
TRAINING = 0
VALIDATION = 1
TEST = 2
@staticmethod
def mat_path_prefix(partition):
if partition is DatasetPartition.TRAINING:
return 'train'
elif partition is DatasetPartition.VALIDATION:
return 'val'
elif partition is DatasetPartition.TEST:
return 'test'
class DataContent(Enum):
COMPLEX_RAMP = 1
COMPLEX_PACKET_RD = 2
COMPLEX_PACKET_AOA = 3
REAL_PACKET_RD = 4
@staticmethod
def num_values_per_sample(data_content, num_fts, num_ramps_per_packet):
if data_content is DataContent.COMPLEX_RAMP:
return num_fts * 2
elif data_content is DataContent.COMPLEX_PACKET_RD:
return num_fts * num_ramps_per_packet * 2
elif data_content is DataContent.COMPLEX_PACKET_AOA:
return num_fts * 1024 * 2
elif data_content is DataContent.REAL_PACKET_RD:
return num_fts * num_ramps_per_packet
@staticmethod
def num_samples_per_packet(data_content, num_ramps_per_packet, num_antennas=8):
if data_content is DataContent.COMPLEX_RAMP:
return num_ramps_per_packet
elif data_content is DataContent.COMPLEX_PACKET_RD:
return 1
elif data_content is DataContent.COMPLEX_PACKET_AOA:
return 1/num_antennas
elif data_content is DataContent.REAL_PACKET_RD:
return 1
@staticmethod
def sample_shape(data_content, num_ramps_per_packet, num_fts):
if data_content is DataContent.COMPLEX_RAMP:
return 1, num_fts
elif data_content is DataContent.COMPLEX_PACKET_RD:
return num_fts, num_ramps_per_packet
elif data_content is DataContent.COMPLEX_PACKET_AOA:
return num_fts, 1024
elif data_content is DataContent.REAL_PACKET_RD:
return num_fts, num_ramps_per_packet
@staticmethod
def num_samples_for_rd_evaluation(data_content, num_ramps_per_packet):
if data_content is DataContent.COMPLEX_RAMP:
return num_ramps_per_packet
elif data_content is DataContent.COMPLEX_PACKET_RD:
return 1
elif data_content is DataContent.REAL_PACKET_RD:
return 1
else:
assert False
@staticmethod
def num_samples_for_aoa_evaluation(data_content, num_ramps_per_packet, num_channels):
if data_content is DataContent.COMPLEX_RAMP:
return num_ramps_per_packet * num_channels
elif data_content is DataContent.COMPLEX_PACKET_AOA:
return 1
else:
assert False
class RadarDataset(Dataset):
"""Radar dataset."""
# # mat_path
def __init__(self, data_source, mat_path, scaler, is_classification=False):
"""
Args:
mat_filename (string): Name of matlab mat file
"""
if os.path.isdir('./data'):
path_pref = './data'
elif os.path.isdir('../data'):
path_pref = '../data'
else:
assert False
self.mat_path = os.path.join(path_pref, 'radar-data', mat_path)
self.is_classification = is_classification
self.data_content = DataSource.data_content(data_source)
self.data_source = data_source
mat_folder_path_train = os.path.join(self.mat_path, DatasetPartition.mat_path_prefix(DatasetPartition.TRAINING))
mat_folder_path_val = os.path.join(self.mat_path, DatasetPartition.mat_path_prefix(DatasetPartition.VALIDATION))
mat_folder_path_test = os.path.join(self.mat_path, DatasetPartition.mat_path_prefix(DatasetPartition.TEST))
self.file_names = {DatasetPartition.TRAINING: os.listdir(mat_folder_path_train),
DatasetPartition.VALIDATION: os.listdir(mat_folder_path_val),
DatasetPartition.TEST: os.listdir(mat_folder_path_test)}
self.file_names[DatasetPartition.TRAINING].sort()
self.file_names[DatasetPartition.VALIDATION].sort()
self.file_names[DatasetPartition.TEST].sort()
self.sample_indices = {DatasetPartition.TRAINING: {},
DatasetPartition.VALIDATION: {},
DatasetPartition.TEST: {}}
mean_s_IF_per_file = []
var_s_IF_per_file = []
mean_s_IF_clean_per_file = []
var_s_IF_clean_per_file = []
mean_s_IF_original_per_file = []
var_s_IF_original_per_file = []
cov_s_IF_per_file = []
cov_s_IF_clean_per_file = []
num_packets_in_train_files = []
num_interfered_ramps = {DatasetPartition.TRAINING: 0,
DatasetPartition.VALIDATION: 0,
DatasetPartition.TEST: 0}
num_interfered_samples = {DatasetPartition.TRAINING: 0,
DatasetPartition.VALIDATION: 0,
DatasetPartition.TEST: 0}
self.num_samples = {DatasetPartition.TRAINING: 0,
DatasetPartition.VALIDATION: 0,
DatasetPartition.TEST: 0}
self.num_channels = {DatasetPartition.TRAINING: 0,
DatasetPartition.VALIDATION: 0,
DatasetPartition.TEST: 0}
print_()
print_('# Reading data set meta data #')
print_()
print_('Data folder: {}'.format(mat_path))
# read global config
try:
config_mat_path = os.path.join(self.mat_path, 'config.mat')
print_('Reading config from {}'.format(config_mat_path))
config = spio.loadmat(config_mat_path, squeeze_me=True)['config']
self.num_channels_per_scene = config['radar'][()]['N_ant_rx'][()]
self.num_ramps_per_packet = config['sig'][()]['N_sw'][()]
num_td_samples_per_ramp = config['sig'][()]['N_samp_per_ramp'][()]
self.num_fast_time_samples = int(num_td_samples_per_ramp / 2)
except IOError:
warnings.warn('IOError reading config.')
except KeyError:
warnings.warn(
'KeyError reading config. File does not contain config struct. Skipping config, using default values...')
except ValueError:
warnings.warn('One or more config values missing.')
self.num_samples_per_packet = DataContent.num_samples_per_packet(self.data_content, self.num_ramps_per_packet)
for partition in self.file_names.keys():
partition_path_prefix = DatasetPartition.mat_path_prefix(partition)
# read partition-specific config
try:
config_mat_path = os.path.join(self.mat_path, partition_path_prefix + '-config.mat')
print_('Reading partition-config from {}'.format(config_mat_path))
config = spio.loadmat(config_mat_path, squeeze_me=True)['part_config']
self.num_channels[partition] = config['num_ds_channels'][()]
except IOError:
warnings.warn('IOError reading config.')
except KeyError:
warnings.warn(
'KeyError reading config. File does not contain config struct. Skipping config, using default values...')
except ValueError:
warnings.warn('One or more config values missing.')
for file_name in self.file_names[partition]:
file_rel_path = os.path.join(partition_path_prefix, file_name)
num_packets_in_file_str = file_name[file_name.find('_p') + 2: file_name.find('_c')]
num_channels_in_file_str = file_name[file_name.find('_c') + 2: file_name.find('_i')]
try:
num_packets_in_file = int(num_packets_in_file_str)
num_channels_in_file = int(num_channels_in_file_str)
assert(self.num_channels[partition] == num_channels_in_file)
num_samples_in_file = int(num_packets_in_file * num_channels_in_file * self.num_samples_per_packet)
self.sample_indices[partition][file_name] = (self.num_samples[partition], self.num_samples[partition] + num_samples_in_file - 1)
self.num_samples[partition] += num_samples_in_file
except ValueError:
warnings.warn('Could not find number of packets contained in file {}'.format(file_rel_path))
print_('Skipping file {}. Num packets missing.'.format(file_name))
continue
try:
print_('Loading {}'.format(file_rel_path))
out = spio.loadmat(os.path.join(self.mat_path, file_rel_path), squeeze_me=True)['out']
except IOError:
warnings.warn('IOError reading file {}. Skipping file.'.format(file_rel_path))
continue
except KeyError:
warnings.warn('KeyError reading file {}. File does not contain out struct. Skipping file.'.format(file_rel_path))
continue
try:
num_interfered_ramps_in_file = out['num_interfered_ramps'][()]
num_interfered_ramps[partition] += num_interfered_ramps_in_file
num_interfered_samples_in_file = out['num_interfered_samples'][()]
num_interfered_samples[partition] += num_interfered_samples_in_file
except ValueError:
warnings.warn('No info to num interfered ramps for file {}'.format(file_rel_path))
if partition is DatasetPartition.TRAINING:
num_packets_in_train_files.append(num_packets_in_file)
try:
mean_s_IF_per_file.append(out['mean_s_IF'][()])
var_s_IF_per_file.append(out['var_s_IF'][()])
mean_s_IF_clean_per_file.append(out['mean_s_IF_clean'][()])
var_s_IF_clean_per_file.append(out['var_s_IF_clean'][()])
mean_s_IF_original_per_file.append(out['mean_s_IF_clean_noise'][()])
var_s_IF_original_per_file.append(out['var_s_IF_clean_noise'][()])
except ValueError:
warnings.warn('No mean / var data for file {}'.format(file_rel_path))
try:
cov_s_IF_per_file.append(out['cov_s_IF'][()])
cov_s_IF_clean_per_file.append(out['cov_s_IF_clean'][()])
except ValueError:
warnings.warn('No cov data for file {}'.format(file_rel_path))
total_num_ramps = int(self.num_samples[partition] / self.num_samples_per_packet * self.num_ramps_per_packet)
if total_num_ramps > 0:
print_('Number interfered ramps for {}: {}/{} ({:.2f}%)'.format(partition,
num_interfered_ramps[partition],
total_num_ramps,
100 / total_num_ramps * num_interfered_ramps[partition]))
total_num_td_samples = self.num_fast_time_samples*2*self.num_ramps_per_packet*int(self.num_samples[partition] / self.num_samples_per_packet)
if total_num_td_samples > 0:
print_('Number interfered time domain samples for {}: {}/{} ({:.2f}%)'.format(partition,
num_interfered_samples[partition],
total_num_td_samples,
100 / total_num_td_samples * num_interfered_samples[partition]))
print_()
self.num_values_per_sample = DataContent.num_values_per_sample(self.data_content, self.num_fast_time_samples, self.num_ramps_per_packet)
self.partition_indices = {DatasetPartition.TRAINING: list(range(0, self.num_samples[DatasetPartition.TRAINING])),
DatasetPartition.VALIDATION: list(range(0, self.num_samples[DatasetPartition.VALIDATION])),
DatasetPartition.TEST: list(range(0, self.num_samples[DatasetPartition.TEST]))}
self.active_partition = DatasetPartition.TRAINING
self.cached_samples = None
self.cached_sample_indices = []
self.scaler_x = None
self.scaler_y = None
if scaler is Scaler.STD_SCALER: # Attention: clean / original depends on data source target!!!!
self.fit_std_scaler(mean_s_IF_per_file, var_s_IF_per_file,
mean_s_IF_clean_per_file, var_s_IF_clean_per_file, # mean_s_IF_original_per_file, var_s_IF_original_per_file,
num_packets_in_train_files, scaler)
elif scaler is Scaler.COMPLEX_FEATURE_SCALER:
self.fit_complex_feature_scaler(mean_s_IF_per_file, cov_s_IF_per_file,
mean_s_IF_clean_per_file, cov_s_IF_clean_per_file,
num_packets_in_train_files, scaler)
def fit_std_scaler(self, mean_x_per_file, var_x_per_file,
mean_y_per_file, var_y_per_file,
num_packets_per_file, scaler):
if not all(x == num_packets_per_file[0] for x in num_packets_per_file):
            warnings.warn('Not all files contain the same number of packets. Scaling depends on this!!')
assert (len(mean_x_per_file) == len(self.file_names[DatasetPartition.TRAINING]))
self.scaler_x = scaler()
num_files = len(mean_x_per_file)
avg_num_packets_in_file = int(np.mean(num_packets_per_file))
num_packets_for_scaler_fitting = np.sum(num_packets_per_file)
mean_x = np.mean(mean_x_per_file)
# calculate total variance from subset means and variances of same sample length
# see https://stats.stackexchange.com/questions/10441/how-to-calculate-the-variance-of-a-partition-of-variables
var_x = (avg_num_packets_in_file - 1) / (num_packets_for_scaler_fitting - 1) * \
(np.sum(var_x_per_file + (
avg_num_packets_in_file * (num_files - 1) / (avg_num_packets_in_file - 1)) * np.var(
mean_x_per_file)))
self.scaler_x.mean = mean_x
self.scaler_x.var = var_x
if not self.is_classification:
self.scaler_y = scaler()
mean_y = np.mean(mean_y_per_file)
var_y = (avg_num_packets_in_file - 1) / (num_packets_for_scaler_fitting - 1) * \
(np.sum(var_y_per_file + (avg_num_packets_in_file * (num_files - 1) / (
avg_num_packets_in_file - 1)) * np.var(mean_y_per_file)))
self.scaler_y.mean = mean_y
self.scaler_y.var = var_y
def fit_complex_feature_scaler(self, mean_x_per_file, cov_x_per_file,
mean_y_per_file, cov_y_per_file,
num_packets_per_file, scaler):
if not all(x == num_packets_per_file[0] for x in num_packets_per_file):
            warnings.warn('Not all files contain the same number of packets; the scaler fit assumes equal packet counts per file.')
assert (len(mean_x_per_file) == len(self.file_names[DatasetPartition.TRAINING]))
self.scaler_x = scaler()
num_files = len(mean_x_per_file)
avg_num_packets_in_file = int(np.mean(num_packets_per_file))
num_packets_for_scaler_fitting = np.sum(num_packets_per_file)
mean_x = np.mean(mean_x_per_file)
# calculate total variance from subset means and variances of same sample length
# see https://stats.stackexchange.com/questions/10441/how-to-calculate-the-variance-of-a-partition-of-variables
cov_x = (avg_num_packets_in_file - 1) / (num_packets_for_scaler_fitting - 1) * \
(np.sum(cov_x_per_file + (avg_num_packets_in_file * (num_files - 1) / (avg_num_packets_in_file - 1)) * np.var(mean_x_per_file), axis=0))
sr_cov = scipy.linalg.sqrtm(cov_x)
inv_sr_cov = np.linalg.inv(sr_cov)
self.scaler_x.mean_complex = mean_x
self.scaler_x.sr_cov = sr_cov
self.scaler_x.inv_sr_cov = inv_sr_cov
if not self.is_classification:
self.scaler_y = scaler()
mean_y = np.mean(mean_y_per_file)
cov_y = (avg_num_packets_in_file - 1) / (num_packets_for_scaler_fitting - 1) * \
(np.sum(cov_y_per_file + (avg_num_packets_in_file * (num_files - 1) / (
avg_num_packets_in_file - 1)) * np.var(mean_y_per_file), axis=0))
sr_cov = scipy.linalg.sqrtm(cov_y)
inv_sr_cov = np.linalg.inv(sr_cov)
self.scaler_y.mean_complex = mean_y
self.scaler_y.sr_cov = sr_cov
self.scaler_y.inv_sr_cov = inv_sr_cov
def get_sample_start_and_end_indices_per_file(self):
if len(self.partition_indices[self.active_partition]) == 0:
return []
sample_start_end_indices = []
for file_name in self.file_names[self.active_partition]:
sample_start_end_indices.append(self.sample_indices[self.active_partition][file_name])
return sample_start_end_indices
def get_num_channels(self):
return self.num_channels[self.active_partition]
def __len__(self):
return len(self.partition_indices[self.active_partition])
def __getitem__(self, idx):
x, y, _, _, _, filter_mask, _, _, rd_object_masks, rd_noise_masks, aoa_object_masks, aoa_noise_masks, _ = self.load_data_for_sample_from_cache_or_disk(idx)
x = self.scale(x, is_y=False)[0]
y = self.scale(y, is_y=True)[0]
x = complex_to_format(self.data_content, x)
y = complex_to_format(self.data_content, y)
x = torch.tensor(x, dtype=torch.float)
y = torch.tensor(y, dtype=torch.float)
filter_mask = torch.tensor(filter_mask.flatten()[0], dtype=torch.uint8)
object_mask = torch.zeros(y.size())
if self.data_source in [DataSource.DENOISE_REAL_IMAG_RD, DataSource.DENOISE_LOG_MAG_RD]:
object_mask = torch.tensor(rd_object_masks[0])
elif self.data_source is DataSource.DENOISE_REAL_IMAG_AOA:
object_mask = torch.tensor(aoa_object_masks[0])
noise_mask = torch.ones(y.size())
if self.data_source in [DataSource.DENOISE_REAL_IMAG_RD, DataSource.DENOISE_LOG_MAG_RD]:
noise_mask = torch.tensor(rd_noise_masks[0])
elif self.data_source is DataSource.DENOISE_REAL_IMAG_AOA:
noise_mask = torch.tensor(aoa_noise_masks[0])
return x, y, filter_mask, object_mask, noise_mask
    def load_data_for_sample_from_cache_or_disk(self, sample_idx):
        # Returns a 13-tuple:
        # (x, y, clean, original (clean + noise), interfered, filter_mask, interference_mask,
        #  zero-substituted time-domain data, rd_object_masks, rd_noise_masks,
        #  aoa_object_masks, aoa_noise_masks, target_angles)
try:
cached_sample_idx = self.cached_sample_indices.index(sample_idx)
return self.sample_at_index_from_cache(cached_sample_idx)
except ValueError:
pass
# item not cached --> load from file
for fn in self.file_names[self.active_partition]:
(start_i, end_i) = self.sample_indices[self.active_partition][fn]
if start_i <= sample_idx <= end_i:
file_name = fn
start_idx = start_i
end_idx = end_i
break
out = spio.loadmat(os.path.join(self.mat_path, DatasetPartition.mat_path_prefix(self.active_partition), file_name), squeeze_me=True)['out']
self.cached_samples = self.data_source(out, self)
self.cached_sample_indices = list(range(start_idx, end_idx + 1))
cached_sample_idx = self.cached_sample_indices.index(sample_idx)
return self.sample_at_index_from_cache(cached_sample_idx)
def sample_at_index_from_cache(self, sample_idx):
sample_shape = DataContent.sample_shape(self.data_content, self.num_ramps_per_packet, self.num_fast_time_samples)
sample_shape_batch = (1, sample_shape[0], sample_shape[1])
target_idx = min(sample_idx, int(sample_idx / self.num_samples_per_packet))
return (self.cached_samples[0][sample_idx:sample_idx+1].reshape(sample_shape_batch),
self.cached_samples[1][sample_idx:sample_idx+1].reshape(sample_shape_batch),
self.cached_samples[2][sample_idx:sample_idx+1].reshape(sample_shape_batch),
self.cached_samples[3][sample_idx:sample_idx+1].reshape(sample_shape_batch),
self.cached_samples[4][sample_idx:sample_idx+1].reshape(sample_shape_batch),
self.cached_samples[5][sample_idx:sample_idx+1].reshape(1),
self.cached_samples[6][sample_idx:sample_idx+1].reshape(1),
self.cached_samples[7][sample_idx:sample_idx+1].reshape(sample_shape_batch),
self.cached_samples[8][target_idx:target_idx + 1].reshape((1, self.num_fast_time_samples, self.num_ramps_per_packet)),
self.cached_samples[9][target_idx:target_idx+1].reshape((1, self.num_fast_time_samples, self.num_ramps_per_packet)),
self.cached_samples[10][target_idx:target_idx + 1].reshape((1, self.num_fast_time_samples, num_angle_fft_bins)),
self.cached_samples[11][target_idx:target_idx + 1].reshape((1, self.num_fast_time_samples, num_angle_fft_bins)),
self.cached_samples[12][target_idx:target_idx + 1])
def get_scene_rd_object_and_noise_masks(self, scene_idx):
num_samples = DataContent.num_samples_for_rd_evaluation(self.data_content, self.num_ramps_per_packet)
scene_start_idx = scene_idx * num_samples
_, _, _, _, _, _, _, _, rd_object_masks, rd_noise_masks, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_start_idx)
return rd_object_masks[0].astype(bool), rd_noise_masks[0].astype(bool)
def get_scene_rd_clean(self, scene_idx):
num_samples = DataContent.num_samples_for_rd_evaluation(self.data_content, self.num_ramps_per_packet)
scene_start_idx = scene_idx * num_samples
scene_end_idx = (scene_idx + 1) * num_samples
_, _, packet_data, _, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_start_idx)
for i in range(scene_start_idx+1, scene_end_idx):
_, _, packet_data_i, _, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data = np.vstack((packet_data, packet_data_i))
if self.data_source == DataSource.DENOISE_REAL_IMAG_RAMP:
packet_data = calculate_velocity_fft(packet_data[:, 0, :])
else:
packet_data = packet_data[0]
return packet_data
def get_scene_rd_original(self, scene_idx):
num_samples = DataContent.num_samples_for_rd_evaluation(self.data_content, self.num_ramps_per_packet)
scene_start_idx = scene_idx * num_samples
scene_end_idx = (scene_idx + 1) * num_samples
_, _, _, packet_data, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_start_idx)
for i in range(scene_start_idx + 1, scene_end_idx):
_, _, _, packet_data_i, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data = np.vstack((packet_data, packet_data_i))
if self.data_source == DataSource.DENOISE_REAL_IMAG_RAMP:
packet_data = calculate_velocity_fft(packet_data[:, 0, :])
else:
packet_data = packet_data[0]
return packet_data
def get_scene_rd_interf(self, scene_idx):
num_samples = DataContent.num_samples_for_rd_evaluation(self.data_content, self.num_ramps_per_packet)
scene_start_idx = scene_idx * num_samples
scene_end_idx = (scene_idx + 1) * num_samples
_, _, _, _, packet_data, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_start_idx)
for i in range(scene_start_idx + 1, scene_end_idx):
_, _, _, _, packet_data_i, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data = np.vstack((packet_data, packet_data_i))
if self.data_source == DataSource.DENOISE_REAL_IMAG_RAMP:
packet_data = calculate_velocity_fft(packet_data[:, 0, :])
else:
packet_data = packet_data[0]
return packet_data
def get_scene_rd_zero_substitude_in_time_domain(self, scene_idx):
num_samples = DataContent.num_samples_for_rd_evaluation(self.data_content, self.num_ramps_per_packet)
scene_start_idx = scene_idx * num_samples
scene_end_idx = (scene_idx + 1) * num_samples
_, _, _, _, _, _, _, packet_data, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_start_idx)
for i in range(scene_start_idx + 1, scene_end_idx):
_, _, _, _, _, _, _, packet_data_i, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data = np.vstack((packet_data, packet_data_i))
if self.data_source == DataSource.DENOISE_REAL_IMAG_RAMP:
packet_data = calculate_velocity_fft(packet_data[:, 0, :])
else:
packet_data = packet_data[0]
return packet_data
def get_scene_cr_object_and_noise_masks(self, scene_idx, d_idx, v_idx):
num_channels = self.get_num_channels()
num_samples = DataContent.num_samples_for_rd_evaluation(self.data_content, self.num_ramps_per_packet)
scene_start_idx = scene_idx * num_samples
assert (num_channels > 1)
_, _, _, _, _, _, _, _, rd_object_mask, rd_noise_mask, aoa_object_masks, aoa_noise_masks, target_angles = self.load_data_for_sample_from_cache_or_disk(
scene_start_idx)
cr_object_masks, cr_noise_masks = calculate_cr_object_and_noise_masks(target_angles[0], d_idx, v_idx, num_angle_fft_bins)
return cr_object_masks.astype(bool), cr_noise_masks.astype(bool)
def get_scene_aoa_object_and_noise_masks(self, scene_idx):
num_channels = self.get_num_channels()
num_samples = DataContent.num_samples_for_aoa_evaluation(self.data_content, self.num_ramps_per_packet, num_channels)
scene_start_idx = scene_idx * num_samples
assert (num_channels > 1)
_, _, _, _, _, _, _, _, _, _, aoa_object_masks, aoa_noise_masks, _ = self.load_data_for_sample_from_cache_or_disk(
scene_start_idx)
return aoa_object_masks[0].astype(bool), aoa_noise_masks[0].astype(bool)
def get_scene_aoa_clean(self, scene_idx):
if self.data_source == DataSource.DENOISE_REAL_IMAG_RAMP:
assert(self.num_channels[self.active_partition] > 1)
num_samples = DataContent.num_samples_for_aoa_evaluation(self.data_content, self.num_ramps_per_packet, self.num_channels[self.active_partition])
scene_start_idx = scene_idx * num_samples
scene_end_idx = (scene_idx + 1) * num_samples
packet_data = np.zeros((self.num_fast_time_samples, self.num_channels[self.active_partition]), dtype=np.complex128)
channel = 0
for i in range(scene_start_idx, scene_end_idx, self.num_ramps_per_packet):
_, _, packet_data_i, _, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data[:, channel] = packet_data_i.reshape(packet_data.shape[0])
channel += 1
packet_data = calculate_angle_fft(packet_data)
else:
_, _, packet_data, _, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_idx)
packet_data = packet_data[0]
return packet_data
def get_scene_aoa_original(self, scene_idx):
if self.data_source == DataSource.DENOISE_REAL_IMAG_RAMP:
assert (self.num_channels[self.active_partition] > 1)
num_samples = DataContent.num_samples_for_aoa_evaluation(self.data_content, self.num_ramps_per_packet, self.num_channels[self.active_partition])
scene_start_idx = scene_idx * num_samples
scene_end_idx = (scene_idx + 1) * num_samples
packet_data = np.zeros((self.num_fast_time_samples, self.num_channels[self.active_partition]), dtype=np.complex128)
channel = 0
for i in range(scene_start_idx, scene_end_idx, self.num_ramps_per_packet):
_, _, _, packet_data_i, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data[:, channel] = packet_data_i.reshape(packet_data.shape[0])
channel += 1
packet_data = calculate_angle_fft(packet_data)
else:
_, _, _, packet_data, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_idx)
packet_data = packet_data[0]
return packet_data
def get_scene_aoa_interf(self, scene_idx):
if self.data_source == DataSource.DENOISE_REAL_IMAG_RAMP:
assert (self.num_channels[self.active_partition] > 1)
num_samples = DataContent.num_samples_for_aoa_evaluation(self.data_content, self.num_ramps_per_packet, self.num_channels[self.active_partition])
scene_start_idx = scene_idx * num_samples
scene_end_idx = (scene_idx + 1) * num_samples
packet_data = np.zeros((self.num_fast_time_samples, self.num_channels[self.active_partition]), dtype=np.complex128)
channel = 0
for i in range(scene_start_idx, scene_end_idx, self.num_ramps_per_packet):
_, _, _, _, packet_data_i, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data[:, channel] = packet_data_i.reshape(packet_data.shape[0])
channel += 1
packet_data = calculate_angle_fft(packet_data)
else:
_, _, _, _, packet_data, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_idx)
packet_data = packet_data[0]
return packet_data
def get_scene_aoa_zero_substitude_in_time_domain(self, scene_idx):
if self.data_source == DataSource.DENOISE_REAL_IMAG_RAMP:
assert (self.num_channels[self.active_partition] > 1)
num_samples = DataContent.num_samples_for_aoa_evaluation(self.data_content, self.num_ramps_per_packet, self.num_channels[self.active_partition])
scene_start_idx = scene_idx * num_samples
scene_end_idx = (scene_idx + 1) * num_samples
packet_data = np.zeros((self.num_fast_time_samples, self.num_channels[self.active_partition]), dtype=np.complex128)
channel = 0
for i in range(scene_start_idx, scene_end_idx, self.num_ramps_per_packet):
_, _, _, _, _, _, _, packet_data_i, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data[:, channel] = packet_data_i.reshape(packet_data.shape[0])
channel += 1
packet_data = calculate_angle_fft(packet_data)
else:
_, _, _, _, _, _, _, packet_data, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_idx)
packet_data = packet_data[0]
return packet_data
def get_sample_interference_mask(self, scene_idx, num_samples_per_scene):
scene_start_idx = scene_idx * num_samples_per_scene
scene_end_idx = (scene_idx + 1) * num_samples_per_scene
_, _, _, _, _, _, packet_data, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_start_idx)
for i in range(scene_start_idx+1, scene_end_idx):
_, _, _, _, _, _, packet_data_i, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data = np.vstack((packet_data, packet_data_i))
return packet_data
def get_target_original_scaled_re_im(self, scene_idx, num_samples_per_scene):
scene_start_idx = scene_idx * num_samples_per_scene
scene_end_idx = (scene_idx + 1) * num_samples_per_scene
_, _, _, packet_data, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(scene_start_idx)
for i in range(scene_start_idx+1, scene_end_idx):
_, _, _, packet_data_i, _, _, _, _, _, _, _, _, _ = self.load_data_for_sample_from_cache_or_disk(i)
packet_data = np.vstack((packet_data, packet_data_i))
packet_data = self.scale(packet_data, is_y=True)
packet_data = self.packet_complex_to_target_format(packet_data)
return packet_data
def clone_for_new_active_partition(self, partition: DatasetPartition):
clone = copy.deepcopy(self)
clone.active_partition = partition
clone.cached_samples = None
clone.cached_sample_indices = []
return clone
def inverse_scale(self, data, is_y):
if is_y and self.is_classification:
return data
if is_y:
scaler = self.scaler_y
else:
scaler = self.scaler_x
if scaler is not None:
data = scaler.inverse_transform(data)
return data
def scale(self, data, is_y):
if is_y and self.is_classification:
return data
if is_y:
scaler = self.scaler_y
else:
scaler = self.scaler_x
if scaler is not None:
data = scaler.transform(data)
return data
def packet_in_target_format_to_complex(self, packet_data, packet_idx=None):
if self.data_content is DataContent.COMPLEX_RAMP:
packet_data = packet_data[:, :, :self.num_fast_time_samples] + 1j * packet_data[:, :, self.num_fast_time_samples:]
elif self.data_content is DataContent.COMPLEX_PACKET_RD:
packet_data = packet_data[:, :, :self.num_ramps_per_packet] + 1j * packet_data[:, :, self.num_ramps_per_packet:]
elif self.data_content is DataContent.COMPLEX_PACKET_AOA:
packet_data = packet_data[:, :, :1024] + 1j * packet_data[:, :, 1024:]
elif self.data_content is DataContent.REAL_PACKET_RD:
pass
else:
assert False
return packet_data
def packet_complex_to_target_format(self, packet_data):
return complex_to_format(self.data_content, packet_data)
def complex_to_format(target_content, data):
if target_content is DataContent.COMPLEX_RAMP:
last_axis = len(data.shape)-1
data = np.concatenate((np.real(data), np.imag(data)), axis=last_axis)
elif target_content is DataContent.COMPLEX_PACKET_RD:
last_axis = len(data.shape) - 1
data = np.concatenate((np.real(data), np.imag(data)), axis=last_axis)
elif target_content is DataContent.COMPLEX_PACKET_AOA:
last_axis = len(data.shape) - 1
data = np.concatenate((np.real(data), np.imag(data)), axis=last_axis)
elif target_content is DataContent.REAL_PACKET_RD:
pass
else:
assert False
return data
def calculate_rd_object_and_noise_masks(target_distances, target_velocities, num_fts, num_ramps):
d_vec = d_vec_fft2(num_fts)
v_vec = v_vec_fft2(num_ramps)
return calculate_object_and_noise_masks(target_distances, target_velocities, num_fts, num_ramps, v_vec, d_vec)
def calculate_aoa_object_and_noise_masks(target_distances, target_angles, num_fts, num_fft3_bins):
a_vec = np.arcsin(1 * np.linspace(-1, 1, num_angle_fft_bins))
d_vec = np.linspace(0, 1, num_angle_fft_bins) * d_max
obj_masks, noise_masks, _, _ = calculate_object_and_noise_masks(target_distances, target_angles, num_fts, num_fft3_bins, a_vec, d_vec)
return obj_masks, noise_masks
def calculate_object_and_noise_masks(target_rows, target_columns, shape0, shape1, x_vec, y_vec, noise_radius=3):
obj_mask = np.zeros((shape0, shape1), dtype=np.uint8)
noise_mask = np.ones((shape0, shape1), dtype=np.uint8)
target_range_indices = []
for r in target_rows:
target_range_indices.append(np.argmin(np.abs(y_vec - r)))
target_velocity_indices = []
for v in target_columns:
target_velocity_indices.append(np.argmin(np.abs(x_vec - v)))
obj_mask[target_range_indices, target_velocity_indices] = 1
for i in range(len(target_range_indices)):
r = target_range_indices[i]
v = target_velocity_indices[i]
r_min = max(r - noise_radius, 0)
r_max = min(r + noise_radius + 1, shape0)
v_min = max(v - noise_radius, 0)
v_max = min(v + noise_radius + 1, shape1)
noise_mask[r_min:r_max, v_min:v_max] = False
return obj_mask, noise_mask, target_range_indices, target_velocity_indices
def calculate_cr_object_and_noise_masks(target_angles, d_idx, v_idx, num_angle_bins, noise_radius=70):
obj_mask = np.zeros((1, num_angle_bins), dtype=np.uint8)
noise_mask = np.ones((1, num_angle_bins), dtype=np.uint8)
a_vec = np.arcsin(1 * np.linspace(-1, 1, num_angle_fft_bins))
target_cross_range_index = None
d_indices = target_angles['d']
v_indices = target_angles['v']
angles = target_angles['a']
for i in range(len(angles)):
if d_indices[i] == d_idx and v_indices[i] == v_idx:
target_cross_range_index = np.argmin(np.abs(a_vec - angles[i]))
break
if target_cross_range_index is None:
assert False
obj_mask[0, target_cross_range_index] = 1
cr_min = max(target_cross_range_index - noise_radius, 0)
cr_max = min(target_cross_range_index + noise_radius + 1, num_angle_bins)
noise_mask[0, cr_min:cr_max] = False
return obj_mask, noise_mask
```
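The pooled-statistics computation in `fit_std_scaler` combines per-file means and variances into global values, following the stats.stackexchange reference cited in the comments. Below is a minimal standalone sketch of that pooling for the scalar case, using the standard within/between sum-of-squares decomposition; it is illustrative only (the function name and the equal-packets-per-file assumption are mine, not the repository's) and may differ in algebraic form from the expression used above.
```python
import numpy as np

def pool_mean_and_var(means, variances, n):
    """Combine per-file sample means and (ddof=1) variances, assuming n packets per file."""
    means = np.asarray(means, dtype=float)
    variances = np.asarray(variances, dtype=float)
    g = len(means)                                    # number of files
    total_mean = means.mean()
    within = np.sum((n - 1) * variances)              # spread inside each file
    between = np.sum(n * (means - total_mean) ** 2)   # spread of the per-file means
    total_var = (within + between) / (g * n - 1)
    return total_mean, total_var

# Example: two files with 4 packets each
mean, var = pool_mean_and_var([0.0, 2.0], [1.0, 1.5], n=4)
```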
#### File: im_ricnn/models/ri_cnn_rd.py
```python
import torch
import torch.nn as nn
from run_scripts import tensorboard_writer
class RICNN_RD(nn.Module):
def __init__(self, num_conv_layer, num_filters,
filter_size, padding_size=None, use_batch_norm=None, input_size=(2, 1024, 128)):
super(RICNN_RD, self).__init__()
self.tensorboardx_logging_active = False
self.forward_calls = 0
self.max_batch_size = 8
if use_batch_norm is not None:
self.use_batch_norm = use_batch_norm
else:
self.use_batch_norm = True
if num_conv_layer is not None:
self.num_conv_layer = num_conv_layer
else:
self.num_conv_layer = 6
if filter_size is not None:
self.filter_size = filter_size
else:
self.filter_size = (3, 3)
if padding_size is not None:
self.padding_size = padding_size
else:
x_padding_same = int(self.filter_size[0]/2)
y_padding_same = int(self.filter_size[1]/2)
self.padding_size = (x_padding_same, y_padding_same)
if num_filters is not None:
self.num_filters = num_filters
else:
self.num_filters = 16
self.input_size = input_size
self.convolutions = nn.ModuleList()
in_channels = input_size[0]
layer = nn.Sequential(
nn.Conv2d(in_channels, self.num_filters, kernel_size=self.filter_size, stride=1, padding=self.padding_size),
nn.ReLU())
self.convolutions.append(layer)
for c in range(self.num_conv_layer-2):
layer = nn.Sequential(
nn.Conv2d(self.num_filters, self.num_filters, kernel_size=self.filter_size, stride=1, padding=self.padding_size),
nn.BatchNorm2d(self.num_filters),
nn.ReLU())
self.convolutions.append(layer)
layer = nn.Sequential(
nn.Conv2d(self.num_filters, in_channels, kernel_size=self.filter_size, stride=1, padding=self.padding_size))
self.convolutions.append(layer)
def forward(self, x):
num_channels = self.input_size[0]
num_fts = self.input_size[1]
num_ramps = self.input_size[2]
# conv layer
out = x.reshape((-1, 1, num_fts, num_channels * num_ramps))
if num_channels == 2:
out = torch.cat((out[:, :, :, :num_ramps], out[:, :, :, num_ramps:]), 1)
for c in range(self.num_conv_layer):
out = self.convolutions[c](out)
if self.tensorboardx_logging_active:
tensorboard_writer.add_histogram('conv.{}'.format(c), out.detach().cpu().numpy(), self.forward_calls)
if num_channels == 2:
out = torch.cat((out[:, 0], out[:, 1]), 2)
else:
out = out[:, 0]
self.forward_calls += 1
return out
def reset(self):
for c in range(self.num_conv_layer):
for cc in list(self.convolutions[c]):
try:
cc.reset_parameters()
except:
pass
def set_tensorboardx_logging_active(self, active):
self.tensorboardx_logging_active = active
class MAG_CNN_RD(RICNN_RD):
def __init__(self, num_conv_layer, num_filters, filter_size):
super(MAG_CNN_RD, self).__init__(num_conv_layer, num_filters, filter_size,
padding_size=None, use_batch_norm=None, input_size=(1, 1024, 128))
```
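A quick, hedged shape check for `RICNN_RD` (not part of the repository; the import path is assumed relative to the `im_ricnn` root, and the random tensor only stands in for the real/imaginary RD maps produced by `RadarDataset`):
```python
import torch
from models.ri_cnn_rd import RICNN_RD  # import path assumed from the file header above

model = RICNN_RD(num_conv_layer=6, num_filters=16, filter_size=(3, 3))
x = torch.randn(2, 2, 1024, 128)   # (batch, real/imag channels, fast-time bins, ramps)
with torch.no_grad():
    y = model(x)
print(y.shape)                     # torch.Size([2, 1024, 256]): real and imaginary halves concatenated
```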
#### File: im_ricnn/models/ri_cnn_rp.py
```python
import torch
import torch.nn as nn
# uses conv layers only
# conv+act -> conv+bn+act (num_conv_layer-2 times) -> conv
class RICNN_RP(nn.Module):
def __init__(self, num_conv_layer, num_filters,
filter_size, padding_size=None, use_batch_norm=None, input_size=(2, 1, 1024)):
super(RICNN_RP, self).__init__()
self.max_batch_size = 128
if use_batch_norm is not None:
self.use_batch_norm = use_batch_norm
else:
self.use_batch_norm = True
if num_conv_layer is not None:
self.num_conv_layer = num_conv_layer
else:
self.num_conv_layer = 6
if filter_size is not None:
self.filter_size = filter_size
else:
self.filter_size = (1, 25)
if padding_size is not None:
self.padding_size = padding_size
else:
x_padding_same = int(self.filter_size[0]/2)
y_padding_same = int(self.filter_size[1]/2)
self.padding_size = (x_padding_same, y_padding_same)
if num_filters is not None:
self.num_filters = num_filters
else:
self.num_filters = 16
self.input_size = input_size
self.convolutions = nn.ModuleList()
in_channels = input_size[0]
layer = nn.Sequential(
nn.Conv2d(in_channels, self.num_filters, kernel_size=self.filter_size, stride=1, padding=self.padding_size),
nn.ReLU())
self.convolutions.append(layer)
for c in range(self.num_conv_layer-2):
layer = nn.Sequential(
nn.Conv2d(self.num_filters, self.num_filters, kernel_size=self.filter_size, stride=1, padding=self.padding_size),
nn.BatchNorm2d(self.num_filters),
nn.ReLU())
self.convolutions.append(layer)
layer = nn.Sequential(
nn.Conv2d(self.num_filters, in_channels, kernel_size=self.filter_size, stride=1, padding=self.padding_size))
self.convolutions.append(layer)
def forward(self, x):
num_re_samples = self.input_size[2]
# conv layer
out = x.reshape((-1, 1, 2 * num_re_samples))
out = torch.stack((out[:, :, :num_re_samples], out[:, :, num_re_samples:]), 1)
for c in range(self.num_conv_layer):
out = self.convolutions[c](out)
out = torch.cat((out[:, 0], out[:, 1]), 2).reshape(-1, 1, 2 * num_re_samples)
return out
def reset(self):
for c in range(self.num_conv_layer):
for cc in list(self.convolutions[c]):
try:
cc.reset_parameters()
except:
pass
```
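The analogous check for the per-ramp model `RICNN_RP`, which consumes single ramps with the real and imaginary parts concatenated along the last axis (again illustrative only; import path assumed):
```python
import torch
from models.ri_cnn_rp import RICNN_RP  # import path assumed from the file header above

model = RICNN_RP(num_conv_layer=6, num_filters=16, filter_size=(1, 25))
x = torch.randn(4, 2048)           # 4 ramps, each 1024 real followed by 1024 imaginary samples
with torch.no_grad():
    y = model(x)
print(y.shape)                     # torch.Size([4, 1, 2048])
```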
#### File: im_ricnn/run_scripts/run_evaluation.py
```python
import warnings
from enum import Enum
import torch
from data_models.scaler import Scaler
from datasets.radar_dataset import RadarDataset, DatasetPartition, DataContent, DataSource
from run_scripts import REPO_BASE_DIR, device
from training.rd_evaluation import evaluate_rd
class PretrainedModels(Enum):
MODEL_A = 0
MODEL_D = 1
@staticmethod
def model_path(model):
if model == PretrainedModels.MODEL_A:
return "modelA"
elif model == PretrainedModels.MODEL_D:
return "modelD"
def run_evaluation():
data_source = DataSource.DENOISE_REAL_IMAG_RD
mat_path = 'sim_200x1+25x8+25x8_1-3i'
scaler = Scaler.COMPLEX_FEATURE_SCALER
model = PretrainedModels.MODEL_D # choose pre-trained model {PretrainedModels.MODEL_A, PretrainedModels.MODEL_D}
dataset = RadarDataset(data_source, mat_path, scaler)
test_dataset = dataset.clone_for_new_active_partition(DatasetPartition.TEST)
if len(test_dataset) <= 0:
warnings.warn('Test data set empty.')
return
try:
model = torch.load(REPO_BASE_DIR + "/results/trained_models/" + PretrainedModels.model_path(model)).to(device)
except FileNotFoundError:
warnings.warn('Model not found.')
return
if dataset.data_content is DataContent.COMPLEX_PACKET_RD or dataset.data_content is DataContent.COMPLEX_RAMP:
evaluate_rd(model, test_dataset, 'evaluation_test_rd')
if __name__ == '__main__':
    run_evaluation()
```
#### File: im_ricnn/training/sample_hyperparameters.py
```python
import math
import torch
import numpy as np
from data_models.parameter_configuration import ParameterConfiguration
from datasets.radar_dataset import RadarDataset
from run_scripts import task_id
from utils.distribution import loguniform
def select_and_sample_hyperparameter_config_for_cnn(configurations):
conf = configurations[task_id % len(configurations)]
hyperparameter_config = ParameterConfiguration(
optimization_algo=torch.optim.Adam,
criterion=conf['criterion'],
scheduler_partial=None,
num_model_initializations=1,
scaler=conf['scaler'],
input_size=2028,
output_size=2048,
num_epochs=conf['num_epochs'],
input_data_source=conf['data_source'],
mat_path=conf['mat_path'],
model=conf['model'](num_conv_layer=conf['num_conv_layer'], num_filters=conf['num_filters'], filter_size=conf['filter_size']))
batch_size_exp_lower_limit = conf['batch_size_exp_lower_limit']
batch_size_exp_upper_limit = conf['batch_size_exp_upper_limit']
learning_rate_lower_limit = conf['learning_rate_lower_limit']
learning_rate_upper_limit = conf['learning_rate_upper_limit']
dataset = RadarDataset(hyperparameter_config.input_data_source,
hyperparameter_config.mat_path,
hyperparameter_config.scaler,
is_classification=False)
hyperparameter_config.input_size = dataset.num_values_per_sample
# learning rate #
if learning_rate_lower_limit == learning_rate_upper_limit:
lr = learning_rate_lower_limit
else:
lr = loguniform(learning_rate_lower_limit, learning_rate_upper_limit, 1)[0]
assert (learning_rate_lower_limit <= lr <= learning_rate_upper_limit)
hyperparameter_config.learning_rate = lr
# batch size #
if batch_size_exp_lower_limit == batch_size_exp_upper_limit:
batch_size = int(math.pow(2, batch_size_exp_lower_limit))
else:
batch_size = int(math.pow(2, int(np.random.randint(batch_size_exp_lower_limit, batch_size_exp_upper_limit, 1))))
if batch_size > hyperparameter_config.model.max_batch_size:
batch_size = hyperparameter_config.model.max_batch_size
hyperparameter_config.batch_size = batch_size
return dataset, hyperparameter_config
```
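`loguniform` is imported from `utils.distribution`, which is not shown here. A plausible minimal stand-in with the behaviour assumed above (uniform sampling in log space between the two limits) would be:
```python
import numpy as np

def loguniform(low, high, size):
    """Draw `size` samples uniformly in log space between `low` and `high` (assumed behaviour)."""
    return np.exp(np.random.uniform(np.log(low), np.log(high), size))

lr = loguniform(1e-5, 1e-2, 1)[0]  # e.g. one learning-rate draw between the configured limits
```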
#### File: im_ricnn/utils/plotting.py
```python
from utils.rd_processing import v_vec_fft2, basis_vec_fft3, d_max, calculate_cross_range_fft
from run_scripts import visualize, task_id, JOB_DIR
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection used by the surface plots below
from matplotlib2tikz import save as tikz_save
import numpy as np
ZOOM_LIMIT = 2048
FIG_SIZE = (16, 8)
FIG_SIZE_HIGH = (8, 16)
FIG_SIZE_SINGLE = (8, 8)
STD_CMAP = 'winter' # 'nipy_spectral'
color_scale_max = 0
color_scale_min = -70
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
plt.style.use("ggplot")
def save_or_show_plot(name1, name2='', force_show=False, export_tikz=False):
if not visualize and not force_show:
return
filename = JOB_DIR + '/' + name1 + '_id' + str(task_id) + name2
plt.savefig(filename + '.png')
if export_tikz:
tikz_save(filename + '.tex')
plt.close()
def plot_target_and_prediction(targets, epoch, num_epochs, phase, predictions):
if not visualize:
return
fig = plt.figure(figsize=FIG_SIZE)
ax = fig.add_subplot(111)
plt.plot(targets, label='target')
plt.plot(predictions, label='prediction')
plt.legend()
plt.title("{} targets and prediction; epoch {}/{}".format(phase, epoch, num_epochs))
ax.ticklabel_format(useOffset=False, style='plain')
save_or_show_plot('visual_target+predict', '_' + phase + '_epoch' + str(epoch))
if len(targets) > ZOOM_LIMIT:
fig = plt.figure(figsize=FIG_SIZE)
ax = fig.add_subplot(111)
plt.plot(targets[ZOOM_LIMIT:2*ZOOM_LIMIT], label='target')
plt.plot(predictions[ZOOM_LIMIT:2*ZOOM_LIMIT], label='prediction')
plt.legend()
plt.title("Zoom: {} targets and prediction; epoch {}/{}".format(phase, epoch, num_epochs))
ax.ticklabel_format(useOffset=False, style='plain')
save_or_show_plot('visual_target+predict', '_' + phase + '_epoch' + str(epoch) + '_zoom')
def plot_losses(losses):
if not visualize:
return
plt.figure(figsize=FIG_SIZE)
for phase in ['train', 'val']:
plt.plot(losses[phase], label='phase:' + phase)
plt.legend()
plt.title("train and val losses")
save_or_show_plot('visual_losses')
def plot_input_data(dataloaders, dataset_sizes):
if not visualize:
return
# plot first element of all input windows
plt.figure(figsize=FIG_SIZE)
x = np.arange(0, dataset_sizes['train'])
y = dataloaders['train'].dataset.x.numpy()[:, 0]
plt.plot(x, y, label='training')
if dataset_sizes['val'] > 0:
x = np.arange(dataset_sizes['train'], dataset_sizes['train'] + dataset_sizes['val'])
y = dataloaders['val'].dataset.x.numpy()[:, 0]
plt.plot(x, y, label='val')
plt.legend()
plt.title("First element of input window per sample")
save_or_show_plot('visual_inputs_first')
# plot first n windows
plt.figure(figsize=FIG_SIZE)
n = 10
for i in range(n):
values = dataloaders['train'].dataset.x.numpy()[i, :]
if len(values) == 1:
plt.scatter(np.arange(2 * i * len(values), (2 * i + 1) * len(values)), values, s=1.5, marker='o', label='window ' + str(i))
else:
plt.plot(np.arange(2 * i * len(values), (2 * i + 1) * len(values)), values, label='window ' + str(i))
plt.legend()
plt.title("Total input for first {} samples".format(n))
save_or_show_plot('visual_input_windows')
def plot_data_targets_predictions(phase, data, targets, predictions, title_add, filename_add=''):
if not visualize:
return
fig = plt.figure(figsize=FIG_SIZE)
ax = fig.add_subplot(311)
fig.suptitle("Evaluation ({} - {})".format(phase, title_add))
plt.plot(np.real(data.reshape(-1)), label='data real')
plt.plot(np.imag(data.reshape(-1)), label='data imag')
plt.legend()
ax.set_title("Data")
ax.ticklabel_format(useOffset=False, style='plain')
ax = fig.add_subplot(312)
plt.plot(np.real(targets.reshape(-1)), label='targets')
plt.plot(np.real(predictions.reshape(-1)), label='predictions')
plt.legend()
ax.set_title("Targets & Prediction real part")
ax.ticklabel_format(useOffset=False, style='plain')
ax = fig.add_subplot(313)
plt.plot(np.imag(targets.reshape(-1)), label='targets')
plt.plot(np.imag(predictions.reshape(-1)), label='predictions')
plt.legend()
ax.set_title("Targets & Prediction imag part")
ax.ticklabel_format(useOffset=False, style='plain')
save_or_show_plot('eval_{}_sig+target+predict_{}'.format(phase, filename_add))
def plot_interfered_original_clean_data(interfered_data, original_data, clean_data, packet):
if not visualize or task_id > 0:
return
fig = plt.figure(figsize=FIG_SIZE)
fig.suptitle("Data fft1 (p{})".format(packet))
ax = fig.add_subplot(311)
plt.plot(np.real(interfered_data.reshape(-1)), label='data real')
plt.plot(np.imag(interfered_data.reshape(-1)), label='data imag')
plt.legend()
ax.set_title("Interfered Data")
ax.ticklabel_format(useOffset=False, style='plain')
ax = fig.add_subplot(312)
plt.plot(np.real(original_data.reshape(-1)), label='data real')
plt.plot(np.imag(original_data.reshape(-1)), label='data imag')
plt.legend()
ax.set_title("Original Data")
ax.ticklabel_format(useOffset=False, style='plain')
ax = fig.add_subplot(313)
plt.plot(np.real(clean_data.reshape(-1)), label='data real')
plt.plot(np.imag(clean_data.reshape(-1)), label='data imag')
plt.legend()
ax.set_title("Clean Data")
ax.ticklabel_format(useOffset=False, style='plain')
save_or_show_plot('data_int_orig_clean_p{}'.format(packet))
def plot_data(phase, data):
if not visualize:
return
fig = plt.figure(figsize=FIG_SIZE_SINGLE)
ax = fig.add_subplot(111)
fig.suptitle("Evaluation ({})".format(phase))
plt.plot(np.real(data.reshape(-1)), label='data real')
plt.plot(np.imag(data.reshape(-1)), label='data imag')
plt.legend()
ax.set_title("Data")
ax.ticklabel_format(useOffset=False, style='plain')
save_or_show_plot('data_{}_sig'.format(phase))
def plot_classification_targets_and_predictions(phase, targets, predictions):
if not visualize:
return
fig = plt.figure(figsize=FIG_SIZE_SINGLE)
ax = fig.add_subplot(211)
fig.suptitle("Evaluation ({})".format(phase))
plt.plot(targets.reshape(-1), label='targets')
plt.legend()
ax.set_title("Targets")
ax.ticklabel_format(useOffset=False, style='plain')
ax = fig.add_subplot(212)
plt.plot(predictions.reshape(-1), label='predictions')
plt.legend()
ax.set_title("Prediction")
ax.ticklabel_format(useOffset=False, style='plain')
save_or_show_plot('eval_{}_target+predict'.format(phase))
def plot_metrics_comparison(title, snr, snr_label):
if not visualize:
return
fig = plt.figure(figsize=FIG_SIZE_SINGLE)
fig.add_subplot(111)
fig.suptitle("{}".format(title))
for i, r in enumerate(snr):
plt.plot(r, label=snr_label[i], color=COLORS[i % len(COLORS)], marker='o')
plt.legend()
save_or_show_plot('eval_{}'.format(title))
def plot_line_from_tuples(values, scale, plot_name):
if not visualize:
return
x = [r[0] for r in values]
y = [r[1] for r in values]
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_xscale(scale)
save_or_show_plot(plot_name)
def plot_values(values, signal_labels, func_name, phase):
if not visualize:
return
fig = plt.figure(figsize=FIG_SIZE_SINGLE)
fig.add_subplot(111)
fig.suptitle("{} CDF".format(func_name))
for i, label in enumerate(signal_labels):
x = values[i, :]
x = x[np.logical_not(np.isnan(x))]
x.sort()
y = [v / len(x) for v in range(1, len(x)+1)]
plt.plot(x, y, label=label, color=COLORS[i % len(COLORS)])
plt.legend()
save_or_show_plot('eval_{}_{}_cdf'.format(phase, func_name))
def plot_stat_from_tuples(value_tuples, plot_name, vertical_axis=False):
if not visualize:
return
plt.subplots()
categories = list(set([r[0] for r in value_tuples]))
x = range(1, len(categories)+1)
values = []
for c in categories:
cvalues = [v[1] for v in value_tuples if v[0] == c and v[1] is not None]
values.append(cvalues)
if vertical_axis:
        plt.boxplot(values)  # boxes at default positions 1..len(values); labels are set via xticks below
plt.xticks(x, categories, rotation='vertical')
else:
plt.boxplot(values, labels=categories)
save_or_show_plot(plot_name)
def plot_rd_matrix_for_packet(targets, predictions, prediction_interf_substi, noisy_interfered, zero_substi_td, phase, packet_id, plot_substi, is_log_mag=False):
if not visualize:
return
if task_id == 0:
plot_rd_map(targets, "Evaluation ({}): Targets Doppler-Range Matrix".format(phase),
'eval_{}_doppler-range_matrix_targets_p{}'.format(phase, packet_id), is_log_mag)
plot_rd_map(noisy_interfered, "Evaluation ({}): Noisy Doppler-Range Matrix".format(phase),
'eval_{}_doppler-range_matrix_interfered_p{}'.format(phase, packet_id), is_log_mag)
        plot_rd_map(zero_substi_td, "Evaluation ({}): Mitigation (zero substitute) Doppler-Range Matrix".format(phase),
                    'eval_{}_doppler-range_matrix_mitigation_zero_substi_p{}'.format(phase, packet_id), is_log_mag)
plot_rd_map(predictions, "Evaluation ({}): Predictions Doppler-Range Matrix".format(phase),
'eval_{}_doppler-range_matrix_predictions_p{}'.format(phase, packet_id), is_log_mag)
if plot_substi:
plot_rd_map(prediction_interf_substi,
"Evaluation ({}): Prediction (interference substitude) Doppler-Range Matrix".format(phase),
'eval_{}_doppler-range_matrix_predictions_substi_p{}'.format(phase, packet_id), is_log_mag)
def plot_target_range_doppler_matrix_with_and_out_interference(fft1_without_interference, fft1_with_interference, fft1_with_im_interference, fft1_with_re_interference):
if task_id > 0:
return
data = fft1_without_interference
plot_rd_map(data, "Range-Doppler Matrix without interference", 'targets_range-doppler_original')
data = (fft1_without_interference - fft1_without_interference)
plot_rd_map(data, "Range-Doppler Matrix without interference diff", 'targets_range-doppler_original_diff')
# without interference
data = fft1_with_interference
plot_rd_map(data, "Range-Doppler Matrix with interference", 'targets_range-doppler_interference')
data = (fft1_with_interference - fft1_without_interference)
plot_rd_map(data, "Range-Doppler Matrix with interference diff", 'targets_range-doppler_interference_diff')
# with imag interference
data = fft1_with_im_interference
plot_rd_map(data, "Range-Doppler Matrix with imag interference", 'targets_range-doppler_interference_imag')
data = (fft1_with_im_interference - fft1_without_interference)
plot_rd_map(data, "Range-Doppler Matrix with imag interference diff", 'targets_range-doppler_interference_imag_diff')
# with real interference
data = fft1_with_re_interference
plot_rd_map(data, "Range-Doppler Matrix with real interference", 'targets_range-doppler_interference_re')
data = (fft1_with_re_interference - fft1_without_interference)
plot_rd_map(data, "Range-Doppler Matrix with real interference diff", 'targets_range-doppler_interference_re_diff')
def plot_rd_map(fft2, title, filename, is_log_mag=False):
num_ramps = fft2.shape[1]
v_vec_DFT_2 = v_vec_fft2(num_ramps)
if not is_log_mag:
fft2 = fft2 / np.amax(np.abs(fft2))
fft2 = 10 * np.log10(np.abs(fft2))
fig, ax = plt.subplots(1, 1, figsize=FIG_SIZE_SINGLE)
fig.suptitle(title)
imgplot = plt.imshow(fft2, extent=[v_vec_DFT_2[0], v_vec_DFT_2[-1], 0, d_max], origin='lower', vmin=color_scale_min, vmax=color_scale_max)
ax.ticklabel_format(useOffset=False, style='plain')
ax.set_aspect((v_vec_DFT_2[-1] - v_vec_DFT_2[0]) / d_max)
plt.xlabel('velocity [m/s]')
plt.ylabel('distance [m]')
imgplot.set_cmap(STD_CMAP)
plt.colorbar()
save_or_show_plot(filename)
def plot_phase(rd_target, rd_test, phase, packet_id):
if not visualize:
return
title = 'Phase comparison'
filename = 'eval_{}_phase_p{}'.format(phase, packet_id)
num_ramps = rd_target.shape[1]
v_vec_DFT_2 = v_vec_fft2(num_ramps)
rd_target_imag = np.imag(rd_target)
rd_target_imag[np.logical_or(np.isnan(rd_target_imag), np.isinf(rd_target_imag))] = 0
rd_target_real = np.real(rd_target)
rd_target_real[np.logical_or(np.isnan(rd_target_real), np.isinf(rd_target_real))] = 0
rd_phase_target = np.arctan(rd_target_imag.astype('float') / rd_target_real.astype('float'))
rd_test_imag = np.imag(rd_test)
rd_test_imag[np.logical_or(np.isnan(rd_test_imag), np.isinf(rd_test_imag))] = 0
rd_test_real = np.real(rd_test)
rd_test_real[np.logical_or(np.isnan(rd_test_real), np.isinf(rd_test_real))] = 0
rd_phase_test = np.arctan(rd_test_imag.astype('float') / rd_test_real.astype('float'))
rd_target_plot = rd_phase_target / np.amax(np.abs(rd_phase_target))
rd_target_plot = 10 * np.log10(np.abs(rd_target_plot))
rd_test_plot = rd_phase_test / np.amax(np.abs(rd_phase_test))
rd_test_plot = 10 * np.log10(np.abs(rd_test_plot))
phase_diff = rd_phase_target - rd_phase_test
rd_diff_plot = phase_diff / np.amax(np.abs(phase_diff))
rd_diff_plot = 10 * np.log10(np.abs(rd_diff_plot))
fig = plt.figure(figsize=FIG_SIZE_HIGH)
ax1 = fig.add_subplot(311) # The big subplot
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
fig.suptitle(title)
imgplot = ax1.imshow(rd_target_plot, extent=[v_vec_DFT_2[0], v_vec_DFT_2[-1], 0, d_max], origin='lower', vmin=color_scale_min, vmax=color_scale_max)
ax1.set_title('Target')
ax1.ticklabel_format(useOffset=False, style='plain')
ax1.set_aspect((v_vec_DFT_2[-1] - v_vec_DFT_2[0]) / d_max)
ax1.set_xlabel('velocity [m/s]')
ax1.set_ylabel('distance [m]')
#imgplot.set_cmap(STD_CMAP)
imgplot = ax2.imshow(rd_test_plot, extent=[v_vec_DFT_2[0], v_vec_DFT_2[-1], 0, d_max], origin='lower',
vmin=color_scale_min, vmax=color_scale_max)
ax2.set_title('Prediction')
ax2.ticklabel_format(useOffset=False, style='plain')
ax2.set_aspect((v_vec_DFT_2[-1] - v_vec_DFT_2[0]) / d_max)
ax2.set_xlabel('velocity [m/s]')
ax2.set_ylabel('distance [m]')
#imgplot.set_cmap(STD_CMAP)
imgplot = ax3.imshow(rd_diff_plot, extent=[v_vec_DFT_2[0], v_vec_DFT_2[-1], 0, d_max], origin='lower',
vmin=color_scale_min, vmax=color_scale_max)
ax3.set_title('Diff (T-P)')
ax3.ticklabel_format(useOffset=False, style='plain')
ax3.set_aspect((v_vec_DFT_2[-1] - v_vec_DFT_2[0]) / d_max)
ax3.set_xlabel('velocity [m/s]')
ax3.set_ylabel('distance [m]')
#imgplot.set_cmap(STD_CMAP)
#plt.colorbar()
save_or_show_plot(filename)
def plot_object_mag_cuts(rd_denoised, rd_denoised_interf_substi, rd_clean,
rd_clean_noise, rd_interference, rd_zero_substi_td,
object_mask, packet_id, phase, is_rd, is_log_mag=False):
if not is_log_mag:
rd_clean = rd_clean / np.amax(np.abs(rd_clean))
rd_clean = 10 * np.log10(np.abs(rd_clean))
rd_denoised = rd_denoised / np.amax(np.abs(rd_denoised))
rd_denoised = 10 * np.log10(np.abs(rd_denoised))
rd_denoised_interf_substi = rd_denoised_interf_substi / np.amax(np.abs(rd_denoised_interf_substi))
rd_denoised_interf_substi = 10 * np.log10(np.abs(rd_denoised_interf_substi))
rd_clean_noise = rd_clean_noise / np.amax(np.abs(rd_clean_noise))
rd_clean_noise = 10 * np.log10(np.abs(rd_clean_noise))
rd_interference = rd_interference / np.amax(np.abs(rd_interference))
rd_interference = 10 * np.log10(np.abs(rd_interference))
rd_zero_substi_td = rd_zero_substi_td / np.amax(np.abs(rd_zero_substi_td))
rd_zero_substi_td = 10 * np.log10(np.abs(rd_zero_substi_td))
rd_log_mag_signals = [rd_clean, rd_clean_noise, rd_interference, rd_zero_substi_td, rd_denoised, rd_denoised_interf_substi]
    signal_labels = ['Clean', 'Noisy+C', 'Interference+C+N', 'Mitigation: Zero Substitute TD', 'Denoised', 'Denoised Interference Substitute']
rows, columns = np.nonzero(object_mask)
for i in range(min(len(rows), 3)):
r = rows[i]
c = columns[i]
plot_row_column_cuts(rd_log_mag_signals, signal_labels, c, r, i, packet_id, phase, is_rd, is_mag=True)
def plot_object_phase_cuts(rd_denoised, rd_denoised_interf_substi, rd_clean,
rd_clean_noise, rd_interference, rd_zero_substi_td, object_mask,
packet_id, phase, is_rd):
rd_phase_clean = phase_by_rd(rd_clean)
rd_phase_denoised = phase_by_rd(rd_denoised)
rd_phase_clean_noise = phase_by_rd(rd_clean_noise)
rd_phase_interference = phase_by_rd(rd_interference)
rd_phase_zero_substi_td = phase_by_rd(rd_zero_substi_td)
rd_phase_denoised_substi = phase_by_rd(rd_denoised_interf_substi)
rd_log_mag_signals = [rd_phase_clean, rd_phase_clean_noise, rd_phase_interference, rd_phase_zero_substi_td, rd_phase_denoised, rd_phase_denoised_substi]
    signal_labels = ['Clean', 'Noisy+C', 'Interference+C+N', 'Mitigation: Zero Substitute TD', 'Denoised', 'Denoised Interference Substitute']
rows, columns = np.nonzero(object_mask)
for i in range(min(len(rows), 3)):
r = rows[i]
c = columns[i]
plot_row_column_cuts(rd_log_mag_signals, signal_labels, c, r, i, packet_id, phase, is_rd, is_mag=False)
def phase_by_rd(rd):
rd_imag = np.imag(rd)
rd_imag[np.logical_or(np.isnan(rd_imag), np.isinf(rd_imag))] = 0
rd_real = np.real(rd)
rd_real[np.logical_or(np.isnan(rd_real), np.isinf(rd_real))] = 0
rd_phase = np.arctan(rd_imag.astype('float') / rd_real.astype('float'))
return rd_phase
def plot_row_column_cuts(rd_log_mag_signals, signal_labels, obj_col, obj_row, obj_id, packet_id, phase, is_rd, is_mag):
shape0 = len(rd_log_mag_signals[0][:, obj_col])
if is_rd:
x_label_row = 'velocity [m/s]'
row_title = 'Velocity cut'
filename_add1 = 'rd'
shape1 = len(rd_log_mag_signals[0][obj_row, :])
x_vec2 = v_vec_fft2(shape1)
x_vec1 = np.array(np.linspace(0, 1, shape0)) * d_max
else:
x_label_row = 'cross range [m]'
row_title = 'Cross Range cut'
filename_add1 = 'aoa'
x_vec2, x_vec1 = basis_vec_fft3()
x_vec2 = x_vec2[obj_row, :]
x_vec1 = x_vec1[:, obj_col]
if is_mag:
y_label = 'log mag'
filename_add2 = 'mag'
else:
y_label = 'phase'
filename_add2 = 'phase'
x_label_col = 'range [m]'
# Column cut
fig = plt.figure(figsize=FIG_SIZE)
fig.suptitle("Object cuts ({} - p={} o={})".format(phase, packet_id, obj_id))
ax = fig.add_subplot(211)
for i in range(len(rd_log_mag_signals)):
signal = rd_log_mag_signals[i][:, obj_col]
plt.plot(x_vec1, signal, label="{}".format(signal_labels[i]))
plt.axvline(x=x_vec1[obj_row])
# Row cut
ax.set_title('Range cut')
ax.ticklabel_format(useOffset=False, style='plain')
plt.legend()
plt.xlabel(x_label_col)
plt.ylabel(y_label)
ax = fig.add_subplot(212)
for i in range(len(rd_log_mag_signals)):
signal = rd_log_mag_signals[i][obj_row, :]
plt.plot(x_vec2, signal, label="{}".format(signal_labels[i]))
plt.axvline(x=x_vec2[obj_col])
ax.set_title(row_title)
ax.ticklabel_format(useOffset=False, style='plain')
plt.legend()
plt.xlabel(x_label_row)
plt.ylabel(y_label)
save_or_show_plot('eval_{}_{}_object_cut_{}_p{}_o{}'.format(phase, filename_add1, filename_add2, packet_id, obj_id))
def plot_phase_amplitude_for_packet(targets, predictions, object_mask, phase, packet_id):
if not visualize:
return
targets = targets.transpose()
predictions = predictions.transpose()
rows, columns = np.nonzero(object_mask)
for i in range(min(len(rows), 1)):
fts_idx = rows[i]
if task_id == 0:
plot_phase_amplitude_for_fts(targets[fts_idx, :], fts_idx, phase, packet_id, 'target')
plot_phase_amplitude_for_fts(predictions[fts_idx, :], fts_idx, phase, packet_id, 'denoised')
fts_idx = 20
if task_id == 0:
plot_phase_amplitude_for_fts(targets[fts_idx, :], fts_idx, phase, packet_id, 'target_no_obj')
plot_phase_amplitude_for_fts(predictions[fts_idx, :], fts_idx, phase, packet_id, 'denoised_no_obj')
def plot_phase_amplitude_for_fts(fft1_data, fts_idx, eval_phase, packet_idx, title_add):
fig, ax = plt.subplots(1, 1, figsize=FIG_SIZE_SINGLE)
fig.suptitle('Phase & Amplitude FFT1 ({} {}: p={}, fts={})'.format(eval_phase, title_add, packet_idx, fts_idx))
ax = fig.add_subplot(211)
phase = np.arctan(np.imag(fft1_data) / np.real(fft1_data))
plt.plot(phase, label="phase")
ax.set_title('Phase')
ax.ticklabel_format(useOffset=False, style='plain')
plt.xlabel('velocity [m/s]')
plt.legend()
ax = fig.add_subplot(212)
magnitude = 10 * np.log10(np.abs(fft1_data))
plt.plot(magnitude, label="magnitude")
ax.set_title('Magnitude')
ax.ticklabel_format(useOffset=False, style='plain')
plt.xlabel('velocity [m/s]')
plt.legend()
save_or_show_plot('eval_{}_phase_ampli_{}_p{}_fts{}'.format(eval_phase, title_add, packet_idx, fts_idx))
def plot_distance_map(fft1_data, title, filename):
num_ramps = fft1_data.shape[1]
fig, ax = plt.subplots(1, 1, figsize=FIG_SIZE_SINGLE)
fig.suptitle(title)
imgplot = plt.imshow(np.abs(fft1_data), extent=[0, num_ramps, 0, d_max], origin='lower')
ax.ticklabel_format(useOffset=False, style='plain')
ax.set_aspect(num_ramps / d_max)
plt.xlabel('ramps')
plt.ylabel('distance [m]')
imgplot.set_cmap(STD_CMAP)
plt.colorbar()
save_or_show_plot(filename)
def plot_rd_noise_mask(noise_mask, title, filename):
num_ramps = noise_mask.shape[1]
v_vec_DFT_2 = v_vec_fft2(num_ramps)
fig, ax = plt.subplots(1, 1, figsize=FIG_SIZE_SINGLE)
fig.suptitle(title)
imgplot = plt.imshow(noise_mask.astype(int), extent=[v_vec_DFT_2[0], v_vec_DFT_2[-1], 0, d_max], origin='lower')
ax.ticklabel_format(useOffset=False, style='plain')
ax.set_aspect((v_vec_DFT_2[-1] - v_vec_DFT_2[0]) / d_max)
plt.xlabel('velocity [m/s]')
plt.ylabel('distance [m]')
imgplot.set_cmap(STD_CMAP)
plt.colorbar()
save_or_show_plot(filename)
def plot_aoa_noise_mask(noise_mask, title, filename):
x_vec, y_vec = basis_vec_fft3()
fig = plt.figure(figsize=FIG_SIZE)
ax = fig.gca(projection='3d')
fig.suptitle(title)
surf = ax.plot_surface(x_vec, y_vec, noise_mask.astype(int), cmap=STD_CMAP, linewidth=0, vmin=0, vmax=1, rstride=1, cstride=1)
plt.xlabel('cross range [m]')
plt.ylabel('range [m]')
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.view_init(87.5, -90)
plt.draw()
save_or_show_plot(filename)
def plot_angle_of_arrival_map(fft3, title, filename):
x_vec, y_vec = basis_vec_fft3()
fft3_plot = 10 * np.log10(np.abs(fft3 / np.amax(np.abs(fft3))))
fig = plt.figure(figsize=FIG_SIZE)
ax = fig.gca(projection='3d')
fig.suptitle(title)
surf = ax.plot_surface(x_vec, y_vec, fft3_plot, cmap=STD_CMAP, linewidth=0, vmin=color_scale_min, vmax=color_scale_max, rstride=1, cstride=1)
plt.xlabel('cross range [m]')
plt.ylabel('range [m]')
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.view_init(87.5, -90)
plt.draw()
save_or_show_plot(filename)
def plot_cross_ranges(obj_idx, rows, columns, phase, scene_idx, x_vec,
angular_spectrum, angular_spectrum_clean, angular_spectrum_interf,
angular_spectrum_original, angular_spectrum_zero_substi, object_mask):
cr_prediction = calculate_cross_range_fft(angular_spectrum[rows[obj_idx], columns[obj_idx]])
cr_clean = calculate_cross_range_fft(angular_spectrum_clean[rows[obj_idx], columns[obj_idx]])
cr_interf = calculate_cross_range_fft(angular_spectrum_interf[rows[obj_idx], columns[obj_idx]])
cr_original = calculate_cross_range_fft(angular_spectrum_original[rows[obj_idx], columns[obj_idx]])
cr_zero_substi = calculate_cross_range_fft(angular_spectrum_zero_substi[rows[obj_idx], columns[obj_idx]])
signals = [cr_clean, cr_prediction, cr_interf, cr_original, cr_zero_substi]
labels = ['clean', 'prediction', 'interf', 'original', 'zero substi']
fig = plt.figure(figsize=FIG_SIZE)
fig.suptitle('Cross range (s{})'.format(scene_idx))
fig.add_subplot(111)
for i in range(len(signals)):
s = signals[i]
s = 10 * np.log10(np.abs(s / np.amax(np.abs(s))))
plt.plot(x_vec, s[0], label=labels[i])
_, obj_indices = np.nonzero(object_mask)
for i in obj_indices:
plt.axvline(x=x_vec[i])
plt.xlabel('cross range [m]')
plt.ylabel('log mag')
plt.legend()
save_or_show_plot('eval_{}_cr_s{}_o{}'.format(phase, scene_idx, obj_idx))
``` |
{
"source": "johannaSommer/adversarial_relighting",
"score": 3
} |
#### File: classifiers/FaceNet/Facenet.py
```python
from facenet_pytorch import MTCNN, InceptionResnetV1
import numpy as np
from torch.utils.data import DataLoader
from torchvision import datasets
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
import torch
class FaceNet:
"""
    Wraps a pretrained InceptionResnetV1 embedding model and a simple linear classifier head
:param num_classes: number of identities/classes
:param load_model: whether to load weights for 5-celeb
"""
def __init__(self, num_classes, load_model=True):
self.model_embedding = InceptionResnetV1(pretrained='vggface2').eval().to('cpu')
if load_model:
self.model_classifier = Net(num_classes=num_classes)
self.model_classifier.load_state_dict(torch.load('weights/fiveceleb.t7'))
else:
self.model_classifier = Net(num_classes=num_classes)
def train(self, X_train, y_train, num_steps, learning_rate):
"""
Fit simple classifier to training data
:param X_train: training data in embedding format
:param y_train: training labels
:param num_steps: number of optim steps in training
:param learning_rate: learning rate for training
:return: history of loss values during training
"""
print('Training FaceNet on PubFig10')
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(self.model_classifier.parameters(), lr=learning_rate)
loss_history = []
for i in range(0, num_steps): # loop over the dataset multiple times
optimizer.zero_grad()
outputs = self.model_classifier(X_train)
loss = criterion(outputs, y_train.long())
loss.backward(retain_graph=True)
loss_history.append(loss)
# print('Loss: {}'.format(loss))
optimizer.step()
return loss_history
def forward(self, input):
self.model_classifier.eval()
embedding = self.model_embedding(input)
embedding = torch.nn.functional.normalize(embedding, p=2, dim=1, eps=1e-12, out=None)
logits = self.model_classifier(embedding)
return logits
def to(self, device):
self.model_embedding.to(device)
self.model_classifier.to(device)
def predict(self, input, log=False):
"""
        Predict the identity for the given input.
        :param input: input images (must already be cropped to the face)
        :param log: whether to return log-probabilities (for NLL computation)
        :return: probabilities for all classes
"""
self.model_classifier.eval()
embedding = self.model_embedding(input)
embedding = torch.nn.functional.normalize(embedding, p=2, dim=1, eps=1e-12, out=None)
logits = self.model_classifier(embedding)
if log:
probs = F.log_softmax(logits, dim=1)
else:
probs = F.softmax(logits, dim=1)
return probs
class Net(nn.Module):
"""
Instantiate simple classifier to map embeddings to faces, as mentioned in
https://arxiv.org/pdf/1801.00349.pdf
:param num_classes: number of identities/classes
:return: logits
"""
def __init__(self, num_classes):
super(Net, self).__init__()
self.fc1 = nn.Linear(512, num_classes)
def forward(self, x):
x = self.fc1(x)
# x = F.softmax(x)
return x
def crop_image_single(img, device):
"""
    Uses an MTCNN network to crop a single image so that only the face remains, following the facenet_pytorch example:
https://github.com/timesler/facenet-pytorch/blob/master/examples/infer.ipynb
:param device: pytorch device
:param img: single image to be cropped
:return: cropped image
"""
model = MTCNN(image_size=160, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7],
factor=0.709, post_process=False, device=device)
x_aligned, prob = model(img, return_prob=True)
return x_aligned
def crop_images_batch(device, image_folder, transform=None):
"""
    Uses an MTCNN network to crop a batch of images so that only the faces remain, following the facenet_pytorch example:
https://github.com/timesler/facenet-pytorch/blob/master/examples/infer.ipynb
:param device: pytorch device
:param image_folder: path to images
:return: cropped images, names of celebrities according to file structure
"""
model = MTCNN(image_size=160, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7],
factor=0.709, post_process=False, device=device)
dataset = datasets.ImageFolder(image_folder, transform=transform)
dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}
loader = DataLoader(dataset, collate_fn=collate_fn)
aligned = None
names = None
for x, y in loader:
print(x, y)
x_aligned, prob = model(x, return_prob=True)
if x_aligned is not None:
x_aligned = x_aligned / 255
if aligned is None and names is None:
aligned = np.expand_dims(x_aligned, axis=0)
names = dataset.idx_to_class[y]
else:
aligned = np.concatenate((aligned, np.expand_dims(x_aligned, axis=0)), axis=0)
names = np.append(names, dataset.idx_to_class[y])
return aligned, names
def collate_fn(x):
return x[0]
def encode_pubfig(y):
    """
    class encoding for pubfig
    :param y: labels as strings
    :return: labels as ints
    """
    label_to_idx = {
        'Aaron-Eckhart': 0,
        'Adriana-Lima': 1,
        'Angela-Merkel': 2,
        'Beyonce-Knowles': 3,
        'Brad-Pitt': 4,
        'Clive-Owen': 5,
        'Drew-Barrymore': 6,
        'Milla-Jovovich': 7,
        'Quincy-Jones': 8,
    }
    # any label not listed above maps to class 9, matching the original if/elif chain
    return [label_to_idx.get(x, 9) for x in y]
```
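An illustrative end-to-end sketch of how the pieces above fit together (the file name, device handling, and weight availability are assumptions, not repository code): crop a face with `crop_image_single`, rescale to [0, 1] as `crop_images_batch` does, and classify it with the `FaceNet` wrapper.
```python
import torch
from PIL import Image
from classifiers.FaceNet.Facenet import FaceNet, crop_image_single

device = 'cuda' if torch.cuda.is_available() else 'cpu'
img = Image.open('some_celebrity.jpg')              # hypothetical input image
face = crop_image_single(img, device)               # 3x160x160 tensor in [0, 255], or None if no face is found
if face is not None:
    face = (face / 255.0).unsqueeze(0).to(device)   # scale to [0, 1] and add a batch dimension
    net = FaceNet(num_classes=10, load_model=False) # load_model=True expects weights/fiveceleb.t7
    net.to(device)
    probs = net.predict(face)                       # (1, num_classes) class probabilities
    print(probs.argmax(dim=1))
```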
#### File: adversarial_relighting/utils/datasets.py
```python
import numpy as np
import os
import pandas as pd
import sys
import torch
import zipfile
from facenet_pytorch import MTCNN
from google_drive_downloader import GoogleDriveDownloader as gdd
from PIL import Image
from torchvision.datasets import ImageFolder
sys.path.append(os.path.join(os.path.dirname(os.path.realpath('__file__')), '..'))
from utils import labels_util
from classifiers.FaceNet.Facenet import crop_images_batch
import torchvision.transforms as transforms
def download(data_path, zip_name, drive_file_id):
zip_path = os.path.join(data_path, zip_name)
gdd.download_file_from_google_drive(file_id=drive_file_id,
dest_path=zip_path,
unzip=False)
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(data_path)
class IndoorScenesDataset(torch.utils.data.Dataset):
"""Minimal dataset functionality for the Indoor Classification data.
Example usage (in a notebook in /experiments):
ds = IndoorScenesDataset('../data/indoor-scenes/Train.csv',
'../data/indoor-scenes/')
ds = IndoorScenesDataset('../data/indoor-scenes/Test.csv',
'../data/indoor-scenes/')
"""
def __init__(self, csv_filename, data_path):
self.data_path = data_path
# Download the data from Google Drive if not already
# available.
if not os.path.exists(data_path):
zip_name = 'indoor-scenes.zip'
drive_file_id = '19sajDHxP1YNs9IcvUJdgCI9nOyJeE8Up'
download(data_path, zip_name, drive_file_id)
self.df = pd.read_csv(csv_filename)
def __len__(self):
return len(self.df) - 1
def __getitem__(self, idx):
from attacks import utils # This import sometimes causes problems, so we only import it here
im_name = self.df['Id'][idx]
im_path = os.path.join(self.data_path, 'Images', im_name)
img = utils.read_image(im_path)
gt_label = self.df['Category'][idx]
return img, gt_label
class PubFigDataset(torch.utils.data.Dataset):
""" Reduced PubFig Dataset with 10 manually selected classes.
Example usage (for a notebook in /experiments):
ds = PubFigDataset('../data/pubfig/', mode='train')
ds = PubFigDataset('../data/pubfig/', mode='test')
"""
def __init__(self, data_path, mode, crop=False, transform=None, crop_size=240):
assert mode in ['train', 'test']
# Download the data from Google Drive if not already
# available.
if not os.path.exists(data_path):
zip_name = 'pubfig.zip'
drive_file_id = '1hukredXUXnSNQcOjohk7INHcFnCy2KTb'
download(data_path, zip_name, drive_file_id)
idx_to_label = labels_util.load_idx_to_label('pubfig10')
label_to_idx = {label : idx for idx, label in idx_to_label.items()}
# Store the data in a list of (image path, label)
self.data = []
self.crop = crop
self.crop_size = crop_size
self.transform = transform
if mode == 'train':
data_path = os.path.join(data_path, 'PubFig_og')
elif mode == 'test':
data_path = os.path.join(data_path, 'testimgs')
for celeb_name in os.listdir(data_path):
dir_path = os.path.join(data_path, celeb_name)
if not os.path.isdir(dir_path):
continue
if celeb_name not in label_to_idx:
continue
gt_label = label_to_idx[celeb_name]
for celeb_img_name in os.listdir(dir_path):
img_path = os.path.join(dir_path, celeb_img_name)
self.data.append((img_path, gt_label))
if crop:
self.cropper = MTCNN(image_size=self.crop_size, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7],
factor=0.709, post_process=False, device='cuda')
def shuffle(self):
np.random.shuffle(self.data)
def __len__(self):
return len(self.data)
    def __getitem__(self, idx):
        from attacks import utils  # local import (as in IndoorScenesDataset) to avoid problems at module load time
        im_path = self.data[idx][0]
        gt_label = self.data[idx][1]
        if not self.crop:
            img = utils.read_image(im_path)
else:
# PIL image has range 0...255. This is what the
# cropper expects.
img = np.array(Image.open(im_path))
img, probs = self.cropper(img, return_prob=True)
if img is None:
return None, None
img = img.detach().cpu().numpy().transpose((1, 2, 0))
img = img / 255.0
# Add a transform (eg: EOTAttackTransform).
if self.transform is not None:
img = self.transform(img, gt_label)
return img, gt_label
class PubFig83Dataset_test(torch.utils.data.Dataset):
""" PubFig dataset with 83 identities
"""
def __init__(self):
crops = np.load('/home/jupyter/project-1/data/pubfig83/pubfig83_crop_test.npz')['data']
self.crops_perm = []
for crop in crops:
self.crops_perm.append(torch.Tensor(crop).permute(1, 2, 0).numpy())
self.label_names = np.load('/home/jupyter/project-1/data/pubfig83/pubfig83_crop_test.npz')['labels']
def __len__(self):
return len(self.crops_perm)
def __getitem__(self, idx):
img = self.crops_perm[idx]
gt_label = self.label_names[idx]
return img, gt_label
class VGGFace2(torch.utils.data.Dataset):
def __init__(self, data_path, transform=None, image_size_for_crop=224):
self.data = ImageFolder(data_path)
self.image_size_for_crop = image_size_for_crop
self.cropper = MTCNN(image_size=image_size_for_crop, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7],
factor=0.709, post_process=False, device='cuda')
complete_to_subset = labels_util.load_idx_to_label('vggface2')
self.subset_to_complete = {value: key for key, value in complete_to_subset.items()}
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
image, label = self.data[idx]
cropped = self.cropper(image)
if cropped is None:
# Previously this returned None if the cropper could not detect a face. Now we just do a random crop of the
            # right size. The advantage is that this works with DataLoaders, whereas DataLoaders throw an
            # exception if None is returned. The result should otherwise be the same, as it is unlikely that the
            # classifier will classify a randomly cropped image correctly. If it does by chance, this is fine.
#return None, None
img = transforms.RandomCrop(self.image_size_for_crop, pad_if_needed=True, padding_mode='edge')(image)
            cropped = transforms.ToTensor()(img) * 255  # rescale to 0-255 so the shared /255 below matches the MTCNN branch (post_process=False)
img = cropped.numpy().transpose(1, 2, 0) / 255
# Add a transform (eg: EOTAttackTransform).
if self.transform is not None:
img = self.transform(img, label)
return img.astype(np.float64), self.subset_to_complete[label]
``` |
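Because `PubFigDataset.__getitem__` returns `(None, None)` whenever MTCNN cannot find a face, a custom collate function is needed before the dataset can be batched. The sketch below is a hypothetical usage example, not repository code; the import path, data path, and batch size are placeholders, and note that `crop=True` constructs its MTCNN on `'cuda'` as in the code above.
```python
import torch
from torch.utils.data import DataLoader

from utils.datasets import PubFigDataset  # assumed import path, matching this file's location


def skip_failed_crops(batch):
    # Drop (None, None) samples produced when MTCNN finds no face in an image.
    batch = [(img, label) for img, label in batch if img is not None]
    if not batch:
        return None
    imgs = torch.stack([torch.as_tensor(img).permute(2, 0, 1).float() for img, _ in batch])
    labels = torch.tensor([label for _, label in batch])
    return imgs, labels


ds = PubFigDataset("../data/pubfig/", mode="train", crop=True)
loader = DataLoader(ds, batch_size=8, collate_fn=skip_failed_crops)
for batch in loader:
    if batch is None:
        continue
    imgs, labels = batch  # imgs: B x 3 x 240 x 240 floats in [0, 1]; labels: B class indices
```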
{
"source": "JohannaVonLuck/gewizes",
"score": 2
} |
#### File: util/blenderScripts/gamx_export.py
```python
__author__ = "<NAME>"
__url__ = ("http://gewizes.sourceforge.net/")
__version__ = "0.5"
__email__ = "<EMAIL>"
__bpydoc__ = """\
Description: Exports a Blender scene into a GAMX 1.0 file.
Usage: Run the script from the menu or inside Blender.
"""
import sys
import math
import os.path
import inspect # Used for script filename identification (for .sav rename)
import pickle # Used for settings save/load
import bpy
import Blender
from Blender import *
endl = "\n"
tab4 = "    "
name_prefix = ""
file_prefix = ""
anim_fps = 25.0
use_no_shift_over = True
use_bilinear_over_unilinear = True
use_linear_over_cubic_cr = False
use_unique_timer = False
last_folder = os.path.abspath(os.path.dirname(Blender.Get("filename")))
# Tries to load options from .sav file (named same as running script)
def gamx_try_load_options():
global name_prefix
global file_prefix
global anim_fps
global use_no_shift_over
global use_bilinear_over_unilinear
global use_linear_over_cubic_cr
global use_unique_timer
global last_folder
try:
filename = os.path.splitext(os.path.abspath(inspect.getfile(inspect.currentframe())))[0] + ".sav"
if os.path.exists(filename):
fin = open(filename, "r")
try:
dict = pickle.load(fin)
except:
pass
finally:
fin.close()
if dict["version"] == 1:
name_prefix = dict["name_prefix"]
file_prefix = dict["file_prefix"]
anim_fps = dict["anim_fps"]
use_no_shift_over = dict["use_no_shift_over"]
use_bilinear_over_unilinear = dict["use_bilinear_over_unilinear"]
use_linear_over_cubic_cr = dict["use_linear_over_cubic_cr"]
use_unique_timer = dict["use_unique_timer"]
last_folder = dict["last_folder"]
except:
pass
# Tries to save options to .sav file (named same as running script)
def gamx_try_save_options():
try:
filename = os.path.splitext(os.path.abspath(inspect.getfile(inspect.currentframe())))[0] + ".sav"
dict = { }
dict["version"] = 1
dict["name_prefix"] = name_prefix
dict["file_prefix"] = file_prefix
dict["anim_fps"] = anim_fps
dict["use_no_shift_over"] = use_no_shift_over
dict["use_bilinear_over_unilinear"] = use_bilinear_over_unilinear
dict["use_linear_over_cubic_cr"] = use_linear_over_cubic_cr
dict["use_unique_timer"] = use_unique_timer
dict["last_folder"] = last_folder
fout = open(filename, "w")
try:
pickle.dump(dict, fout)
except:
pass
finally:
fout.close()
except:
pass
def gamx_name_prefix(prefix, name):
if name[:len(name_prefix)] == name_prefix:
name = name[len(name_prefix):]
if name[:len(prefix)] == prefix:
name = name[len(prefix):]
return name_prefix + prefix + name
def gamx_namesubprefix(prefix, name):
if name[:len(name_prefix)] == name_prefix:
name = name[len(name_prefix):]
if name[:len(prefix)] == prefix:
name = name[len(prefix):]
return prefix + name
def gamx_file_prefix(filename):
if filename[:1] != '/':
return file_prefix + filename
else:
return filename
def gamx_isnone_string(obj):
if obj == None:
return "None"
return "Found"
def gamx_deg_to_rad(degrees):
    return round(degrees * 3.14159265 / 180.0, 6)
def gamx_rad_to_deg(radians):
    return round(radians * 180.0 / 3.14159265, 6)
def gamx_axis_to_quat(angles=[0,0,0]):
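    # Interprets `angles` as a rotation vector (axis * angle, in radians) and returns the matching
    # unit quaternion as [w, x, y, z]; the zero vector maps to the identity quaternion [1, 0, 0, 0].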
angle = math.sqrt((angles[0] * angles[0]) + (angles[1] * angles[1]) + (angles[2] * angles[2]))
if angle > 0.0:
angles[0] /= angle
angles[1] /= angle
angles[2] /= angle
angle *= 0.5
sinHlfAng = math.sin(angle)
quat = [0,0,0,0]
quat[0] = math.cos(angle)
quat[1] = angles[0] * sinHlfAng
quat[2] = angles[1] * sinHlfAng
quat[3] = angles[2] * sinHlfAng
mag = math.sqrt((quat[0] * quat[0]) + (quat[1] * quat[1]) + (quat[2] * quat[2]) + (quat[3] * quat[3]))
quat[0] = quat[0] / mag
quat[1] = quat[1] / mag
quat[2] = quat[2] / mag
quat[3] = quat[3] / mag
else:
quat = [1,0,0,0]
return quat
def gamx_trnspresent(pos, rot, scl):
if (pos != None and (pos[0] != 0.0 or pos[1] != 0.0 or pos[2] != 0.0)) or (rot != None and (rot[0] != 0.0 or rot[1] != 0.0 or rot[2] != 0.0)) or (scl != None and (scl[0] != 1.0 or scl[1] != 1.0 or scl[2] != 1.0)):
return 1
return 0
def gamx_transform(pos, rot, scl):
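    # Builds a <transform .../> element, converting from Blender's coordinate frame to the one used by
    # this exporter: positions and rotation axes map (x, y, z) -> (x, z, -y), scales map to (x, z, y).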
node = r''
if (pos != None and (pos[0] != 0.0 or pos[1] != 0.0 or pos[2] != 0.0)) or (rot != None and (rot[0] != 0.0 or rot[1] != 0.0 or rot[2] != 0.0)) or (scl != None and (scl[0] != 1.0 or scl[1] != 1.0 or scl[2] != 1.0)):
node += r'<transform'
if rot != None and (rot[0] != 0.0 or rot[1] != 0.0 or rot[2] != 0.0):
node += r' mode="radians"'
if pos != None and (pos[0] != 0.0 or pos[1] != 0.0 or pos[2] != 0.0):
node += r' position="' + "%0.6f %0.6f %0.6f" % (pos[0], pos[2], -pos[1]) + r'"'
if rot != None and (rot[0] != 0.0 or rot[1] != 0.0 or rot[2] != 0.0):
node += r' axis="' + "%0.6f %0.6f %0.6f" % (rot[0], rot[2], -rot[1]) + r'"'
if scl != None and (scl[0] != 1.0 or scl[1] != 1.0 or scl[2] != 1.0):
node += r' scale="' + "%0.6f %0.6f %0.6f" % (scl[0], scl[2], scl[1]) + r'"'
node += r' />'
return node
def gamx_first_scene_name():
if len(bpy.data.scenes):
for scn in bpy.data.scenes:
return scn.name
else:
return "default"
def gamx_export_source_sditva(fout, mesh):
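    # Writes the mesh as a "disjoint indexed vertex array": shared vertices, one normal per face,
    # optional per-face-corner UVs, and a triangle list in which every quad is split into two triangles.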
fout.write(3*tab4 + r'<geometry source="internal" type="disjoint_indexed_vertex_array">' + endl)
fout.write(4*tab4 + r'<vertices count="' + "%d" % len(mesh.verts) + r'">')
spacer = ''
for vert in mesh.verts:
fout.write(spacer + "%0.6f %0.6f %0.6f" % (round(vert.co[0], 6), round(vert.co[2], 6), round(-vert.co[1], 6)))
spacer = ', '
fout.write(r'</vertices>' + endl)
fout.write(4*tab4 + r'<normals count="' + "%d" % len(mesh.faces) + r'">')
spacer = ''
for face in mesh.faces:
fout.write(spacer + "%0.6f %0.6f %0.6f" % (round(face.no[0], 6), round(face.no[2], 6), round(-face.no[1], 6)))
spacer = ', '
fout.write(r'</normals>' + endl)
if mesh.faceUV:
count = 0
for face in mesh.faces:
count += len(face.v)
fout.write(4*tab4 + r'<texuvs count="' + "%d" % count + r'">')
spacer = ''
for face in mesh.faces:
for uv in face.uv:
fout.write(spacer + "%0.6f %0.6f" % (round(uv[0], 6), round(1.0 - uv[1], 6)))
spacer = ', '
fout.write(r'</texuvs>' + endl)
count = 0
for face in mesh.faces:
if len(face.v) == 3:
count += 1
else:
count += 2
fout.write(4*tab4 + r'<faces count="' + "%d" % count + r'" format="triangles">')
count = 0
fcount = 0
vcount = 0
spacer = ''
for face in mesh.faces:
if len(face.v) == 3:
if mesh.faceUV:
fout.write(spacer + "%d %d %d %d %d %d %d %d %d" % (face.v[0].index, fcount, vcount+0, face.v[1].index, fcount, vcount+1, face.v[2].index, fcount, vcount+2))
spacer = ', '
else:
fout.write(spacer + "%d %d %d %d %d %d" % (face.v[0].index, fcount, face.v[1].index, fcount, face.v[2].index, fcount))
spacer = ', '
count += 1
vcount += 3
else:
if mesh.faceUV:
fout.write(spacer + "%d %d %d %d %d %d %d %d %d" % (face.v[0].index, fcount, vcount+0, face.v[1].index, fcount, vcount+1, face.v[2].index, fcount, vcount+2))
spacer = ', '
fout.write(spacer + "%d %d %d %d %d %d %d %d %d" % (face.v[0].index, fcount, vcount+0, face.v[2].index, fcount, vcount+2, face.v[3].index, fcount, vcount+3))
spacer = ', '
else:
fout.write(spacer + "%d %d %d %d %d %d" % (face.v[0].index, fcount, face.v[1].index, fcount, face.v[2].index, fcount))
spacer = ', '
fout.write(spacer + "%d %d %d %d %d %d" % (face.v[0].index, fcount, face.v[2].index, fcount, face.v[3].index, fcount))
spacer = ', '
count += 2
vcount += 4
fcount += 1
fout.write(r'</faces>' + endl)
fout.write(3*tab4 + r'</geometry>' + endl)
def gamx_export_node(fout, tab, obj, chds):
tabof = tab
wrote_localizers = 0
if len(chds[obj.name]):
fout.write((tab+0)*tab4 + r'<node id="' + gamx_name_prefix('node_', obj.name) + '" type="transform">' + endl)
fout.write((tab+1)*tab4 + r'<bounding><volume type="zero" /></bounding>' + endl)
if obj.ipo == None:
if gamx_trnspresent(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')):
fout.write((tab+1)*tab4 + r'<offset>' + gamx_transform(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')) + r'</offset>' + endl)
else:
fout.write((tab+1)*tab4 + r'<offset>' + endl)
if gamx_trnspresent(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')):
fout.write((tab+2)*tab4 + gamx_transform(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')) + endl)
fout.write((tab+2)*tab4 + r'<interpolator ref="' + gamx_name_prefix('ipo_', obj.ipo.name) + r'" />' + endl)
fout.write((tab+1)*tab4 + r'</offset>' + endl)
wrote_localizers = 1
fout.write((tab+1)*tab4 + r'<assets>' + endl)
tabof = tab + 2
if obj.type == 'Mesh':
mesh = obj.getData(0, 1)
mode = mesh.mode
if mesh.faceUV:
for face in mesh.faces:
mode |= face.mode
if mode & (Blender.Mesh.FaceModes["HALO"] | Blender.Mesh.FaceModes["BILLBOARD"]) or mesh.name[:3] == 'bb_': # billboard
if mesh.users == 1 and wrote_localizers:
fout.write((tabof+0)*tab4 + r'<billboard ref="' + gamx_name_prefix('bb_', mesh.name) + r'" />' + endl)
else:
fout.write((tabof+0)*tab4 + r'<billboard id="' + gamx_name_prefix('obj_', obj.name) + r'" ref="' + gamx_name_prefix('bb_', mesh.name) + r'">' + endl)
fout.write((tabof+1)*tab4 + r'<bounding><volume type="zero" /></bounding>' + endl)
if not wrote_localizers:
if obj.ipo == None:
if gamx_trnspresent(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')):
fout.write((tabof+1)*tab4 + r'<offset>' + gamx_transform(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')) + r'</offset>' + endl)
else:
fout.write((tabof+1)*tab4 + r'<offset>' + endl)
if gamx_trnspresent(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')):
fout.write((tabof+2)*tab4 + gamx_transform(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')) + endl)
fout.write((tabof+2)*tab4 + r'<interpolator ref="' + gamx_name_prefix('ipo_', obj.ipo.name) + r'" />' + endl)
fout.write((tabof+1)*tab4 + r'</offset>' + endl)
fout.write((tabof+0)*tab4 + r'</billboard>' + endl)
else: # mesh
if mesh.users == 1 and wrote_localizers:
fout.write((tabof+0)*tab4 + r'<mesh ref="' + gamx_name_prefix('mesh_', mesh.name) + r'" />' + endl)
else:
fout.write((tabof+0)*tab4 + r'<mesh id="' + gamx_name_prefix('obj_', obj.name) + r'" ref="' + gamx_name_prefix('mesh_', mesh.name) + r'">' + endl)
fout.write((tabof+1)*tab4 + r'<bounding><volume type="zero" /></bounding>' + endl)
if not wrote_localizers:
if obj.ipo == None:
if gamx_trnspresent(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')):
fout.write((tabof+1)*tab4 + r'<offset>' + gamx_transform(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')) + r'</offset>' + endl)
else:
fout.write((tabof+1)*tab4 + r'<offset>' + endl)
if gamx_trnspresent(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')):
fout.write((tabof+2)*tab4 + gamx_transform(obj.getLocation('localspace'), obj.getEuler('localspace'), obj.getSize('localspace')) + endl)
fout.write((tabof+2)*tab4 + r'<interpolator ref="' + gamx_name_prefix('ipo_', obj.ipo.name) + r'" />' + endl)
fout.write((tabof+1)*tab4 + r'</offset>' + endl)
fout.write((tabof+0)*tab4 + r'</mesh>' + endl)
elif obj.type == 'Camera':
pass # TODO!
print "Info: GAMX_Export: Warning: Object type '%s' export not yet supported.\r\n" % (obj.type,)
elif obj.type == 'Lamp':
pass # TODO!
print "Info: GAMX_Export: Warning: Object type '%s' export not yet supported.\r\n" % (obj.type,)
elif obj.type == "Empty":
pass # Ignore empties
else:
print "Info: GAMX_Export: Warning: Object type '%s' not supported as a leaf node.\r\n" % (obj.type,)
if len(chds[obj.name]):
for chd in chds[obj.name]:
gamx_export_node(fout, tab+2, chd, chds)
fout.write((tab+1)*tab4 + r'</assets>' + endl)
fout.write((tab+0)*tab4 + r'</node>' + endl)
def gamx_export(filename):
try:
print "Info: GAMX_Export: Beginning export to '%s'.\r\n" % (filename,) + endl
assets = 0
fout = file(filename, "w")
try:
# Write header
fout.write(0*tab4 + r'<?xml version="1.0" encoding = "utf-8"?>' + endl)
fout.write(0*tab4 + r'<gamx version="1.0">' + endl)
fout.write(1*tab4 + r'<info>' + endl)
fout.write(2*tab4 + r'<author>' + r'</author>' + endl)
fout.write(2*tab4 + r'<comments>' + r'</comments>' + endl)
fout.write(2*tab4 + r'<copyright>' + r'</copyright>' + endl)
fout.write(1*tab4 + r'</info>' + endl)
fout.write(1*tab4 + r'<assets>' + endl)
# Write interpolators
if not use_unique_timer and len(bpy.data.ipos):
assets += 1
if assets > 1:
fout.write(endl)
fout.write(2*tab4 + r'<timer id="' + gamx_name_prefix('tmr_', gamx_name_prefix('obj_', gamx_first_scene_name())) + r'" type="basic">' + endl)
fout.write(2*tab4 + r'</timer>' + endl)
for ipo in bpy.data.ipos:
if ipo.getCurve('LocX') != None or ipo.getCurve('LocY') != None or ipo.getCurve('LocZ') != None or ipo.getCurve('ScaleX') != None or ipo.getCurve('ScaleY') != None or ipo.getCurve('ScaleZ') != None or ipo.getCurve('RotX') != None or ipo.getCurve('RotY') != None or ipo.getCurve('RotZ') != None or ipo.getCurve('QuatW') != None or ipo.getCurve('QuatX') != None or ipo.getCurve('QuatY') != None or ipo.getCurve('QuatZ') != None:
# IPO validation check
valid = 1
if valid and (ipo.getCurve('LocX') != None or ipo.getCurve('LocY') != None or ipo.getCurve('LocZ') != None):
if ipo.getCurve('LocX') == None or ipo.getCurve('LocY') == None or ipo.getCurve('LocZ') == None:
print "Info: GAMX_Export: Error: Ipo '%s' is missing key frame definitions of at least one LocX, LocY, or LocZ channel (X:%s Y:%s Z:%s); skipping export.\r\n" % (ipo.name,gamx_isnone_string(ipo.getCurve('LocX')),gamx_isnone_string(ipo.getCurve('LocY')),gamx_isnone_string(ipo.getCurve('LocZ')))
valid = 0
elif len(ipo.getCurve('LocX').bezierPoints) != len(ipo.getCurve('LocY').bezierPoints) or len(ipo.getCurve('LocY').bezierPoints) != len(ipo.getCurve('LocZ').bezierPoints):
print "Info: GAMX_Export: Error: Ipo '%s' LocX, LocY, & LocZ channels do not contain same number of key frames (X:%d Y:%d Z:%d); skipping export.\r\n" % (ipo.name,len(ipo.getCurve('LocX').bezierPoints),len(ipo.getCurve('LocY').bezierPoints),len(ipo.getCurve('LocZ').bezierPoints))
valid = 0
else:
for i in range(len(ipo.getCurve('LocX').bezierPoints)):
if ipo.getCurve('LocX').bezierPoints[i].pt[0] != ipo.getCurve('LocY').bezierPoints[i].pt[0] or ipo.getCurve('LocY').bezierPoints[i].pt[0] != ipo.getCurve('LocZ').bezierPoints[i].pt[0]:
print "Info: GAMX_Export: Error: Ipo '%s' LocX, LocY, & LocZ channels are not simultaneous for all key frame indicies; skipping export.\r\n" % (ipo.name,)
valid = 0
break
if valid and (ipo.getCurve('RotX') != None or ipo.getCurve('RotY') != None or ipo.getCurve('RotZ') != None):
if ipo.getCurve('RotX') == None or ipo.getCurve('RotY') == None or ipo.getCurve('RotZ') == None:
print "Info: GAMX_Export: Error: Ipo '%s' is missing key frame definitions of at least one RotX, RotY, or RotZ channel (X:%s Y:%s Z:%s); skipping export.\r\n" % (ipo.name,gamx_isnone_string(ipo.getCurve('RotX')),gamx_isnone_string(ipo.getCurve('RotY')),gamx_isnone_string(ipo.getCurve('RotZ')))
valid = 0
elif len(ipo.getCurve('RotX').bezierPoints) != len(ipo.getCurve('RotY').bezierPoints) or len(ipo.getCurve('RotY').bezierPoints) != len(ipo.getCurve('RotZ').bezierPoints):
print "Info: GAMX_Export: Error: Ipo '%s' RotX, RotY, & RotZ channels do not contain same number of key frames (X:%d Y:%d Z:%d); skipping export.\r\n" % (ipo.name,len(ipo.getCurve('RotX').bezierPoints),len(ipo.getCurve('RotY').bezierPoints),len(ipo.getCurve('RotZ').bezierPoints))
valid = 0
else:
for i in range(len(ipo.getCurve('RotX').bezierPoints)):
if ipo.getCurve('RotX').bezierPoints[i].pt[0] != ipo.getCurve('RotY').bezierPoints[i].pt[0] or ipo.getCurve('RotY').bezierPoints[i].pt[0] != ipo.getCurve('RotZ').bezierPoints[i].pt[0]:
print "Info: GAMX_Export: Error: Ipo '%s' RotX, RotY, & RotZ channels are not simultaneous for all key frame indicies; skipping export.\r\n" % (ipo.name,)
valid = 0
break
if i > 0 and (abs(ipo.getCurve('RotX').bezierPoints[i].pt[1] - ipo.getCurve('RotX').bezierPoints[i-1].pt[1]) > 9.000001 or abs(ipo.getCurve('RotY').bezierPoints[i].pt[1] - ipo.getCurve('RotY').bezierPoints[i-1].pt[1]) > 9.000001 or abs(ipo.getCurve('RotZ').bezierPoints[i].pt[1] - ipo.getCurve('RotZ').bezierPoints[i-1].pt[1]) > 9.000001):
print "Info: GAMX_Export: Error: Ipo '%s' has at least one RotX, RotY, & RotZ channel that travels beyond the safe 90 degree key frame value extent; skipping export.\r\n" % (ipo.name,)
valid = 0
break
if valid and (ipo.getCurve('ScaleX') != None or ipo.getCurve('ScaleY') != None or ipo.getCurve('ScaleZ') != None):
if ipo.getCurve('ScaleX') == None or ipo.getCurve('ScaleY') == None or ipo.getCurve('ScaleZ') == None:
print "Info: GAMX_Export: Error: Ipo '%s' is missing key frame definitions of at least one ScaleX, ScaleY, or ScaleZ channel (X:%s Y:%s Z:%s); skipping export.\r\n" % (ipo.name,gamx_isnone_string(ipo.getCurve('ScaleX')),gamx_isnone_string(ipo.getCurve('ScaleY')),gamx_isnone_string(ipo.getCurve('ScaleZ')))
valid = 0
elif len(ipo.getCurve('ScaleX').bezierPoints) != len(ipo.getCurve('ScaleY').bezierPoints) or len(ipo.getCurve('ScaleY').bezierPoints) != len(ipo.getCurve('ScaleZ').bezierPoints):
print "Info: GAMX_Export: Error: Ipo '%s' ScaleX, ScaleY, & ScaleZ channels do not contain same number of key frames (X:%d Y:%d Z:%d); skipping export.\r\n" % (ipo.name,len(ipo.getCurve('ScaleX').bezierPoints),len(ipo.getCurve('ScaleY').bezierPoints),len(ipo.getCurve('ScaleZ').bezierPoints))
valid = 0
else:
for i in range(len(ipo.getCurve('ScaleX').bezierPoints)):
if ipo.getCurve('ScaleX').bezierPoints[i].pt[0] != ipo.getCurve('ScaleY').bezierPoints[i].pt[0] or ipo.getCurve('ScaleY').bezierPoints[i].pt[0] != ipo.getCurve('ScaleZ').bezierPoints[i].pt[0]:
print "Info: GAMX_Export: Error: Ipo '%s' ScaleX, ScaleY, & ScaleZ channels are not simultaneous for all key frame indicies; skipping export.\r\n" % (ipo.name,)
valid = 0
break
if valid and (ipo.getCurve('QuatW') != None or ipo.getCurve('QuatX') != None or ipo.getCurve('QuatY') != None or ipo.getCurve('QuatZ') != None):
if ipo.getCurve('QuatW') == None or ipo.getCurve('QuatX') == None or ipo.getCurve('QuatY') == None or ipo.getCurve('QuatZ') == None:
print "Info: GAMX_Export: Error: Ipo '%s' is missing key frame definitions of at least one QuatW, QuatX, QuatY, or QuatZ channel (W:%s X:%s Y:%s Z:%s); skipping export.\r\n" % (ipo.name,gamx_isnone_string(ipo.getCurve('QuatW')),gamx_isnone_string(ipo.getCurve('QuatX')),gamx_isnone_string(ipo.getCurve('QuatY')),gamx_isnone_string(ipo.getCurve('QuatZ')))
valid = 0
elif len(ipo.getCurve('QuatW').bezierPoints) != len(ipo.getCurve('QuatX').bezierPoints) or len(ipo.getCurve('QuatX').bezierPoints) != len(ipo.getCurve('QuatY').bezierPoints) or len(ipo.getCurve('QuatY').bezierPoints) != len(ipo.getCurve('QuatZ').bezierPoints):
print "Info: GAMX_Export: Error: Ipo '%s' QuatW, QuatX, QuatY, & QuatZ channels do not contain same number of key frames (W:%d X:%d Y:%d Z:%d); skipping export.\r\n" % (ipo.name,len(ipo.getCurve('QuatW').bezierPoints),len(ipo.getCurve('QuatX').bezierPoints),len(ipo.getCurve('QuatY').bezierPoints),len(ipo.getCurve('QuatZ').bezierPoints))
valid = 0
else:
for i in range(len(ipo.getCurve('QuatW').bezierPoints)):
if ipo.getCurve('QuatW').bezierPoints[i].pt[0] != ipo.getCurve('QuatX').bezierPoints[i].pt[0] or ipo.getCurve('QuatX').bezierPoints[i].pt[0] != ipo.getCurve('QuatY').bezierPoints[i].pt[0] or ipo.getCurve('QuatY').bezierPoints[i].pt[0] != ipo.getCurve('QuatZ').bezierPoints[i].pt[0]:
print "Info: GAMX_Export: Error: Ipo '%s' QuatW, QuatX, QuatY, & QuatZ channels are not simultaneous for all key frame indicies; skipping export.\r\n" % (ipo.name,)
valid = 0
break
if valid:
assets += 1
if assets > 1:
fout.write(endl)
fout.write(2*tab4 + r'<interpolator id="' + gamx_name_prefix('ipo_', ipo.name) + r'" type="orientation">' + endl)
fout.write(3*tab4 + r'<keyframes source="internal" type="prs_array">' + endl)
if ipo.getCurve('LocX') != None:
fout.write(4*tab4 + r'<pos_time_indicies count="' + "%d" % len(ipo.getCurve('LocX').bezierPoints) + r'">')
spacer = ''
for i in range(len(ipo.getCurve('LocX').bezierPoints)):
if use_no_shift_over:
if ipo.getCurve('LocX').bezierPoints[i].pt[0] != 1:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('LocX').bezierPoints[i].pt[0] - 0) / anim_fps, 6),))
else:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('LocX').bezierPoints[i].pt[0] - 1) / anim_fps, 6),))
else:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('LocX').bezierPoints[i].pt[0] - 1) / anim_fps, 6),))
spacer = ', '
fout.write(r'</pos_time_indicies>' + endl)
fout.write(4*tab4 + r'<pos_key_values count="' + "%d" % len(ipo.getCurve('LocX').bezierPoints) + r'">')
spacer = ''
for i in range(len(ipo.getCurve('LocX').bezierPoints)):
fout.write(spacer + "%0.6f %0.6f %0.6f" % (round(ipo.getCurve('LocX').bezierPoints[i].pt[1], 6), round(ipo.getCurve('LocZ').bezierPoints[i].pt[1], 6), round(-ipo.getCurve('LocY').bezierPoints[i].pt[1], 6)))
spacer = ', '
fout.write(r'</pos_key_values>' + endl)
if ipo.getCurve('QuatW') != None:
fout.write(4*tab4 + r'<rot_time_indicies count="' + "%d" % len(ipo.getCurve('QuatW').bezierPoints) + r'">')
spacer = ''
for i in range(len(ipo.getCurve('QuatW').bezierPoints)):
if use_no_shift_over:
if ipo.getCurve('QuatW').bezierPoints[i].pt[0] != 1:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('QuatW').bezierPoints[i].pt[0] - 0) / anim_fps, 6),))
else:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('QuatW').bezierPoints[i].pt[0] - 1) / anim_fps, 6),))
else:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('QuatW').bezierPoints[i].pt[0] - 1) / anim_fps, 6),))
spacer = ', '
fout.write(r'</rot_time_indicies>' + endl)
fout.write(4*tab4 + r'<rot_key_values count="' + "%d" % len(ipo.getCurve('QuatW').bezierPoints) + r'">')
spacer = ''
for i in range(len(ipo.getCurve('QuatW').bezierPoints)):
fout.write(spacer + "%0.6f %0.6f %0.6f %0.6f" % (round(ipo.getCurve('QuatW').bezierPoints[i].pt[1], 6), round(ipo.getCurve('QuatX').bezierPoints[i].pt[1], 6), round(ipo.getCurve('QuatZ').bezierPoints[i].pt[1], 6), round(-ipo.getCurve('QuatY').bezierPoints[i].pt[1], 6)))
spacer = ', '
fout.write(r'</rot_key_values>' + endl)
elif ipo.getCurve('RotX') != None:
fout.write(4*tab4 + r'<rot_time_indicies count="' + "%d" % len(ipo.getCurve('RotX').bezierPoints) + r'">')
spacer = ''
for i in range(len(ipo.getCurve('RotX').bezierPoints)):
if use_no_shift_over:
if ipo.getCurve('RotX').bezierPoints[i].pt[0] != 1:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('RotX').bezierPoints[i].pt[0] - 0) / anim_fps, 6),))
else:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('RotX').bezierPoints[i].pt[0] - 1) / anim_fps, 6),))
else:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('RotX').bezierPoints[i].pt[0] - 1) / anim_fps, 6),))
spacer = ', '
fout.write(r'</rot_time_indicies>' + endl)
fout.write(4*tab4 + r'<rot_key_values count="' + "%d" % len(ipo.getCurve('RotX').bezierPoints) + r'">')
spacer = ''
for i in range(len(ipo.getCurve('RotX').bezierPoints)):
quat = gamx_axis_to_quat([gamx_deg_to_rad(ipo.getCurve('RotX').bezierPoints[i].pt[1] * 10.0), gamx_deg_to_rad(ipo.getCurve('RotZ').bezierPoints[i].pt[1] * 10.0), gamx_deg_to_rad(-ipo.getCurve('RotY').bezierPoints[i].pt[1] * 10.0)])
fout.write(spacer + "%0.6f %0.6f %0.6f %0.6f" % (round(quat[0], 6), round(quat[1], 6), round(quat[2], 6), round(quat[3], 6)))
spacer = ', '
fout.write(r'</rot_key_values>' + endl)
if ipo.getCurve('ScaleX') != None:
fout.write(4*tab4 + r'<scl_time_indicies count="' + "%d" % len(ipo.getCurve('ScaleX').bezierPoints) + r'">')
spacer = ''
for i in range(len(ipo.getCurve('ScaleX').bezierPoints)):
if use_no_shift_over:
if ipo.getCurve('ScaleX').bezierPoints[i].pt[0] != 1:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('ScaleX').bezierPoints[i].pt[0] - 0) / anim_fps, 6),))
else:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('ScaleX').bezierPoints[i].pt[0] - 1) / anim_fps, 6),))
else:
fout.write(spacer + "%0.6f" % (round((ipo.getCurve('ScaleX').bezierPoints[i].pt[0] - 1) / anim_fps, 6),))
spacer = ', '
fout.write(r'</scl_time_indicies>' + endl)
fout.write(4*tab4 + r'<scl_key_values count="' + "%d" % len(ipo.getCurve('ScaleX').bezierPoints) + r'">')
spacer = ''
for i in range(len(ipo.getCurve('ScaleX').bezierPoints)):
fout.write(spacer + "%0.6f %0.6f %0.6f" % (round(ipo.getCurve('ScaleX').bezierPoints[i].pt[1], 6), round(ipo.getCurve('ScaleZ').bezierPoints[i].pt[1], 6), round(ipo.getCurve('ScaleY').bezierPoints[i].pt[1], 6)))
spacer = ', '
fout.write(r'</scl_key_values>' + endl)
fout.write(3*tab4 + r'</keyframes>' + endl)
fout.write(3*tab4 + r'<controller>' + endl)
if use_unique_timer:
fout.write(4*tab4 + r'<timer id="' + gamx_name_prefix('tmr_', gamx_name_prefix('ipo_', ipo.name)) + r'" type="basic">' + endl)
fout.write(4*tab4 + r'</timer>' + endl)
else:
fout.write(4*tab4 + r'<timer ref="' + gamx_name_prefix('tmr_', gamx_name_prefix('obj_', gamx_first_scene_name())) + r'"/>' + endl)
fout.write(3*tab4 + r'</controller>' + endl)
if ipo.getCurve('LocX') != None:
fout.write(3*tab4 + r'<pos_polation>')
mode = 0
for ipoc in ipo.curves:
if ipoc.name[:3] == "Loc":
mode |= ipoc.interpolation
if mode & Blender.IpoCurve.InterpTypes["CONST"]:
fout.write(r'ipo_const ')
elif mode & Blender.IpoCurve.InterpTypes["LINEAR"]:
fout.write(r'ipo_linear ')
elif mode & Blender.IpoCurve.InterpTypes["BEZIER"]:
if use_linear_over_cubic_cr:
fout.write(r'ipo_linear ') # no direct tag
print "Info: GAMX_Export: Warning: BEZIER IpoCurve interpolator setting not supported; using 'linear' position interpolation.\r\n"
else:
fout.write(r'ipo_cubiccr ') # no direct tag
print "Info: GAMX_Export: Warning: BEZIER IpoCurve interpolator setting not supported; using 'cubic_cr' position interpolation.\r\n"
else:
fout.write(r'ipo_linear ') # default
mode = 0
for ipoc in ipo.curves:
if ipoc.name[:3] == "Loc":
mode |= ipoc.extend
if mode & Blender.IpoCurve.ExtendTypes["CONST"]:
fout.write(r'epo_const')
elif mode & Blender.IpoCurve.ExtendTypes["EXTRAP"]:
fout.write(r'epo_linear')
elif mode & Blender.IpoCurve.ExtendTypes["CYCLIC"]:
fout.write(r'epo_cyclic')
elif mode & Blender.IpoCurve.ExtendTypes["CYCLIC_EXTRAP"]:
fout.write(r'epo_cyclicadd')
else:
fout.write(r'epo_const') # default
fout.write(r'</pos_polation>' + endl)
if ipo.getCurve('QuatW') != None or ipo.getCurve('RotX') != None:
fout.write(3*tab4 + r'<rot_polation>')
mode = 0
for ipoc in ipo.curves:
if ipoc.name[:3] == "Rot" or ipoc.name[:4] == "Quat":
mode |= ipoc.interpolation
if mode & Blender.IpoCurve.InterpTypes["CONST"]:
fout.write(r'ipo_const ')
elif mode & Blender.IpoCurve.InterpTypes["LINEAR"]:
fout.write(r'ipo_linear ')
elif mode & Blender.IpoCurve.InterpTypes["BEZIER"]:
if use_linear_over_cubic_cr:
fout.write(r'ipo_linear ') # no direct tag
print "Info: GAMX_Export: Warning: BEZIER IpoCurve interpolator setting not supported; using 'linear' rotation interpolation.\r\n"
else:
fout.write(r'ipo_cubiccr ') # no direct tag
print "Info: GAMX_Export: Warning: BEZIER IpoCurve interpolator setting not supported; using 'cubic_cr' rotation interpolation.\r\n"
else:
fout.write(r'ipo_linear ') # default
mode = 0
for ipoc in ipo.curves:
if ipoc.name[:3] == "Rot" or ipoc.name[:4] == "Quat":
mode |= ipoc.extend
if mode & Blender.IpoCurve.ExtendTypes["CONST"]:
fout.write(r'epo_const')
elif mode & Blender.IpoCurve.ExtendTypes["EXTRAP"]:
fout.write(r'epo_linear')
elif mode & Blender.IpoCurve.ExtendTypes["CYCLIC"]:
fout.write(r'epo_cyclic')
elif mode & Blender.IpoCurve.ExtendTypes["CYCLIC_EXTRAP"]:
fout.write(r'epo_cyclicadd')
else:
fout.write(r'epo_const') # default
fout.write(r'</rot_polation>' + endl)
if ipo.getCurve('ScaleX') != None:
fout.write(3*tab4 + r'<scl_polation>')
mode = 0
for ipoc in ipo.curves:
if ipoc.name[:5] == "Scale":
mode |= ipoc.interpolation
if mode & Blender.IpoCurve.InterpTypes["CONST"]:
fout.write(r'ipo_const ')
elif mode & Blender.IpoCurve.InterpTypes["LINEAR"]:
fout.write(r'ipo_linear ')
elif mode & Blender.IpoCurve.InterpTypes["BEZIER"]:
if use_linear_over_cubic_cr:
fout.write(r'ipo_linear ') # no direct tag
print "Info: GAMX_Export: Warning: BEZIER IpoCurve interpolator setting not supported; using 'linear' scale interpolation.\r\n"
else:
fout.write(r'ipo_cubiccr ') # no direct tag
print "Info: GAMX_Export: Warning: BEZIER IpoCurve interpolator setting not supported; using 'cubic_cr' scale interpolation.\r\n"
else:
fout.write(r'ipo_linear ') # default
mode = 0
for ipoc in ipo.curves:
if ipoc.name[:5] == "Scale":
mode |= ipoc.extend
if mode & Blender.IpoCurve.ExtendTypes["CONST"]:
fout.write(r'epo_const')
elif mode & Blender.IpoCurve.ExtendTypes["EXTRAP"]:
fout.write(r'epo_linear')
elif mode & Blender.IpoCurve.ExtendTypes["CYCLIC"]:
fout.write(r'epo_cyclic')
elif mode & Blender.IpoCurve.ExtendTypes["CYCLIC_EXTRAP"]:
fout.write(r'epo_cyclicadd')
else:
fout.write(r'epo_const') # default
fout.write(r'</scl_polation>' + endl)
fout.write(2*tab4 + r'</interpolator>' + endl)
# Write materials
for mat in bpy.data.materials:
assets += 1
if assets > 1:
fout.write(endl)
fout.write(2*tab4 + r'<material id="' + gamx_name_prefix('mat_', mat.name) + r'" type="material">' + endl)
R, G, B, A = round(mat.R * mat.amb, 6), round(mat.G * mat.amb, 6), round(mat.B * mat.amb, 6), round(mat.alpha, 6)
fout.write(3*tab4 + r'<ambient>' + "%0.6f %0.6f %0.6f %0.6f" % (R,G,B,A) + r'</ambient>' + endl)
R, G, B, A = round(mat.R, 6), round(mat.G, 6), round(mat.B, 6), round(mat.alpha, 6)
fout.write(3*tab4 + r'<diffuse>' + "%0.6f %0.6f %0.6f %0.6f" % (R,G,B,A) + r'</diffuse>' + endl)
R, G, B, A = round(mat.specR, 6), round(mat.specG, 6), round(mat.specB, 6), round(mat.alpha, 6)
fout.write(3*tab4 + r'<specular>' + "%0.6f %0.6f %0.6f %0.6f" % (R,G,B,A) + r'</specular>' + endl)
R, G, B, A = round(mat.R * mat.emit, 6), round(mat.G * mat.emit, 6), round(mat.B * mat.emit, 6), round(mat.alpha, 6)
fout.write(3*tab4 + r'<emmisive>' + "%0.6f %0.6f %0.6f %0.6f" % (R,G,B,A) + r'</emmisive>' + endl)
S = round((mat.hard - 1.0) / 510.0, 6) # [1,511]
fout.write(3*tab4 + r'<shininess>' + "%0.6f" % (S,) + r'</shininess>' + endl)
fout.write(2*tab4 + r'</material>' + endl)
# Write textures
for tex in bpy.data.textures:
if tex.getImage() == None:
print "Info: GAMX_Export: Error: Texture '%s' does not have an image. Only image textures are supported.\r\n" % (tex.name,)
else:
mtex = None
# Find corresponding MTex through materials (texture class doesn't directly link)
for mat in bpy.data.materials:
for mtex in mat.getTextures():
if mtex is not None and gamx_name_prefix('tex_', mtex.tex.name) == gamx_name_prefix('tex_', tex.name):
break # layer 2
else:
mtex = None
if mtex is not None: # layer 1
break
else:
print "Info: GAMX_Export: Error: Cannot find corresponding MTex material structure for texture '%s'.\r\n" % (tex.name,)
continue # MTex not found, cannot extract texture data
# Although MTex at this point isn't necessarily the exact correspondent, for most types it's close enough
assets += 1
if assets > 1:
fout.write(endl)
fout.write(2*tab4 + r'<texture id="' + gamx_name_prefix('tex_', tex.name) + r'" type="static">' + endl)
fout.write(3*tab4 + r'<surface source="external">' + endl)
fout.write(4*tab4 + r'<url>' + gamx_file_prefix(tex.getImage().getFilename()[2:]) + r'</url>' + endl)
fout.write(4*tab4 + r'<transforms>')
spacer = ''
if tex.flags & Blender.Texture.Flags["NEGALPHA"]:
fout.write(spacer + 'invert_ac')
spacer = ' '
if mtex.noRGB:
fout.write(spacer + 'force_gs')
spacer = ' '
#else: # implication of forcing rgb is not well enough implied
# fout.write(spacer + 'force_rgb')
# spacer = ' '
if tex.useAlpha or tex.imageFlags & Blender.Texture.ImageFlags["USEALPHA"]:
fout.write(spacer + 'force_ac')
spacer = ' '
else: # very implied that if alpha is not to be used to get rid of it
fout.write(spacer + 'force_no_ac')
spacer = ' '
if tex.flags & Blender.Texture.Flags["FLIPBLEND"]:
fout.write(spacer + 'flip_vert flip_horz')
spacer = ' '
fout.write(r'</transforms>' + endl)
fout.write(3*tab4 + r'</surface>' + endl)
fout.write(3*tab4 + r'<environment>')
# Figure out the environment setting, most of which don't have enough information to determine full range of options
if tex.normalMap or tex.imageFlags & Blender.Texture.ImageFlags["NORMALMAP"]:
fout.write('dot3')
elif mtex.blendmode == Blender.Texture.BlendModes["DARKEN"]: # no direct tag
fout.write('replace')
print "Info: GAMX_Export: Warning: DARKEN BlendModes fragmentation environment setting not supported; using 'replace' fragmentation environment.\r\n"
elif mtex.blendmode == Blender.Texture.BlendModes["DIVIDE"]: # no direct tag
fout.write('decal')
print "Info: GAMX_Export: Warning: DIVIDE BlendModes fragmentation environment setting not supported; using 'decal' fragmentation environment.\r\n"
elif mtex.blendmode == Blender.Texture.BlendModes["LIGHTEN"]: # no direct tag
fout.write('replace')
print "Info: GAMX_Export: Warning: LIGHTEN BlendModes fragmentation environment setting not supported; using 'replace' fragmentation environment.\r\n"
#elif mtex.blendmode == Blender.Texture.BlendModes["MIX"]:
# fout.write('modulate') # x1,x2,x4 implemented in else block
elif mtex.blendmode == Blender.Texture.BlendModes["ADD"]:
fout.write('add')
#elif mtex.blendmode == Blender.Texture.BlendModes["MULTIPLY"]: # no direct tag
# fout.write('modulate') # x1,x2,x4 implemented in else block
elif mtex.blendmode == Blender.Texture.BlendModes["DIFFERENCE"]: # no direct tag
fout.write('subtract')
print "Info: GAMX_Export: Warning: DIFFERENCE BlendModes fragmentation environment setting not supported; using 'subtract' fragmentation environment.\r\n"
elif mtex.blendmode == Blender.Texture.BlendModes["SUBTRACT"]:
fout.write('subtract')
#elif mtex.blendmode == Blender.Texture.BlendModes["SCREEN"]: # no direct tag
# fout.write('modulate') # x1,x2,x4 implemented in else block
else:
if mtex.blendmode != Blender.Texture.BlendModes["MIX"]:
if mtex.blendmode == Blender.Texture.BlendModes["MULTIPLY"]:
print "Info: GAMX_Export: Warning: MULTIPLY BlendModes fragmentation environment setting not supported; using 'modulate' fragmentation environment.\r\n"
elif mtex.blendmode == Blender.Texture.BlendModes["SCREEN"]:
print "Info: GAMX_Export: Warning: SCREEN BlendModes fragmentation environment setting not supported; using 'modulate' fragmentation environment.\r\n"
else:
print "Info: GAMX_Export: Warning: UNKNOWN BlendModes fragmentation environment setting not supported; using 'modulate' fragmentation environment.\r\n"
if mtex.varfac == 4.0:
fout.write('modulate_x4')
elif mtex.varfac == 2.0:
fout.write('modulate_x2')
else:
fout.write('modulate')
fout.write(r'</environment>' + endl)
fout.write(3*tab4 + r'<filter>')
if tex.mipmap or tex.imageFlags & Blender.Texture.ImageFlags["MIPMAP"]:
if tex.interpol: # not enough information to determine full range of options
fout.write('trilinear')
else:
if use_bilinear_over_unilinear:
fout.write('bilinear')
print "Info: GAMX_Export: Warning: No interpolation & MIPMAP ImageFlags filter setting is ambiguous; using 'bilinear' filtering.\r\n"
else:
fout.write('unilinear')
print "Info: GAMX_Export: Warning: No interpolation & MIPMAP ImageFlags filter setting is ambiguous; using 'unilinear' filtering.\r\n"
else:
if tex.interpol:
fout.write('linear')
else:
fout.write('nearest')
fout.write(r'</filter>' + endl)
fout.write(3*tab4 + r'<swrap>')
if tex.getImage().clampX: # not enough information to determine full range of options
fout.write('clamp')
else:
fout.write('repeat')
fout.write(r'</swrap>' + endl)
fout.write(3*tab4 + r'<twrap>')
if tex.getImage().clampY: # not enough information to determine full range of options
fout.write('clamp')
else:
fout.write('repeat')
fout.write(r'</twrap>' + endl)
fout.write(2*tab4 + r'</texture>' + endl)
# Write cameras
pass # TODO!
# Write lamps
pass # TODO!
# Write BBs
for bb in bpy.data.meshes:
mode = bb.mode
if bb.faceUV:
for face in bb.faces:
mode |= face.mode
if mode & (Blender.Mesh.FaceModes["HALO"] | Blender.Mesh.FaceModes["BILLBOARD"]) or bb.name[:3] == 'bb_':
assets += 1
if assets > 1:
fout.write(endl)
fout.write(2*tab4 + r'<billboard id="' + gamx_name_prefix('bb_', bb.name) + r'" type="static">' + endl)
gamx_export_source_sditva(fout, bb)
fout.write(3*tab4 + r'<bounding><volume type="zero" /></bounding>' + endl)
mats = []
for mat in bb.materials:
if mat != None:
mats.append(gamx_name_prefix('mat_', mat.name))
if len(mats):
fout.write(3*tab4 + r'<materials>' + endl)
for mat in mats:
fout.write(4*tab4 + r'<material ref="' + gamx_name_prefix('mat_', mat) + '" />' + endl)
fout.write(3*tab4 + r'</materials>' + endl)
if bb.faceUV:
texs = []
for mat in bb.materials:
if mat != None:
for tex in mat.textures:
if tex != None:
texs.append(gamx_name_prefix('tex_', tex.tex.name))
if len(texs):
fout.write(3*tab4 + r'<textures>' + endl)
for tex in texs:
fout.write(4*tab4 + r'<texture ref="' + gamx_name_prefix('tex_', tex) + '" />' + endl)
fout.write(3*tab4 + r'</textures>' + endl)
fout.write(2*tab4 + r'</billboard>' + endl)
# Write meshes
for mesh in bpy.data.meshes:
mode = mesh.mode
if mesh.faceUV:
for face in mesh.faces:
mode |= face.mode
if (mode & ~(Blender.Mesh.FaceModes["HALO"] | Blender.Mesh.FaceModes["BILLBOARD"]) and mesh.name[:3] != 'bb_') or mesh.name[:5] == 'mesh_':
assets += 1
if assets > 1:
fout.write(endl)
fout.write(2*tab4 + r'<mesh id="' + gamx_name_prefix('mesh_', mesh.name) + r'" type="static">' + endl)
gamx_export_source_sditva(fout, mesh)
fout.write(3*tab4 + r'<bounding><volume type="zero" /></bounding>' + endl)
mats = []
for mat in mesh.materials:
if mat != None:
mats.append(gamx_name_prefix('mat_', mat.name))
if len(mats):
fout.write(3*tab4 + r'<materials>' + endl)
for mat in mats:
fout.write(4*tab4 + r'<material ref="' + gamx_name_prefix('mat_', mat) + '" />' + endl)
fout.write(3*tab4 + r'</materials>' + endl)
if mesh.faceUV:
texs = []
for mat in mesh.materials:
if mat != None:
for tex in mat.textures:
if tex != None:
texs.append(gamx_name_prefix('tex_', tex.tex.name))
if len(texs):
fout.write(3*tab4 + r'<textures>' + endl)
for tex in texs:
fout.write(4*tab4 + r'<texture ref="' + gamx_name_prefix('tex_', tex) + '" />' + endl)
fout.write(3*tab4 + r'</textures>' + endl)
fout.write(2*tab4 + r'</mesh>' + endl)
# Write & convert scenes -> object trees
for scn in bpy.data.scenes:
if len(scn.objects):
assets += len(scn.objects)
if assets > 1:
fout.write(endl)
fout.write(2*tab4 + r'<node id="' + gamx_name_prefix('obj_', scn.name) + r'" type="transform">' + endl)
fout.write(3*tab4 + r'<bounding><volume type="zero" /></bounding>' + endl)
fout.write(3*tab4 + r'<assets>' + endl)
chds = {}
for obj in scn.objects:
chds[obj.name] = []
for obj in scn.objects:
if obj.parent != None:
chds[obj.parent.name].append(obj)
for obj in scn.objects:
if obj.parent == None:
gamx_export_node(fout, 4, obj, chds)
fout.write(3*tab4 + r'</assets>' + endl)
fout.write(2*tab4 + r'</node>' + endl)
# Write footer
fout.write(1*tab4 + r'</assets>' + endl)
fout.write(0*tab4 + r'</gamx>' + endl)
except:
raise
finally:
fout.close()
print "Info: GAMX_Export: Finished exporting %d items to '%s'.\r\n" % (assets, filename)
except:
raise
def gamx_export_gui(filename):
global name_prefix
global file_prefix
global anim_fps
global use_no_shift_over
global use_bilinear_over_unilinear
global use_linear_over_cubic_cr
global use_unique_timer
global last_folder
try:
block = [ ]
opt_nmprefix = Blender.Draw.Create(name_prefix)
block.append(("Name prefix: ", opt_nmprefix, 0, 30, "Prefixes all objects. Used to identify assets in a global system."))
opt_flprefix = Blender.Draw.Create(file_prefix)
block.append(("File prefix: ", opt_flprefix, 0, 30, "Used to specify a particular local directory or filename offset."))
opt_animfps = Blender.Draw.Create(anim_fps)
block.append(("Anim. FPS: ", opt_animfps, 1.0, 120.0, "Exported frame control uses this value to convert to seconds."))
opt_noshiftover = Blender.Draw.Create(use_no_shift_over)
block.append(("Anim|NoShift", opt_noshiftover, "Fudge frame 1 as frame 0 (on) instead of full -1 frame shift (off)."))
opt_bioveruni = Blender.Draw.Create(use_bilinear_over_unilinear)
block.append(("Texs|Bi|Uni", opt_bioveruni, "Texs /w MIPs but w/o interp can use bilinear (on) or unilinear (off) filtering."))
opt_linovercub = Blender.Draw.Create(use_linear_over_cubic_cr)
block.append(("IPOs|Bez->Lin", opt_linovercub, "Bezier converts to linear (on) or cubic_cr (off)."))
opt_useunqtmr = Blender.Draw.Create(use_unique_timer)
block.append(("TMRs|Unq|All", opt_useunqtmr, "Timers are unique per object (on) or shared across all (off)."))
retVal = Blender.Draw.PupBlock("GAMX Export Options", block)
if retVal:
name_prefix = "%s" % opt_nmprefix
name_prefix = name_prefix[1:][:-1]
file_prefix = "%s" % opt_flprefix
file_prefix = file_prefix[1:][:-1]
anim_fps = float("%s" % opt_animfps)
if opt_noshiftover == 1:
use_no_shift_over = True
else:
use_no_shift_over = False
if opt_bioveruni == 1:
use_bilinear_over_unilinear = True
else:
use_bilinear_over_unilinear = False
if opt_linovercub == 1:
use_linear_over_cubic_cr = True
else:
use_linear_over_cubic_cr = False
if opt_useunqtmr == 1:
use_unique_timer = True
else:
use_unique_timer = False
last_folder = os.path.abspath(os.path.dirname(filename))
gamx_try_save_options()
gamx_export(filename)
Blender.Draw.PupMenu("Exporting successful.%t|Ok")
except:
Blender.Draw.PupMenu("Failure exporting.%t|Ok")
raise
if __name__ == '__main__':
gamx_try_load_options()
if not os.path.exists(last_folder):
last_folder = os.path.abspath(os.path.dirname(Blender.Get("filename")))
filename = os.path.join(last_folder, os.path.splitext(os.path.split(Blender.Get("filename"))[1])[0] + ".gamx")
Blender.Window.FileSelector(gamx_export_gui, "Export Asset Manifest GAMX", filename)
``` |
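The exporter's rotation handling hinges on `gamx_axis_to_quat`, which treats Blender's Rot channels (scaled by 10 to degrees and converted to radians) as a rotation vector. The snippet below is a small sanity check of that conversion; it restates the same math outside Blender (the script itself imports the `Blender` module, so it cannot be imported directly), and the helper name is ours, not part of the exporter.
```python
import math

def axis_angle_to_quat(angles):
    # Same math as gamx_axis_to_quat (minus its redundant re-normalization), usable outside Blender.
    angle = math.sqrt(sum(a * a for a in angles))
    if angle == 0.0:
        return [1.0, 0.0, 0.0, 0.0]
    axis = [a / angle for a in angles]
    half = 0.5 * angle
    s = math.sin(half)
    return [math.cos(half), axis[0] * s, axis[1] * s, axis[2] * s]

# A 90-degree rotation about Z, expressed as the rotation vector [0, 0, pi/2]:
print(axis_angle_to_quat([0.0, 0.0, math.pi / 2.0]))  # -> approx [0.7071, 0.0, 0.0, 0.7071]
```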
{
"source": "johannbrehmer/ginkgo-rl",
"score": 2
} |
#### File: ginkgo_rl/agents/base.py
```python
import numpy as np
import torch
from torch import nn
import logging
from tqdm import trange
from ..utils.replay_buffer import History
from ..utils.various import check_for_nans
logger = logging.getLogger(__name__)
class Agent(nn.Module):
""" Abstract base agent class """
def __init__(
self,
env,
gamma=1.00,
lr=1.0e-3,
lr_decay=0.01,
weight_decay=0.0,
history_length=None,
clip_gradient=None,
dtype=torch.float,
device=torch.device("cpu"),
*args,
**kwargs,
):
self.env = env
self.gamma = gamma
self.device = device
self.dtype = dtype
self.action_space = env.action_space
self.state_shape = env.observation_space.shape
self.state_length = np.product(self.state_shape)
self.num_actions = self.action_space.n
self._init_replay_buffer(history_length)
self.optimizer = None
self.lr = lr
self.lr_decay = lr_decay
self.weight_decay = weight_decay
self.clip_gradient = clip_gradient
super().__init__()
def set_env(self, env):
self.env = env
def learn(self, total_timesteps, callback=None):
# Prepare training
self.train()
if list(self.parameters()):
self.optimizer = torch.optim.Adam(params=self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
self.scheduler = torch.optim.lr_scheduler.ExponentialLR(
self.optimizer, gamma=self.lr_decay ** (1.0 / (total_timesteps + 1.0e-9))
)
else:
self.optimizer = None # For non-NN methods
self.scheduler = None
# Prepare episodes
state = self.env.reset()
reward = 0.0
rewards = []
episode = -1
done = True
episode_loss = 0.0
episode_reward = 0.0
episode_length = 0
for steps in trange(total_timesteps):
# Initialize episode
if done:
episode += 1
episode_loss = 0.0
episode_reward = 0.0
episode_length = 0
state = self.env.reset()
self.init_episode()
# Agent and environment step
action, agent_info = self.predict(state)
next_state, next_reward, done, env_info = self.env.step(action)
# Learning
loss = self.update(
state=self._tensorize(state),
reward=reward,
action=action,
done=done,
next_state=self._tensorize(next_state),
next_reward=next_reward,
num_episode=episode,
**agent_info,
)
# Book keeping
episode_loss += loss
episode_reward += next_reward
episode_length += 1
rewards.append(next_reward)
state = next_state
reward = next_reward
if done and callback is not None:
callback(
callback_info={
"episode": episode,
"episode_length": episode_length,
"loss": episode_loss,
"reward": episode_reward,
"likelihood_evaluations": agent_info["likelihood_evaluations"],
"mean_abs_weight": self.get_mean_weight(),
}
)
def predict(self, state):
"""
Given an environment state, pick the next action and return it.
Parameters
----------
state : ndarray
Observed state s_t.
Returns
-------
action : int
Chosen action a_t.
agent_info : dict
            Additional agent information.
"""
state = self._tensorize(state)
return self._predict(state)
def init_episode(self):
""" Is called at the beginning of an episode """
pass
def update(self, state, reward, action, done, next_state, next_reward, num_episode, **kwargs):
"""
Is called at the end of each step, gives the agent the chance to a) update the replay buffer and b) learn its weights.
"""
raise NotImplementedError
def _init_replay_buffer(self, history_length):
self.history = History(max_length=history_length, dtype=self.dtype, device=self.device)
def _tensorize(self, array):
tensor = array if isinstance(array, torch.Tensor) else torch.tensor(array)
tensor = tensor.to(self.device, self.dtype)
check_for_nans(f"Tensorizing state {array}", tensor)
return tensor
def _gradient_step(self, loss):
self.optimizer.zero_grad()
loss.backward()
if self.clip_gradient is not None:
if self.verbose > 2:
grad_norm = torch.nn.utils.clip_grad_norm_(self.parameters(), self.clip_gradient)
logger.debug(f"Gradient norm (clipping at {clip_gradient}): {grad_norm}")
else:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.clip_gradient)
self.optimizer.step()
self.scheduler.step()
def _find_legal_actions(self, state):
# Compatibility with torch tensors and numpy arrays
try:
state = state.numpy()
except:
pass
particles = [i for i, p in enumerate(state) if np.max(p) > 0]
actions = []
try: # 1D-wrapped envs
for i, pi in enumerate(particles):
for j, pj in enumerate(particles[:i]):
actions.append(self.env.wrap_action((pi, pj)))
except:
for i, pi in enumerate(particles):
for j, pj in enumerate(particles[:i]):
actions.append((pi, pj))
return actions
def get_mean_weight(self):
return 0.0
```
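`Agent` leaves `_predict` undefined and `update` unimplemented, so a concrete agent must supply both before `learn()` can run. The sketch below is a hypothetical minimal subclass (a uniform-random policy, not taken from this repository) that illustrates the expected interface; the import path is assumed from the package layout.
```python
import random

import torch

from ginkgo_rl.agents.base import Agent  # assumed import path


class RandomAgent(Agent):
    def _predict(self, state):
        # Pick a legal action uniformly at random; `state` arrives already tensorized by Agent.predict.
        actions = self._find_legal_actions(state)
        action = random.choice(actions)
        info = {
            "log_prob": torch.log(torch.tensor(1.0 / len(actions))),
            "likelihood_evaluations": 0,  # expected by the callback in Agent.learn
        }
        return action, info

    def update(self, state, reward, action, done, next_state, next_reward, num_episode, **kwargs):
        return 0.0  # nothing to learn for a random policy


# agent = RandomAgent(env)
# agent.learn(total_timesteps=1000)
```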
#### File: ginkgo_rl/agents/mcts.py
```python
import torch
from torch import nn
import copy
import logging
import numpy as np
from ginkgo_rl.utils.mcts import MCTSNode
from .base import Agent
from ..utils.nets import MultiHeadedMLP
from ..utils.various import check_for_nans, NanException
logger = logging.getLogger(__name__)
class BaseMCTSAgent(Agent):
def __init__(
self,
*args,
n_mc_target=5,
n_mc_min=5,
n_mc_max=100,
planning_mode="mean",
decision_mode="max_reward",
c_puct=1.0,
reward_range=(-200.0, 0.0),
initialize_with_beam_search=True,
beam_size=10,
verbose=False,
**kwargs,
):
super().__init__(*args, **kwargs)
self.n_mc_target = n_mc_target
self.n_mc_min = n_mc_min
self.n_mc_max = n_mc_max
self.planning_mode = planning_mode
self.decision_mode = decision_mode
self.c_puct = c_puct
self.initialize_with_beam_search = initialize_with_beam_search
self.beam_size = beam_size
self.reward_range = reward_range
self.verbose = verbose
self.sim_env = copy.deepcopy(self.env)
self.sim_env.reset_at_episode_end = False # Avoids expensive re-sampling of jets every time we parse a path
self.episode_reward = 0.0
self.episode_likelihood_evaluations = 0
self.init_episode()
def set_env(self, env):
""" Sets current environment (and initializes episode) """
self.env = env
self.sim_env = copy.deepcopy(self.env)
self.sim_env.reset_at_episode_end = False # Avoids expensive re-sampling of jets every time we parse a path
self.init_episode()
def set_precision(self, n_mc_target, n_mc_min, n_mc_max, planning_mode, c_puct, beam_size):
""" Sets / changes MCTS precision parameters """
self.n_mc_target = n_mc_target
self.n_mc_min = n_mc_min
        self.n_mc_max = n_mc_max
self.planning_mode = planning_mode
self.c_puct = c_puct
self.beam_size = beam_size
def init_episode(self):
""" Initializes MCTS tree and total reward so far """
self.mcts_head = MCTSNode(None, [], reward_min=self.reward_range[0], reward_max=self.reward_range[1])
self.episode_reward = 0.0
self.episode_likelihood_evaluations = 0
def update(self, state, reward, action, done, next_state, next_reward, num_episode, **kwargs):
""" Updates after environment reaction """
# Keep track of total reward
self.episode_reward += next_reward
if self.verbose > 0:
logger.debug(
f"Agent acknowledges receiving a reward of {next_reward}, episode reward so far {self.episode_reward}"
)
# Update MCTS tree
if not done:
self.mcts_head = self.mcts_head.children[action]
self.mcts_head.prune() # This updates the node.path
# Train
if self.training:
return self._train(kwargs["log_prob"])
else:
return 0.0
def _init_replay_buffer(self, history_length):
# No need for a history in this one!
pass
def _predict(self, state):
if self.initialize_with_beam_search:
self._beam_search(state)
action, info = self._mcts(state)
info["likelihood_evaluations"] = self.episode_likelihood_evaluations
return action, info
def _parse_path(self, state, path, from_which_env="real"):
""" Given a path (list of actions), computes the resulting environment state and total reward.
`from_which_env` defines the start point (either "sim" for self.sim_env, or "real" for self.env) """
if from_which_env == "real": # Start in self.env state
if self.sim_env.state is None or not np.all(np.isclose(self.sim_env.state, self.env.state)):
self.sim_env.set_internal_state(self.env.get_internal_state())
elif from_which_env == "sim": # Use current state of self.sim_env
pass
else:
raise ValueError(from_which_env)
self.sim_env.verbose = False
# Follow path
total_reward = 0.0
terminal = False
for action in path:
state, reward, done, info = self.sim_env.step(action)
total_reward += reward
self.episode_likelihood_evaluations += 1
if done:
terminal = True
break
state = self._tensorize(state)
return state, total_reward, terminal
def _parse_action(self, action, from_which_env="sim"):
""" Given a state and an action, computes the log likelihood """
if from_which_env == "real": # Start in self.env state
if self.sim_env.state is None or not np.all(np.isclose(self.sim_env.state, self.env.state)):
self.sim_env.set_internal_state(self.env.get_internal_state())
elif from_which_env == "sim": # Use current state of self.sim_env
pass
else:
raise ValueError(from_which_env)
self.sim_env.verbose = False
try:
_, _ = action
log_likelihood = self.sim_env._compute_log_likelihood(action)
except TypeError:
log_likelihood = self.sim_env._compute_log_likelihood(self.sim_env.unwrap_action(action))
self.episode_likelihood_evaluations += 1
return log_likelihood
def _mcts(self, state, max_steps=1000):
""" Run Monte-Carl tree search from state for n trajectories"""
n_initial_legal_actions = len(self._find_legal_actions(state))
n = min(max(self.n_mc_target * n_initial_legal_actions - self.mcts_head.n, self.n_mc_min), self.n_mc_max)
logger.debug(f"Starting MCTS with {n} trajectories")
for i in range(n):
if self.verbose > 1:
logger.debug(f"Initializing MCTS trajectory {i+1} / {n}")
node = self.mcts_head
total_reward = 0.0
for _ in range(max_steps):
# Parse current state
if len(node.path) == 0:
this_state, total_reward, terminal = self._parse_path(state, node.path)
else: # We can speed this up by just doing a single step in self.sim_env
this_state, last_step_reward, terminal = self._parse_path(
this_state, node.path[-1:], from_which_env="sim"
)
total_reward += last_step_reward
node.set_terminal(terminal)
if self.verbose > 1:
logger.debug(f" Node {node.path}")
# Termination
if terminal:
if self.verbose > 1:
logger.debug(f" Node is terminal")
break
# Expand
if not node.children:
actions = self._find_legal_actions(this_state)
if self.verbose > 1:
logger.debug(f" Expanding: {len(actions)} legal actions")
step_rewards = [self._parse_action(action, from_which_env="sim") for action in actions]
node.expand(actions, step_rewards=step_rewards)
if not node.children:
logger.warning(
f"Did not find any legal actions even though state was not recognized as terminal. "
f"Node path: {node.path}. Children: {node.children}. State: {this_state}. Actions: {actions}."
)
node.set_terminal(True)
break
# Select
policy_probs = self._evaluate_policy(
this_state, node.children.keys(), step_rewards=node.children_q_steps()
)
action = node.select_puct(policy_probs, mode=self.planning_mode, c_puct=self.c_puct)
if self.verbose > 1:
logger.debug(f" Selecting action {action}")
node = node.children[action]
# Backup
if self.verbose > 1:
logger.debug(f" Backing up total reward of {total_reward}")
node.give_reward(self.episode_reward + total_reward, backup=True)
# Select best action
legal_actions = list(self.mcts_head.children.keys())
if not legal_actions:
legal_actions = self._find_legal_actions(state)
step_rewards = self.mcts_head.children_q_steps()
if self.decision_mode == "max_reward":
action = self.mcts_head.select_best(mode="max")
elif self.decision_mode == "max_puct":
policy_probs = self._evaluate_policy(state, legal_actions, step_rewards=step_rewards)
action = self.mcts_head.select_puct(policy_probs=policy_probs, mode="max", c_puct=self.c_puct)
elif self.decision_mode == "mean_puct":
policy_probs = self._evaluate_policy(state, legal_actions, step_rewards=step_rewards)
            action = self.mcts_head.select_puct(policy_probs=policy_probs, mode="mean", c_puct=self.c_puct)
else:
raise ValueError(self.decision_mode)
log_prob = torch.log(self._evaluate_policy(state, legal_actions, step_rewards=step_rewards, action=action))
info = {"log_prob": log_prob}
# Debug output
if self.verbose > 0:
self._report_decision(action, state)
return action, info
def _greedy(self, state):
""" Expands MCTS tree using a greedy algorithm """
node = self.mcts_head
if self.verbose > 1:
logger.debug(f"Starting greedy algorithm.")
while not node.terminal:
# Parse current state
this_state, total_reward, terminal = self._parse_path(state, node.path)
node.set_terminal(terminal)
if self.verbose > 1:
logger.debug(f" Analyzing node {node.path}")
# Expand
if not node.terminal and not node.children:
actions = self._find_legal_actions(this_state)
step_rewards = [self._parse_action(action, from_which_env="sim") for action in actions]
if self.verbose > 1:
logger.debug(f" Expanding: {len(actions)} legal actions")
node.expand(actions, step_rewards=step_rewards)
# If terminal, backup reward
if node.terminal:
if self.verbose > 1:
logger.debug(f" Node is terminal")
if self.verbose > 1:
logger.debug(f" Backing up total reward {total_reward}")
node.give_reward(self.episode_reward + total_reward, backup=True)
# Debugging -- this should not happen
if not node.terminal and not node.children:
logger.warning(
f"Unexpected lack of children! Path: {node.path}, children: {node.children.keys()}, legal actions: {self._find_legal_actions(this_state)}, terminal: {node.terminal}"
)
node.set_terminal(True)
# Greedily select next action
if not node.terminal:
action = node.select_greedy()
node = node.children[action]
if self.verbose > 0:
choice = self.mcts_head.select_best(mode="max")
self._report_decision(choice, state, "Greedy")
def _beam_search(self, state):
""" Expands MCTS tree using beam search """
beam = [(self.episode_reward, self.mcts_head)]
next_beam = []
def format_beam():
return [node.path for _, node in beam]
if self.verbose > 1:
logger.debug(f"Starting beam search with beam size {self.beam_size}. Initial beam: {format_beam()}")
while beam or next_beam:
for i, (_, node) in enumerate(beam):
# Parse current state
this_state, total_reward, terminal = self._parse_path(state, node.path)
node.set_terminal(terminal)
if self.verbose > 1:
logger.debug(f" Analyzing node {i+1} / {len(beam)} on beam: {node.path}")
# Expand
if not node.terminal and not node.children:
actions = self._find_legal_actions(this_state)
step_rewards = [self._parse_action(action, from_which_env="sim") for action in actions]
if self.verbose > 1:
logger.debug(f" Expanding: {len(actions)} legal actions")
node.expand(actions, step_rewards=step_rewards)
# If terminal, backup reward
if node.terminal:
if self.verbose > 1:
logger.debug(f" Node is terminal")
if self.verbose > 1:
logger.debug(f" Backing up total reward {total_reward}")
node.give_reward(self.episode_reward + total_reward, backup=True)
# Did we already process this one? Then skip it
if node.n_beamsearch >= self.beam_size:
if self.verbose > 1:
logger.debug(f" Already beam searched this node sufficiently")
continue
# Beam search selection
for action in node.select_beam_search(self.beam_size):
next_reward = total_reward + node.children[action].q_step
next_node = node.children[action]
next_beam.append((next_reward, next_node))
# Mark as visited
node.in_beam = True
# Just keep top entries for next step
beam = sorted(next_beam, key=lambda x: x[0], reverse=True)[: self.beam_size]
if self.verbose > 1:
logger.debug(
f"Preparing next step, keeping {self.beam_size} / {len(next_beam)} nodes in beam: {format_beam()}"
)
next_beam = []
logger.debug(f"Finished beam search")
if self.verbose > 0:
choice = self.mcts_head.select_best(mode="max")
self._report_decision(choice, state, "Beam search")
def _report_decision(self, chosen_action, state, label="MCTS"):
legal_actions = self._find_legal_actions(state)
probs = self._evaluate_policy(state, legal_actions)
logger.debug(f"{label} results:")
for i, (action_, node_) in enumerate(self.mcts_head.children.items()):
is_chosen = "*" if action_ == chosen_action else " "
is_greedy = "g" if action_ == np.argmax(self.mcts_head.children_q_steps()) else " "
logger.debug(
f" {is_chosen}{is_greedy} {action_:>2d}: "
f"log likelihood = {node_.q_step:6.2f}, "
f"policy = {probs[i].detach().item():.2f}, "
f"n = {node_.n:>2d}, "
f"mean = {node_.q / (node_.n + 1.e-9):>5.1f} [{node_.get_reward():>4.2f}], "
f"max = {node_.q_max:>5.1f} [{node_.get_reward(mode='max'):>4.2f}]"
)
def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):
""" Evaluates the policy on the state and returns the probabilities for a given action or all legal actions """
raise NotImplementedError
def _train(self, log_prob):
""" Policy updates at end of each step, returns loss """
raise NotImplementedError
class PolicyMCTSAgent(BaseMCTSAgent):
def __init__(
self,
*args,
log_likelihood_feature=True,
hidden_sizes=(100, 100,),
activation=nn.ReLU(),
action_factor=0.01,
log_likelihood_factor=0.1,
**kwargs,
):
super().__init__(*args, **kwargs)
self.log_likelihood_feature = log_likelihood_feature
self.actor = MultiHeadedMLP(
1 + 8 + int(self.log_likelihood_feature) + self.state_length,
hidden_sizes=hidden_sizes,
head_sizes=(1,),
activation=activation,
head_activations=(None,),
)
self.softmax = nn.Softmax(dim=0)
self.action_factor = action_factor
self.log_likelihood_factor = log_likelihood_factor
def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):
try:
policy_input = self._prepare_policy_input(state, legal_actions, step_rewards=step_rewards)
check_for_nans("Policy input", policy_input)
(probs,) = self.actor(policy_input)
check_for_nans("Policy probabilities", probs)
probs = self.softmax(probs).flatten()
except NanException:
logger.error("NaNs appeared when evaluating the policy.")
logger.error(f" state: {state}")
logger.error(f" legal actions: {legal_actions}")
logger.error(f" step rewards: {step_rewards}")
logger.error(f" action: {action}")
logger.error(f" policy weights: {list(self.parameters())}")
logger.error(f" mean weight: {self.get_mean_weight()}")
raise
if action is not None:
assert action in legal_actions
return probs[legal_actions.index(action)]
return probs
def _prepare_policy_input(self, state, legal_actions, step_rewards=None):
""" Prepares the input to the policy """
check_for_nans("Raw state", state)
state_ = state.view(-1)
if step_rewards is None or not step_rewards:
step_rewards = [None for _ in legal_actions]
batch_states = []
assert legal_actions
assert step_rewards
assert len(legal_actions) == len(step_rewards)
for action, log_likelihood in zip(legal_actions, step_rewards):
action_ = self.action_factor * torch.tensor([action]).to(self.device, self.dtype)
i, j = self.env.unwrap_action(action)
pi = state[i, :]
pj = state[j, :]
check_for_nans("Individual momenta", pi, pj)
if self.log_likelihood_feature:
if log_likelihood is None:
log_likelihood = self._parse_action(action, from_which_env="real")
if not np.isfinite(log_likelihood):
log_likelihood = 0.0
log_likelihood = np.clip(log_likelihood, self.reward_range[0], self.reward_range[1])
log_likelihood_ = self.log_likelihood_factor * torch.tensor([log_likelihood]).to(
self.device, self.dtype
)
check_for_nans("Log likelihood as policy input", log_likelihood_)
combined_state = torch.cat((action_, pi, pj, log_likelihood_, state_), dim=0)
check_for_nans("Individual policy input entry", combined_state)
else:
combined_state = torch.cat((action_, pi, pj, state_), dim=0)
check_for_nans("Individual policy input entry", combined_state)
batch_states.append(combined_state.unsqueeze(0))
batch_states = torch.cat(batch_states, dim=0)
check_for_nans("Concatenated policy input", batch_states)
return batch_states
def _train(self, log_prob):
loss = -log_prob
check_for_nans("Loss", loss)
self._gradient_step(loss)
return loss.item()
def get_mean_weight(self):
parameters = np.concatenate([param.detach().numpy().flatten() for _, param in self.named_parameters()], 0)
return np.mean(np.abs(parameters))
class RandomMCTSAgent(BaseMCTSAgent):
def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):
""" Evaluates the policy on the state and returns the probabilities for a given action or all legal actions """
if action is not None:
return torch.tensor(1.0 / len(legal_actions), dtype=self.dtype)
else:
return 1.0 / len(legal_actions) * torch.ones(len(legal_actions), dtype=self.dtype)
def _train(self, log_prob):
return torch.tensor(0.0)
class LikelihoodMCTSAgent(BaseMCTSAgent):
def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):
""" Evaluates the policy on the state and returns the probabilities for a given action or all legal actions """
assert step_rewards is not None
probabilities = torch.exp(torch.tensor(step_rewards, dtype=self.dtype))
probabilities = probabilities / torch.sum(probabilities)
if action is not None:
            return probabilities[legal_actions.index(action)]
else:
return probabilities
def _train(self, log_prob):
return torch.tensor(0.0)
```
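The selection rule `select_puct` used throughout `BaseMCTSAgent` lives on the MCTS node class, which is not part of this file. The sketch below illustrates the standard PUCT criterion under the assumption that nodes expose `n`, `q`, `q_max`, and a `children` dict keyed by action, mirroring how they are accessed above; it is an illustration, not the repository's implementation.
```python
# Illustrative PUCT selection step: Q(s, a) + c_puct * P(a) * sqrt(N(s)) / (1 + N(s, a)).
# Node attributes (n, q, q_max, children) are assumptions inferred from BaseMCTSAgent.
import math


def select_puct_sketch(node, policy_probs, c_puct=1.0, mode="mean"):
    best_action, best_score = None, -math.inf
    for prob, (action, child) in zip(policy_probs, node.children.items()):
        if mode == "mean":
            value = child.q / (child.n + 1e-9)   # mean backed-up reward
        else:                                    # "max" planning mode
            value = child.q_max                  # best reward seen so far
        exploration = c_puct * float(prob) * math.sqrt(node.n + 1e-9) / (1 + child.n)
        score = value + exploration
        if score > best_score:
            best_action, best_score = action, score
    return best_action
```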
#### File: ginkgo_rl/eval/cluster_interface.py
```python
from tqdm import tqdm
import pickle
import logging
import torch
from copy import deepcopy
import numpy as np
from ginkgo_rl.envs import GinkgoLikelihood1DEnv
from ginkgo_rl.agents import PolicyMCTSAgent
logger = logging.getLogger(__name__)
class GinkgoRLInterface:
def __init__(self, state_dict_filename, **kwargs):
self.env = self._make_env(**kwargs)
self.agent = self._make_agent(state_dict_filename, **kwargs)
def generate(self, n):
""" Generates a number of jets and returns the jet dictionary """
logger.info(f"Generating {n} jets")
jets = []
for _ in range(n):
self.env.reset()
jets.append(self.env.get_internal_state()[0])
logger.info(f"Done")
return jets
def cluster(self, jets, filename=None, mode=None):
""" Clusters all jets in a jet dictionary with the MCTS agent. """
jets = self._load_jets(jets)
logger.info(f"Clustering {len(jets)} jets")
reclustered_jets = []
log_likelihoods = []
illegal_actions = []
likelihood_evaluations = []
for jet in tqdm(jets):
with torch.no_grad():
reclustered_jet, log_likelihood, error, likelihood_evaluation = self._episode(jet, mode=mode)
reclustered_jets.append(reclustered_jet)
log_likelihoods.append(log_likelihood)
illegal_actions.append(error)
likelihood_evaluations.append(likelihood_evaluation)
if filename is not None:
self._save_jets(reclustered_jets, filename)
logger.info("Done")
return reclustered_jets, log_likelihoods, illegal_actions, likelihood_evaluations
def _episode(self, jet, mode=None):
""" Clusters a single jet """
# Initialize
self.agent.eval()
self.env.set_internal_state(self._jet_to_internal_state(jet))
state = self.env.get_state()
done = False
log_likelihood = 0.0
errors = 0
reward = 0.0
likelihood_evaluations = 0
reclustered_jet = self._init_reclustered_jet(jet)
# Point agent to correct env and initialize episode: this only works for *our* models, not the baselines
try:
self.agent.set_env(self.env)
self.agent.init_episode()
except:
pass
while not done:
# Agent step
if self.agent is None:
action = self.env.action_space.sample()
agent_info = {}
elif mode is None:
action, agent_info = self.agent.predict(state)
likelihood_evaluations = max(agent_info["likelihood_evaluations"], likelihood_evaluations)
else:
action, agent_info = self.agent.predict(state, mode=mode)
likelihood_evaluations = max(agent_info["likelihood_evaluations"], likelihood_evaluations)
# Environment step
next_state, next_reward, done, info = self.env.step(action)
# Keep track of clustered tree
if info["legal"]:
self._update_reclustered_jet_with_action(reclustered_jet, action, next_reward)
# Keep track of metrics
log_likelihood += next_reward
if not info["legal"]:
errors += 1
# Update model: this only works for *our* models, not the baselines
try:
self.agent.update(
state, reward, action, done, next_state, next_reward=next_reward, num_episode=0, **agent_info
)
except:
pass
reward, state = next_reward, next_state
self._finalize_reclustered_jet(reclustered_jet)
return reclustered_jet, float(log_likelihood), int(errors), int(likelihood_evaluations)
def _init_reclustered_jet(self, jet, delete_keys=("deltas", "draws", "dij", "ConstPhi", "PhiDelta", "PhiDeltaRel")):
reclustered_jet = deepcopy(jet)
reclustered_jet["content"] = list(deepcopy(reclustered_jet["leaves"]))
reclustered_jet["tree"] = [[-1, -1] for _ in reclustered_jet["content"]]
reclustered_jet["logLH"] = [0.0 for _ in reclustered_jet["content"]]
reclustered_jet["root_id"] = None
reclustered_jet["algorithm"] = "mcts"
reclustered_jet["current_particles"] = set(
range(len(reclustered_jet["content"]))
) # dict IDs of current particles
for key in delete_keys:
try:
del reclustered_jet[key]
except:
logger.info(f"Jet dict did not contain field {key}")
return reclustered_jet
def _update_reclustered_jet_with_action(self, reclustered_jet, action, step_log_likelihood):
# Parse action
i_en, j_en = self.env.unwrap_action(action) # energy-sorted IDs of the particles to be merged
particles = [(dict_id, reclustered_jet["content"][dict_id]) for dict_id in reclustered_jet["current_particles"]]
particles = sorted(
particles, reverse=True, key=lambda x: x[1][0]
) # (dict_ID, four_momentum) of current particles, sorted by E
i_dict, j_dict = particles[i_en][0], particles[j_en][0] # dict IDs of the particles to be merged
logger.debug(f"Parsing action {action}:")
logger.debug(" E-ranking | dict ID | momentum ")
for en_id, (dict_id, four_momentum) in enumerate(particles):
logger.debug(
f" {'x' if dict_id in (i_dict, j_dict) else ' '} {en_id:>7d} | {dict_id:>7d} | {four_momentum} "
)
# Perform action
new_momentum = reclustered_jet["content"][i_dict] + reclustered_jet["content"][j_dict]
reclustered_jet["content"].append(new_momentum)
reclustered_jet["tree"].append([i_dict, j_dict])
k_dict = len(reclustered_jet["content"]) - 1
reclustered_jet["root_id"] = k_dict
reclustered_jet["logLH"].append(step_log_likelihood)
reclustered_jet["current_particles"].remove(i_dict)
reclustered_jet["current_particles"].remove(j_dict)
reclustered_jet["current_particles"].add(k_dict)
def _finalize_reclustered_jet(self, reclustered_jet):
reclustered_jet["content"] = np.asarray(reclustered_jet["content"])
reclustered_jet["logLH"] = np.asarray(reclustered_jet["logLH"])
reclustered_jet["tree"] = np.asarray(reclustered_jet["tree"], dtype=np.int)
del reclustered_jet["current_particles"]
def _make_agent(
self,
state_dict,
initialize_mcts_with_beamsearch=True,
log_likelihood_policy_input=True,
decision_mode="max_reward",
reward_range=(-500.0, 0.0),
hidden_sizes=(100, 100),
activation=torch.nn.ReLU(),
n_mc_target=2,
n_mc_min=0,
n_mc_max=50,
beamsize=20,
planning_mode="mean",
c_puct=1.0,
device=torch.device("cpu"),
dtype=torch.float,
**kwargs,
):
agent = PolicyMCTSAgent(
self.env,
reward_range=reward_range,
n_mc_target=n_mc_target,
n_mc_min=n_mc_min,
n_mc_max=n_mc_max,
planning_mode=planning_mode,
initialize_with_beam_search=initialize_mcts_with_beamsearch,
log_likelihood_feature=log_likelihood_policy_input,
c_puct=c_puct,
device=device,
dtype=dtype,
verbose=0,
decision_mode=decision_mode,
beam_size=beamsize,
hidden_sizes=hidden_sizes,
activation=activation,
)
try:
state_dict = torch.load(state_dict)
except:
pass
agent.load_state_dict(state_dict)
return agent
def _make_env(
self,
illegal_reward=-100.0,
illegal_actions_patience=3,
n_max=20,
n_min=2,
n_target=1,
min_reward=-100.0,
state_rescaling=0.01,
padding_value=-1.0,
w_jet=True,
w_rate=3.0,
qcd_rate=1.5,
pt_min=4.0 ** 2,
qcd_mass=30.0,
w_mass=80.0,
jet_momentum=400.0,
jetdir=(1, 1, 1),
max_n_try=1000,
**kwargs,
):
env = GinkgoLikelihood1DEnv(
illegal_reward,
illegal_actions_patience,
n_max,
n_min,
n_target,
min_reward,
state_rescaling,
padding_value,
w_jet,
max_n_try,
w_rate,
qcd_rate,
pt_min,
qcd_mass,
w_mass,
jet_momentum,
jetdir,
)
return env
def _load_jets(self, jets):
try:
with open(jets, "rb") as f:
return pickle.load(f)
except:
return jets
def _save_jets(self, jets, filename):
logger.info(f"Saving clustered jets at {filename}")
with open(filename, "wb") as f:
pickle.dump(jets, f)
def _internal_state_to_jet(self, internal_state):
return internal_state[0]
def _jet_to_internal_state(self, jet):
"""
Translates a jet dict to the environment internal state, a 5-tuple of the form (jet_dict, n_particles, state, is_leaf, illegal_action_counter).
Only works for "initial" states (no clustering so far, only observed particles).
"""
n = len(jet["leaves"])
state = self.env.padding_value * np.ones((self.env.n_max, 4))
state[:n] = self.env.state_rescaling * jet["leaves"]
is_leaf = [(i < n) for i in range(self.env.n_max)]
illegal_action_counter = 0
# energy sorting
idx = sorted(list(range(self.env.n_max)), reverse=True, key=lambda i: state[i, 0])
state = state[idx, :]
        is_leaf = np.asarray(is_leaf, dtype=bool)[idx]
internal_state = (jet, n, state, is_leaf, illegal_action_counter)
return internal_state
```
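A hedged usage sketch of `GinkgoRLInterface`: the state-dict path and keyword values below are placeholders, and the constructor simply forwards extra keyword arguments to `_make_env` and `_make_agent` as defined above.
```python
# Usage sketch; "policy_state_dict.pt" and the keyword values are placeholders.
from ginkgo_rl.eval.cluster_interface import GinkgoRLInterface

interface = GinkgoRLInterface("policy_state_dict.pt", beamsize=20, n_max=20)

jets = interface.generate(10)  # draw 10 jets from the Ginkgo environment
reclustered, log_likelihoods, n_illegal, n_evaluations = interface.cluster(
    jets, filename="reclustered_jets.pkl"
)
print(f"mean log likelihood: {sum(log_likelihoods) / len(log_likelihoods):.2f}")
```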
#### File: ginkgo_rl/eval/evaluator.py
```python
import gym
import numpy as np
from matplotlib import pyplot as plt
import sys
import os
from tqdm import trange
import pickle
import logging
import torch
from ginkgo_rl import GinkgoLikelihoodEnv, GinkgoLikelihood1DEnv
logger = logging.getLogger(__name__)
# Workaround for now until Trellis is better packaged
try:
sys.path.append("/Users/johannbrehmer/work/projects/shower_rl/hierarchical-trellis/src")
sys.path.append("/scratch/jb6504/hierarchical-trellis/src")
from run_physics_experiment_invM import compare_map_gt_and_bs_trees as compute_trellis
except Exception:
logger.warning("Error importing hierarchical trellis code.")
compute_trellis = None
try:
sys.path.insert(0, "/Users/johannbrehmer/work/projects/shower_rl/ReclusterTreeAlgorithms/scripts")
sys.path.insert(0, "/scratch/jb6504/ReclusterTreeAlgorithms/scripts")
sys.path.insert(0, "/scratch/jb6504/ReclusterTreeAlgorithms")
import beamSearchOptimal_invM as beam_search
except Exception:
logger.warning("Error importing beam search code.")
beam_search = None
class GinkgoEvaluator:
def __init__(self, filename, env, redraw_existing_jets=False, n_jets=100):
self.filename = filename
self.env = env
self.methods = [] # Method names
self.log_likelihoods = {} # Log likelihood results
self.illegal_actions = {} # Number of illegal actions
if os.path.exists(filename) and not redraw_existing_jets:
self._load()
else:
self.n_jets = n_jets
self.jets = self._init_jets()
self._save()
def eval_true(self, method):
log_likelihoods = [[self._compute_true_log_likelihood(jet)] for jet in self.jets]
illegal_actions = [[0] for _ in self.jets]
likelihood_evaluations = [[0] for _ in self.jets]
self._update_results(method, log_likelihoods, illegal_actions)
return log_likelihoods, illegal_actions, likelihood_evaluations
def eval_exact_trellis(self, method):
log_likelihoods = [[self._compute_maximum_log_likelihood(jet)] for jet in self.jets]
illegal_actions = [[0] for _ in self.jets]
likelihood_evaluations = [[0] for _ in self.jets]
self._update_results(method, log_likelihoods, illegal_actions)
return log_likelihoods, illegal_actions, likelihood_evaluations
def eval_beam_search(self, method, beam_size):
log_likelihoods = [[self._compute_beam_search_log_likelihood(jet, beam_size)] for jet in self.jets]
illegal_actions = [[0] for _ in self.jets]
likelihood_evaluations = [
[self._compute_beam_search_likelihood_evaluations(jet, beam_size)] for jet in self.jets
]
self._update_results(method, log_likelihoods, illegal_actions)
return log_likelihoods, illegal_actions, likelihood_evaluations
def eval(self, method, model, n_repeats=1, mode=None):
log_likelihoods = [[] for _ in range(self.n_jets)]
illegal_actions = [[] for _ in range(self.n_jets)]
likelihood_evaluations = [[] for _ in range(self.n_jets)]
        if model is not None:
            model.eval()
for i in trange(len(self.jets) * n_repeats):
i_jet = i // n_repeats
jet = self.jets[i_jet]
self.env.set_internal_state(jet)
with torch.no_grad():
log_likelihood, error, likelihood_evaluation = self._episode(model, mode=mode)
log_likelihoods[i_jet].append(log_likelihood)
illegal_actions[i_jet].append(error)
likelihood_evaluations[i_jet].append(likelihood_evaluation)
self._update_results(method, log_likelihoods, illegal_actions)
return log_likelihoods, illegal_actions, likelihood_evaluations
def eval_random(self, method, n_repeats=1):
return self.eval(method, None, n_repeats)
def get_jet_info(self):
return {"n_leaves": np.array([len(jet[0]["leaves"]) for jet in self.jets], dtype=np.int)}
def get_results(self):
for method in self.methods:
yield method, self.log_likelihoods[method], self.illegal_actions[method]
def __str__(self):
lengths = 20, 6, 3
results = []
for method, log_likelihood, illegals in self.get_results():
mean_log_likelihood = np.nanmean([np.nanmean(x) for x in log_likelihood])
mean_illegals = np.nanmean([np.nanmean(x) for x in illegals])
results.append((method, mean_log_likelihood, mean_illegals))
lines = []
lines.append("")
lines.append("-" * (lengths[0] + lengths[1] + lengths[2] + (3 - 1) * 3))
lines.append(f"{'Method':>{lengths[0]}s} | {'Log p':>{lengths[1]}s} | {'Err':>{lengths[2]}s}")
lines.append("-" * (lengths[0] + lengths[1] + lengths[2] + (3 - 1) * 3))
for method, mean_log_likelihood, mean_illegals in sorted(results, key=lambda x: x[1], reverse=True):
lines.append(
f"{method:>{lengths[0]}s} | {mean_log_likelihood:>{lengths[1]}.{lengths[1] - 4}f} | {mean_illegals:>{lengths[2]}.{lengths[2] - 2}f}"
)
lines.append("-" * (lengths[0] + lengths[1] + lengths[2] + (3 - 1) * 3))
lines.append("")
return "\n".join(lines)
def plot_log_likelihoods(
self,
cols=2,
rows=4,
ymax=0.5,
deltax_min=1.0,
deltax_max=10.0,
xbins=25,
panelsize=4.0,
filename=None,
linestyles=["-", "--", ":", "-."],
colors=[f"C{i}" for i in range(9)],
):
colors = colors * 10
linestyles = linestyles * 10
fig = plt.figure(figsize=(rows * panelsize, cols * panelsize))
for j in range(self.n_jets):
            if j >= cols * rows:
break
plt.subplot(cols, rows, j + 1)
xs = np.concatenate([logp[j] for logp in self.log_likelihoods.values()], axis=0)
xmin, xmax = np.min(xs), np.max(xs)
xmin = np.clip(xmin, xmax - deltax_max, xmax - deltax_min)
xmax = xmax + 0.05 * (xmax - xmin)
xmin = xmin - 0.05 * (xmax - xmin)
ls_counter = 0
for i, (name, logp, _) in enumerate(self.get_results()):
logp_ = np.clip(logp, xmin + 1.0e-9, xmax - 1.0e-9)
if len(logp[j]) == 1:
plt.plot(
[logp_[j][0], logp_[j][0]], [0.0, ymax], color=colors[i], ls=linestyles[ls_counter], label=name
)
ls_counter += 1
else:
plt.hist(
logp_[j],
histtype="stepfilled",
range=(xmin, xmax),
color=colors[i],
bins=xbins,
lw=1.5,
density=True,
alpha=0.15,
)
plt.hist(
logp_[j],
histtype="step",
range=(xmin, xmax),
bins=xbins,
color=colors[i],
lw=1.5,
density=True,
label=name,
)
if j == 0:
plt.legend()
plt.xlabel("Log likelihood")
plt.ylabel("Histogram")
plt.xlim(xmin, xmax)
plt.ylim(0.0, ymax)
plt.tight_layout()
if filename is not None:
plt.savefig(filename)
return fig
def _update_results(self, method, log_likelihoods, illegal_actions):
self.log_likelihoods[method] = log_likelihoods
self.illegal_actions[method] = illegal_actions
def _save(self):
data = {"n_jets": self.n_jets, "jets": self.jets}
with open(self.filename, "wb") as file:
pickle.dump(data, file)
def _load(self):
with open(self.filename, "rb") as file:
data = pickle.load(file)
self.n_jets = data["n_jets"]
self.jets = data["jets"]
def _init_jets(self):
logger.info("Generating evaluation jets")
jets = []
for _ in range(self.n_jets):
self.env.reset()
jets.append(self.env.get_internal_state())
sizes = np.array([len(jet[0]["leaves"]) for jet in jets])
logger.info(
f" Generated jets with min size {np.min(sizes)}, mean size {np.mean(sizes)}, max size {np.max(sizes)}"
)
return jets
def _episode(self, model, mode=None):
state = self.env.get_state()
done = False
log_likelihood = 0.0
errors = 0
reward = 0.0
likelihood_evaluations = 0
# Point agent to correct env and initialize episode: this only works for *our* models, not the baselines
try:
model.set_env(self.env)
model.init_episode()
except:
pass
while not done:
if model is None:
action = self.env.action_space.sample()
agent_info = {}
elif mode is None:
action, agent_info = model.predict(state)
likelihood_evaluations = max(agent_info["likelihood_evaluations"], likelihood_evaluations)
else:
action, agent_info = model.predict(state, mode=mode)
likelihood_evaluations = max(agent_info["likelihood_evaluations"], likelihood_evaluations)
next_state, next_reward, done, info = self.env.step(action)
log_likelihood += next_reward
if not info["legal"]:
errors += 1
# Update model: this only works for *our* models, not the baselines
try:
model.update(
state, reward, action, done, next_state, next_reward=next_reward, num_episode=0, **agent_info
)
except:
pass
reward, state = next_reward, next_state
return float(log_likelihood), int(errors), int(likelihood_evaluations)
@staticmethod
def _compute_true_log_likelihood(jet):
return sum(jet[0]["logLH"])
@staticmethod
def _compute_maximum_log_likelihood(jet, max_leaves=11):
""" Based on Sebastian's code at https://github.com/iesl/hierarchical-trellis/blob/sebastian/src/Jet_Experiments_invM_exactTrellis.ipynb """
if len(jet[0]["leaves"]) > max_leaves:
return np.nan
_, _, max_log_likelihood, _, _ = compute_trellis(jet[0])
return max_log_likelihood
@staticmethod
def _compute_beam_search_log_likelihood(jet, beam_size):
n = len(jet[0]["leaves"])
bs_jet = beam_search.recluster(
jet[0],
beamSize=min(beam_size, n * (n - 1) // 2),
delta_min=jet[0]["pt_cut"],
lam=float(jet[0]["Lambda"]),
N_best=1,
visualize=True,
)[0]
return sum(bs_jet["logLH"])
@staticmethod
def _compute_beam_search_likelihood_evaluations(jet, beam_size):
n = len(jet[0]["leaves"])
beam = 1
evaluations = 0
while n > 1:
evaluations += beam * n * (n - 1) // 2
beam = beam_size
n -= 1
return evaluations
```
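A short, hedged sketch of how `GinkgoEvaluator` is meant to be driven; the pickle filename is a placeholder and the default `GinkgoLikelihood1DEnv()` constructor is assumed for illustration only.
```python
# Usage sketch; the filename is a placeholder and the default env constructor is assumed.
from ginkgo_rl import GinkgoLikelihood1DEnv
from ginkgo_rl.eval.evaluator import GinkgoEvaluator

env = GinkgoLikelihood1DEnv()
evaluator = GinkgoEvaluator("eval_jets.pkl", env, n_jets=20)

evaluator.eval_true("Truth")                  # log likelihood of the generating trees
evaluator.eval_random("Random", n_repeats=5)  # random-clustering baseline
# evaluator.eval("MCTS", trained_agent)       # trained_agent would be a PolicyMCTSAgent

print(evaluator)  # renders a table of mean log p and illegal actions per method
```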
#### File: ginkgo_rl/utils/replay_buffer.py
```python
import numpy as np
import torch
import random
import logging
logger = logging.getLogger(__name__)
class History:
""" Generic replay buffer. Can accommodate arbitrary fields. """
def __init__(self, max_length=None, dtype=torch.float, device=torch.device("cpu")):
self.memories = None
self.max_length = max_length
self.data_pointer = 0
self.is_full = False
if max_length:
self.memories = np.empty((max_length,), dtype=object)
else:
self.memories = np.empty((128,), dtype=object) # double memory size each time limit is hit
self.device = device
self.dtype = dtype
def store(self, **kwargs):
self.memories[self.data_pointer] = kwargs
self.is_full = False
self.data_pointer += 1
if self.max_length is not None and self.data_pointer >= self.max_length:
self.data_pointer = 0
self.is_full = True
if self.data_pointer >= self.memories.shape[0] and self.max_length is None:
# self.memories.resize(self.memories.shape * 2) # Raises some ValueError
self.memories = np.resize(self.memories, self.memories.shape[0] * 2)
# @timeit
def sample(self, n):
idx = random.sample(range(len(self)), k=n)
data_batch = self.memories[idx]
minibatch = {k: [dic[k] for dic in data_batch] for k in data_batch[0]}
return idx, None, minibatch
def rollout(self, n=None):
""" When n is not None, returns only the last n entries """
data_batch = self.memories[: len(self)] if n is None else self.memories[len(self) - n : len(self)]
minibatch = {k: [dic[k] for dic in data_batch] for k in data_batch[0]}
return minibatch
def __len__(self):
if self.max_length is None:
return self.data_pointer
else:
if self.is_full:
return self.max_length
else:
return self.data_pointer
def clear(self):
if self.max_length:
self.memories = np.empty((self.max_length,), dtype=object)
else:
self.memories = np.empty((128,), dtype=object) # double memory size each time limit is hit
self.data_pointer = 0
class SequentialHistory(History):
""" Generic replay buffer where each entry represents a sequence of events. Can accommodate arbitrary fields. """
def __init__(self, max_length=None, dtype=torch.float, device=torch.device("cpu")):
super().__init__(max_length=max_length, dtype=dtype, device=device)
self.current_sequence = dict()
def current_sequence_length(self):
if len(self.current_sequence) == 0:
return 0
else:
return len(self.current_sequence[list(self.current_sequence.keys())[0]])
def store(self, **kwargs):
# Store in temporary sequence buffer
if self.current_sequence_length() == 0: # Nothing saved in current sequence
for key, val in kwargs.items():
self.current_sequence[key] = [val]
self.current_sequence["first"] = [True]
else:
for key, val in kwargs.items():
self.current_sequence[key].append(val)
self.current_sequence["first"].append(False)
def flush(self):
""" Push current sequence to ("long-term") memory """
assert self.current_sequence_length() > 0
super().store(**self.current_sequence)
self.current_sequence = dict()
``` |
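The two buffers are generic key-value stores; the snippet below, grounded only in the methods defined in this file, shows the intended store / flush / rollout / sample cycle of `SequentialHistory`.
```python
# Minimal illustration of the SequentialHistory API defined above.
import torch

from ginkgo_rl.utils.replay_buffer import SequentialHistory

buffer = SequentialHistory(max_length=None)

# During an episode, store per-step data in the temporary sequence buffer ...
for step in range(3):
    buffer.store(state=torch.zeros(4), action=step, reward=-float(step))
# ... and push the whole sequence to long-term memory when the episode ends.
buffer.flush()

rollout = buffer.rollout()             # dict of lists, one entry per stored sequence
idx, _, minibatch = buffer.sample(1)   # uniform sample over stored sequences
print(len(buffer), sorted(rollout.keys()))
```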
{
"source": "johannbrehmer/paper-inferno",
"score": 2
} |
#### File: paper-inferno/code/benchmarking.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tqdm import tqdm
from os import path
from glob import glob
import argparse
from template_model import TemplateModel
from summary_statistic_computer import SummaryStatisticComputer
from synthetic_3D_example import SyntheticThreeDimExample
from extended_model import ExtendedModel
from train_helpers import NumpyEncoder
import numpy as np
import pandas as pd
import tensorflow as tf
import json as json
parser = argparse.ArgumentParser()
parser.add_argument("--model_re", help="regular expression for models")
parser.add_argument("--model_type", help="inf or clf")
parser.add_argument("--fine_scan", help="compute shapes finely",
action="store_true")
aux_none = [None, None, None, None]
aux_std = [None, 0.4, 1.0, 100.]
benchmarks = {"b_0": (["s_exp"], aux_none),
"b_1": (["s_exp", "r_dist"], aux_none),
"b_2": (["s_exp", "r_dist", "b_rate"], aux_none),
"b_2_aux": (["s_exp", "r_dist", "b_rate"], aux_std),
"b_3_aux": (["s_exp", "r_dist", "b_rate", "b_exp"], aux_std)}
def marginal(pars, aux_std, poi="s_exp", row_name="fisher_matrix"):
def marginal_computer(row):
f = row[row_name]
aux_diag = [1. / (e**2) if e is not None else 0. for e in aux_std]
f_total = f.add_matrix(np.diag(aux_diag))
return f_total.marginals(pars)[poi]
return marginal_computer
def benchmark_model(model_re, model_type, fine_scan=False):
results = {}
tm = TemplateModel()
if fine_scan:
pars_scan = {"r_dist": np.round(np.linspace(1.5, 2.5, 21, endpoint=True),
decimals=2),
"b_rate": np.round(np.linspace(2.0, 4.0, 21, endpoint=True),
decimals=1)}
ssc = SummaryStatisticComputer(pars_scan=pars_scan)
else:
ssc = SummaryStatisticComputer()
sess = tf.Session()
model_types = {"clf": ssc.classifier_shapes,
"inf": ssc.inferno_shapes}
model_paths = glob(model_re)
common_path = path.dirname(path.commonprefix(model_paths))
print(common_path)
for model_path in tqdm(model_paths):
info_json_path = f"{model_path}/info.json"
if path.exists(info_json_path):
with open(f"{model_path}/info.json") as fp:
info = json.load(fp)
else:
info = {}
with sess.as_default():
shapes = model_types[model_type](model_path, sess=sess)
with open(f"{model_path}/templates.json", 'w') as t_file:
json.dump({str(k): v for k, v in shapes.items()},
t_file, cls=NumpyEncoder)
tm.templates_from_dict(shapes)
fisher_matrix = tm.asimov_hess(sess=sess)
results[model_path] = {"common_path": common_path,
"fisher_matrix": fisher_matrix,
**info}
df = pd.DataFrame.from_dict(results, orient="index")
for b_name, config in benchmarks.items():
pars, aux = config
df.loc[:, b_name] = df.apply(marginal(pars, aux), axis=1)
if not fine_scan:
df.to_csv(f"{common_path}/results.csv")
return df
def benchmark_optimal(path=None):
results = {}
tm = TemplateModel()
ssc = SummaryStatisticComputer()
sess = tf.Session()
with sess.as_default():
shapes = ssc.optimal_shapes(sess=sess)
tm.templates_from_dict(shapes)
fisher_matrix = tm.asimov_hess(sess=sess)
results["optimal"] = {"common_path": "optimal",
"fisher_matrix": fisher_matrix}
df = pd.DataFrame.from_dict(results, orient="index")
for b_name, config in benchmarks.items():
pars, aux = config
df.loc[:, b_name] = df.apply(marginal(pars, aux), axis=1)
if path is not None:
df.to_csv(path)
return df
def benchmark_likelihood(path=None):
results = {}
aux = {}
problem = SyntheticThreeDimExample()
x_values = tf.placeholder(dtype=tf.float32, shape=(None, 3), name="x_values")
em = ExtendedModel(problem, aux=aux)
sess = tf.Session()
with sess.as_default():
bkg_t = problem.transform_bkg(x_values)
valid_arrays = sess.run(problem.valid_data())
bkg_t_arr = sess.run(bkg_t, {x_values: valid_arrays["bkg"]})
obs_phs = {em.s_n_exp: 50.,
em.b_n_exp: 1000.,
em.s_data: valid_arrays["sig"],
em.b_data: bkg_t_arr}
fisher_matrix = em.hess(par_phs={}, obs_phs=obs_phs, sess=sess)[0]
results["likelihood"] = {"common_path": "likelihood",
"fisher_matrix": fisher_matrix}
df = pd.DataFrame.from_dict(results, orient="index")
for b_name, config in benchmarks.items():
pars, aux = config
df.loc[:, b_name] = df.apply(marginal(pars, aux), axis=1)
if path is not None:
df.to_csv(path)
return df
def main():
args = parser.parse_args()
benchmark_model(args.model_re, args.model_type, args.fine_scan)
if __name__ == '__main__':
main()
``` |
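A hedged example of calling the benchmarking helpers programmatically instead of through the CLI; the glob pattern and output paths are placeholders, and the TensorFlow session setup happens inside the helpers as shown above.
```python
# Programmatic use of the helpers above; the glob pattern and CSV paths are placeholders.
from benchmarking import benchmark_model, benchmark_optimal, benchmark_likelihood

# One row per trained model matching the pattern, one column per benchmark (b_0 ... b_3_aux);
# a results.csv is also written next to the models unless fine_scan is requested.
df_models = benchmark_model("models/inferno_*", model_type="inf")

# Reference points: summaries from the optimal statistic and the exact likelihood.
df_optimal = benchmark_optimal(path="results_optimal.csv")
df_likelihood = benchmark_likelihood(path="results_likelihood.csv")

print(df_models[["b_0", "b_2_aux"]].describe())
```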